repo_name (string, length 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
chemicstry/Ventilator | [ "4804d9d260c31325cfa72eece18d60f4c3624c78" ] | [ "software/utils/debug/debug_cli.py" ] | [
"#!/usr/bin/env python3\n\n# Ventilator debug self.interface: simple command line self.interface\n# For a list of available commands, enter 'help'\n\n__copyright__ = \"Copyright 2021 RespiraWorks\"\n\n__license__ = \"\"\"\n\n Copyright 2021 RespiraWorks\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\"\"\"\n\nimport argparse\nimport cmd\nimport glob\nimport os\nimport shlex\nimport traceback\nfrom lib.colors import *\nfrom lib.error import Error\nfrom lib.serial_detect import detect_stm32_ports, print_detected_ports\nfrom controller_debug import ControllerDebugInterface, MODE_BOOT\nfrom var_info import VAR_ACCESS_READ_ONLY, VAR_ACCESS_WRITE\nimport matplotlib.pyplot as plt\nimport test_data\nfrom pathlib import Path\n\n\nclass ArgparseShowHelpError(Exception):\n \"\"\"Exception raised when CmdArgumentParser encounters --help.\n\n Canonical way of handling this is to catch it in the main command loop and\n then ignore it. Argparse will print out the help message, and that's all\n the user needs to understand what happened.\n \"\"\"\n\n pass\n\n\nclass CmdArgumentParser(argparse.ArgumentParser):\n \"\"\"An ArgumentParser that doesn't call sys.exit() on error.\n\n https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.exit\n \"\"\"\n\n def exit(self, status=0, message=None):\n if status:\n raise Error(f\"Encountered a parse error: {message}\")\n else:\n raise ArgparseShowHelpError()\n\n\n# This class creates a simple command line interface using the standard\n# Python cmd module.\n#\n# Member functions named do_something will implement a command called\n# 'something'. 
See the Python documentation for the cmd module for\n# more details.\nclass CmdLine(cmd.Cmd):\n\n interface: ControllerDebugInterface\n scripts_directory: str\n test_scenarios_dir: Path\n test_data_dir: Path\n\n def __init__(self, port):\n super(CmdLine, self).__init__()\n self.scripts_directory = \"scripts\"\n self.test_scenarios_dir = Path(\"test_scenarios\").absolute().resolve()\n self.test_data_dir = Path(\"../../../test_data\").absolute().resolve()\n self.interface = ControllerDebugInterface()\n if not port:\n port = auto_select_port()\n if port:\n self.interface.connect(port)\n\n # We must do this so that autocomplete will work with dash `-` in filenames\n try:\n import readline\n\n delims = readline.get_completer_delims()\n readline.set_completer_delims(delims.replace(\"-\", \"\"))\n except ImportError:\n pass\n\n def autoload(self):\n for x in glob.glob(str(self.test_scenarios_dir / \"*.json\")):\n self.interface.tests_import(x)\n\n def update_prompt(self):\n if not self.interface.connected():\n self.prompt = purple(\"[OFFLINE] \")\n return\n\n self.prompt = purple(\"[ERROR] \")\n try:\n mode = self.interface.mode_get()\n except Error as e:\n print(red(str(e)))\n return\n except:\n traceback.print_exc()\n return\n\n if mode == MODE_BOOT:\n self.prompt = orange(f\"[{self.interface.serial_port.port}:boot] \")\n else:\n sn = self.interface.variable_get(\"0_ventilator_serial_number\", raw=True)\n if sn > 0:\n self.prompt = green(f\"[sn:{sn}] \")\n else:\n self.prompt = green(f\"[port:{self.interface.serial_port.port}] \")\n\n def cli_loop(self):\n self.autoload()\n if self.interface.connected():\n self.interface.resynchronize()\n self.interface.variables_update_info()\n self.update_prompt()\n if not self.interface.sanity_checks():\n return\n\n while True:\n try:\n return cmd.Cmd.cmdloop(self)\n except ArgparseShowHelpError:\n pass\n except Error as e:\n print(red(str(e)))\n except:\n traceback.print_exc()\n self.update_prompt()\n\n def emptyline(self):\n self.interface.resynchronize()\n self.update_prompt()\n\n def do_EOF(self, line):\n return True\n\n def do_exit(self, line):\n return True\n\n def do_quit(self, line):\n return True\n\n def do_env(self, line):\n \"\"\"Prints this command interface's environmental variables\n \"\"\"\n print(f\"scripts_directory = {self.scripts_directory}\")\n print(f\"test_scenarios_dir = {self.test_scenarios_dir}\")\n print(f\"self.test_data_dir = {self.test_data_dir}\")\n\n def do_debug(self, line):\n \"\"\"Sets display of low level serial data on/off.\n\n Usage:\n debug on\n debug off\n\n \"\"\"\n line = line.strip().lower()\n if line == \"on\":\n self.interface.print_raw = True\n elif line == \"off\":\n self.interface.print_raw = False\n else:\n print(\"Unknown command; pass 'on' or 'off'.\")\n\n def do_connect(self, line):\n \"\"\"This command manages the connection of the debug interface to the ventilator controller\n\nIf ventilator USB cable is unplugged, the CLI prompt should update to reflect this on the next loop\n(blank line or failed command). 
You may also start the debugger without a plugged in device.\nUse one of the following commands to reconnect.\n\nconnect list\n searches and lists available STM32 devices on the serial bus\n\nconnect auto\n attempt to connect to a plugged in controller automatically\n\nconnect <port>\n connect to controller on a specific port\n\n \"\"\"\n\n params = shlex.split(line)\n if len(params) < 1:\n print(red(\"Not enough args for `connect`\\n\"))\n return\n subcommand = params[0]\n\n if subcommand == \"list\":\n print_detected_ports()\n\n elif subcommand == \"auto\":\n self.interface.connect(auto_select_port())\n self.interface.resynchronize()\n self.interface.variables_update_info()\n self.update_prompt()\n\n else:\n self.interface.connect(params[0])\n self.interface.resynchronize()\n self.interface.variables_update_info()\n self.update_prompt()\n\n def help_run(self):\n print(\n f\"\"\"\\\nRun an external Python script which can send commands, set variables, etc.\n\nIf no explicit path is given then the current directory and a sub-directory\nnamed {self.scripts_directory} will be searched for the python script.\n \"\"\"\n )\n\n def do_run(self, line):\n params = shlex.split(line)\n\n if len(params) < 1:\n red(\"Must specify script name for `run` command\")\n return\n\n if not os.path.exists(self.scripts_directory + \"/\" + params[0] + \".py\"):\n print(\"Unknown script \" + params[0])\n return\n\n import_command = f\"import {self.scripts_directory}.{params[0]}\"\n exec(import_command)\n\n passed_params = \"\"\n if len(params) > 1:\n passed_params = ', \"' + \" \".join(params[1:]) + '\"'\n run_command = f\"{self.scripts_directory}.{params[0]}.{params[0]}(self.interface{passed_params})\"\n exec(run_command)\n\n def complete_run(self, text, line, begidx, endidx):\n wildcard_path = Path(self.scripts_directory) / (text + \"*.py\")\n return [Path(x).stem for x in glob.glob(str(wildcard_path))]\n\n def do_test(self, line):\n \"\"\"This command is for working with structured test scenarios and saved test data.\n\nA test scenario defines independent variables in question, whether they be ventilator settings\nor manual settings applicable to external test equipment. It may also specify test criteria\nwhich can be confirmed by human examination or other scripts. 
A scenario also defines which\nvariables to capture, how often to sample them and length of test run.\n\nRunning a test scenario will capture traced variables as well as a snapshot of all ventilator\nvariables and other available metadata concerning the testing environment.\n\nThe unique test identifier (as well as the file name) is defined as\n <UTC date-time>_<tester>_<scenario_name>\n\ntest load <file>\n loads test scenarios from specified .csv or .json <file>\n\ntest autoload\n loads all test scenarios found in `test_scenarios` subdirectory\n\ntest clear\n clears all test scenarios loaded in this session\n\ntest list [--verbose/-v]\n lists all test scenarios loaded in this session\n\ntest show <scenario>\n prints out the full definition of named <scenario>\n\ntest apply <scenario>\n applies all ventilator settings for named <scenario>\n\ntest run <scenario> [--verbose/-v] [--plot/-p] [--csv/-c]\n runs named <scenario> and saves data to .json\n --verbose/-v - also print out full trace data in columns\n --plot/-p - also plot traces and save plots as .png\n --csv/-c - also save traces as .csv\n\ntest read <file> [--verbose/-v] [--plot/-p] [--csv/-c]\n reads test data from <file> and prints it out,\n --verbose/-v - also print out full trace data in columns\n --plot/-p - also plot traces and save plots as .png\n --csv/-c - also save traces as .csv\n \"\"\"\n params = shlex.split(line)\n if len(params) < 1:\n print(red(\"Not enough args for `test`\\n\"))\n return\n subcommand = params[0]\n\n if subcommand == \"load\":\n if len(params) < 2:\n print(red(\"File name not provided for `test load`\\n\"))\n return\n file_name = self.test_scenarios_dir / params[1]\n self.interface.tests_import(str(file_name))\n\n elif subcommand == \"autoload\":\n for x in glob.glob(str(self.test_scenarios_dir / \"*.json\")):\n self.interface.tests_import(x)\n\n elif subcommand == \"clear\":\n self.interface.scenarios.clear()\n\n elif subcommand == \"list\":\n verbose = len(params) > 1 and (\n params[1] == \"-v\" or params[1] == \"--verbose\"\n )\n self.interface.tests_list(verbose)\n\n elif subcommand == \"show\":\n if len(params) < 2:\n print(red(\"Test name not provided for `test show`\\n\"))\n if params[1] not in self.interface.scenarios.keys():\n print(red(f\"Test `{params[1]}` does not exist\\n\"))\n print(self.interface.scenarios[params[1]].long_description())\n\n elif subcommand == \"apply\":\n if len(params) < 2:\n print(red(\"Test name not provided for `test apply`\\n\"))\n return\n if params[1] not in self.interface.scenarios.keys():\n print(red(f\"Test `{params[1]}` does not exist\\n\"))\n self.interface.test_apply(params[1])\n\n elif subcommand == \"run\":\n if len(params) < 2:\n print(red(\"Test name not provided for `test run`\\n\"))\n return\n if params[1] not in self.interface.scenarios.keys():\n print(red(f\"Test `{params[1]}` does not exist\\n\"))\n test = self.interface.test_run(params[1])\n if test is None:\n print(\"Aborted\")\n return\n test.save_json(str(self.test_data_dir), print_self=True)\n if len(params) > 2:\n parser = CmdArgumentParser(\"test\")\n parser.add_argument(\n \"--verbose\", \"-v\", default=False, action=\"store_true\"\n )\n parser.add_argument(\"--plot\", \"-p\", default=False, action=\"store_true\")\n parser.add_argument(\"--csv\", \"-c\", default=False, action=\"store_true\")\n args2 = parser.parse_args(params[2:])\n if args2.verbose:\n print(test.print_traces())\n if args2.plot:\n test.plot(str(self.test_data_dir), save=True, show=True)\n if args2.csv:\n 
test.save_csv(str(self.test_data_dir))\n\n elif subcommand == \"read\":\n if len(params) < 2:\n print(red(\"File name not provided for `test read`\\n\"))\n return\n file_name = self.test_data_dir / (params[1] + \".json\")\n test = test_data.TestData.from_json(str(file_name))\n print(test)\n if len(params) > 2:\n parser = CmdArgumentParser(\"test\")\n parser.add_argument(\n \"--verbose\", \"-v\", default=False, action=\"store_true\"\n )\n parser.add_argument(\"--plot\", \"-p\", default=False, action=\"store_true\")\n parser.add_argument(\"--csv\", \"-c\", default=False, action=\"store_true\")\n args2 = parser.parse_args(params[2:])\n if args2.verbose:\n print(test.print_traces())\n if args2.plot:\n test.plot(str(self.test_data_dir), save=True, show=True)\n if args2.csv:\n test.save_csv(str(self.test_data_dir))\n\n else:\n print(\"Invalid test args: {}\", params)\n\n def get_dataset_names(self, text=\"\"):\n wildcard_path = self.test_data_dir / (text + \"*.json\")\n return [str(Path(x).stem) for x in glob.glob(str(wildcard_path))]\n\n def get_scenario_file_names(self, text=\"\"):\n wildcard_path = self.test_scenarios_dir / (text + \"*\")\n return [str(Path(x).name) for x in glob.glob(str(wildcard_path))]\n\n def complete_test(self, text, line, begidx, endidx):\n sub_commands = [\n \"load\",\n \"autoload\",\n \"clear\",\n \"list\",\n \"show\",\n \"apply\",\n \"run\",\n \"read\",\n ]\n tokens = line.split()\n if len(tokens) == 3 and tokens[1] == \"read\":\n return self.get_dataset_names(text)\n elif len(tokens) == 3 and tokens[1] == \"load\":\n return self.get_scenario_file_names(text)\n elif len(tokens) == 3 and tokens[1] in [\"run\", \"apply\", \"show\"]:\n return [x for x in self.interface.scenarios.keys() if x.startswith(text)]\n elif len(tokens) == 2 and text == \"read\":\n return [\"read \"]\n elif len(tokens) == 2 and tokens[1] == \"read\" and text == \"\":\n return self.get_dataset_names(\"\")\n elif len(tokens) == 2 and text == \"load\":\n return [\"load \"]\n elif len(tokens) == 2 and tokens[1] == \"load\" and text == \"\":\n return self.get_scenario_file_names(\"\")\n elif len(tokens) == 2 and text in [\"run\", \"apply\", \"show\"]:\n return [text + \" \"]\n elif len(tokens) == 2 and text == \"\" and tokens[1] in [\"run\", \"apply\", \"show\"]:\n return [x for x in self.interface.scenarios.keys()]\n elif len(tokens) == 2 and any(s.startswith(text) for s in sub_commands):\n return [s for s in sub_commands if s.startswith(text)]\n elif len(tokens) == 1:\n return sub_commands\n\n def do_exec(self, line):\n \"\"\"exec()'s a string. 
Good luck!\"\"\"\n exec(line)\n\n def do_peek(self, line):\n \"\"\"Peek at a memory location.\n\nex: peek <address> <ct> <fmt> <file>\n\n address - the starting address passed as an integer value\n ct - Number of bytes to read (default 1)\n fmt - An optional formatting string.\n file - An optional file to save the data to\n\n The formatting string determines how the data is interpreted and displayed.\n Its a string made up of the following characters:\n + current address\n x 16-bit integer displayed in hex\n i 16-bit signed integer displayed in decimal\n u 16-bit unsigned integer displayed in decimal\n X 32-bit integer displayed in hex\n I 32-bit signed integer displayed in decimal\n U 32-bit unsigned integer displayed in decimal\n f 32-bit float\n e 32-bit float in exponential format\n c Single byte displayed as an ASCII character\n b Single byte displayed in hex\n\n The data is extracted from what's returned and formatted as described in the string.\n If there's more data left over at the end of the string, a new line starts and the\n string starts over.\n\n The default formatting string if none is supplied is +XXXX\n i.e. Data is displayed as a series of 4 32-bit hex values / line\n\"\"\"\n param = shlex.split(line)\n if len(param) < 1:\n print(\"Please specify the address at which to peek at a minimum\")\n return\n ct = 1\n fmt = \"+XXXX\"\n file_name = None\n if len(param) > 3:\n file_name = param[3]\n if len(param) > 2:\n fmt = param[2]\n if len(param) > 1:\n ct = int(param[1], 0)\n address = int(param[0], 0)\n self.interface.peek(address, ct, fmt, file_name)\n\n def do_poke(self, line):\n \"\"\"Write data to a memory address\n\nex: poke [type] <address> <data>\n\n type - Optional type, can be byte, short, long or float\n determines how the data will be interpreted.\n\n address - Address at which to write data\n\n data - One or more data items to write.\n\"\"\"\n param = shlex.split(line)\n if len(param) < 2:\n print(\"Please pass the address and at least one value to write\")\n return\n\n poke_type = \"byte\"\n if param[0] in [\"long\", \"short\", \"float\"]:\n poke_type = param[0]\n param = param[1:]\n\n address = param[0]\n\n if poke_type == \"float\":\n data = [float(x) for x in param[1:]]\n else:\n data = [int(x, 0) for x in param[1:]]\n self.interface.poke(address, data, poke_type)\n\n def do_get(self, line):\n cl = line.split()\n if len(cl) < 1:\n print(\"Please give the variable name to read\")\n return\n\n var_name = cl[0]\n raw = False\n fmt = None\n if len(cl) > 1:\n if cl[1] == \"--raw\":\n raw = True\n else:\n fmt = cl[1]\n\n if var_name == \"all\":\n found = self.interface.variables_find()\n all_vars = self.interface.variables_get(found, raw=raw)\n self.print_variable_values(all_vars, show_access=True)\n return\n elif var_name == \"set\":\n found = self.interface.variables_find(access_filter=VAR_ACCESS_WRITE)\n all_vars = self.interface.variables_get(found, raw=raw)\n self.print_variable_values(all_vars, show_access=False)\n return\n elif var_name == \"read\":\n found = self.interface.variables_find(access_filter=VAR_ACCESS_READ_ONLY)\n all_vars = self.interface.variables_get(found, raw=raw)\n self.print_variable_values(all_vars, show_access=False)\n return\n elif \"*\" in var_name or \"?\" in var_name:\n found = self.interface.variables_find(pattern=var_name)\n all_vars = self.interface.variables_get(found, raw=raw)\n self.print_variable_values(all_vars, show_access=False)\n return\n else:\n variable_md = self.interface.variable_metadata[var_name]\n val = 
self.interface.variable_get(var_name, raw=raw, fmt=fmt)\n print(variable_md.print_value(val))\n\n def print_variable_values(self, names_values, show_access):\n for count, name in enumerate(sorted(names_values.keys())):\n variable_md = self.interface.variable_metadata[name]\n text = variable_md.print_value(names_values[name], show_access=show_access)\n if (count % 2) == 0:\n print(white(text))\n else:\n print(dark_orange(text))\n\n def complete_get(self, text, line, begidx, endidx):\n return self.interface.variables_find(pattern=(text + \"*\")) + [\n x for x in [\"all\", \"set\", \"read\"] if x.startswith(text)\n ]\n\n def help_get(self):\n print(\"Read the value of a ventilator debug variable and display it.\")\n print(\" get all [--raw] - retrieves all variables\")\n print(\n \" get set [--raw] - retrieves all settings (writable variables)\"\n )\n print(\" get read [--raw] - retrieves all read-only variables\")\n print(\" get <var> [fmt] [--raw] - retrieves a specific variable(s)\")\n print(\" <var> - can contain wildcards * or ?\")\n print(\"Available variables:\")\n for k in sorted(self.interface.variable_metadata.keys()):\n print(f\" {self.interface.variable_metadata[k].verbose()}\")\n\n def do_set(self, line):\n cl = line.split()\n if len(cl) < 1:\n print(red(\"Not enough parameters\"))\n return\n varname = cl[0]\n\n if varname == \"force_off\":\n self.interface.variables_force_off()\n return\n\n if varname == \"force_open\":\n self.interface.variables_force_open()\n return\n\n if len(cl) < 2:\n print(\"Please give the variable name and value\")\n return\n\n if len(cl) == 2:\n self.interface.variable_set(varname, cl[1]) # single variable\n else:\n self.interface.variable_set(varname, cl[1:]) # array\n\n def complete_set(self, text, line, begidx, endidx):\n return self.interface.variables_find(\n pattern=(text + \"*\"), access_filter=VAR_ACCESS_WRITE\n ) + [x for x in [\"force_off\", \"force_open\"] if x.startswith(text)]\n\n def help_set(self):\n print(\"Sets value for a ventilator debug variable (or convenience macro):\")\n print(\" force_off - resets all forced actuator variables\")\n print(\" force_open - forces all valves open and blower to maximum\")\n print(\"Available variables:\")\n for k in sorted(self.interface.variables_find(access_filter=VAR_ACCESS_WRITE)):\n print(\n f\" {self.interface.variable_metadata[k].verbose(show_access=False, show_format=False)}\"\n )\n\n def do_trace(self, line):\n \"\"\"The `trace` command controls/reads the controller's trace buffer.\n\nTracing lets you sample debug variables in real time. Their values are saved\nto a large internal memory buffer in the device, which you can then download\nand/or display as a graph.\n\nFor meaningful performance testing, it is recommended that you use the higher\nlevel `test` self.interface for a more structured experiment control experience. See\n`help test`.\n\nA sub-command must be passed as an option:\n\ntrace start [--period p] [var1 ... ]\n Starts collecting trace data.\n\n You can specify the names of up to TRACE_VAR_CT debug variables to trace. If\n you don't specify any, we use the last known values.\n\n --period controls the sample period in units of one trip through the\n controller's high-priority loop. If you don't specify a period, we use 1.\n\ntrace flush\n Flushes the trace buffer. 
If trace is ongoing, buffer will be filled with new data.\n\ntrace stop\n Stops trace\n\ntrace status\n Returns the current state of the trace:\n - traced variables\n - trace period\n - number of samples in the trace buffer\n\ntrace save [--verbose/-v] [--plot/-p] [--csv/-c]\n Downloads trace data and saves it as an \"unplanned test\". File will be named as\n <date-time>_<user>_manual_trace.json with a blank test scenario definition.\n See more about tests and test scenarios with `help test`.\n --verbose/-v - also print out full trace data\n --plot/-p - also plot traces and save as .png\n --csv/-c - also save traces as .csv\n\"\"\"\n cl = shlex.split(line)\n if len(cl) < 1:\n print(\"Error, please specify the trace command to run.\")\n print(self.do_trace.__doc__)\n return\n\n if cl[0] == \"flush\":\n self.interface.trace_flush()\n\n elif cl[0] == \"start\":\n parser = CmdArgumentParser(\"trace start\")\n parser.add_argument(\"--period\", type=int)\n parser.add_argument(\"var\", nargs=\"*\")\n args = parser.parse_args(cl[1:])\n\n # TODO: check validity of all variables\n\n if args.period:\n self.interface.trace_set_period(args.period)\n else:\n self.interface.trace_set_period(1)\n\n if args.var:\n self.interface.trace_select(args.var)\n\n self.interface.trace_start()\n\n elif cl[0] == \"stop\":\n self.interface.trace_stop()\n\n elif cl[0] == \"save\":\n test = self.interface.trace_save()\n test.save_json(self.test_data_dir, print_self=True)\n\n if len(cl) > 1:\n parser = CmdArgumentParser(\"test\")\n parser.add_argument(\n \"--verbose\", \"-v\", default=False, action=\"store_true\"\n )\n parser.add_argument(\"--plot\", \"-p\", default=False, action=\"store_true\")\n parser.add_argument(\"--csv\", \"-c\", default=False, action=\"store_true\")\n args2 = parser.parse_args(cl[1:])\n if args2.verbose:\n print(test.print_traces())\n if args2.plot:\n test.plot(self.test_data_dir, save=True, show=True)\n if args2.csv:\n test.save_csv(self.test_data_dir)\n\n elif cl[0] == \"status\":\n print(\"Traced variables:\")\n for var in self.interface.trace_active_variables_list():\n print(f\" - {var.name}\")\n print(f\"Trace period: {self.interface.trace_get_period_us()} \\u03BCs\")\n print(f\"Samples in buffer: {self.interface.trace_num_samples()}\")\n\n else:\n print(f\"Unknown trace sub-command {cl[0]}\")\n return\n\n def complete_trace(self, text, line, begidx, endidx):\n sub_commands = [\"start\", \"flush\", \"stop\", \"status\", \"save\"]\n tokens = shlex.split(line)\n if len(tokens) > 2 and tokens[1] == \"start\":\n return self.interface.variables_find(\n pattern=(text + \"*\"), access_filter=VAR_ACCESS_READ_ONLY\n )\n elif len(tokens) == 2 and text == \"start\":\n return [\"start \"]\n elif len(tokens) == 2 and text == \"\" and tokens[1] == \"start\":\n return self.interface.variables_find(access_filter=VAR_ACCESS_READ_ONLY)\n elif len(tokens) == 2 and any(s.startswith(text) for s in sub_commands):\n return [s for s in sub_commands if s.startswith(text)]\n elif len(tokens) == 1:\n return sub_commands\n\n def do_eeprom(self, line):\n \"\"\"The `eeprom` command allows you to read/write to the controller's\nnon-volatile memory.\n\nA sub-command must be passed as an option:\n\neeprom read <address> <length>\n Reads length bytes in the EEPROM starting at given address.\n\neeprom write <address> <data>\n Writes data (provided as a series of bytes) to the EEPROM.\n\"\"\"\n cl = shlex.split(line)\n if len(cl) < 1:\n print(\"Error, please specify the operation to perform.\")\n return\n if cl[0] == 
\"read\":\n if len(cl) < 3:\n print(\"Error, please provide address and length.\")\n return\n print(self.interface.eeprom_read(cl[1], cl[2]))\n elif cl[0] == \"write\":\n if len(cl) < 3:\n print(\"Error, please provide address and at least 1 byte of data.\")\n return\n self.interface.eeprom_write(cl[1], list(map(int, cl[2:])))\n else:\n print(\"Error: Unknown subcommand %s\" % cl[0])\n return\n\n\ndef auto_select_port():\n ports = detect_stm32_ports()\n if not ports:\n red(\n \"Could not auto-detect serial port; platformio device list did not \"\n \"yield any STM32 devices.\"\n )\n return None\n if len(ports) > 1:\n red(\n \"Could not auto-detect serial port; platformio device list \"\n f\"yielded multiple STM32 devices: {', '.join(ports)}. \"\n \"Choose port explicitly with --port.\"\n )\n return None\n return ports[0]\n\n\ndef main():\n terminal_parser = argparse.ArgumentParser()\n terminal_parser.add_argument(\n \"--port\",\n \"-p\",\n type=str,\n help=\"Serial port device is connected to, e.g. /dev/ttyACM0.\"\n \" If unspecified, we try to auto-detect the port.\",\n )\n terminal_parser.add_argument(\n \"--command\", \"-c\", type=str, help=\"Run the given command and exit.\"\n )\n\n terminal_args = terminal_parser.parse_args()\n\n interpreter = CmdLine(terminal_args.port)\n path = Path(interpreter.test_data_dir)\n path.mkdir(parents=True, exist_ok=True)\n\n if terminal_args.command:\n # Set matplotlib to noninteractive mode. This causes show() to block,\n # so if you do `controller_debug.py -c \"trace graph\"` the program won't\n # exit until you close the displayed graph.\n plt.ioff()\n interpreter.onecmd(terminal_args.command)\n else:\n # Turn on interactive mode for matplotlib, so show() doesn't block.\n plt.ion()\n interpreter.cli_loop()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [ [ "matplotlib.pyplot.ioff", "matplotlib.pyplot.ion" ] ] |
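The `apis` column for this row records only `matplotlib.pyplot.ioff` and `matplotlib.pyplot.ion`, which `debug_cli.py` calls in `main()` so that `show()` blocks in one-shot `--command` mode but returns immediately in the interactive CLI loop. The snippet below is a minimal sketch of that toggle, not part of the dataset row; the `one_shot` flag and the example plot data are illustrative only.

```python
# Hedged sketch of the interactive-mode toggle recorded in the apis column:
# plt.ioff() makes plt.show() block until the window is closed (one-shot mode),
# plt.ion() makes plt.show() return immediately (interactive loop).
import matplotlib.pyplot as plt

def demo(one_shot: bool) -> None:
    if one_shot:
        plt.ioff()   # non-interactive: show() blocks
    else:
        plt.ion()    # interactive: show() does not block
    plt.plot([0, 1, 2], [0, 1, 4])  # arbitrary example data
    plt.show()
```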
EiffL/bayesfast | [ "52e9f405e6c80232ab523165e54406449ac4d0e1" ] | [ "bayesfast/core/density.py" ] | [
"import numpy as np\nfrom collections import namedtuple, OrderedDict\nfrom ..utils.collections import VariableDict, PropertyList\nfrom copy import deepcopy\nimport warnings\nfrom .module import Module, Surrogate\nfrom ..transforms._constraint import *\n\n__all__ = ['Pipeline', 'Density', 'DensityLite']\n\n# TODO: add call counter?\n# TODO: review the interface of decay and bound\n# TODO: move fit to Pipeline?\n\n\nclass _PipelineBase:\n \"\"\"Utilities shared by `Pipeline`, `Density` and `DensityLite`.\"\"\"\n @property\n def var_scales(self):\n return self._var_scales\n \n @var_scales.setter\n def var_scales(self, scales):\n if scales is None:\n self._var_scales = None\n else:\n self._var_scales = self._scale_check(scales)\n self._var_scales.flags.writeable = False # TODO: PropertyArray?\n \n @staticmethod\n def _scale_check(scales):\n try:\n scales = np.ascontiguousarray(scales).astype(np.float)\n if scales.ndim == 1:\n scales = np.array((np.zeros_like(scales), scales)).T.copy()\n if not (scales.ndim == 2 and scales.shape[-1] == 2):\n raise ValueError('I do not know how to interpret the shape '\n 'of var_scales.')\n except:\n raise ValueError('Invalid value for var_scales.')\n return scales\n \n @property\n def hard_bounds(self):\n return self._hard_bounds\n \n @hard_bounds.setter\n def hard_bounds(self, bounds):\n if isinstance(bounds, bool):\n self._hard_bounds = bounds\n else:\n self._hard_bounds = self._bound_check(bounds)\n self._hard_bounds.flags.writeable = False # TODO: PropertyArray?\n \n @staticmethod\n def _bound_check(bounds):\n try:\n bounds = np.atleast_1d(bounds).astype(bool).astype(np.uint8).copy()\n if bounds.ndim == 1:\n bounds = np.array((bounds, bounds)).T.copy()\n if not (bounds.ndim == 2 and bounds.shape[-1] == 2):\n raise ValueError(\n 'I do not know how to interpret the shape of hard_bounds.')\n except:\n raise ValueError('Invalid value for hard_bounds')\n return bounds\n \n def _constraint(self, x, out, f, f2, k):\n if self._var_scales is None:\n if k == 0:\n _out = x.copy()\n elif k == 1:\n _out = np.ones_like(x)\n elif k == 2:\n _out = np.zeros_like(x)\n else:\n raise RuntimeError('unexpected value for k.')\n if out is False:\n x = _out\n return\n elif out is True:\n return _out\n else:\n if not (isinstance(out, np.ndarray) and x.shape == out.shape):\n raise ValueError('invalid value for out.')\n out = _out\n return\n else:\n _return = False\n x = np.ascontiguousarray(x)\n if out is False:\n out = x\n elif out is True:\n out = np.empty_like(x)\n _return = True\n else:\n if not (isinstance(out, np.ndarray) and x.shape == out.shape):\n raise ValueError('invalid value for out.')\n out = np.ascontiguousarray(out)\n if isinstance(self._hard_bounds, bool):\n _hb = self._hard_bounds * np.ones((x.shape[-1], 2), np.uint8)\n else:\n _hb = self._hard_bounds\n if x.ndim == 1:\n f(x, self._var_scales, out, _hb, x.shape[0])\n elif x.ndim == 2:\n f2(x, self._var_scales, out, _hb, x.shape[1], x.shape[0])\n else:\n _shape = x.shape\n x = x.reshape((-1, _shape[-1]))\n out = out.reshape((-1, _shape[-1]))\n f2(x, self._var_scales, out, _hb, x.shape[1], x.shape[0])\n x = x.reshape(_shape)\n out = out.reshape(_shape)\n if _return:\n return out\n \n def from_original(self, x, out=True):\n return self._constraint(x, out, _from_original_f, _from_original_f2, 0)\n \n def from_original_grad(self, x, out=True):\n return self._constraint(x, out, _from_original_j, _from_original_j2, 1)\n \n def from_original_grad2(self, x, out=True):\n return self._constraint(\n x, out, _from_original_jj, 
_from_original_jj2, 2)\n \n def to_original(self, x, out=True):\n return self._constraint(x, out, _to_original_f, _to_original_f2, 0)\n \n def to_original_grad(self, x, out=True):\n return self._constraint(x, out, _to_original_j, _to_original_j2, 1)\n \n def to_original_grad2(self, x, out=True):\n return self._constraint(x, out, _to_original_jj, _to_original_jj2, 2)\n \n def _get_diff(self, x=None, x_trans=None):\n # Returning |dx / dx_trans|.\n if x is not None:\n return -np.sum(np.log(np.abs(self.from_original_grad(x))), axis=-1)\n elif x_trans is not None:\n return np.sum(np.log(np.abs(self.to_original_grad(x_trans))), \n axis=-1)\n else:\n raise ValueError('x and x_trans cannot both be None.')\n \n def to_original_density(self, density, x_trans=None, x=None):\n diff = self._get_diff(x, x_trans)\n density = np.asarray(density)\n if density.size != diff.size:\n raise ValueError('the shape of density is inconsistent with the '\n 'shape of x_trans or x.')\n return density - diff\n \n def from_original_density(self, density, x=None, x_trans=None):\n diff = self._get_diff(x, x_trans)\n density = np.asarray(density)\n if density.size != diff.size:\n raise ValueError('the shape of density is inconsistent with the '\n 'shape of x or x_trans.')\n return density + diff\n \n def print_summary(self):\n raise NotImplementedError\n\n\nclass Pipeline(_PipelineBase):\n \"\"\"\n Constructing composite functions from basic `Module`(s).\n \n Parameters\n ----------\n module_list : Module or 1-d array_like of Module, optional\n List of `Module`(s) constituting the `Pipeline`. Set to `[]` by default.\n input_vars : str or 1-d array_like of str, optional\n Name(s) of input variable(s). Set to `['__var__']` by default.\n var_dims : 1-d array_like of int, or None, optional\n Used to divide and extract the variable(s) from the input. If 1-d\n array_like, should have the same shape as `input_vars`. If `None`, will\n be interpreted as there is only one input variable. Set to `None` by\n default.\n surrogate_list : Surrogate or 1-d array_like of Surrogate, optional\n List of surrogate modules. Set to `[]` by default.\n var_scales : None or array_like of float(s), optional\n Controlling the scaling of input variables. Set to `None` by default.\n hard_bounds : bool or array_like, optional\n Controlling whether `var_scales` should be interpreted as hard bounds,\n or just used to rescale the variables. If bool, will be applied to all\n the variables. If array_like, should have shape of `(input_size,)` or\n `(input_size, 2)`. 
Set to `False` by default.\n \n Notes\n -----\n See the tutorial for more information of usage.\n \"\"\"\n def __init__(self, module_list=[], input_vars=['__var__'], var_dims=None,\n surrogate_list=[], var_scales=None, hard_bounds=False):\n self.module_list = module_list\n self.input_vars = input_vars\n self.var_dims = var_dims\n self.surrogate_list = surrogate_list\n self.var_scales = var_scales\n self.hard_bounds = hard_bounds\n \n @property\n def module_list(self):\n return self._module_list\n \n @module_list.setter\n def module_list(self, ml):\n if isinstance(ml, Module):\n ml = [ml]\n self._module_list = PropertyList(ml, self._ml_check)\n \n @staticmethod\n def _ml_check(ml):\n for i, m in enumerate(ml):\n if not isinstance(m, Module):\n raise ValueError(\n 'element #{} of module_list is not a Module.'.format(i))\n return ml\n \n @property\n def surrogate_list(self):\n return self._surrogate_list\n \n @surrogate_list.setter\n def surrogate_list(self, sl):\n if isinstance(sl, Surrogate):\n sl = [sl]\n self._surrogate_list = PropertyList(sl, self._sl_check)\n \n def _sl_check(self, sl):\n for i, s in enumerate(sl):\n if not isinstance(s, Surrogate):\n raise ValueError('element #{} of surrogate_list is not a '\n 'Surrogate'.format(i))\n self._build_surrogate_recipe(sl)\n return sl\n \n def _build_surrogate_recipe(self, sl):\n # ((0, start_0, extent_0), (1, start_1, extent_1), ...)\n ns = len(sl)\n if ns > 0:\n self._surrogate_recipe = np.array(\n [[i, *s._scope] for i, s in enumerate(sl)])\n _recipe_sort = np.argsort(\n self._surrogate_recipe[:, 1] % self.n_module)\n self._surrogate_recipe = (\n self._surrogate_recipe[_recipe_sort].astype(np.int))\n for i in range(ns - 1):\n if (np.sum(self._surrogate_recipe[i, 1:]) > \n self._surrogate_recipe[i + 1, 1]):\n raise ValueError('the #{} surrogate model overlaps with '\n 'the next one.'.format(i))\n else:\n self._surrogate_recipe = np.empty((ns, 3), dtype=np.int)\n \n def _options_check(self, start, stop, extract_vars):\n start = self._step_check(start, 'start')\n stop = self._step_check(stop, 'stop')\n if start > stop:\n raise ValueError('start should be no larger than stop.')\n if (extract_vars is None) or isinstance(extract_vars, str):\n pass\n else:\n extract_vars = self._var_check(extract_vars, 'extract', False,\n 'remove')\n return start, stop, extract_vars\n \n def _step_check(self, step, tag):\n if step is None:\n if tag == 'start':\n step = 0\n elif tag == 'stop':\n step = self.n_module - 1\n else:\n raise RuntimeError('unexpected value for tag.')\n else:\n try:\n step = int(step)\n step = step % self.n_module\n except:\n raise ValueError('{} should be an int or None, instead '\n 'of {}.'.format(tag, step))\n return step\n \n _var_check = Module._var_check\n \n @property\n def n_module(self):\n return len(self._module_list)\n \n @property\n def n_surrogate(self):\n return len(self._surrogate_list)\n \n @property\n def has_surrogate(self):\n return self.n_surrogate > 0\n \n def fun(self, x, use_surrogate=False, original_space=True, start=None,\n stop=None, extract_vars=None, copy_x=False):\n if copy_x:\n x = deepcopy(x)\n copy_x = False # for possible recursions\n start, stop, extract_vars = self._options_check(start, stop, \n extract_vars)\n conf = (bool(use_surrogate), bool(original_space), start, stop, \n extract_vars) # for possible recursions\n \n # vectorization using recursions\n # TODO: review this\n if isinstance(x, VariableDict):\n var_dict = x\n else:\n x = np.atleast_1d(x)\n if x.ndim == 1:\n if x.dtype.kind == 'f':\n if 
not original_space:\n x = self.to_original(x)\n var_dict = VariableDict()\n if self._input_cum is None:\n var_dict._fun[self._input_vars[0]] = x\n else:\n for i, n in enumerate(self._input_vars):\n var_dict._fun[n] = x[\n self._input_cum[i]:self._input_cum[i + 1]]\n elif x.dtype.kind == 'O':\n return np.asarray([self.fun(_x, *conf) for _x in x])\n else:\n raise ValueError('invalid input for fun.')\n else:\n return np.asarray([self.fun(_x, *conf) for _x in x])\n \n if use_surrogate and self.has_surrogate:\n si = np.searchsorted(self._surrogate_recipe[:, 1], start)\n if si == self.n_surrogate:\n use_surrogate = False\n i = start\n while i <= stop:\n try:\n if use_surrogate and self.has_surrogate:\n if i < self._surrogate_recipe[si, 1]:\n _module = self._module_list[i]\n di = 1\n elif i == self._surrogate_recipe[si, 1]:\n _module = self._surrogate_list[\n self._surrogate_recipe[si, 0]]\n di = self._surrogate_recipe[si, 2]\n if si == self.n_surrogate - 1:\n use_surrogate = False\n else:\n si += 1\n else:\n raise RuntimeError('unexpected value for i and si.')\n else:\n _module = self._module_list[i]\n di = 1\n _input = [var_dict._fun[n] for n in _module._input_vars]\n _output = _module.fun(*_input)\n for j, n in enumerate(_module._output_vars):\n var_dict._fun[n] = _output[j]\n for j, n in enumerate(_module._copy_vars):\n try:\n nn = _module._paste_vars[j]\n except:\n k = 1\n while True:\n nn = n + '-Copy{}'.format(k)\n if nn in var_dict._fun:\n k += 1\n else:\n break\n var_dict._fun[nn] = np.copy(var_dict._fun[n])\n for n in _module._delete_vars:\n del var_dict._fun[n]\n except:\n raise RuntimeError(\n 'pipeline fun evaluation failed at step #{}.'.format(i))\n i += di\n if extract_vars is None:\n return var_dict\n elif isinstance(extract_vars, str):\n return var_dict._fun[extract_vars]\n else:\n return var_dict[extract_vars]\n \n __call__ = fun\n \n def jac(self, x, use_surrogate=False, original_space=True, start=None,\n stop=None, extract_vars=None, copy_x=False):\n _faj = self.fun_and_jac(x, use_surrogate, original_space, start, stop,\n extract_vars, copy_x)\n if isinstance(extract_vars, str):\n return _faj[1] # _faj: (fun, jac)\n else:\n return _faj # _faj: VariableDict\n \n def fun_and_jac(self, x, use_surrogate=False, original_space=True,\n start=None, stop=None, extract_vars=None, copy_x=False):\n if copy_x:\n x = deepcopy(x)\n copy_x = False # for possible recursions\n start, stop, extract_vars = self._options_check(start, stop, \n extract_vars)\n conf = (bool(use_surrogate), bool(original_space), start, stop, \n extract_vars) # for possible recursions\n \n # since in certain cases we need to return (vec of fun, vec of jac)\n # seems that we cannot directly use recursions to vectorize here\n # TODO: review this\n if isinstance(x, VariableDict):\n var_dict = x\n else:\n x = np.atleast_1d(x)\n if x.dtype.kind == 'f' and x.ndim == 1:\n if not original_space:\n j = np.diag(self.to_original_grad(x))\n x = self.to_original(x)\n else:\n j = np.eye(x.shape[-1])\n var_dict = VariableDict()\n if self._input_cum is None:\n var_dict._fun[self._input_vars[0]] = x\n var_dict._jac[self._input_vars[0]] = j\n else:\n for i, n in enumerate(self._input_vars):\n var_dict._fun[n] = x[\n self._input_cum[i]:self._input_cum[i + 1]]\n var_dict._jac[n] = j[\n self._input_cum[i]:self._input_cum[i + 1]]\n else:\n x = np.ascontiguousarray(x)\n if x.dtype.kind == 'f':\n shape = x.shape[:-1]\n size = np.prod(shape)\n x_f = x.reshape((size, -1))\n elif x.dtype.kind == 'O':\n shape = x.shape\n size = np.prod(shape)\n 
x_f = x.reshape(size)\n else:\n ValueError('invalid input for fun_and_jac.')\n if isinstance(extract_vars, str):\n _faj0 = self.fun_and_jac(x_f[0], *conf)\n _fshape = _faj0[0].shape\n _jshape = _faj0[1].shape\n result_f = np.empty((size, *_fshape), dtype=np.float)\n result_j = np.empty((size, *_jshape), dtype=np.float)\n result_f[0] = _faj0[0]\n result_j[0] = _faj0[1]\n for i in range(1, size):\n _faj = self.fun_and_jac(x_f[i], *conf)\n result_f[i] = _faj[0]\n result_j[i] = _faj[1]\n return (result_f.reshape((*shape, *_fshape)), \n result_j.reshape((*shape, *_jshape)))\n else:\n result = np.empty(size, dtype='object')\n for i in range(size):\n result[i] = self.fun_and_jac(x_f[i], *conf)\n return result.reshape(shape)\n \n if use_surrogate and self.has_surrogate:\n si = np.searchsorted(self._surrogate_recipe[:, 1], start)\n if si == self.n_surrogate:\n use_surrogate = False\n i = start\n while i <= stop:\n try:\n if use_surrogate and self.has_surrogate:\n if i < self._surrogate_recipe[si, 1]:\n _module = self._module_list[i]\n di = 1\n elif i == self._surrogate_recipe[si, 1]:\n _module = self._surrogate_list[\n self._surrogate_recipe[si, 0]]\n di = self._surrogate_recipe[si, 2]\n if si == self.n_surrogate - 1:\n use_surrogate = False\n else:\n si += 1\n else:\n raise RuntimeError('unexpected value for i and si.')\n else:\n _module = self._module_list[i]\n di = 1\n _input = [var_dict._fun[n] for n in _module._input_vars]\n _input_jac = np.concatenate(\n [var_dict._jac[n] for n in _module._input_vars], axis=0)\n _output, _output_jac = _module.fun_and_jac(*_input)\n for j, n in enumerate(_module._output_vars):\n var_dict._fun[n] = _output[j]\n var_dict._jac[n] = np.dot(_output_jac[j], _input_jac)\n for j, n in enumerate(_module._copy_vars):\n try:\n nn = _module._paste_vars[j]\n except:\n k = 1\n while True:\n nn = n + '-Copy{}'.format(k)\n if (nn in var_dict._fun) or (nn in var_dict._jac):\n k += 1\n else:\n break\n var_dict[nn] = (np.copy(var_dict._fun[n]), \n np.copy(var_dict._jac[n]))\n for n in _module._delete_vars:\n del var_dict._fun[n], var_dict._jac[n]\n except:\n raise RuntimeError(\n 'pipeline fun_and_jac evaluation failed at step '\n '#{}.'.format(i))\n i += di\n if extract_vars is None:\n return var_dict\n else:\n return var_dict[extract_vars]\n \n @property\n def input_vars(self):\n return self._input_vars\n \n @input_vars.setter\n def input_vars(self, names):\n self._input_vars = PropertyList(\n names, lambda x: self._var_check(x, 'input', False, 'raise'))\n \n @property\n def var_dims(self):\n return self._var_dims\n \n @var_dims.setter\n def var_dims(self, dims):\n if dims is None:\n self._var_dims = None\n self._input_cum = None\n else:\n self._var_dims = self._dim_check(dims)\n self._var_dims.flags.writeable = False # TODO: PropertyArray?\n \n def _dim_check(self, dims):\n try:\n dims = np.atleast_1d(dims).astype(np.int)\n assert np.all(dims > 0)\n assert dims.size > 0 and dims.ndim == 1\n except:\n raise ValueError('var_dims should be a 1-d array_like of positive '\n 'int(s), or None, instead of {}.'.format(dims))\n self._input_cum = np.cumsum(np.insert(dims, 0, 0))\n return dims\n \n @property\n def input_size(self):\n return np.sum(self._var_dims) if (self.var_dims is not None) else None\n\n\nclass Density(Pipeline):\n \"\"\"\n Specialized `Pipeline` for probability densities.\n \n Parameters\n ----------\n density_name : str, optional\n Name of the variable representing the probability density. 
Set to\n `'__var__'` by default.\n args : array_like, optional\n Additional arguments to be passed to `Pipeline.__init__`.\n kwargs : dict, optional\n Additional keyword arguments to be passed to `Pipeline.__init__`.\n \"\"\"\n def __init__(self, density_name='__var__', *args, **kwargs):\n self.density_name = density_name\n super().__init__(*args, **kwargs)\n self._use_decay = False\n \n @property\n def density_name(self):\n return self._density_name\n \n @density_name.setter\n def density_name(self, name):\n if isinstance(name, str):\n self._density_name = name\n else:\n raise ValueError(\n 'density_name should be a str, instead of {}'.format(name))\n \n def logp(self, x, use_surrogate=False, original_space=True, start=None,\n stop=None, copy_x=False):\n _logp = self.fun(x, use_surrogate, original_space, start, stop,\n self._density_name, copy_x)[..., 0]\n if self._use_decay and use_surrogate:\n x_o = x if original_space else self.to_original(x)\n beta2 = np.einsum('...i,ij,...j', x_o - self._mu, self._hess,\n x_o - self._mu)\n _logp -= self._gamma * np.clip(beta2 - self._alpha2, 0, np.inf)\n if not original_space:\n _logp += self._get_diff(x_trans=x)\n return _logp\n \n __call__ = logp\n \n def grad(self, x, use_surrogate=False, original_space=True, start=None,\n stop=None, copy_x=False):\n _grad = self.jac(x, use_surrogate, original_space, start, stop,\n self._density_name, copy_x)[..., 0, :]\n if self._use_decay and use_surrogate:\n x_o = x if original_space else self.to_original(x)\n beta2 = np.einsum('...i,ij,...j', x_o - self._mu, self._hess,\n x_o - self._mu)\n _grad -= (2 * self._gamma * np.dot(x_o - self._mu, self._hess) * \n (beta2 > self._alpha2)[..., np.newaxis])\n if not original_space:\n _grad += self.to_original_grad2(x) / self.to_original_grad(x)\n return _grad\n \n def logp_and_grad(self, x, use_surrogate=False, original_space=True, \n start=None, stop=None, copy_x=False):\n _logp_and_grad = self.fun_and_jac(\n x, use_surrogate, original_space, start, stop, self._density_name,\n copy_x)\n _logp = _logp_and_grad[0][..., 0]\n _grad = _logp_and_grad[1][..., 0, :]\n if self._use_decay and use_surrogate:\n x_o = x if original_space else self.to_original(x)\n beta2 = np.einsum('...i,ij,...j', x_o - self._mu, self._hess,\n x_o - self._mu)\n _logp -= self._gamma * np.clip(beta2 - self._alpha2, 0, np.inf)\n _grad -= (2 * self._gamma * np.dot(x_o - self._mu, self._hess) *\n (beta2 > self._alpha2)[..., np.newaxis])\n if not original_space:\n _logp += self._get_diff(x_trans=x)\n _grad += self.to_original_grad2(x) / self.to_original_grad(x)\n return _logp, _grad\n \n ############################################################################\n \n @property\n def alpha(self):\n try:\n return self._alpha\n except:\n return None\n \n @alpha.setter\n def alpha(self, a):\n try:\n a = float(a)\n assert a > 0\n except:\n raise ValueError('alpha should be a positive float.')\n self._alpha = a\n self._alpha2 = a**2\n \n @property\n def gamma(self):\n try:\n return self._gamma\n except:\n return None\n \n @gamma.setter\n def gamma(self, g):\n try:\n g = float(g)\n assert g > 0\n except:\n raise ValueError('gamma should be a positive float.')\n self._gamma = g\n \n @property\n def mu(self):\n try:\n return self._mu\n except:\n return None\n \n @property\n def hess(self):\n try:\n return self._hess\n except:\n return None\n \n @property\n def use_decay(self):\n return self._use_decay\n \n @use_decay.setter\n def use_decay(self, decay):\n self._use_decay = bool(decay)\n \n def set_decay(self, x, 
original_space=True, alpha=None, alpha_p=150,\n gamma=None):\n try:\n x = np.ascontiguousarray(x)\n except:\n raise ValueError('invalid value for x.')\n x = x.reshape((-1, x.shape[-1]))\n x_o = x if original_space else self.to_original(x)\n self._mu = np.mean(x_o, axis=0)\n self._hess = np.linalg.inv(np.cov(x_o, rowvar=False))\n if alpha is None:\n if (self.alpha is None) or (alpha_p is not None):\n try:\n alpha_p = float(alpha_p)\n assert alpha_p > 0\n except:\n raise ValueError('alpha_p should be a positive float.')\n _beta = np.einsum('ij,jk,ik->i', x_o - self._mu, self._hess, \n x_o - self._mu)**0.5\n if alpha_p < 100:\n self.alpha = np.percentile(_beta, alpha_p)\n else:\n self.alpha = np.max(_beta) * alpha_p / 100\n else:\n pass\n else:\n self.alpha = alpha\n if gamma is None:\n if self.gamma is None:\n self._gamma = 0.1\n else:\n pass\n else:\n self.gamma = gamma\n self._use_decay = True\n \n def fit(self, var_dicts, use_decay=False, use_bound=None, use_mu_f=None,\n decay_options={}, fit_options={}):\n if not (hasattr(var_dicts, '__iter__') and\n all(isinstance(vd, VariableDict) for vd in var_dicts)):\n raise ValueError('var_dicts should consist of VariableDict(s).')\n \n if not isinstance(decay_options, dict):\n raise ValueError('decay_options should be a dict.')\n \n if isinstance(fit_options, dict):\n fit_options = [fit_options for i in range(self.n_surrogate)]\n elif (hasattr(fit_options, '__iter__') and \n all(isinstance(fi, dict) for fi in fit_options)):\n fit_options = list(fit_options)\n if len(fit_options) < self.n_surrogate:\n fit_options.extend([{} for i in range(self.n_surrogate - \n len(fit_options))])\n else:\n raise ValueError(\n 'fit_options should be a dict or consist of dict(s).')\n \n if use_decay:\n x = self._fit_var(var_dicts, self._input_vars)\n self.set_decay(x, **decay_options)\n else:\n self._use_decay = False\n \n for i, su in enumerate(self._surrogate_list):\n x = self._fit_var(var_dicts, su._input_vars)\n if su._var_scales is not None:\n x = (x - su._var_scales[:, 0]) / su._var_scales_diff\n y = self._fit_var(var_dicts, su._output_vars)\n _logp = self._fit_var(var_dicts, [self._density_name]).reshape(-1)\n ##### ##### ##### ##### #####\n fo = fit_options[i].copy()\n if use_bound is not None:\n fo['use_bound'] = use_bound\n if use_mu_f is not None:\n fo['use_mu_f'] = use_mu_f\n ##### ##### ##### ##### #####\n su.fit(x, y, logp=_logp, **fo)\n\n @classmethod\n def _fit_var(cls, var_dicts, var_names):\n return np.array([np.concatenate([vd._fun[vn] for vn in var_names]) \n for vd in var_dicts])\n \n ############################################################################\n\n\nclass DensityLite(_PipelineBase):\n \"\"\"\n Directly defines probability densities with logp, grad and/or logp_and_grad.\n \n Parameters\n ----------\n logp : callable or None, optional\n Callable returning the value of logp, or `None` if undefined.\n grad : callable or None, optional\n Callable returning the value of grad_logp, or `None` if undefined.\n logp_and_grad : callable or None, optional\n Callable returning the logp and grad_logp at the same time, or `None`\n if undefined.\n input_size : None or positive int, optional\n The size of input variables. Set to `None` by default.\n var_scales : None or array_like of float(s), optional\n Controlling the scaling of input variables. Set to `None` by default.\n hard_bounds : bool or array_like, optional\n Controlling whether `var_scales` should be interpreted as hard bounds,\n or just used to rescale the variables. 
If bool, will be applied to all\n the variables. If array_like, should have shape of `(input_size,)` or\n `(input_size, 2)`. Set to `False` by default.\n logp_args, grad_args, logp_and_grad_args : array_like, optional\n Additional arguments to be passed to `logp`, `grad` and `logp_and_grad`.\n Will be stored as tuples.\n logp_kwargs, grad_kwargs, logp_and_grad_kwargs : dict, optional\n Additional keyword arguments to be passed to `logp`, `grad` and\n `logp_and_grad`.\n \"\"\"\n def __init__(self, logp=None, grad=None, logp_and_grad=None,\n input_size=None, var_scales=None, hard_bounds=False,\n logp_args=(), logp_kwargs={}, grad_args=(), grad_kwargs={},\n logp_and_grad_args=(), logp_and_grad_kwargs={}):\n self.logp = logp\n self.grad = grad\n self.logp_and_grad = logp_and_grad\n self.logp_args = logp_args\n self.logp_kwargs = logp_kwargs\n self.grad_args = grad_args\n self.grad_kwargs = grad_kwargs\n self.logp_and_grad_args = logp_and_grad_args\n self.logp_and_grad_kwargs = logp_and_grad_kwargs\n \n @property\n def logp(self):\n if self.has_logp:\n return self._logp_wrapped\n elif self.has_logp_and_grad:\n return lambda *args: self._logp_and_grad_wrapped(*args)[0]\n else:\n raise RuntimeError('No valid definition of logp is found.')\n \n @logp.setter\n def logp(self, lp):\n if callable(lp):\n self._logp = lp\n elif logp_ is None:\n self._logp = None\n else:\n raise ValueError('logp should be callable, or None if you want to '\n 'reset it.')\n \n def _logp_wrapped(self, x, original_space=True, copy_x=False, \n vectorized=True):\n x = np.atleast_1d(x)\n if copy_x:\n x = np.copy(x)\n x_o = x if original_space else self.to_original(x)\n if vectorized:\n _logp = self._logp(x_o, *self.logp_args, **logp_kwargs)\n else:\n _logp = np.apply_along_axis(self._logp, -1, x_o, self.logp_args,\n self.logp_kwargs)\n if not original_space:\n _logp += self._get_diff(x_trans=x)\n return _logp\n \n @property\n def has_logp(self):\n return self._logp is not None\n \n @property\n def grad(self):\n if self.has_grad:\n return self._grad_wrapped\n elif self.has_logp_and_grad:\n return lambda *args: self._logp_and_grad_wrapped(*args)[1]\n else:\n raise RuntimeError('No valid definition of grad is found.')\n \n @grad.setter\n def grad(self, gd):\n if callable(gd):\n self._grad = gd\n elif grad_ is None:\n self._grad = None\n else:\n raise ValueError('grad should be callable, or None if you want to '\n 'reset it.')\n \n def _grad_wrapped(self, x, original_space=True, copy_x=False,\n vectorized=True):\n x = np.atleast_1d(x)\n if copy_x:\n x = np.copy(x)\n x_o = x if original_space else self.to_original(x)\n if vectorized:\n _grad = self._grad(x_o, *self.grad_args, **self.grad_kwargs)\n else:\n _grad = np.apply_along_axis(self._grad, -1, x_o, self.grad_args,\n self.grad_kwargs)\n if not original_space:\n _grad += self.to_original_grad2(x) / self.to_original_grad(x)\n return _grad\n \n @property\n def has_grad(self):\n return self._grad is not None\n \n @property\n def logp_and_grad(self):\n if self.has_logp_and_grad:\n return self._logp_and_grad_wrapped\n elif self.has_logp and self.has_grad:\n return lambda *args: (self._logp_wrapped(*args), \n self._grad_wrapped(*args))\n else:\n raise ValueError('No valid definition of logp_and_grad is found.')\n \n @logp_and_grad.setter\n def logp_and_grad(self, lpgd):\n if callable(lpgd):\n self._logp_and_grad = lpgd\n elif logp_and_grad_ is None:\n self._logp_and_grad = None\n else:\n raise ValueError('logp_and_grad should be callable, or None if you'\n 'want to reset it.')\n \n 
def _logp_and_grad_wrapped(self, x, original_space=True, copy_x=False,\n vectorized=True):\n x = np.atleast_1d(x)\n if copy_x:\n x = np.copy(x)\n x_o = x if original_space else self.to_original(x)\n if vectorized:\n _logp, _grad = self._logp_and_grad(x_o, *self.logp_and_grad_args,\n **self.logp_and_grad_kwargs)\n else:\n # TODO: review this\n _lag = np.apply_along_axis(\n self._logp_and_grad, -1, x_o, self.logp_and_grad_args,\n self.logp_and_grad_kwargs)\n _logp = _lag[..., 0]\n _grad = np.apply_along_axis(lambda x: list(x), -1, _lag[..., 1])\n # otherwise, it will be an object array\n if not original_space:\n _logp += self._get_diff(x_trans=x)\n _grad += self.to_original_grad2(x) / self.to_original_grad(x)\n return _logp, _grad\n \n @property\n def has_logp_and_grad(self):\n return self._logp_and_grad is not None\n \n @property\n def input_size(self):\n return self._input_size\n \n @input_size.setter\n def input_size(self, size):\n if size is None:\n self._input_size = None\n else:\n try:\n size = int(size)\n assert size > 0\n except:\n raise ValueError('input_size should be a positive int, or '\n 'None, instead of {}.'.format(size))\n self._input_size = size\n \n _args_setter = Module._args_setter\n \n _kwargs_setter = Module._kwargs_setter\n \n @property\n def logp_args(self):\n return self._logp_args\n \n @logp_args.setter\n def logp_args(self, args):\n self._logp_args = self._args_setter(args, 'logp')\n \n @property\n def logp_kwargs(self):\n return self._logp_kwargs\n \n @logp_kwargs.setter\n def logp_kwargs(self, kwargs):\n self._logp_kwargs = self._kwargs_setter(kwargs, 'logp')\n \n @property\n def grad_args(self):\n return self._grad_args\n \n @grad_args.setter\n def grad_args(self, args):\n self._grad_args = self._args_setter(args, 'grad')\n \n @property\n def grad_kwargs(self):\n return self._grad_kwargs\n \n @grad_kwargs.setter\n def grad_kwargs(self, kwargs):\n self._grad_kwargs = self._kwargs_setter(kwargs, 'grad')\n \n @property\n def logp_and_grad_args(self):\n return self._logp_and_grad_args\n \n @logp_and_grad_args.setter\n def logp_and_grad_args(self, args):\n self._logp_and_grad_args = self._args_setter(args, 'logp_and_grad')\n \n @property\n def logp_and_grad_kwargs(self):\n return self._logp_and_grad_kwargs\n \n @logp_and_grad_kwargs.setter\n def logp_and_grad_kwargs(self, kwargs):\n self._logp_and_grad_kwargs = self._kwargs_setter(kwargs,\n 'logp_and_grad')\n"
] |
[
[
"numpy.dot",
"numpy.einsum",
"numpy.asarray",
"numpy.all",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"numpy.zeros_like",
"numpy.searchsorted",
"numpy.ones_like",
"numpy.clip",
"numpy.empty_like",
"numpy.eye",
"numpy.atleast_1d",
"numpy.copy",
"numpy.apply_along_axis",
"numpy.insert",
"numpy.ascontiguousarray",
"numpy.cov",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.percentile",
"numpy.ones",
"numpy.prod",
"numpy.empty"
]
] |
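Among the numpy calls listed above, `numpy.einsum` and `numpy.clip` are used in `density.py` to build the surrogate decay penalty: a batched quadratic form beta^2 = (x - mu)^T H (x - mu), clipped at alpha^2 and scaled by gamma. The sketch below is not from the dataset; the random data and the gamma = 0.1, alpha = 1.0 values are assumed for illustration.

```python
# Hedged sketch of the batched quadratic form and clipped penalty pattern
# seen in bayesfast/core/density.py (values below are illustrative).
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(5, 3))                       # batch of 5 points in 3-d
mu = x.mean(axis=0)                               # sample mean
hess = np.linalg.inv(np.cov(x, rowvar=False))     # inverse sample covariance

# beta^2_i = (x_i - mu)^T H (x_i - mu), computed for the whole batch at once
beta2 = np.einsum('...i,ij,...j', x - mu, hess, x - mu)
penalty = 0.1 * np.clip(beta2 - 1.0**2, 0, np.inf)  # gamma=0.1, alpha=1.0 assumed
print(penalty.shape)                                 # -> (5,)
```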
ScalableEKNN2021/ColossalAI | [ "b9f8521f8c881c5c781e46afa0be7aedd83bdb9c" ] | [ "tests/test_layers/test_3d/checks_3d/check_layer_3d.py" ] | [
"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nimport time\n\nimport torch\nfrom colossalai.constants import INPUT_GROUP_3D, OUTPUT_GROUP_3D, WEIGHT_GROUP_3D\nfrom colossalai.core import global_context\nfrom colossalai.logging import get_dist_logger\nfrom colossalai.nn import (Classifier3D, CrossEntropyLoss3D, Embedding3D, LayerNorm3D, Linear3D, PatchEmbedding3D,\n VanillaClassifier, VanillaPatchEmbedding, VocabParallelClassifier3D,\n VocabParallelCrossEntropyLoss3D, VocabParallelEmbedding3D)\nfrom colossalai.nn.layer.parallel_3d._utils import get_parallel_mode_from_env\nfrom colossalai.utils import get_current_device, print_rank_0\n\nfrom .common import BATCH_SIZE, DEPTH, HIDDEN_SIZE, IMG_SIZE, NUM_CLASSES, SEQ_LENGTH, VOCAB_SIZE, check_equal\n\n\ndef check_linear():\n rank = torch.distributed.get_rank()\n logger = get_dist_logger()\n device = get_current_device()\n dtype = torch.float32\n INPUT_SIZE = HIDDEN_SIZE\n OUTPUT_SIZE = 2 * HIDDEN_SIZE\n\n input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)\n weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)\n output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)\n\n j = global_context.get_local_rank(input_parallel_mode)\n i = global_context.get_local_rank(weight_parallel_mode)\n k = global_context.get_local_rank(output_parallel_mode)\n\n layer = Linear3D(INPUT_SIZE, OUTPUT_SIZE, dtype=dtype, bias=True)\n layer = layer.to(device)\n layer_master = torch.nn.Linear(INPUT_SIZE, OUTPUT_SIZE)\n layer_master = layer_master.to(device)\n\n weight_master = layer_master.weight.data.transpose(0, 1)\n torch.distributed.broadcast(weight_master, src=0)\n weight = torch.chunk(weight_master, DEPTH, dim=0)[k]\n weight = torch.chunk(weight, DEPTH, dim=-1)[j]\n layer.weight.data.copy_(weight)\n bias_master = layer_master.bias.data\n torch.distributed.broadcast(bias_master, src=0)\n bias = torch.chunk(bias_master, DEPTH)[j]\n layer.bias.data.copy_(bias)\n\n A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)\n A_master = torch.randn(A_shape, dtype=dtype, device=device)\n torch.distributed.broadcast(A_master, src=0)\n A = torch.chunk(A_master, DEPTH, dim=0)[i]\n A = torch.chunk(A, DEPTH, dim=-1)[k]\n A = torch.chunk(A, DEPTH, dim=0)[j]\n A = A.clone()\n A.requires_grad = True\n\n fwd_start = time.time()\n out = layer(A)\n torch.cuda.synchronize()\n fwd_end = time.time()\n print_rank_0(\n 'linear forward: {0} --> {1} | {2:.3f} s'.format(tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger)\n A_master = A_master.clone()\n A_master.requires_grad = True\n C_master = layer_master(A_master)\n C = torch.chunk(C_master, DEPTH, dim=0)[i]\n C = torch.chunk(C, DEPTH, dim=-1)[j]\n C = torch.chunk(C, DEPTH, dim=0)[k]\n logger.info('Rank {} linear forward: {}'.format(rank, check_equal(out, C)))\n\n grad_shape = C_master.shape\n grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device())\n torch.distributed.broadcast(grad_master, src=0)\n grad = torch.chunk(grad_master, DEPTH, dim=0)[i]\n grad = torch.chunk(grad, DEPTH, dim=-1)[j]\n grad = torch.chunk(grad, DEPTH, dim=0)[k]\n\n bwd_start = time.time()\n out.backward(grad)\n torch.cuda.synchronize()\n bwd_end = time.time()\n print_rank_0('linear backward: {:.3f} s'.format(bwd_end - bwd_start), logger)\n\n C_master.backward(grad_master)\n A_grad = A_master.grad\n A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]\n A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]\n A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]\n logger.info('Rank {} linear backward (input_grad): 
{}'.format(rank, check_equal(A_grad, A.grad)))\n\n B_grad = layer_master.weight.grad.transpose(0, 1)\n B_grad = torch.chunk(B_grad, DEPTH, dim=0)[k]\n B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j]\n logger.info('Rank {} linear backward (weight_grad): {}'.format(rank, check_equal(B_grad, layer.weight.grad)))\n\n bias_grad = layer_master.bias.grad\n bias_grad = torch.chunk(bias_grad, DEPTH)[j]\n logger.info('Rank {} linear backward (bias_grad): {}'.format(rank, check_equal(bias_grad, layer.bias.grad)))\n\n return fwd_end - fwd_start, bwd_end - bwd_start\n\n\ndef check_layernorm():\n rank = torch.distributed.get_rank()\n logger = get_dist_logger()\n device = get_current_device()\n dtype = torch.float32\n INPUT_SIZE = HIDDEN_SIZE\n\n input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)\n weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)\n output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)\n\n j = global_context.get_local_rank(input_parallel_mode)\n i = global_context.get_local_rank(weight_parallel_mode)\n k = global_context.get_local_rank(output_parallel_mode)\n\n norm = LayerNorm3D(INPUT_SIZE, eps=1e-6, dtype=dtype)\n norm = norm.to(device)\n norm_master = torch.nn.LayerNorm(INPUT_SIZE, eps=1e-6)\n norm_master = norm_master.to(device)\n\n weight_master = norm_master.weight.data\n torch.distributed.broadcast(weight_master, src=0)\n weight = torch.chunk(weight_master, DEPTH)[k]\n norm.weight.data.copy_(weight)\n bias_master = norm_master.bias.data\n torch.distributed.broadcast(bias_master, src=0)\n bias = torch.chunk(bias_master, DEPTH)[k]\n norm.bias.data.copy_(bias)\n\n A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)\n A_master = torch.randn(A_shape, dtype=dtype, device=device)\n torch.distributed.broadcast(A_master, src=0)\n A = torch.chunk(A_master, DEPTH, dim=0)[i]\n A = torch.chunk(A, DEPTH, dim=-1)[k]\n A = torch.chunk(A, DEPTH, dim=0)[j]\n A = A.clone()\n A.requires_grad = True\n\n fwd_start = time.time()\n out = norm(A)\n torch.cuda.synchronize()\n fwd_end = time.time()\n print_rank_0(\n 'layer norm forward: pass | {0} --> {1} | {2:.3f} s'.format(tuple(A.shape), tuple(out.shape),\n fwd_end - fwd_start), logger)\n\n A_master = A_master.clone()\n A_master.requires_grad = True\n C_master = norm_master(A_master)\n C = torch.chunk(C_master, DEPTH, dim=0)[i]\n C = torch.chunk(C, DEPTH, dim=-1)[k]\n C = torch.chunk(C, DEPTH, dim=0)[j]\n logger.info('Rank {} layernorm forward: {}'.format(rank, check_equal(out, C)))\n\n grad_shape = C_master.shape\n grad_master = torch.randn(grad_shape, dtype=dtype, device=device)\n torch.distributed.broadcast(grad_master, src=0)\n grad = torch.chunk(grad_master, DEPTH, dim=0)[i]\n grad = torch.chunk(grad, DEPTH, dim=-1)[k]\n grad = torch.chunk(grad, DEPTH, dim=0)[j]\n\n bwd_start = time.time()\n out.backward(grad)\n torch.cuda.synchronize()\n bwd_end = time.time()\n print_rank_0('layer norm backward: pass | {:.3f} s'.format(bwd_end - bwd_start), logger)\n\n C_master.backward(grad_master)\n A_grad = A_master.grad\n A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]\n A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]\n A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]\n logger.info('Rank {} layernorm backward (input_grad): {}'.format(rank, check_equal(A_grad, A.grad)))\n\n bias_grad = norm_master.weight.grad\n bias_grad = torch.chunk(bias_grad, DEPTH)[k]\n logger.info('Rank {} layernorm backward (weight_grad): {}'.format(rank, check_equal(bias_grad, norm.weight.grad)))\n\n bias_grad = norm_master.bias.grad\n bias_grad = 
torch.chunk(bias_grad, DEPTH)[k]\n logger.info('Rank {} layernorm backward (bias_grad): {}'.format(rank, check_equal(bias_grad, norm.bias.grad)))\n\n return fwd_end - fwd_start, bwd_end - bwd_start\n\n\ndef check_classifier_no_given_weight():\n rank = torch.distributed.get_rank()\n logger = get_dist_logger()\n device = get_current_device()\n dtype = torch.float32\n INPUT_SIZE = HIDDEN_SIZE\n\n input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)\n weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)\n output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)\n\n j = global_context.get_local_rank(input_parallel_mode)\n i = global_context.get_local_rank(weight_parallel_mode)\n k = global_context.get_local_rank(output_parallel_mode)\n\n layer = Classifier3D(INPUT_SIZE, NUM_CLASSES, dtype=dtype, bias=True)\n layer = layer.to(device)\n\n layer_master = VanillaClassifier(INPUT_SIZE, NUM_CLASSES, bias=True, dtype=dtype)\n layer_master = layer_master.to(device)\n\n weight_master = layer_master.weight.data\n torch.distributed.broadcast(weight_master, src=0)\n weight = torch.chunk(weight_master, DEPTH, dim=-1)[k]\n layer.weight.data.copy_(weight)\n bias_master = layer_master.bias.data\n torch.distributed.broadcast(bias_master, src=0)\n layer.bias.data.copy_(bias_master)\n\n A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)\n A_master = torch.randn(A_shape, dtype=dtype, device=device)\n torch.distributed.broadcast(A_master, src=0)\n A = torch.chunk(A_master, DEPTH, dim=0)[i]\n A = torch.chunk(A, DEPTH, dim=-1)[k]\n A = torch.chunk(A, DEPTH, dim=0)[j]\n A = A.clone()\n A.requires_grad = True\n\n fwd_start = time.time()\n out = layer(A)\n torch.cuda.synchronize()\n fwd_end = time.time()\n print_rank_0(\n 'classifier (no given weight) forward: pass | {0} --> {1} | {2:.3f} s'.format(\n tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger)\n A_master = A_master.clone()\n A_master.requires_grad = True\n C_master = layer_master(A_master)\n C = torch.chunk(C_master, DEPTH, dim=0)[i]\n C = torch.chunk(C, DEPTH, dim=0)[j]\n logger.info('Rank {} classifier (no given weight) forward: {}'.format(rank, check_equal(out, C)))\n\n grad_shape = C_master.shape\n grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device())\n torch.distributed.broadcast(grad_master, src=0)\n grad = torch.chunk(grad_master, DEPTH, dim=0)[i]\n grad = torch.chunk(grad, DEPTH, dim=0)[j]\n grad = grad.clone()\n\n bwd_start = time.time()\n out.backward(grad)\n torch.cuda.synchronize()\n bwd_end = time.time()\n print_rank_0('classifier (no given weight) backward: pass | {:.3f} s'.format(bwd_end - bwd_start), logger)\n\n grad_master = grad_master.clone()\n C_master.backward(grad_master)\n A_grad = A_master.grad\n A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]\n A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]\n A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]\n logger.info('Rank {} classifier (no given weight) backward (input_grad): {}'.format(\n rank, check_equal(A_grad, A.grad)))\n\n B_grad = layer_master.weight.grad\n B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]\n if j == k:\n logger.info('Rank {} classifier (no given weight) backward (weight_grad): {}'.format(\n rank, check_equal(B_grad, layer.weight.grad)))\n else:\n logger.info('Rank {} classifier (no given weight) backward (weight_grad): {}'.format(\n rank, layer.weight.grad is None))\n\n bias_grad = layer_master.bias.grad\n logger.info('Rank {} classifier (no given weight) backward (bias_grad): {}'.format(\n rank, 
check_equal(bias_grad, layer.bias.grad)))\n\n return fwd_end - fwd_start, bwd_end - bwd_start\n\n\ndef check_vocab_parallel_classifier_no_given_weight():\n rank = torch.distributed.get_rank()\n logger = get_dist_logger()\n device = get_current_device()\n dtype = torch.float32\n INPUT_SIZE = HIDDEN_SIZE\n\n input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)\n weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)\n output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)\n\n j = global_context.get_local_rank(input_parallel_mode)\n i = global_context.get_local_rank(weight_parallel_mode)\n k = global_context.get_local_rank(output_parallel_mode)\n\n layer = VocabParallelClassifier3D(INPUT_SIZE, VOCAB_SIZE, bias=True)\n layer = layer.to(dtype).to(device)\n\n layer_master = VanillaClassifier(INPUT_SIZE, VOCAB_SIZE, bias=True)\n layer_master = layer_master.to(dtype).to(device)\n\n weight_master = layer_master.weight.data\n torch.distributed.broadcast(weight_master, src=0)\n weight = torch.chunk(weight_master, DEPTH, dim=0)[j]\n weight = torch.chunk(weight, DEPTH, dim=-1)[k]\n layer.weight.data.copy_(weight)\n bias_master = layer_master.bias.data\n torch.distributed.broadcast(bias_master, src=0)\n bias = torch.chunk(bias_master, DEPTH)[j]\n layer.bias.data.copy_(bias)\n\n A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)\n A_master = torch.randn(A_shape, dtype=dtype, device=device)\n torch.distributed.broadcast(A_master, src=0)\n A = torch.chunk(A_master, DEPTH, dim=0)[i]\n A = torch.chunk(A, DEPTH, dim=-1)[k]\n A = torch.chunk(A, DEPTH, dim=0)[j]\n A = A.clone()\n A.requires_grad = True\n\n fwd_start = time.time()\n out = layer(A)\n torch.cuda.synchronize()\n fwd_end = time.time()\n print_rank_0(\n 'vocab parallel classifier (no given weight) forward: pass | {0} --> {1} | {2:.3f} s'.format(\n tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger)\n A_master = A_master.clone()\n A_master.requires_grad = True\n C_master = layer_master(A_master)\n C = torch.chunk(C_master, DEPTH, dim=0)[i]\n C = torch.chunk(C, DEPTH, dim=-1)[j]\n C = torch.chunk(C, DEPTH, dim=0)[k]\n logger.info('Rank {} vocab parallel classifier (no given weight) forward: {}'.format(rank, check_equal(out, C)))\n\n grad_shape = C_master.shape\n grad_master = torch.randn(grad_shape, dtype=dtype, device=device)\n torch.distributed.broadcast(grad_master, src=0)\n grad = torch.chunk(grad_master, DEPTH, dim=0)[i]\n grad = torch.chunk(grad, DEPTH, dim=-1)[j]\n grad = torch.chunk(grad, DEPTH, dim=0)[k]\n grad = grad.clone()\n\n bwd_start = time.time()\n out.backward(grad)\n torch.cuda.synchronize()\n bwd_end = time.time()\n print_rank_0('vocab parallel classifier (no given weight) backward: pass | {:.3f} s'.format(bwd_end - bwd_start),\n logger)\n\n grad_master = grad_master.clone()\n C_master.backward(grad_master)\n A_grad = A_master.grad\n A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]\n A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]\n A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]\n logger.info('Rank {} vocab parallel classifier (no given weight) backward (input_grad): {}'.format(\n rank, check_equal(A_grad, A.grad)))\n\n B_grad = layer_master.weight.grad\n B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]\n B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]\n logger.info('Rank {} vocab parallel classifier (no given weight) backward (weight_grad): {}'.format(\n rank, check_equal(B_grad, layer.weight.grad)))\n\n bias_grad = layer_master.bias.grad\n bias_grad = torch.chunk(bias_grad, DEPTH)[j]\n 
logger.info('Rank {} vocab parallel classifier (no given weight) backward (bias_grad): {}'.format(\n rank, check_equal(bias_grad, layer.bias.grad)))\n\n return fwd_end - fwd_start, bwd_end - bwd_start\n\n\ndef check_classifier_given_embed_weight():\n rank = torch.distributed.get_rank()\n logger = get_dist_logger()\n device = get_current_device()\n dtype = torch.float32\n\n input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)\n weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)\n output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)\n\n j = global_context.get_local_rank(input_parallel_mode)\n i = global_context.get_local_rank(weight_parallel_mode)\n k = global_context.get_local_rank(output_parallel_mode)\n\n embed = Embedding3D(VOCAB_SIZE, HIDDEN_SIZE)\n embed = embed.to(dtype).to(device)\n\n embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)\n embed_master = embed_master.to(dtype).to(device)\n\n weight_master = embed_master.weight.data\n torch.distributed.broadcast(weight_master, src=0)\n weight = torch.chunk(weight_master, DEPTH, dim=-1)[k]\n embed.weight.data.copy_(weight)\n\n layer = Classifier3D(HIDDEN_SIZE, VOCAB_SIZE, weight=embed.weight, bias=False)\n layer = layer.to(dtype).to(device)\n\n layer_master = VanillaClassifier(HIDDEN_SIZE, VOCAB_SIZE, weight=embed_master.weight, bias=False)\n layer_master = layer_master.to(dtype).to(device)\n\n A_shape = (BATCH_SIZE, SEQ_LENGTH)\n A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)\n torch.distributed.broadcast(A_master, src=0)\n A = A_master.clone()\n\n fwd_start = time.time()\n out = layer(embed(A))\n torch.cuda.synchronize()\n fwd_end = time.time()\n print_rank_0(\n 'classifier (given embed weight) forward: pass | {0} --> {1} | {2:.3f} s'.format(\n tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger)\n A_master = A_master.clone()\n C_master = layer_master(embed_master(A_master))\n C = torch.chunk(C_master, DEPTH, dim=0)[i]\n C = torch.chunk(C, DEPTH, dim=0)[j]\n logger.info('Rank {} classifier (given embed weight) forward: {}'.format(rank, check_equal(out, C)))\n\n grad_shape = C_master.shape\n grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device())\n torch.distributed.broadcast(grad_master, src=0)\n grad = torch.chunk(grad_master, DEPTH, dim=0)[i]\n grad = torch.chunk(grad, DEPTH, dim=0)[j]\n grad = grad.clone()\n\n bwd_start = time.time()\n out.backward(grad)\n torch.cuda.synchronize()\n bwd_end = time.time()\n print_rank_0('classifier (given embed weight) backward: pass | {:.3f} s'.format(bwd_end - bwd_start), logger)\n\n grad_master = grad_master.clone()\n C_master.backward(grad_master)\n\n B_grad = embed_master.weight.grad\n B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]\n if j == k:\n logger.info('Rank {} classifier (given embed weight) backward (weight_grad): {}'.format(\n rank, check_equal(B_grad, embed.weight.grad)))\n else:\n logger.info('Rank {} classifier (given embed weight) backward (weight_grad): {}'.format(\n rank, embed.weight.grad is None))\n\n return fwd_end - fwd_start, bwd_end - bwd_start\n\n\ndef check_vocab_parallel_classifier_given_embed_weight():\n rank = torch.distributed.get_rank()\n logger = get_dist_logger()\n device = get_current_device()\n dtype = torch.float32\n\n input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)\n weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)\n output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)\n\n j = 
global_context.get_local_rank(input_parallel_mode)\n i = global_context.get_local_rank(weight_parallel_mode)\n k = global_context.get_local_rank(output_parallel_mode)\n\n embed = VocabParallelEmbedding3D(VOCAB_SIZE, HIDDEN_SIZE)\n embed = embed.to(dtype).to(device)\n\n embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)\n embed_master = embed_master.to(dtype).to(device)\n\n weight_master = embed_master.weight.data\n torch.distributed.broadcast(weight_master, src=0)\n weight = torch.chunk(weight_master, DEPTH, dim=0)[j]\n weight = torch.chunk(weight, DEPTH, dim=-1)[k]\n embed.weight.data.copy_(weight)\n\n layer = VocabParallelClassifier3D(HIDDEN_SIZE, VOCAB_SIZE, weight=embed.weight, bias=False)\n layer = layer.to(dtype).to(device)\n\n layer_master = VanillaClassifier(HIDDEN_SIZE, VOCAB_SIZE, weight=embed_master.weight, bias=False)\n layer_master = layer_master.to(dtype).to(device)\n\n A_shape = (BATCH_SIZE, SEQ_LENGTH)\n A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)\n torch.distributed.broadcast(A_master, src=0)\n A = A_master.clone()\n\n fwd_start = time.time()\n out = layer(embed(A))\n torch.cuda.synchronize()\n fwd_end = time.time()\n print_rank_0(\n 'vocab parallel classifier (given embed weight) forward: pass | {0} --> {1} | {2:.3f} s'.format(\n tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger)\n A_master = A_master.clone()\n C_master = layer_master(embed_master(A_master))\n C = torch.chunk(C_master, DEPTH, dim=0)[i]\n C = torch.chunk(C, DEPTH, dim=-1)[j]\n C = torch.chunk(C, DEPTH, dim=0)[k]\n logger.info('Rank {} vocab parallel classifier (given embed weight) forward: {}'.format(rank, check_equal(out, C)))\n\n grad_shape = C_master.shape\n grad_master = torch.randn(grad_shape, dtype=dtype, device=device)\n torch.distributed.broadcast(grad_master, src=0)\n grad = torch.chunk(grad_master, DEPTH, dim=0)[i]\n grad = torch.chunk(grad, DEPTH, dim=-1)[j]\n grad = torch.chunk(grad, DEPTH, dim=0)[k]\n grad = grad.clone()\n\n bwd_start = time.time()\n out.backward(grad)\n torch.cuda.synchronize()\n bwd_end = time.time()\n print_rank_0('vocab parallel classifier (given embed weight) backward: pass | {:.3f} s'.format(bwd_end - bwd_start),\n logger)\n\n grad_master = grad_master.clone()\n C_master.backward(grad_master)\n\n B_grad = embed_master.weight.grad\n B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]\n B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]\n logger.info('Rank {} vocab parallel embed backward (weight_grad): {}'.format(rank,\n check_equal(B_grad,\n embed.weight.grad)))\n\n return fwd_end - fwd_start, bwd_end - bwd_start\n\n\ndef check_patch_embed():\n rank = torch.distributed.get_rank()\n device = get_current_device()\n logger = get_dist_logger()\n dtype = torch.float32\n\n input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)\n weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)\n output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)\n\n j = global_context.get_local_rank(input_parallel_mode)\n i = global_context.get_local_rank(weight_parallel_mode)\n k = global_context.get_local_rank(output_parallel_mode)\n\n layer = PatchEmbedding3D(IMG_SIZE, 4, 3, HIDDEN_SIZE, dtype=dtype)\n torch.nn.init.ones_(layer.cls_token)\n torch.nn.init.ones_(layer.pos_embed)\n layer = layer.to(device)\n\n layer_master = VanillaPatchEmbedding(IMG_SIZE, 4, 3, HIDDEN_SIZE, dtype=dtype)\n torch.nn.init.ones_(layer_master.cls_token)\n torch.nn.init.ones_(layer_master.pos_embed)\n layer_master = layer_master.to(device)\n\n 
proj_weight_master = layer_master.weight.data\n torch.distributed.broadcast(proj_weight_master, src=0)\n proj_weight = torch.chunk(proj_weight_master, DEPTH, dim=0)[k]\n layer.weight.data.copy_(proj_weight)\n proj_bias_master = layer_master.bias.data\n torch.distributed.broadcast(proj_bias_master, src=0)\n proj_bias = torch.chunk(proj_bias_master, DEPTH)[k]\n layer.bias.data.copy_(proj_bias)\n\n A_shape = (BATCH_SIZE, 3, IMG_SIZE, IMG_SIZE)\n A_master = torch.randn(A_shape, dtype=dtype, device=device)\n torch.distributed.broadcast(A_master, src=0)\n A = A_master.clone()\n\n fwd_start = time.time()\n out = layer(A)\n torch.cuda.synchronize()\n fwd_end = time.time()\n print_rank_0(\n 'patch embed forward: pass | {0} --> {1} | {2:.3f} s'.format(tuple(A.shape), tuple(out.shape),\n fwd_end - fwd_start), logger)\n\n A_master = A_master.clone()\n C_master = layer_master(A_master)\n C = torch.chunk(C_master, DEPTH, dim=0)[i]\n C = torch.chunk(C, DEPTH, dim=-1)[k]\n C = torch.chunk(C, DEPTH, dim=0)[j]\n logger.info('Rank {} patch embed forward: {}'.format(rank, check_equal(out, C)))\n\n grad_shape = C_master.shape\n grad_master = torch.randn(grad_shape, dtype=dtype, device=device)\n torch.distributed.broadcast(grad_master, src=0)\n grad = torch.chunk(grad_master, DEPTH, dim=0)[i]\n grad = torch.chunk(grad, DEPTH, dim=-1)[k]\n grad = torch.chunk(grad, DEPTH, dim=0)[j]\n grad = grad.clone()\n\n bwd_start = time.time()\n out.backward(grad)\n torch.cuda.synchronize()\n bwd_end = time.time()\n print_rank_0('patch embed backward: pass | {:.3f} s'.format(bwd_end - bwd_start), logger)\n\n grad_master = grad_master.clone()\n C_master.backward(grad_master)\n\n cls_grad_master = layer_master.cls_token.grad\n cls_grad = torch.chunk(cls_grad_master, DEPTH, dim=-1)[k]\n logger.info('Rank {} patch embed backward (cls_grad): {}'.format(rank, check_equal(cls_grad, layer.cls_token.grad)))\n\n pos_grad_master = layer_master.pos_embed.grad\n pos_grad = torch.chunk(pos_grad_master, DEPTH, dim=-1)[k]\n logger.info('Rank {} patch embed backward (pos_embed_grad): {}'.format(rank,\n check_equal(pos_grad, layer.pos_embed.grad)))\n\n B_grad = layer_master.weight.grad\n B_grad = torch.chunk(B_grad, DEPTH, dim=0)[k]\n logger.info('Rank {} patch embed backward (proj_weight_grad): {}'.format(rank,\n check_equal(B_grad, layer.weight.grad)))\n\n bias_grad = layer_master.bias.grad\n bias_grad = torch.chunk(bias_grad, DEPTH)[k]\n logger.info('Rank {} patch embed backward (proj_bias_grad): {}'.format(rank,\n check_equal(bias_grad, layer.bias.grad)))\n\n return fwd_end - fwd_start, bwd_end - bwd_start\n\n\ndef check_embed():\n rank = torch.distributed.get_rank()\n device = get_current_device()\n logger = get_dist_logger()\n dtype = torch.float32\n\n input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)\n weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)\n output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)\n\n j = global_context.get_local_rank(input_parallel_mode)\n i = global_context.get_local_rank(weight_parallel_mode)\n k = global_context.get_local_rank(output_parallel_mode)\n\n layer = Embedding3D(VOCAB_SIZE, HIDDEN_SIZE)\n layer = layer.to(dtype).to(device)\n layer_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)\n layer_master = layer_master.to(dtype).to(device)\n\n weight_master = layer_master.weight.data\n torch.distributed.broadcast(weight_master, src=0)\n weight = torch.chunk(weight_master, DEPTH, dim=-1)[k]\n layer.weight.data.copy_(weight)\n\n A_shape = (BATCH_SIZE, 
SEQ_LENGTH)\n A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)\n torch.distributed.broadcast(A_master, src=0)\n A = A_master.clone()\n\n fwd_start = time.time()\n out = layer(A)\n torch.cuda.synchronize()\n fwd_end = time.time()\n logger.info('embed forward: pass | {0} --> {1} | {2:.3f} s'.format(tuple(A.shape), tuple(out.shape),\n fwd_end - fwd_start),\n ranks=[0])\n\n A_master = A_master.clone()\n C_master = layer_master(A_master)\n C = torch.chunk(C_master, DEPTH, dim=0)[i]\n C = torch.chunk(C, DEPTH, dim=-1)[k]\n C = torch.chunk(C, DEPTH, dim=0)[j]\n logger.info('Rank {} embed forward: {}'.format(rank, check_equal(out, C)))\n\n grad_shape = C_master.shape\n grad_master = torch.randn(grad_shape, dtype=dtype, device=device)\n torch.distributed.broadcast(grad_master, src=0)\n grad = torch.chunk(grad_master, DEPTH, dim=0)[i]\n grad = torch.chunk(grad, DEPTH, dim=-1)[k]\n grad = torch.chunk(grad, DEPTH, dim=0)[j]\n grad = grad.clone()\n bwd_start = time.time()\n out.backward(grad)\n torch.cuda.synchronize()\n bwd_end = time.time()\n logger.info('embed backward: pass | {:.3f} s'.format(bwd_end - bwd_start), ranks=[0])\n\n grad_master = grad_master.clone()\n C_master.backward(grad_master)\n\n B_grad = layer_master.weight.grad\n B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]\n if j == k:\n logger.info('Rank {} embed backward (weight_grad): {}'.format(rank, check_equal(B_grad, layer.weight.grad)))\n else:\n logger.info('Rank {} embed backward (weight_grad): {}'.format(rank, layer.weight.grad is None))\n\n return fwd_end - fwd_start, bwd_end - bwd_start\n\n\ndef check_vocab_parallel_embed():\n rank = torch.distributed.get_rank()\n device = get_current_device()\n logger = get_dist_logger()\n dtype = torch.float32\n\n input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)\n weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)\n output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)\n\n j = global_context.get_local_rank(input_parallel_mode)\n i = global_context.get_local_rank(weight_parallel_mode)\n k = global_context.get_local_rank(output_parallel_mode)\n\n layer = VocabParallelEmbedding3D(VOCAB_SIZE, HIDDEN_SIZE)\n layer = layer.to(dtype).to(device)\n layer_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)\n layer_master = layer_master.to(dtype).to(device)\n\n weight_master = layer_master.weight.data\n torch.distributed.broadcast(weight_master, src=0)\n weight = torch.chunk(weight_master, DEPTH, dim=0)[j]\n weight = torch.chunk(weight, DEPTH, dim=-1)[k]\n layer.weight.data.copy_(weight)\n\n A_shape = (BATCH_SIZE, SEQ_LENGTH)\n A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)\n torch.distributed.broadcast(A_master, src=0)\n A = A_master.clone()\n\n fwd_start = time.time()\n out = layer(A)\n torch.cuda.synchronize()\n fwd_end = time.time()\n logger.info('vocab parallel embed forward: pass | {0} --> {1} | {2:.3f} s'.format(\n tuple(A.shape), tuple(out.shape), fwd_end - fwd_start),\n ranks=[0])\n\n A_master = A_master.clone()\n C_master = layer_master(A_master)\n C = torch.chunk(C_master, DEPTH, dim=0)[i]\n C = torch.chunk(C, DEPTH, dim=-1)[k]\n C = torch.chunk(C, DEPTH, dim=0)[j]\n logger.info('Rank {} vocab parallel embed forward: {}'.format(rank, check_equal(out, C)))\n\n grad_shape = C_master.shape\n grad_master = torch.randn(grad_shape, dtype=dtype, device=device)\n torch.distributed.broadcast(grad_master, src=0)\n grad = torch.chunk(grad_master, DEPTH, dim=0)[i]\n grad = torch.chunk(grad, DEPTH, dim=-1)[k]\n grad = torch.chunk(grad, 
DEPTH, dim=0)[j]\n grad = grad.clone()\n bwd_start = time.time()\n out.backward(grad)\n torch.cuda.synchronize()\n bwd_end = time.time()\n logger.info('vocab parallel embed backward: pass | {:.3f} s'.format(bwd_end - bwd_start), ranks=[0])\n\n grad_master = grad_master.clone()\n C_master.backward(grad_master)\n\n B_grad = layer_master.weight.grad\n B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]\n B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]\n logger.info('Rank {} vocab parallel embed backward (weight_grad): {}'.format(rank,\n check_equal(B_grad,\n layer.weight.grad)))\n\n return fwd_end - fwd_start, bwd_end - bwd_start\n\n\ndef check_loss():\n rank = torch.distributed.get_rank()\n logger = get_dist_logger()\n device = get_current_device()\n dtype = torch.float32\n\n input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)\n weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)\n\n j = global_context.get_local_rank(input_parallel_mode)\n i = global_context.get_local_rank(weight_parallel_mode)\n\n criterion = CrossEntropyLoss3D()\n criterion_master = torch.nn.CrossEntropyLoss()\n\n out_shape = (BATCH_SIZE, NUM_CLASSES)\n out_master = torch.randn(out_shape, dtype=dtype, device=device)\n target_master = torch.randint(NUM_CLASSES, (BATCH_SIZE, ), dtype=torch.long, device=device)\n torch.distributed.broadcast(out_master, src=0)\n torch.distributed.broadcast(target_master, src=0)\n out = torch.chunk(out_master, DEPTH, dim=0)[i]\n out = torch.chunk(out, DEPTH, dim=0)[j]\n out = out.clone()\n out.requires_grad = True\n\n fwd_start = time.time()\n loss = criterion(out, target_master)\n fwd_end = time.time()\n logger.info('cross entropy loss forward: pass | {0} --> {1} | {2:.3f} s'.format(tuple(out.shape), tuple(loss.shape),\n fwd_end - fwd_start),\n ranks=[0])\n\n out_master = out_master.clone()\n out_master.requires_grad = True\n loss_master = criterion_master(out_master, target_master)\n logger.info('Rank {} cross entropy loss forward: {}'.format(rank, check_equal(loss, loss_master)))\n\n bwd_start = time.time()\n loss.backward()\n bwd_end = time.time()\n logger.info('cross entropy loss backward: pass | {:.3f} s'.format(bwd_end - bwd_start), ranks=[0])\n\n loss_master.backward()\n out_grad = out_master.grad\n out_grad = torch.chunk(out_grad, DEPTH, dim=0)[i]\n out_grad = torch.chunk(out_grad, DEPTH, dim=0)[j]\n logger.info('Rank {} cross entropy loss backward: {}'.format(rank, check_equal(out_grad, out.grad)))\n\n return fwd_end - fwd_start, bwd_end - bwd_start\n\n\ndef check_vocab_parallel_loss():\n rank = torch.distributed.get_rank()\n logger = get_dist_logger()\n device = get_current_device()\n dtype = torch.float32\n\n input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)\n weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)\n output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)\n\n j = global_context.get_local_rank(input_parallel_mode)\n i = global_context.get_local_rank(weight_parallel_mode)\n k = global_context.get_local_rank(output_parallel_mode)\n\n criterion = VocabParallelCrossEntropyLoss3D()\n criterion_master = torch.nn.CrossEntropyLoss()\n\n out_shape = (BATCH_SIZE, NUM_CLASSES)\n out_master = torch.randn(out_shape, dtype=dtype, device=device)\n target_master = torch.randint(NUM_CLASSES, (BATCH_SIZE, ), dtype=torch.long, device=device)\n torch.distributed.broadcast(out_master, src=0)\n torch.distributed.broadcast(target_master, src=0)\n out = torch.chunk(out_master, DEPTH, dim=0)[i]\n out = torch.chunk(out, DEPTH, 
dim=-1)[k]\n out = torch.chunk(out, DEPTH, dim=0)[j]\n out = out.clone()\n out.requires_grad = True\n\n fwd_start = time.time()\n loss = criterion(out, target_master)\n fwd_end = time.time()\n logger.info('vocab parallel cross entropy loss forward: pass | {0} --> {1} | {2:.3f} s'.format(\n tuple(out.shape), tuple(loss.shape), fwd_end - fwd_start),\n ranks=[0])\n\n out_master = out_master.clone()\n out_master.requires_grad = True\n loss_master = criterion_master(out_master, target_master)\n logger.info('Rank {} vocab parallel cross entropy loss forward: {}'.format(rank, check_equal(loss, loss_master)))\n\n bwd_start = time.time()\n loss.backward()\n bwd_end = time.time()\n logger.info('vocab parallel cross entropy loss backward: pass | {:.3f} s'.format(bwd_end - bwd_start), ranks=[0])\n\n loss_master.backward()\n out_grad = out_master.grad\n out_grad = torch.chunk(out_grad, DEPTH, dim=0)[i]\n out_grad = torch.chunk(out_grad, DEPTH, dim=-1)[k]\n out_grad = torch.chunk(out_grad, DEPTH, dim=0)[j]\n logger.info('Rank {} vocab parallel cross entropy loss backward: {}'.format(rank, check_equal(out_grad, out.grad)))\n\n return fwd_end - fwd_start, bwd_end - bwd_start\n"
] |
[
[
"torch.distributed.broadcast",
"torch.cuda.synchronize",
"torch.randint",
"torch.nn.CrossEntropyLoss",
"torch.randn",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.nn.init.ones_",
"torch.chunk",
"torch.distributed.get_rank"
]
] |
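
The test file in this row shards every replicated "master" tensor with `torch.chunk`, indexed by the local ranks of the 3D input/weight/output groups. A minimal single-process sketch of that partitioning idiom (not taken from the row above; `DEPTH`, `j` and `k` are stand-in values rather than ranks from a real process group):

```python
# Sketch of the chunk-by-local-rank partitioning used throughout the 3D-parallel tests.
# DEPTH, j, k are toy values here, not ranks obtained from torch.distributed.
import torch

DEPTH = 2          # side length of the 3D process mesh (assumed for the sketch)
j, k = 1, 0        # hypothetical local ranks in two of the parallel groups

master = torch.randn(4 * DEPTH, 6 * DEPTH)      # full, replicated parameter
shard = torch.chunk(master, DEPTH, dim=0)[k]    # keep row block k
shard = torch.chunk(shard, DEPTH, dim=-1)[j]    # keep column block j
print(shard.shape)                              # torch.Size([4, 6])
```
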
raymondngiam/neural-translation-model-eng-to-ch
|
[
"1dfb76d011526e43fbc0200c98c1082ffae866d6"
] |
[
"src/model.py"
] |
[
"import tensorflow as tf\nimport numpy as np\n\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Layer, Input, Masking, LSTM, Embedding, Dense\n\nclass EndTokenEmbedLayer(Layer):\n def __init__(self):\n super(EndTokenEmbedLayer, self).__init__()\n\n def build(self, input_shape):\n self.embedding_size = input_shape[-1]\n self.embedding = self.add_weight(shape=(self.embedding_size,),\n initializer='random_normal',\n name='end_token_embedding')\n \n def call(self, inputs):\n one_row = tf.reshape(self.embedding,(-1,1,self.embedding_size))\n end_token_output = tf.tile(one_row,[tf.shape(inputs)[0],1,1])\n return tf.concat((inputs,end_token_output),axis=1)\n\ndef Encoder(input_shape):\n inputs = Input(input_shape)\n h = EndTokenEmbedLayer()(inputs)\n h = Masking(mask_value=0.)(h)\n lstm , hidden_state, cell_state = LSTM(512,return_sequences=True,return_state=True)(h)\n model = Model(inputs=inputs, outputs=[hidden_state, cell_state])\n return model\n\nclass Decoder(Model):\n def __init__(self,input_embedding_dim):\n super(Decoder, self).__init__()\n self.embedding = Embedding(input_dim = input_embedding_dim[0],\n output_dim = input_embedding_dim[1],\n mask_zero = True)\n self.lstm = LSTM(units=512, return_sequences=True, return_state=True)\n self.dense = Dense(units=input_embedding_dim[0])\n\n def call(self,inputs,hidden_state = None,cell_state = None):\n h = self.embedding(inputs)\n if hidden_state != None and cell_state != None:\n lstm,hidden,cell = self.lstm(h,initial_state =[hidden_state,cell_state])\n else:\n lstm,hidden,cell = self.lstm(h)\n h = self.dense(lstm)\n return h,hidden,cell\n\nclass NeuralTranslationModel(Model):\n def __init__(self,encoder_input_shape,decoder_input_shape):\n super(NeuralTranslationModel, self).__init__()\n self.encoder = Encoder(input_shape=encoder_input_shape)\n self.decoder = Decoder(input_embedding_dim=decoder_input_shape)\n self.model_trainable_variables = self.encoder.trainable_variables + \\\n self.decoder.trainable_variables \n \n def chinese_data_io(self,chinese_data):\n input_data = chinese_data[:,0:tf.shape(chinese_data)[1]-1]\n output_data = chinese_data[:,1:tf.shape(chinese_data)[1]]\n return(input_data,output_data)\n\n def call(self,inputs):\n (encoder_in, decoder_in)=inputs\n hidden_state ,cell_state = self.encoder(encoder_in)\n dense_output, _, _ = self.decoder(decoder_in, hidden_state, cell_state)\n return dense_output\n\n @tf.function\n def train_step(self,data): \n (english,chinese) = data\n chinese_input, chinese_output = self.chinese_data_io(chinese) \n with tf.GradientTape() as tape: \n hidden_state ,cell_state = self.encoder(english)\n dense_output, _, _ = self.decoder(chinese_input, hidden_state, cell_state)\n loss = tf.math.reduce_mean(self.compiled_loss(chinese_output,dense_output))\n grads = tape.gradient(loss, self.model_trainable_variables)\n self.optimizer.apply_gradients(zip(grads,\n self.model_trainable_variables))\n self.compiled_metrics.update_state(chinese_output,dense_output)\n return {m.name:m.result() for m in self.metrics}\n\n @tf.function\n def test_step(self, data):\n (english,chinese) = data\n chinese_input, chinese_output = self.chinese_data_io(chinese) \n hidden_state ,cell_state = self.encoder(english)\n dense_output, _, _ = self.decoder(chinese_input, hidden_state, cell_state)\n loss = tf.math.reduce_mean(self.compiled_loss(chinese_output,dense_output))\n self.compiled_metrics.update_state(chinese_output,dense_output)\n 
return {m.name:m.result() for m in self.metrics}"
] |
[
[
"tensorflow.concat",
"tensorflow.keras.layers.Masking",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.Dense",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.GradientTape",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Input"
]
] |
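
The `EndTokenEmbedLayer` in this row appends one learned end-of-sequence vector to every example in a batch via `tf.reshape`, `tf.tile` and `tf.concat`. A small sketch of that shape manipulation with toy dimensions (requires TensorFlow; the random vector stands in for the trained weight and is not part of the dataset record):

```python
# Sketch of appending a single learned "end token" embedding to a batch of sequences.
import tensorflow as tf

batch, seq_len, emb = 3, 5, 8
inputs = tf.random.normal((batch, seq_len, emb))
end_token = tf.random.normal((emb,))                   # stand-in for the layer weight

one_row = tf.reshape(end_token, (1, 1, emb))           # shape [1, 1, emb]
tiled = tf.tile(one_row, [tf.shape(inputs)[0], 1, 1])  # repeat once per batch element
extended = tf.concat((inputs, tiled), axis=1)          # shape [batch, seq_len + 1, emb]
print(extended.shape)                                  # (3, 6, 8)
```
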
amodas/PRIME-augmentations
|
[
"89880bfe2800d8e59fa04232ffd36aa7fc8e8064"
] |
[
"utils/diffeomorphism.py"
] |
[
"import functools\nimport math\nimport torch\nfrom einops import rearrange\nfrom opt_einsum import contract\n\n\nclass Diffeo(torch.nn.Module):\n \"\"\"Randomly apply a diffeomorphism to the image(s).\n The image should be a Tensor and it is expected to have [..., n, n] shape,\n where ... means an arbitrary number of leading dimensions.\n\n A random cut is drawn from a discrete Beta distribution of parameters\n alpha and beta such that\n s = alpha + beta (measures how peaked the distribution is)\n r = alpha / beta (measured how biased towards cutmax the distribution is)\n\n Given cut and the allowed* interval of temperatures [Tmin, Tmax], a random T is\n drawn from a Beta distribution with parameters alpha and beta such that:\n s = alpha + beta (measures how peaked the distribution is)\n r = alpha / beta (measured how biased towards T_max the distribution is)\n Beta ~ delta_function for s -> inf. To apply a specific value x \\in [0, 1]\n in the allowed interval of T or cut, set\n - s = 1e10\n - r = x / (1 - x)\n *the allowed T interval is defined such as:\n - Tmin corresponds to a typical displacement of 1/2 pixel in the center\n of the image\n - Tmax corresponds to the highest T for which no overhangs are present.\n Args:\n sT (float):\n rT (float):\n scut (float):\n rcut (float):\n cut_min (int):\n cut_max (int):\n\n Returns:\n Tensor: Diffeo version of the input image(s).\n \"\"\"\n\n def __init__(self, sT, rT, scut, rcut, cutmin, cutmax, alpha, stochastic=False):\n super().__init__()\n\n self.sT = sT\n self.rT = rT\n self.scut = scut\n self.rcut = rcut\n self.cutmin = cutmin\n self.cutmax = cutmax\n self.alpha = alpha\n\n self.stochastic = stochastic\n if self.stochastic:\n self.cutmax_max = cutmax\n self.alpha_max = alpha\n\n self.betaT = torch.distributions.beta.Beta(sT - sT / (rT + 1), sT / (rT + 1), validate_args=None)\n self.betacut = torch.distributions.beta.Beta(scut - scut / (rcut + 1), scut / (rcut + 1), validate_args=None)\n\n def forward(self, img):\n \"\"\"\n Args:\n img (Tensor): Image(s) to be 'diffeomorphed'.\n Returns:\n Tensor: Diffeo image(s).\n \"\"\"\n\n init_shape = img.shape\n if len(init_shape) < 4:\n img = rearrange(img, \"c h w -> () c h w\")\n\n if self.stochastic:\n self._sample_params()\n\n # image size\n n = img.shape[-1]\n\n cut = (self.betacut.sample() * (self.cutmax + 1 - self.cutmin) + self.cutmin).int().item()\n T1, T2 = temperature_range(n, cut)\n T2 = max(T1, self.alpha * T2)\n T = (self.betaT.sample() * (T2 - T1) + T1)\n\n return deform(img, T, cut).reshape(init_shape)\n\n def _sample_params(self):\n self.cutmax = torch.randint(low=self.cutmin + 1, high=self.cutmax_max + 1, size=(1,)).item()\n # self.alpha = torch.FloatTensor([1]).uniform_(0., self.alpha_max).item()\n\n def __repr__(self):\n return self.__class__.__name__ + f'(sT={self.sT}, rT={self.rT}, scut={self.scut}, rcut={self.rcut}, cutmin={self.cutmin}, cutmax={self.cutmax})'\n\n\n\n@functools.lru_cache()\ndef scalar_field_modes(n, m, dtype=torch.float64, device='cpu'):\n \"\"\"\n sqrt(1 / Energy per mode) and the modes\n \"\"\"\n x = torch.linspace(0, 1, n, dtype=dtype, device=device)\n k = torch.arange(1, m + 1, dtype=dtype, device=device)\n i, j = torch.meshgrid(k, k)\n r = (i.pow(2) + j.pow(2)).sqrt()\n e = (r < m + 0.5) / r\n s = torch.sin(math.pi * x[:, None] * k[None, :])\n return e, s\n\n\ndef scalar_field(n, m, device='cpu'):\n \"\"\"\n random scalar field of size nxn made of the first m modes\n \"\"\"\n e, s = scalar_field_modes(n, m, dtype=torch.get_default_dtype(), 
device=device)\n c = torch.randn(m, m, device=device) * e\n # return torch.einsum('ij,xi,yj->yx', c, s, s)\n return contract('ij,xi,yj->yx', c, s, s)\n\n\ndef deform(image, T, cut, interp='linear'):\n \"\"\"\n 1. Sample a displacement field tau: R2 -> R2, using tempertature `T` and cutoff `cut`\n 2. Apply tau to `image`\n :param img Tensor: square image(s) [..., y, x]\n :param T float: temperature\n :param cut int: high frequency cutoff\n \"\"\"\n n = image.shape[-1]\n assert image.shape[-2] == n, 'Image(s) should be square.'\n\n device = image.device\n\n # Sample dx, dy\n # u, v are defined in [0, 1]^2\n # dx, dx are defined in [0, n]^2\n u = scalar_field(n, cut, device) # [n,n]\n v = scalar_field(n, cut, device) # [n,n]\n dx = T ** 0.5 * u * n\n dy = T ** 0.5 * v * n\n\n # Apply tau\n return remap(image, dx, dy, interp)\n\n\ndef remap(a, dx, dy, interp):\n \"\"\"\n :param a: Tensor of shape [..., y, x]\n :param dx: Tensor of shape [y, x]\n :param dy: Tensor of shape [y, x]\n :param interp: interpolation method\n \"\"\"\n n, m = a.shape[-2:]\n assert dx.shape == (n, m) and dy.shape == (n, m), 'Image(s) and displacement fields shapes should match.'\n\n y, x = torch.meshgrid(torch.arange(n, dtype=dx.dtype, device=a.device), torch.arange(m, dtype=dx.dtype, device=a.device))\n\n xn = (x - dx).clamp(0, m-1)\n yn = (y - dy).clamp(0, n-1)\n\n if interp == 'linear':\n xf = xn.floor().long()\n yf = yn.floor().long()\n xc = xn.ceil().long()\n yc = yn.ceil().long()\n\n xv = xn - xf\n yv = yn - yf\n\n return (1-yv)*(1-xv)*a[..., yf, xf] + (1-yv)*xv*a[..., yf, xc] + yv*(1-xv)*a[..., yc, xf] + yv*xv*a[..., yc, xc]\n\n if interp == 'gaussian':\n # can be implemented more efficiently by adding a cutoff to the Gaussian\n sigma = 0.4715\n\n dx = (xn[:, :, None, None] - x)\n dy = (yn[:, :, None, None] - y)\n\n c = (-dx**2 - dy**2).div(2 * sigma**2).exp()\n c = c / c.sum([2, 3], keepdim=True)\n\n return (c * a[..., None, None, :, :]).sum([-1, -2])\n\n\ndef temperature_range(n, cut):\n \"\"\"\n Define the range of allowed temperature\n for given image size and cut.\n \"\"\"\n if cut == 0:\n print(\"Cut is zero!\")\n if isinstance(cut, (float, int)):\n cut = cut + 1e-6\n log = math.log(cut)\n else:\n log = cut.log()\n T1 = 1 / (math.pi * n ** 2 * log)\n T2 = 4 / (math.pi ** 3 * cut ** 2 * log)\n return T1, T2\n\n\ndef typical_displacement(T, cut, n):\n if isinstance(cut, (float, int)):\n log = math.log(cut)\n else:\n log = cut.log()\n return n * (math.pi * T * log) ** .5 / 2"
] |
[
[
"torch.linspace",
"torch.randint",
"torch.sin",
"torch.randn",
"torch.distributions.beta.Beta",
"torch.arange",
"torch.get_default_dtype",
"torch.meshgrid"
]
] |
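
The `Diffeo` module in this row draws its temperature and cutoff from Beta distributions parameterized by a peakedness `s` and a bias `r`, so that `alpha = s*r/(r+1)` and `beta = s/(r+1)`. A short sketch of that sampling step with toy values (illustrative only, not code from the row above):

```python
# Sketch of the (s, r) -> Beta(alpha, beta) parameterization and the cutoff draw.
import torch

s, r = 4.0, 1.0                                   # toy peakedness / bias values
dist = torch.distributions.beta.Beta(s - s / (r + 1), s / (r + 1))

cutmin, cutmax = 2, 15
cut = int((dist.sample() * (cutmax + 1 - cutmin) + cutmin).item())
print(cut)                                        # an integer in [cutmin, cutmax]
```
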
DirkZomerdijk/status
|
[
"299aca6986c0b274500c40613151d55aa98d5f52"
] |
[
"debugger.py"
] |
[
"#%%\nimport pickle\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom copy import deepcopy\nfrom global_variables import *\nfrom model_functions import get_vulnerability, calculate_chronic_state\nimport glob\nimport os\nfrom scipy import stats\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport seaborn as sns\nimport seaborn as sns; sns.set()\nfrom model_functions import *\n\nclass Debug():\n \n def __init__(self, results):\n self.params = results['params']\n self.chronic_threshold = results['params']['chronic_threshold']\n self.df = results['df']\n self.no_interactions = results['params']['interactions']\n self.ses_noise = results['params']['ses_noise']\n self.repeats = results['params']['repeats']\n self.vul_param = results['params']['vul_param']\n self.psr_param = results['params']['psr_param']\n self.recover_param = results['params']['recover_param']\n self.noise_seeds = results['params']['noise_seeds']\n self.similarity = results['params']['similarity']\n self.time = results['params']['time']\n \n self.stress = results['stress']\n # self.stress_agents = results['stress_agents']\n # self.stress_std = results['stress_std']\n # self.stress_low = results['stress_low']\n # self.stress_med = results['stress_med']\n # self.stress_high = results['stress_high']\n # self.stress_low_std = results['stress_low_std']\n # self.stress_med_std = results['stress_med_std']\n # self.stress_high_std = results['stress_high_std']\n\n self.chronic = results['chronic']\n # self.chronic_agents = results['chronic_agents']\n # self.chronic_std = results['chronic_std']\n # self.chronic_low = results['chronic_low']\n # self.chronic_med = results['chronic_med']\n # self.chronic_high = results['chronic_high']\n # self.chronic_low_std = results['chronic_low_std']\n # self.chronic_med_std = results['chronic_med_std']\n # self.chronic_high_std = results['chronic_high_std']\n\n self.prestige = results['prestige']\n # self.prestige_agents = results['prestige_agents']\n # self.prestige_std = results['prestige_std']\n # self.prestige_low = results['prestige_low']\n # self.prestige_med = results['prestige_med']\n # self.prestige_high = results['prestige_high']\n # self.prestige_low_std = results['prestige_low_std']\n # self.prestige_med_std = results['prestige_med_std']\n # self.prestige_high_std = results['prestige_high_std']\n \n self.interactions = results['interactions']\n # self.interactions_agents = results['interactions_agents']\n # self.interactions_std = results['interactions_std']\n # self.interactions_low = results['interactions_low']\n # self.interactions_med = results['interactions_med']\n # self.interactions_high = results['interactions_high']\n # self.interactions_low_std = results['interactions_low_std']\n # self.interactions_med_std = results['interactions_med_std']\n # self.interactions_high_std = results['interactions_high_std']\n print(results.keys())\n self.status_difference = results['status_difference'],\n self.events = results['events'].T\n self.params = self.params \n self.df = self.df[['status', 'psr', 'eth']],\n self.df = self.df[0] \n \n self.no_agents = self.df.shape[0]\n self.low_status, self.med_status, self.high_status = self.split_population_status()\n \n \n def split_population_status(self):\n idx = np.argsort(self.df['status'])\n low = idx[:int(self.no_agents/3)]\n med = idx[int(self.no_agents/3):int(self.no_agents/3*2)]\n high = idx[int(self.no_agents/3*2):]\n return low, med, high\n\n def split_population_psr(self):\n idx = 
np.argsort(self.df['psr'])\n low = idx[:int(self.no_agents/3)]\n med = idx[int(self.no_agents/3):int(self.no_agents/3*2)]\n high = idx[int(self.no_agents/3*2):]\n return low, med, high\n \n def print_init(self):\n print(\"job_nr\\t\\t\", self.params['job_nr'])\n print(\"chronic_threshold\\t\", self.params['chronic_threshold'])\n print(\"similarity_base\\t\\t\", self.params['similarity_min'])\n print(\"interactions\\t\\t\", self.params['interactions'])\n print(\"ses_noise\\t\\t\", self.params['ses_noise'])\n print(\"repeats\\t\\t\\t\", self.params['repeats'])\n print(\"vul_param\\t\\t\", self.params['vul_param'])\n print(\"psr_param\\t\\t\", self.params['psr_param'])\n print(\"recover_param\\t\\t\", self.params['recover_param'])\n print(\"time\\t\\t\\t\", self.params['time'])\n print(\"stress_max\\t\\t\", self.params['stress_max'])\n print(\"noise_seeds\\t\\t\", len(self.params['noise_seeds']))\n print(\"prestige beta\\t\\t\", self.params['prestige_beta'])\n print(\"prestige param\\t\\t\", self.params['prestige_param'])\n print(\"stressor_param\\t\\t\", self.params['stressor_param'])\n print(\"population_shape\\t\", self.df.shape)\n\n\nif __name__ == \"__main__\":\n jobs = 10\n data = []\n for i in range(jobs):\n job = \"{0:03}\".format(i)\n # job = \"{0:03}\".format(200+i)\n with open(\"./results/pre-test/\"+job+\".pkl\", 'rb') as f:\n file = pickle.load(f)\n d = Debug(file)\n data.append(d)\n\n print(\"data\\t\", len(data))\n \n for i, d in enumerate(data):\n print(d.print_init())\n print(d.status_difference)\n print(d.params['job_nr'])\n slopes = calculate_slopes(d.stress)\n \n mean_slopes = np.mean(slopes, axis=(0, 2))\n std_slopes = np.std(np.mean(slopes, axis=(0)), axis=(1))\n mean_stress = np.mean(d.stress, axis=(0, 2))\n std_stress = np.std(np.mean(d.stress, axis=(0)), axis=(1))\n \n plt.errorbar(x=np.arange(d.time), y=mean_stress, yerr = std_stress)\n plt.show()\n\n plt.errorbar(x=np.arange(d.time-1), y=mean_slopes, yerr = std_slopes)\n plt.show()\n \n \n plt.errorbar(x=np.arange(d.time), y=np.mean(d.stress[d.low_status,:,:], axis=(0, 2)), yerr = np.std(np.mean(d.stress[d.low_status,:,:], axis=(0)), axis=(1)))\n plt.errorbar(x=np.arange(d.time), y=np.mean(d.stress[d.med_status,:,:], axis=(0, 2)), yerr = np.std(np.mean(d.stress[d.med_status,:,:], axis=(0)), axis=(1)))\n plt.errorbar(x=np.arange(d.time), y=np.mean(d.stress[d.high_status,:,:], axis=(0, 2)), yerr = np.std(np.mean(d.stress[d.high_status,:,:], axis=(0)), axis=(1)))\n plt.show() \n \n plt.errorbar(x=np.arange(d.time-1), y=np.mean(slopes[d.low_status,:,:], axis=(0, 2)), yerr = np.std(np.mean(slopes[d.low_status,:,:], axis=(0)), axis=(1)))\n plt.errorbar(x=np.arange(d.time-1), y=np.mean(slopes[d.med_status,:,:], axis=(0, 2)), yerr = np.std(np.mean(slopes[d.med_status,:,:], axis=(0)), axis=(1)))\n plt.errorbar(x=np.arange(d.time-1), y=np.mean(slopes[d.high_status,:,:], axis=(0, 2)), yerr = np.std(np.mean(slopes[d.high_status,:,:], axis=(0)), axis=(1)))\n plt.show()\n \n print(d.prestige.shape)\n print(np.unique(d.prestige))\n print(d.df['status'].unique())\n plt.hist(d.df['status'], bins=15)\n plt.show()\n # plt.scatter((np.array(d.stress.shape[2]*[d.df['status']]).T) , d.prestige[:, -1, :], alpha=0.2)\n plt.scatter(d.df['status'] , d.prestige[:, -1, 0], alpha=0.2)\n # plt.ylim([-1,2])\n # plt.yscale('symlog')\n plt.show()\n plt.scatter((np.array(d.stress.shape[2]*[d.df['status']]).T), d.stress[:, -1, :], alpha=0.2)\n # plt.xscale('symlog')\n plt.show()\n\n# %%\n"
] |
[
[
"matplotlib.pyplot.scatter",
"numpy.unique",
"numpy.arange",
"numpy.mean",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist"
]
] |
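
The `Debug` class in this row splits agents into low/medium/high status groups by sorting a column with `np.argsort` and slicing the index array into thirds. A minimal sketch of that tercile split (toy data, not drawn from the record above):

```python
# Sketch of the argsort-based tercile split used by split_population_status.
import numpy as np

status = np.random.rand(9)                        # toy "status" values
idx = np.argsort(status)
n = len(status)
low = idx[: n // 3]
med = idx[n // 3 : 2 * n // 3]
high = idx[2 * n // 3 :]
print(status[low].max() <= status[med].min() <= status[high].max())  # True
```
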
argearriojas/sparse
|
[
"aaf97b3933859dd6ff5a4230ecfffc4523cb02ce"
] |
[
"sparse/slicing.py"
] |
[
"# Most of this file is taken from https://github.com/dask/dask/blob/master/dask/array/slicing.py\n# See license at https://github.com/dask/dask/blob/master/LICENSE.txt\n\nimport math\nfrom numbers import Integral, Number\nfrom collections import Iterable\n\nimport numpy as np\n\n\ndef normalize_index(idx, shape):\n \"\"\" Normalize slicing indexes\n 1. Replaces ellipses with many full slices\n 2. Adds full slices to end of index\n 3. Checks bounding conditions\n 4. Replaces numpy arrays with lists\n 5. Posify's slices integers and lists\n 6. Normalizes slices to canonical form\n Examples\n --------\n >>> normalize_index(1, (10,))\n (1,)\n >>> normalize_index(-1, (10,))\n (9,)\n >>> normalize_index([-1], (10,))\n (array([9]),)\n >>> normalize_index(slice(-3, 10, 1), (10,))\n (slice(7, 10, 1),)\n >>> normalize_index((Ellipsis, None), (10,))\n (slice(0, 10, 1), None)\n \"\"\"\n if not isinstance(idx, tuple):\n idx = (idx,)\n idx = replace_ellipsis(len(shape), idx)\n n_sliced_dims = 0\n for i in idx:\n if hasattr(i, 'ndim') and i.ndim >= 1:\n n_sliced_dims += i.ndim\n elif i is None:\n continue\n else:\n n_sliced_dims += 1\n idx = idx + (slice(None),) * (len(shape) - n_sliced_dims)\n if len([i for i in idx if i is not None]) > len(shape):\n raise IndexError(\"Too many indices for array\")\n\n none_shape = []\n i = 0\n for ind in idx:\n if ind is not None:\n none_shape.append(shape[i])\n i += 1\n else:\n none_shape.append(None)\n\n for i, d in zip(idx, none_shape):\n if d is not None:\n check_index(i, d)\n idx = tuple(map(sanitize_index, idx))\n idx = tuple(map(replace_none, idx, none_shape))\n idx = posify_index(none_shape, idx)\n idx = tuple(map(clip_slice, idx, none_shape))\n return idx\n\n\ndef replace_ellipsis(n, index):\n \"\"\" Replace ... with slices, :, : ,:\n >>> replace_ellipsis(4, (3, Ellipsis, 2))\n (3, slice(None, None, None), slice(None, None, None), 2)\n >>> replace_ellipsis(2, (Ellipsis, None))\n (slice(None, None, None), slice(None, None, None), None)\n \"\"\"\n # Careful about using in or index because index may contain arrays\n isellipsis = [i for i, ind in enumerate(index) if ind is Ellipsis]\n if not isellipsis:\n return index\n elif len(isellipsis) > 1:\n raise IndexError(\"an index can only have a single ellipsis ('...')\")\n else:\n loc = isellipsis[0]\n extra_dimensions = n - (len(index) - sum(i is None for i in index) - 1)\n return index[:loc] + (slice(None, None, None),) * extra_dimensions + index[loc + 1:]\n\n\ndef check_index(ind, dimension):\n \"\"\" Check validity of index for a given dimension\n Examples\n --------\n >>> check_index(3, 5)\n >>> check_index(5, 5)\n Traceback (most recent call last):\n ...\n IndexError: Index is not smaller than dimension 5 >= 5\n >>> check_index(6, 5)\n Traceback (most recent call last):\n ...\n IndexError: Index is not smaller than dimension 6 >= 5\n >>> check_index(-1, 5)\n >>> check_index(-6, 5)\n Traceback (most recent call last):\n ...\n IndexError: Negative index is not greater than negative dimension -6 <= -5\n >>> check_index([1, 2], 5)\n >>> check_index([6, 3], 5)\n Traceback (most recent call last):\n ...\n IndexError: Index out of bounds for dimension 5\n >>> check_index(slice(0, 3), 5)\n \"\"\"\n # unknown dimension, assumed to be in bounds\n if isinstance(ind, Iterable):\n x = np.asanyarray(ind)\n if np.issubdtype(x.dtype, np.integer) and \\\n ((x >= dimension) | (x < -dimension)).any():\n raise IndexError(\"Index out of bounds for dimension {:d}\".format(dimension))\n elif x.dtype == bool and len(x) != dimension:\n 
raise IndexError(\"boolean index did not match indexed array; dimension is {:d} \"\n \"but corresponding boolean dimension is {:d}\".format(dimension, len(x)))\n elif isinstance(ind, slice):\n return\n elif not isinstance(ind, Integral):\n raise IndexError(\"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and \"\n \"integer or boolean arrays are valid indices\")\n\n elif ind >= dimension:\n raise IndexError(\"Index is not smaller than dimension {:d} >= {:d}\".format(ind, dimension))\n\n elif ind < -dimension:\n msg = \"Negative index is not greater than negative dimension {:d} <= -{:d}\"\n raise IndexError(msg.format(ind, dimension))\n\n\ndef sanitize_index(ind):\n \"\"\" Sanitize the elements for indexing along one axis\n >>> sanitize_index([2, 3, 5])\n array([2, 3, 5])\n >>> sanitize_index([True, False, True, False])\n array([0, 2])\n >>> sanitize_index(np.array([1, 2, 3]))\n array([1, 2, 3])\n >>> sanitize_index(np.array([False, True, True]))\n array([1, 2])\n >>> type(sanitize_index(np.int32(0))) # doctest: +SKIP\n <type 'int'>\n >>> sanitize_index(0.5) # doctest: +SKIP\n Traceback (most recent call last):\n ...\n IndexError: only integers, slices (`:`), ellipsis (`...`),\n numpy.newaxis (`None`) and integer or boolean arrays are valid indices\n \"\"\"\n if ind is None:\n return None\n elif isinstance(ind, slice):\n return slice(_sanitize_index_element(ind.start),\n _sanitize_index_element(ind.stop),\n _sanitize_index_element(ind.step))\n elif isinstance(ind, Number):\n return _sanitize_index_element(ind)\n index_array = np.asanyarray(ind)\n if index_array.dtype == np.bool_:\n nonzero = np.nonzero(index_array)\n if len(nonzero) == 1:\n # If a 1-element tuple, unwrap the element\n nonzero = nonzero[0]\n return np.asanyarray(nonzero)\n elif np.issubdtype(index_array.dtype, np.integer):\n return index_array\n else:\n raise IndexError(\"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and \"\n \"integer or boolean arrays are valid indices\")\n\n\ndef _sanitize_index_element(ind):\n \"\"\"Sanitize a one-element index.\"\"\"\n if ind is None:\n return None\n\n return int(ind)\n\n\ndef posify_index(shape, ind):\n \"\"\" Flip negative indices around to positive ones\n >>> posify_index(10, 3)\n 3\n >>> posify_index(10, -3)\n 7\n >>> posify_index(10, [3, -3])\n array([3, 7])\n >>> posify_index((10, 20), (3, -3))\n (3, 17)\n >>> posify_index((10, 20), (3, [3, 4, -3])) # doctest: +NORMALIZE_WHITESPACE\n (3, array([ 3, 4, 17]))\n \"\"\"\n if isinstance(ind, tuple):\n return tuple(map(posify_index, shape, ind))\n if isinstance(ind, Integral):\n if ind < 0 and not math.isnan(shape):\n return ind + shape\n else:\n return ind\n if isinstance(ind, (np.ndarray, list)) and not math.isnan(shape):\n ind = np.asanyarray(ind)\n return np.where(ind < 0, ind + shape, ind)\n if isinstance(ind, slice):\n start, stop, step = ind.start, ind.stop, ind.step\n\n if start < 0:\n start += shape\n\n if not (0 > stop >= step) and stop < 0:\n stop += shape\n\n return slice(start, stop, ind.step)\n\n return ind\n\n\ndef clip_slice(idx, dim):\n \"\"\"\n Clip slice to its effective size given the shape.\n\n Parameters\n ----------\n idx : The index.\n dim : The size along the corresponding dimension.\n\n Returns\n -------\n idx : slice\n\n Examples\n --------\n >>> clip_slice(slice(0, 20, 1), 10)\n slice(0, 10, 1)\n \"\"\"\n if not isinstance(idx, slice):\n return idx\n\n start, stop, step = idx.start, idx.stop, idx.step\n\n if step > 0:\n start = max(start, 0)\n stop = min(stop, 
dim)\n\n if start > stop:\n start = stop\n else:\n start = min(start, dim - 1)\n stop = max(stop, -1)\n\n if start < stop:\n start = stop\n\n return slice(start, stop, step)\n\n\ndef replace_none(idx, dim):\n \"\"\"\n Normalize slices to canonical form, i.e.\n replace ``None`` with the appropriate integers.\n\n Parameters\n ----------\n idx: slice or other index\n dim: dimension length\n\n Examples\n --------\n >>> replace_none(slice(None, None, None), 10)\n slice(0, 10, 1)\n \"\"\"\n if not isinstance(idx, slice):\n return idx\n\n start, stop, step = idx.start, idx.stop, idx.step\n\n if step is None:\n step = 1\n\n if step > 0:\n if start is None:\n start = 0\n\n if stop is None:\n stop = dim\n else:\n if start is None:\n start = dim - 1\n\n if stop is None:\n stop = -1\n\n return slice(start, stop, step)\n"
] |
[
[
"numpy.issubdtype",
"numpy.asanyarray",
"numpy.where",
"numpy.nonzero"
]
] |
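
`sanitize_index` and `posify_index` in this row reduce most index handling to two NumPy idioms: turning a boolean mask into integer positions with `np.nonzero`, and wrapping negative indices with `np.where`. A tiny sketch of both (illustrative values, not part of the dataset record):

```python
# Sketch of the NumPy idioms behind sanitize_index (mask -> positions) and
# posify_index (negative index wrap-around).
import numpy as np

mask = np.array([True, False, True, True])
positions = np.asanyarray(np.nonzero(mask)[0])    # array([0, 2, 3])

ind, dim = np.array([1, -2, 3]), 10
wrapped = np.where(ind < 0, ind + dim, ind)       # array([1, 8, 3])
print(positions, wrapped)
```
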
cecabert/onnx-tensorflow
|
[
"c60a32caef3271b93e843bac7a44eda388f67165"
] |
[
"onnx_tf/handlers/backend/gru.py"
] |
[
"from functools import partial\n\nimport tensorflow as tf\n\nfrom onnx_tf.common import get_unique_suffix\nfrom onnx_tf.common import exception\nfrom onnx_tf.handlers.backend_handler import BackendHandler\nfrom onnx_tf.handlers.handler import onnx_op\nfrom onnx_tf.handlers.handler import partial_support\nfrom onnx_tf.handlers.handler import ps_description\nfrom .rnn_mixin import RNNMixin\n\n\n@onnx_op(\"GRU\")\n@partial_support(True)\n@ps_description(\n \"GRU with clip or GRU with linear_before_reset, or \" +\n \"GRU not using sigmoid for z and r, or \" +\n \"GRU using Elu as the activation function \" + \"with alpha != 1, or \" +\n \"GRU using HardSigmoid as the activation function \" +\n \"with alpha != 0.2 or beta != 0.5 \" + \"are not supported in TensorFlow.\")\nclass GRU(RNNMixin, BackendHandler):\n\n @classmethod\n def args_check(cls, node, **kwargs):\n direction = node.attrs.get(\"direction\", \"forward\")\n num_directions = 2 if direction == \"bidirectional\" else 1\n if \"clip\" in node.attrs:\n exception.OP_UNSUPPORTED_EXCEPT(\"GRU with clip\", \"Tensorflow\")\n if node.attrs.get(\"linear_before_reset\", 0):\n exception.OP_UNSUPPORTED_EXCEPT(\"GRU with linear_before_reset\",\n \"Tensorflow\")\n if \"activations\" in node.attrs:\n activations = list(map(lambda x: x.lower(), node.attrs[\"activations\"]))\n if activations[0] != \"sigmoid\":\n exception.OP_UNSUPPORTED_EXCEPT(\"GRU without sigmoid for `z` and `r`\",\n \"Tensorflow\")\n if num_directions == 2:\n if activations[2] != \"sigmoid\":\n exception.OP_UNSUPPORTED_EXCEPT(\"GRU without sigmoid for `z` and `r`\",\n \"Tensorflow\")\n\n @classmethod\n def _custom_getter(cls,\n getter,\n name,\n node=None,\n tensor_dict=None,\n is_bidirectional=None,\n *args,\n **kwargs):\n names = name.split(\"/\")\n if is_bidirectional:\n if \"fw\" in names:\n index = 0\n elif \"bw\" in names:\n index = 1\n else:\n raise RuntimeError(\"Can not get {} for bidirectional. 
\"\n \"Either fw and bw is not in name scope.\".format(\n names[-1]))\n if names[-1] == \"kernel\":\n # onnx W[zrh], R[zrh]\n if is_bidirectional:\n w = tf.split(tensor_dict[node.inputs[1]], 2)[index]\n r = tf.split(tensor_dict[node.inputs[2]], 2)[index]\n else:\n w = tensor_dict[node.inputs[1]]\n r = tensor_dict[node.inputs[2]]\n w_z, w_r, w_h = tf.split(tf.squeeze(w), 3)\n r_z, r_r, r_h = tf.split(tf.squeeze(r), 3)\n if names[-2] == \"gates\":\n new_w = tf.transpose(tf.concat([w_r, w_z], 0))\n new_r = tf.transpose(tf.concat([r_r, r_z], 0))\n elif names[-2] == \"candidate\":\n new_w = tf.transpose(w_h)\n new_r = tf.transpose(r_h)\n kernel = tf.concat([new_w, new_r], 0)\n return kernel\n if names[-1] == \"bias\":\n if len(node.inputs) >= 4:\n # onnx Wb[zrh], Rb[zrh]\n if is_bidirectional:\n b = tf.split(tensor_dict[node.inputs[3]], 2)[index]\n else:\n b = tensor_dict[node.inputs[3]]\n w_b, r_b = tf.split(tf.squeeze(b), 2)\n w_b_z, w_b_r, w_b_h = tf.split(w_b, 3)\n r_b_z, r_b_r, r_b_h = tf.split(r_b, 3)\n if names[-2] == \"gates\":\n w_b = tf.transpose(tf.concat([w_b_r, w_b_z], 0))\n r_b = tf.transpose(tf.concat([r_b_r, r_b_z], 0))\n elif names[-2] == \"candidate\":\n w_b = tf.transpose(w_b_h)\n r_b = tf.transpose(r_b_h)\n return tf.add(w_b, r_b)\n return getter(name, *args, **kwargs)\n return getter(name, *args, **kwargs)\n\n @classmethod\n def _common(cls, node, **kwargs):\n tensor_dict = kwargs[\"tensor_dict\"]\n x = tensor_dict[node.inputs[0]]\n input_shape = x.get_shape().as_list()\n input_size = len(node.inputs)\n hidden_size = node.attrs[\"hidden_size\"]\n direction = node.attrs.get(\"direction\", \"forward\")\n num_directions = 2 if direction == \"bidirectional\" else 1\n\n # removed from version 7, default is 0\n output_sequence = node.attrs.get(\"output_sequence\", 0)\n\n # TODO(fumihwh): check if prev node is one of RNN\n # process input if it comes from other previous cell\n # which has shape [seq_length, num_directions, batch_size, hidden_size]\n if len(input_shape) == 4 and input_shape[1] == 1:\n x = tf.squeeze(x)\n\n sequence_length = None\n if input_size >= 5 and node.inputs[4] in tensor_dict:\n sequence_length = tensor_dict[node.inputs[4]]\n\n cell_kwargs = {}\n\n tf_activations = [tf.nn.tanh]\n if \"activations\" in node.attrs:\n activations = list(map(lambda x: x.lower(), node.attrs[\"activations\"]))\n activation_alpha = node.attrs.get(\"activation_alpha\", [None] * 4)\n activation_beta = node.attrs.get(\"activation_beta\", [None] * 4)\n tf_activations = [\n cls.rnn_get_activation(activations[1], activation_alpha[1],\n activation_beta[1])\n ]\n if num_directions == 2:\n tf_activations.append(\n cls.rnn_get_activation(activations[3], activation_alpha[3],\n activation_beta[3]))\n\n # TODO(fumihwh): check if reverse and bidirectional works\n with tf.compat.v1.variable_scope(\n \"GRU_\" + get_unique_suffix(),\n custom_getter=partial(\n cls._custom_getter,\n node=node,\n tensor_dict=tensor_dict,\n is_bidirectional=num_directions == 2)):\n\n cell_kwargs[\"num_units\"] = hidden_size\n if input_size < 4 or node.inputs[3] not in tensor_dict:\n cell_kwargs[\"bias_initializer\"] = tf.zeros_initializer\n initial_state = None\n initial_state_bw = None\n if input_size == 6:\n initial_h = tensor_dict.get(node.inputs[5], None)\n if initial_h is not None:\n initial_state = (initial_h[0],)\n if num_directions == 2:\n initial_state_bw = (initial_h[1],)\n\n rnn_kwargs = {}\n if num_directions == 1:\n rnn_kwargs[\"initial_state\"] = initial_state\n elif num_directions == 2:\n 
rnn_kwargs[\"initial_state_fw\"] = initial_state\n rnn_kwargs[\"initial_state_bw\"] = initial_state_bw\n rnn_kwargs[\"sequence_length\"] = sequence_length\n rnn_kwargs[\"time_major\"] = True\n rnn_kwargs[\"dtype\"] = tf.float32\n\n outputs, states = cls.rnn(x, tf.compat.v1.nn.rnn_cell.GRUCell,\n cell_kwargs, rnn_kwargs, tf_activations,\n direction)\n\n if num_directions == 1:\n state = states[0]\n h = tf.expand_dims(state, 0)\n output = tf.expand_dims(outputs, 1)\n else:\n state_fw = states[0][0]\n state_bw = states[1][0]\n output_fw = outputs[0]\n output_bw = outputs[1]\n h_fw = tf.expand_dims(state_fw, 0)\n h_bw = tf.expand_dims(state_bw, 0)\n h = tf.concat((h_fw, h_bw), axis=0)\n output_fw = tf.expand_dims(output_fw, 1)\n output_bw = tf.expand_dims(output_bw, 1)\n output = tf.concat((output_fw, output_bw), axis=1)\n\n return [output, h] if output_sequence == 0 else [h]\n\n @classmethod\n def version_1(cls, node, **kwargs):\n return cls._common(node, **kwargs)\n\n @classmethod\n def version_3(cls, node, **kwargs):\n return cls._common(node, **kwargs)\n\n @classmethod\n def version_7(cls, node, **kwargs):\n return cls._common(node, **kwargs)\n"
] |
[
[
"tensorflow.concat",
"tensorflow.transpose",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.add",
"tensorflow.split"
]
] |
xealml/text_classification
|
[
"2be2e94b539bb1058ca1807f0002c7942ad60617"
] |
[
"a02_TextCNN/other_experiement/data_util_zhihu.py"
] |
[
"# -*- coding: utf-8 -*-\nimport codecs\nimport numpy as np\n# load data of zhihu\nimport word2vec\nimport os\nimport pickle\nPAD_ID = 0\nfrom tflearn.data_utils import pad_sequences\n_GO = \"_GO\"\n_END = \"_END\"\n_PAD = \"_PAD\"\n\n\n# use pretrained word embedding to get word vocabulary and labels, and its relationship with index\ndef create_voabulary(simple=None,word2vec_model_path='zhihu-word2vec-title-desc.bin-100',name_scope=''):\n cache_path ='cache_vocabulary_label_pik/'+ name_scope + \"_word_voabulary.pik\"\n print(\"cache_path:\",cache_path,\"file_exists:\",os.path.exists(cache_path))\n if os.path.exists(cache_path):#if exists, load it; otherwise create it.\n with open(cache_path, 'r') as data_f:\n vocabulary_word2index, vocabulary_index2word=pickle.load(data_f)\n return vocabulary_word2index, vocabulary_index2word\n else:\n vocabulary_word2index={}\n vocabulary_index2word={}\n if simple is not None:\n word2vec_model_path='zhihu-word2vec.bin-100'\n print(\"create vocabulary. word2vec_model_path:\",word2vec_model_path)\n model=word2vec.load(word2vec_model_path,kind='bin')\n vocabulary_word2index['PAD_ID']=0\n vocabulary_index2word[0]='PAD_ID'\n special_index=0\n if 'biLstmTextRelation' in name_scope:\n vocabulary_word2index['EOS'] = 1 # a special token for biLstTextRelation model. which is used between two sentences.\n vocabulary_index2word[1]='EOS'\n special_index=1\n for i,vocab in enumerate(model.vocab):\n vocabulary_word2index[vocab]=i+1+special_index\n vocabulary_index2word[i+1+special_index]=vocab\n\n # save to file system if vocabulary of words is not exists.\n if not os.path.exists(cache_path): # 如果不存在写到缓存文件中\n with open(cache_path, 'a') as data_f:\n pickle.dump((vocabulary_word2index,vocabulary_index2word), data_f)\n return vocabulary_word2index,vocabulary_index2word\n\n# create vocabulary of lables. label is sorted. 
1 is high frequency, 2 is low frequency.\ndef create_voabulary_label(voabulary_label='train-zhihu4-only-title-all.txt',name_scope='',use_seq2seq=False):#'train-zhihu.txt'\n print(\"create_voabulary_label_sorted.started.traning_data_path:\",voabulary_label)\n cache_path ='cache_vocabulary_label_pik/'+ name_scope + \"_label_voabulary.pik\"\n if os.path.exists(cache_path):#如果缓存文件存在,则直接读取\n with open(cache_path, 'r') as data_f:\n vocabulary_word2index_label, vocabulary_index2word_label=pickle.load(data_f)\n return vocabulary_word2index_label, vocabulary_index2word_label\n else:\n zhihu_f_train = codecs.open(voabulary_label, 'r', 'utf8')\n lines=zhihu_f_train.readlines()\n count=0\n vocabulary_word2index_label={}\n vocabulary_index2word_label={}\n vocabulary_label_count_dict={} #{label:count}\n for i,line in enumerate(lines):\n if '__label__' in line: #'__label__-2051131023989903826\n label=line[line.index('__label__')+len('__label__'):].strip().replace(\"\\n\",\"\")\n if vocabulary_label_count_dict.get(label,None) is not None:\n vocabulary_label_count_dict[label]=vocabulary_label_count_dict[label]+1\n else:\n vocabulary_label_count_dict[label]=1\n list_label=sort_by_value(vocabulary_label_count_dict)\n\n print(\"length of list_label:\",len(list_label));#print(\";list_label:\",list_label)\n countt=0\n\n ##########################################################################################\n if use_seq2seq:#if used for seq2seq model,insert two special label(token):_GO AND _END\n i_list=[0,1,2];label_special_list=[_GO,_END,_PAD]\n for i,label in zip(i_list,label_special_list):\n vocabulary_word2index_label[label] = i\n vocabulary_index2word_label[i] = label\n #########################################################################################\n for i,label in enumerate(list_label):\n if i<10:\n count_value=vocabulary_label_count_dict[label]\n print(\"label:\",label,\"count_value:\",count_value)\n countt=countt+count_value\n indexx = i + 3 if use_seq2seq else i\n vocabulary_word2index_label[label]=indexx\n vocabulary_index2word_label[indexx]=label\n print(\"count top10:\",countt)\n\n #save to file system if vocabulary of words is not exists.\n if not os.path.exists(cache_path): #如果不存在写到缓存文件中\n with open(cache_path, 'a') as data_f:\n pickle.dump((vocabulary_word2index_label,vocabulary_index2word_label), data_f)\n print(\"create_voabulary_label_sorted.ended.len of vocabulary_label:\",len(vocabulary_index2word_label))\n return vocabulary_word2index_label,vocabulary_index2word_label\n\ndef sort_by_value(d):\n items=d.items()\n backitems=[[v[1],v[0]] for v in items]\n backitems.sort(reverse=True)\n return [ backitems[i][1] for i in range(0,len(backitems))]\n\ndef create_voabulary_labelO():\n model = word2vec.load('zhihu-word2vec-multilabel.bin-100', kind='bin') #zhihu-word2vec.bin-100\n count=0\n vocabulary_word2index_label={}\n vocabulary_index2word_label={}\n label_unique={}\n for i,vocab in enumerate(model.vocab):\n if '__label__' in vocab: #'__label__-2051131023989903826\n label=vocab[vocab.index('__label__')+len('__label__'):]\n if label_unique.get(label,None) is None: #不曾出现过的话,保持到字典中\n vocabulary_word2index_label[label]=count\n vocabulary_index2word_label[count]=label #ADD\n count=count+1\n label_unique[label]=label\n return vocabulary_word2index_label,vocabulary_index2word_label\n\ndef load_data_multilabel_new(vocabulary_word2index,vocabulary_word2index_label,valid_portion=0.05,max_training_data=1000000,\n 
traning_data_path='train-zhihu4-only-title-all.txt',multi_label_flag=True,use_seq2seq=False,seq2seq_label_length=6): # n_words=100000,\n \"\"\"\n input: a file path\n :return: train, test, valid. where train=(trainX, trainY). where\n trainX: is a list of list.each list representation a sentence.trainY: is a list of label. each label is a number\n \"\"\"\n # 1.load a zhihu data from file\n # example:\"w305 w6651 w3974 w1005 w54 w109 w110 w3974 w29 w25 w1513 w3645 w6 w111 __label__-400525901828896492\"\n print(\"load_data.started...\")\n print(\"load_data_multilabel_new.training_data_path:\",traning_data_path)\n zhihu_f = codecs.open(traning_data_path, 'r', 'utf8') #-zhihu4-only-title.txt\n lines = zhihu_f.readlines()\n # 2.transform X as indices\n # 3.transform y as scalar\n X = []\n Y = []\n Y_decoder_input=[] #ADD 2017-06-15\n for i, line in enumerate(lines):\n x, y = line.split('__label__') #x='w17314 w5521 w7729 w767 w10147 w111'\n y=y.strip().replace('\\n','')\n x = x.strip()\n if i<1:\n print(i,\"x0:\",x) #get raw x\n #x_=process_one_sentence_to_get_ui_bi_tri_gram(x)\n x=x.split(\" \")\n x = [vocabulary_word2index.get(e,0) for e in x] #if can't find the word, set the index as '0'.(equal to PAD_ID = 0)\n if i<2:\n print(i,\"x1:\",x) #word to index\n if use_seq2seq: # 1)prepare label for seq2seq format(ADD _GO,_END,_PAD for seq2seq)\n ys = y.replace('\\n', '').split(\" \") # ys is a list\n _PAD_INDEX=vocabulary_word2index_label[_PAD]\n ys_mulithot_list=[_PAD_INDEX]*seq2seq_label_length #[3,2,11,14,1]\n ys_decoder_input=[_PAD_INDEX]*seq2seq_label_length\n # below is label.\n for j,y in enumerate(ys):\n if j<seq2seq_label_length-1:\n ys_mulithot_list[j]=vocabulary_word2index_label[y]\n if len(ys)>seq2seq_label_length-1:\n ys_mulithot_list[seq2seq_label_length-1]=vocabulary_word2index_label[_END]#ADD END TOKEN\n else:\n ys_mulithot_list[len(ys)] = vocabulary_word2index_label[_END]\n\n # below is input for decoder.\n ys_decoder_input[0]=vocabulary_word2index_label[_GO]\n for j,y in enumerate(ys):\n if j < seq2seq_label_length - 1:\n ys_decoder_input[j+1]=vocabulary_word2index_label[y]\n if i<10:\n print(i,\"ys:==========>0\", ys)\n print(i,\"ys_mulithot_list:==============>1\", ys_mulithot_list)\n print(i,\"ys_decoder_input:==============>2\", ys_decoder_input)\n else:\n if multi_label_flag: # 2)prepare multi-label format for classification\n ys = y.replace('\\n', '').split(\" \") # ys is a list\n ys_index=[]\n for y in ys:\n y_index = vocabulary_word2index_label[y]\n ys_index.append(y_index)\n ys_mulithot_list=transform_multilabel_as_multihot(ys_index)\n else: #3)prepare single label format for classification\n ys_mulithot_list=vocabulary_word2index_label[y]\n if i<=3:\n print(\"ys_index:\")\n #print(ys_index)\n print(i,\"y:\",y,\" ;ys_mulithot_list:\",ys_mulithot_list) #,\" ;ys_decoder_input:\",ys_decoder_input)\n X.append(x)\n Y.append(ys_mulithot_list)\n if use_seq2seq:\n Y_decoder_input.append(ys_decoder_input) #decoder input\n #if i>50000:\n # break\n # 4.split to train,test and valid data\n number_examples = len(X)\n print(\"number_examples:\",number_examples) #\n train = (X[0:int((1 - valid_portion) * number_examples)], Y[0:int((1 - valid_portion) * number_examples)])\n test = (X[int((1 - valid_portion) * number_examples) + 1:], Y[int((1 - valid_portion) * number_examples) + 1:])\n if use_seq2seq:\n train=train+(Y_decoder_input[0:int((1 - valid_portion) * number_examples)],)\n test=test+(Y_decoder_input[int((1 - valid_portion) * number_examples) + 1:],)\n # 5.return\n 
print(\"load_data.ended...\")\n return train, test, test\n\ndef load_data_multilabel_new_twoCNN(vocabulary_word2index,vocabulary_word2index_label,valid_portion=0.05,max_training_data=1000000,\n traning_data_path='train-zhihu4-only-title-all.txt',multi_label_flag=True): # n_words=100000,\n \"\"\"\n input: a file path\n :return: train, test, valid. where train=(trainX, trainY). where\n trainX: is a list of list.each list representation a sentence.trainY: is a list of label. each label is a number\n \"\"\"\n # 1.load a zhihu data from file\n # example:\"w305 w6651 w3974 w1005 w54 w109 w110 w3974 w29 w25 w1513 w3645 w6 w111 __label__-400525901828896492\"\n print(\"load_data.twoCNN.started...\")\n print(\"load_data_multilabel_new_twoCNN.training_data_path:\",traning_data_path)\n zhihu_f = codecs.open(traning_data_path, 'r', 'utf8') #-zhihu4-only-title.txt\n lines = zhihu_f.readlines()\n # 2.transform X as indices\n # 3.transform y as scalar\n X = []\n X2=[]\n Y = []\n count_error=0\n for i, line in enumerate(lines):\n x, y = line.split('__label__') #x='w17314 w5521 w7729 w767 w10147 w111'\n y=y.strip().replace('\\n','')\n x = x.strip()\n #print(\"x:===============>\",x)\n try:\n x,x2=x.split(\"\\t\")\n except Exception:\n print(\"x.split.error.\",x,\"count_error:\",count_error)\n count_error+=1\n continue\n if i<1:\n print(i,\"x0:\",x) #get raw x\n #x_=process_one_sentence_to_get_ui_bi_tri_gram(x)\n x=x.split(\" \")\n x = [vocabulary_word2index.get(e,0) for e in x] #if can't find the word, set the index as '0'.(equal to PAD_ID = 0)\n x2=x2.split(\" \")\n x2 =[vocabulary_word2index.get(e, 0) for e in x2]\n if i<1:\n print(i,\"x1:\",x,\"x2:\",x2) #word to index\n if multi_label_flag:\n ys = y.replace('\\n', '').split(\" \") #ys is a list\n ys_index=[]\n for y in ys:\n y_index = vocabulary_word2index_label[y]\n ys_index.append(y_index)\n ys_mulithot_list=transform_multilabel_as_multihot(ys_index)\n else:\n ys_mulithot_list=int(y) #vocabulary_word2index_label[y]\n if i<1:\n print(i,\"y:\",y,\"ys_mulithot_list:\",ys_mulithot_list)\n X.append(x)\n X2.append(x2)\n Y.append(ys_mulithot_list)\n # 4.split to train,test and valid data\n number_examples = len(X)\n print(\"number_examples:\",number_examples) #\n train = (X[0:int((1 - valid_portion) * number_examples)],X2[0:int((1 - valid_portion) * number_examples)],Y[0:int((1 - valid_portion) * number_examples)])\n test = (X[int((1 - valid_portion) * number_examples) + 1:], X2[int((1 - valid_portion) * number_examples) + 1:],Y[int((1 - valid_portion) * number_examples) + 1:])\n # 5.return\n print(\"load_data.ended...\")\n return train, test, test\n\ndef load_data(vocabulary_word2index,vocabulary_word2index_label,valid_portion=0.05,max_training_data=1000000,training_data_path='train-zhihu4-only-title-all.txt'): # n_words=100000,\n \"\"\"\n input: a file path\n :return: train, test, valid. where train=(trainX, trainY). where\n trainX: is a list of list.each list representation a sentence.trainY: is a list of label. 
each label is a number\n \"\"\"\n # 1.load a zhihu data from file\n # example:\"w305 w6651 w3974 w1005 w54 w109 w110 w3974 w29 w25 w1513 w3645 w6 w111 __label__-400525901828896492\"\n print(\"load_data.started...\")\n zhihu_f = codecs.open(training_data_path, 'r', 'utf8') #-zhihu4-only-title.txt\n lines = zhihu_f.readlines()\n # 2.transform X as indices\n # 3.transform y as scalar\n X = []\n Y = []\n for i, line in enumerate(lines):\n x, y = line.split('__label__') #x='w17314 w5521 w7729 w767 w10147 w111'\n y=y.replace('\\n','')\n x = x.replace(\"\\t\",' EOS ').strip()\n if i<5:\n print(\"x0:\",x) #get raw x\n #x_=process_one_sentence_to_get_ui_bi_tri_gram(x)\n #if i<5:\n # print(\"x1:\",x_) #\n x=x.split(\" \")\n x = [vocabulary_word2index.get(e,0) for e in x] #if can't find the word, set the index as '0'.(equal to PAD_ID = 0)\n if i<5:\n print(\"x1:\",x) #word to index\n y = vocabulary_word2index_label[y] #np.abs(hash(y))\n X.append(x)\n Y.append(y)\n # 4.split to train,test and valid data\n number_examples = len(X)\n print(\"number_examples:\",number_examples) #\n train = (X[0:int((1 - valid_portion) * number_examples)], Y[0:int((1 - valid_portion) * number_examples)])\n test = (X[int((1 - valid_portion) * number_examples) + 1:], Y[int((1 - valid_portion) * number_examples) + 1:])\n # 5.return\n print(\"load_data.ended...\")\n return train, test, test\n\n # 将一句话转化为(uigram,bigram,trigram)后的字符串\ndef process_one_sentence_to_get_ui_bi_tri_gram(sentence,n_gram=3):\n \"\"\"\n :param sentence: string. example:'w17314 w5521 w7729 w767 w10147 w111'\n :param n_gram:\n :return:string. example:'w17314 w17314w5521 w17314w5521w7729 w5521 w5521w7729 w5521w7729w767 w7729 w7729w767 w7729w767w10147 w767 w767w10147 w767w10147w111 w10147 w10147w111 w111'\n \"\"\"\n result=[]\n word_list=sentence.split(\" \") #[sentence[i] for i in range(len(sentence))]\n unigram='';bigram='';trigram='';fourgram=''\n length_sentence=len(word_list)\n for i,word in enumerate(word_list):\n unigram=word #ui-gram\n word_i=unigram\n if n_gram>=2 and i+2<=length_sentence: #bi-gram\n bigram=\"\".join(word_list[i:i+2])\n word_i=word_i+' '+bigram\n if n_gram>=3 and i+3<=length_sentence: #tri-gram\n trigram=\"\".join(word_list[i:i+3])\n word_i = word_i + ' ' + trigram\n if n_gram>=4 and i+4<=length_sentence: #four-gram\n fourgram=\"\".join(word_list[i:i+4])\n word_i = word_i + ' ' + fourgram\n if n_gram>=5 and i+5<=length_sentence: #five-gram\n fivegram=\"\".join(word_list[i:i+5])\n word_i = word_i + ' ' + fivegram\n result.append(word_i)\n result=\" \".join(result)\n return result\n\n# 加载数据,标签包含多个label:load data with multi-labels\ndef load_data_with_multilabels(vocabulary_word2index,vocabulary_word2index_label,traning_path,valid_portion=0.05,max_training_data=1000000): # n_words=100000,\n \"\"\"\n input: a file path\n :return: train, test, valid. where train=(trainX, trainY). where\n trainX: is a list of list.each list representation a sentence.trainY: is a list of label. 
each label is a number\n \"\"\"\n # 1.load a zhihu data from file\n # example: 'w140 w13867 w10344 w2673 w9514 w269 w460 w6 w35053 w844 w10147 w111 __label__-2379261820462209275 -5535923551616745326 6038661761506862294'\n print(\"load_data_with_multilabels.ended...\")\n zhihu_f = codecs.open(traning_path,'r','utf8') #('/home/xul/xul/9_ZhihuCup/'+data_type+'-zhihu5-only-title-multilabel.txt', 'r', 'utf8') #home/xul/xul/9_ZhihuCup/'\n lines = zhihu_f.readlines()\n # 2.transform X as indices\n # 3.transform y as scalar\n X = []\n Y = []\n Y_label1999=[]\n for i, line in enumerate(lines):\n #if i>max_training_data:\n # break\n x, ys = line.split('__label__') #x='w17314 w5521 w7729 w767 w10147 w111'\n ys=ys.replace('\\n','').split(\" \")\n x = x.strip()\n if i < 5:\n print(\"x0:\", x) # u'w4260 w4260w86860 w4260w86860w30907 w86860 w86860w30907 w86860w30907w11 w30907 w30907w11 w30907w11w31 w11 w11w31 w11w31w72 w31 w31w72 w31w72w166 w72 w72w166 w72w166w346 w166 w166w346 w166w346w2182 w346 w346w2182 w346w2182w224 w2182 w2182w224 w2182w224w2148 w224 w224w2148 w224w2148w6 w2148 w2148w6 w2148w6w2566 w6 w6w2566 w6w2566w25 w2566 w2566w25 w2566w25w1110 w25 w25w1110 w25w1110w111 w1110 w1110w111 w111'\n #x_=process_one_sentence_to_get_ui_bi_tri_gram(x)\n #if i < 5:\n # print(\"x1:\", x_)\n x=x.split(\" \")\n x = [vocabulary_word2index.get(e,0) for e in x] #if can't find the word, set the index as '0'.(equal to PAD_ID = 0)\n if i<5:\n print(\"x2:\", x)\n #print(\"ys:\",ys) #['501174938575526146', '-4317515119936650885']\n ys_list=[]\n for y in ys:\n y_ = vocabulary_word2index_label[y]\n ys_list.append(y_)\n X.append(x)\n #TODO ys_list_array=transform_multilabel_as_multihot(ys_list) #it is 2-d array. [[ 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [ 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 
0.]...]\n ys_list_=proces_label_to_algin(ys_list)\n Y.append(ys_list_)\n #TODO Y_label1999.append(ys_list_array)\n if i==0:\n print(X,Y)\n print(Y_label1999)\n # 4.split to train,test and valid data\n number_examples = len(X)\n train = (X[0:int((1 - valid_portion) * number_examples)], Y[0:int((1 - valid_portion) * number_examples)]) #TODO Y_label1999[0:int((1 - valid_portion) * number_examples)]\n test = (X[int((1 - valid_portion) * number_examples) + 1:], Y[int((1 - valid_portion) * number_examples) + 1:]) #TODO ,Y_label1999[int((1 - valid_portion) * number_examples) + 1:]\n print(\"load_data_with_multilabels.ended...\")\n return train, test\n\n#将LABEL转化为MULTI-HOT\ndef transform_multilabel_as_multihot(label_list,label_size=1999): #1999label_list=[0,1,4,9,5]\n \"\"\"\n :param label_list: e.g.[0,1,4]\n :param label_size: e.g.199\n :return:e.g.[1,1,0,1,0,0,........]\n \"\"\"\n result=np.zeros(label_size)\n #set those location as 1, all else place as 0.\n result[label_list] = 1\n return result\n\n#将LABEL转化为MULTI-HOT\ndef transform_multilabel_as_multihotO(label_list,label_size=1999): #1999label_list=[0,1,4,9,5]\n batch_size=len(label_list)\n result=np.zeros((batch_size,label_size))\n #set those location as 1, all else place as 0.\n result[(range(batch_size),label_list)]=1\n return result\n\ndef load_final_test_data(file_path):\n final_test_file_predict_object = codecs.open(file_path, 'r', 'utf8')\n lines=final_test_file_predict_object.readlines()\n question_lists_result=[]\n for i,line in enumerate(lines):\n question_id,question_string=line.split(\"\\t\")\n question_string=question_string.strip().replace(\"\\n\",\"\")\n question_lists_result.append((question_id,question_string))\n print(\"length of total question lists:\",len(question_lists_result))\n return question_lists_result\n\ndef load_data_predict(vocabulary_word2index,vocabulary_word2index_label,questionid_question_lists,uni_to_tri_gram=False): # n_words=100000,\n final_list=[]\n for i, tuplee in enumerate(questionid_question_lists):\n queston_id,question_string_list=tuplee\n if uni_to_tri_gram:\n x_=process_one_sentence_to_get_ui_bi_tri_gram(question_string_list)\n x=x_.split(\" \")\n else:\n x=question_string_list.split(\" \")\n x = [vocabulary_word2index.get(e, 0) for e in x] #if can't find the word, set the index as '0'.(equal to PAD_ID = 0)\n if i<=2:\n print(\"question_id:\",queston_id);print(\"question_string_list:\",question_string_list);print(\"x_indexed:\",x)\n final_list.append((queston_id,x))\n number_examples = len(final_list)\n print(\"number_examples:\",number_examples) #\n return final_list\n\n\ndef proces_label_to_algin(ys_list,require_size=5):\n \"\"\"\n :param ys_list: a list\n :return: a list\n \"\"\"\n ys_list_result=[0 for x in range(require_size)]\n if len(ys_list)>=require_size: #超长\n ys_list_result=ys_list[0:require_size]\n else:#太短\n if len(ys_list)==1:\n ys_list_result =[ys_list[0] for x in range(require_size)]\n elif len(ys_list)==2:\n ys_list_result = [ys_list[0],ys_list[0],ys_list[0],ys_list[1],ys_list[1]]\n elif len(ys_list) == 3:\n ys_list_result = [ys_list[0], ys_list[0], ys_list[1], ys_list[1], ys_list[2]]\n elif len(ys_list) == 4:\n ys_list_result = [ys_list[0], ys_list[0], ys_list[1], ys_list[2], ys_list[3]]\n return ys_list_result\n\ndef write_uigram_to_trigram():\n pass\n #1.read file.\n #2.uigram--->trigram\n #3.write each line to file system.\n\ndef test_pad():\n trainX='w18476 w4454 w1674 w6 w25 w474 w1333 w1467 w863 w6 w4430 w11 w813 w4463 w863 w6 w4430 w111'\n trainX=trainX.split(\" \")\n 
trainX = pad_sequences([[trainX]], maxlen=100, value=0.)\n print(\"trainX:\",trainX)\n\ntopic_info_file_path='topic_info.txt'\ndef read_topic_info():\n f = codecs.open(topic_info_file_path, 'r', 'utf8')\n lines=f.readlines()\n dict_questionid_title={}\n for i,line in enumerate(lines):\n topic_id,partent_ids,title_character,title_words,desc_character,decs_words=line.split(\"\\t\").strip()\n # print(i,\"------------------------------------------------------\")\n # print(\"topic_id:\",topic_id)\n # print(\"partent_ids:\",partent_ids)\n # print(\"title_character:\",title_character)\n # print(\"title_words:\",title_words)\n # print(\"desc_character:\",desc_character)\n # print(\"decs_words:\",decs_words)\n dict_questionid_title[topic_id]=title_words+\" \"+decs_words\n print(\"len(dict_questionid_title):\",len(dict_questionid_title))\n return dict_questionid_title\n\ndef stat_training_data_length():\n training_data='train-zhihu4-only-title-all.txt'\n f = codecs.open(training_data, 'r', 'utf8')\n lines=f.readlines()\n length_dict={0:0,5:0,10:0,15:0,20:0,25:0,30:0,35:0,40:0,100:0,150:0,200:0,1500:0}\n length_list=[0,5,10,15,20,25,30,35,40,100,150,200,1500]\n for i,line in enumerate(lines):\n line_list=line.split('__label__')[0].strip().split(\" \")\n length=len(line_list)\n #print(i,\"length:\",length)\n for l in length_list:\n if length<l:\n length=l\n #print(\"length.assigned:\",length)\n break\n #print(\"length.before dict assign:\", length)\n length_dict[length]=length_dict[length]+1\n print(\"length_dict:\",length_dict)\n\n\nif __name__ == '__main__':\n if __name__ == '__main__':\n if __name__ == '__main__':\n #1.\n #vocabulary_word2index, vocabulary_index2word=create_voabulary()\n #vocabulary_word2index_label, vocabulary_index2word_label=create_voabulary_label()\n #load_data_with_multilabels(vocabulary_word2index,vocabulary_word2index_label,data_type='test')\n #2.\n #sentence=u'我想开通创业板'\n #sentence='w18476 w4454 w1674 w6 w25 w474 w1333 w1467 w863 w6 w4430 w11 w813 w4463 w863 w6 w4430 w111'\n #result=process_one_sentence_to_get_ui_bi_tri_gram(sentence,n_gram=3)\n #print(len(result),\"result:\",result)\n\n #3. transform to multilabel\n #label_list=[0,1,4,9,5]\n #result=transform_multilabel_as_multihot(label_list,label_size=15)\n #print(\"result:\",result)\n\n #4.load data for predict-----------------------------------------------------------------\n #file_path='test-zhihu-forpredict-v4only-title.txt'\n #questionid_question_lists=load_final_test_data(file_path)\n\n #vocabulary_word2index, vocabulary_index2word=create_voabulary()\n #vocabulary_word2index_label,_=create_voabulary_label()\n #final_list=load_data_predict(vocabulary_word2index, vocabulary_word2index_label, questionid_question_lists)\n\n #5.process label require lengh\n #ys_list=[99999]\n #ys_list_result=proces_label_to_algin(ys_list,require_size=5)\n #print(ys_list,\"ys_list_result1.:\",ys_list_result)\n #ys_list=[99999,23423432,67566765]\n #ys_list_result=proces_label_to_algin(ys_list,require_size=5)\n #print(ys_list,\"ys_list_result2.:\",ys_list_result)\n #ys_list=[99999,23423432,67566765,23333333]\n #ys_list_result=proces_label_to_algin(ys_list,require_size=5)\n #print(ys_list,\"ys_list_result2.:\",ys_list_result)\n #ys_list = [99999, 23423432, 67566765,44543543,546546546,323423434]\n #ys_list_result = proces_label_to_algin(ys_list, require_size=5)\n #print(ys_list, \"ys_list_result3.:\", ys_list_result)\n\n #6.create vocabulary label. 
sorted.\n #create_voabulary_label()\n\n #d={'a':3,'b':2,'c':11}\n #d_=sort_by_value(d)\n #print(\"d_\",d_)\n\n #7.\n #test_pad()\n\n #8.read topic info\n #read_topic_info()\n\n #9。\n stat_training_data_length()\n"
] |
[
[
"numpy.zeros"
]
] |
Kirkados/Field_Robotics_2021
|
[
"26823b75d303386a17c06b643a471a771e342779",
"26823b75d303386a17c06b643a471a771e342779"
] |
[
"learner.py",
"replay_buffer.py"
] |
[
"\"\"\"\nThis Class builds the Learner which consititutes the Critic, the Agent, and their\ntarget networks. Additionally, it samples data from the replay_buffer and trains\nboth the Critic and Agent neural networks.\n\nWhen a Learner instance is created, all the appropriate networks and training\noperations are built. Then, simply run Learner.run() to initiate the continuous\ntraining process.\n\nAdapted from msinto93, and SuRELI's github code. Many thanks!\n\nTraining:\n The critic is trained using supervised learning to minimize the\n cross-entropy loss between the Q value and the target value\n y = r_t + gamma * Q_target(next_state, Action(next_state))\n\n To train the actor, we apply the policy gradient\n Grad = grad(Q(s,a), A)) * grad(A, params)\n\n@author: Kirk Hovell (khovell@gmail.com)\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport time\nimport multiprocessing\nimport queue # for empty error catching\n\nfrom build_neural_networks import BuildActorNetwork, BuildQNetwork\nfrom settings import Settings\n\nclass Learner:\n def __init__(self, sess, saver, replay_buffer, writer):\n print(\"Initialising learner...\")\n\n # Saving items to the self. object for future use\n self.sess = sess\n self.saver = saver\n self.replay_buffer = replay_buffer\n self.writer = writer\n\n with tf.variable_scope(\"Preparing_placeholders\"):\n # Defining placeholders for training\n# self.state_placeholder = tf.placeholder(dtype = tf.float32, shape = [Settings.MINI_BATCH_SIZE, Settings.OBSERVATION_SIZE], name = \"state_placeholder\") # the '*' unpacks the OBSERVATION_SIZE list (incase it's pixels of higher dimension)\n# self.action_placeholder = tf.placeholder(dtype = tf.float32, shape = [Settings.MINI_BATCH_SIZE, Settings.ACTION_SIZE], name = \"action_placeholder\") # placeholder for actions\n# self.target_bins_placeholder = tf.placeholder(dtype = tf.float32, shape = [Settings.MINI_BATCH_SIZE, Settings.NUMBER_OF_BINS], name = \"target_bins_placeholder\") # Bin values of target network with Bellman update applied\n# self.target_q_distribution_placeholder = tf.placeholder(dtype = tf.float32, shape = [Settings.MINI_BATCH_SIZE, Settings.NUMBER_OF_BINS], name = \"target_q_distribution_placeholder\") # Future q-distribution from target critic\n# self.dQ_dAction_placeholder = tf.placeholder(dtype = tf.float32, shape = [Settings.MINI_BATCH_SIZE, Settings.ACTION_SIZE], name = \"dQ_dAction_placeholder\") # Gradient of critic predicted value with respect to input actions\n# self.importance_sampling_weights_placeholder = tf.placeholder(dtype = tf.float32, shape = Settings.MINI_BATCH_SIZE, name = \"importance_sampling_weights_placeholder\") # [PRIORITY_REPLAY_BUFFER only] Holds the weights that are used to remove bias from priority sampling\n\n self.state_placeholder = tf.placeholder(dtype = tf.float32, shape = [None, Settings.OBSERVATION_SIZE], name = \"state_placeholder\") # the '*' unpacks the OBSERVATION_SIZE list (incase it's pixels of higher dimension)\n self.action_placeholder = tf.placeholder(dtype = tf.float32, shape = [None, Settings.ACTION_SIZE], name = \"action_placeholder\") # placeholder for actions\n self.target_bins_placeholder = tf.placeholder(dtype = tf.float32, shape = [None, Settings.NUMBER_OF_BINS], name = \"target_bins_placeholder\") # Bin values of target network with Bellman update applied\n self.target_q_distribution_placeholder = tf.placeholder(dtype = tf.float32, shape = [None, Settings.NUMBER_OF_BINS], name = \"target_q_distribution_placeholder\") # Future q-distribution from 
target critic\n self.dQ_dAction_placeholder = tf.placeholder(dtype = tf.float32, shape = [Settings.MINI_BATCH_SIZE, Settings.ACTION_SIZE], name = \"dQ_dAction_placeholder\") # Gradient of critic predicted value with respect to input actions\n self.importance_sampling_weights_placeholder = tf.placeholder(dtype = tf.float32, shape = None, name = \"importance_sampling_weights_placeholder\") # [PRIORITY_REPLAY_BUFFER only] Holds the weights that are used to remove bias from priority sampling\n\n # The reward options that the distributional critic predicts the liklihood of being in\n self.bins = np.linspace(Settings.MIN_V, Settings.MAX_V, Settings.NUMBER_OF_BINS, dtype = np.float32)\n\n ######################################################\n ##### Build the networks and training operations #####\n ######################################################\n self.build_main_networks()\n self.build_target_networks()\n\n # Build the operation to update the target network parameters\n self.build_target_parameter_update_operations()\n\n # Create operstions for Tensorboard logging\n self.writer = writer\n self.create_summary_functions()\n\n print(\"Learner created!\")\n\n\n def create_summary_functions(self):\n\n # Creates the operation that, when run, will log the appropriate data to tensorboard\n with tf.variable_scope(\"Logging_Learning\"):\n # The critic loss during training is the only logged item\n self.iteration_loss_placeholder = tf.placeholder(tf.float32)\n self.iteration_loss_summary = tf.summary.scalar(\"Loss\", self.iteration_loss_placeholder)\n self.iteration_summary = tf.summary.merge([self.iteration_loss_summary])\n\n\n def build_main_networks(self):\n ##################################\n #### Build the learned critic ####\n ##################################\n self.critic = BuildQNetwork(self.state_placeholder, self.action_placeholder, scope='learner_critic_main')\n\n # Build the critic training function\n self.train_critic_one_step, self.projected_target_distribution = self.critic.generate_training_function(self.target_q_distribution_placeholder, self.target_bins_placeholder, self.importance_sampling_weights_placeholder)\n\n #################################\n #### Build the learned actor ####\n #################################\n self.actor = BuildActorNetwork(self.state_placeholder, scope='learner_actor_main')\n\n # Build the actor training function\n self.train_actor_one_step = self.actor.generate_training_function(self.dQ_dAction_placeholder)\n\n\n def build_target_networks(self):\n ###########################################\n #### Build the target actor and critic ####\n ###########################################\n self.target_critic = BuildQNetwork(self.state_placeholder, self.action_placeholder, scope='learner_critic_target')\n self.target_actor = BuildActorNetwork(self.state_placeholder, scope='learner_actor_target')\n\n\n def build_target_parameter_update_operations(self):\n # Build operations that either\n # 1) initialize target networks to be identical to main networks 2) slowly\n # 2) slowly copy main network parameters to target networks according to Settings.TARGET_NETWORK_TAU\n main_parameters = self.actor.parameters + self.critic.parameters\n target_parameters = self.target_actor.parameters + self.target_critic.parameters\n\n # Build operation that fully copies the main network parameters to the targets [Option 1 above]\n initialize_target_network_parameters = []\n # Looping across all variables in the main critic and main actor\n for source_variable, 
destination_variable in zip(main_parameters, target_parameters):\n initialize_target_network_parameters.append(destination_variable.assign(source_variable))\n\n # Build operation that slowly updates target networks according to Settings.TARGET_NETWORK_TAU [Option 2 above]\n update_target_network_parameters = []\n # Looping across all variables in the main critic and main actor\n for source_variable, destination_variable in zip(main_parameters, target_parameters):\n # target = tau*main + (1 - tau)*target\n update_target_network_parameters.append(destination_variable.assign((tf.multiply(source_variable, Settings.TARGET_NETWORK_TAU) + tf.multiply(destination_variable, 1. - Settings.TARGET_NETWORK_TAU))))\n\n # Save both operations to self object for later use\n self.initialize_target_network_parameters = initialize_target_network_parameters\n self.update_target_network_parameters = update_target_network_parameters\n\n def generate_queue(self):\n # Generate the queues responsible for communicating with the learner\n self.agent_to_learner = multiprocessing.Queue(maxsize = 1)\n self.learner_to_agent = multiprocessing.Queue(maxsize = 1)\n\n return self.agent_to_learner, self.learner_to_agent\n\n def run(self, stop_run_flag, replay_buffer_dump_flag, starting_training_iteration):\n # Continuously train the actor and the critic, by applying stochastic gradient\n # descent to batches of data sampled from the replay buffer\n\n # Initializing the counter of training iterations\n self.total_training_iterations = starting_training_iteration\n\n # Starting time\n start_time = time.time()\n\n # Initialize the target networks to be identical to the main networks\n self.sess.run(self.initialize_target_network_parameters)\n\n # Setup priority replay buffer parameters, if used\n if Settings.PRIORITY_REPLAY_BUFFER:\n # When priority_beta = 1, the priority sampling bias is fully accounted for.\n # We slowly anneal priority_beta towards 1.0 over the course of training.\n # Lower beta allows the prioritized samples to be weighted unfairly,\n # but this can help training, at least initially.\n priority_beta = Settings.PRIORITY_BETA_START # starting beta value\n beta_increment = (Settings.PRIORITY_BETA_END - Settings.PRIORITY_BETA_START) / Settings.MAX_TRAINING_ITERATIONS # to increment on each iteration\n else:\n # If we aren't using a priority buffer, set the importance sampled weights to ones for the entire run\n weights_batch = np.ones(shape = Settings.MINI_BATCH_SIZE)\n\n\n ###############################\n ##### Start Training Loop #####\n ###############################\n while self.total_training_iterations < Settings.MAX_TRAINING_ITERATIONS and not stop_run_flag.is_set():\n\n # Check if the agent wants some q-distributions calculated\n try:\n state_log, action_log, next_state_log, reward_log, done_log, gamma_log = self.agent_to_learner.get(False)\n\n # Reshapping\n gamma_log = np.reshape(gamma_log, [-1, 1])\n \n # Get the online q-distribution\n critic_distribution = self.sess.run(self.critic.q_distribution, feed_dict = {self.state_placeholder: state_log, self.action_placeholder: action_log}) # [episode length, number of bins]\n\n # Clean next actions from the target actor\n clean_next_actions = self.sess.run(self.target_actor.action_scaled, {self.state_placeholder:next_state_log}) # [episode length, num_actions]\n\n # Get the target q-distribution\n target_critic_distribution = self.sess.run(self.target_critic.q_distribution, feed_dict = {self.state_placeholder:state_log, 
self.action_placeholder:clean_next_actions}) # [episode length, number of bins]\n\n # Create batch of bins [see further description below]\n target_bins = np.repeat(np.expand_dims(self.bins, axis = 0), len(reward_log), axis = 0) # [episode length, number_of_bins]\n target_bins[done_log, :] = 0.0\n target_bins = np.expand_dims(reward_log, axis = 1) + (target_bins*gamma_log)\n\n # Calculating the bellman distribution (r + gamma*target_q_distribution). The critic loss is with respect to this projection.\n projected_target_distribution = self.sess.run(self.projected_target_distribution, feed_dict = {self.target_q_distribution_placeholder: target_critic_distribution, self.target_bins_placeholder: target_bins})\n\n # Calculating the loss at each timestep\n weights_batch = weights_batch = np.ones(shape = len(reward_log))\n loss_log = self.sess.run(self.critic.loss, feed_dict = {self.state_placeholder:state_log, self.action_placeholder:action_log, self.target_q_distribution_placeholder:target_critic_distribution, self.target_bins_placeholder:target_bins, self.importance_sampling_weights_placeholder:weights_batch})\n\n # Send the results back to the agent\n self.learner_to_agent.put((critic_distribution, target_critic_distribution, projected_target_distribution, loss_log))\n\n except queue.Empty:\n # If queue was empty, do nothing\n pass\n\n # If we don't have enough data yet to train OR we want to wait before we start to train\n if (self.replay_buffer.how_filled() < Settings.MINI_BATCH_SIZE) or (self.replay_buffer.how_filled() < Settings.REPLAY_BUFFER_START_TRAINING_FULLNESS):\n continue # Skip this training iteration. Wait for more training data.\n\n # Sample a mini-batch of data from the replay_buffer\n if Settings.PRIORITY_REPLAY_BUFFER:\n sampled_batch = self.replay_buffer.sample(priority_beta)\n weights_batch = sampled_batch[6] # [priority-only data] used for removing bias in prioritized data\n index_batch = sampled_batch[7] # [priority-only data] used for updating priorities\n else:\n sampled_batch = self.replay_buffer.sample()\n\n # Unpack the training data\n states_batch = sampled_batch[0]\n actions_batch = sampled_batch[1]\n rewards_batch = sampled_batch[2]\n next_states_batch = sampled_batch[3]\n dones_batch = sampled_batch[4]\n gammas_batch = sampled_batch[5]\n\n ###################################\n ##### Prepare Critic Training #####\n ###################################\n # Get clean next actions by feeding the next states through the target actor\n clean_next_actions = self.sess.run(self.target_actor.action_scaled, {self.state_placeholder:next_states_batch}) # [batch_size, num_actions]\n\n # Get the next q-distribution by passing the next states and clean next actions through the target critic\n target_critic_distribution = self.sess.run(self.target_critic.q_distribution, {self.state_placeholder:next_states_batch, self.action_placeholder:clean_next_actions}) # [batch_size, number_of_bins]\n\n # Create batch of bins\n target_bins = np.repeat(np.expand_dims(self.bins, axis = 0), Settings.MINI_BATCH_SIZE, axis = 0) # [batch_size, number_of_bins]\n\n # If this data in the batch corresponds to the end of an episode (dones_batch[i] = True),\n # set all the bins to 0.0. This will eliminate the inclusion of the predicted future\n # reward when computing the bellman update (i.e., the predicted future rewards are only\n # the current reward, since we aren't continuing the episode any further).\n target_bins[dones_batch, :] = 0.0\n\n # Bellman projection. 
reward + gamma^N*bin -> The new\n # expected reward, according to the recently-received reward.\n # If the new reward is outside of the current bin, then we will\n # adjust the probability that is assigned to the bin.\n target_bins = np.expand_dims(rewards_batch, axis = 1) + (target_bins*gammas_batch)\n\n #####################################\n ##### TRAIN THE CRITIC ONE STEP #####\n #####################################\n critic_loss, _ = self.sess.run([self.critic.loss, self.train_critic_one_step], {self.state_placeholder:states_batch, self.action_placeholder:actions_batch, self.target_q_distribution_placeholder:target_critic_distribution, self.target_bins_placeholder:target_bins, self.importance_sampling_weights_placeholder:weights_batch})\n\n\n ##################################\n ##### Prepare Actor Training #####\n ##################################\n # Get clean actions that the main actor would have taken for this batch of states if there were no noise added\n clean_actions = self.sess.run(self.actor.action_scaled, {self.state_placeholder:states_batch})\n\n # Calculate the derivative of the main critic's q-value with respect to these actions\n dQ_dAction = self.sess.run(self.critic.dQ_dAction, {self.state_placeholder:states_batch, self.action_placeholder:clean_actions}) # also known as action gradients\n\n ####################################\n ##### TRAIN THE ACTOR ONE STEP #####\n ####################################\n self.sess.run(self.train_actor_one_step, {self.state_placeholder:states_batch, self.dQ_dAction_placeholder:dQ_dAction[0]})\n\n\n # If it's time to update the target networks\n if self.total_training_iterations % Settings.UPDATE_TARGET_NETWORKS_EVERY_NUM_ITERATIONS == 0:\n # Update target networks according to TAU!\n self.sess.run(self.update_target_network_parameters)\n\n # If we're using a priority buffer, tend to it now.\n if Settings.PRIORITY_REPLAY_BUFFER:\n # The priority replay buffer ranks the data according to how unexpected they were\n # An unexpected data point will have high loss. Now that we've just calculated the loss,\n # update the priorities in the replay buffer.\n self.replay_buffer.update_priorities(index_batch, (np.abs(critic_loss)+Settings.PRIORITY_EPSILON))\n\n # Increment priority beta value slightly closer towards 1.0\n priority_beta += beta_increment\n\n # If it's time to check if the prioritized replay buffer is overful\n if Settings.PRIORITY_REPLAY_BUFFER and (self.total_training_iterations % Settings.DUMP_PRIORITY_REPLAY_BUFFER_EVER_NUM_ITERATIONS == 0):\n # If the buffer is overfilled\n if (self.replay_buffer.how_filled() > Settings.REPLAY_BUFFER_SIZE):\n # Make the agents wait before adding any more data to the buffer\n replay_buffer_dump_flag.clear()\n # How overful is the buffer?\n samples_to_remove = self.replay_buffer.how_filled() - Settings.REPLAY_BUFFER_SIZE\n # Remove the appropriate number of samples\n self.replay_buffer.remove(samples_to_remove)\n # Allow the agents to continue now that the buffer is ready\n replay_buffer_dump_flag.set()\n\n # If it's time to log the training performance to TensorBoard\n if self.total_training_iterations % Settings.LOG_TRAINING_PERFORMANCE_EVERY_NUM_ITERATIONS == 0:\n # Logging the mean critic loss across the batch\n summary = self.sess.run(self.iteration_summary, feed_dict = {self.iteration_loss_placeholder: np.mean(critic_loss)})\n self.writer.add_summary(summary, self.total_training_iterations)\n\n # If it's time to save a checkpoint. 
Be it a regular checkpoint, the final planned iteration, or the final unplanned iteration\n if (self.total_training_iterations % Settings.SAVE_CHECKPOINT_EVERY_NUM_ITERATIONS == 0) or (self.total_training_iterations == Settings.MAX_TRAINING_ITERATIONS) or stop_run_flag.is_set():\n # Save the state of all networks and note the training iteration\n self.saver.save(self.total_training_iterations, self.state_placeholder, self.actor.action_scaled)\n\n # If it's time to print the training performance to the screen\n if self.total_training_iterations % Settings.DISPLAY_TRAINING_PERFORMANCE_EVERY_NUM_ITERATIONS == 0:\n print(\"Trained actor and critic %i iterations in %.2f minutes, at %.3f s/iteration. Now at iteration %i.\" % (Settings.DISPLAY_TRAINING_PERFORMANCE_EVERY_NUM_ITERATIONS, (time.time() - start_time)/60, (time.time() - start_time)/Settings.DISPLAY_TRAINING_PERFORMANCE_EVERY_NUM_ITERATIONS, self.total_training_iterations))\n start_time = time.time() # resetting the timer for the next PERFORMANCE_UPDATE_EVERY_NUM_ITERATIONS of iterations\n\n # Incrementing training iteration counter\n self.total_training_iterations += 1\n\n # If we are done training\n print(\"Learner finished after running \" + str(self.total_training_iterations) + \" training iterations!\")\n\n # Flip the flag signalling all agents to stop\n stop_run_flag.set()\n\n\n\"\"\"\nMy version of the critic training is below. I find it more legible than the\nabove training operations, but it does not run as fast as the above implementation\nby msinto93. So, I have opted to use theirs. I've verified that the same inputs\nalways produces the same outputs between the two implementations.\n\"\"\"\n\n# Alternate implementation of D4PG critic training\n\"\"\"\nwith tf.variable_scope(\"Train_Critic\"): # grouping tensorboard graph\n\n ##################################################\n ###### Generating Updated Bin Probabilities ######\n ##################################################\n\n # Initializing the matrix that will hold the new bin probabilities as they get generated\n new_bin_probabilities = tf.zeros([Settings.MINI_BATCH_SIZE, Settings.NUMBER_OF_BINS])\n\n # For each bin, project where that bin's probability should end up after receiving the reward\n # by calculating the new expected reward. Then, find out what bin the projection lies in.\n # Then, distribute the probability into that bin. Then, build a loss function to minimize\n # the difference between the current distribution and the calculated distribution.\n for this_bin in range(Settings.NUMBER_OF_BINS): # for each bin\n\n # Bellman projection. reward + gamma^N*not_done*bin -> The new\n # expected reward, according to the recently-received reward.\n # If the new reward is outside of the current bin, then we will\n # adjust the probability that is assigned to the bin.\n # If the episode terminates here, the new expected reward from\n # this state-action pair is just the reward.\n projection = self.reward_placeholder + (self.discount_factor_placeholder)*(1.0 - self.done_placeholder)*self.bins[this_bin]\n\n # Clipping projected reward to its bounds.\n projection = tf.squeeze(tf.clip_by_value(projection, Settings.MIN_V, Settings.MAX_V)) # Squeezing -> shape [batch_size]\n\n # Which bin number the projected value ends up in (so we know which bin to increase the probability of)\n new_bin = (projection - Settings.MIN_V)/self.bin_width # shape [batch_size]\n\n # However, it is unlikely the new bin number will lie directly\n # on an existing bin number. 
Therefore, determine the nearby\n # bins so we know where we should distribute the probability into.\n adjacent_bin_upper = tf.ceil(new_bin) # shape [batch_size]\n adjacent_bin_lower = tf.floor(new_bin) # shape [batch_size]\n\n # Checking if the upper and lower bins are the same bin (!!!).\n # This occurs when the projection lies directly on a bin.\n # Common causes are: 1) The reward is large and pushes the projection\n # to one of the bounds (where it is clipped). 2) There is a\n # reward of 0 for bin[i] = 0.\n are_bins_identical = tf.equal(adjacent_bin_upper, adjacent_bin_lower) # shape [batch_size]\n are_bins_different = tf.logical_not(are_bins_identical) # shape [batch_size]\n\n # Generating two one-hot matrices that will be used to place the\n # projected next-state probabilities from the target critic\n # network into the appropriate bin. The appropriate bin is the\n # one who we would like to increase their probability.\n # Only one element in each row is a 1, all others are 0.\n one_hot_upper = tf.one_hot(tf.to_int32(adjacent_bin_upper), depth = Settings.NUMBER_OF_BINS) # shape [batch_size, #atoms]\n one_hot_lower = tf.one_hot(tf.to_int32(adjacent_bin_lower), depth = Settings.NUMBER_OF_BINS) # shape [batch_size, #atoms]\n\n # Disributing the next-state bin probabilities (from the target\n # q_network) into both bins dictated by the projection.\n # Accumulating the new bin probabilities as we loop through all bins.\n # Note: the \"upper\" part gets multiplied by the one_hot_lower because\n # the (upper - new_bin) is essentially \"how far\" the new bin is from the\n # upper bin. Therefore, the larger that number, the more we\n # should put in the lower bin.\n # This accumulation only applies to samples in the batch that\n # have been assigned different bins (by multiplying by are_bins_different)\n new_bin_probabilities += tf.reshape(self.target_q_network[:, this_bin] * (adjacent_bin_upper - new_bin) * tf.to_float(are_bins_different), [-1, 1]) * one_hot_lower # [batch_size, 1] * [batch_size, #atoms] = [batch_size, #atoms]\n new_bin_probabilities += tf.reshape(self.target_q_network[:, this_bin] * (new_bin - adjacent_bin_lower) * tf.to_float(are_bins_different), [-1, 1]) * one_hot_upper # [batch_size, 1] * [batch_size, #atoms] = [batch_size, #atoms]\n\n # If, by chance, the new_bin lies directly on a bin, then the\n # adjacent_bin_upper and adjacent_bin_lower will be identical.\n # In that case, the full next-state probability is added to that\n # bin.\n new_bin_probabilities += tf.reshape(self.target_q_network[:, this_bin] * tf.to_float(are_bins_identical), [-1, 1]) * one_hot_upper # [batch_size, 1] * [batch_size, #atoms] = [batch_size, #atoms]\n\n\n ###########################################\n ##### Generating Critic Loss Function #####\n ###########################################\n\n # DEBUGGING\n #self.TEST_PROBS = new_bin_probabilities\n # END DEBUGGING\n\n # We've now got the new distribution (bin probabilities),\n # now we must generate a loss function for the critic!\n self.critic_losses = tf.nn.softmax_cross_entropy_with_logits_v2(logits = self.q_network_logits,\n labels = tf.stop_gradient(new_bin_probabilities)) # not sure if tf.stop_gradients is needed, but it certainly doesn't hurt\n\n # Taking the mean loss across the batch\n self.critic_loss = tf.reduce_mean(self.critic_losses)\n\n # Optional L2 Regularization\n if Settings.L2_REGULARIZATION:\n # Penalize the critic for having large weights -> L2 Regularization\n self.critic_loss += 
l2_regularization(self.critic_parameters)\n\n\n ##############################################################################\n ##### Develop the Operation that Trains the Critic with Gradient Descent #####\n ##############################################################################\n self.critic_trainer = tf.train.AdamOptimizer(Settings.CRITIC_LEARNING_RATE)\n self.train_critic_one_step = self.critic_trainer.minimize(self.critic_loss, var_list = self.critic_parameters) # RUN THIS TO TRAIN THE CRITIC ONE STEP\n #self.train_critic_one_step = self.critic_trainer.minimize(self.critic_loss) # RUN THIS TO TRAIN THE CRITIC ONE STEP\n\n\n\"\"\"",
"\"\"\"\nGenerates and manages the large experience replay buffer.\n\nThe experience replay buffer holds all the data that is dumped into it from the \nmany agents who are running episodes of their own. The learner then trains off \nthis heap of data continually and in its own thread.\n\n@author: Kirk Hovell (khovell@gmail.com)\n\"\"\"\n\nimport random\nimport numpy as np\n\nfrom collections import deque\n\nfrom settings import Settings\n\nclass ReplayBuffer():\n # Generates and manages a non-prioritized replay buffer\n \n def __init__(self):\n # Generate the buffer\n self.buffer = deque(maxlen = Settings.REPLAY_BUFFER_SIZE)\n\n # Query how many entries are in the buffer\n def how_filled(self):\n return len(self.buffer)\n \n # Add new experience to the buffer\n def add(self, experience):\n self.buffer.append(experience)\n \n # Randomly sample data from the buffer\n def sample(self):\n # Decide how much data to sample\n # (maybe the buffer doesn't contain enough samples yet to fill a MINI_BATCH)\n batch_size = min(Settings.MINI_BATCH_SIZE, len(self.buffer)) \n # Sample the data\n sampled_batch = np.asarray(random.sample(self.buffer, batch_size))\n\n # Unpack the training data\n states_batch = np.stack(sampled_batch[:, 0])\n actions_batch = np.stack(sampled_batch[:, 1])\n rewards_batch = sampled_batch[:, 2]\n next_states_batch = np.stack(sampled_batch[:, 3])\n dones_batch = np.stack(sampled_batch[:,4])\n gammas_batch = np.reshape(sampled_batch[:, 5], [-1, 1])\n\n return states_batch, actions_batch, rewards_batch, next_states_batch, dones_batch, gammas_batch\n "
] |
[
[
"numpy.expand_dims",
"tensorflow.multiply",
"numpy.abs",
"numpy.linspace",
"numpy.reshape",
"tensorflow.placeholder",
"numpy.ones",
"numpy.mean",
"tensorflow.variable_scope",
"tensorflow.summary.scalar",
"tensorflow.summary.merge"
],
[
"numpy.reshape",
"numpy.stack"
]
] |
arp95/til_biomarker_ovarian_cancer
|
[
"b4e9f8126a6468d547fe1935fc4a224b36703ebe",
"b4e9f8126a6468d547fe1935fc4a224b36703ebe"
] |
[
"code/epithelium_stroma_segmentation.py",
"misc/epi_stroma_model/seg_GAN.py"
] |
[
"\"\"\"\nOriginal Author: Cheng Lu\nModified By: Arpit Aggarwal\nDescription of the file: Epi/Stroma segmentation. Updated script for my use case.\n\"\"\"\n\n\n# header files needed\nfrom unet import *\nfrom glob import glob\nfrom PIL import Image\nimport numpy as np\nimport cv2\nimport torch\nimport torch.nn as nn\nfrom torchvision import transforms\nimport sys\nimport os\nfrom matplotlib import cm\nfrom torch.utils.data import DataLoader\n\n\n# parameters\nmodel_path = \"/mnt/rstor/CSE_BME_AXM788/home/axa1399/til_biomarker_ovarian_cancer/model_files/epi_seg_unet.pth\"\ninput_path = \"/scratch/users/axa1399/tcga_ovarian_cancer/patches/\"\ninput_images_path = \"/mnt/rstor/CSE_BME_AXM788/data/TCGA_Ovarian Cancer/TCGA_Ovarian_Diagnostic_Path/*\"\noutput_path = \"/mnt/rstor/CSE_BME_AXM788/home/axa1399/tcga_ovarian_cancer/epi_stroma_masks/\"\nimage_size = 3000\ninput_image_size = 750\n\n\n# load model\n#device = torch.device(f'cuda' if torch.cuda.is_available() else 'cpu')\ndevice = 'cpu'\nnet = torch.load(model_path, map_location=device)\nnet.eval()\n\n\n# function to return epi/stroma mask for a given patch\ndef get_patch_epithelium_stroma_mask(input_path):\n # read image and get original patch dimensions\n patch = cv2.imread(input_path)\n patch = cv2.resize(patch, (input_image_size, input_image_size))\n np_original_patch = np.array(patch).astype(np.uint8)\n h = int(np_original_patch.shape[0])\n w = int(np_original_patch.shape[1])\n\n # get output mask\n np_patch = np.array(patch).astype(np.uint8)\n output_patch_mask = np.zeros((h, w)).astype(np.uint8)\n \n for index1 in range(0, h, input_image_size):\n for index2 in range(0, w, input_image_size):\n np_patch_part = np_patch[index1:index1+ input_image_size, index2:index2+ input_image_size]\n h_part = int(np_patch_part.shape[0])\n w_part = int(np_patch_part.shape[1])\n\n np_patch_part = np_patch_part.transpose((2, 0, 1))\n np_patch_part = np_patch_part / 255\n tensor_patch = torch.from_numpy(np_patch_part)\n x = tensor_patch.unsqueeze(0)\n x = x.to(device, dtype=torch.float32)\n output = net(x)\n output = torch.sigmoid(output)\n pred = output.detach().squeeze().cpu().numpy()\n mask_pred = (pred>.7).astype(np.uint8)\n pil_mask_pred = Image.fromarray(mask_pred*255)\n np_mask_pred = (np.array(pil_mask_pred)/255).astype(np.uint8)\n\n # update output\n output_patch_mask[index1:index1+h_part, index2:index2+w_part] = np_mask_pred\n return output_patch_mask\n\n\n# function to save epi/stroma mask for a given patch\ndef save_patch_epithelium_stroma_mask(patch, output_path):\n h = patch.shape[0]\n w = patch.shape[1]\n image = np.array(patch)\n image_inv = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n\n # filter using contour area and remove small noise\n cnts = cv2.findContours(image_inv, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if len(cnts) == 2 else cnts[1]\n for c in cnts:\n area = cv2.contourArea(c)\n if area < 100:\n cv2.drawContours(image_inv, [c], -1, (0, 0, 0), -1)\n\n # filter using contour area and remove small noise\n output_mask = 255 - image_inv\n cnts = cv2.findContours(output_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if len(cnts) == 2 else cnts[1]\n for c in cnts:\n area = cv2.contourArea(c)\n if area < 100:\n cv2.drawContours(output_mask, [c], -1, (0, 0, 0), -1)\n\n # fill the holes\n #for index in range(0, 3):\n # final_mask = cv2.dilate(output_mask.copy(), None, iterations=index+1)\n patch = Image.fromarray(output_mask).resize((image_size, image_size), Image.BICUBIC)\n 
patch.save(output_path)\n\n\n# run code\nif __name__ == '__main__':\n im_paths = sorted(glob(input_images_path))\n im_paths = [im_paths[102]]\n for im_path in im_paths:\n image = im_path.split(\"/\")[-1][:-4]\n patches = glob(input_path + image + \"*\")\n for patch in patches:\n filename = patch.split(\"/\")[-1]\n output_mask = get_patch_epithelium_stroma_mask(patch)\n save_patch_epithelium_stroma_mask(output_mask, output_path + filename)\n print(\"Done!\")",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 23 17:15:48 2018\n@author: zzl\n\"\"\"\nimport torch.nn as nn\n\nclass net_G(nn.Module):\n def __init__(self, input_nc=3, output_nc=3, ngf=64, n_downsampling=4, n_blocks=9, norm_layer=nn.InstanceNorm2d): #BatchNorm2d/InstanceNorm2d\n assert(n_blocks >= 0)\n super(net_G, self).__init__() \n\n model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), nn.ReLU(True)]\n ### downsample\n for i in range(n_downsampling):\n mult = 2**i\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),\n norm_layer(ngf * mult * 2), nn.ReLU(True)]\n\n ### resnet blocks\n mult = 2**n_downsampling\n for i in range(n_blocks):\n model += [ResnetBlock(ngf * mult,norm_layer=norm_layer)]\n \n ### upsample \n for i in range(n_downsampling):\n mult = 2**(n_downsampling - i)\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),\n norm_layer(int(ngf * mult / 2)), nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()] \n self.model = nn.Sequential(*model)\n \n def forward(self, input):\n return self.model(input) \n \n# Define a resnet block\nclass ResnetBlock(nn.Module):\n def __init__(self, dim, norm_layer):\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, norm_layer)\n\n def build_conv_block(self, dim, norm_layer):\n conv_block = []\n p = 0\n conv_block += [nn.ReflectionPad2d(1)]\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),\n norm_layer(dim),\n nn.ReLU(True)]\n\n conv_block += [nn.ReflectionPad2d(1)]\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),\n norm_layer(dim)]\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n out = x + self.conv_block(x)\n return out\n\n"
] |
[["torch.sigmoid", "torch.load", "torch.from_numpy", "numpy.array", "numpy.zeros"], ["torch.nn.Sequential", "torch.nn.ReflectionPad2d", "torch.nn.Conv2d", "torch.nn.Tanh", "torch.nn.ReLU"]] |
DataResponsibly/fairDAGs | ["ee6cfb447044af35b457f606ebcc0b70a7e7de77"] | ["fairness_instru.py"] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # Fairness-Aware Instrumentation of ML-Pipelines\n\n# ## Preparations\n\nimport os\nfrom collections import defaultdict\nimport inspect\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nimport re\nfrom graphviz import Digraph\nimport pickle\nimport random\nimport plotly.express as px\n\n\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler, label_binarize, KBinsDiscretizer, LabelEncoder\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom utils import *\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\npd.set_option('display.max_colwidth',1000)\nnp.set_printoptions(precision = 4)\npd.set_option(\"display.precision\", 4)\npd.set_option('expand_frame_repr', True)\n\n\n# ## Logs Part\n\n# In[2]:\n\n\n# Current Version\ndef describe_ver(pipeline_to_test, cat_col, numerical_col, sensi_atts, target_name, training, save_path):\n \"\"\"\n Generates intermediate dicts. saved in dataframe format.\n\n args:\n pipeline_to_test: pipeline code to be tested. String.\n cat_col: catagorical attributes used for tracing changes. String.\n numerical_col: numerical attributes used for tracing changes. String.\n sensi_atts: sensible attributes used to generate static labels. String.\n target_name: target attributes. String.\n training: Indicator for training session. False to be testing. Used for classifier train/val. Bool.\n save_path: save_path specified to save intermediate dict files. String.\n\n return:\n log_dict: dictionary saving all intermediate results(pandas dataframe).\n plot_dict: dictionary saving all data used for histogram plotting.\n eval(target_df): dataframe after all pandas & sklearn operations.\n clf: classifier.\n \"\"\"\n to_csv_count = 1\n log_dict = {}\n plot_dict = {}\n raw_func = inspect.getsource(pipeline_to_test)\n\n\n input_args, executable_list, outputs = func_aggregation(raw_func)\n\n for line in input_args:\n exec(line)\n\n prev = {}\n\n numerical_metric_list = ['count', 'missing_count', 'median', 'mad', 'range']\n numerical_df = pd.DataFrame(np.inf, index = numerical_col, columns = numerical_metric_list)\n\n cat_metric_list = ['missing_count', 'num_class', 'class_count', 'class_percent']\n cat_df = pd.DataFrame(np.inf, index = cat_col, columns = cat_metric_list)\n\n\n ######################################\n # Execution\n ######################################\n for cur_line in executable_list:\n print_bool = False\n exec(cur_line)\n# print(cur_line)\n if '#' in cur_line:\n continue\n try:\n if str(eval(f\"type({cur_line.split('=')[0].strip()})\")) == \"<class 'pandas.core.frame.DataFrame'>\":\n\n target_df = cur_line.split('=')[0].strip()\n\n plot_dict[line_cleansing(cur_line)] = static_label(eval(target_df), sensi_atts, target_name)\n eval(target_df).to_csv(save_path+'/checkpoints/csv/training/o'+str(to_csv_count)+'.csv' if training else save_path+'/checkpoints/csv/testing/o'+str(to_csv_count)+'.csv')\n to_csv_count+=1\n\n col_list = eval(target_df).columns.tolist()\n numerical_col_sub = [i for i in numerical_col if i in col_list]\n\n cat_col_sub = [j for j in cat_col if j in col_list]\n\n if len(numerical_col_sub) != 0:\n ######################################################################################\n # numerical features & metrices\n # counts, missing 
values, Median and MAD, range/scaling\n ######################################################################################\n for numeric_feature in numerical_col_sub:\n\n numerical_df = cal_numerical(eval(target_df), numeric_feature, numerical_df)\n\n if len(cat_col_sub) != 0:\n ######################################################################################\n # categorical features & metrices\n # missing values, number of classes, counts for each group, percentage for each group\n ######################################################################################\n for cat_feature in cat_col_sub:\n\n cat_df = cal_categorical(eval(target_df), cat_feature, cat_df)\n\n ######################################################################################\n # Comparison occurs here!\n ######################################################################################\n\n if len(prev) != 0:\n if len(numerical_col_sub) != 0:\n numerical_dif = numerical_df - prev['numerical']\n if (numerical_dif.values != 0).any():\n log_dict[line_cleansing(cur_line)] = {'num': numerical_dif}\n\n ##################################\n # ⬆️ numerical\n # ⬇️ categorical\n ##################################\n if len(cat_col_sub) != 0:\n cat_dif = get_categorical_dif(cat_df, cat_metric_list, prev['categorical'])\n cat_dif_flag = check_cat_dif(cat_dif)\n if cat_dif_flag:\n\n log_dict[line_cleansing(cur_line)] = {'cat':cat_dif}\n\n print_bool = True\n\n\n # save the output for next round comparison\n prev['numerical'] = numerical_df.copy()\n prev['categorical'] = cat_df.copy()\n\n elif str(eval(f\"type({cur_line.split('=')[0].strip()})\")).startswith(\"<class 'sklearn\"):\n pass\n else:\n pass\n\n except:\n if len(numerical_col_sub) != 0:\n ######################################################################################\n # numerical features & metrices\n # counts, missing values, Median and MAD, range/scaling\n ######################################################################################\n for numeric_feature in numerical_col:\n\n numerical_df = cal_numerical(eval(target_df), numeric_feature, numerical_df)\n if len(cat_col_sub) != 0:\n ######################################################################################\n # categorical features & metrices\n # missing values, number of classes, counts for each group, percentage for each group\n ######################################################################################\n for cat_feature in cat_col:\n\n cat_df = cal_categorical(eval(target_df), cat_feature, cat_df)\n\n ######################################################################################\n # Comparison occurs here!\n ######################################################################################\n if len(prev) != 0:\n if len(numerical_col_sub) != 0:\n numerical_dif = numerical_df - prev['numerical']\n if (numerical_dif.values != 0).any():\n log_dict[line_cleansing(cur_line)] = {'num':numerical_dif}\n\n\n ##################################\n # ⬆️ numerical\n # ⬇️ categorical\n ##################################\n if len(cat_col_sub) != 0:\n cat_dif = get_categorical_dif(cat_df, cat_metric_list, prev['categorical'])\n cat_dif_flag = check_cat_dif(cat_dif)\n if cat_dif_flag:\n\n log_dict[line_cleansing(cur_line)] = {'cat':cat_dif}\n\n\n print_bool = True\n\n # save the output for next round comparison\n prev['numerical'] = numerical_df.copy()\n prev['categorical'] = cat_df.copy()\n\n\n nested_graph = pipeline_to_dataflow_graph(eval(f'{outputs[0]}'))\n\n # 
print('####################### Start Sklearn Pipeline #######################')\n\n for item in nested_graph:\n ######################################################################################\n # numerical features & metrices\n # counts, missing values, Median and MAD, range/scaling\n ######################################################################################\n if item.name in numerical_col:\n numeric_feature = item.name\n\n eval(target_df)[item.name] = item.operation.fit_transform(eval(target_df)[item.name].values.reshape(-1,1))\n # print('-------------------------------------------------------')\n # print(f\"Operations {str(item.operation).split('(')[0]} on {item.name}\")\n # print('-------------------------------------------------------')\n # print()\n plot_dict[line_cleansing(f\"{item.name}__{str(item.operation).split('(')[0]}\")] = static_label(eval(target_df), sensi_atts, target_name)\n eval(target_df).to_csv(save_path+'/checkpoints/csv/training/o'+str(to_csv_count)+'.csv' if training else save_path+'/checkpoints/csv/testing/o'+str(to_csv_count)+'.csv')\n\n to_csv_count+=1\n ##############################\n # Metrices Calculation\n ##############################\n numerical_df = cal_numerical(eval(target_df), numeric_feature, numerical_df)\n\n ##############################\n # Comparison\n ##############################\n numerical_dif = numerical_df - prev['numerical']\n\n if (numerical_dif.loc[numeric_feature,:].values != 0).any():\n # print(f'Metrics: {mat} changed in {col} with value {dif}')\n # print('*'*10)\n # print('Changes in numerical features!')\n # display(numerical_dif.loc[numeric_feature,:].to_frame())\n log_dict[line_cleansing(f\"{item.name}__{str(item.operation).split('(')[0]}\")] = {'num':numerical_dif.loc[numeric_feature,:].to_frame().transpose()}\n # print('*'*10)\n # print()\n\n ######################################################################################\n # categorical features & metrices\n # missing values, number of classes, counts for each group, percentage for each group\n ######################################################################################\n elif item.name in cat_col:\n cat_feature = item.name\n ##############################\n try:\n eval(target_df)[item.name] = item.operation.fit_transform(eval(target_df)[item.name].values.reshape(-1,1)).toarray()\n except:\n eval(target_df)[item.name] = item.operation.fit_transform(eval(target_df)[item.name].values.reshape(-1,1))\n plot_dict[line_cleansing(f\"{item.name}__{str(item.operation).split('(')[0]}\")] = static_label(eval(target_df), sensi_atts, target_name)\n eval(target_df).to_csv(save_path+'/checkpoints/csv/training/o'+str(to_csv_count)+'.csv' if training else save_path+'/checkpoints/csv/testing/o'+str(to_csv_count)+'.csv')\n to_csv_count+=1\n\n ##############################\n # Metrices Calculation\n ##############################\n cat_df = cal_categorical(eval(target_df), cat_feature, cat_df)\n\n ##############################\n # Comparison\n ##############################\n cat_dif = get_categorical_dif(cat_df, cat_metric_list, prev['categorical'])\n cat_dif_flag = check_cat_dif(cat_dif)\n if cat_dif_flag:\n # print('*'*10)\n # print('Changes in categorical features!')\n # display(cat_dif.loc[cat_feature,:].to_frame())\n log_dict[line_cleansing(f\"{item.name}__{str(item.operation).split('(')[0]}\")] = {'cat':cat_dif.loc[cat_feature,:].to_frame().transpose()}\n # print('*'*10)\n\n else:\n try:\n eval(target_df)[item.name] = 
item.operation.fit_transform(eval(target_df)[item.name].values.reshape(-1,1)).toarray()\n except:\n pass\n plot_dict[line_cleansing(f\"{item.name}__{str(item.operation).split('(')[0]}\")] = static_label(eval(target_df), sensi_atts, target_name)\n eval(target_df).to_csv(save_path+'/checkpoints/csv/training/o'+str(to_csv_count)+'.csv' if training else save_path+'/checkpoints/csv/testing/o'+str(to_csv_count)+'.csv')\n to_csv_count+=1\n\n prev['numerical'] = numerical_df.copy()\n prev['categorical'] = cat_df.copy()\n\n # run classifier\n classi_match = re.findall(\"'classifier',.\\w+\\(\\)\", executable_list[-1])[0].split(', ')[-1]\n clf = eval(classi_match)\n if training:\n to_train = eval(target_df).select_dtypes(include=['int', 'float64'])\n eval(target_df)[target_name] = eval('labels')\n\n clf.fit(to_train, eval('labels'))\n eval(target_df)['pred_'+target_name] = clf.predict(to_train)\n else:\n # to_train = eval(target_df).select_dtypes(exclude=['object'])\n eval(target_df)[target_name] = eval('labels')\n # clf.fit(to_train, eval('labels'))\n # eval(target_df)['pred_'+target_name] = clf.predict(to_train)\n # print(eval(target))\n return log_dict, plot_dict, eval(target_df), clf\n\n\n# ## DAGs Part\n\n# In[405]:\n\ndef find_pd_lines(pipeline_func):\n \"\"\"\n function used for extract pandas operations from raw pipeline codes.\n\n args:\n pipeline_func: raw pipeline codes. String.\n\n return:\n rows including pandas operations.\n \"\"\"\n pipeline_func = inspect.getsource(pipeline_func)\n pd_lines = []\n input_args , executable_list, _ = func_aggregation(pipeline_func)\n for line in input_args:\n exec(line)\n for cur_line in executable_list:\n exec(cur_line)\n try:\n if 'inplace' in cur_line:\n pd_lines.append(cur_line)\n elif str(eval(f\"type({cur_line.split('=')[0].strip()})\")).startswith(\"<class 'pandas\"):\n pd_lines.append(cur_line)\n except:\n pass\n return pd_lines\n\ndef pd_to_dataflow_graph(pipeline_func, log_list, parent_vertices=[]):\n \"\"\"\n Function translating pandas operations to DAGs.\n\n args:\n pipeline_func: raw pipeline codes. String.\n log_list: log_list storing all operation identifiers. List.\n parent_vertices: parent nodes. default to be None. No operations before pandas. 
List.\n\n return:\n graph: list of nodes in the eligible format from graphviz.\n previous: last node used for parent node of sklearn part.\n log_list: log_list storing all operation identifiers.\n \"\"\"\n executable_list = find_pd_lines(pipeline_func)\n graph = []\n previous = []\n\n for line in executable_list:\n line.replace('{','').replace('}', '')\n if 'inplace' in line and '#' not in line:\n log_list.append(line_cleansing(line))\n\n df_name = line.split('.')[0]\n func_name = line.split('.')[1].split('(')[0].strip()\n col_effect = line.split('[')[1].split(']')[0].strip()\n if len(previous) > 1:\n for node in previous:\n if node.name == df_name:\n vertex = DataFlowVertex([node], df_name+'_drop', func_name+' '+col_effect, col_effect)\n previous.append(vertex)\n previous.remove(node)\n else:\n vertex = DataFlowVertex(previous, df_name+'_drop', func_name+' '+col_effect, col_effect)\n previous = [vertex]\n else:\n var_name = line.split('=')[0].strip()\n\n # match \".func_name(...)\"\n pd_func = re.search('\\.\\s*([_a-z]*)\\s*\\(',line)\n if pd_func:\n func_name = pd_func.group(1)\n params = re.search('\\(([^\\)]*)\\)',line) #\"(...)\"\n\n if params:\n params = params.group(1).strip()\n\n if func_name == 'read_csv': #df = pd.read_csv(path)\n vertex = DataFlowVertex(parent_vertices,var_name, func_name, params)\n previous.append(vertex)\n log_list.append(line_cleansing(line))\n elif func_name in ['join','merge','concat']:\n log_list.append(line_cleansing(line))\n if func_name == 'concat': #df_new = pd.concat([df1,df2],keys=[])\n df_names = [item.strip() for item in params.split(']')[0].strip().strip('[]').split(',')]\n\n else: # df_new = df1.join/merge(df2,on='...',how='...')\n df_names = [line.split('=')[1].strip().split('.')[0], params.split(',')[0].strip()]\n parent_vertices = search_vertex_by_names(df_names, graph) #search in graph by df_names\n vertex = DataFlowVertex(parent_vertices, var_name, func_name, params) #TODO vertex name?\n previous = [vertex] + list(set(previous) - set(parent_vertices))\n elif 'lambda' in params:\n log_list.append(line_cleansing(line))\n cols = var_name.split('[')[1].split(']')[0].strip()\n vertex = DataFlowVertex(previous, func_name+' '+cols, func_name, params)\n previous = [vertex]\n elif '[' in var_name:\n log_list.append(line_cleansing(line))\n cols = var_name.split('[')[1].split(']')[0].strip()\n vertex = DataFlowVertex(previous, func_name+' '+cols+' '+params, func_name, params)\n previous = [vertex]\n else:\n log_list.append(line_cleansing(line))\n vertex = DataFlowVertex(previous, func_name+' '+params, func_name, params)\n previous = [vertex]\n\n\n # filter operation: \"df[[cols]]\", \"df[condition]\",\"df.loc[]\",\"df.iloc[]\"\n else:\n if '[[' in line:\n is_filter = re.search('\\[([^\\]]*)\\]',line) #\"[...]\"\n else:\n is_filter = re.search('\\(([^\\)]*)\\)',line) #\"[...]\"\n if is_filter:\n log_list.append(line_cleansing(line))\n filter_cond = is_filter.group(1).strip('[').strip(']')\n vertex = DataFlowVertex(previous, 'select '+filter_cond, 'filter', filter_cond)\n previous = [vertex]\n\n graph.append(vertex)\n\n return graph, previous, log_list\n\n\ndef sklearn_to_dataflow_graph(pipeline, log_list, parent_vertices=[]):\n \"\"\"\n Function translating sklearn operations to DAGs.\n\n args:\n pipeline_func: raw pipeline codes. String.\n log_list: log_list storing all operation identifiers. List.\n parent_vertices: parent nodes. default to be None. No operations before pandas. 
List.\n\n return:\n graph: list of nodes in the eligible format from graphviz.\n log_list: log_list storing all operation identifiers.\n \"\"\"\n graph = pipeline_to_dataflow_graph_full(pipeline)\n graph_dict = pipeline_to_dataflow_graph(pipeline)\n for node in graph_dict:\n log_list.append(line_cleansing(f\"{node.name}__{str(node.operation).split('(')[0]}\"))\n for node in graph:\n if node.parent_vertices==[]:\n node.parent_vertices = parent_vertices\n return graph, log_list\n\ndef visualize(nested_graph, log_list, save_path, dag_save):\n \"\"\"\n Use graphvis to generate DAGs from graph list generated from pandas_to_dataflow_graph and sklearn_to_dataflow_graph.\n\n args:\n nested_graph: graph list generated from pandas_to_dataflow_graph and sklearn_to_dataflow_graph. List.\n log_list: log_list storing all operation identifiers. List.\n save_path: path used for saving DAG. String.\n dag_save: format of DAG to be saved. String.\n\n return:\n dot: graphviz DAG object.\n rand_rgb: color list storing the sequence of color used for DAG nodes.\n \"\"\"\n no_nodes = len(log_list)\n rand_rgb = ['#191970', '#ff0000', '#006400', '#32cd32', '#ffd700', '#9932cc', '#ff69b4', '#8b4513', '#00ced1', '#d2691e'] if no_nodes <= 10 else [\"#\"+''.join([random.choice('0123456789ABCDEF') for j in range(6)]) for i in range(no_nodes)]\n dot = Digraph(comment='preprocessing_pipeline')\n dot.format = dag_save\n previous = {}\n for i, node in enumerate(nested_graph):\n node_name = node.name.replace('>=', '≥').replace('<=', '≤')[:50] +' ...' if len(node.name)>40 else node.name.replace('>=', '≥').replace('<=', '≤')\n dot.node(node.name, color = rand_rgb[i], fontcolor = rand_rgb[i], href = \"{{url_for('home', type=\"+str(log_list[i])+\")}}\", label = f'''<<font POINT-SIZE=\"14\"><b>{node_name}</b></font><br/><font POINT-SIZE=\"10\">{node.operation}</font>>''')\n # dot.node(node.name, color = rand_rgb[i], fontcolor = rand_rgb[i], href = \"{{url_for('home', type=\"+str(log_list[i])+\")}}\", label = f\"{node.name}\\n{node.operation}\")\n parents = node.parent_vertices\n\n for parent in parents:\n dot.edge(parent.name, node.name)\n\n if not os.path.exists(save_path+'/DAG'):\n os.mkdir(save_path+'/DAG')\n dot.render(save_path+'/DAG/pipeline', view=False)\n return dot, rand_rgb\n\n\n# ## Combine and make Func Wrapper\n\n\ndef tracer(cat_col, numerical_col, sensi_atts, target_name, training = True, save_path = '', dag_save = 'pdf'):\n \"\"\"\n combines describe_ver(generate intermediate dict) and visualize(DAG generation).\n\n args:\n cat_col: catagorical attributes used for tracing changes. String.\n numerical_col: numerical attributes used for tracing changes. String.\n sensi_atts: sensible attributes used to generate static labels. String.\n target_name: target attributes. String.\n training: Indicator for training session. False to be testing. Used for classifier train/val. Bool.\n save_path: save_path specified to save intermediate dict files. String.\n dag_save: format of DAG to be saved. String.\n\n return:\n function wrapper. 
all outputs saved to save_path using pickle.\n\n \"\"\"\n def wrapper(func):\n def call(*args, **kwargs):\n if not os.path.exists('experiments'):\n os.mkdir('experiments')\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n if not os.path.exists(save_path+'/checkpoints'):\n os.mkdir(save_path+'/checkpoints')\n if not os.path.exists(save_path+'/checkpoints/csv'):\n os.mkdir(save_path+'/checkpoints/csv')\n if training and not os.path.exists(save_path+'/checkpoints/csv/training'):\n os.mkdir(save_path+'/checkpoints/csv/training')\n if not training and not os.path.exists(save_path+'/checkpoints/csv/testing'):\n os.mkdir(save_path+'/checkpoints/csv/testing')\n\n log_dict, plot_dict, target_df, clf = describe_ver(func, cat_col, numerical_col, sensi_atts, target_name, training, save_path)\n pickle.dump(clf, open(save_path+\"/checkpoints/clf.p\", \"wb\"))\n if training:\n pickle.dump(target_df, open(save_path+\"/checkpoints/target_df_train.p\", \"wb\"))\n pickle.dump(log_dict, open(save_path+\"/checkpoints/log_dict_train.p\", \"wb\"))\n pickle.dump(plot_dict, open(save_path+\"/checkpoints/plot_dict_train.p\", \"wb\"))\n else:\n pickle.dump(target_df, open(save_path+\"/checkpoints/target_df_test.p\", \"wb\"))\n pickle.dump(log_dict, open(save_path+\"/checkpoints/log_dict_test.p\", \"wb\"))\n pickle.dump(plot_dict, open(save_path+\"/checkpoints/plot_dict_test.p\", \"wb\"))\n log_list = []\n pd_graph, parent_vertices, log_list = pd_to_dataflow_graph(func, log_list)\n pipeline = func(*args, **kwargs)\n sklearn_graph, log_list = sklearn_to_dataflow_graph(pipeline, log_list, parent_vertices)\n pd_graph.extend(sklearn_graph)\n _, rand_rgb = visualize(pd_graph, log_list, save_path, dag_save)\n colors = dict(zip(log_list, rand_rgb))\n if training:\n pickle.dump(colors, open(save_path+\"/checkpoints/rand_color_train.p\", \"wb\"))\n pickle.dump(log_list, open(save_path+\"/checkpoints/log_list_dag_train.p\", \"wb\"))\n else:\n pickle.dump(colors, open(save_path+\"/checkpoints/rand_color_test.p\", \"wb\"))\n pickle.dump(log_list, open(save_path+\"/checkpoints/log_list_dag_test.p\", \"wb\"))\n return call\n return wrapper\n"
] |
[["pandas.set_option", "numpy.set_printoptions", "pandas.DataFrame"]] |
baofff/stability_ho | ["1fa378209acde9c223855659c43f5ae842d37eb4"] | ["core/datasets.py"] | [
"import torch\nimport torch.nn.functional as F\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nimport random\nfrom collections import defaultdict\nfrom torch.utils.data import Dataset\n\n\nclass PMLabel(object):\n def __init__(self, num_classes):\n self.num_classes = num_classes\n\n def __call__(self, label):\n onehot = F.one_hot(torch.tensor(label), num_classes=self.num_classes).float()\n return onehot * 2. - 1.\n\n\nclass Flatten(object):\n def __call__(self, tensor):\n return tensor.view(-1)\n\n\nclass QuickDataset(Dataset):\n def __init__(self, array):\n self.array = array\n\n def __len__(self):\n return len(self.array)\n\n def __getitem__(self, item):\n return self.array[item]\n\n\nclass CorruptedMnist(object):\n def __init__(self, width, task, flatten):\n self.task = task\n _transform = [transforms.Resize(width), transforms.ToTensor()]\n if flatten:\n _transform.append(Flatten())\n target_transform = None\n if task == 'regression':\n target_transform = PMLabel(10)\n self.dst = datasets.MNIST('workspace/datasets/mnist', train=True, transform=transforms.Compose(_transform), target_transform=target_transform, download=True)\n self.idxes = list(range(len(self.dst)))\n random.shuffle(self.idxes)\n self.current_p = 0\n\n def _get_corrupted_data(self, m):\n assert m % 2 == 0\n data_lst = []\n for i, k in enumerate(range(self.current_p, self.current_p + m)):\n x, y = self.dst[self.idxes[k]]\n if k >= self.current_p + m // 2:\n y = random.randint(0, 9)\n data_lst.append((i, x, y))\n self.current_p += m\n return QuickDataset(data_lst)\n\n def _get_clean_data(self, m):\n data_lst = []\n for k in range(self.current_p, self.current_p + m):\n data_lst.append(self.dst[self.idxes[k]])\n self.current_p += m\n return QuickDataset(data_lst)\n\n def get_data(self, m_tr, m_val, m_te, m_mval, dim):\n if self.current_p + m_tr + m_val + m_te > len(self.dst):\n self.current_p = 0\n random.shuffle(self.idxes)\n return self._get_corrupted_data(m_tr), self._get_clean_data(m_val), self._get_clean_data(m_te), self._get_clean_data(m_mval)\n\n\nclass Omniglot(object):\n r\"\"\"\n 964 classes in background, 659 classes in evaluation\n 20 samples for each class\n \"\"\"\n def __init__(self, width, flatten, num_classes):\n self.num_classes = num_classes\n _transform = [transforms.Resize(width), transforms.ToTensor()]\n if flatten:\n _transform.append(Flatten())\n dst = datasets.Omniglot('workspace/datasets/omniglot', transform=transforms.Compose(_transform), download=True)\n\n background_classes = list(range(964))\n random.shuffle(background_classes)\n chosen_classes = background_classes[:num_classes]\n class_idx_map = {cls: idx for idx, cls in enumerate(chosen_classes)}\n\n all_class_dct = defaultdict(list)\n for z in dst:\n all_class_dct[z[1]].append(z)\n\n self.idx_dct = {}\n for cls in chosen_classes:\n idx = class_idx_map[cls]\n self.idx_dct[idx] = [(1. 
- x, idx) for x, y in all_class_dct[cls]]\n\n self.pt = 0\n\n def shuffle_dataset(self):\n for idx, lst in self.idx_dct.items():\n random.shuffle(lst)\n\n def _get_data(self, m):\n assert m % self.num_classes == 0\n m_per_cls = m // self.num_classes\n if m_per_cls >= 20:\n raise ValueError\n if self.pt + m_per_cls > 20:\n self.shuffle_dataset()\n self.pt = 0\n res = []\n for idx in range(self.num_classes):\n res.extend(self.idx_dct[idx][self.pt: self.pt + m_per_cls])\n self.pt += m_per_cls\n return QuickDataset(res)\n\n def get_data(self, m_tr, m_val, m_te, m_mval, dim):\n return self._get_data(m_tr), self._get_data(m_val), self._get_data(m_te), self._get_data(m_mval)\n\n"
] |
[["torch.tensor"]] |
QROST/SHMS_2015 | ["975c1c5837513260c6741c8f53d693f458c544b1"] | ["bridge.py"] | [
"# coding: utf-8\r\n\r\nimport numpy as np\r\n\r\nm_span = 50.0 # m 主跨\r\ns_span = 50.0 # m 边跨\r\nE = 36500000000 # Pa # C65混凝土\r\nI = 1.24 # m^4 # T形,上翼缘宽度3.5m,梁高3.3m,翼缘厚0.2m,腹板厚0.25m\r\nm = 148600.0 / 25 # kg/m # (3.5*0.2+2.1*0.2)*50*26.0/9.8 # 预应力混凝土重力密度 26kN/m^3\r\ndamp_c = 50000\r\n\r\n\r\nclass Bridge:\r\n def __init__(self, bridge_id=\"\", m_span=m_span, s_span=s_span, EI=E * I, m=m, damp_c=damp_c):\r\n self.bridge_id = bridge_id\r\n self.m_span = m_span # 中跨\r\n self.s_span = s_span # 左右边跨\r\n self.EI = EI\r\n self.m = m\r\n self.damp_c = damp_c\r\n self.num_mode = 0\r\n\r\n self.omegaList = [] # 模态固有频率列表\r\n self.zetaList = [] # 模态阻尼比列表\r\n self.modeShapeList = [] # 模态向量列表(每个元素也是列表,维数与坐标维数一致)\r\n self.x = None\r\n\r\n def SetSegmentNumber(self, num_segment=128):\r\n self.x = np.linspace(0, self.m_span, num_segment)\r\n\r\n def ModeAnalyze(self, num_mode=3):\r\n self.num_mode = num_mode\r\n for i in range(1, num_mode + 1):\r\n omega = (i * np.pi / self.m_span) * (i * np.pi / self.m_span) * np.sqrt(self.EI / self.m)\r\n self.omegaList.append(omega)\r\n\r\n zeta = self.damp_c / self.m / 2.0 / omega\r\n self.zetaList.append(zeta)\r\n\r\n y = np.sin(i * np.pi * self.x / self.m_span)\r\n self.modeShapeList.append(y)\r\n"
] |
[["numpy.sqrt", "numpy.linspace", "numpy.sin"]] |
jjerphan/pils | ["a7b3f4bd8204f56b24c793c9f8a32df80d7d4e9c"] | ["pils/problems/tsp/optimizers.py"] | [
"import os\nimport csv\nimport numpy as np\n\nimport optunity\nfrom hyperopt import hp, tpe, fmin\n\nfrom pils.optimizers import Optimizer\nfrom pils.settings import BIN_FOLDER, clean_lines\nfrom pils.problems.tsp.settings import TSP_INSTANCES_FOLDER, TSP_INSTANCES, NAIVE_COST_CSV, OPT_COST_CSV, \\\n TSP_OPTS_FOLDER\n\n\nclass LocalTSPOptimizerTrait(Optimizer):\n \"\"\"\n An abstract class to evaluate problem on local tsp directly.\n\n \"\"\"\n\n def __init__(self, algo_path, opt_cost_csv_file: str=OPT_COST_CSV,\n naive_cost_csv_file: str=NAIVE_COST_CSV,\n tsp_instances: str=TSP_INSTANCES,\n tsp_instances_folder: str=TSP_INSTANCES_FOLDER,\n tsp_opts_folder: str=TSP_OPTS_FOLDER,\n bin_folder: str=BIN_FOLDER):\n super().__init__(algo_path)\n\n self._opt_cost_csv_file = opt_cost_csv_file\n self._naive_cost_csv_file = naive_cost_csv_file\n self._tsp_instances = tsp_instances\n self._tsp_instances_folder = tsp_instances_folder\n self._tsp_opts_folder = tsp_opts_folder\n self._bin_folder = bin_folder\n if not(os.path.exists(self._bin_folder)):\n os.makedirs(self._bin_folder)\n\n self._dist_matrices = {\n instance: self._distance_matrix(os.path.join(self._tsp_instances_folder, instance)) \\\n for instance in self._tsp_instances\n }\n self._opt_costs = self._csv_to_dict(self._opt_cost_csv_file)\n self._naive_costs = self._csv_to_dict(self._naive_cost_csv_file)\n\n @staticmethod\n def _parse_str(str, type):\n coords = tuple(map(type, str.split()))\n return coords\n\n @staticmethod\n def _parse_floats(str):\n return LocalTSPOptimizerTrait._parse_str(str, float)\n\n @staticmethod\n def _parse_ints(str):\n return LocalTSPOptimizerTrait._parse_str(str, int)\n\n @staticmethod\n def _get_opt_tour(opt_file):\n \"\"\"\n Returns the optimal solution for a given file.\n\n :param opt_file: file containing the optimal solution\n :return:\n \"\"\"\n with open(opt_file, \"r\") as f:\n lines = clean_lines(f.readlines())\n\n # 1 indexing to 0 indexing\n tour = np.array(list(map(LocalTSPOptimizerTrait._parse_ints, lines))).flatten() - 1\n return tour\n\n @staticmethod\n def _distance_matrix(file_case):\n \"\"\"\n Return the distance matrix for a specific test case\n\n :param file_case:\n :return:\n \"\"\"\n with open(file_case, \"r\") as f:\n lines = clean_lines(f.readlines())\n\n N = int(lines[0])\n del lines[0]\n\n points = np.array(list(map(LocalTSPOptimizerTrait._parse_floats, lines)))\n dist_matrix = np.zeros((N, N))\n\n for i in range(N):\n for j in range(i, N):\n dist_ij = np.sqrt(np.sum((points[i] - points[j]) ** 2))\n dist_matrix[i, j] = dist_ij\n dist_matrix[j, i] = dist_ij\n\n return dist_matrix\n\n @staticmethod\n def _get_cost(tour, dist_matrix):\n \"\"\"\n Return the cost of the solution of a test case using an algo.\n\n :param tour: a list on int\n :param dist_matrix: the distance matrix\n \"\"\"\n tour_edges = list(zip(tour[:-1], tour[1:]))\n tour_edges.append(([tour[-1]], tour[0]))\n\n # Compute the length of the tour\n cost = sum(list(map(lambda edge: dist_matrix[edge[0], edge[1]], tour_edges)))[0]\n return cost\n\n @staticmethod\n def _csv_to_dict(file):\n \"\"\"\n Construct map from info present in a csv file of the form:\n case, score\n a.tsp, 42.0\n b.tsp, 1337.0\n …, …\n\n :param file: path to the csv file\n :return: a default\n \"\"\"\n\n with open(file, \"r\") as f:\n lines = clean_lines(f.readlines())\n\n # Skip headers\n del lines[0]\n\n def make_key_value(line):\n parsed = line.split(\",\")\n return parsed[0], float(parsed[1])\n\n return dict(map(make_key_value, lines))\n\n 
@staticmethod\n def _get_tour(binary_path, test_case: str):\n \"\"\"\n Returns the tour of a given test case for a binary\n\n :param binary_path:\n :param test_case:\n :return:\n \"\"\"\n out_put = os.popen(\"{} < {}\".format(binary_path, test_case)).read()\n tour = list(map(int, filter(lambda x: x != \"\", out_put.split(\"\\n\"))))\n return tour\n\n @staticmethod\n def _score(cost, instance, opt_costs, naive_costs):\n \"\"\"\n Compute the score of a solution of an algo for a given instance\n :param cost: the cost of a solution\n :param instance: the name of the test case\n :return: the score of this test case\n \"\"\"\n opt = opt_costs[instance]\n naive = naive_costs[instance]\n return 0.02 ** ((cost - opt) / (naive - opt))\n\n def save_opt_costs(self, verbose):\n with open(file=self._opt_cost_csv_file, mode=\"w+\") as f:\n writer = csv.DictWriter(f=f, fieldnames=[\"test_case\", \"opt_cost\"])\n writer.writeheader()\n for instance in self._tsp_instances:\n dist_mat = self._distance_matrix(os.path.join(self._tsp_instances_folder, instance))\n tour = self._get_opt_tour(os.path.join(self._tsp_opts_folder, instance.replace(\".tsp\", \".opt.tour\")))\n cost = self._get_cost(tour, dist_mat)\n if verbose:\n print(\"Case {} of cost {}\".format(instance, cost))\n writer.writerow({\"test_case\": instance, \"opt_cost\": cost})\n\n def _get_final_score(self, hyperparameters, verbose):\n \"\"\"\n Compile the algo, run it return the final score\n \"\"\"\n binary = os.path.join(self._bin_folder, self._algo_name) + self._generate_id()\n macros = \" \".join(list(map(lambda key: '-D{}=\"{}\"'.format(key, hyperparameters[key]), hyperparameters)))\n compile_command = 'g++ {} {} -o {}'.format(self._algo_path, macros, binary)\n if verbose:\n print(\"Compiling with:\")\n print(compile_command)\n\n os.system(compile_command)\n if verbose:\n print(\"Done Compiling\")\n\n final_score = 0\n nb_cases = len(self._tsp_instances)\n for i, instance in enumerate(self._tsp_instances):\n dist_matrix = self._dist_matrices[instance]\n tour = self._get_tour(binary, os.path.join(self._tsp_instances_folder, instance))\n cost = self._get_cost(tour, dist_matrix)\n final_score += self._score(cost, instance, self._opt_costs, self._naive_costs)\n if verbose:\n print(\"{:3.2f} % Done {:>15s}\".format((i + 1) / nb_cases * 100, instance))\n\n return final_score\n\n\nclass LocalOptunityTSPOptimizer(LocalTSPOptimizerTrait):\n \"\"\"\n A local hyper-parameters optimizer using the `optunity` library.\n\n \"\"\"\n\n def __init__(self, algo_path):\n super().__init__(algo_path)\n\n def _get_best_hyperparameters(self, objective_function, method, number_evaluations):\n if method is None:\n method = \"tpe\"\n\n print(self._hyperparameters)\n best_hyperparameters, extra_info, solver_info = optunity.minimize(objective_function,\n solver_name=method,\n num_evals=number_evaluations,\n **self._hyperparameters)\n return best_hyperparameters\n\n\nclass LocalHyperOptTSPOptimizer(LocalTSPOptimizerTrait):\n \"\"\"\n A local hyper-parameters optimizer using the `hyperopt` library.\n\n See https://github.com/hyperopt/hyperopt/wiki/FMin for usage\n\n \"\"\"\n\n def __init__(self, algo_path):\n super().__init__(algo_path)\n\n def _get_best_hyperparameters(self, objective_function, method, number_evaluations):\n hyper_opt_hp = dict(map(lambda key: (key, hp.uniform(key, *self._hyperparameters[key])), self._hyperparameters))\n\n if method is None:\n method = tpe.suggest\n\n # A tweak\n def unfold_positional_args(x):\n return objective_function(**x)\n\n 
best_hyperparameters = fmin(unfold_positional_args,\n hyper_opt_hp,\n algo=method,\n max_evals=number_evaluations)\n\n return best_hyperparameters\n"
] |
[["numpy.zeros", "numpy.sum"]] |
cdigap/Python_Project_2018 | ["136e70fb781ebd7aede0f2f11e57fb8f64ee0e22"] | ["Iris_versicolor.py"] | [
"# Ashok Gangadharan 2018-04-09\n# Python Project...\n# \n# Plotting Graph for the different Iris Setosa flower , Average Sepal & Petal data\n#\n\nimport matplotlib.pyplot as plt\nimport csv\n\nx = []\ny = []\na = []\nb = []\ncount = 0\nsl = 0\nsw = 0\npl = 0\npw = 0\nasl = 0\nasw = 0\napl = 0\napw = 0\n\n\ndef avg(S,C):\n \"\"\"This Function returns the average of the numbers given\"\"\"\n avg = 0\n avg = S/C\n\n return round(avg,2)\n\nwith open('iris_data.csv','r') as csvfile:\n plots = csv.reader(csvfile, delimiter=',')\n for row in plots:\n if row[4] == \"Iris-versicolor\":\n x.append(float(row[0]))\n y.append(float(row[1]))\n a.append(float(row[2]))\n b.append(float(row[3]))\n count += 1\n sl += float(row[0])\n sw += float(row[1])\n pl += float(row[2])\n pw += float(row[3])\n \nasl = avg(sl,count)\nasw = avg(sw,count)\napl = avg(pl,count)\napw = avg(pw,count)\n\n# calling plt to create a scatter graph\n\nplt.scatter(asl,asw, marker=\"o\", label='Sepal',color=['red','green'])\n\nplt.scatter(apl,apw, marker=\"*\", label='Petal',color=['yellow','blue'])\n\n\nplt.ylabel('Length')\nplt.xlabel('Width')\nplt.title('Fishers Iris Data\\n Iris-versicolor')\nplt.legend()\nplt.show()\n"
] |
[["matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.scatter", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel"]] |
XuCheney/Face_Recognition | ["9112439e3ba37f0ba1bd7665da2c28d8543bf364"] | ["face_recognition.py"] | [
"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n# author:cheney<XZCheney@gmail.com>\n# 人脸识别\n\nimport os\nimport sys\nimport cv2\nimport dlib\nimport queue\nimport logging\nimport logging.config\nimport threading\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\n\nfrom PyQt5.QtCore import QTimer, pyqtSignal, Qt\nfrom PyQt5.QtGui import QImage, QPixmap, QIcon, QTextCursor\nfrom PyQt5.QtWidgets import QApplication,QWidget\nfrom PyQt5.uic import loadUi\n\n\nclass UI_face_reco(QWidget):\n receiveLogSignal = pyqtSignal(str)\n\n def __init__(self):\n super(UI_face_reco, self).__init__()\n loadUi('ui/face_reco.ui', self)\n self.setWindowIcon(QIcon('icons/icon.png'))\n self.setWindowTitle('人脸识别')\n self.setFixedSize(1040, 610)\n\n self.cap = cv2.VideoCapture()\n self.detector = dlib.get_frontal_face_detector()\n self.predictor = dlib.shape_predictor('models/shape_predictor_68_face_landmarks.dat')\n self.face_reco = dlib.face_recognition_model_v1(\"models/face_recognition_resnet_model_v1.dat\")\n\n self.info_csv = \"data\\\\info.csv\" # 人员信息\n self.features_csv = \"data\\\\features.csv\" #人脸特征 从人脸图像中提取人脸特征\n self.features_known_arr = self.return_features_known_arr()\n\n self.font = cv2.FONT_HERSHEY_COMPLEX\n self.logQueue = queue.Queue() # 日志队列\n\n # 摄像头\n self.isLocalCameraEnabled = False\n self.CheckBox_uselocalcamera.stateChanged.connect(self.use_local_camera)\n\n self.btn_opencam.clicked.connect(self.opencam)\n\n # 计时器\n self.timer_camera = QTimer(self)\n self.timer_camera.timeout.connect(self.show_camera)\n\n # 调试模式\n self.isDebugMode = False\n self.confidenceThreshold = 0.4\n self.CheckBox_debug.stateChanged.connect(self.enableDebug)\n self.SpinBox_Threshold.valueChanged.connect(self.setConfidenceThreshold)\n\n # 日志系统\n self.receiveLogSignal.connect(self.logOutput)\n self.logOutputThread = threading.Thread(target=self.receiveLog, daemon=True)\n self.logOutputThread.start()\n\n # 读取已知人脸特征数据\n def return_features_known_arr(self):\n if not os.path.isfile(self.info_csv):\n logging.error('系统找不到人员信息文件{}'.format(self.info_csv))\n self.logQueue.put('Error:系统找不到人员信息文件,请先录入相关人脸信息!')\n elif not os.path.isfile(self.features_csv):\n logging.error('系统找不到人脸特征{}'.format(self.features_csv))\n self.logQueue.put('Error:未找不到人脸特征文件,请先提取人脸特征!')\n else:\n self.features_reader = pd.read_csv(self.features_csv, header=None)\n self.info_reader = pd.read_csv(self.info_csv, header=None)\n\n features_known_arr = []\n for i in range(self.features_reader.shape[0]):\n features_someone_arr = []\n for j in range(0, len(self.features_reader.iloc[i, :])):\n features_someone_arr.append(self.features_reader.iloc[i, :][j])\n features_known_arr.append(features_someone_arr)\n\n self.LcdNum_faces.display(len(features_known_arr))\n\n return features_known_arr\n\n # 是否使用本地摄像头\n def use_local_camera(self, state):\n if state == Qt.Checked:\n self.isLocalCameraEnabled = True\n else:\n self.isLocalCameraEnabled = False\n\n # 打开摄像头\n def opencam(self):\n if self.isLocalCameraEnabled:\n CAMNUM = 1\n else:\n CAMNUM = 0\n\n if self.timer_camera.isActive() == False:\n ok = self.cap.open(CAMNUM)\n self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\n if not ok:\n logging.error('无法调用电脑摄像头{}'.format(CAMNUM))\n self.logQueue.put('Error:初始化摄像头失败')\n self.cap.release()\n self.btn_opencam.setIcon(QIcon('icons/error.png'))\n else:\n self.btn_opencam.setText(u'关闭摄像头')\n self.timer_camera.start(5)\n self.btn_opencam.setIcon(QIcon('icons/success.png'))\n else:\n self.timer_camera.stop()\n 
self.cap.release()\n self.label_show_camera.clear()\n self.label_show_camera.setText(u'<font color=red>摄像头未开启</font>')\n self.btn_opencam.setText(u'打开摄像头')\n self.btn_opencam.setIcon(QIcon())\n\n # 显示图像\n def show_camera(self):\n ok, frame = self.cap.read()\n self.displayImage(frame)\n\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n faceRects = self.detector(frame_gray, 0)\n\n positions_list = [] # 人脸坐标\n names_list = [] # 姓名\n\n if len(faceRects) != 0:\n # 获取当前捕获到的图像的所有人脸的特征,存储到 features_arr\n features_arr = []\n for i in range(len(faceRects)):\n shape = self.predictor(frame, faceRects[i])\n features_arr.append(self.face_reco.compute_face_descriptor(frame, shape))\n names_list.append(\"unknown\")\n\n # 每个捕获人脸的名字坐标\n positions_list.append(tuple(\n [faceRects[i].left(), int(faceRects[i].bottom() + (faceRects[i].bottom() - faceRects[i].top()) / 4)]))\n\n e_distance_list = []\n for k in range(len(self.features_known_arr)):\n if str(self.features_known_arr[k][0]) != '0.0':\n e_distance = self.return_euclidean_distance(features_arr[i], self.features_known_arr[k])\n e_distance_list.append(e_distance)\n else:\n e_distance_list.append(999999999)\n the_most_similar_person = e_distance_list.index(min(e_distance_list))\n\n if min(e_distance_list) < self.confidenceThreshold :\n names_list[i] = self.info_reader.iloc[the_most_similar_person, 0].split('_')[0]\n cv2.putText(frame, names_list[i], positions_list[i], self.font, 1, (0, 255, 255), 1, cv2.LINE_AA)\n self.logQueue.put('人脸识别成功,欢迎{}!'.format(names_list[i]))\n else:\n cv2.putText(frame, names_list[i], positions_list[i], self.font, 1, (0, 255, 255), 1, cv2.LINE_AA)\n logging.error('Error:{}尝试非法闯入!'.format(names_list[i]))\n self.logQueue.put('Error:{}尝试非法闯入!'.format(names_list[i]))\n\n for faceRect in faceRects:\n x = faceRect.left()\n y = faceRect.top()\n w = faceRect.right() - faceRect.left()\n h = faceRect.bottom() - faceRect.top()\n cv2.rectangle(frame, (x - 10, y - 10), (x + w + 10, y + h + 10), (0, 255, 255), 2)\n self.displayImage(frame)\n # else:\n # self.displayImage(frame)\n\n def displayImage(self, img):\n img = cv2.resize(img, (640, 480))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n ShowImage = QImage(img.data, img.shape[1], img.shape[0], QImage.Format_RGB888)\n self.label_show_camera.setPixmap(QPixmap.fromImage(ShowImage))\n\n # 计算两个128D向量间的欧式距离\n def return_euclidean_distance(self,feature_1, feature_2):\n feature_1 = np.array(feature_1)\n feature_2 = np.array(feature_2)\n dist = np.sqrt(np.sum(np.square(feature_1 - feature_2)))\n return dist\n\n # 是否开启调试模式\n def enableDebug(self,state):\n if state == Qt.Checked:\n self.isDebugMode = True\n self.logQueue.put('Debug模式已开启!')\n else:\n self.isDebugMode = False\n self.logQueue.put('Debug模式已关闭!')\n\n # 设置置信度阈值\n def setConfidenceThreshold(self):\n if self.isDebugMode:\n self.confidenceThreshold = self.SpinBox_Threshold.value()\n self.logQueue.put('当前置信度阈值为{}!'.format(self.confidenceThreshold))\n\n # 系统日志服务常驻,接收并处理系统日志\n def receiveLog(self):\n while True:\n data = self.logQueue.get()\n if data:\n self.receiveLogSignal.emit(data)\n else:\n continue\n\n # LOG输出\n def logOutput(self, log):\n # 获取当前系统时间\n time = datetime.now().strftime('[%Y/%m/%d %H:%M:%S]')\n log = time + ' ' + log + '\\n'\n self.TextEdit_log.moveCursor(QTextCursor.End)\n self.TextEdit_log.insertPlainText(log)\n self.TextEdit_log.ensureCursorVisible() # 自动滚屏\n\n # 窗口关闭事件,关闭定时器、摄像头\n def closeEvent(self, event):\n if self.timer_camera.isActive():\n self.timer_camera.stop()\n\n if self.cap.isOpened():\n 
self.cap.release()\n event.accept()\n\nif __name__ == '__main__':\n logging.config.fileConfig('config/logging.conf')\n app = QApplication(sys.argv)\n w = UI_face_reco()\n w.show()\n sys.exit(app.exec())\n"
] |
[["numpy.square", "numpy.array", "pandas.read_csv"]] |
Tobias-Fischer/ros_people_object_detection_tensorflow | ["2a0af311b4eef55c053bd2349e1dff10abe1f32a", "2a0af311b4eef55c053bd2349e1dff10abe1f32a"] | ["src/object_detection/models/ssd_inception_v3_feature_extractor.py", "src/object_detection/model_test_util.py"] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"SSDFeatureExtractor for InceptionV3 features.\"\"\"\nimport tensorflow.compat.v1 as tf\n\nfrom object_detection.meta_architectures import ssd_meta_arch\nfrom object_detection.models import feature_map_generators\nfrom object_detection.utils import ops\nfrom object_detection.utils import shape_utils\nfrom nets import inception_v3\n\nslim = tf.contrib.slim\n\n\nclass SSDInceptionV3FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):\n \"\"\"SSD Feature Extractor using InceptionV3 features.\"\"\"\n\n def __init__(self,\n is_training,\n depth_multiplier,\n min_depth,\n pad_to_multiple,\n conv_hyperparams,\n batch_norm_trainable=True,\n reuse_weights=None,\n use_explicit_padding=False,\n use_depthwise=False):\n \"\"\"InceptionV3 Feature Extractor for SSD Models.\n\n Args:\n is_training: whether the network is in training mode.\n depth_multiplier: float depth multiplier for feature extractor.\n min_depth: minimum feature extractor depth.\n pad_to_multiple: the nearest multiple to zero pad the input height and\n width dimensions to.\n conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.\n batch_norm_trainable: Whether to update batch norm parameters during\n training or not. When training with a small batch size\n (e.g. 1), it is desirable to disable batch norm update and use\n pretrained batch norm params.\n reuse_weights: Whether to reuse variables. Default is None.\n use_explicit_padding: Whether to use explicit padding when extracting\n features. Default is False.\n use_depthwise: Whether to use depthwise convolutions. 
Default is False.\n \"\"\"\n super(SSDInceptionV3FeatureExtractor, self).__init__(\n is_training, depth_multiplier, min_depth, pad_to_multiple,\n conv_hyperparams, batch_norm_trainable, reuse_weights,\n use_explicit_padding, use_depthwise)\n\n def preprocess(self, resized_inputs):\n \"\"\"SSD preprocessing.\n\n Maps pixel values to the range [-1, 1].\n\n Args:\n resized_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n\n Returns:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n \"\"\"\n return (2.0 / 255.0) * resized_inputs - 1.0\n\n def extract_features(self, preprocessed_inputs):\n \"\"\"Extract features from preprocessed inputs.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n\n Returns:\n feature_maps: a list of tensors where the ith tensor has shape\n [batch, height_i, width_i, depth_i]\n \"\"\"\n preprocessed_inputs = shape_utils.check_min_image_dim(\n 33, preprocessed_inputs)\n\n feature_map_layout = {\n 'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],\n 'layer_depth': [-1, -1, -1, 512, 256, 128],\n 'use_explicit_padding': self._use_explicit_padding,\n 'use_depthwise': self._use_depthwise,\n }\n\n with slim.arg_scope(self._conv_hyperparams):\n with tf.variable_scope('InceptionV3', reuse=self._reuse_weights) as scope:\n _, image_features = inception_v3.inception_v3_base(\n ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),\n final_endpoint='Mixed_7c',\n min_depth=self._min_depth,\n depth_multiplier=self._depth_multiplier,\n scope=scope)\n feature_maps = feature_map_generators.multi_resolution_feature_maps(\n feature_map_layout=feature_map_layout,\n depth_multiplier=self._depth_multiplier,\n min_depth=self._min_depth,\n insert_1x1_conv=True,\n image_features=image_features)\n\n return feature_maps.values()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common utils for tests for object detection tflearn model.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport os\nimport tempfile\nimport tensorflow.compat.v1 as tf\n\n\nfrom object_detection import model\nfrom object_detection import model_hparams\n\nFLAGS = tf.flags.FLAGS\n\nFASTER_RCNN_MODEL_NAME = 'faster_rcnn_resnet50_pets'\nSSD_INCEPTION_MODEL_NAME = 'ssd_inception_v2_pets'\nPATH_BASE = 'google3/third_party/tensorflow_models/object_detection/'\n\n\ndef GetPipelineConfigPath(model_name):\n \"\"\"Returns path to the local pipeline config file.\"\"\"\n return os.path.join(FLAGS.test_srcdir, PATH_BASE, 'samples', 'configs',\n model_name + '.config')\n\n\ndef InitializeFlags(model_name_for_test):\n FLAGS.model_dir = tempfile.mkdtemp()\n FLAGS.pipeline_config_path = GetPipelineConfigPath(model_name_for_test)\n\n\ndef BuildExperiment():\n \"\"\"Builds an Experiment object for testing purposes.\"\"\"\n run_config = tf.contrib.learn.RunConfig()\n hparams = model_hparams.create_hparams(\n hparams_overrides='load_pretrained=false')\n\n # pylint: disable=protected-access\n experiment_fn = model.build_experiment_fn(10, 10)\n # pylint: enable=protected-access\n return experiment_fn(run_config, hparams)\n"
] |
[["tensorflow.compat.v1.variable_scope"], ["tensorflow.compat.v1.contrib.learn.RunConfig"]] |
ElegantLin/CVWC-2019 | ["41c3d35c8a5eb21d109da137b75a872def301765"] | ["reid/main.py"] | [
"# Creator: Tennant\n# Email: Tennant_1999@outlook.com\n\nimport os\nimport os.path as osp\n\n# PyTorch as the main lib for neural network\nimport torch\ntorch.backends.cudnn.benchmark = True\ntorch.multiprocessing.set_sharing_strategy('file_system')\nimport torch.nn as nn\nimport torchvision as tv\nimport numpy as np\n\n# Use visdom for moniting the training process\nimport visdom\nfrom utils import Visualizer\nfrom utils import setup_logger\nfrom utils import rank_list_to_im\n\n# Use yacs for training config management\n# argparse for overwrite\nfrom config import cfg\nimport argparse\n\n# import losses and model\nfrom losses import make_loss\nfrom model import build_model, convert_model\nfrom trainer import BaseTrainer, PCBTrainer\n\n# dataset\nfrom dataset import make_dataloader\n\nfrom optim import make_optimizer, WarmupMultiStepLR\n\n\nfrom evaluate import eval_func, euclidean_dist, re_rank\nfrom tqdm import tqdm\nimport shutil\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"ReID training\")\n parser.add_argument('-c', '--config_file', type=str,\n help='the path to the training config')\n parser.add_argument('-t', '--test', action='store_true',\n default=False, help='Model test')\n parser.add_argument('--local_rank', default=0, type=int)\n parser.add_argument('opts', help='overwriting the training config' \n 'from commandline', default=None,\n nargs=argparse.REMAINDER)\n args = parser.parse_args()\n return args\n\ndef main():\n args = parse_args()\n if args.test:\n test(args)\n else:\n train(args)\n\ndef train(args):\n if args.config_file != \"\":\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n output_dir = cfg.OUTPUT_DIR\n if output_dir and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n shutil.copy(args.config_file, cfg.OUTPUT_DIR)\n\n num_gpus = torch.cuda.device_count()\n\n logger = setup_logger('reid_baseline', output_dir, 0)\n logger.info('Using {} GPUS'.format(num_gpus))\n logger.info(args)\n logger.info('Running with config:\\n{}'.format(cfg))\n\n train_dl, val_dl, num_query, num_classes = make_dataloader(cfg, num_gpus) \n\n model = build_model(cfg, num_classes)\n\n loss_func = make_loss(cfg, num_classes)\n\n if cfg.MODEL.NAME == 'pcb_model':\n trainer = PCBTrainer(cfg, model, train_dl, val_dl,\n loss_func, num_query, num_gpus)\n else:\n trainer = BaseTrainer(cfg, model, train_dl, val_dl,\n loss_func, num_query, num_gpus)\n\n for epoch in range(trainer.epochs):\n for batch in trainer.train_dl:\n trainer.step(batch)\n trainer.handle_new_batch()\n trainer.handle_new_epoch()\n\ndef test(args):\n if args.config_file != \"\":\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n logger = setup_logger('reid_baseline.eval', cfg.OUTPUT_DIR, 0, train=False)\n\n logger.info('Running with config:\\n{}'.format(cfg))\n \n _, val_dl, num_query, num_classes = make_dataloader(cfg)\n\n model = build_model(cfg, num_classes)\n if cfg.TEST.MULTI_GPU:\n model = nn.DataParallel(model)\n model = convert_model(model)\n logger.info('Use multi gpu to inference')\n para_dict = torch.load(cfg.TEST.WEIGHT)\n model.load_state_dict(para_dict)\n model.cuda()\n model.eval()\n\n feats, pids, camids, paths = [], [], [], []\n with torch.no_grad():\n for batch in tqdm(val_dl, total=len(val_dl),\n leave=False):\n data, pid, camid, path = batch\n paths.extend(list(path))\n data = data.cuda()\n feat = model(data).detach().cpu()\n feats.append(feat)\n pids.append(pid)\n camids.append(camid)\n feats = 
torch.cat(feats, dim=0)\n pids = torch.cat(pids, dim=0)\n camids = torch.cat(camids, dim=0)\n\n query_feat = feats[:num_query]\n query_pid = pids[:num_query]\n query_camid = camids[:num_query]\n query_path = np.array(paths[:num_query])\n\n gallery_feat = feats[num_query:]\n gallery_pid = pids[num_query:]\n gallery_camid = camids[num_query:]\n gallery_path = np.array(paths[num_query:])\n \n distmat = euclidean_dist(query_feat, gallery_feat)\n\n cmc, mAP, all_AP = eval_func(distmat.numpy(), query_pid.numpy(), gallery_pid.numpy(), \n query_camid.numpy(), gallery_camid.numpy(),\n use_cython=True)\n \n if cfg.TEST.VIS:\n worst_q = np.argsort(all_AP)[:cfg.TEST.VIS_Q_NUM]\n qid = query_pid[worst_q]\n q_im = query_path[worst_q]\n\n ind = np.argsort(distmat, axis=1)\n gid = gallery_pid[ind[worst_q]][..., :cfg.TEST.VIS_G_NUM]\n g_im = gallery_path[ind[worst_q]][..., :cfg.TEST.VIS_G_NUM]\n\n for idx in range(cfg.TEST.VIS_Q_NUM):\n sid = qid[idx] == gid[idx]\n im = rank_list_to_im(range(len(g_im[idx])), sid, q_im[idx], g_im[idx])\n \n im.save(osp.join(cfg.OUTPUT_DIR,\n 'worst_query_{}.jpg'.format(str(idx).zfill(2))))\n\n\n logger.info('Validation Result:')\n for r in cfg.TEST.CMC:\n logger.info('CMC Rank-{}: {:.2%}'.format(r, cmc[r-1]))\n logger.info('mAP: {:.2%}'.format(mAP))\n logger.info('-' * 20)\n\n if not cfg.TEST.RERANK:\n return\n\n distmat = re_rank(query_feat, gallery_feat)\n cmc, mAP, all_AP = eval_func(distmat, query_pid.numpy(), gallery_pid.numpy(),\n query_camid.numpy(), gallery_camid.numpy(),\n use_cython=True)\n\n logger.info('ReRanking Result:')\n for r in cfg.TEST.CMC:\n logger.info('CMC Rank-{}: {:.2%}'.format(r, cmc[r-1]))\n logger.info('mAP: {:.2%}'.format(mAP))\n logger.info('-' * 20)\n\n\nif __name__ == '__main__':\n main()\n\n\n"
] |
[
[
"torch.cat",
"torch.load",
"torch.nn.DataParallel",
"torch.no_grad",
"numpy.argsort",
"torch.cuda.device_count",
"numpy.array",
"torch.multiprocessing.set_sharing_strategy"
]
] |
goodok/sgnn
|
[
"a1ea5023c5b7e4f1a66afd1daed10a60786e6ac1"
] |
[
"old_versions/sgnn_original_python2.7/torch/train.py"
] |
[
"from __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os, sys, time\nimport shutil\nimport random\nimport torch\nimport numpy as np\nimport gc\n\nimport data_util\nimport scene_dataloader\nimport model\nimport loss as loss_util\n\n\n# python train.py --gpu 0 --data_path ./data/completion_blocks_2cm_hierarchy/release_64-64-128 --train_file_list train_list.txt --val_file_list val_list.txt --save logs/mp\n\n# params\nparser = argparse.ArgumentParser()\n# data paths\nparser.add_argument('--gpu', type=int, default=0, help='which gpu to use')\nparser.add_argument('--data_path', required=True, help='path to data')\nparser.add_argument('--train_file_list', required=True, help='path to file list of train data')\nparser.add_argument('--val_file_list', default='', help='path to file list of val data')\nparser.add_argument('--save', default='./logs', help='folder to output model checkpoints')\n# model params\nparser.add_argument('--retrain', type=str, default='', help='model to load from')\nparser.add_argument('--input_dim', type=int, default=0, help='voxel dim.')\nparser.add_argument('--encoder_dim', type=int, default=8, help='pointnet feature dim')\nparser.add_argument('--coarse_feat_dim', type=int, default=16, help='feature dim')\nparser.add_argument('--refine_feat_dim', type=int, default=16, help='feature dim')\nparser.add_argument('--no_pass_occ', dest='no_pass_occ', action='store_true')\nparser.add_argument('--no_pass_feats', dest='no_pass_feats', action='store_true')\nparser.add_argument('--use_skip_sparse', type=int, default=1, help='use skip connections between sparse convs')\nparser.add_argument('--use_skip_dense', type=int, default=1, help='use skip connections between dense convs')\nparser.add_argument('--no_logweight_target_sdf', dest='logweight_target_sdf', action='store_false')\n# train params\nparser.add_argument('--num_hierarchy_levels', type=int, default=4, help='#hierarchy levels (must be > 1).')\nparser.add_argument('--num_iters_per_level', type=int, default=2000, help='#iters before fading in training for next level.')\nparser.add_argument('--truncation', type=float, default=3, help='truncation in voxels')\nparser.add_argument('--batch_size', type=int, default=8, help='input batch size')\nparser.add_argument('--start_epoch', type=int, default=0, help='start epoch')\nparser.add_argument('--max_epoch', type=int, default=5, help='number of epochs to train for')\nparser.add_argument('--save_epoch', type=int, default=1, help='save every nth epoch')\nparser.add_argument('--lr', type=float, default=0.001, help='learning rate, default=0.001')\nparser.add_argument('--decay_lr', type=int, default=10, help='decay learning rate by half every n epochs')\nparser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay.')\nparser.add_argument('--weight_sdf_loss', type=float, default=1.0, help='weight sdf loss vs occ.')\nparser.add_argument('--weight_missing_geo', type=float, default=5.0, help='weight missing geometry vs rest of sdf.')\nparser.add_argument('--vis_dfs', type=int, default=0, help='use df (iso 1) to visualize')\nparser.add_argument('--use_loss_masking', dest='use_loss_masking', action='store_true')\nparser.add_argument('--no_loss_masking', dest='use_loss_masking', action='store_false')\nparser.add_argument('--scheduler_step_size', type=int, default=0, help='#iters before scheduler step (0 for each epoch)')\n\nparser.set_defaults(no_pass_occ=False, no_pass_feats=False, logweight_target_sdf=True, 
use_loss_masking=True)\nargs = parser.parse_args()\nassert( not (args.no_pass_feats and args.no_pass_occ) )\nassert( args.weight_missing_geo >= 1)\nassert( args.num_hierarchy_levels > 1 )\nif args.input_dim == 0: # set default values\n args.input_dim = 2 ** (3+args.num_hierarchy_levels)\n #TODO FIX THIS PART\n if '64-64-128' in args.data_path:\n args.input_dim = (128, 64, 64)\n elif '96-96-160' in args.data_path:\n args.input_dim = (160, 96, 96)\n if '64-64-64' in args.data_path:\n args.input_dim = (64, 64, 64)\n\n\n args.input_dim = (64, 64, 64) # ok\n # args.input_dim = (160, 96, 96) # fail\n #args.input_dim = (128, 64, 64) # ok\n\nargs.input_nf = 1\nUP_AXIS = 0\nprint(args)\n\n# specify gpu\nos.environ['CUDA_VISIBLE_DEVICES']=str(args.gpu)\n\n# create model\nmodel = model.GenModel(args.encoder_dim, args.input_dim, args.input_nf, args.coarse_feat_dim, args.refine_feat_dim, args.num_hierarchy_levels, not args.no_pass_occ, not args.no_pass_feats, args.use_skip_sparse, args.use_skip_dense).cuda()\noptimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\nif args.retrain:\n print('loading model:', args.retrain)\n checkpoint = torch.load(args.retrain)\n args.start_epoch = args.start_epoch if args.start_epoch != 0 else checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict']) #, strict=False)\n optimizer.load_state_dict(checkpoint['optimizer'])\nlast_epoch = -1 if not args.retrain else args.start_epoch - 1\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.decay_lr, gamma=0.5, last_epoch=last_epoch)\n\n# data files\ntrain_files, val_files = data_util.get_train_files(args.data_path, args.train_file_list, args.val_file_list)\n_OVERFIT = False\nif len(train_files) == 1:\n _OVERFIT = True\n args.use_loss_masking = False\nnum_overfit_train = 0 if not _OVERFIT else 640\nnum_overfit_val = 0 if not _OVERFIT else 160\nprint('#train files = ', len(train_files))\nprint('#val files = ', len(val_files))\ntrain_dataset = scene_dataloader.SceneDataset(train_files, args.input_dim, args.truncation, args.num_hierarchy_levels, 0, num_overfit_train)\ntrain_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=2, collate_fn=scene_dataloader.collate)\nif len(val_files) > 0:\n val_dataset = scene_dataloader.SceneDataset(val_files, args.input_dim, args.truncation, args.num_hierarchy_levels, 0, num_overfit_val)\n print('val_dataset', len(val_dataset))\n val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=2, collate_fn=scene_dataloader.collate)\n\n_SPLITTER = ','\n\ndef print_log_info(epoch, iter, mean_train_losses, mean_train_l1pred, mean_train_l1tgt, mean_train_ious, mean_val_losses, mean_val_l1pred, mean_val_l1tgt, mean_val_ious, time, log):\n splitters = ['Epoch: ', ' iter: '] if log is None else ['', ',']\n values = [epoch, iter]\n values.extend(mean_train_losses)\n for h in range(len(mean_train_losses)):\n id = 'total' if h == 0 else str(h-1)\n id = 'sdf' if h + 1 == len(mean_train_losses) else id\n if log is None:\n splitters.append(' loss_train(' + id + '): ')\n else:\n splitters.append(',')\n values.extend([mean_train_l1pred, mean_train_l1tgt])\n if log is None:\n splitters.extend([' train_l1pred: ', ' train_l1tgt: '])\n else:\n splitters.extend([',', ','])\n values.extend(mean_train_ious)\n for h in range(len(mean_train_ious)):\n id = str(h)\n if log is None:\n splitters.append(' iou_train(' + id + '): ')\n else:\n 
splitters.append(',')\n if mean_val_losses is not None:\n values.extend(mean_val_losses)\n for h in range(len(mean_val_losses)):\n id = 'total' if h == 0 else str(h-1)\n id = 'sdf' if h + 1 == len(mean_val_losses) else id\n if log is None:\n splitters.append(' loss_val(' + id + '): ')\n else:\n splitters.append(',')\n values.extend([mean_val_l1pred, mean_val_l1tgt])\n if log is None:\n splitters.extend([' val_l1pred: ', ' val_l1tgt: '])\n else:\n splitters.extend([',', ','])\n values.extend(mean_val_ious)\n for h in range(len(mean_val_ious)):\n id = str(h)\n if log is None:\n splitters.append(' iou_val(' + id + '): ')\n else:\n splitters.append(',')\n else:\n splitters.extend([''] * (len(mean_train_losses) + len(mean_train_ious) + 2))\n values.extend([''] * (len(mean_train_losses) + len(mean_train_ious) + 2))\n values.append(time)\n if log is None:\n splitters.append(' time: ')\n else:\n splitters.append(',')\n info = ''\n for k in range(len(splitters)):\n if log is None and isinstance(values[k], float):\n info += splitters[k] + '{:.6f}'.format(values[k])\n else:\n info += splitters[k] + str(values[k])\n if log is None:\n print(info, file=sys.stdout)\n else:\n print(info, file=log)\n\ndef print_log(log, epoch, iter, train_losses, train_l1preds, train_l1tgts, train_ious, val_losses, val_l1preds, val_l1tgts, val_ious, time):\n train_losses = np.array(train_losses)\n train_l1preds = np.array(train_l1preds)\n train_l1tgts = np.array(train_l1tgts)\n train_ious = np.array(train_ious)\n mean_train_losses = [(-1 if np.all(x < 0) else np.mean(x[x >= 0])) for x in train_losses]\n mean_train_l1pred = -1 if (len(train_l1preds) == 0 or np.all(train_l1preds < 0)) else np.mean(train_l1preds[train_l1preds >= 0])\n mean_train_l1tgt = -1 if (len(train_l1tgts) == 0 or np.all(train_l1tgts < 0)) else np.mean(train_l1tgts[train_l1tgts >= 0])\n mean_train_ious = [(-1 if np.all(x < 0) else np.mean(x[x >= 0])) for x in train_ious]\n mean_val_losses = None\n mean_val_l1pred = None\n mean_val_l1tgt = None\n mean_val_ious = None\n if val_losses:\n val_losses = np.array(val_losses)\n val_l1preds = np.array(val_l1preds)\n val_l1tgts = np.array(val_l1tgts)\n val_ious = np.array(val_ious)\n mean_val_losses = [-1 if np.all(x < 0) else np.mean(x[x >= 0]) for x in val_losses]\n mean_val_l1pred = -1 if (len(val_l1preds) == 0 or np.all(val_l1preds < 0)) else np.mean(val_l1preds[val_l1preds >= 0])\n mean_val_l1tgt = -1 if (len(val_l1tgts) == 0 or np.all(val_l1tgts < 0)) else np.mean(val_l1tgts[val_l1tgts >= 0])\n mean_val_ious = [-1 if np.all(x < 0) else np.mean(x[x >= 0]) for x in val_ious]\n print_log_info(epoch, iter, mean_train_losses, mean_train_l1pred, mean_train_l1tgt, mean_train_ious, mean_val_losses, mean_val_l1pred, mean_val_l1tgt, mean_val_ious, time, None)\n print_log_info(epoch, iter, mean_train_losses, mean_train_l1pred, mean_train_l1tgt, mean_train_ious, mean_val_losses, mean_val_l1pred, mean_val_l1tgt, mean_val_ious, time, log)\n else:\n print_log_info(epoch, iter, mean_train_losses, mean_train_l1pred, mean_train_l1tgt, mean_train_ious, None, None, None, None, time, None)\n print_log_info(epoch, iter, mean_train_losses, mean_train_l1pred, mean_train_l1tgt, mean_train_ious, None, None, None, None, time, log)\n log.flush()\n\n\ndef get_loss_weights(iter, num_hierarchy_levels, num_iters_per_level, factor_l1_loss):\n weights = np.zeros(num_hierarchy_levels+1, dtype=np.float32)\n cur_level = iter // num_iters_per_level\n if cur_level > num_hierarchy_levels:\n weights.fill(1)\n weights[-1] = factor_l1_loss\n if iter 
== (num_hierarchy_levels + 1) * num_iters_per_level:\n print('[iter %d] updating loss weights:' % iter, weights)\n return weights\n for level in range(0, cur_level+1):\n weights[level] = 1.0\n step_factor = 20\n fade_amount = max(1.0, min(100, num_iters_per_level//step_factor))\n fade_level = iter % num_iters_per_level\n cur_weight = 0.0\n l1_weight = 0.0\n if fade_level >= num_iters_per_level - fade_amount + step_factor:\n fade_level_step = (fade_level - num_iters_per_level + fade_amount) // step_factor\n cur_weight = float(fade_level_step) / float(fade_amount//step_factor)\n if cur_level+1 < num_hierarchy_levels:\n weights[cur_level+1] = cur_weight\n elif cur_level < num_hierarchy_levels:\n l1_weight = factor_l1_loss * cur_weight\n else:\n l1_weight = 1.0\n weights[-1] = l1_weight\n if iter % num_iters_per_level == 0 or (fade_level >= num_iters_per_level - fade_amount + step_factor and (fade_level - num_iters_per_level + fade_amount) % step_factor == 0):\n print('[iter %d] updating loss weights:' % iter, weights)\n return weights\n\ndef train(epoch, iter, dataloader, log_file, output_save):\n train_losses = [ [] for i in range(args.num_hierarchy_levels+2) ]\n train_l1preds = []\n train_l1tgts = []\n train_ious = [ [] for i in range(args.num_hierarchy_levels) ]\n model.train()\n start = time.time()\n \n if args.scheduler_step_size == 0:\n scheduler.step()\n\n num_batches = len(dataloader)\n for t, sample in enumerate(dataloader):\n loss_weights = get_loss_weights(iter, args.num_hierarchy_levels, args.num_iters_per_level, args.weight_sdf_loss)\n if epoch == args.start_epoch and t == 0:\n print('[iter %d/epoch %d] loss_weights' % (iter, epoch), loss_weights)\n\n sdfs = sample['sdf']\n if sdfs.shape[0] < args.batch_size:\n continue # maintain same batch size for training\n inputs = sample['input']\n known = sample['known']\n hierarchy = sample['hierarchy']\n for h in range(len(hierarchy)):\n hierarchy[h] = hierarchy[h].cuda()\n if args.use_loss_masking:\n known = known.cuda()\n inputs[0] = inputs[0].cuda()\n inputs[1] = inputs[1].cuda()\n target_for_sdf, target_for_occs, target_for_hier = loss_util.compute_targets(sdfs.cuda(), hierarchy, args.num_hierarchy_levels, args.truncation, args.use_loss_masking, known)\n\n optimizer.zero_grad()\n output_sdf, output_occs = model(inputs, loss_weights) \n loss, losses = loss_util.compute_loss(output_sdf, output_occs, target_for_sdf, target_for_occs, target_for_hier, loss_weights, args.truncation, args.logweight_target_sdf, args.weight_missing_geo, inputs[0], args.use_loss_masking, known)\n loss.backward()\n optimizer.step()\n\n output_visual = output_save and t + 2 == num_batches\n compute_pred_occs = (iter % 20 == 0) or output_visual\n if compute_pred_occs:\n pred_occs = [None] * args.num_hierarchy_levels\n for h in range(args.num_hierarchy_levels):\n factor = 2**(args.num_hierarchy_levels-h-1)\n pred_occs[h] = [None] * args.batch_size\n if len(output_occs[h][0]) == 0:\n continue\n output_occs[h][1] = torch.nn.Sigmoid()(output_occs[h][1][:,0].detach()) > 0.5\n for b in range(args.batch_size):\n batchmask = output_occs[h][0][:,-1] == b\n locs = output_occs[h][0][batchmask][:,:-1]\n vals = output_occs[h][1][batchmask]\n pred_occs[h][b] = locs[vals.view(-1)]\n train_losses[0].append(loss.item())\n for h in range(args.num_hierarchy_levels):\n train_losses[h+1].append(losses[h])\n target = target_for_occs[h].byte()\n if compute_pred_occs:\n iou = loss_util.compute_iou_sparse_dense(pred_occs[h], target, args.use_loss_masking)\n train_ious[h].append(iou)\n 
train_losses[args.num_hierarchy_levels+1].append(losses[-1])\n if len(output_sdf[0]) > 0:\n output_sdf = [output_sdf[0].detach(), output_sdf[1].detach()]\n if loss_weights[-1] > 0 and iter % 20 == 0:\n train_l1preds.append(loss_util.compute_l1_predsurf_sparse_dense(output_sdf[0], output_sdf[1], target_for_sdf, None, False, args.use_loss_masking, known).item())\n train_l1tgts.append(loss_util.compute_l1_tgtsurf_sparse_dense(output_sdf[0], output_sdf[1], target_for_sdf, args.truncation, args.use_loss_masking, known))\n\n iter += 1\n if args.scheduler_step_size > 0 and iter % args.scheduler_step_size == 0:\n scheduler.step()\n if iter % 20 == 0:\n took = time.time() - start\n print_log(log_file, epoch, iter, train_losses, train_l1preds, train_l1tgts, train_ious, None, None, None, None, took)\n if iter % 2000 == 0:\n torch.save({'epoch': epoch,'state_dict': model.state_dict(),'optimizer' : optimizer.state_dict()}, os.path.join(args.save, 'model-iter%s-epoch%s.pth' % (iter, epoch)))\n if output_visual:\n vis_pred_sdf = [None] * args.batch_size\n if len(output_sdf[0]) > 0:\n for b in range(args.batch_size):\n mask = output_sdf[0][:,-1] == b\n if len(mask) > 0:\n vis_pred_sdf[b] = [output_sdf[0][mask].cpu().numpy(), output_sdf[1][mask].squeeze().cpu().numpy()]\n inputs = [inputs[0].cpu().numpy(), inputs[1].cpu().numpy()]\n for h in range(args.num_hierarchy_levels):\n for b in range(args.batch_size):\n if pred_occs[h][b] is not None:\n pred_occs[h][b] = pred_occs[h][b].cpu().numpy()\n data_util.save_predictions(os.path.join(args.save, 'iter%d-epoch%d' % (iter, epoch), 'train'), sample['name'], inputs, target_for_sdf.cpu().numpy(), [x.cpu().numpy() for x in target_for_occs], vis_pred_sdf, pred_occs, sample['world2grid'].numpy(), args.vis_dfs, args.truncation)\n\n return train_losses, train_l1preds, train_l1tgts, train_ious, iter, loss_weights\n\n\ndef test(epoch, iter, loss_weights, dataloader, log_file, output_save):\n val_losses = [ [] for i in range(args.num_hierarchy_levels+2) ]\n val_l1preds = []\n val_l1tgts = []\n val_ious = [ [] for i in range(args.num_hierarchy_levels) ]\n model.eval()\n #start = time.time()\n\n num_batches = len(dataloader)\n with torch.no_grad():\n for t, sample in enumerate(dataloader):\n sdfs = sample['sdf']\n if sdfs.shape[0] < args.batch_size:\n continue # maintain same batch size\n inputs = sample['input']\n known = sample['known']\n hierarchy = sample['hierarchy']\n for h in range(len(hierarchy)):\n hierarchy[h] = hierarchy[h].cuda()\n if args.use_loss_masking:\n known = known.cuda()\n inputs[0] = inputs[0].cuda()\n inputs[1] = inputs[1].cuda()\n target_for_sdf, target_for_occs, target_for_hier = loss_util.compute_targets(sdfs.cuda(), hierarchy, args.num_hierarchy_levels, args.truncation, args.use_loss_masking, known)\n\n output_sdf, output_occs = model(inputs, loss_weights)\n loss, losses = loss_util.compute_loss(output_sdf, output_occs, target_for_sdf, target_for_occs, target_for_hier, loss_weights, args.truncation, args.logweight_target_sdf, args.weight_missing_geo, inputs[0], args.use_loss_masking, known)\n\n output_visual = output_save and t + 2 == num_batches\n compute_pred_occs = (t % 20 == 0) or output_visual\n if compute_pred_occs:\n pred_occs = [None] * args.num_hierarchy_levels\n for h in range(args.num_hierarchy_levels):\n factor = 2**(args.num_hierarchy_levels-h-1)\n pred_occs[h] = [None] * args.batch_size\n if len(output_occs[h][0]) == 0:\n continue\n for b in range(args.batch_size):\n batchmask = output_occs[h][0][:,-1] == b\n locs = 
output_occs[h][0][batchmask][:,:-1]\n vals = torch.nn.Sigmoid()(output_occs[h][1][:,0].detach()[batchmask]) > 0.5\n pred_occs[h][b] = locs[vals.view(-1)]\n val_losses[0].append(loss.item())\n for h in range(args.num_hierarchy_levels):\n val_losses[h+1].append(losses[h])\n target = target_for_occs[h].byte()\n if compute_pred_occs:\n iou = loss_util.compute_iou_sparse_dense(pred_occs[h], target, args.use_loss_masking)\n val_ious[h].append(iou)\n val_losses[args.num_hierarchy_levels+1].append(losses[-1])\n if len(output_sdf[0]) > 0:\n output_sdf = [output_sdf[0].detach(), output_sdf[1].detach()]\n if loss_weights[-1] > 0 and t % 20 == 0:\n val_l1preds.append(loss_util.compute_l1_predsurf_sparse_dense(output_sdf[0], output_sdf[1], target_for_sdf, None, False, args.use_loss_masking, known).item())\n val_l1tgts.append(loss_util.compute_l1_tgtsurf_sparse_dense(output_sdf[0], output_sdf[1], target_for_sdf, args.truncation, args.use_loss_masking, known))\n if output_visual:\n vis_pred_sdf = [None] * args.batch_size\n if len(output_sdf[0]) > 0:\n for b in range(args.batch_size):\n mask = output_sdf[0][:,-1] == b\n if len(mask) > 0:\n vis_pred_sdf[b] = [output_sdf[0][mask].cpu().numpy(), output_sdf[1][mask].squeeze().cpu().numpy()]\n inputs = [inputs[0].cpu().numpy(), inputs[1].cpu().numpy()]\n for h in range(args.num_hierarchy_levels):\n for b in range(args.batch_size):\n if pred_occs[h][b] is not None:\n pred_occs[h][b] = pred_occs[h][b].cpu().numpy()\n data_util.save_predictions(os.path.join(args.save, 'iter%d-epoch%d' % (iter, epoch), 'val'), sample['name'], inputs, target_for_sdf.cpu().numpy(), [x.cpu().numpy() for x in target_for_occs], vis_pred_sdf, pred_occs, sample['world2grid'], args.vis_dfs, args.truncation)\n\n #took = time.time() - start\n return val_losses, val_l1preds, val_l1tgts, val_ious\n\n\ndef main():\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n elif not _OVERFIT:\n raw_input('warning: save dir %s exists, press key to delete and continue' % args.save)\n\n data_util.dump_args_txt(args, os.path.join(args.save, 'args.txt'))\n log_file = open(os.path.join(args.save, 'log.csv'), 'w')\n headers = ['epoch','iter','train_loss(total)']\n for h in range(args.num_hierarchy_levels):\n headers.append('train_loss(' + str(h) + ')')\n headers.extend(['train_loss(sdf)', 'train_l1-pred', 'train_l1-tgt'])\n for h in range(args.num_hierarchy_levels):\n headers.append('train_iou(' + str(h) + ')')\n headers.extend(['time'])\n log_file.write(_SPLITTER.join(headers) + '\\n')\n log_file.flush()\n\n has_val = len(val_files) > 0\n log_file_val = None\n if has_val:\n headers = headers[:-1]\n headers.append('val_loss(total)')\n for h in range(args.num_hierarchy_levels):\n headers.append('val_loss(' + str(h) + ')')\n headers.extend(['val_loss(sdf)', 'val_l1-pred', 'val_l1-tgt'])\n for h in range(args.num_hierarchy_levels):\n headers.append('val_iou(' + str(h) + ')')\n headers.extend(['time'])\n log_file_val = open(os.path.join(args.save, 'log_val.csv'), 'w')\n log_file_val.write(_SPLITTER.join(headers) + '\\n')\n log_file_val.flush()\n # start training\n print('starting training...')\n iter = args.start_epoch * (len(train_dataset) // args.batch_size)\n for epoch in range(args.start_epoch, args.max_epoch):\n start = time.time()\n\n train_losses, train_l1preds, train_l1tgts, train_ious, iter, loss_weights = train(epoch, iter, train_dataloader, log_file, output_save=(epoch % args.save_epoch == 0))\n if has_val:\n val_losses, val_l1preds, val_l1tgts, val_ious = test(epoch, iter, loss_weights, 
val_dataloader, log_file_val, output_save=(epoch % args.save_epoch == 0))\n\n took = time.time() - start\n if has_val:\n print_log(log_file_val, epoch, iter, train_losses, train_l1preds, train_l1tgts, train_ious, val_losses, val_l1preds, val_l1tgts, val_ious, took)\n else:\n print_log(log_file, epoch, iter, train_losses, train_l1preds, train_l1tgts, train_ious, None, None, None, None, took)\n torch.save({'epoch': epoch + 1,'state_dict': model.state_dict(),'optimizer' : optimizer.state_dict()}, os.path.join(args.save, 'model-epoch-%s.pth' % epoch))\n log_file.close()\n if has_val:\n log_file_val.close()\n\n\n\nif __name__ == '__main__':\n main()\n\n\n"
] |
[
[
"torch.load",
"torch.utils.data.DataLoader",
"torch.nn.Sigmoid",
"numpy.all",
"numpy.mean",
"torch.no_grad",
"numpy.array",
"numpy.zeros",
"torch.optim.lr_scheduler.StepLR"
]
] |
quarkfin/QF-Lib
|
[
"1504c65c9ed8bbbd19948088fe7b924a7b6be709"
] |
[
"qf_lib_tests/unit_tests/data_providers/test_general_price_provider_mock.py"
] |
[
"# Copyright 2016-present CERN – European Organization for Nuclear Research\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom unittest.mock import Mock\n\nimport pandas as pd\n\nfrom qf_lib.common.enums.frequency import Frequency\nfrom qf_lib.common.enums.price_field import PriceField\nfrom qf_lib.common.tickers.tickers import QuandlTicker, BloombergTicker, HaverTicker\nfrom qf_lib.common.utils.dateutils.string_to_date import str_to_date\nfrom qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame\nfrom qf_lib.containers.qf_data_array import QFDataArray\nfrom qf_lib.data_providers.bloomberg import BloombergDataProvider\nfrom qf_lib.data_providers.general_price_provider import GeneralPriceProvider\nfrom qf_lib.data_providers.haver import HaverDataProvider\nfrom qf_lib.data_providers.quandl.quandl_data_provider import QuandlDataProvider\n\n\nclass TestGeneralPriceProviderMock(unittest.TestCase):\n START_DATE = str_to_date('2017-10-02')\n END_DATE = str_to_date('2017-10-17')\n FREQUENCY = Frequency.DAILY\n\n BBG_TICKERS = [BloombergTicker('BBG1'), BloombergTicker('BBG2'),\n BloombergTicker('BBG3'), BloombergTicker('BBG4')]\n\n QUANDL_TICKERS = [QuandlTicker('Quandl1', 'DB'), QuandlTicker('Quandl2', 'DB'),\n QuandlTicker('Quandl3', 'DB'), QuandlTicker('Quandl4', 'DB')]\n\n HAVER_TICKERS = [HaverTicker('Haver1', 'DB'), HaverTicker('Haver2', 'DB'),\n HaverTicker('Haver3', 'DB'), HaverTicker('Haver4', 'DB')]\n\n SINGLE_PRICE_FIELD = PriceField.Close\n PRICE_FIELDS = [SINGLE_PRICE_FIELD]\n\n NUM_OF_DATES = 12\n\n def setUp(self):\n\n datetime_index = pd.DatetimeIndex([\n '2017-10-02', '2017-10-03', '2017-10-04', '2017-10-05', '2017-10-06',\n '2017-10-09', '2017-10-10', '2017-10-11', '2017-10-12', '2017-10-13',\n '2017-10-16', '2017-10-17'\n ])\n\n data = [\n [[263.7628], [None], [111.02], [321.8249]],\n [[263.9803], [106.39], [121.29], [322.0949]],\n [[264.1640], [106.36], [121.22], [322.3203]],\n [[264.0932], [106.25], [121.05], [322.4172]],\n [[263.9816], [106.12], [120.95], [322.1411]],\n [[263.9816], [106.24], [121.05], [None]],\n [[264.4529], [106.28], [121.13], [None]],\n [[264.5108], [106.40], [121.07], [322.3553]],\n [[264.8223], [106.50], [121.10], [322.7489]],\n [[264.4531], [106.23], [121.31], [322.9710]],\n [[264.4690], [106.16], [121.14], [323.0688]],\n [[None], [106.06], [121.01], [323.1553]]\n ]\n\n bloomberg = Mock(spec=BloombergDataProvider)\n bloomberg.get_price.return_value = QFDataArray.create(dates=datetime_index, tickers=self.BBG_TICKERS,\n fields=self.PRICE_FIELDS, data=data)\n bloomberg.supported_ticker_types.return_value = {BloombergTicker}\n\n quandl = Mock(spec=QuandlDataProvider)\n quandl.get_price.return_value = QFDataArray.create(dates=datetime_index, tickers=self.QUANDL_TICKERS,\n fields=self.PRICE_FIELDS, data=data)\n quandl.supported_ticker_types.return_value = {QuandlTicker}\n\n haver = Mock(spec=HaverDataProvider)\n haver.get_price.return_value = QFDataArray.create(dates=datetime_index, tickers=self.HAVER_TICKERS,\n 
fields=self.PRICE_FIELDS, data=data)\n haver.supported_ticker_types.return_value = {HaverTicker}\n\n self.price_provider = GeneralPriceProvider(bloomberg, quandl, haver)\n\n # =========================== Test get_price method ==========================================================\n\n def test_price_single_provider_single_field(self):\n data = self.price_provider.get_price(tickers=self.QUANDL_TICKERS, fields=self.SINGLE_PRICE_FIELD,\n start_date=self.START_DATE, end_date=self.END_DATE)\n\n self.assertEqual(PricesDataFrame, type(data))\n self.assertEqual((self.NUM_OF_DATES, len(self.QUANDL_TICKERS)), data.shape)\n self.assertEqual(list(data.columns), self.QUANDL_TICKERS)\n\n def test_price_multiple_providers_single_field(self):\n tickers = self.BBG_TICKERS + self.QUANDL_TICKERS + self.HAVER_TICKERS\n data = self.price_provider.get_price(tickers=tickers, fields=self.SINGLE_PRICE_FIELD,\n start_date=self.START_DATE, end_date=self.END_DATE)\n self.assertEqual(type(data), PricesDataFrame)\n self.assertEqual(data.shape, (self.NUM_OF_DATES, len(tickers)))\n self.assertEqual(list(data.columns), tickers)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"pandas.DatetimeIndex"
]
] |
QuantTraderEd/vnpy_crypto
|
[
"844381797a475a01c05a4e162592a5a6e3a48032"
] |
[
"venv/lib/python3.6/site-packages/pykalman/datasets/base.py"
] |
[
"\"\"\"\nDataset\n\"\"\"\n\nfrom os.path import dirname, join\n\nimport numpy as np\nfrom numpy import ma\nfrom scipy import io\n\nfrom ..utils import Bunch, check_random_state\n\n\ndef load_robot():\n \"\"\"Load and return synthetic robot state data (state estimation)\n\n =================================\n Number of time steps 501\n Dimensionality of Observations 2\n Dimensionality of States 5\n =================================\n\n Returns\n -------\n data : Bunch\n Dictionary-like object containing all data. Access attributes as you\n would the contents of a dictionary or of an object.\n\n Examples\n --------\n >>> from pykalman.datasets import load_robot\n >>> data = load_robot()\n >>> data.data.shape\n (501, 2)\n \"\"\"\n def pad_and_mask(X):\n \"\"\"Pad X's first index with zeros and mask it\"\"\"\n zeros = np.zeros(X.shape[1:])[np.newaxis]\n X = np.vstack([zeros, X])\n mask = np.zeros(X.shape)\n mask[0] = True\n return ma.array(X, mask=mask)\n\n module_path = dirname(__file__)\n data = io.loadmat(join(module_path, 'data', 'robot.mat'))\n descr = open(join(module_path, 'descr', 'robot.rst')).read()\n Z = pad_and_mask(data['y'].T)\n X = data['x'].T\n A = data['A']\n b = data['b'].T\n C = data['C']\n d = data['d'][:, 0]\n Q_0 = 10.0 * np.eye(5)\n R_0 = 10.0 * np.eye(2)\n Q = data['Q']\n R = data['R']\n x_0 = data['x0'][:, 0]\n V_0 = data['P_0']\n X_filt = data['xfilt'].T\n V_filt = data['Vfilt'][0]\n ll = data['ll'][0]\n X_smooth = data['xsmooth'].T\n V_smooth = data['Vsmooth'][0]\n T = Z.shape[0]\n\n # V_filt is actually an object array where each object is a 2D array.\n # Convert it to a proper, 3D array. Likewise for V_smooth.\n V_filt = np.asarray([V_filt[t] for t in range(V_filt.shape[0])])\n V_smooth = np.asarray([V_smooth[t] for t in range(V_smooth.shape[0])])\n\n return Bunch(n_timesteps=T, observations=Z, states=X, transition_matrix=A,\n transition_offsets=b, observation_matrix=C, observation_offset=d,\n initial_transition_covariance=Q_0, initial_observation_covariance=R_0,\n transition_covariance=Q, observation_covariance=R,\n initial_state_mean=x_0, initial_state_covariance=V_0,\n filtered_state_means=X_filt, filtered_state_covariances=V_filt,\n loglikelihoods=ll, smoothed_state_means=X_smooth,\n smoothed_state_covariances=V_smooth, DESCR=descr)\n"
] |
[
[
"numpy.ma.array",
"numpy.eye",
"numpy.zeros",
"numpy.vstack"
]
] |
afshinamini/Geoscience-BC-project-2019-014
|
[
"3f91a021ad99ef02950e2ae919c132e8409d35d0"
] |
[
"Data/Geological_Features/Formation Tops/Montney Grid/rbfInterp.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 24 18:41:36 2020\r\n\r\n@author: aamini\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy.interpolate import Rbf\r\nfrom sklearn.neighbors import NearestNeighbors\r\n\r\nd1 = pd.read_csv('Montney.csv')\r\nd2 = pd.read_csv('Montney2.5kmgrid.csv')\r\n\r\n\r\n#%%\r\n\r\nx = d1[\"X\"]\r\ny = d1[\"Y\"]\r\nd = d1[\"Montney-TVD\"]\r\n\r\nrbfi = Rbf(x, y, d)\r\n#%%\r\nxi = d2[\"X\"]\r\nyi = d2[\"Y\"]\r\n\r\ndi = rbfi(xi, yi)\r\n\r\n#%%\r\n\r\nneigh = NearestNeighbors(n_neighbors=3)\r\nneigh.fit(samples)"
] |
[
[
"scipy.interpolate.Rbf",
"pandas.read_csv",
"sklearn.neighbors.NearestNeighbors"
]
] |
MalcolmGomes/CPS040-Thesis
|
[
"1d7a750169f56923ffbd14d96c7c8e4c5d377bf9"
] |
[
"Main/MemNet/MemNet_PyTorch-master/eval.py"
] |
[
"import argparse, os\nimport torch\nfrom torch.autograd import Variable\nimport numpy as np\nimport time, math, glob\nimport scipy.io as sio\nfrom torch.backends import cudnn\nfrom memnet1 import MemNet\nfrom utils import convert_state_dict\n\ntorch.backends.cudnn.benchmark = True\ncudnn.benchmark = True\n\nparser = argparse.ArgumentParser(description=\"PyTorch MemNet Eval\")\nparser.add_argument(\"--cuda\", action=\"store_true\", help=\"use cuda?\")\nparser.add_argument(\"--model\", default=\"checkpoint1/model_epoch_50.pth\", type=str, help=\"model path\")\nparser.add_argument(\"--dataset\", default=\"Set5\", type=str, help=\"dataset name, Default: Set5\")\nparser.add_argument(\"--gpus\", default=\"4\", type=str, help=\"gpu ids (default: 0)\")\n\ndef PSNR(pred, gt, shave_border=0):\n height, width = pred.shape[:2]\n pred = pred[shave_border:height - shave_border, shave_border:width - shave_border]\n gt = gt[shave_border:height - shave_border, shave_border:width - shave_border]\n imdff = pred - gt\n rmse = math.sqrt(np.mean(imdff ** 2))\n if rmse == 0:\n return 100\n return 20 * math.log10(255.0 / rmse)\n\nopt = parser.parse_args()\ncuda = opt.cuda\n\nif cuda:\n print(\"=> use gpu id: '{}'\".format(opt.gpus))\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = opt.gpus\n if not torch.cuda.is_available():\n raise Exception(\"No GPU found or Wrong gpu id, please run without --cuda\")\n\n#model = torch.load(opt.model, map_location=lambda storage, loc: storage)[\"model\"]\nmodel = MemNet(1,64,6,6)\nstate = convert_state_dict( torch.load(opt.model)['model'])\nmodel.load_state_dict(state)\n\nif cuda:\n model = model.cuda()\nelse:\n model = model.cpu()\n\nmodel.eval()\n\nscales = [2,3,4]\n\n#image_list = glob.glob(opt.dataset+\"_mat/*.*\") \nimage_list = glob.glob('data/SuperResolution/'+opt.dataset+\"_mat/*.*\") \n\nfor scale in scales:\n avg_psnr_predicted = 0.0\n avg_psnr_bicubic = 0.0\n avg_elapsed_time = 0.0\n count = 0.0\n for image_name in image_list:\n if str(scale) in image_name:\n count += 1\n print(\"Processing \", image_name)\n im_gt_y = sio.loadmat(image_name)['im_gt_y']\n im_b_y = sio.loadmat(image_name)['im_b_y']\n \n im_gt_y = im_gt_y.astype(float)\n im_b_y = im_b_y.astype(float)\n\n psnr_bicubic = PSNR(im_gt_y, im_b_y,shave_border=scale)\n avg_psnr_bicubic += psnr_bicubic\n\n im_input = im_b_y/255.\n #print(im_input.shape)\n\n im_input = Variable((torch.from_numpy(im_input).float()).view(1, -1, im_input.shape[0], im_input.shape[1]), volatile= True)\n \n\n if cuda:\n im_input = im_input.cuda()\n\n start_time = time.time()\n HR = model(im_input)\n elapsed_time = time.time() - start_time\n avg_elapsed_time += elapsed_time\n\n HR = HR.cpu()\n\n im_h_y = HR.data[0].numpy().astype(np.float32)\n\n im_h_y = im_h_y * 255.\n im_h_y[im_h_y < 0] = 0\n im_h_y[im_h_y > 255.] = 255.\n im_h_y = im_h_y[0,:,:]\n\n psnr_predicted = PSNR(im_gt_y, im_h_y,shave_border=scale)\n avg_psnr_predicted += psnr_predicted\n\n print(\"Scale=\", scale)\n print(\"Dataset=\", opt.dataset)\n print(\"PSNR_predicted=\", avg_psnr_predicted/count)\n print(\"PSNR_bicubic=\", avg_psnr_bicubic/count)\n print(\"It takes average {}s for processing\".format(avg_elapsed_time/count))\n"
] |
[
[
"torch.load",
"scipy.io.loadmat",
"torch.from_numpy",
"numpy.mean",
"torch.cuda.is_available"
]
] |
991166chun/TeaDisease
|
[
"3cf6499617c01b3a22babcbf65e8241c9cac3c06"
] |
[
"mmdet/datasets/multistage.py"
] |
[
"import mmcv\nimport numpy as np\n\nfrom mmdet.core import eval_map\nfrom .builder import DATASETS\nfrom .custom import CustomDataset\n\n\n@DATASETS.register_module()\nclass MultiStageDataset(CustomDataset):\n\n CLASSES_1 = ('disease','back')\n\n CLASSES_2 = ('brownblight', 'blister', 'algal', 'fungi_early',\n 'miner', 'thrips',\n 'mosquito_early', 'mosquito_late',\n 'moth', 'tortrix', 'flushworm',\n 'roller')\n\n def __init__(self,**kwargs):\n super(XMLDataset, self).__init__(**kwargs)\n self.cat2category = {cat: i for i, cat in enumerate(self.CLASSES_1)}\n self.cat2pest = {cat: i for i, cat in enumerate(self.CLASSES_2)}\n \n def load_annotations(self, ann_file):\n data_infos = []\n img_ids = mmcv.list_from_file(ann_file)\n for img_id in img_ids:\n filename = f'JPEGImages/{img_id}.jpg'\n xml_path = osp.join(self.img_prefix, 'MultiLabel',\n f'{img_id}.xml')\n tree = ET.parse(xml_path)\n root = tree.getroot()\n size = root.find('size')\n width = 0\n height = 0\n if size is not None:\n width = int(size.find('width').text)\n height = int(size.find('height').text)\n else:\n img_path = osp.join(self.img_prefix, 'JPEGImages',\n '{}.jpg'.format(img_id))\n img = Image.open(img_path)\n width, height = img.size\n data_infos.append(\n dict(id=img_id,\n filename=filename, \n width=width, \n height=height))\n\n return data_infos\n\n def get_subset_by_classes(self):\n \"\"\"Filter imgs by user-defined categories\n \"\"\"\n subset_data_infos = []\n for data_info in self.data_infos:\n img_id = data_info['id']\n xml_path = osp.join(self.img_prefix, 'MultiLabel',\n f'{img_id}.xml')\n tree = ET.parse(xml_path)\n root = tree.getroot()\n for obj in root.findall('object'):\n \n # category = obj.find('category').text\n # if category in self.CLASSES_1:\n # subset_data_infos.append(data_info)\n # break\n\n label = obj.find('label').text\n if label in self.CLASSES_2:\n subset_data_infos.append(data_info)\n break\n \n\n return subset_data_infos\n\n def get_ann_info(self, idx):\n img_id = self.data_infos[idx]['id']\n xml_path = osp.join(self.img_prefix, 'MultiLabel', f'{img_id}.xml')\n tree = ET.parse(xml_path)\n root = tree.getroot()\n bboxes = []\n cates = []\n pests = []\n bboxes_ignore = []\n labels_ignore = []\n for obj in root.findall('object'):\n\n\n category = obj.find('category').text\n if category not in self.CLASSES_1:\n continue\n category = self.cat2category[category]\n\n pest = obj.find('label').text\n if pest not in self.CLASSES_2:\n continue\n pest = self.cat2pest[pest]\n\n difficult = int(obj.find('difficult').text)\n bnd_box = obj.find('bndbox')\n # TODO: check whether it is necessary to use int\n # Coordinates may be float type\n bbox = [\n int(float(bnd_box.find('xmin').text)),\n int(float(bnd_box.find('ymin').text)),\n int(float(bnd_box.find('xmax').text)),\n int(float(bnd_box.find('ymax').text))\n ]\n ignore = False\n \n bboxes.append(bbox)\n cates.append(category)\n pests.append(pest)\n\n \n bboxes = np.array(bboxes, ndmin=2) - 1\n cates = np.array(cates)\n pests = np.array(pests)\n\n bboxes_ignore = np.zeros((0, 4))\n labels_ignore = np.zeros((0, ))\n\n ann = dict(\n bboxes=bboxes.astype(np.float32),\n category=cates.astype(np.int64),\n pests=pests.astype(np.int64),\n\n bboxes_ignore=bboxes_ignore.astype(np.float32),\n labels_ignore=labels_ignore.astype(np.int64))\n return ann\n\n def evaluate(self,\n results,\n metric='mAP',\n logger=None,\n proposal_nums=(100, 300, 1000),\n iou_thr=0.5,\n scale_ranges=None):\n if not isinstance(metric, str):\n assert len(metric) == 1\n metric = metric[0]\n 
allowed_metrics = ['mAP',]\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n annotations = [self.get_ann_info(i) for i in range(len(self))]\n\n eval_results = {}\n\n if metric == 'mAP':\n assert isinstance(iou_thr, float)\n\n for stage in range(3):\n\n if stage == 0:\n ds_name = self.dataset.CLASSES_1\n label = 'category'\n else:\n ds_name = self.dataset.CLASSES_2\n label = 'pests'\n\n stage_annotations = []\n for anno in annotations:\n anno['labels']=anno[label]\n stage_annotations.append(anno)\n '''\n result (temp) : [[det_results_1], [det_results_2], [det_results_3]]\n det_results (list[list]): [[cls1_det, cls2_det, ...], ...].\n The outer list indicates images, and the inner list indicates\n per-class detected bboxes.\n annotations (list[dict]): Ground truth annotations where each item of\n the list indicates an image. Keys of annotations are:\n\n - `bboxes`: numpy array of shape (n, 4)\n - `labels`: numpy array of shape (n, )\n - `bboxes_ignore` (optional): numpy array of shape (k, 4)\n - `labels_ignore` (optional): numpy array of shape (k, )\n \n '''\n print('eval stage %d mAP from outputs' %(stage+1))\n mean_ap, _ = eval_map(\n results[stage],\n stage_annotations,\n scale_ranges=None,\n iou_thr=iou_thr,\n dataset=ds_name,\n logger=logger)\n eval_results['mAP'] = mean_ap\n \n return eval_results"
] |
[
[
"numpy.array",
"numpy.zeros"
]
] |
ciubecca/3dunet-cavity
|
[
"cfcc827773b18a95d221ab86c1afc5e2f7c30ecb"
] |
[
"tests/random_tests.py"
] |
[
"import unittest\n\nfrom pytorch3dunet.datasets.featurizer import get_features, ComposedFeatures, LabelClass\nfrom pytorch3dunet.datasets.features import PotentialGrid\nfrom pytorch3dunet.augment.transforms import Phase\nimport numpy as np\nfrom typing import Mapping, Iterable, Callable\nfrom pytorch3dunet.augment.utils import Transformer, take_while_deterministic\nimport torch\n\n\nclass TestRandom(unittest.TestCase):\n '''\n Test that same random transoformations are applied to different features\n '''\n\n N = 100\n\n def __init__(self, *args, **kwargs):\n features_config = [{'name': 'PotentialGrid'}, {'name': 'KalasantyFeatures'}, {'name': 'AtomLabel'}]\n channels: ComposedFeatures = get_features(features_config)\n self.feature_types = channels.feature_types\n\n random3darray = np.random.normal(size=(self.N,self.N,self.N))\n\n self.raws = np.stack([random3darray for _ in range(channels.num_features)])\n self.labels = np.expand_dims(random3darray, axis=0)\n\n assert self.raws.shape == (channels.num_features, self.N,self.N,self.N)\n assert self.labels.shape == (1, self.N, self.N, self.N)\n\n super().__init__(*args, **kwargs)\n\n def validate(self, transformer_config:Iterable[Mapping],\n validate_label_outputs: Callable[[torch.Tensor, torch.Tensor, type], bool],\n validate_input_output:Callable[[torch.Tensor,torch.Tensor],bool]\n ) -> None:\n phase = Phase.TRAIN\n self.transformer = Transformer(transformer_config=transformer_config, common_config={},\n allowRotations=True, debug_str='', stats=None)\n self.transformer.validate()\n self.raw_transform = self.transformer.create_transform(phase, '_raw', convert_to_torch=False)\n self.label_transform = self.transformer.create_transform(phase, '_label', convert_to_torch=False)\n\n raws_transformed = self.raw_transform(self.raws, self.feature_types)\n label_transformed = self.label_transform(self.labels, [LabelClass])[0]\n\n assert validate_input_output(self.labels[0], label_transformed)\n\n for feature_type, raw, raw_transformed in zip(self.feature_types, self.raws, raws_transformed):\n assert raw_transformed.shape == (self.N, self.N, self.N)\n assert validate_label_outputs(label_transformed, raw_transformed, feature_type)\n assert validate_input_output(raw, raw_transformed)\n\n def test_seeds(self):\n transformer_config = [\n {\n 'name': 'TrivialTransform',\n 'local':\n {\n 'train':\n {\n 'LabelClass': 'skipped'\n }\n }\n },\n {\n 'name': 'RandomRotate3D',\n 'local': {\n 'train':\n {\n 'PotentialGrid': {'mode': 'constant'}\n }\n }\n }\n ]\n self.validate(transformer_config, lambda x,y,t: np.array_equal(x,y), lambda x, y: not np.array_equal(x, y))\n\n\n def test_seeds2(self):\n transformer_config = [\n {\n 'name': 'TrivialTransform',\n 'local':\n {\n 'train':\n {\n 'PotentialGrid': 'skipped'\n }\n }\n },\n {\n 'name': 'RandomRotate3D',\n 'local': {\n 'train':\n {\n 'PotentialGrid': {'mode': 'constant'}\n }\n }\n }\n ]\n self.validate(transformer_config, lambda x,y,t: np.array_equal(x,y), lambda x, y: not np.array_equal(x, y))\n\n def test_control3(self):\n transformer_config = [\n {\n 'name': 'TrivialTransform',\n 'local':\n {\n 'train':\n {\n 'PotentialGrid': 'skipped'\n }\n }\n },\n {\n 'name': 'RandomRotate3D',\n 'global': {'train': 'skipped'},\n 'local': {\n 'train':\n {\n 'KalasantyFeatures': {'mode': 'nearest'}\n }\n }\n }\n ]\n self.validate(transformer_config, lambda x,y,t: np.array_equal(x,y), lambda x, y: np.array_equal(x, y))\n\n def test_baseline(self):\n transformer_config = [\n {\n 'name': 'RandomRotate3D',\n 'local': {\n 'train':\n 
{\n 'PotentialGrid': {'mode': 'constant'}\n }\n }\n }\n ]\n self.validate(transformer_config, lambda x, y, t: np.array_equal(x, y), lambda x, y: not np.array_equal(x, y))\n\n def test_control(self):\n transformer_config = [\n {\n 'name': 'RandomRotate3D'\n }\n ]\n\n def validate_label_outputs(label,raw,t):\n if t == PotentialGrid:\n return not np.array_equal(label,raw)\n return np.array_equal(label,raw)\n\n self.validate(transformer_config, validate_label_outputs, lambda x, y: not np.array_equal(x, y))\n\n def test_control2(self):\n transformer_config = [\n {\n 'name': 'RandomFlip',\n 'train': {\n 'axis_prob': 1.0\n }\n },\n {\n 'name': 'RandomRotate3D'\n }\n ]\n def validate_label_outputs(label,raw,t):\n if t == PotentialGrid:\n return not np.array_equal(label,raw)\n return np.array_equal(label,raw)\n\n self.validate(transformer_config, validate_label_outputs, lambda x, y: not np.array_equal(x, y))\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.random.normal",
"numpy.expand_dims",
"numpy.array_equal"
]
] |
soyoung97/MixText
|
[
"22993cd028a4223a54e138a89b53cd7978a5e38b"
] |
[
"code/mixtext.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom transformers import *\nfrom transformers.modeling_bert import BertEmbeddings, BertPooler, BertLayer\nfrom normal_bert import ClassificationBert, MixupBert\n\nclass BertModel4Mix(BertPreTrainedModel):\n def __init__(self, config):\n super(BertModel4Mix, self).__init__(config)\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder4Mix(config)\n self.pooler = BertPooler(config)\n\n self.init_weights()\n\n def _resize_token_embeddings(self, new_num_tokens):\n old_embeddings = self.embeddings.word_embeddings\n new_embeddings = self._get_resized_embeddings(\n old_embeddings, new_num_tokens)\n self.embeddings.word_embeddings = new_embeddings\n return self.embeddings.word_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n def forward(self, input_ids, input_ids2=None, l=None, mix_layer=1000, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):\n\n if attention_mask is None:\n if input_ids2 is not None:\n attention_mask2 = torch.ones_like(input_ids2)\n attention_mask = torch.ones_like(input_ids)\n\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n if input_ids2 is not None:\n token_type_ids2 = torch.zeros_like(input_ids2)\n\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n\n extended_attention_mask = extended_attention_mask.to(\n dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n if input_ids2 is not None:\n\n extended_attention_mask2 = attention_mask2.unsqueeze(\n 1).unsqueeze(2)\n\n extended_attention_mask2 = extended_attention_mask2.to(\n dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask2 = (\n 1.0 - extended_attention_mask2) * -10000.0\n\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(\n 0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(\n self.config.num_hidden_layers, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n # We can specify head_mask for each layer\n head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)\n # switch to fload if need + fp16 compatibility\n head_mask = head_mask.to(dtype=next(self.parameters()).dtype)\n else:\n head_mask = [None] * self.config.num_hidden_layers\n\n embedding_output = self.embeddings(\n input_ids, position_ids=position_ids, token_type_ids=token_type_ids)\n\n if input_ids2 is not None:\n embedding_output2 = self.embeddings(\n input_ids2, position_ids=position_ids, token_type_ids=token_type_ids2)\n\n if input_ids2 is not None:\n encoder_outputs = self.encoder(embedding_output, embedding_output2, l, mix_layer,\n extended_attention_mask, extended_attention_mask2, head_mask=head_mask)\n else:\n encoder_outputs = self.encoder(\n embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask)\n\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output)\n\n # add hidden_states and attentions if they are here\n outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]\n # sequence_output, pooled_output, (hidden_states), (attentions)\n return outputs\n\n\nclass BertEncoder4Mix(nn.Module):\n def __init__(self, config):\n 
super(BertEncoder4Mix, self).__init__()\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.layer = nn.ModuleList([BertLayer(config)\n for _ in range(config.num_hidden_layers)])\n\n def forward(self, hidden_states, hidden_states2=None, l=None, mix_layer=1000, attention_mask=None, attention_mask2=None, head_mask=None):\n all_hidden_states = ()\n all_attentions = ()\n\n # Perform mix at till the mix_layer\n if mix_layer == -1:\n if hidden_states2 is not None:\n hidden_states = l * hidden_states + (1-l)*hidden_states2\n\n for i, layer_module in enumerate(self.layer):\n if i <= mix_layer:\n\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states, attention_mask, head_mask[i])\n hidden_states = layer_outputs[0]\n\n if self.output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n if hidden_states2 is not None:\n layer_outputs2 = layer_module(\n hidden_states2, attention_mask2, head_mask[i])\n hidden_states2 = layer_outputs2[0]\n\n if i == mix_layer:\n if hidden_states2 is not None:\n hidden_states = l * hidden_states + (1-l)*hidden_states2\n\n if i > mix_layer:\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states, attention_mask, head_mask[i])\n hidden_states = layer_outputs[0]\n\n if self.output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n outputs = outputs + (all_attentions,)\n # last-layer hidden state, (all hidden states), (all attentions)\n return outputs\n\n\nclass MixText(nn.Module):\n def __init__(self, num_labels=2, mix_option=False):\n super(MixText, self).__init__()\n self.mix_option = mix_option\n\n if mix_option:\n self.bert = BertModel4Mix.from_pretrained('bert-base-uncased')\n else:\n self.bert = ClassificationBert(num_labels).cuda()\n #self.bert = MixupBert(num_labels).cuda()\n\n self.linear = nn.Sequential(nn.Linear(768, 128),\n nn.Tanh(),\n nn.Linear(128, num_labels))\n\n def forward(self, x, x2=None, l=None, mix_layer=1000):\n\n if x2 is not None:\n if self.mix_option:\n all_hidden, _ = self.bert(x, x2, l, mix_layer)\n pooled_output = torch.mean(all_hidden, 1)\n predict = self.linear(pooled_output)\n return predict\n else:\n x_aug = self.augment(x, x2)\n predict_x = self.bert(x)\n predict_aug = self.bert(x_aug)\n return predict_x, predict_aug\n\n else:\n all_hidden, _ = self.bert(x)\n pooled_output = torch.mean(all_hidden, 1)\n predict = self.linear(pooled_output)\n return predict\n\n"
] |
[
[
"torch.mean",
"torch.zeros_like",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.ones_like"
]
] |
pculliton/PySyft
|
[
"23a0d1442d3d901b1139aeabe079ccf4177ebc0d"
] |
[
"packages/syft/src/syft/core/node/common/client.py"
] |
[
"# stdlib\nimport sys\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\n# third party\nfrom google.protobuf.reflection import GeneratedProtocolMessageType\nfrom nacl.signing import SigningKey\nfrom nacl.signing import VerifyKey\nimport pandas as pd\n\n# syft absolute\nimport syft as sy\n\n# relative\nfrom ....logger import critical\nfrom ....logger import debug\nfrom ....logger import error\nfrom ....logger import traceback_and_raise\nfrom ....proto.core.node.common.client_pb2 import Client as Client_PB\nfrom ....proto.core.node.common.metadata_pb2 import Metadata as Metadata_PB\nfrom ....util import get_fully_qualified_name\nfrom ...common.message import EventualSyftMessageWithoutReply\nfrom ...common.message import ImmediateSyftMessageWithReply\nfrom ...common.message import ImmediateSyftMessageWithoutReply\nfrom ...common.message import SignedEventualSyftMessageWithoutReply\nfrom ...common.message import SignedImmediateSyftMessageWithReply\nfrom ...common.message import SignedImmediateSyftMessageWithoutReply\nfrom ...common.message import SyftMessage\nfrom ...common.serde.serializable import serializable\nfrom ...common.uid import UID\nfrom ...io.location import Location\nfrom ...io.location import SpecificLocation\nfrom ...io.route import Route\nfrom ...io.route import SoloRoute\nfrom ...io.virtual import VirtualClientConnection\nfrom ...pointer.garbage_collection import GarbageCollection\nfrom ...pointer.garbage_collection import gc_get_default_strategy\nfrom ...pointer.pointer import Pointer\nfrom ..abstract.node import AbstractNodeClient\nfrom .action.exception_action import ExceptionMessage\nfrom .node_service.child_node_lifecycle.child_node_lifecycle_service import (\n RegisterChildNodeMessage,\n)\nfrom .node_service.object_search.obj_search_service import ObjectSearchMessage\n\n\n@serializable()\nclass Client(AbstractNodeClient):\n \"\"\"Client is an incredibly powerful abstraction in Syft. We assume that,\n no matter where a client is, it can figure out how to communicate with\n the Node it is supposed to point to. 
If I send you a client I have\n with all of the metadata in it, you should have all the information\n you need to know to interact with a node (although you might not\n have permissions - clients should not store private keys).\"\"\"\n\n def __init__(\n self,\n name: Optional[str],\n routes: List[Route],\n network: Optional[Location] = None,\n domain: Optional[Location] = None,\n device: Optional[Location] = None,\n vm: Optional[Location] = None,\n signing_key: Optional[SigningKey] = None,\n verify_key: Optional[VerifyKey] = None,\n ):\n name = f\"{name}\" if name is not None else None\n super().__init__(\n name=name, network=network, domain=domain, device=device, vm=vm\n )\n\n self.routes = routes\n self.default_route_index = 0\n\n gc_strategy_name = gc_get_default_strategy()\n self.gc = GarbageCollection(gc_strategy_name)\n\n # create a signing key if one isn't provided\n if signing_key is None:\n self.signing_key = SigningKey.generate()\n else:\n self.signing_key = signing_key\n\n # if verify key isn't provided, get verify key from signing key\n if verify_key is None:\n self.verify_key = self.signing_key.verify_key\n else:\n self.verify_key = verify_key\n\n self.install_supported_frameworks()\n\n self.store = StoreClient(client=self)\n\n @property\n def icon(self) -> str:\n icon = \"📡\"\n sub = []\n if self.vm is not None:\n sub.append(\"🍰\")\n if self.device is not None:\n sub.append(\"📱\")\n if self.domain is not None:\n sub.append(\"🏰\")\n if self.network is not None:\n sub.append(\"🔗\")\n\n if len(sub) > 0:\n icon = f\"{icon} [\"\n for s in sub:\n icon += s\n icon += \"]\"\n return icon\n\n @staticmethod\n def deserialize_client_metadata_from_node(\n metadata: Metadata_PB,\n ) -> Tuple[SpecificLocation, str, UID]:\n # string of bytes\n meta = sy.deserialize(blob=metadata)\n return meta.node, meta.name, meta.id\n\n def install_supported_frameworks(self) -> None:\n self.lib_ast = sy.lib.create_lib_ast(client=self)\n\n # first time we want to register for future updates\n self.lib_ast.register_updates(self)\n\n if self.lib_ast is not None:\n for attr_name, attr in self.lib_ast.attrs.items():\n setattr(self, attr_name, attr)\n\n # shortcut syft.lib.python to just python\n if hasattr(self.lib_ast, \"syft\"):\n try:\n lib_attr = getattr(self.lib_ast.syft, \"lib\", None)\n\n if lib_attr is not None:\n python_attr = getattr(lib_attr, \"python\", None)\n setattr(self, \"python\", python_attr)\n python_attr = getattr(lib_attr, \"adp\", None)\n setattr(self, \"adp\", python_attr)\n\n except Exception as e:\n critical(f\"Failed to set python attribute on client. 
{e}\")\n\n def add_me_to_my_address(self) -> None:\n traceback_and_raise(NotImplementedError)\n\n def register_in_memory_client(self, client: AbstractNodeClient) -> None:\n # WARNING: Gross hack\n route_index = self.default_route_index\n # this ID should be unique but persistent so that lookups are universal\n route = self.routes[route_index]\n if isinstance(route, SoloRoute):\n connection = route.connection\n if isinstance(connection, VirtualClientConnection):\n connection.server.node.in_memory_client_registry[\n client.address.target_id.id\n ] = client\n else:\n traceback_and_raise(\n Exception(\n \"Unable to save client reference without VirtualClientConnection\"\n )\n )\n else:\n traceback_and_raise(\n Exception(\"Unable to save client reference without SoloRoute\")\n )\n\n def register(self, client: AbstractNodeClient) -> None:\n debug(f\"> Registering {client.pprint} with {self.pprint}\")\n self.register_in_memory_client(client=client)\n msg = RegisterChildNodeMessage(\n lookup_id=client.id,\n child_node_client_address=client.address,\n address=self.address,\n )\n\n if self.network is not None:\n client.network = (\n self.network if self.network is not None else client.network\n )\n\n # QUESTION\n # if the client is a network and the domain is not none this will set it\n # on the network causing an exception\n # but we can't check if the client is a NetworkClient here because\n # this is a superclass of NetworkClient\n # Remove: if self.domain is not None:\n # then see the test line node_test.py:\n # bob_network_client.register(client=bob_domain_client)\n if self.domain is not None:\n client.domain = self.domain if self.domain is not None else client.domain\n\n if self.device is not None:\n client.device = self.device if self.device is not None else client.device\n\n if self.device != client.device:\n raise AttributeError(\"Devices don't match\")\n\n if self.vm is not None:\n client.vm = self.vm\n\n self.send_immediate_msg_without_reply(msg=msg)\n\n @property\n def id(self) -> UID:\n \"\"\"This client points to an node, this returns the id of that node.\"\"\"\n traceback_and_raise(NotImplementedError)\n\n # TODO fix the msg type but currently tensor needs SyftMessage\n\n def send_immediate_msg_with_reply(\n self,\n msg: Union[\n SignedImmediateSyftMessageWithReply,\n ImmediateSyftMessageWithReply,\n Any, # TEMPORARY until we switch everything to NodeRunnableMessage types.\n ],\n route_index: int = 0,\n ) -> SyftMessage:\n\n # relative\n from .node_service.simple.simple_messages import NodeRunnableMessageWithReply\n\n # TEMPORARY: if message is instance of NodeRunnableMessageWithReply then we need to wrap it in a SimpleMessage\n if isinstance(msg, NodeRunnableMessageWithReply):\n msg = msg.prepare(address=self.address, reply_to=self.address)\n\n route_index = route_index or self.default_route_index\n\n if isinstance(msg, ImmediateSyftMessageWithReply):\n output = (\n f\"> {self.pprint} Signing {msg.pprint} with \"\n + f\"{self.key_emoji(key=self.signing_key.verify_key)}\"\n )\n debug(output)\n msg = msg.sign(signing_key=self.signing_key)\n\n response = self.routes[route_index].send_immediate_msg_with_reply(msg=msg)\n if response.is_valid:\n # check if we have an ExceptionMessage to trigger a local exception\n # from a remote exception that we caused\n if isinstance(response.message, ExceptionMessage):\n exception_msg = response.message\n exception = exception_msg.exception_type(exception_msg.exception_msg)\n error(str(exception))\n traceback_and_raise(exception)\n else:\n return 
response.message\n\n traceback_and_raise(\n Exception(\"Response was signed by a fake key or was corrupted in transit.\")\n )\n\n # TODO fix the msg type but currently tensor needs SyftMessage\n\n def send_immediate_msg_without_reply(\n self,\n msg: Union[\n SignedImmediateSyftMessageWithoutReply, ImmediateSyftMessageWithoutReply\n ],\n route_index: int = 0,\n ) -> None:\n route_index = route_index or self.default_route_index\n\n if isinstance(msg, ImmediateSyftMessageWithoutReply):\n output = (\n f\"> {self.pprint} Signing {msg.pprint} with \"\n + f\"{self.key_emoji(key=self.signing_key.verify_key)}\"\n )\n debug(output)\n msg = msg.sign(signing_key=self.signing_key)\n debug(f\"> Sending {msg.pprint} {self.pprint} ➡️ {msg.address.pprint}\")\n self.routes[route_index].send_immediate_msg_without_reply(msg=msg)\n\n def send_eventual_msg_without_reply(\n self, msg: EventualSyftMessageWithoutReply, route_index: int = 0\n ) -> None:\n route_index = route_index or self.default_route_index\n output = (\n f\"> {self.pprint} Signing {msg.pprint} with \"\n + f\"{self.key_emoji(key=self.signing_key.verify_key)}\"\n )\n debug(output)\n signed_msg: SignedEventualSyftMessageWithoutReply = msg.sign(\n signing_key=self.signing_key\n )\n\n self.routes[route_index].send_eventual_msg_without_reply(msg=signed_msg)\n\n def __repr__(self) -> str:\n return f\"<Client pointing to node with id:{self.id}>\"\n\n def register_route(self, route: Route) -> None:\n self.routes.append(route)\n\n def set_default_route(self, route_index: int) -> None:\n self.default_route = route_index\n\n def _object2proto(self) -> Client_PB:\n client_pb = Client_PB(\n obj_type=get_fully_qualified_name(obj=self),\n id=sy.serialize(self.id),\n name=self.name,\n routes=[sy.serialize(route) for route in self.routes],\n network=self.network._object2proto() if self.network else None,\n domain=self.domain._object2proto() if self.domain else None,\n device=self.device._object2proto() if self.device else None,\n vm=self.vm._object2proto() if self.vm else None,\n )\n return client_pb\n\n @staticmethod\n def _proto2object(proto: Client_PB) -> \"Client\":\n module_parts = proto.obj_type.split(\".\")\n klass = module_parts.pop()\n obj_type = getattr(sys.modules[\".\".join(module_parts)], klass)\n\n obj = obj_type(\n name=proto.name,\n routes=[sy.deserialize(route) for route in proto.routes],\n network=sy.deserialize(proto.network)\n if proto.HasField(\"network\")\n else None,\n domain=sy.deserialize(proto.domain) if proto.HasField(\"domain\") else None,\n device=sy.deserialize(proto.device) if proto.HasField(\"device\") else None,\n vm=sy.deserialize(proto.vm) if proto.HasField(\"vm\") else None,\n )\n\n if type(obj) != obj_type:\n traceback_and_raise(\n TypeError(\n f\"Deserializing Client. Expected type {obj_type}. 
Got {type(obj)}\"\n )\n )\n\n return obj\n\n @staticmethod\n def get_protobuf_schema() -> GeneratedProtocolMessageType:\n return Client_PB\n\n @property\n def keys(self) -> str:\n verify = (\n self.key_emoji(key=self.signing_key.verify_key)\n if self.signing_key is not None\n else \"🚫\"\n )\n keys = f\"🔑 {verify}\"\n\n return keys\n\n def __hash__(self) -> Any:\n return hash(self.id)\n\n\nclass StoreClient:\n def __init__(self, client: Client) -> None:\n self.client = client\n\n @property\n def store(self) -> List[Pointer]:\n msg = ObjectSearchMessage(\n address=self.client.address, reply_to=self.client.address\n )\n\n results = getattr(\n self.client.send_immediate_msg_with_reply(msg=msg), \"results\", None\n )\n if results is None:\n traceback_and_raise(ValueError(\"TODO\"))\n\n # This is because of a current limitation in Pointer where we cannot\n # serialize a client object. TODO: Fix limitation in Pointer so that we don't need this.\n for result in results:\n result.gc_enabled = False\n result.client = self.client\n\n return results\n\n def __len__(self) -> int:\n \"\"\"Return the number of items in the object store we're allowed to know about\"\"\"\n return len(self.store)\n\n def __iter__(self) -> Iterator[Any]:\n return self.store.__iter__()\n\n def __getitem__(self, key: Union[str, int]) -> Pointer:\n if isinstance(key, str):\n matches = 0\n match_obj: Optional[Pointer] = None\n\n for obj in self.store:\n if key in obj.tags:\n matches += 1\n match_obj = obj\n if matches == 1 and match_obj is not None:\n return match_obj\n elif matches > 1:\n traceback_and_raise(KeyError(\"More than one item with tag:\" + str(key)))\n else:\n # If key does not math with any tags, we then try to match it with id string.\n # But we only do this if len(key)>=5, because if key is too short, for example\n # if key=\"a\", there are chances of mismatch it with id string, and I don't\n # think the user pass a key such short as part of id string.\n if len(key) >= 5:\n for obj in self.store:\n if key in str(obj.id_at_location.value).replace(\"-\", \"\"):\n return obj\n else:\n traceback_and_raise(\n KeyError(\n f\"No such item found for tag: {key}, and we \"\n + \"don't consider it as part of id string because its too short.\"\n )\n )\n\n traceback_and_raise(KeyError(\"No such item found for id:\" + str(key)))\n if isinstance(key, int):\n return self.store[key]\n else:\n traceback_and_raise(KeyError(\"Please pass in a string or int key\"))\n\n def __repr__(self) -> str:\n return repr(self.store)\n\n @property\n def pandas(self) -> pd.DataFrame:\n obj_lines: List[Dict[str, Any]] = list()\n for obj in self.store:\n obj_lines.append(\n {\n \"ID\": obj.id_at_location,\n \"Tags\": obj.tags,\n \"Description\": obj.description,\n \"object_type\": obj.object_type,\n }\n )\n return pd.DataFrame(obj_lines)\n\n def _repr_html_(self) -> str:\n return self.pandas._repr_html_()\n"
] |
[
[
"pandas.DataFrame"
]
] |
AyufhSri/GANAccImprover
|
[
"eff3a944bd6e5d9761ec815f28c0d32c87096308"
] |
[
"utils.py"
] |
[
"import torch\nimport torch.nn as nn\nimport os\nimport numpy as np\nimport shutil\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\n\n\n\n\ndef label_level_loss(model,data, target,criterion,args):\n model.eval()\n n=10\n if args.is_cifar100:\n n=100\n l=[0]*n\n with torch.no_grad():\n output = model(data)\n for i in range(data.shape[0]):\n l[int(target[i])]+=(criterion(output[i].reshape([1,n]),target[i].reshape([1])))/data.shape[0]\n return l\n\n\nclass AvgrageMeter(object):\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0\n\n def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt\n\n\ndef accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0)\n res.append(correct_k.mul_(100.0/batch_size))\n return res\n\n\nclass Cutout(object):\n def __init__(self, length):\n self.length = length\n\n def __call__(self, img):\n h, w = img.size(1), img.size(2)\n mask = np.ones((h, w), np.float32)\n y = np.random.randint(h)\n x = np.random.randint(w)\n\n y1 = np.clip(y - self.length // 2, 0, h)\n y2 = np.clip(y + self.length // 2, 0, h)\n x1 = np.clip(x - self.length // 2, 0, w)\n x2 = np.clip(x + self.length // 2, 0, w)\n\n mask[y1: y2, x1: x2] = 0.\n mask = torch.from_numpy(mask)\n mask = mask.expand_as(img)\n img *= mask\n return img\n\n\ndef _data_transforms_cifar10(args):\n CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]\n CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]\n\n train_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ])\n if args.cutout:\n train_transform.transforms.append(Cutout(args.cutout_length))\n\n valid_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ])\n return train_transform, valid_transform\n\n\ndef _data_transforms_cifar100(args):\n CIFAR_MEAN = [0.5071, 0.4867, 0.4408]\n CIFAR_STD = [0.2675, 0.2565, 0.2761]\n\n train_transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ])\n if args.cutout:\n train_transform.transforms.append(Cutout(args.cutout_length))\n\n valid_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ])\n return train_transform, valid_transform\n\n\ndef count_parameters_in_MB(model):\n return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if \"auxiliary\" not in name)/1e6\n\n\ndef save_checkpoint(state, is_best, save):\n filename = os.path.join(save, 'checkpoint.pth.tar')\n torch.save(state, filename)\n if is_best:\n best_filename = os.path.join(save, 'model_best.pth.tar')\n shutil.copyfile(filename, best_filename)\n\n\ndef save(model, model_path):\n torch.save(model.state_dict(), model_path)\n\n\ndef load(model, model_path):\n model.load_state_dict(torch.load(model_path))\n\n\ndef drop_path(x, drop_prob):\n if drop_prob > 0.:\n keep_prob = 1.-drop_prob\n mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))\n x.div_(keep_prob)\n x.mul_(mask)\n return x\n\n\ndef create_exp_dir(path, 
scripts_to_save=None):\n if not os.path.exists(path):\n os.mkdir(path)\n print('Experiment dir : {}'.format(path))\n\n if scripts_to_save is not None:\n os.mkdir(os.path.join(path, 'scripts'))\n for script in scripts_to_save:\n dst_file = os.path.join(path, 'scripts', os.path.basename(script))\n shutil.copyfile(script, dst_file)\n\n\nif __name__=='__main__':\n print('yes')\n\n\n\n"
] |
[
[
"numpy.clip",
"torch.load",
"torch.from_numpy",
"numpy.ones",
"torch.no_grad",
"torch.save",
"numpy.random.randint"
]
] |
gallantlab/Eyetracking
|
[
"8cbf9251897672ca6d8ce6028bca4f23d7973a80"
] |
[
"EyetrackingUtilities.py"
] |
[
"import numpy\nfrom enum import IntEnum\ntry:\n\timport cPickle\nexcept:\n\timport _pickle as cPickle\nimport re\nimport io\n\nimport multiprocessing\n\ndef parallelize(function, iterable, nThreads = multiprocessing.cpu_count()):\n\t\"\"\"\n\tParallelizes a function. Copied from pycortex so as to not have that import\n\t@param function:\tfunction to parallelize\n\t@param iterable:\titerable object for each instance of the function\n\t@param nThreads:\tnumber of threads to use\n\t@type function:\tfunction with the signature Function(arg) -> value\n\t@type iterable: list<T>\n\t@type nThreads:\tint\n\t@return: results in a list for each instance\n\t@rtype: list<T>\n\t\"\"\"\n\tinputQueue = multiprocessing.Queue()\n\toutputQueue = multiprocessing.Queue()\n\tlength = multiprocessing.Value('i', 0)\n\n\tdef _fill(iterable, nThreads, inputQueue, outputQueue):\n\t\tfor data in enumerate(iterable):\n\t\t\tinputQueue.put(data)\n\t\t\tlength.value += 1\n\t\tfor _ in range(nThreads * 2):\n\t\t\tinputQueue.put((-1, -1))\n\n\tdef _func(proc, inputQueue, outputQueue):\n\t\tindex, data = inputQueue.get()\n\t\twhile index != -1:\n\t\t\toutputQueue.put((index, function(data)))\n\t\t\tindex, data = inputQueue.get()\n\n\tfiller = multiprocessing.Process(target = _fill, args = (iterable, nThreads, inputQueue, outputQueue))\n\tfiller.daemon = True\n\tfiller.start()\n\tfor i in range(nThreads):\n\t\tproc = multiprocessing.Process(target = _func, args = (i, inputQueue, outputQueue))\n\t\tproc.daemon = True\n\t\tproc.start()\n\n\ttry:\n\t\titerlen = len(iterable)\n\texcept:\n\t\tfiller.join()\n\t\titerlen = length.value\n\n\tdata = [[]] * iterlen\n\tfor _ in range(iterlen):\n\t\tindex, result = outputQueue.get()\n\t\tdata[index] = result\n\n\treturn data\n\n\ndef TimeToSeconds(time):\n\t\"\"\"\n\tConverts a timestamp to just seconds elapsed\n\t@param time: \tHH:MM:SS.SSS timestamp\n\t@type time:\ttuple<int, int, int, int> \n\t@return: \tseconds equivalence\n\t@rtype:\t\tfloat\n\t\"\"\"\n\treturn 3600 * time[0] + 60 * time[1] + time[2] + 0.001 * time[3]\n\n\nclass AvotecFile(IntEnum):\n\t\"\"\"\n\tEnum for files that can be parsed for TTL information\n\t\"\"\"\n\tHistory = 0,\n\tEvents = 1\n\n\ndef ParseRecordsForStartTTLs(fileName, useMovieMarkers = True, TR = 2.0, onset = False, threshold = 5.0,\n\t\t\t\t\t\t\t fileType = AvotecFile.History, index = 0):\n\t\"\"\"\n\tParses either the history or events file from Avotec for the start TTL timings for runs. Use index to\n\tspecify the nth TTL to return\n\t@param fileName:\t\tname of file to parse\n\t@param useMovieMarkers:\tuse the start/stop save movie entries to calculate runs? 
if true, the TR, onset, and threshold arguments are useless\n\t@param TR:\t\t\t\tTR length used\n\t@param onset:\t\t\tuse the TTL pulse HI instead of the LO value?\n\t@param threshold:\t\tmultiple of the TR interval to use as a threshold as a break?\n\t@param fileType:\t\tare we parsing a history or events file?\n\t@param index:\t\t\t0-indexed index of TTL to return\n\t@type fileName:\t\t\tstr\n\t@type useMovieMarkers:\tbool\n\t@type TR:\t\t\t\tfloat\n\t@type onset:\t\t\tbool\n\t@type threshold:\t\tfloat\n\t@type fileType:\t\t\tAvotecFile\n\t@type index:\t\t\tint\n\t@return:\tfirst value is the timestamp of the first TTL in a run, and the second is number of TRs in each run\n\t@rtype:\tlist<tuple<tuple<float>, int>>\n\t\"\"\"\n\n\truns = None\n\tif (index < 0):\n\t\tindex = 0\n\t\tprint('negative index set to 0')\n\tif (fileType == AvotecFile.History):\n\t\truns = ParseHistoryForTTLs(fileName, useMovieMarkers, TR, onset, threshold)\n\telif (fileType == AvotecFile.Events):\n\t\truns = ParseEventsForTTLs(fileName, TR, onset, threshold)\n\telse:\n\t\traise ValueError('Unknown file type')\n\treturn [(run[0][index], run[1]) for run in runs]\n\n\ndef ParseRecordsForEndTTLs(fileName, useMovieMarkers = True, TR = 2.0, onset = False, threshold = 5.0,\n\t\t\t\t\t\t fileType = AvotecFile.History):\n\t\"\"\"\n\tParses either the history or events file from Avotec for the last TTL in each run\n\t@param fileName:\t\tname of file from avotec to parse\n\t@param useMovieMarkers:\tuse the start/stop save movie entries to calculate runs? if true, the TR, onset, and threshold arguments are useless\n\t@param TR:\t\t\t\tTR length used\n\t@param onset:\t\t\tuse the TTL pulse HI instead of the LO value?\n\t@param threshold:\t\tmultiple of the TR interval to use as a threshold as a break?\n\t@param fileType:\t\tare we parsing a history or events file?\n\t@type fileName:\t\t\tstr\n\t@type useMovieMarkers:\tbool\n\t@type TR:\t\t\t\tfloat\n\t@type onset:\t\t\tbool\n\t@type threshold:\t\tfloat\n\t@type fileType:\t\t\tAvotecFile\n\t@return:\tfirst value is the timestamp of the last TTL in a run, and the second is number of TRs in each run\n\t@rtype:\tlist<tuple<tuple<float>, int>>\n\t\"\"\"\n\n\truns = None\n\tif (fileType == AvotecFile.History):\n\t\truns = ParseHistoryForTTLs(fileName, useMovieMarkers, TR, onset, threshold)\n\telif (fileType == AvotecFile.Events):\n\t\truns = ParseEventsForTTLs(fileName, TR, onset, threshold)\n\telse:\n\t\traise ValueError('Unknown file type')\n\treturn [(run[0][-1], run[1]) for run in runs]\n\n\ndef ParseHistoryForTTLs(historyFileName, useMovieMarkers = True, TR = 2.0, onset = False, threshold = 1.5):\n\t\"\"\"\n\tParses the history file from Avotec for the TTLs in each run\n\t@param historyFileName:\tname of history file from avotec\n\t@param useMovieMarkers:\tuse the start/stop save movie entries to calculate runs? 
if true, the TR, onset, and threshold arguments are useless\n\t@param TR:\t\t\t\tTR length used\n\t@param onset:\t\t\tuse the TTL pulse HI instead of the LO value?\n\t@param threshold:\t\tmultiple of the TR interval to use as a threshold as a break?\n\t@type historyFileName:\tstr\n\t@type useMovieMarkers:\tbool\n\t@type TR:\t\t\t\tfloat\n\t@type onset:\t\t\tbool\n\t@type threshold:\t\tfloat\n\t@return:\ttimestamps of TTLs in each run, each run is a list of TTL timestamps and the number of TTLs\n\t@rtype:\tlist<tuple<list<float>, int>>\n\t\"\"\"\n\n\thistoryFile = open(historyFileName, 'r')\n\tTTLtoken = 'HI' if onset else 'LO'\n\tTTLs = []\n\tlastTime = (0, 0, 0, 0)\n\tduplicates = 0\n\n\truns = []\n\tthisRun = []\n\n\tif useMovieMarkers:\n\t\tnTTLs = 0\n\t\tisStarted = False\n\t\tline = historyFile.readline()\n\t\twhile line != '':\n\t\t\ttokens = line.split()\n\t\t\tif len(tokens) > 0:\n\t\t\t\tif tokens[-1] == 'saveMovie[0]:':\n\t\t\t\t\tisStarted = True\n\t\t\t\t\tnTTLs = 0\n\t\t\t\telif tokens[3] == 'Closing':\n\t\t\t\t\tisStarted = False\n\t\t\t\t\tif (nTTLs > 0):\n\t\t\t\t\t\truns.append((thisRun, nTTLs))\n\t\t\t\t\tthisRun = []\n\t\t\t\tif isStarted:\n\t\t\t\t\tif tokens[-1] == TTLtoken and tokens[4] == 'TTL':\n\t\t\t\t\t\ttime = tuple([int(token) for token in re.split('[:\\.]', tokens[0])])\n\t\t\t\t\t\tif ((TimeToSeconds(time) - TimeToSeconds(lastTime)) > 0.1):\t# long enough of an interval since last one such that it's not a duplicate\n\t\t\t\t\t\t\tnTTLs += 1\n\t\t\t\t\t\t\tthisRun.append(time)\n\t\t\t\t\t\t\tlastTime = time\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tduplicates += 1\n\t\t\tline = historyFile.readline()\n\n\telse:\n\t\tline = historyFile.readline()\n\t\twhile line != '':\n\t\t\ttokens = line.split()\n\t\t\tif len(tokens) > 0 and tokens[-1] == TTLtoken:\n\t\t\t\ttime = tuple([int(token) for token in re.split('[:\\.]', tokens[0])])\n\t\t\t\tif (TimeToSeconds(time) - TimeToSeconds(lastTime) > 0.1): # long enough of an interval since last one such that it's not a duplicate\n\t\t\t\t\tTTLs.append(time)\n\t\t\t\t\tlastTime = time\n\t\t\t\telse:\n\t\t\t\t\tduplicates += 1\n\t\t\tline = historyFile.readline()\n\n\t\tnTRs = 1\n\t\tthisRun.append(TTLs[0])\n\t\tfor i in range(1, len(TTLs) - 1):\n\t\t\tthis = TTLs[i]\n\t\t\tlast = TTLs[i - 1]\n\t\t\tdt = TimeToSeconds(this) - TimeToSeconds(last)\n\t\t\tif dt > threshold * TR:\n\t\t\t\truns.append((thisRun, nTRs))\n\t\t\t\tthisRun = [this]\n\t\t\t\tnTRs = 1\n\t\t\telse:\n\t\t\t\tthisRun.append(this)\n\t\t\t\tnTRs += 1\n\t\truns.append((thisRun, nTRs + 1)) # account for last run without a faraway TTL\n\n\thistoryFile.close()\n\tprint('{} duplicated TTLs'.format(duplicates))\n\treturn runs\n\n\ndef ParseEventsForTTLs(eventsFileName, TR = 2.0, onset = False, threshold = 5.0):\n\t\"\"\"\n\tParses the events file from Avotec for TTLs. 
Use if history file is not available.\n\tThe events files does not contain save movie start/stops, so use the history file if possible\n\t@param eventsFileName: \tname of events file from avotec\n\t@param TR: \t\t\t\tTR duration in seconds\n\t@param onset: \t\t\tuse the TTL pulse onset instead of the offset for timestamps?\n\t@param threshold: \t\tmultiple of the TR interval to use as a threshold as a break between runs\n\t@type eventsFileName:\tstr\n\t@type TR:\t\t\t\tfloat\n\t@type onset:\t\t\tbool\n\t@type threshold:\t\tfloat\n\t@return: timestamps of TTLs in each run, each run is a list of TTL timestamps and the number of TTLs\n\t@rtype: list<tuple<list<float>, int>>\n\t\"\"\"\n\n\teventsFile = open(eventsFileName, 'r')\n\tTTLtoken = 'S' if onset else 's'\n\tTTLs = []\n\tlastTime = (0, 0, 0, 0)\n\tduplicates = 0\n\n\truns = []\n\tthisRun = []\n\n\tline = eventsFile.readline()\n\twhile line != '':\n\t\ttokens = line.split()\n\t\tif len(tokens) > 0 and tokens[-1] == TTLtoken:\n\t\t\ttime = []\n\t\t\tfor token in re.split('[:\\.]', re.match('[0-9\\. ]+:[0-9\\. ]+:[0-9 ]+\\.[0-9]+', line).group()):\n\t\t\t\tif (len(token) > 2):\t# the milliseconds have rather high precision\n\t\t\t\t\ttime.append(int(numpy.round(float(token) * 0.001)))\n\t\t\t\telse:\n\t\t\t\t\ttime.append(int(token))\n\t\t\ttime = tuple(time)\n\t\t\tif (TimeToSeconds(time) - TimeToSeconds(lastTime) > 0.1): # long enough of an interval since last one such that it's not a duplicate\n\t\t\t\tTTLs.append(time)\n\t\t\t\tlastTime = time\n\t\t\telse:\n\t\t\t\tduplicates += 1\n\t\tline = eventsFile.readline()\n\n\tnTRs = 1\n\tthisRun.append(TTLs[0])\n\tfor i in range(1, len(TTLs) - 1):\n\t\tthis = TTLs[i]\n\t\tlast = TTLs[i - 1]\n\t\tdt = TimeToSeconds(this) - TimeToSeconds(last)\n\t\tif dt > threshold * TR:\n\t\t\truns.append((thisRun, nTRs))\n\t\t\tthisRun = [this]\n\t\t\tnTRs = 1\n\t\telse:\n\t\t\tthisRun.append(this)\n\t\t\tnTRs += 1\n\truns.append((thisRun, nTRs + 1)) # account for last run without a faraway TTL\n\n\teventsFile.close()\n\tprint('{} duplicated TTLs'.format(duplicates))\n\tfor i in range(len(runs)):\n\t\tduration = TimeToSeconds(runs[i][0][-1]) - TimeToSeconds(runs[i][0][0])\n\t\texpectedTRs = int(numpy.round(duration / TR))\n\t\tif (i == len(runs) - 1):\n\t\t\texpectedTRs += 1\t# account for last run without a faraway TTL\n\t\tprint('Run {} expected {} TTLs from duration, actual recorded {} TTLs'.format(i + 1, expectedTRs, len(runs[i][0])))\n\treturn runs\n\n\ndef SaveNPY(array, zipfile, name):\n\t\"\"\"\n\tSaves a numpy array into a zip\n\t@param array: \tnumpy array\n\t@param zipfile: ZipFile to write into\n\t@param name: \tname to save\n\t@type array: \tnumpy.ndarray\n\t@type zipfile: \tZipFile\n\t@type name: \tstr\n\t\"\"\"\n\tarrayFile = io.BytesIO()\n\tnumpy.save(arrayFile, array)\n\tarrayFile.seek(0)\n\tzipfile.writestr(name, arrayFile.read())\n\tarrayFile.close()\n\tdel arrayFile\n\n\ndef ReadNPY(zipfile, subFileName):\n\t\"\"\"\n\tReads a saved npy from inside a zip\n\t@param zipfile: \t\tZipFile to read from\n\t@param subFileName: \tfile name\n\t@type zipfile: \t\t\tZipFile\n\t@type subFileName: \t\tstr\n\t@return: array\n\t@rtype: numpy.ndarray\n\t\"\"\"\n\tarrayFile = io.BytesIO(zipfile.read(subFileName))\n\tarrayFile.seek(0)\n\tout = numpy.load(arrayFile)\n\tdel arrayFile\n\treturn out\n\n\ndef ReadPickle(zipfile, subFileName):\n\t\"\"\"\n\tReads a saved pickle from inside the zip\n\t@param zipfile: \t\tZipFile to read from\n\t@param subFileName: \tfile name\n\t@type zipfile: 
\t\t\tZipFile\n\t@type subFileName: \t\tstr\n\t@return: object\n\t@rtype: object\n\t\"\"\"\n\tobjFile = io.BytesIO(zipfile.read(subFileName))\n\tobjFile.seek(0)\n\tout = cPickle.load(objFile)\n\tdel objFile\n\treturn out\n\n\ndef SavePickle(obj, zipfile, name):\n\t\"\"\"\n\tPickles an object into a zip\n\t@param obj: \tobject to save\n\t@param zipfile: ZipFile to write into\n\t@param name: \tname to save\n\t@type obj: \t\tobject\n\t@type zipfile: \tZipFile\n\t@type name: \tstr\n\t\"\"\"\n\tpickleFile = io.BytesIO()\n\tcPickle.dump(obj, pickleFile)\n\tpickleFile.seek(0)\n\tzipfile.writestr(name, pickleFile.read())\n\tpickleFile.close()\n\tdel pickleFile"
] |
[
[
"numpy.round",
"numpy.load",
"numpy.save"
]
] |
scivision/robust-flow
|
[
"d9b52a70e62995cf06743275509b9ac726df2b51"
] |
[
"BlackRobustFlow.py"
] |
[
"#!/usr/bin/env python3\nimport logging\nimport imageio\nfrom pathlib import Path\nimport numpy as np\nfrom robustflow import runblack, loadflow\n\ntry:\n from matplotlib.pyplot import figure, show\nexcept ImportError:\n figure = show = None\n\n\ndef main(stem: Path, frames, outpath: Path):\n stem = Path(stem).expanduser()\n\n runblack(stem, frames, outpath)\n\n u, v = loadflow(stem, frames, outpath)\n # %%\n if (u[0, 0] == u).all():\n logging.error(f\"all elements of U identical {u[0,0]}\")\n\n if (v[0, 0] == v).all():\n logging.error(f\"all elements of V identical {v[0,0]}\")\n\n imgfn = stem.parent / (stem.name + f\"{frames[1]}.pgm\")\n img = imageio.imread(imgfn)\n y, x = img.shape\n\n s = 10\n X = np.arange(0, x, s)\n Y = np.arange(0, y, s)\n X, Y = np.meshgrid(X, Y)\n\n if figure is None:\n return\n\n ax = figure().gca()\n ax.imshow(img, cmap=\"gray\", origin=\"upper\")\n ax.quiver(X, Y, u[::s, ::s], v[::s, ::s])\n\n ax.set_title(f\"{imgfn} robust optical flow\")\n\n show()\n\n\nif __name__ == \"__main__\":\n from argparse import ArgumentParser\n\n p = ArgumentParser()\n p.add_argument(\"pgmstem\", help=\"stem of pgm files\")\n p.add_argument(\n \"-f\",\n \"--frames\",\n help=\"start stop frame indices\",\n nargs=2,\n type=int,\n default=[0, 1],\n )\n p.add_argument(\"-o\", \"--outpath\", default=\"results\")\n p = p.parse_args()\n\n main(p.pgmstem, p.frames, p.outpath)\n"
] |
[
[
"numpy.arange",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
csdongxian/PaddleSleeve
|
[
"4322d70ec21460e657a57f2fa9b09e5efc420efb",
"4322d70ec21460e657a57f2fa9b09e5efc420efb"
] |
[
"AdvBox/attacks/gradient_method.py",
"AdvBox/examples/image_adversarial_training/cifar10_tutorial_fgsm_advtraining.py"
] |
[
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module provides the implementation for FGSM attack method.\n\"\"\"\nfrom __future__ import division\n\nimport logging\nfrom collections import Iterable\n\nimport numpy as np\nimport paddle\nfrom .base import Attack\n\n\n__all__ = [\n 'GradientMethodAttack', 'FastGradientSignMethodAttack', 'FGSM',\n 'FastGradientSignMethodTargetedAttack', 'FGSMT',\n 'BasicIterativeMethodAttack', 'BIM',\n 'IterativeLeastLikelyClassMethodAttack', 'ILCM',\n 'MomentumIteratorAttack', 'MIFGSM',\n 'ProjectedGradientDescentAttack', 'PGD'\n]\n\n\nclass GradientMethodAttack(Attack):\n \"\"\"\n This class implements gradient attack method, and is the base of FGSM, BIM, ILCM, etc.\n \"\"\"\n def __init__(self, model, support_targeted=True, pgd_flag=False):\n \"\"\"\n Args:\n model: An instance of a paddle model to be attacked.\n support_targeted(Does): this attack method support targeted.\n pgd_flag: place it true if use pgd\n \"\"\"\n super(GradientMethodAttack, self).__init__(model)\n self.support_targeted = support_targeted\n self.pgd_flag = pgd_flag\n\n def _apply(self,\n adversary,\n norm_ord=None,\n epsilons=0.01,\n epsilon_steps=10,\n steps=100,\n perturb=16.0 / 256,\n ):\n \"\"\"\n Apply the gradient attack method.\n Args:\n adversary: The Adversary object.\n norm_ord: Order of the norm, such as np.inf, 1, 2, etc. It can't be 0.\n epsilons: Attack step size (input variation). 
Largest step size if epsilons is not iterable.\n epsilon_steps: The number of Epsilons' iteration for each attack iteration.\n steps: The number of attack iteration.\n\n Returns:\n adversary(Adversary): The Adversary object.\n \"\"\"\n if norm_ord is None:\n norm_ord = np.inf\n\n if norm_ord == 0:\n raise ValueError(\"L0 norm is not supported!\")\n\n if not self.support_targeted:\n if adversary.is_targeted_attack:\n raise ValueError(\n \"This attack method doesn't support targeted attack!\")\n\n if not isinstance(epsilons, Iterable):\n if epsilon_steps == 1:\n epsilons = [epsilons]\n else:\n epsilons = np.linspace(0, epsilons, num=epsilon_steps)\n assert self.model.channel_axis == adversary.original.ndim\n assert (self.model.channel_axis == 1 or\n self.model.channel_axis == adversary.original.shape[0] or\n self.model.channel_axis == adversary.original.shape[-1])\n\n original_label = adversary.original_label\n min_, max_ = self.model.bounds\n adv_img = adversary.original\n if len(adv_img.shape) < 4:\n adv_img = np.expand_dims(adv_img, axis=0)\n\n adv_img = paddle.to_tensor(adv_img, dtype='float32', place=self._device)\n adv_img.stop_gradient = False\n\n if adversary.is_targeted_attack:\n target_label = adversary.target_label\n target_label = paddle.to_tensor(target_label, dtype='int64', place=self._device)\n for epsilon in epsilons[:]:\n if epsilon == 0.0:\n continue\n\n for step in range(steps):\n if adversary.is_targeted_attack:\n gradient = - self.model.gradient(adv_img, target_label)\n else:\n gradient = self.model.gradient(adv_img, original_label)\n\n gradient = paddle.to_tensor(gradient, dtype='float32', place=self._device)\n if norm_ord == np.inf:\n gradient_norm = paddle.sign(gradient)\n else:\n gradient_norm = gradient / self._norm(gradient.numpy(), ord=norm_ord)\n\n if len(adv_img.shape) < 4:\n adv_img = np.expand_dims(adv_img.numpy(), axis=0)\n\n if self.pgd_flag:\n # linf\n adv_img = adv_img + gradient_norm * epsilon\n clip_max = np.clip(adv_img.numpy() * (1.0 + perturb), min_, max_)\n clip_min = np.clip(adv_img.numpy() * (1.0 - perturb), min_, max_)\n adv_img = np.clip(adv_img.numpy(), clip_min, clip_max) \n adv_label = np.argmax(self.model.predict(paddle.to_tensor(adv_img)))\n adv_img = paddle.to_tensor(adv_img)\n else:\n adv_img = adv_img + gradient_norm * epsilon\n adv_label = np.argmax(self.model.predict(adv_img))\n\n if adversary.try_accept_the_example(np.squeeze(adv_img.numpy()), adv_label):\n return adversary\n\n return adversary\n\n @staticmethod\n def _norm(a, ord):\n if a.ndim == 1 or a.ndim == 2:\n return np.linalg.norm(a, ord=ord)\n # channel first\n elif a.ndim == a.shape[0]:\n norm_shape = a.ndim * a.shape[1:][0] * a.shape[1:][0]\n # channel last\n else:\n norm_shape = a.ndim * a.shape[:-1][0] * a.shape[:-1][1]\n return np.linalg.norm(a.reshape(norm_shape), ord=ord)\n\n\nclass FastGradientSignMethodTargetedAttack(GradientMethodAttack):\n \"\"\"\n \"Fast Gradient Sign Method\" is extended to support targeted attack.\n \"Fast Gradient Sign Method\" was originally implemented by Goodfellow et\n al. (2015) with the infinity norm.\n\n Paper link: https://arxiv.org/abs/1412.6572\n \"\"\"\n\n def _apply(self, adversary, **kwargs):\n \"\"\"\n Launch an attack process.\n Args:\n adversary: Adversary. 
An adversary instance with initial status.\n **kwargs: Other named arguments.\n\n Returns:\n An adversary status with changed status.\n \"\"\"\n\n return GradientMethodAttack._apply(self, adversary=adversary, **kwargs)\n\n\nclass FastGradientSignMethodAttack(FastGradientSignMethodTargetedAttack):\n \"\"\"\n This attack was originally implemented by Goodfellow et al. (2015) with the\n infinity norm, and is known as the \"Fast Gradient Sign Method\".\n\n Paper link: https://arxiv.org/abs/1412.6572\n \"\"\"\n\n def __init__(self, model):\n \"\"\"\n FGSM attack init.\n Args:\n model: PaddleWhiteBoxModel.\n \"\"\"\n\n super(FastGradientSignMethodAttack, self).__init__(model, False)\n\n\nclass IterativeLeastLikelyClassMethodAttack(GradientMethodAttack):\n \"\"\"\n \"Iterative Least-likely Class Method (ILCM)\" extends \"BIM\" to support\n targeted attack.\n \"The Basic Iterative Method (BIM)\" is to extend \"FSGM\". \"BIM\" iteratively\n take multiple small steps while adjusting the direction after each step.\n\n Paper link: https://arxiv.org/abs/1607.02533\n \"\"\"\n\n def _apply(self, adversary, epsilons=0.01, steps=1000):\n \"\"\"\n Launch an attack process.\n Args:\n adversary: Adversary. An adversary instance with initial status.\n epsilons: float. A single step perturbation length.\n steps: int. Total steps number.\n\n Returns:\n An adversary status with changed status.\n \"\"\"\n return GradientMethodAttack._apply(\n self,\n adversary=adversary,\n norm_ord=np.inf,\n epsilons=epsilons,\n steps=steps)\n\n\nclass BasicIterativeMethodAttack(IterativeLeastLikelyClassMethodAttack):\n \"\"\"\n FGSM is a one-step method. \"The Basic Iterative Method (BIM)\" iteratively\n take multiple small steps while adjusting the direction after each step.\n Paper link: https://arxiv.org/abs/1607.02533\n \"\"\"\n\n def __init__(self, model):\n \"\"\"\n\n Args:\n model: PaddleWhiteBoxModel.\n \"\"\"\n super(BasicIterativeMethodAttack, self).__init__(model, False)\n\n\nclass MomentumIteratorAttack(GradientMethodAttack):\n \"\"\"\n The Momentum Iterative Fast Gradient Sign Method (Dong et al. 2017).\n This method won the first places in NIPS 2017 Non-targeted Adversarial\n Attacks and Targeted Adversarial Attacks. The original paper used\n hard labels for this attack; no label smoothing. inf norm.\n Paper link: https://arxiv.org/pdf/1710.06081.pdf\n \"\"\"\n\n def __init__(self, model, support_targeted=True):\n \"\"\"\n MIFGSM attack init.\n Args:\n model: PaddleWhiteBoxModel.\n support_targeted: bool.\n \"\"\"\n super(MomentumIteratorAttack, self).__init__(model)\n self.support_targeted = support_targeted\n\n def _apply(self,\n adversary,\n norm_ord=None,\n epsilons=0.1,\n steps=100,\n epsilon_steps=100,\n decay_factor=1):\n \"\"\"\n Apply the momentum iterative gradient attack method.\n Args:\n adversary: Adversary. An adversary instance with initial status.\n norm_ord: int. Order of the norm, such as np.inf, 1, 2, etc. It can't be 0.\n epsilons: (list|tuple|float). Attack step size (input variation). Largest step size if epsilons is not iterable.\n steps: int. The number of attack iteration.\n epsilon_steps: int. The number of Epsilons' iteration for each attack iteration.\n decay_factor: float. 
The decay factor for the momentum term.\n\n Returns:\n An adversary status with changed status.\n \"\"\"\n if norm_ord is None:\n norm_ord = np.inf\n\n if norm_ord == 0:\n raise ValueError(\"L0 norm is not supported!\")\n\n if not self.support_targeted:\n if adversary.is_targeted_attack:\n raise ValueError(\n \"This attack method doesn't support targeted attack!\")\n\n if not isinstance(epsilons, Iterable):\n epsilons = np.linspace(0, epsilons, num=epsilon_steps)\n\n min_, max_ = self.model.bounds\n\n original_label = adversary.original_label\n original_label = paddle.to_tensor(original_label, dtype='int64', place=self._device)\n\n if adversary.is_targeted_attack:\n target_label = adversary.target_label\n target_label = paddle.to_tensor(target_label, dtype='int64', place=self._device)\n\n for epsilon in epsilons[:]:\n if epsilon == 0.0:\n continue\n\n adv_img = adversary.original\n if len(adv_img.shape) < 4:\n adv_img = np.expand_dims(adv_img, axis=0)\n adv_img = paddle.to_tensor(adv_img, dtype='float32', place=self._device)\n adv_img.stop_gradient = False\n\n momentum = 0\n for step in range(steps):\n\n if adversary.is_targeted_attack:\n gradient = - self.model.gradient(adv_img, target_label)\n else:\n gradient = self.model.gradient(adv_img, original_label)\n\n gradient = np.squeeze(gradient)\n velocity = gradient / self._norm(gradient, ord=1)\n velocity = np.expand_dims(velocity, axis=0)\n\n momentum = decay_factor * momentum + velocity\n if norm_ord == np.inf:\n normalized_grad = np.sign(momentum)\n else:\n normalized_grad = self._norm(momentum, ord=norm_ord)\n\n perturbation = epsilon * normalized_grad\n perturbation = paddle.to_tensor(perturbation)\n adv_img = adv_img + perturbation\n adv_label = np.argmax(self.model.predict(adv_img))\n\n logging.info('step={}, epsilon = {:.5f}, pre_label = {}, adv_label={}' .format(step,\n epsilon,\n original_label,\n adv_label))\n\n if adversary.try_accept_the_example(np.squeeze(adv_img.numpy()), adv_label):\n return adversary\n\n return adversary\n\n\nclass ProjectedGradientDescentAttack(GradientMethodAttack):\n \"\"\"\n Projected Gradient Descent\n Towards deep learning models resistant to adversarial attacks, A. Madry, A. Makelov, L. Schmidt, D. Tsipras, \n and A. Vladu, ICLR 2018\n \"\"\"\n\n def __init__(self, model, support_targeted=True, pgd_flag=True):\n \"\"\"\n PGD attack init.\n Args:\n model: PaddleWhiteBoxModel.\n \"\"\"\n super(ProjectedGradientDescentAttack, self).__init__(model)\n self.support_targeted = support_targeted\n self.pgd_flag = pgd_flag \n\n\nFGSM = FastGradientSignMethodAttack\nFGSMT = FastGradientSignMethodTargetedAttack\nBIM = BasicIterativeMethodAttack\nILCM = IterativeLeastLikelyClassMethodAttack\nMIFGSM = MomentumIteratorAttack\nPGD = ProjectedGradientDescentAttack\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\npaddle2 model adversarial training demo on CIFAR10 data\n\"\"\"\nimport sys\nsys.path.append(\"../..\")\nimport paddle\nimport paddle.nn.functional as F\nimport numpy as np\nfrom attacks.gradient_method import FGSM\nfrom defences.adversarial_transform import ClassificationAdversarialTransform\nfrom models.whitebox import PaddleWhiteBoxModel\n\nfrom main_setting import cifar10_train, cifar10_test, advtrain_settings, enhance_config\nCIFAR10_TRAIN = cifar10_train\nCIFAR10_TEST = cifar10_test\nADVTRAIN_SETTINGS = advtrain_settings\nENHANCE_CONFIG = enhance_config\n\nfrom main_setting import MODEL, MODEL_PATH, MODEL_PARA_NAME, MODEL_OPT_PARA_NAME\nMODEL = MODEL\nMODEL_PATH = MODEL_PATH\nMODEL_PARA_NAME = MODEL_PARA_NAME\nMODEL_OPT_PARA_NAME = MODEL_OPT_PARA_NAME\n\nUSE_GPU = paddle.get_device()\nif USE_GPU.startswith('gpu'):\n paddle.set_device(\"gpu\")\nelse:\n paddle.set_device(\"cpu\")\npaddle.seed(2021)\n\ndef adverarial_train(model, cifar10_train, cifar10_test, save_path=None, **kwargs):\n \"\"\"\n A demo for adversarial training based on data augmentation.\n Args:\n model: paddle model.\n cifar10_train: paddle dataloader.\n cifar10_test: paddle dataloader.\n save_path: str. path for saving model.\n **kwargs: Other named arguments.\n Returns:\n training log\n \"\"\"\n assert save_path is not None\n print('start training ... 
')\n val_acc_history = []\n val_loss_history = []\n epoch_num = kwargs[\"epoch_num\"]\n advtrain_start_num = kwargs[\"advtrain_start_num\"]\n batch_size = kwargs[\"batch_size\"]\n adversarial_trans = kwargs[\"adversarial_trans\"]\n opt = kwargs[\"optimizer\"]\n train_loader = paddle.io.DataLoader(cifar10_train, shuffle=True, batch_size=batch_size)\n valid_loader = paddle.io.DataLoader(cifar10_test, batch_size=batch_size)\n max_acc = 0\n for epoch in range(epoch_num):\n for batch_id, data in enumerate(train_loader()):\n x_data = data[0]\n y_data = paddle.unsqueeze(data[1], 1)\n # adversarial training late start\n if epoch >= advtrain_start_num and adversarial_trans is not None:\n x_data_augmented, y_data_augmented = adversarial_trans(x_data.numpy(), y_data.numpy())\n else:\n x_data_augmented, y_data_augmented = x_data, y_data\n # turn model into training mode\n model.train()\n # make sure gradient flow for model parameter\n for param in model.parameters():\n param.stop_gradient = False\n\n # numpy to paddle.Tensor\n x_data_augmented = paddle.to_tensor(x_data_augmented, dtype='float32', place=USE_GPU)\n y_data_augmented = paddle.to_tensor(y_data_augmented, dtype='int64', place=USE_GPU)\n y_data_augmented = paddle.unsqueeze(y_data_augmented, 1)\n\n logits = model(x_data_augmented)\n loss = F.cross_entropy(logits, y_data_augmented)\n acc = paddle.metric.accuracy(logits, y_data_augmented)\n acc = acc.numpy()\n acc = round(acc[0], 3)\n if batch_id % 10 == 0:\n print(\"epoch:{}, batch_id:{}, loss:{}, acc:{}\".format(epoch, batch_id, loss.numpy(), acc))\n loss.backward()\n opt.step()\n opt.clear_grad()\n # evaluate model after one epoch\n model.eval()\n accuracies = []\n losses = []\n with paddle.no_grad():\n for batch_id, data in enumerate(valid_loader()):\n x_data = data[0]\n y_data = paddle.unsqueeze(data[1], 1)\n logits = model(x_data)\n loss = F.cross_entropy(logits, y_data)\n acc = paddle.metric.accuracy(logits, y_data)\n accuracies.append(acc.numpy())\n losses.append(loss.numpy())\n avg_acc, avg_loss = np.mean(accuracies), np.mean(losses)\n avg_acc = round(avg_acc, 6)\n avg_loss = round(avg_loss, 6)\n if avg_acc > max_acc:\n max_acc = avg_acc\n paddle.save(model.state_dict(), save_path + MODEL_PARA_NAME + str(max_acc) + '.pdparams')\n paddle.save(opt.state_dict(), save_path + MODEL_OPT_PARA_NAME + str(max_acc) + '.pdopt')\n print(\"best saved at: \", save_path)\n else:\n pass\n print(\"[validation] accuracy/loss:{}/{}, max_acc:{}\".format(avg_acc, avg_loss, max_acc))\n val_acc_history.append(avg_acc)\n val_loss_history.append(avg_loss)\n model.train()\n\n return val_acc_history, val_loss_history\n\n\ndef main():\n # init a paddle model\n paddle_model = PaddleWhiteBoxModel(\n [MODEL],\n [1],\n paddle.nn.CrossEntropyLoss(),\n (-3, 3),\n channel_axis=3,\n nb_classes=10)\n adversarial_trans = ClassificationAdversarialTransform(paddle_model, [FGSM], [None], [ENHANCE_CONFIG])\n ADVTRAIN_SETTINGS[\"adversarial_trans\"] = adversarial_trans\n val_acc_history, val_loss_history = adverarial_train(MODEL, CIFAR10_TRAIN, CIFAR10_TEST,\n save_path=MODEL_PATH, **ADVTRAIN_SETTINGS)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.expand_dims",
"numpy.linspace",
"numpy.squeeze",
"numpy.linalg.norm",
"numpy.sign"
],
[
"numpy.mean"
]
] |
shiaki/sforzando
|
[
"24aa5c49693fe783336cf41847b1b361e709d086"
] |
[
"scripts/search-vizier.py"
] |
[
"#!/usr/bin/python\n\n'''\n Find possible host galaxies of these candidates.\n'''\n\nimport os\nimport sys\nimport json\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom tqdm import tqdm\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\nfrom astroquery.vizier import Vizier\n\n# New, 190506\nfrom astropy.cosmology import WMAP9 as cosmo\n\nfrom catalogs import *\n\ndef as_tuple(rec):\n ''' Convert a table record into a tuple '''\n rv = [w if (not np.ma.is_masked(w)) else None for w in rec]\n return tuple(rv)\n\n# encoder for numpy types from: https://github.com/mpld3/mpld3/issues/434\nclass npEncoder(json.JSONEncoder):\n \"\"\" Special json encoder for np types \"\"\"\n def default(self, obj):\n if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,\n np.int16, np.int32, np.int64, np.uint8,\n np.uint16,np.uint32, np.uint64)):\n return int(obj)\n elif isinstance(obj, (np.float_, np.float16, np.float32,\n np.float64)):\n return float(obj)\n elif isinstance(obj,(np.ndarray,)):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\nif __name__ == '__main__':\n\n # read candidates\n with open('candidate-events.json', 'r') as fp:\n candidate_events = json.load(fp, object_pairs_hook=OrderedDict)\n\n candidate_hosts = OrderedDict()\n\n # for each event: search for\n I_counter = 0\n for cand_i, cand_info_i in tqdm(candidate_events.items(),\n total=candidate_events.__len__()):\n\n if cand_i in candidate_hosts:\n continue\n\n # some events do not have complete RA/Dec info.\n if not (cand_info_i['ra'] and cand_info_i['dec']):\n continue\n\n # construct coord\n crd_i = SkyCoord(ra=cand_info_i['ra'],\n dec=cand_info_i['dec'],\n unit=('hour', 'deg'))\n\n # New 190506: use 30 kpc redshift cut.\n zred_i = np.abs(float(cand_info_i['redshift']))\n try:\n ksc_i = cosmo.kpc_proper_per_arcmin(zred_i).value / 60. # kpc/asec\n rad_i = min(30. / ksc_i, 120.)\n except:\n rad_i = 120.\n\n # search catalogs. (30\" limit)\n tab_list_i = Vizier.query_region(crd_i,\n radius=rad_i * u.arcsec,\n catalog=vizier_cats)\n\n sources_i = OrderedDict([('search_radius', rad_i)])\n for cat_name_i, tab_i in tab_list_i._dict.items():\n sources_i[cat_name_i] = list()\n for rec_j in tab_i:\n sources_i[cat_name_i].append(as_tuple(rec_j))\n\n candidate_hosts[cand_i] = sources_i\n\n I_counter += 1\n if not (I_counter % 269): # save into a file.\n with open('candidate-hosts.json', 'w') as fp:\n json.dump(candidate_hosts, fp, indent=4, cls=npEncoder)\n\n # save into a file.\n with open('candidate-hosts.json', 'w') as fp:\n json.dump(candidate_hosts, fp, indent=4, cls=npEncoder)\n\n# EOF\n"
] |
[
[
"numpy.ma.is_masked"
]
] |
alephdata/followthemoney-predict
|
[
"77626c81908b071296c9fd3496ca309d97e128a8"
] |
[
"followthemoney_predict/pipelines/xref/models/util.py"
] |
[
"import numpy as np\nimport pandas as pd\n\nfrom .. import settings\n\n\ndef value_or_first_list_item(value):\n if isinstance(value, (list, tuple)):\n return value[0]\n return value\n\n\ndef aux_fields(sample, prefix):\n for feature in settings.FEATURE_IDXS:\n key = f\"{prefix}_{feature}\"\n value = value_or_first_list_item(sample.get(key, pd.NA))\n if feature != \"name\" and pd.notna(value):\n yield f\"{feature[:2]}: {value[:6]}\"\n\n\ndef format_prediction(sample, p):\n p *= 100\n left_nonnone = \", \".join(aux_fields(sample, \"left\"))\n right_nonnone = \", \".join(aux_fields(sample, \"right\"))\n\n left_name = value_or_first_list_item(sample.left_name)\n right_name = value_or_first_list_item(sample.right_name)\n\n return f\" [{sample.source[:3]}] {left_name} ({left_nonnone}) vs {right_name} ({right_nonnone})-> {{ F: {p[0]:0.2f}, T: {p[1]:0.2f} }}\"\n\n\ndef get_phases(df, phases=None):\n phases = phases or df.phase.cat.categories\n data = {}\n for phase in phases:\n data[phase] = (\n df.query(f\"phase == '{phase}'\").sample(frac=1).reset_index(drop=True)\n )\n return data\n\n\ndef xarray(X):\n return np.asarray([*X])\n"
] |
[
[
"numpy.asarray",
"pandas.notna"
]
] |
cyclone923/op3
|
[
"81050a38d81da2de27f463d6d5823154ec46cd0e",
"81050a38d81da2de27f463d6d5823154ec46cd0e"
] |
[
"op3/core/logging.py",
"op3/launchers/launcher_util.py"
] |
[
"\"\"\"\nBased on rllab's logger.\n\nhttps://github.com/rll/rllab\n\"\"\"\nfrom enum import Enum\nfrom contextlib import contextmanager\nimport numpy as np\nimport os\nimport os.path as osp\nimport sys\nimport datetime\nimport dateutil.tz\nimport csv\nimport json\nimport pickle\nimport errno\nfrom collections import OrderedDict\n\nfrom op3.core.tabulate import tabulate\n\ndef add_prefix(log_dict: OrderedDict, prefix: str):\n with_prefix = OrderedDict()\n for key, val in log_dict.items():\n with_prefix[prefix + key] = val\n return with_prefix\n\ndef append_log(log_dict, to_add_dict, prefix=None):\n if prefix is not None:\n to_add_dict = add_prefix(to_add_dict, prefix=prefix)\n return log_dict.update(to_add_dict)\n\nclass TerminalTablePrinter(object):\n def __init__(self):\n self.headers = None\n self.tabulars = []\n\n def print_tabular(self, new_tabular):\n if self.headers is None:\n self.headers = [x[0] for x in new_tabular]\n else:\n assert len(self.headers) == len(new_tabular)\n self.tabulars.append([x[1] for x in new_tabular])\n self.refresh()\n\n def refresh(self):\n import os\n rows, columns = os.popen('stty size', 'r').read().split()\n tabulars = self.tabulars[-(int(rows) - 3):]\n sys.stdout.write(\"\\x1b[2J\\x1b[H\")\n sys.stdout.write(tabulate(tabulars, self.headers))\n sys.stdout.write(\"\\n\")\n\n\nclass MyEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, type):\n return {'$class': o.__module__ + \".\" + o.__name__}\n elif isinstance(o, Enum):\n return {\n '$enum': o.__module__ + \".\" + o.__class__.__name__ + '.' + o.name\n }\n elif callable(o):\n return {\n '$function': o.__module__ + \".\" + o.__name__\n }\n return json.JSONEncoder.default(self, o)\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\nclass Logger(object):\n def __init__(self):\n self._prefixes = []\n self._prefix_str = ''\n\n self._tabular_prefixes = []\n self._tabular_prefix_str = ''\n\n self._tabular = []\n\n self._text_outputs = []\n self._tabular_outputs = []\n self._tabular_keys = None\n\n self._text_fds = {}\n self._tabular_fds = {}\n self._tabular_header_written = set()\n\n self._snapshot_dir = None\n self._snapshot_mode = 'all'\n self._snapshot_gap = 1\n\n self._log_tabular_only = False\n self._header_printed = False\n self.table_printer = TerminalTablePrinter()\n\n def reset(self):\n self.__init__()\n\n def _add_output(self, file_name, arr, fds, mode='a'):\n if file_name not in arr:\n mkdir_p(os.path.dirname(file_name))\n arr.append(file_name)\n fds[file_name] = open(file_name, mode)\n\n def _remove_output(self, file_name, arr, fds):\n if file_name in arr:\n fds[file_name].close()\n del fds[file_name]\n arr.remove(file_name)\n\n def push_prefix(self, prefix):\n self._prefixes.append(prefix)\n self._prefix_str = ''.join(self._prefixes)\n\n def add_text_output(self, file_name):\n self._add_output(file_name, self._text_outputs, self._text_fds,\n mode='a')\n\n def remove_text_output(self, file_name):\n self._remove_output(file_name, self._text_outputs, self._text_fds)\n\n def add_tabular_output(self, file_name, relative_to_snapshot_dir=False):\n if relative_to_snapshot_dir:\n file_name = osp.join(self._snapshot_dir, file_name)\n self._add_output(file_name, self._tabular_outputs, self._tabular_fds,\n mode='w')\n\n def remove_tabular_output(self, file_name, relative_to_snapshot_dir=False):\n if relative_to_snapshot_dir:\n file_name = osp.join(self._snapshot_dir, 
file_name)\n if self._tabular_fds[file_name] in self._tabular_header_written:\n self._tabular_header_written.remove(self._tabular_fds[file_name])\n self._remove_output(file_name, self._tabular_outputs, self._tabular_fds)\n\n def set_snapshot_dir(self, dir_name):\n self._snapshot_dir = dir_name\n\n def get_snapshot_dir(self, ):\n return self._snapshot_dir\n\n def get_snapshot_mode(self, ):\n return self._snapshot_mode\n\n def set_snapshot_mode(self, mode):\n self._snapshot_mode = mode\n\n def get_snapshot_gap(self, ):\n return self._snapshot_gap\n\n def set_snapshot_gap(self, gap):\n self._snapshot_gap = gap\n\n def set_log_tabular_only(self, log_tabular_only):\n self._log_tabular_only = log_tabular_only\n\n def get_log_tabular_only(self, ):\n return self._log_tabular_only\n\n def log(self, s, with_prefix=True, with_timestamp=True):\n out = s\n if with_prefix:\n out = self._prefix_str + out\n if with_timestamp:\n now = datetime.datetime.now(dateutil.tz.tzlocal())\n timestamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')\n out = \"%s | %s\" % (timestamp, out)\n if not self._log_tabular_only:\n # Also log to stdout\n print(out)\n for fd in list(self._text_fds.values()):\n fd.write(out + '\\n')\n fd.flush()\n sys.stdout.flush()\n\n def record_tabular(self, key, val):\n self._tabular.append((self._tabular_prefix_str + str(key), str(val)))\n\n def record_dict(self, d, prefix=None):\n if prefix is not None:\n self.push_tabular_prefix(prefix)\n for k, v in d.items():\n self.record_tabular(k, v)\n if prefix is not None:\n self.pop_tabular_prefix()\n\n def push_tabular_prefix(self, key):\n self._tabular_prefixes.append(key)\n self._tabular_prefix_str = ''.join(self._tabular_prefixes)\n\n def pop_tabular_prefix(self, ):\n del self._tabular_prefixes[-1]\n self._tabular_prefix_str = ''.join(self._tabular_prefixes)\n\n def save_extra_data(self, data, file_name='extra_data.pkl', mode='joblib'):\n \"\"\"\n Data saved here will always override the last entry\n\n :param data: Something pickle'able.\n \"\"\"\n file_name = osp.join(self._snapshot_dir, file_name)\n if mode == 'joblib':\n import joblib\n joblib.dump(data, file_name, compress=3)\n elif mode == 'pickle':\n pickle.dump(data, open(file_name, \"wb\"))\n else:\n raise ValueError(\"Invalid mode: {}\".format(mode))\n return file_name\n\n def get_table_dict(self, ):\n return dict(self._tabular)\n\n def get_table_key_set(self, ):\n return set(key for key, value in self._tabular)\n\n @contextmanager\n def prefix(self, key):\n self.push_prefix(key)\n try:\n yield\n finally:\n self.pop_prefix()\n\n @contextmanager\n def tabular_prefix(self, key):\n self.push_tabular_prefix(key)\n yield\n self.pop_tabular_prefix()\n\n def log_variant(self, log_file, variant_data):\n mkdir_p(os.path.dirname(log_file))\n with open(log_file, \"w\") as f:\n json.dump(variant_data, f, indent=2, sort_keys=True, cls=MyEncoder)\n\n def record_tabular_misc_stat(self, key, values, placement='back'):\n if placement == 'front':\n prefix = \"\"\n suffix = key\n else:\n prefix = key\n suffix = \"\"\n if len(values) > 0:\n self.record_tabular(prefix + \"Average\" + suffix, np.average(values))\n self.record_tabular(prefix + \"Std\" + suffix, np.std(values))\n self.record_tabular(prefix + \"Median\" + suffix, np.median(values))\n self.record_tabular(prefix + \"Min\" + suffix, np.min(values))\n self.record_tabular(prefix + \"Max\" + suffix, np.max(values))\n else:\n self.record_tabular(prefix + \"Average\" + suffix, np.nan)\n self.record_tabular(prefix + \"Std\" + suffix, np.nan)\n 
self.record_tabular(prefix + \"Median\" + suffix, np.nan)\n self.record_tabular(prefix + \"Min\" + suffix, np.nan)\n self.record_tabular(prefix + \"Max\" + suffix, np.nan)\n\n def dump_tabular(self, *args, **kwargs):\n wh = kwargs.pop(\"write_header\", None)\n if len(self._tabular) > 0:\n if self._log_tabular_only:\n self.table_printer.print_tabular(self._tabular)\n else:\n for line in tabulate(self._tabular).split('\\n'):\n self.log(line, *args, **kwargs)\n tabular_dict = dict(self._tabular)\n\n # Only saves keys in first iteration to CSV!\n # (But every key is printed out in text)\n if self._tabular_keys is None:\n self._tabular_keys = list(sorted(tabular_dict.keys()))\n\n # Write to the csv files\n for tabular_fd in list(self._tabular_fds.values()):\n writer = csv.DictWriter(tabular_fd,\n fieldnames=self._tabular_keys,\n extrasaction=\"ignore\",)\n if wh or (\n wh is None and tabular_fd not in self._tabular_header_written):\n writer.writeheader()\n self._tabular_header_written.add(tabular_fd)\n writer.writerow(tabular_dict)\n tabular_fd.flush()\n del self._tabular[:]\n\n def pop_prefix(self, ):\n del self._prefixes[-1]\n self._prefix_str = ''.join(self._prefixes)\n\n def save_itr_params(self, itr, params):\n if self._snapshot_dir:\n if self._snapshot_mode == 'all':\n file_name = osp.join(self._snapshot_dir, 'itr_%d.pkl' % itr)\n pickle.dump(params, open(file_name, \"wb\"))\n elif self._snapshot_mode == 'last':\n # override previous params\n file_name = osp.join(self._snapshot_dir, 'params.pkl')\n pickle.dump(params, open(file_name, \"wb\"))\n elif self._snapshot_mode == \"gap\":\n if itr % self._snapshot_gap == 0:\n file_name = osp.join(self._snapshot_dir, 'itr_%d.pkl' % itr)\n pickle.dump(params, open(file_name, \"wb\"))\n elif self._snapshot_mode == \"gap_and_last\":\n if itr % self._snapshot_gap == 0:\n file_name = osp.join(self._snapshot_dir, 'itr_%d.pkl' % itr)\n pickle.dump(params, open(file_name, \"wb\"))\n file_name = osp.join(self._snapshot_dir, 'params.pkl')\n pickle.dump(params, open(file_name, \"wb\"))\n elif self._snapshot_mode == 'none':\n pass\n else:\n raise NotImplementedError\n\n\nlogger = Logger()\n",
"import datetime\nimport json\nimport os\nimport os.path as osp\nimport pickle\nimport random\nimport sys\nimport time\nfrom collections import namedtuple\n\nimport __main__ as main\nimport dateutil.tz\nimport numpy as np\n\nfrom op3.core import logger\nfrom op3.launchers import conf\nfrom op3.torch.pytorch_util import set_gpu_mode\nimport op3.pythonplusplus as ppp\nimport pdb\n\nGitInfo = namedtuple(\n 'GitInfo',\n [\n 'directory',\n 'code_diff',\n 'code_diff_staged',\n 'commit_hash',\n 'branch_name',\n ],\n)\n\n\ndef get_git_infos(dirs):\n try:\n import git\n git_infos = []\n for directory in dirs:\n # Idk how to query these things, so I'm just doing try-catch\n try:\n repo = git.Repo(directory)\n try:\n branch_name = repo.active_branch.name\n except TypeError:\n branch_name = '[DETACHED]'\n git_infos.append(GitInfo(\n directory=directory,\n code_diff=repo.git.diff(None),\n code_diff_staged=repo.git.diff('--staged'),\n commit_hash=repo.head.commit.hexsha,\n branch_name=branch_name,\n ))\n except git.exc.InvalidGitRepositoryError as e:\n print(\"Not a valid git repo: {}\".format(directory))\n except ImportError:\n git_infos = None\n return git_infos\n\n\ndef recursive_items(dictionary):\n \"\"\"\n Get all (key, item) recursively in a potentially recursive dictionary.\n Usage:\n\n ```\n x = {\n 'foo' : {\n 'bar' : 5\n }\n }\n recursive_items(x)\n # output:\n # ('foo', {'bar' : 5})\n # ('bar', 5)\n ```\n :param dictionary:\n :return:\n \"\"\"\n for key, value in dictionary.items():\n yield key, value\n if type(value) is dict:\n yield from recursive_items(value)\n\n\ndef save_experiment_data(dictionary, log_dir):\n with open(log_dir + '/experiment.pkl', 'wb') as handle:\n pickle.dump(dictionary, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef run_experiment_here(\n experiment_function,\n variant=None,\n exp_id=0,\n seed=None,\n use_gpu=True,\n # Logger params:\n exp_prefix=\"default\",\n snapshot_mode='last',\n snapshot_gap=1,\n git_infos=None,\n script_name=None,\n base_log_dir=None,\n force_randomize_seed=False,\n log_dir=None,\n **setup_logger_kwargs\n):\n \"\"\"\n Run an experiment locally without any serialization.\n\n :param experiment_function: Function. `variant` will be passed in as its\n only argument.\n :param exp_prefix: Experiment prefix for the save file.\n :param variant: Dictionary passed in to `experiment_function`.\n :param exp_id: Experiment ID. Should be unique across all\n experiments. Note that one experiment may correspond to multiple seeds,.\n :param seed: Seed used for this experiment.\n :param use_gpu: Run with GPU. By default False.\n :param script_name: Name of the running script\n :param log_dir: If set, set the log directory to this. 
Otherwise,\n the directory will be auto-generated based on the exp_prefix.\n :return:\n \"\"\"\n if variant is None:\n variant = {}\n variant['exp_id'] = str(exp_id)\n\n if force_randomize_seed or seed is None:\n seed = random.randint(0, 100000)\n variant['seed'] = str(seed)\n reset_execution_environment()\n\n actual_log_dir = setup_logger(\n exp_prefix=exp_prefix,\n variant=variant,\n exp_id=exp_id,\n seed=seed,\n snapshot_mode=snapshot_mode,\n snapshot_gap=snapshot_gap,\n base_log_dir=base_log_dir,\n log_dir=log_dir,\n git_infos=git_infos,\n script_name=script_name,\n **setup_logger_kwargs\n )\n\n set_seed(seed)\n set_gpu_mode(use_gpu)\n\n run_experiment_here_kwargs = dict(\n variant=variant,\n exp_id=exp_id,\n seed=seed,\n use_gpu=use_gpu,\n exp_prefix=exp_prefix,\n snapshot_mode=snapshot_mode,\n snapshot_gap=snapshot_gap,\n git_infos=git_infos,\n script_name=script_name,\n base_log_dir=base_log_dir,\n **setup_logger_kwargs\n )\n save_experiment_data(\n dict(\n run_experiment_here_kwargs=run_experiment_here_kwargs\n ),\n actual_log_dir\n )\n return experiment_function(variant)\n\n\ndef create_exp_name(exp_prefix, exp_id=0, seed=0):\n \"\"\"\n Create a semi-unique experiment name that has a timestamp\n :param exp_prefix:\n :param exp_id:\n :return:\n \"\"\"\n now = datetime.datetime.now(dateutil.tz.tzlocal())\n timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')\n return \"%s_%s_%04d--s-%d\" % (exp_prefix, timestamp, exp_id, seed)\n\n\ndef create_log_dir(\n exp_prefix,\n exp_id=0,\n seed=0,\n base_log_dir=None,\n include_exp_prefix_sub_dir=True,\n):\n \"\"\"\n Creates and returns a unique log directory.\n\n :param exp_prefix: All experiments with this prefix will have log\n directories be under this directory.\n :param exp_id: The number of the specific experiment run within this\n experiment.\n :param base_log_dir: The directory where all log should be saved.\n :return:\n \"\"\"\n exp_name = create_exp_name(exp_prefix, exp_id=exp_id,\n seed=seed)\n if base_log_dir is None:\n base_log_dir = conf.LOCAL_LOG_DIR\n if include_exp_prefix_sub_dir:\n log_dir = osp.join(base_log_dir, exp_prefix.replace(\"_\", \"-\"), exp_name)\n else:\n log_dir = osp.join(base_log_dir, exp_name)\n if osp.exists(log_dir):\n print(\"WARNING: Log directory already exists {}\".format(log_dir))\n os.makedirs(log_dir, exist_ok=True)\n return log_dir\n\n\ndef setup_logger(\n exp_prefix=\"default\",\n variant=None,\n text_log_file=\"debug.log\",\n variant_log_file=\"variant.json\",\n tabular_log_file=\"progress.csv\",\n snapshot_mode=\"last\",\n snapshot_gap=1,\n log_tabular_only=False,\n log_dir=None,\n git_infos=None,\n script_name=None,\n **create_log_dir_kwargs\n):\n \"\"\"\n Set up logger to have some reasonable default settings.\n\n Will save log output to\n\n based_log_dir/exp_prefix/exp_name.\n\n exp_name will be auto-generated to be unique.\n\n If log_dir is specified, then that directory is used as the output dir.\n\n :param exp_prefix: The sub-directory for this specific experiment.\n :param variant:\n :param text_log_file:\n :param variant_log_file:\n :param tabular_log_file:\n :param snapshot_mode:\n :param log_tabular_only:\n :param snapshot_gap:\n :param log_dir:\n :param git_infos:\n :param script_name: If set, save the script name to this.\n :return:\n \"\"\"\n if git_infos is None:\n git_infos = get_git_infos(conf.CODE_DIRS_TO_MOUNT)\n first_time = log_dir is None\n if first_time:\n log_dir = create_log_dir(exp_prefix, **create_log_dir_kwargs)\n\n if variant is not None:\n logger.log(\"Variant:\")\n 
logger.log(json.dumps(dict_to_safe_json(variant), indent=2))\n variant_log_path = osp.join(log_dir, variant_log_file)\n logger.log_variant(variant_log_path, variant)\n\n tabular_log_path = osp.join(log_dir, tabular_log_file)\n text_log_path = osp.join(log_dir, text_log_file)\n\n logger.add_text_output(text_log_path)\n if first_time:\n logger.add_tabular_output(tabular_log_path)\n else:\n logger._add_output(tabular_log_path, logger._tabular_outputs,\n logger._tabular_fds, mode='a')\n for tabular_fd in logger._tabular_fds:\n logger._tabular_header_written.add(tabular_fd)\n logger.set_snapshot_dir(log_dir)\n logger.set_snapshot_mode(snapshot_mode)\n logger.set_snapshot_gap(snapshot_gap)\n logger.set_log_tabular_only(log_tabular_only)\n exp_name = log_dir.split(\"/\")[-1]\n logger.push_prefix(\"[%s] \" % exp_name)\n\n if git_infos is not None:\n for (\n directory, code_diff, code_diff_staged, commit_hash, branch_name\n ) in git_infos:\n if directory[-1] == '/':\n directory = directory[:-1]\n diff_file_name = directory[1:].replace(\"/\", \"-\") + \".patch\"\n diff_staged_file_name = (\n directory[1:].replace(\"/\", \"-\") + \"_staged.patch\"\n )\n if code_diff is not None and len(code_diff) > 0:\n with open(osp.join(log_dir, diff_file_name), \"w\") as f:\n f.write(code_diff + '\\n')\n if code_diff_staged is not None and len(code_diff_staged) > 0:\n with open(osp.join(log_dir, diff_staged_file_name), \"w\") as f:\n f.write(code_diff_staged + '\\n')\n with open(osp.join(log_dir, \"git_infos.txt\"), \"a\") as f:\n f.write(\"directory: {}\\n\".format(directory))\n f.write(\"git hash: {}\\n\".format(commit_hash))\n f.write(\"git branch name: {}\\n\\n\".format(branch_name))\n if script_name is not None:\n with open(osp.join(log_dir, \"script_name.txt\"), \"w\") as f:\n f.write(script_name)\n return log_dir\n\n\ndef dict_to_safe_json(d):\n \"\"\"\n Convert each value in the dictionary into a JSON'able primitive.\n :param d:\n :return:\n \"\"\"\n new_d = {}\n for key, item in d.items():\n if safe_json(item):\n new_d[key] = item\n else:\n if isinstance(item, dict):\n new_d[key] = dict_to_safe_json(item)\n else:\n new_d[key] = str(item)\n return new_d\n\n\ndef safe_json(data):\n if data is None:\n return True\n elif isinstance(data, (bool, int, float)):\n return True\n elif isinstance(data, (tuple, list)):\n return all(safe_json(x) for x in data)\n elif isinstance(data, dict):\n return all(isinstance(k, str) and safe_json(v) for k, v in data.items())\n return False\n\n\ndef set_seed(seed):\n \"\"\"\n Set the seed for all the possible random number generators.\n\n :param seed:\n :return: None\n \"\"\"\n seed = int(seed)\n random.seed(seed)\n np.random.seed(seed)\n\n\ndef reset_execution_environment():\n \"\"\"\n Call this between calls to separate experiments.\n :return:\n \"\"\"\n logger.reset()\n\n\ndef query_yes_no(question, default=\"yes\"):\n \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is True for \"yes\" or False for \"no\".\n \"\"\"\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % 
default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")\n\n\"\"\"\nBelow is doodad-specific code\n\"\"\"\nec2_okayed = False\ngpu_ec2_okayed = False\nfirst_sss_launch = True\n\ntry:\n import doodad.mount as mount\n from doodad.utils import REPO_DIR\n CODE_MOUNTS = [\n mount.MountLocal(local_dir=REPO_DIR, pythonpath=True),\n ]\n for code_dir in conf.CODE_DIRS_TO_MOUNT:\n CODE_MOUNTS.append(mount.MountLocal(local_dir=code_dir, pythonpath=True))\n\n NON_CODE_MOUNTS = []\n for non_code_mapping in conf.DIR_AND_MOUNT_POINT_MAPPINGS:\n NON_CODE_MOUNTS.append(mount.MountLocal(**non_code_mapping))\n\n SSS_CODE_MOUNTS = []\n SSS_NON_CODE_MOUNTS = []\n if hasattr(conf, 'SSS_DIR_AND_MOUNT_POINT_MAPPINGS'):\n for non_code_mapping in conf.SSS_DIR_AND_MOUNT_POINT_MAPPINGS:\n SSS_NON_CODE_MOUNTS.append(mount.MountLocal(**non_code_mapping))\n if hasattr(conf, 'SSS_CODE_DIRS_TO_MOUNT'):\n for code_dir in conf.SSS_CODE_DIRS_TO_MOUNT:\n SSS_CODE_MOUNTS.append(\n mount.MountLocal(local_dir=code_dir, pythonpath=True)\n )\nexcept ImportError:\n print(\"doodad not detected\")\n\ntarget_mount = None\n\n\ndef run_experiment(\n method_call,\n mode='local',\n exp_prefix='default',\n seed=None,\n variant=None,\n exp_id=0,\n prepend_date_to_exp_prefix=True,\n use_gpu=False,\n snapshot_mode='last',\n snapshot_gap=1,\n base_log_dir=None,\n local_input_dir_to_mount_point_dict=None, # TODO(vitchyr): test this\n # local settings\n skip_wait=False,\n # ec2 settings\n sync_interval=180,\n region='us-east-1',\n instance_type=None,\n spot_price=None,\n verbose=False,\n num_exps_per_instance=1,\n # sss settings\n time_in_mins=None,\n # ssh settings\n ssh_host=None,\n # gcp\n gcp_kwargs=None,\n):\n \"\"\"\n Usage:\n ```\n def foo(variant):\n x = variant['x']\n y = variant['y']\n logger.log(\"sum\", x+y)\n variant = {\n 'x': 4,\n 'y': 3,\n }\n run_experiment(foo, variant, exp_prefix=\"my-experiment\")\n ```\n Results are saved to\n `base_log_dir/<date>-my-experiment/<date>-my-experiment-<unique-id>`\n By default, the base_log_dir is determined by\n `config.LOCAL_LOG_DIR/`\n :param method_call: a function that takes in a dictionary as argument\n :param mode: A string:\n - 'local'\n - 'local_docker'\n - 'ec2'\n - 'here_no_doodad': Run without doodad call\n :param exp_prefix: name of experiment\n :param seed: Seed for this specific trial.\n :param variant: Dictionary\n :param exp_id: One experiment = one variant setting + multiple seeds\n :param prepend_date_to_exp_prefix: If False, do not prepend the date to\n the experiment directory.\n :param use_gpu:\n :param snapshot_mode: See rlkit.core.logging\n :param snapshot_gap: See rlkit.core.logging\n :param base_log_dir: Will over\n :param sync_interval: How often to sync s3 data (in seconds).\n :param local_input_dir_to_mount_point_dict: Dictionary for doodad.\n :param ssh_host: the name of the host you want to ssh onto, should correspond to an entry in\n config.py of the following form:\n SSH_HOSTS=dict(\n ssh_host=dict(\n username='username',\n hostname='hostname/ip address',\n )\n )\n - if ssh_host is set to None, you will use ssh_host specified by\n config.SSH_DEFAULT_HOST\n :return:\n \"\"\"\n try:\n import doodad\n import doodad.mode\n import doodad.ssh\n except ImportError:\n print(\"Doodad not set up! 
Running experiment here.\")\n mode = 'here_no_doodad'\n global ec2_okayed\n global gpu_ec2_okayed\n global target_mount\n global first_sss_launch\n\n \"\"\"\n Sanitize inputs as needed\n \"\"\"\n if seed is None:\n seed = random.randint(0, 100000)\n if variant is None:\n variant = {}\n if mode == 'ssh' and base_log_dir is None:\n base_log_dir = conf.SSH_LOG_DIR\n if base_log_dir is None:\n if mode == 'sss':\n base_log_dir = conf.SSS_LOG_DIR\n else:\n base_log_dir = conf.LOCAL_LOG_DIR\n\n for key, value in ppp.recursive_items(variant):\n # This check isn't really necessary, but it's to prevent myself from\n # forgetting to pass a variant through dot_map_dict_to_nested_dict.\n if \".\" in key:\n raise Exception(\n \"Variants should not have periods in keys. Did you mean to \"\n \"convert {} into a nested dictionary?\".format(key)\n )\n if prepend_date_to_exp_prefix:\n exp_prefix = time.strftime(\"%m-%d\") + \"-\" + exp_prefix\n variant['seed'] = str(seed)\n variant['exp_id'] = str(exp_id)\n variant['exp_prefix'] = str(exp_prefix)\n variant['instance_type'] = str(instance_type)\n\n try:\n import git\n doodad_path = osp.abspath(osp.join(\n osp.dirname(doodad.__file__),\n os.pardir\n ))\n dirs = conf.CODE_DIRS_TO_MOUNT + [doodad_path]\n\n git_infos = []\n for directory in dirs:\n # Idk how to query these things, so I'm just doing try-catch\n try:\n repo = git.Repo(directory)\n try:\n branch_name = repo.active_branch.name\n except TypeError:\n branch_name = '[DETACHED]'\n git_infos.append(GitInfo(\n directory=directory,\n code_diff=repo.git.diff(None),\n code_diff_staged=repo.git.diff('--staged'),\n commit_hash=repo.head.commit.hexsha,\n branch_name=branch_name,\n ))\n except git.exc.InvalidGitRepositoryError:\n pass\n except ImportError:\n git_infos = None\n run_experiment_kwargs = dict(\n exp_prefix=exp_prefix,\n variant=variant,\n exp_id=exp_id,\n seed=seed,\n use_gpu=use_gpu,\n snapshot_mode=snapshot_mode,\n snapshot_gap=snapshot_gap,\n git_infos=git_infos,\n script_name=main.__file__,\n )\n if mode == 'here_no_doodad':\n run_experiment_kwargs['base_log_dir'] = base_log_dir\n return run_experiment_here(\n method_call,\n **run_experiment_kwargs\n )\n\n \"\"\"\n Safety Checks\n \"\"\"\n\n if mode == 'ec2' or mode == 'gcp':\n if not ec2_okayed and not query_yes_no(\n \"{} costs money. Are you sure you want to run?\".format(mode)\n ):\n sys.exit(1)\n if not gpu_ec2_okayed and use_gpu:\n if not query_yes_no(\n \"{} is more expensive with GPUs. 
Confirm?\".format(mode)\n ):\n sys.exit(1)\n gpu_ec2_okayed = True\n ec2_okayed = True\n\n \"\"\"\n GPU vs normal configs\n \"\"\"\n if use_gpu:\n docker_image = conf.GPU_DOODAD_DOCKER_IMAGE\n if instance_type is None:\n instance_type = conf.GPU_INSTANCE_TYPE\n else:\n assert instance_type[0] == 'g'\n if spot_price is None:\n spot_price = conf.GPU_SPOT_PRICE\n else:\n docker_image = conf.DOODAD_DOCKER_IMAGE\n if instance_type is None:\n instance_type = conf.INSTANCE_TYPE\n if spot_price is None:\n spot_price = conf.SPOT_PRICE\n if mode == 'sss':\n singularity_image = conf.SSS_IMAGE\n elif mode in ['local_singularity', 'slurm_singularity']:\n singularity_image = conf.SINGULARITY_IMAGE\n else:\n singularity_image = None\n\n\n \"\"\"\n Get the mode\n \"\"\"\n mode_kwargs = {}\n if use_gpu and mode == 'ec2':\n image_id = conf.REGION_TO_GPU_AWS_IMAGE_ID[region]\n if region == 'us-east-1':\n avail_zone = conf.REGION_TO_GPU_AWS_AVAIL_ZONE.get(region, \"us-east-1b\")\n mode_kwargs['extra_ec2_instance_kwargs'] = dict(\n Placement=dict(\n AvailabilityZone=avail_zone,\n ),\n )\n if region == 'us-west-2':\n avail_zone = conf.REGION_TO_GPU_AWS_AVAIL_ZONE.get(region, \"us-west-2c\")\n mode_kwargs['extra_ec2_instance_kwargs'] = dict(\n Placement=dict(\n AvailabilityZone=avail_zone,\n ),\n )\n else:\n image_id = None\n if hasattr(conf, \"AWS_S3_PATH\"):\n aws_s3_path = conf.AWS_S3_PATH\n else:\n aws_s3_path = None\n # pdb.set_trace()\n \"\"\"\n Create mode\n \"\"\"\n if mode == 'local':\n dmode = doodad.mode.Local(skip_wait=skip_wait)\n elif mode == 'local_docker':\n dmode = doodad.mode.LocalDocker(\n image=docker_image,\n gpu=use_gpu,\n )\n elif mode == 'ssh':\n if ssh_host == None:\n ssh_dict = conf.SSH_HOSTS[conf.SSH_DEFAULT_HOST]\n else:\n ssh_dict = conf.SSH_HOSTS[ssh_host]\n credentials = doodad.ssh.credentials.SSHCredentials(\n username=ssh_dict['username'],\n hostname=ssh_dict['hostname'],\n identity_file=conf.SSH_PRIVATE_KEY\n )\n dmode = doodad.mode.SSHDocker(\n credentials=credentials,\n image=docker_image,\n gpu=use_gpu,\n )\n elif mode == 'local_singularity':\n dmode = doodad.mode.LocalSingularity(\n image=singularity_image,\n gpu=use_gpu,\n )\n elif mode == 'slurm_singularity' or mode == 'sss':\n assert time_in_mins is not None, \"Must approximate/set time in minutes\"\n if use_gpu:\n kwargs = conf.SLURM_GPU_CONFIG\n else:\n kwargs = conf.SLURM_CPU_CONFIG\n if mode == 'slurm_singularity':\n dmode = doodad.mode.SlurmSingularity(\n image=singularity_image,\n gpu=use_gpu,\n time_in_mins=time_in_mins,\n skip_wait=skip_wait,\n pre_cmd=conf.SINGULARITY_PRE_CMDS,\n **kwargs\n )\n else:\n dmode = doodad.mode.ScriptSlurmSingularity(\n image=singularity_image,\n gpu=use_gpu,\n time_in_mins=time_in_mins,\n skip_wait=skip_wait,\n pre_cmd=conf.SSS_PRE_CMDS,\n **kwargs\n )\n elif mode == 'ec2':\n # Do this separately in case someone does not have EC2 configured\n dmode = doodad.mode.EC2AutoconfigDocker(\n image=docker_image,\n image_id=image_id,\n region=region,\n instance_type=instance_type,\n spot_price=spot_price,\n s3_log_prefix=exp_prefix,\n # Ask Vitchyr or Steven from an explanation, but basically we\n # will start just making the sub-directories within rlkit rather\n # than relying on doodad to do that.\n s3_log_name=\"\",\n gpu=use_gpu,\n aws_s3_path=aws_s3_path,\n num_exps=num_exps_per_instance,\n **mode_kwargs\n )\n elif mode == 'gcp':\n image_name = conf.GCP_IMAGE_NAME\n if use_gpu:\n image_name = conf.GCP_GPU_IMAGE_NAME\n\n if gcp_kwargs is None:\n gcp_kwargs = {}\n config_kwargs = {\n 
**conf.GCP_DEFAULT_KWARGS,\n **dict(image_name=image_name),\n **gcp_kwargs\n }\n dmode = doodad.mode.GCPDocker(\n image=docker_image,\n gpu=use_gpu,\n gcp_bucket_name=conf.GCP_BUCKET_NAME,\n gcp_log_prefix=exp_prefix,\n gcp_log_name=\"\",\n **config_kwargs\n )\n else:\n raise NotImplementedError(\"Mode not supported: {}\".format(mode))\n\n \"\"\"\n Get the mounts\n \"\"\"\n mounts = create_mounts(\n base_log_dir=base_log_dir,\n mode=mode,\n sync_interval=sync_interval,\n local_input_dir_to_mount_point_dict=local_input_dir_to_mount_point_dict,\n )\n\n \"\"\"\n Get the outputs\n \"\"\"\n launch_locally = None\n target = conf.RUN_DOODAD_EXPERIMENT_SCRIPT_PATH\n if mode == 'ec2':\n # Ignored since I'm setting the snapshot dir directly\n base_log_dir_for_script = None\n run_experiment_kwargs['force_randomize_seed'] = True\n # The snapshot dir needs to be specified for S3 because S3 will\n # automatically create the experiment director and sub-directory.\n snapshot_dir_for_script = conf.OUTPUT_DIR_FOR_DOODAD_TARGET\n elif mode == 'local':\n base_log_dir_for_script = base_log_dir\n # The snapshot dir will be automatically created\n snapshot_dir_for_script = None\n elif mode == 'local_docker':\n base_log_dir_for_script = conf.OUTPUT_DIR_FOR_DOODAD_TARGET\n # The snapshot dir will be automatically created\n snapshot_dir_for_script = None\n elif mode == 'ssh':\n base_log_dir_for_script = conf.OUTPUT_DIR_FOR_DOODAD_TARGET\n # The snapshot dir will be automatically created\n snapshot_dir_for_script = None\n elif mode in ['local_singularity', 'slurm_singularity', 'sss']:\n base_log_dir_for_script = base_log_dir\n # The snapshot dir will be automatically created\n snapshot_dir_for_script = None\n launch_locally = True\n if mode == 'sss':\n dmode.set_first_time(first_sss_launch)\n first_sss_launch = False\n target = conf.SSS_RUN_DOODAD_EXPERIMENT_SCRIPT_PATH\n elif mode == 'here_no_doodad':\n base_log_dir_for_script = base_log_dir\n # The snapshot dir will be automatically created\n snapshot_dir_for_script = None\n elif mode == 'gcp':\n # Ignored since I'm setting the snapshot dir directly\n base_log_dir_for_script = None\n run_experiment_kwargs['force_randomize_seed'] = True\n snapshot_dir_for_script = conf.OUTPUT_DIR_FOR_DOODAD_TARGET\n else:\n raise NotImplementedError(\"Mode not supported: {}\".format(mode))\n run_experiment_kwargs['base_log_dir'] = base_log_dir_for_script\n target_mount = doodad.launch_python(\n target=target,\n mode=dmode,\n mount_points=mounts,\n args={\n 'method_call': method_call,\n 'output_dir': snapshot_dir_for_script,\n 'run_experiment_kwargs': run_experiment_kwargs,\n 'mode': mode,\n },\n use_cloudpickle=True,\n target_mount=target_mount,\n verbose=verbose,\n launch_locally=launch_locally,\n )\n\n\ndef create_mounts(\n mode,\n base_log_dir,\n sync_interval=180,\n local_input_dir_to_mount_point_dict=None,\n):\n if mode == 'sss':\n code_mounts = SSS_CODE_MOUNTS\n non_code_mounts = SSS_NON_CODE_MOUNTS\n else:\n code_mounts = CODE_MOUNTS\n non_code_mounts = NON_CODE_MOUNTS\n\n if local_input_dir_to_mount_point_dict is None:\n local_input_dir_to_mount_point_dict = {}\n else:\n raise NotImplementedError(\"TODO(vitchyr): Implement this\")\n\n mounts = [m for m in code_mounts]\n for dir, mount_point in local_input_dir_to_mount_point_dict.items():\n mounts.append(mount.MountLocal(\n local_dir=dir,\n mount_point=mount_point,\n pythonpath=False,\n ))\n\n if mode != 'local':\n for m in non_code_mounts:\n mounts.append(m)\n\n if mode == 'ec2':\n output_mount = mount.MountS3(\n 
s3_path='',\n mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,\n output=True,\n sync_interval=sync_interval,\n include_types=('*.txt', '*.csv', '*.json', '*.gz', '*.tar',\n '*.log', '*.pkl', '*.mp4', '*.png', '*.jpg',\n '*.jpeg', '*.patch'),\n )\n elif mode == 'gcp':\n output_mount = mount.MountGCP(\n gcp_path='',\n mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,\n output=True,\n gcp_bucket_name=conf.GCP_BUCKET_NAME,\n sync_interval=sync_interval,\n include_types=('*.txt', '*.csv', '*.json', '*.gz', '*.tar',\n '*.log', '*.pkl', '*.mp4', '*.png', '*.jpg',\n '*.jpeg', '*.patch'),\n )\n\n elif mode in ['local', 'local_singularity', 'slurm_singularity', 'sss']:\n # To save directly to local files (singularity does this), skip mounting\n output_mount = mount.MountLocal(\n local_dir=base_log_dir,\n mount_point=None,\n output=True,\n )\n elif mode == 'local_docker':\n output_mount = mount.MountLocal(\n local_dir=base_log_dir,\n mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,\n output=True,\n )\n elif mode == 'ssh':\n output_mount = mount.MountLocal(\n local_dir=base_log_dir,\n mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,\n output=True,\n )\n else:\n raise NotImplementedError(\"Mode not supported: {}\".format(mode))\n mounts.append(output_mount)\n return mounts"
] |
[
[
"numpy.min",
"numpy.median",
"numpy.max",
"numpy.std",
"numpy.average"
],
[
"numpy.random.seed"
]
] |
adalisan/keras-visual-semantic-embedding
|
[
"0c50d12799a2be0f51692d176ad7803245fcf053"
] |
[
"keras_vse/eval.py"
] |
[
"#!/usr/bin/env python3\n#encoding: utf-8\nimport os ,sys\nimport argparse\nimport datetime\nfrom os.path import join as osp\nfrom shutil import copytree, rmtree\nfrom math import ceil\nimport json\nimport numpy as np\nfrom models import encode_sentences\nfrom models import build_pretrained_models\nimport pandas as pd\nfrom keras.optimizers import Nadam\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras_image_caption_data_generator import MultimodalInputDataGenerator as datagen\nfrom keras.preprocessing.image import ImageDataGenerator as IDG\nfrom keras.utils import to_categorical\nfrom models import concept_detector\n\nfrom keras import backend as K\n\nimport tensorflow as tf\nfrom keras.models import load_model, model_from_json\nfrom layers import L2Normalize\nimport json\nimport tensorflow as tf\nfrom utils import caption_image\n\ntry:\n import cPickle as pkl\nexcept ImportError:\n import pickle as pkl\n\nif 'tensorflow' == K.backend():\n\n from keras.backend.tensorflow_backend import set_session\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser('Visual semantic embeddings')\n parser.add_argument('--model_file', type=str,default = None)\n parser.add_argument('--train_csv_file', type=str)\n parser.add_argument('--tokenizer_pkl_file_id', dest=\"train_file_id\", type=str)\n parser.add_argument('--eval_csv_file', type=str)\n parser.add_argument('--synset_file', type=str)\n parser.add_argument('--source_dataset', default=\"GI\",choices = [\"GI\",\"VG\",\"OI\",\"GCC\",\"AIDASeedling\"])\n parser.add_argument('--debug', default=False, action=\"store_true\")\n parser.add_argument('--fix_gpu', type=int, default=-1)\n parser.add_argument('--dataaug', default=False, action=\"store_true\")\n parser.add_argument('--verbose', default=False, action=\"store_true\")\n parser.add_argument('--image_only_model', default=False, action=\"store_true\")\n parser.add_argument('--exp_id',default=None,type=str)\n parser.add_argument('--model_train_timestamp', dest=\"train_timestamp\", type=str)\n parser.add_argument('--class_ct_threshold', default = 120, type = int)\n \n args = parser.parse_args()\n \n debug = args.debug\n verbose =args.verbose\n K.set_floatx('float32')\n batch_size = 32\n\n model_fname = \"{}_keras_vse_model-{}\".format(args.train_file_id,args.train_timestamp)\n\n #Depending on the source data copy the images to local storage a subdir of /export/u10 \n dataset_localized = False\n timestamp = datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M\")\n if args.source_dataset==\"GI\":\n KERAS_DATAGEN_DIR = \"/nfs/mercury-11/u113/projects/AIDA/GoogleImageDownload_Rus_Scenario/image_data_links\"\n regex_exp = r'/nfs/mercury-11/u113/projects/AIDA/GoogleImageDownload_Rus_Scenario/image_data_links(.*)'\n LOCAL_STORAGE_DIR = \"/export/u10/sadali/AIDA/images/GoogleImageDownload_Rus_Scenario/squared\"\n replace_regex_exp = r'/export/u10/sadali/AIDA/images/GoogleImageDownload_Rus_Scenario/squared\\1'\n #Use bash script to crop and resize the images\n if not os.path.exists (osp(LOCAL_STORAGE_DIR,\"successful_local_clone\")):\n print (\"trying to copy to local storage\")\n try:\n os.system(\"resize_and_copy_local.sh valid_images_unique.txt\" )\n #copytree(KERAS_DATAGEN_DIR,LOCAL_STORAGE_DIR)\n dataset_localized = True\n with open(osp(LOCAL_STORAGE_DIR,\"successful_local_clone\"),\"w\") as fh:\n fh.write(timestamp+\"\\n\")\n\n except Exception as e:\n dataset_localized = False\n\n elif args.source_dataset==\"VG\":\n KERAS_DATAGEN_DIR 
= \"/nfs/mercury-11/u113/projects/AIDA/VisualGenomeData/image_data\"\n regex_exp = r'/nfs/mercury-11/u113/projects/AIDA/VisualGenomeData/image_data(.*)'\n LOCAL_STORAGE_DIR = \"/export/u10/sadali/AIDA/images/VisualGenomeData/image_data\"\n replace_regex_exp = r'/export/u10/sadali/AIDA/images/VisualGenomeData/image_data\\1'\n \n if not os.path.exists (osp(LOCAL_STORAGE_DIR,\"VG_100K\")):\n print (\"copyying VG data from \",KERAS_DATAGEN_DIR,LOCAL_STORAGE_DIR)\n try:\n copytree(KERAS_DATAGEN_DIR,LOCAL_STORAGE_DIR)\n dataset_localized = True\n except Exception as e:\n print (e)\n print (\"Unable to copy image files for {} \".format(args.source_dataset) )\n dataset_localized = False\n else:\n dataset_localized = True\n elif args.source_dataset==\"AIDASeedling\":\n KERAS_DATAGEN_DIR = \"/nfs/raid66/u12/users/rbock/aida/image_captions/annotation_of_seedling_corpus/images\"\n regex_exp = r'/nfs/raid66/u12/users/rbock/aida/image_captions/annotation_of_seedling_corpus/images(.*)'\n LOCAL_STORAGE_DIR = \"/export/u10/sadali/AIDA/images/AIDASeedling/image_data\"\n replace_regex_exp = r'/export/u10/sadali/AIDA/images/AIDASeedling/image_data\\1'\n if not os.path.exists (osp(LOCAL_STORAGE_DIR,\"LDC2018E01\")):\n print (\"copyying AIDASeedling data from \",KERAS_DATAGEN_DIR,LOCAL_STORAGE_DIR)\n try:\n copytree(KERAS_DATAGEN_DIR,LOCAL_STORAGE_DIR)\n dataset_localized = True\n except Exception as e:\n print (e)\n print (\"Unable to copy image files for {} \".format(args.source_dataset) )\n dataset_localized = False\n print (\"Removing any localized dirs\")\n try:\n rmtree(osp(LOCAL_STORAGE_DIR,\"LDC2018E01\"))\n except Exception as e:\n print (e)\n try:\n rmtree(osp(LOCAL_STORAGE_DIR,\"LDC2018E52\"))\n except Exception as e:\n print(e)\n else:\n dataset_localized = True\n\n # Form the experiment,training identifier\n output_id = args.exp_id +\"_\"+args.train_file_id\n \n if not os.path.exists(\"./{}\".format(output_id)):\n os.makedirs(output_id)\n #Determine GPU number\n gpu_id = 1\n gpu_id_str = str(int(gpu_id)) \n if args.fix_gpu >= 0:\n print (\"Overwriting gpu id from cfg file with given arg {}\".format(args.fix_gpu))\n gpu_id_str = str(args.fix_gpu)\n #%% \n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.visible_device_list = gpu_id_str\n\n #check_gpu_availability()\n #session = tf.Session(config=config)\n set_session(tf.Session(config=config))\n # trans_df = pd.read_csv(\n # \"/nfs/mercury-11/u113/projects/AIDA/GoogleImageDownload_Rus_Scenario/all_image_concepts_GI_specific_translation_en_es_ru_uk_gen_limit.csv\",\n # encoding=\"utf-16\",\n # dtype=\"str\"\n # )\n synynomys = pd.read_csv(args.synset_file, encoding='utf-16', dtype=\"object\")\n bbn_anno_labels= dict()\n recode_dict =dict()\n set_of_tuples = dict()\n for idx,row in synynomys.iterrows():\n generic_eng_name = row[0]\n generic_eng_name = generic_eng_name.strip('\"')\n generic_eng_name = generic_eng_name.strip(\"'\")\n\n for k in row:\n recode_dict.update({k:generic_eng_name})\n\n syns = list(row[1:-1])\n if row[-1] != \"\" and not pd.isnull(row[-1]):\n bbn_label = row[-1]\n syns.append(bbn_label)\n # if bbn_label in bbn_anno_labels.keys():\n # bbn_anno_labels[bbn_label].append(row[0])\n # else:\n # bbn_anno_labels[bbn_label] = [row[0]]\n bbn_anno_labels[row[0]]=bbn_label\n\n set_of_tuples.update({row[0]:syns})\n\n #REad the dataafreme which includes filepaths , captions and classnames\n test_df = pd.read_csv(args.train_csv_file, encoding='utf8')\n if verbose:\n print( test_df.apply(lambda x: 
pd.lib.infer_dtype(x.values)))\n texts = test_df[\"image_captions\"].values.tolist()\n class_names_pd = pd.unique(test_df[\"class\"].values)\n init_classnames =class_names_pd.tolist()\n\n class_counts = test_df[\"class\"].value_counts()\n class_counts.to_csv(\"class_counts_orig.csv\")\n class_ct_threshold = 9\n\n #REmove any classes that have less # of examples than class_ct_threshold\n untrainable_classes = class_counts < class_ct_threshold \n untrainable_classnames = untrainable_classes[untrainable_classes].index.tolist()\n if verbose:\n print (\"Removed classes:\\n\", untrainable_classnames)\n print (\"length of test_df\",len(test_df))\n test_df = test_df.loc[~test_df['class'].isin(untrainable_classnames),:]\n\n #Update the filepaths if images were copied to local storage\n if dataset_localized :\n test_df =test_df.replace(KERAS_DATAGEN_DIR,LOCAL_STORAGE_DIR,regex= True)\n print (\"new examplar count {}\".format(len(test_df)))\n #classnames= [k for k in init_classnames if k not in untrainable_classnames] \n\n test_df = test_df.replace(recode_dict)\n classnames = pd.unique(test_df[\"class\"].values)\n if verbose:\n print(\"Num of classes \")\n print (len(classnames))\n new_class_counts = test_df[\"class\"].value_counts()\n new_class_counts.to_csv(\"class_counts_test.csv\")\n try:\n if True: #not args.restore_checkpoint:\n class_indices_json = \"./models_dir/{}/{}_{}_class_indices.json\".format(output_id,model_fname,args.class_ct_threshold)\n else:\n class_indices_json = \"./models_dir/GI_image_only_softmax_GI_keras_train_qa_image_only_epoch_3/GI_keras_train_qa_image_only_epoch_3_keras_vse_model-2019_01_09_23_31_class_indices.json\"\n print(\"class_indices file is located \",class_indices_json)\n assert os.path.exists(class_indices_json)\n with open (class_indices_json,\"r\") as json_fh:\n class_indices_for_model = json.load(json_fh, encoding=\"utf8\")\n except Exception as e:\n print (e)\n class_dirs=os.listdir(KERAS_DATAGEN_DIR)\n classnames_ordered = np.sort(np.array(class_dirs)).tolist()\n print (\"This is a temp hack.Should not be necessary if class_indices.json is available\")\n classnames_ordered = [\"class_{}\".format(i) for i in range(854)]\n class_indices_for_model = dict(zip(classnames_ordered,range(len(classnames_ordered))))\n print(class_indices_for_model)\n\n if dataset_localized:\n test_df =test_df.replace(KERAS_DATAGEN_DIR, LOCAL_STORAGE_DIR, regex= True)\n \n if verbose:\n print(\"Dimensions of training set dataframe\")\n print(test_df.shape)\n new_class_counts = test_df[\"class\"].value_counts()\n new_class_counts.to_csv(\"class_counts_eval.csv\")\n\n\n # Given image captions read from csv , compile the vocab(list of tokens) for encoding the captions\n texts_ascii = [k.encode('ascii','ignore').decode() for k in texts]\n\n\n print (type(texts[0]))\n\n with open('./{}/keras_captiontokenizer_{}.pkl'.format(output_id,args.train_file_id),\"rb\") as kfh:\n tokenizer=pkl.load(kfh)\n tokenizer.fit_on_texts(texts_ascii)\n word_index = tokenizer.word_index\n print('Found %s unique tokens.' 
% len(word_index))\n\n \n end2endmodel = load_model(args.model_file,custom_objects={'L2Normalize':L2Normalize})\n print (\"Model summary\",end2endmodel.summary())\n #end2endmodel.compile(optimizer='nadam', loss=\"binary_crossentropy\")\n model_output_dim = end2endmodel.outputs[0].shape[1]\n print(\"dense_1 wts: \\n\", end2endmodel.get_layer('dense_1').get_weights())\n if not args.image_only_model:\n print(\"gru_1 wts: \\n\", end2endmodel.get_layer('gru_1').get_weights())\n print(\"dense_2 wts: \\n\", end2endmodel.get_layer('dense_2').get_weights())\n with open(\"layer_weights_at_test_time.txt\",\"w\") as fh:\n fh.write(\"dense_1 wts: \\n\")\n fh.write(str( end2endmodel.get_layer('dense_1').get_weights()))\n if not args.image_only_model:\n fh.write(\"\\ngru_1 wts: \\n\")\n fh.write(str( end2endmodel.get_layer('gru_1').get_weights()))\n fh.write(\"\\ndense_2 wts: \\n\")\n fh.write(str( end2endmodel.get_layer('dense_2').get_weights()))\n fh.write(\"\\nblock5_conv4 wts: \\n\")\n fh.write(str(end2endmodel.get_layer('block5_conv4').get_weights()))\n end2endmodel.compile(optimizer='nadam', loss=\"categorical_crossentropy\")\n\n # For debugging, print some weights\n with open(\"{}_trained_layer_weights_{}.txt\".format(args.train_file_id,timestamp),\"w\") as fh:\n fh.write(\"dense_1 wts: \\n\")\n fh.write(str( end2endmodel.get_layer('dense_1').get_weights()))\n if not args.image_only_model:\n fh.write(\"\\ngru_1 wts: \\n\")\n fh.write(str( end2endmodel.get_layer('gru_1').get_weights()))\n fh.write(\"\\ndense_2 wts: \\n\")\n fh.write(str( end2endmodel.get_layer('dense_2').get_weights()))\n fh.write(\"\\nblock5_conv4 wts: \\n\")\n fh.write(str( end2endmodel.get_layer('block5_conv4').get_weights()))\n fh.write(\"\\n\")\n\n\n\n\n #create a test data generator for testing/sanity-checking the trained model using training data\n test_datagen = None\n if args.image_only_model:\n if args.dataaug:\n test_datagen = IDG(width_shift_range = 0.2,zoom_range=0.2,rotation_range=25, height_shift_range=0.3 )\n else:\n test_datagen = IDG()\n else:\n if args.dataaug:\n test_datagen = datagen(width_shift_range = 0.2,zoom_range=0.2,rotation_range=25, height_shift_range=0.3 )\n else:\n test_datagen = datagen()\n \n if args.image_only_model:\n test_data_it = test_datagen.flow_from_dataframe( \n dataframe= test_df,\n directory= None,\n x_col=\"filenames\", y_col=\"class\", has_ext=True,\n target_size=(256, 256), color_mode='rgb',\n class_mode='sparse',\n batch_size=batch_size, shuffle=False, seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n subset=None,\n interpolation='nearest',\n sort=False,\n follow_links= True)\n else:\n test_data_it = test_datagen.flow_from_dataframe( \n dataframe= test_df,\n directory= None,\n x_col=[\"filenames\",\"image_captions\"], \n y_col=\"class\", has_ext=True,\n target_size=(256, 256), color_mode='rgb',\n class_mode='sparse',\n batch_size=batch_size , shuffle=False, seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n subset=None,\n interpolation='nearest',\n sort=False,\n cap_token_vocab=word_index,\n num_tokens = len(word_index),\n follow_links= True)\n\n\n\n # Actually run the prediction on the training test.\n # preds_out.write(\"{}\\n\".format(pr))\n \n batch_ctr = 0\n output_dir = \"/export/u10/sadali/AIDA/images/captioned_images/{}-{}\".format(output_id, model_fname)\n\n if not os.path.exists (output_dir):\n os.makedirs(output_dir)\n\n model_classnames = [\"\"]*model_output_dim\n for k,v in class_indices_for_model.items():\n model_classnames[v] = 
k\n\n end_of_samples = False\n b_it = 0\n sample_count = len(test_data_it.filenames)\n batch_ctr_max = ceil(sample_count/batch_size )\n dec_vector = np.zeros(shape=(sample_count,model_output_dim))\n while not end_of_samples:\n batch_indices = []\n batch_idx =None\n for batch_idx in next(test_data_it.index_generator, -1):\n if batch_idx < 0:\n print(batch_idx)\n end_of_samples = True\n break\n #print (batch_ctr)\n \n batch_indices.append(batch_idx)\n #files_in_batch = test_df[\"filenames\"][example_it:batch_end].values.tolist()\n \n\n b_it += 1\n if b_it == batch_size:\n break\n if batch_idx <0 :\n end_of_samples = True\n #print(batch_indices)\n \n test_batch = test_data_it._get_batches_of_transformed_samples(batch_indices)\n files_in_batch = [test_data_it.filenames[k] for k in batch_indices]\n\n #print(\"files_in_batch:\",str(files_in_batch))\n y_values = test_batch[1]\n preds_out = end2endmodel.predict_on_batch(test_batch[0])\n dec_vector[batch_indices,:] = preds_out\n\n print(\"predictions tensor shape\", preds_out.shape)\n if not os.path.exists(\"./{}\".format(output_id)):\n os.makedirs(output_id)\n preds_out_file = open(\"./{}/{}_{}_{}_eval_preds_out.txt\".format(output_id,\n args.train_file_id, \n args.train_timestamp,timestamp),\"w\")\n print(\"predictions\")\n for pr in preds_out:\n preds_out_file.write(\"{}\\n\".format(pr))\n preds_out_file.close()\n print(\"starting captioning\")\n highest_idx = np.argmax(preds_out, axis=1)\n found_highest_idx = False\n\n\n for b_i,f in enumerate(files_in_batch):\n concept_score_triples = []\n\n highest_class = \"\"\n for k,v in class_indices_for_model.items():\n new_tri = (k, preds_out[b_i,v], preds_out[b_i,v])\n #new_tri= (k, preds_out[v,b_i], preds_out[v,b_i])\n if v == highest_idx[b_i]:\n highest_class = k\n highest_score = preds_out[b_i, v]\n found_highest_idx = True\n concept_score_triples.append(new_tri)\n # if len(y_values.shape) > 1 :\n # class_idx = y_values[b_i] \n # else:\n # class_idx = np.argmax(y_values[b_i,:])\n\n class_idx = y_values[b_i]\n generic_class = bbn_anno_labels.get(model_classnames[class_idx], \"\")\n gt_classname = model_classnames[class_idx] +\" - \" + \\\n generic_class\n\n if model_classnames[class_idx] in bbn_anno_labels.keys():\n y_generic_class_idx = class_indices_for_model.get(model_classnames[class_idx], -1)\n if y_generic_class_idx == -1:\n print (\"model class list is \")\n print(class_indices_for_model)\n print(\"while translation/synset generic class list is\")\n print(bbn_anno_labels.keys())\n to_categorical( class_idx, )\n\n caption_image(f, concept_score_triples, output_dir, \n caption_threshold = 0.08 , trans_dict=None, \n true_classname = gt_classname, \n highest_pair = [highest_class, highest_score] if found_highest_idx else None )\n\n batch_ctr += 1\n if batch_ctr == batch_ctr_max:\n break\n if batch_ctr % 200 == 0 :\n print (\"{}th batch of images used on model\" .format(batch_ctr))\n\n\n\n"
] |
[
[
"pandas.read_csv",
"pandas.isnull",
"tensorflow.ConfigProto",
"pandas.lib.infer_dtype",
"numpy.argmax",
"pandas.unique",
"tensorflow.Session",
"numpy.array",
"numpy.zeros"
]
] |
ignaciodsimon/optimised_biquad_filter
|
[
"0d85dc42033e767eeb55107e72dba98417377686"
] |
[
"test_filters.py"
] |
[
"\"\"\"\n This script is used to test both implementations of \n the biquad filter and measure their time performance.\n\n Joe Simon 2018.\n\"\"\"\n\nimport biquad_filter_optimised\nimport biquad_filter_original\nimport numpy\nimport matplotlib.pyplot as plot\nimport time\n\n\nif __name__ == '__main__':\n\n _sampleRate = 48000.0\n print(\"Instantiating filters ...\")\n _filter1 = biquad_filter_original.BiquadFilter(sampleRate=_sampleRate)\n _filter2 = biquad_filter_optimised.BiquadFilter()\n\n print(\"Generating filter coefficients ...\")\n _filter1.generateBiQuadCoefficients(filterf0=1000, filterQ=0.707, filterType=biquad_filter_original.BiquadFilterType.LPF)\n _filter2.generateBiQuadCoefficients(biquad_filter_optimised.BiquadFilterParameters(filterf0=1000, filterQ=0.707, filterType=biquad_filter_optimised.BiquadFilterType.LPF))\n\n # Example of how to use the method 'computeBiquadFilterResponse()' without\n # having to 'measure' it passing a test signal and alysing the output.\n #\n # _filter2Response = _filter2.computeBiquadFilterResponse([i/_sampleRate for i in range(int(_sampleRate/2))])\n # plot.subplot(2,1,1)\n # plot.semilogx(20.0*numpy.log10(numpy.abs(_filter2Response)))\n # plot.ylim([-60, 10])\n # plot.grid(1)\n # plot.subplot(2,1,2)\n # plot.semilogx(180.0 / numpy.pi * numpy.angle(_filter2Response))\n # plot.grid(1)\n # plot.show()\n\n _impulse = [0.0]*int(_sampleRate * 10.0)\n _impulse[0] = 1.0\n print(\"Processing impulse signal with filters ...\")\n _amountOfTests = 10\n _timesFilter1 = []\n _timesFilter2 = []\n _testStartTime = time.time()\n for i in range(_amountOfTests):\n _initialTime1 = time.time()\n _filter1Output = [_filter1.processSample(inputSample=_sample) for _sample in _impulse]\n _finalTime1 = time.time()\n _initialTime2 = time.time()\n _filter2Output = [_filter2.processSample(_sample) for _sample in _impulse]\n _finalTime2 = time.time()\n _timesFilter1.append(_finalTime1 - _initialTime1)\n _timesFilter2.append(_finalTime2 - _initialTime2)\n\n _improvementRatios = [_timesFilter1[i] / _timesFilter2[i] for i in range(_amountOfTests)]\n print(\"Time improvement ratio: \" + str([\"%.2f\" % _improvementRatios[i] for i in range(_amountOfTests)]))\n print(\"Mean improvement ratio: %.2f\" % numpy.mean(_improvementRatios))\n print(\"Total elapsed time: %.2f [s]\" % (time.time() - _testStartTime))\n\n print(\"Plotting filters response ...\")\n _filter1Response = 20.0 * numpy.log10(numpy.abs(numpy.fft.rfft(_filter1Output[:48000])) + 10.0**-200/20.0)\n _filter2Response = 20.0 * numpy.log10(numpy.abs(numpy.fft.rfft(_filter2Output[:48000])) + 10.0**-200/20.0)\n _freqAxis = [i * _sampleRate / len(_filter1Response) / 2.0 for i in range(len(_filter1Response))]\n plot.semilogx(_freqAxis, _filter1Response)\n plot.semilogx(_freqAxis, _filter2Response)\n plot.legend(['Original %.1f [s]' % numpy.mean(_timesFilter1), 'Optimised %.1f [s]' % numpy.mean(_timesFilter2)])\n plot.grid(1)\n plot.title(\"Time improvement ratio: %.2f [.]\" % numpy.mean(_improvementRatios))\n plot.ylim([-90, 10])\n plot.show()\n"
] |
[
[
"matplotlib.pyplot.semilogx",
"numpy.fft.rfft",
"matplotlib.pyplot.ylim",
"numpy.mean",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.show"
]
] |
Quentin18/Matplotlib-fractals
|
[
"cbf8c39bd4da04446638408eff72fca21a6d8580"
] |
[
"fractals/kochSnowflake.py"
] |
[
"\"\"\"\nKoch snowflake\nhttps://en.wikipedia.org/wiki/Koch_snowflake\n\"\"\"\nimport sys\nfrom math import sqrt\nimport matplotlib.pyplot as plt\n\n\ndef kochCurve(n, xA, yA, xB, yB):\n if n != 0:\n xC = xA + (xB - xA) / 3\n yC = yA + (yB - yA) / 3\n xD = xA + 2 * (xB - xA) / 3\n yD = yA + 2 * (yB - yA) / 3\n xE = (xC + xD) / 2 - (yD - yC) * sqrt(3) / 2\n yE = (yC + yD) / 2 + (xD - xC) * sqrt(3) / 2\n kochCurve(n - 1, xA, yA, xC, yC)\n kochCurve(n - 1, xC, yC, xE, yE)\n kochCurve(n - 1, xE, yE, xD, yD)\n kochCurve(n - 1, xD, yD, xB, yB)\n else:\n plt.plot([xA, xB], [yA, yB], 'b')\n\n\ndef kockCurveConstruction(n):\n kochCurve(n, 0, 0, 1, 0)\n plt.axis(\"equal\")\n plt.show()\n\n\ndef kochSnowflake(n):\n xA, yA = 0, 0\n xB, yB = 1 / 2, sqrt(0.75)\n xC, yC = 1, 0\n kochCurve(n, xA, yA, xB, yB)\n kochCurve(n, xB, yB, xC, yC)\n kochCurve(n, xC, yC, xA, yA)\n plt.axis(\"equal\")\n plt.title(\"Koch snowflake\")\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == \"__main__\":\n n = int(sys.argv[1]) if len(sys.argv) == 2 else 4\n kochSnowflake(n)\n # print(\"perimeter:\", 3*(4 / 3)**n)\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
]
] |
arshjot/knowledge-graphs
|
[
"14e2f6c141a361a9b973cefcfbfdd9209eff64c7"
] |
[
"run.py"
] |
[
"#!/usr/bin/python3\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\n\nfrom torch.utils.data import DataLoader\n\nfrom model import KGEModel\n\nfrom dataloader import TrainDataset\nfrom dataloader import BidirectionalOneShotIterator\n\nfrom ogb.linkproppred import LinkPropPredDataset, Evaluator\nfrom collections import defaultdict\nfrom tqdm import tqdm\nimport time\nfrom tensorboardX import SummaryWriter\nimport pdb\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(\n description='Training and Testing Knowledge Graph Embedding Models',\n usage='train.py [<args>] [-h | --help]'\n )\n\n parser.add_argument('--cuda', action='store_true', help='use GPU')\n \n parser.add_argument('--do_train', action='store_true')\n parser.add_argument('--do_valid', action='store_true')\n parser.add_argument('--do_test', action='store_true')\n parser.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data')\n \n parser.add_argument('--dataset', type=str, default='ogbl-biokg', help='dataset name, default to biokg')\n parser.add_argument('--model', default='TransE', type=str)\n parser.add_argument('-de', '--double_entity_embedding', action='store_true')\n parser.add_argument('-dr', '--double_relation_embedding', action='store_true')\n \n parser.add_argument('-n', '--negative_sample_size', default=128, type=int)\n parser.add_argument('-acc', '--grad_accum_steps', default=1, type=int)\n parser.add_argument('-d', '--hidden_dim', default=500, type=int)\n parser.add_argument('-g', '--gamma', default=12.0, type=float)\n parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')\n parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)\n parser.add_argument('-b', '--batch_size', default=1024, type=int)\n parser.add_argument('-r', '--regularization', default=0.0, type=float)\n parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')\n parser.add_argument('--uni_weight', action='store_true', \n help='Otherwise use subsampling weighting like in word2vec')\n \n parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float)\n parser.add_argument('-cpu', '--cpu_num', default=10, type=int)\n parser.add_argument('-init', '--init_checkpoint', default=None, type=str)\n parser.add_argument('-save', '--save_path', default=None, type=str)\n parser.add_argument('--max_steps', default=100000, type=int)\n parser.add_argument('--warm_up_steps', default=None, type=int)\n \n parser.add_argument('--save_checkpoint_steps', default=10000, type=int)\n parser.add_argument('--valid_steps', default=10000, type=int)\n parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps')\n parser.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')\n \n parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')\n parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')\n \n parser.add_argument('--print_on_screen', action='store_true', help='log on screen or not')\n parser.add_argument('--ntriples_eval_train', type=int, default=200000, help='number of training triples to evaluate eventually')\n parser.add_argument('--neg_size_eval_train', type=int, default=500, help='number of negative samples when evaluating training 
triples')\n return parser.parse_args(args)\n\ndef override_config(args):\n '''\n Override model and data configuration\n '''\n \n with open(os.path.join(args.init_checkpoint, 'config.json'), 'r') as fjson:\n argparse_dict = json.load(fjson)\n \n args.dataset = argparse_dict['dataset']\n args.model = argparse_dict['model']\n args.double_entity_embedding = argparse_dict['double_entity_embedding']\n args.double_relation_embedding = argparse_dict['double_relation_embedding']\n args.hidden_dim = argparse_dict['hidden_dim']\n args.test_batch_size = argparse_dict['test_batch_size']\n \ndef save_model(model, optimizer, save_variable_list, args):\n '''\n Save the parameters of the model and the optimizer,\n as well as some other variables such as step and learning_rate\n '''\n \n argparse_dict = vars(args)\n with open(os.path.join(args.save_path, 'config.json'), 'w') as fjson:\n json.dump(argparse_dict, fjson)\n\n torch.save({\n **save_variable_list,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()},\n os.path.join(args.save_path, 'checkpoint')\n )\n \n entity_embedding = model.entity_embedding.detach().cpu().numpy()\n np.save(\n os.path.join(args.save_path, 'entity_embedding'), \n entity_embedding\n )\n \n relation_embedding = model.relation_embedding.detach().cpu().numpy()\n np.save(\n os.path.join(args.save_path, 'relation_embedding'), \n relation_embedding\n )\n\ndef set_logger(args):\n '''\n Write logs to checkpoint and console\n '''\n\n if args.do_train:\n log_file = os.path.join(args.save_path or args.init_checkpoint, 'train.log')\n else:\n log_file = os.path.join(args.save_path or args.init_checkpoint, 'test.log')\n\n logging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=log_file,\n filemode='w'\n )\n\n if args.print_on_screen:\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n\ndef log_metrics(mode, step, metrics, writer):\n '''\n Print the evaluation logs\n '''\n for metric in metrics:\n logging.info('%s %s at step %d: %f' % (mode, metric, step, metrics[metric]))\n writer.add_scalar(\"_\".join([mode, metric]), metrics[metric], step)\n \n \ndef main(args):\n if (not args.do_train) and (not args.do_valid) and (not args.do_test) and (not args.evaluate_train):\n raise ValueError('one of train/val/test mode must be choosed.')\n \n if args.init_checkpoint:\n override_config(args)\n\n args.save_path = 'log/%s/%s/%s-%s/%s'%(args.dataset, args.model, args.hidden_dim, args.gamma, time.time()) if args.save_path == None else args.save_path\n writer = SummaryWriter(args.save_path)\n \n # Write logs to checkpoint and console\n set_logger(args)\n \n dataset = LinkPropPredDataset(name = 'ogbl-biokg')\n split_edge = dataset.get_edge_split()\n train_triples, valid_triples, test_triples = split_edge[\"train\"], split_edge[\"valid\"], split_edge[\"test\"]\n nrelation = int(max(train_triples['relation']))+1\n entity_dict = dict()\n cur_idx = 0\n for key in dataset[0]['num_nodes_dict']:\n entity_dict[key] = (cur_idx, cur_idx + dataset[0]['num_nodes_dict'][key])\n cur_idx += dataset[0]['num_nodes_dict'][key]\n nentity = sum(dataset[0]['num_nodes_dict'].values())\n\n evaluator = Evaluator(name = args.dataset)\n\n args.nentity = nentity\n args.nrelation = nrelation\n \n logging.info('Model: %s' % args.model)\n 
logging.info('Dataset: %s' % args.dataset)\n logging.info('#entity: %d' % nentity)\n logging.info('#relation: %d' % nrelation)\n \n # train_triples = split_dict['train']\n logging.info('#train: %d' % len(train_triples['head']))\n # valid_triples = split_dict['valid']\n logging.info('#valid: %d' % len(valid_triples['head']))\n # test_triples = split_dict['test']\n logging.info('#test: %d' % len(test_triples['head']))\n\n train_count, train_true_head, train_true_tail = defaultdict(lambda: 4), defaultdict(list), defaultdict(list)\n for i in tqdm(range(len(train_triples['head']))):\n head, relation, tail = train_triples['head'][i], train_triples['relation'][i], train_triples['tail'][i]\n head_type, tail_type = train_triples['head_type'][i], train_triples['tail_type'][i]\n train_count[(head, relation, head_type)] += 1\n train_count[(tail, -relation-1, tail_type)] += 1\n train_true_head[(relation, tail)].append(head)\n train_true_tail[(head, relation)].append(tail)\n \n kge_model = KGEModel(\n model_name=args.model,\n nentity=nentity,\n nrelation=nrelation,\n hidden_dim=args.hidden_dim,\n gamma=args.gamma,\n double_entity_embedding=args.double_entity_embedding,\n double_relation_embedding=args.double_relation_embedding,\n evaluator=evaluator\n )\n\n def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n print(f'Number of parameters = {count_parameters(kge_model)}')\n logging.info('Model Parameter Configuration:')\n for name, param in kge_model.named_parameters():\n logging.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))\n\n if args.cuda:\n kge_model = kge_model.cuda()\n \n if args.init_checkpoint:\n # Restore model from checkpoint directory\n logging.info('Loading checkpoint %s...' % args.init_checkpoint)\n checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'), map_location=torch.device('cpu'))\n entity_dict = checkpoint['entity_dict']\n\n if args.do_train:\n # Set training dataloader iterator\n train_dataloader_head = DataLoader(\n TrainDataset(train_triples, nentity, nrelation, \n args.negative_sample_size, 'head-batch',\n train_count, train_true_head, train_true_tail,\n entity_dict), \n batch_size=args.batch_size,\n shuffle=True, \n num_workers=max(1, args.cpu_num//2),\n collate_fn=TrainDataset.collate_fn\n )\n \n train_dataloader_tail = DataLoader(\n TrainDataset(train_triples, nentity, nrelation, \n args.negative_sample_size, 'tail-batch',\n train_count, train_true_head, train_true_tail,\n entity_dict), \n batch_size=args.batch_size,\n shuffle=True, \n num_workers=max(1, args.cpu_num//2),\n collate_fn=TrainDataset.collate_fn\n )\n \n train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)\n \n # Set training configuration\n current_learning_rate = args.learning_rate\n optimizer = torch.optim.Adam(\n filter(lambda p: p.requires_grad, kge_model.parameters()), \n lr=current_learning_rate\n )\n if args.warm_up_steps:\n warm_up_steps = args.warm_up_steps\n else:\n warm_up_steps = args.max_steps // 2\n\n if args.init_checkpoint:\n # Restore model from checkpoint directory\n # logging.info('Loading checkpoint %s...' 
% args.init_checkpoint)\n # checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))\n init_step = checkpoint['step']\n kge_model.load_state_dict(checkpoint['model_state_dict'])\n # entity_dict = checkpoint['entity_dict']\n if args.do_train:\n current_learning_rate = checkpoint['current_learning_rate']\n warm_up_steps = checkpoint['warm_up_steps']\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n else:\n logging.info('Ramdomly Initializing %s Model...' % args.model)\n init_step = 0\n \n step = init_step\n \n logging.info('Start Training...')\n logging.info('init_step = %d' % init_step)\n logging.info('batch_size = %d' % args.batch_size)\n logging.info('negative_adversarial_sampling = %d' % args.negative_adversarial_sampling)\n logging.info('hidden_dim = %d' % args.hidden_dim)\n logging.info('gamma = %f' % args.gamma)\n logging.info('negative_adversarial_sampling = %s' % str(args.negative_adversarial_sampling))\n if args.negative_adversarial_sampling:\n logging.info('adversarial_temperature = %f' % args.adversarial_temperature)\n \n # Set valid dataloader as it would be evaluated during training\n \n if args.do_train:\n logging.info('learning_rate = %d' % current_learning_rate)\n\n training_logs = []\n \n #Training Loop\n for step in range(init_step, args.max_steps):\n\n # decide if weights need to be updated acc to gradient accumulation\n if (step + 1) % args.grad_accum_steps == 0:\n accumulate = False\n else:\n accumulate = True\n \n log = kge_model.train_step(kge_model, optimizer, train_iterator, args, accumulate)\n training_logs.append(log)\n \n if step >= warm_up_steps:\n current_learning_rate = current_learning_rate / 5\n logging.info('Change learning_rate to %f at step %d' % (current_learning_rate, step))\n optimizer = torch.optim.Adam(\n filter(lambda p: p.requires_grad, kge_model.parameters()), \n lr=current_learning_rate\n )\n warm_up_steps = warm_up_steps * 3\n \n if step % args.save_checkpoint_steps == 0 and step > 0: # ~ 41 seconds/saving\n save_variable_list = {\n 'step': step, \n 'current_learning_rate': current_learning_rate,\n 'warm_up_steps': warm_up_steps,\n 'entity_dict': entity_dict\n }\n save_model(kge_model, optimizer, save_variable_list, args)\n\n if step % args.log_steps == 0:\n metrics = {}\n for metric in training_logs[0].keys():\n metrics[metric] = sum([log[metric] for log in training_logs])/len(training_logs)\n log_metrics('Train', step, metrics, writer)\n training_logs = []\n \n if args.do_valid and step % args.valid_steps == 0 and step > 0:\n logging.info('Evaluating on Valid Dataset...')\n metrics = kge_model.test_step(kge_model, valid_triples, args, entity_dict)\n log_metrics('Valid', step, metrics, writer)\n \n save_variable_list = {\n 'step': step, \n 'current_learning_rate': current_learning_rate,\n 'warm_up_steps': warm_up_steps\n }\n save_model(kge_model, optimizer, save_variable_list, args)\n \n if args.do_valid:\n logging.info('Evaluating on Valid Dataset...')\n metrics = kge_model.test_step(kge_model, valid_triples, args, entity_dict)\n log_metrics('Valid', step, metrics, writer)\n \n if args.do_test:\n logging.info('Evaluating on Test Dataset...')\n metrics = kge_model.test_step(kge_model, test_triples, args, entity_dict)\n log_metrics('Test', step, metrics, writer)\n \n if args.evaluate_train:\n logging.info('Evaluating on Training Dataset...')\n small_train_triples = {}\n indices = np.random.choice(len(train_triples['head']), args.ntriples_eval_train, replace=False)\n for i in train_triples:\n if 'type' in i:\n 
small_train_triples[i] = [train_triples[i][x] for x in indices]\n else:\n small_train_triples[i] = train_triples[i][indices]\n metrics = kge_model.test_step(kge_model, small_train_triples, args, entity_dict, random_sampling=True)\n log_metrics('Train', step, metrics, writer)\n \nif __name__ == '__main__':\n main(parse_args())\n"
] |
[
[
"torch.device"
]
] |
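A brief note on the record above: its checkpoint restore is the source of the annotated torch.device call, loading onto CPU via map_location regardless of the device the checkpoint was saved from; the script also logs learning_rate with %d, which truncates fractional rates (1e-4 is logged as 0). A minimal sketch of the same load pattern, with a hypothetical checkpoint path:

```python
import os
import torch

# Load a checkpoint onto CPU regardless of where it was saved, mirroring the
# training script above; the directory name here is hypothetical.
checkpoint = torch.load(os.path.join("checkpoints", "checkpoint"),
                        map_location=torch.device("cpu"))
print(sorted(checkpoint.keys()))
```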
davidfpc/AoC2021
|
[
"b526e606dbf1cc59de4951a321aa9b98d04fde4c"
] |
[
"day1.py"
] |
[
"import numpy as np\n\n\ndef read_input(file_name):\n with open(\"inputFiles/\" + file_name, \"r\") as file:\n lines = file.read().splitlines()\n return [int(i, base=16) for i in lines]\n\n\ndef part1(input_value):\n prev_value = input_value[0]\n counter = 0\n for i in input_value[1:]:\n if int(i) > prev_value:\n counter = counter + 1\n prev_value = i\n return counter\n\n\ndef part2(input_value):\n windowed_input = list(map(lambda x: sum(x), np.lib.stride_tricks.sliding_window_view(input_value, 3)))\n return part1(windowed_input)\n\n\nif __name__ == \"__main__\":\n puzzle_input = read_input(\"day1.txt\")\n print(part1(puzzle_input))\n print(part2(puzzle_input))\n"
] |
[
[
"numpy.lib.stride_tricks.sliding_window_view"
]
] |
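The part2 function above is the source of the annotated sliding_window_view call; note also that read_input parses each line with int(i, base=16), so decimal puzzle input would be read as hexadecimal. A minimal sketch of the windowing step, assuming NumPy >= 1.20:

```python
import numpy as np

# Build 3-wide sliding windows over a 1-D sequence, sum each window, then
# count how many window sums exceed their predecessor (as part2/part1 do).
depths = np.array([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
windows = np.lib.stride_tricks.sliding_window_view(depths, 3)  # shape (8, 3)
sums = windows.sum(axis=1)
print(int(np.sum(sums[1:] > sums[:-1])))  # 5 for this sequence
```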
jacke121/Deep-Feature-Flow
|
[
"8034c0d4169e57db9a6d9add68275722dd20a8ba"
] |
[
"fgfa_rfcn/config/config.py"
] |
[
"# --------------------------------------------------------\n# Flow-Guided Feature Aggregation\n# Copyright (c) 2016 by Contributors\n# Copyright (c) 2017 Microsoft\n# Licensed under The Apache-2.0 License [see LICENSE for details]\n# Modified by Yuqing Zhu, Shuhao Fu, Xizhou Zhu, Yuwen Xiong, Bin Xiao\n# --------------------------------------------------------\n\nimport yaml\nimport numpy as np\nfrom easydict import EasyDict as edict\n\nconfig = edict()\n\nconfig.MXNET_VERSION = ''\nconfig.output_path = ''\nconfig.symbol = ''\nconfig.gpus = ''\nconfig.CLASS_AGNOSTIC = True\nconfig.SCALES = [(600, 1000)] # first is scale (the shorter side); second is max size\n\n# default training\nconfig.default = edict()\nconfig.default.frequent = 20\nconfig.default.kvstore = 'device'\n\n# network related params\nconfig.network = edict()\nconfig.network.pretrained = ''\nconfig.network.pretrained_flow = ''\nconfig.network.pretrained_epoch = 0\nconfig.network.PIXEL_MEANS = np.array([0, 0, 0])\nconfig.network.IMAGE_STRIDE = 0\nconfig.network.RPN_FEAT_STRIDE = 16\nconfig.network.RCNN_FEAT_STRIDE = 16\nconfig.network.FIXED_PARAMS = ['gamma', 'beta']\nconfig.network.ANCHOR_SCALES = (8, 16, 32)\nconfig.network.ANCHOR_RATIOS = (0.5, 1, 2)\nconfig.network.NORMALIZE_RPN = True\nconfig.network.ANCHOR_MEANS = (0.0, 0.0, 0.0, 0.0)\nconfig.network.ANCHOR_STDS = (0.1, 0.1, 0.4, 0.4)\nconfig.network.NUM_ANCHORS = len(config.network.ANCHOR_SCALES) * len(config.network.ANCHOR_RATIOS)\nconfig.network.FGFA_FEAT_DIM = 1024 + 2048 # 1024 for feature network, 2048 for embedding network\n\n# dataset related params\nconfig.dataset = edict()\nconfig.dataset.dataset = 'ImageNetVID'\nconfig.dataset.image_set = 'DET_train_30classes+VID_train_15frames'\nconfig.dataset.test_image_set = 'VID_val_videos'\nconfig.dataset.root_path = './data'\nconfig.dataset.dataset_path = '../data/ILSVRC2015'\nconfig.dataset.motion_iou_path = './lib/dataset/imagenet_vid_groundtruth_motion_iou.mat'\nconfig.dataset.enable_detailed_eval = True\nconfig.dataset.NUM_CLASSES = 31\n\n\nconfig.TRAIN = edict()\n\nconfig.TRAIN.lr = 0\nconfig.TRAIN.lr_step = ''\nconfig.TRAIN.lr_factor = 0.1\nconfig.TRAIN.warmup = False\nconfig.TRAIN.warmup_lr = 0\nconfig.TRAIN.warmup_step = 0\nconfig.TRAIN.momentum = 0.9\nconfig.TRAIN.wd = 0.0005\nconfig.TRAIN.begin_epoch = 0\nconfig.TRAIN.end_epoch = 0\nconfig.TRAIN.model_prefix = ''\n\n# whether resume training\nconfig.TRAIN.RESUME = False\n# whether flip image\nconfig.TRAIN.FLIP = True\n# whether shuffle image\nconfig.TRAIN.SHUFFLE = True\n# whether use OHEM\nconfig.TRAIN.ENABLE_OHEM = False\n# size of images for each device, 2 for rcnn, 1 for rpn and e2e\nconfig.TRAIN.BATCH_IMAGES = 2\n# e2e changes behavior of anchor loader and metric\nconfig.TRAIN.END2END = False\n# group images with similar aspect ratio\nconfig.TRAIN.ASPECT_GROUPING = True\n\n# R-CNN\n# rcnn rois batch size\nconfig.TRAIN.BATCH_ROIS = 128\nconfig.TRAIN.BATCH_ROIS_OHEM = 128\n# rcnn rois sampling params\nconfig.TRAIN.FG_FRACTION = 0.25\nconfig.TRAIN.FG_THRESH = 0.5\nconfig.TRAIN.BG_THRESH_HI = 0.5\nconfig.TRAIN.BG_THRESH_LO = 0.0\n# rcnn bounding box regression params\nconfig.TRAIN.BBOX_REGRESSION_THRESH = 0.5\nconfig.TRAIN.BBOX_WEIGHTS = np.array([1.0, 1.0, 1.0, 1.0])\n\n# RPN anchor loader\n# rpn anchors batch size\nconfig.TRAIN.RPN_BATCH_SIZE = 256\n# rpn anchors sampling params\nconfig.TRAIN.RPN_FG_FRACTION = 0.5\nconfig.TRAIN.RPN_POSITIVE_OVERLAP = 0.7\nconfig.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3\nconfig.TRAIN.RPN_CLOBBER_POSITIVES = False\n# rpn bounding box 
regression params\nconfig.TRAIN.RPN_BBOX_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\nconfig.TRAIN.RPN_POSITIVE_WEIGHT = -1.0\n\n# used for end2end training\n# RPN proposal\nconfig.TRAIN.CXX_PROPOSAL = True\nconfig.TRAIN.RPN_NMS_THRESH = 0.7\nconfig.TRAIN.RPN_PRE_NMS_TOP_N = 12000\nconfig.TRAIN.RPN_POST_NMS_TOP_N = 2000\nconfig.TRAIN.RPN_MIN_SIZE = config.network.RPN_FEAT_STRIDE\n# approximate bounding box regression\nconfig.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED = True\nconfig.TRAIN.BBOX_MEANS = (0.0, 0.0, 0.0, 0.0)\nconfig.TRAIN.BBOX_STDS = (0.1, 0.1, 0.2, 0.2)\n\n# FGFA, trained image sampled from [min_offset, max_offset]\nconfig.TRAIN.MIN_OFFSET = -9\nconfig.TRAIN.MAX_OFFSET = 9\n\nconfig.TEST = edict()\n\n# R-CNN testing\n# use rpn to generate proposal\nconfig.TEST.HAS_RPN = False\n# size of images for each device\nconfig.TEST.BATCH_IMAGES = 1\n\n# RPN proposal\nconfig.TEST.CXX_PROPOSAL = True\nconfig.TEST.RPN_NMS_THRESH = 0.7\nconfig.TEST.RPN_PRE_NMS_TOP_N = 6000\nconfig.TEST.RPN_POST_NMS_TOP_N = 300\nconfig.TEST.RPN_MIN_SIZE = config.network.RPN_FEAT_STRIDE\n\n# RCNN nms\nconfig.TEST.NMS = 0.3\nconfig.TEST.max_per_image = 300\n\n#\nconfig.TEST.KEY_FRAME_INTERVAL = 9\nconfig.TEST.SEQ_NMS = False\n\n\n# Test Model Epoch\nconfig.TEST.test_epoch = 0\n\n\ndef update_config(config_file):\n exp_config = None\n with open(config_file) as f:\n exp_config = edict(yaml.load(f))\n for k, v in exp_config.items():\n if k in config:\n if isinstance(v, dict):\n if k == 'TRAIN':\n if 'BBOX_WEIGHTS' in v:\n v['BBOX_WEIGHTS'] = np.array(v['BBOX_WEIGHTS'])\n elif k == 'network':\n if 'PIXEL_MEANS' in v:\n v['PIXEL_MEANS'] = np.array(v['PIXEL_MEANS'])\n for vk, vv in v.items():\n config[k][vk] = vv\n else:\n if k == 'SCALES':\n config[k][0] = (tuple(v))\n else:\n config[k] = v\n else:\n raise ValueError(\"key must exist in config.py\")\n"
] |
[
[
"numpy.array"
]
] |
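One caveat on update_config above: it calls yaml.load(f) with no Loader argument, which PyYAML 5.x warns about and 6.x rejects outright. A sketch of the safe equivalent, with a hypothetical config file name:

```python
import yaml
from easydict import EasyDict as edict

# Safe drop-in for the yaml.load(f) call in update_config; safe_load avoids
# the Loader requirement introduced by newer PyYAML releases.
with open("experiment.yaml") as f:  # hypothetical file name
    exp_config = edict(yaml.safe_load(f))
print(list(exp_config.keys()))
```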
EqThinker/deep-track
|
[
"c72dc7b182c66c13fb6f5df38b6ed6e78f625a41"
] |
[
"pred_learn/models/rnn.py"
] |
[
"import torch\nfrom torch import nn\n\n\nclass PredictorRNN(nn.Module):\n def __init__(self, obs_shape, action_shape, hidden_size=16):\n super(PredictorRNN, self).__init__()\n self.rnn = nn.GRU(obs_shape + action_shape, hidden_size, num_layers=1, batch_first=True)\n\n self.mlp_us = nn.Sequential(\n nn.Linear(hidden_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, obs_shape),\n )\n\n def forward(self, obs, actions):\n h = torch.cat((obs, actions), dim=2)\n h, state_f = self.rnn(h)\n obs_shifted = self.mlp_us(h)\n return obs_shifted\n\n # net = PredictorRNN()\n # x = torch.zeros(150, 16, 4)\n # out = net(x)\n # out.size()\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.nn.GRU",
"torch.cat"
]
] |
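The commented-out usage at the bottom of the record constructs PredictorRNN() without its required obs_shape and action_shape arguments, so it is a reminder rather than runnable code. A shape check mirroring the forward pass, with hypothetical sizes (batch 8, sequence length 150, obs_shape 4, action_shape 2, hidden 16):

```python
import torch
from torch import nn

# Concatenate observations and actions, run a GRU, and map hidden states back
# to the observation size, as PredictorRNN.forward does.
rnn = nn.GRU(4 + 2, 16, num_layers=1, batch_first=True)
head = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 4))
obs, actions = torch.zeros(8, 150, 4), torch.zeros(8, 150, 2)
hidden, _ = rnn(torch.cat((obs, actions), dim=2))
print(head(hidden).shape)  # torch.Size([8, 150, 4])
```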
bda2017-shallowermind/MustGAN
|
[
"b06cbcf573461f88444d39ca6371d9912213d6f2",
"b06cbcf573461f88444d39ca6371d9912213d6f2"
] |
[
"magenta/magenta/models/nsynth/ours/train.py",
"magenta/magenta/models/nsynth/gan/model.py"
] |
[
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# internal imports\nimport tensorflow as tf\nimport glob\n\nfrom magenta.models.nsynth import utils\n\nslim = tf.contrib.slim\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string(\"master\", \"\",\n \"BNS name of the TensorFlow master to use.\")\ntf.app.flags.DEFINE_string(\"config\", \"model\", \"Model configuration name\")\ntf.app.flags.DEFINE_integer(\"task\", 0,\n \"Task id of the replica running the training.\")\ntf.app.flags.DEFINE_integer(\"worker_replicas\", 1,\n \"Number of replicas. We train with 32.\")\ntf.app.flags.DEFINE_integer(\"ps_tasks\", 0,\n \"Number of tasks in the ps job. If 0 no ps job is \"\n \"used. We typically use 11.\")\ntf.app.flags.DEFINE_integer(\"total_batch_size\", 1,\n \"Batch size spread across all sync replicas.\"\n \"We use a size of 32.\")\ntf.app.flags.DEFINE_string(\"logdir\", \"/tmp/nsynth\",\n \"The log directory for this experiment.\")\ntf.app.flags.DEFINE_string(\"train_path\", \"\", \"The path to the train tfrecord.\")\ntf.app.flags.DEFINE_string(\"log\", \"INFO\",\n \"The threshold for what messages will be logged.\"\n \"DEBUG, INFO, WARN, ERROR, or FATAL.\")\ntf.app.flags.DEFINE_integer(\"num_iters\", 1000,\n \"Number of iterations.\")\ntf.app.flags.DEFINE_integer(\"log_period\", 25,\n \"Log the curr loss after every log_period steps.\")\ntf.app.flags.DEFINE_integer(\"ckpt_period\", 1200,\n \"Checkpoint current variables after every ckpt_period sec.\")\ntf.app.flags.DEFINE_integer(\"gpu\", 2,\n \"Number of gpus to use.\")\n\n\ndef main(unused_argv=None):\n tf.logging.set_verbosity(FLAGS.log)\n\n if FLAGS.config is None:\n raise RuntimeError(\"No config name specified.\")\n\n logdir = FLAGS.logdir\n tf.logging.info(\"Saving to %s\" % logdir)\n train_files = glob.glob(FLAGS.train_path + \"/*\")\n assert len(train_files) == FLAGS.gpu\n\n with tf.Graph().as_default():\n total_batch_size = FLAGS.total_batch_size\n assert total_batch_size % FLAGS.gpu == 0\n worker_batch_size = total_batch_size / FLAGS.gpu\n config = utils.get_module(\"ours.\" + FLAGS.config).Config(worker_batch_size)\n\n # Run the Reader on the CPU\n cpu_device = \"/job:localhost/replica:0/task:0/cpu:0\"\n if FLAGS.ps_tasks:\n cpu_device = \"/job:worker/cpu:0\"\n\n with tf.variable_scope('ours_model_var_scope') as var_scope:\n with tf.device(cpu_device):\n global_step = tf.get_variable(\n \"global_step\", [],\n tf.int32,\n initializer=tf.constant_initializer(0),\n trainable=False)\n\n # pylint: disable=cell-var-from-loop\n lr = tf.constant(config.learning_rate_schedule[0])\n for key, value in config.learning_rate_schedule.iteritems():\n lr = tf.cond(\n tf.less(global_step, key), lambda: lr, lambda: tf.constant(value))\n # pylint: enable=cell-var-from-loop\n\n losses = []\n for i in range(FLAGS.gpu):\n inputs_dict = config.get_batch(train_files[i])\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('GPU_NAME_SCOPE_%d' % i):\n # build the model graph\n encode_dict = 
config.encode(inputs_dict[\"wav\"])\n decode_dict = config.decode(encode_dict[\"encoding\"])\n loss_dict = config.loss(encode_dict[\"x_quantized\"], decode_dict[\"logits\"])\n loss = loss_dict[\"loss\"]\n losses.append(loss)\n var_scope.reuse_variables()\n\n avg_loss = tf.reduce_mean(losses, 0)\n\n worker_replicas = FLAGS.worker_replicas\n ema = tf.train.ExponentialMovingAverage(\n decay=0.9999, num_updates=global_step)\n\n # with tf.variable_scope('ours_model_var_scope') as var_scope ENDS HERE\n\n opt = tf.train.SyncReplicasOptimizer(\n tf.train.AdamOptimizer(lr, epsilon=1e-8),\n worker_replicas,\n total_num_replicas=worker_replicas,\n variable_averages=ema,\n variables_to_average=tf.trainable_variables())\n\n train_op = slim.learning.create_train_op(avg_loss, opt,\n global_step=global_step, colocate_gradients_with_ops=True)\n\n session_config = tf.ConfigProto(allow_soft_placement=True)\n\n is_chief = (FLAGS.task == 0)\n local_init_op = opt.chief_init_op if is_chief else opt.local_step_init_op\n\n slim.learning.train(\n train_op=train_op,\n logdir=logdir,\n is_chief=is_chief,\n master=FLAGS.master,\n number_of_steps=FLAGS.num_iters,\n global_step=global_step,\n log_every_n_steps=FLAGS.log_period,\n local_init_op=local_init_op,\n save_interval_secs=FLAGS.ckpt_period,\n sync_optimizer=opt,\n session_config=session_config,)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n",
"import json\nimport numpy as np\nimport tensorflow as tf\n\nfrom magenta.models.nsynth.gan import masked\n\nslim = tf.contrib.slim\nx_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits\n\ndef parse_hps_file(hps_filename):\n with open(hps_filename, \"r\") as f:\n j = json.load(f)\n hps = {}\n\n if \"ae_num_layers\" in j:\n assert isinstance(j[\"ae_num_layers\"], int)\n tf.logging.info(\"Using custom ae_num_layers %d to train the model.\"\n % j[\"ae_num_layers\"])\n hps[\"ae_num_layers\"] = j[\"ae_num_layers\"]\n else:\n tf.logging.info(\"Using default ae_num_layers to train the model.\")\n\n if \"d_lr_schedule\" in j:\n tf.logging.info(\"Using custom d_lr_schedule to train the model.\")\n d_lr_schedule = {int(k): float(v) for k, v in j[\"d_lr_schedule\"].iteritems()}\n hps[\"d_lr_schedule\"] = d_lr_schedule\n else:\n tf.logging.info(\"Using default d_lr_schedule to train the model.\")\n\n if \"g_lr_schedule\" in j:\n tf.logging.info(\"Using custom g_lr_schedule to train the model.\")\n g_lr_schedule = {int(k): float(v) for k, v in j[\"g_lr_schedule\"].iteritems()}\n hps[\"g_lr_schedule\"] = g_lr_schedule\n else:\n tf.logging.info(\"Using default g_lr_schedule to train the model.\")\n\n if \"alpha\" in j:\n assert isinstance(j[\"alpha\"], float)\n tf.logging.info(\"Using custom alpha %f to train the model.\" % j[\"alpha\"])\n hps[\"alpha\"] = j[\"alpha\"]\n else:\n tf.logging.info(\"Using default alpha to train the model.\")\n\n if \"beta\" in j:\n assert isinstance(j[\"beta\"], float)\n tf.logging.info(\"Using custom beta %f to train the model.\" % j[\"beta\"])\n hps[\"beta\"] = j[\"beta\"]\n else:\n tf.logging.info(\"Using default beta to train the model.\")\n\n if \"g_train_iter_per_step\" in j:\n assert isinstance(j[\"g_train_iter_per_step\"], int)\n tf.logging.info(\"Using custom g_train_iter_per_step %d to train the model.\"\n % j[\"g_train_iter_per_step\"])\n hps[\"g_train_iter_per_step\"] = j[\"g_train_iter_per_step\"]\n else:\n tf.logging.info(\"Using default g_train_iter_per_step to train the model.\")\n\n if \"d_train_iter_per_step\" in j:\n assert isinstance(j[\"d_train_iter_per_step\"], int)\n tf.logging.info(\"Using custom d_train_iter_per_step %d to train the model.\"\n % j[\"d_train_iter_per_step\"])\n hps[\"d_train_iter_per_step\"] = j[\"d_train_iter_per_step\"]\n else:\n tf.logging.info(\"Using default d_train_iter_per_step to train the model.\")\n\n return hps\n\nclass MusTGAN(object):\n def __init__(self, batch_size, num_gpus, hps_filename):\n hps = parse_hps_file(hps_filename) if hps_filename else {}\n\n self.pretrain_lr_schedule = {\n 0: 3e-4,\n 2500: 1e-4,\n 5000: 6e-5,\n 10000: 4e-5,\n 20000: 2e-5,\n 40000: 1e-5,\n 60000: 6e-6,\n 80000: 2e-6,\n }\n\n # TODO: learning rate tuning\n self.d_lr_schedule = hps.get(\"d_lr_schedule\", {\n 0: 3e-4,\n 2500: 1e-4,\n 5000: 6e-5,\n 10000: 4e-5,\n 20000: 2e-5,\n 40000: 1e-5,\n 60000: 6e-6,\n 80000: 2e-6,\n })\n\n self.g_lr_schedule = hps.get(\"g_lr_schedule\", {\n 0: 3e-4,\n 1250: 2e-4,\n 2500: 1e-4,\n 3750: 8e-5,\n 5000: 5e-5,\n 7500: 3e-5,\n 10000: 2e-5,\n 15000: 8e-6,\n 20000: 5e-6,\n 40000: 3e-6,\n 60000: 2e-6,\n 80000: 1e-6,\n })\n\n self.filter_length = 3\n self.ae_num_stages = 10\n self.ae_num_layers = hps.get(\"ae_num_layers\", 20)\n self.ae_filter_length = 3\n self.ae_width = 128\n self.ae_bottleneck_width = 16\n self.ae_hop_length = 4\n self.batch_size = batch_size\n self.num_gpus = num_gpus\n self.alpha = hps.get(\"alpha\", 30.)\n self.beta = hps.get(\"beta\", 5.)\n self.g_train_iter_per_step = 
hps.get(\"g_train_iter_per_step\", 5)\n self.d_train_iter_per_step = hps.get(\"d_train_iter_per_step\", 1)\n\n def mu_law(self, x, mu=255):\n out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)\n return out\n\n def inv_mu_law(self, x, mu=255):\n out = tf.sign(x) / mu * ((1 + mu)**tf.abs(out) - 1)\n out = tf.where(tf.equal(x, 0), x, out)\n return out\n\n def leakly_relu(self, x, alpha=0.1):\n return tf.maximum(x, alpha * x)\n\n def lr_schedule(self, step, schedule):\n lr = tf.constant(schedule[0])\n for key, value in schedule.iteritems():\n lr = tf.cond(\n tf.less(step, key),\n lambda: lr,\n lambda: tf.constant(value))\n return lr\n\n def f(self, x, reuse):\n with tf.variable_scope('f', reuse=reuse):\n ae_num_stages = self.ae_num_stages\n ae_num_layers = self.ae_num_layers\n ae_filter_length = self.ae_filter_length\n ae_width = self.ae_width\n ae_bottleneck_width = self.ae_bottleneck_width\n\n tf.logging.info(\"x shape: %s\" % str(x.shape.as_list()))\n x = tf.expand_dims(x, 2)\n\n en = masked.conv1d(\n x,\n causal=False,\n num_filters=ae_width,\n filter_length=ae_filter_length,\n name='ae_startconv')\n\n for num_layer in xrange(ae_num_layers):\n dilation = 2**(num_layer % ae_num_stages)\n d = self.leakly_relu(en)\n d = masked.conv1d(\n d,\n causal=False,\n num_filters=ae_width,\n filter_length=ae_filter_length,\n dilation=dilation,\n name='ae_dilatedconv_%d' % (num_layer + 1))\n d = self.leakly_relu(d)\n en += masked.conv1d(\n d,\n causal=False,\n num_filters=ae_width,\n filter_length=1,\n name='ae_res_%d' % (num_layer + 1))\n if ((num_layer + 1) % ae_num_stages == 0):\n en = masked.conv1d(\n en,\n causal=False,\n num_filters=ae_width,\n filter_length=self.ae_hop_length,\n stride=self.ae_hop_length,\n name='ae_stridedconv_%d' % (num_layer + 1))\n\n en = masked.conv1d(\n en,\n causal=False,\n num_filters=self.ae_bottleneck_width,\n filter_length=16,\n stride=16,\n name='ae_bottleneck')\n tf.logging.info(\"en shape: %s\" % str(en.shape.as_list()))\n\n return en\n\n def g(self, encoding, reuse):\n with tf.variable_scope('g', reuse=reuse):\n de = encoding\n de = masked.deconv1d(\n de,\n causal=False,\n num_filters=self.ae_width,\n filter_length=16,\n stride=16,\n name='ae_bottleneck')\n\n # Residual blocks with skip connections.\n for i in xrange(self.ae_num_layers):\n if i % self.ae_num_stages == 0:\n de = masked.deconv1d(\n de,\n causal=False,\n num_filters=self.ae_width,\n filter_length=self.ae_hop_length,\n stride=self.ae_hop_length,\n name='ae_stridedconv_%d' % (i + 1))\n\n dilation = 2 ** (self.ae_num_stages - (i % self.ae_num_stages) - 1)\n d = self.leakly_relu(de)\n d = masked.deconv1d(\n d,\n causal=False,\n num_filters=self.ae_width,\n filter_length=self.ae_filter_length,\n dilation=dilation,\n name='ae_dilateddeconv_%d' % (i + 1))\n d = self.leakly_relu(d)\n de += masked.conv1d(\n d,\n num_filters=self.ae_width,\n filter_length=1,\n name='ae_res_%d' % (i + 1))\n\n ge = masked.deconv1d(\n de,\n causal=False,\n num_filters=1,\n filter_length=self.ae_filter_length,\n name='ge')\n ge = tf.tanh(ge)\n ge = tf.squeeze(ge, [2])\n tf.logging.info('final ge shape: %s' % str(ge.shape.as_list()))\n\n return ge\n\n def discriminator(self, x, reuse):\n with tf.variable_scope('discriminator', reuse=reuse):\n fx = self.f(x, reuse)\n\n with tf.variable_scope('pool', reuse=reuse):\n fx_reshaped = tf.reshape(fx, [self.batch_size, -1])\n\n with tf.variable_scope('fc', reuse=reuse):\n fc1 = tf.layers.dense(inputs=fx_reshaped, units=512, activation=None)\n fc2 = tf.layers.dense(inputs=fc1, 
units=512, activation=None)\n fc3 = tf.layers.dense(inputs=fc2, units=3, activation=None)\n\n return fc3\n\n def build_pretrain_model(self, input_wavs, input_labels):\n assert len(input_wavs) == self.num_gpus\n assert len(input_labels) == self.num_gpus\n\n with tf.device('/cpu:0'):\n global_step = tf.train.get_or_create_global_step()\n\n lr = self.lr_schedule(global_step, self.pretrain_lr_schedule)\n\n losses = []\n accuracies = []\n for i in range(self.num_gpus):\n input_wav = input_wavs[i]\n input_label = input_labels[i]\n reuse = False if i == 0 else True\n\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('gpu_name_scope_%d' % i):\n # build the model graph\n mu_law_input_wav = self.mu_law(input_wav)\n en = self.f(mu_law_input_wav, reuse=reuse) # (batch_size, 48, ae_bottleneck=16)\n net = tf.reshape(en, [self.batch_size, -1])\n\n with tf.variable_scope('pretrain_fc', reuse=reuse):\n net = tf.layers.dense(inputs=net, units=512, activation=None)\n net = tf.layers.dense(inputs=net, units=512, activation=None)\n net = tf.layers.dense(inputs=net, units=128, activation=None)\n\n correct_pred = tf.equal(tf.argmax(net, 1), input_label)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n accuracies.append(accuracy)\n\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=input_label, logits=net))\n losses.append(loss)\n\n avg_loss = tf.reduce_mean(losses)\n avg_accuracy = tf.reduce_mean(accuracies)\n\n ema = tf.train.ExponentialMovingAverage(\n decay=0.9999, num_updates=global_step)\n\n opt = tf.train.AdamOptimizer(lr, epsilon=1e-8)\n opt_op = opt.minimize(\n avg_loss,\n global_step=global_step,\n var_list=tf.trainable_variables(),\n colocate_gradients_with_ops=True)\n\n # opt = tf.train.SyncReplicasOptimizer(\n # tf.train.AdamOptimizer(lr, epsilon=1e-8),\n # 1, # worker_replicas\n # total_num_replicas=1, # worker_replicas\n # variable_averages=ema,\n # variables_to_average=tf.trainable_variables())\n\n maintain_averages_op = ema.apply(tf.trainable_variables())\n\n with tf.control_dependencies([opt_op]):\n train_op = tf.group(maintain_averages_op)\n\n return {\n 'global_step': global_step,\n 'loss': avg_loss,\n 'train_op': train_op,\n 'accuracy': avg_accuracy,\n }\n\n def build_train_model(self, src_wavs, trg_wavs):\n assert len(src_wavs) == self.num_gpus\n assert len(trg_wavs) == self.num_gpus\n\n with tf.device('/cpu:0'):\n global_step = tf.train.get_or_create_global_step()\n d_step = tf.get_variable(\n 'd_step',\n shape=[],\n dtype=tf.int64,\n initializer=tf.zeros_initializer(),\n trainable=False,\n collections=[tf.GraphKeys.GLOBAL_VARIABLES])\n g_step = tf.get_variable(\n 'g_step',\n shape=[],\n dtype=tf.int64,\n initializer=tf.zeros_initializer(),\n trainable=False,\n collections=[tf.GraphKeys.GLOBAL_VARIABLES])\n\n d_lr = self.lr_schedule(d_step, self.d_lr_schedule)\n g_lr = self.lr_schedule(g_step, self.g_lr_schedule)\n\n d_losses = []\n g_losses = []\n accuracies = []\n for i in range(self.num_gpus):\n src_wav = src_wavs[i]\n trg_wav = trg_wavs[i]\n reuse = False if i == 0 else True\n\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('gpu_name_scope_%d' % i):\n zero_labels = tf.cast(tf.fill([self.batch_size], 0), tf.int64)\n one_labels = tf.cast(tf.fill([self.batch_size], 1), tf.int64)\n two_labels = tf.cast(tf.fill([self.batch_size], 2), tf.int64)\n\n src_x = self.mu_law(src_wav)\n src_fx = self.f(src_x, reuse)\n src_gfx = self.g(src_fx, reuse)\n src_fgfx = self.f(src_gfx, reuse=True)\n src_dgfx = self.discriminator(src_gfx, reuse)\n\n 
src_dis_loss = tf.reduce_mean(x_entropy_loss(logits=src_dgfx, labels=zero_labels))\n src_gen_loss = tf.reduce_mean(x_entropy_loss(logits=src_dgfx, labels=two_labels))\n\n trg_x = self.mu_law(trg_wav)\n trg_fx = self.f(trg_x, reuse=True)\n trg_gfx = self.g(trg_fx, reuse=True)\n trg_dgfx = self.discriminator(trg_gfx, reuse=True)\n trg_dx = self.discriminator(trg_x, reuse=True)\n\n trg_dis_loss = tf.reduce_mean(x_entropy_loss(logits=trg_dgfx, labels=one_labels))\n trg_gen_loss = tf.reduce_mean(x_entropy_loss(logits=trg_dgfx, labels=two_labels))\n trg_real_dis_loss = tf.reduce_mean(x_entropy_loss(logits=trg_dx, labels=two_labels))\n\n src_const_loss = tf.reduce_mean(tf.square(src_fgfx - src_fx)) * self.alpha\n trg_tid_loss = tf.reduce_mean(tf.square(trg_x - trg_gfx)) * self.beta\n\n d_loss = src_dis_loss + trg_dis_loss + trg_real_dis_loss\n g_loss = src_gen_loss + trg_gen_loss + trg_tid_loss + src_const_loss\n\n d_losses.append(d_loss)\n g_losses.append(g_loss)\n\n src_dis_correct_pred = tf.equal(tf.argmax(src_dgfx, 1), zero_labels)\n src_dis_accuracy = tf.reduce_mean(tf.cast(src_dis_correct_pred, tf.float32))\n trg_dis_correct_pred = tf.equal(tf.argmax(trg_dgfx, 1), one_labels)\n trg_dis_accuracy = tf.reduce_mean(tf.cast(trg_dis_correct_pred, tf.float32))\n trg_real_dis_correct_pred = tf.equal(tf.argmax(trg_dx, 1), two_labels)\n trg_real_dis_accuracy = tf.reduce_mean(tf.cast(trg_real_dis_correct_pred, tf.float32))\n\n # NOTE: assumes all inputs have the same batch size\n accuracy = tf.reduce_mean([src_dis_accuracy, trg_dis_accuracy, trg_real_dis_accuracy])\n accuracies.append(accuracy)\n\n\n d_loss = tf.reduce_mean(d_losses)\n g_loss = tf.reduce_mean(g_losses)\n d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')\n g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='g')\n f_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='f')\n\n d_ema = tf.train.ExponentialMovingAverage(\n decay=0.9999, num_updates=d_step)\n g_ema = tf.train.ExponentialMovingAverage(\n decay=0.9999, num_updates=g_step)\n f_ema = tf.train.ExponentialMovingAverage(\n decay=0.9999, num_updates=g_step)\n\n avg_accuracy = tf.reduce_mean(accuracies)\n\n d_opt = tf.train.AdamOptimizer(d_lr, epsilon=1e-8)\n g_opt = tf.train.AdamOptimizer(g_lr, epsilon=1e-8)\n\n d_opt_op = d_opt.minimize(\n d_loss,\n global_step=d_step,\n var_list=d_vars,\n colocate_gradients_with_ops=True)\n g_opt_op = g_opt.minimize(\n g_loss,\n global_step=g_step,\n var_list=g_vars,\n colocate_gradients_with_ops=True)\n\n maintain_averages_d_op = d_ema.apply(d_vars)\n maintain_averages_g_op = g_ema.apply(g_vars)\n\n with tf.control_dependencies([d_opt_op]):\n d_train_op = tf.group(maintain_averages_d_op)\n with tf.control_dependencies([g_opt_op]):\n g_train_op = tf.group(maintain_averages_g_op)\n\n global_step_inc = tf.assign_add(global_step, 1)\n\n restore_from_pretrain_vars = {}\n for var in f_vars:\n restore_from_pretrain_vars[f_ema.average_name(var)] = var\n\n return {\n 'global_step': global_step,\n 'global_step_inc': global_step_inc,\n 'd_loss': d_loss,\n 'src_dis_loss': src_dis_loss,\n 'trg_dis_loss': trg_dis_loss,\n 'trg_real_dis_loss': trg_real_dis_loss,\n 'g_loss': g_loss,\n 'src_gen_loss': src_gen_loss,\n 'trg_gen_loss': trg_gen_loss,\n 'trg_tid_loss': trg_tid_loss,\n 'src_const_loss': src_const_loss,\n 'd_train_op': d_train_op,\n 'g_train_op': g_train_op,\n 'restore_from_pretrain_vars': restore_from_pretrain_vars,\n 'avg_accuracy': avg_accuracy,\n }\n\n def build_eval_model(self, 
input_wavs):\n reuse = False\n with tf.device('/gpu:0'):\n with tf.name_scope('gan_model_var_scope'):\n # build the model graph\n en = self.f(input_wavs, reuse=reuse) # (batch_size, 61440?, ae_bottleneck=16)\n de = self.g(en, reuse=reuse) # (batch_size, num_channel=128)\n\n return {\n 'encoding': en,\n 'decoding': de,\n }\n"
] |
[
[
"tensorflow.Graph",
"tensorflow.device",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.less",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.trainable_variables",
"tensorflow.ConfigProto",
"tensorflow.constant_initializer",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.logging.set_verbosity",
"tensorflow.logging.info",
"tensorflow.train.AdamOptimizer",
"tensorflow.variable_scope",
"tensorflow.name_scope",
"tensorflow.app.run"
],
[
"tensorflow.device",
"tensorflow.sign",
"tensorflow.control_dependencies",
"tensorflow.equal",
"tensorflow.cast",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.tanh",
"tensorflow.train.AdamOptimizer",
"tensorflow.group",
"tensorflow.assign_add",
"tensorflow.get_collection",
"tensorflow.squeeze",
"tensorflow.train.get_or_create_global_step",
"tensorflow.layers.dense",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.trainable_variables",
"tensorflow.argmax",
"numpy.log",
"tensorflow.fill",
"tensorflow.less",
"tensorflow.zeros_initializer",
"tensorflow.logging.info",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.variable_scope",
"tensorflow.abs"
]
] |
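Two caveats on the MustGAN sources above: they rely on Python-2-only constructs (dict.iteritems() and xrange), and inv_mu_law applies tf.abs to out before out is assigned, where tf.abs(x) appears intended. For reference, the mu-law companding pair written as plain NumPy with that fix:

```python
import numpy as np

# Mu-law compress/expand pair matching the TF formulas in the model above.
def mu_law(x, mu=255):
    return np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)

def inv_mu_law(y, mu=255):
    return np.sign(y) / mu * ((1 + mu) ** np.abs(y) - 1)

x = np.linspace(-1.0, 1.0, 11)
print(np.allclose(inv_mu_law(mu_law(x)), x))  # True: the pair round-trips
```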
InvisibleNemo/NIH_ChestXRay
|
[
"649f2aa7b9edc0426066fcd51beaeab33f1e4d1d"
] |
[
"codes/one_hot_labels.py"
] |
[
"\"\"\"\nproject: NIH Chest XRay dataset\ndate: 04/06/2018\ndeveloped by: Debanjan Paul\nfilename: one_hot_labels.py\nversion: 0.1\ndescription: Converts csv into one hot encoded labels\ndependencies: Pandas\n\t\t\n\"\"\"\n\n# Imports\nimport pandas as pd\n\n# Read csv file into pandas dataframe\nlabels_csv = pd.read_csv(\"sample_labels.csv\")\n\n# Extract the set of labels in alphabetical order\nlabel_set = labels_csv['Finding Labels'].tolist()\nlabel_set = \"|\".join(labels)\nlabel_set = list(set(labels.split(\"|\")))\n\n# Create columns in dataframe to store binary values\nfor i in label_set:\n labels_csv[i] = 0\n\n# Get binary values\nfor i in range(labels_csv.shape[0]):\n for j in list(label_set):\n if(j in labels_csv.iloc[i]['Finding Labels']):\n labels_csv.loc[i,(j)] = 1\n\n# Store one hot encoded labels in dataframe\nlabels_csv['one_hot'] = labels_csv[[\n 'Atelectasis', \n 'Cardiomegaly', \n 'Consolidation', \n 'Edema', \n 'Effusion', \n 'Emphysema', \n 'Fibrosis', \n 'Hernia', \n 'Infiltration', \n 'Mass', \n 'No Finding', \n 'Nodule', \n 'Pleural_Thickening', \n 'Pneumonia', \n 'Pneumothorax']].values.tolist()\n\n# Store one hot encoded labels in list\none_hot_labels = labels_csv[[\n 'Atelectasis', \n 'Cardiomegaly', \n 'Consolidation', \n 'Edema', \n 'Effusion', \n 'Emphysema', \n 'Fibrosis', \n 'Hernia', \n 'Infiltration', \n 'Mass', \n 'No Finding', \n 'Nodule', \n 'Pleural_Thickening', \n 'Pneumonia', \n 'Pneumothorax']].values.tolist()\n"
] |
[
[
"pandas.read_csv"
]
] |
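The script above builds label_set but then joins and splits an undefined name labels, so it raises NameError as written. A compact working equivalent for the same '|'-separated "Finding Labels" column, using the annotated pandas.read_csv plus Series.str.get_dummies:

```python
import pandas as pd

# Multi-hot encode the pipe-separated findings column; the CSV name is the
# one used in the record.
labels_csv = pd.read_csv("sample_labels.csv")
one_hot = labels_csv["Finding Labels"].str.get_dummies(sep="|")
labels_csv["one_hot"] = one_hot.values.tolist()
print(one_hot.columns.tolist())
```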
bradfordlynch/space_time_pde
|
[
"5e355b0434baf1757d071ce993b84073c8426223"
] |
[
"experiments/rb2d/dataloader_spacetime.py"
] |
[
"\"\"\"RB2 Experiment Dataloader\"\"\"\nimport os\nimport torch\nfrom torch.utils.data import Dataset, Sampler\nimport numpy as np\nfrom scipy.interpolate import RegularGridInterpolator\nfrom scipy import ndimage\nimport warnings\n# pylint: disable=too-manz-arguments, too-manz-instance-attributes, too-manz-locals\n\n\nclass RB2DataLoader(Dataset):\n \"\"\"Pytorch Dataset instance for loading Rayleigh Bernard 2D dataset.\n\n Loads a 2d space + time cubic cutout from the whole simulation.\n \"\"\"\n def __init__(self, data_dir=\"./\", data_filename=\"./data/rb2d_ra1e6_s42.npz\",\n nx=128, nz=128, nt=16, n_samp_pts_per_crop=1024,\n downsamp_xz=4, downsamp_t=4, normalize_output=False, normalize_hres=False,\n return_hres=False, lres_filter='none', lres_interp='linear'):\n \"\"\"\n\n Initialize DataSet\n Args:\n data_dir: str, path to the dataset folder, default=\"./\"\n data_filename: str, name of the dataset file, default=\"rb2d_ra1e6_s42\"\n nx: int, number of 'pixels' in x dimension for high res dataset.\n nz: int, number of 'pixels' in z dimension for high res dataset.\n nt: int, number of timesteps in time for high res dataset.\n n_samp_pts_per_crop: int, number of sample points to return per crop.\n downsamp_xz: int, downsampling factor for the spatial dimensions.\n downsamp_t: int, downsampling factor for the temporal dimension.\n normalize_output: bool, whether to normalize the range of each channel to [0, 1].\n normalize_hres: bool, normalize high res grid.\n return_hres: bool, whether to return the high-resolution data.\n lres_filter: str, filter to apply on original high-res image before interpolation.\n choice of 'none', 'gaussian', 'uniform', 'median', 'maximum'.\n lres_interp: str, interpolation scheme for generating low res.\n choice of 'linear', 'nearest'.\n \"\"\"\n self.data_dir = data_dir\n self.data_filename = data_filename\n self.nx_hres = nx\n self.nz_hres = nz\n self.nt_hres = nt\n self.nx_lres = int(nx/downsamp_xz)\n self.nz_lres = int(nz/downsamp_xz)\n self.nt_lres = int(nt/downsamp_t)\n self.n_samp_pts_per_crop = n_samp_pts_per_crop\n self.downsamp_xz = downsamp_xz\n self.downsamp_t = downsamp_t\n self.normalize_output = normalize_output\n self.normalize_hres = normalize_hres\n self.return_hres = return_hres\n self.lres_filter = lres_filter\n self.lres_interp = lres_interp\n\n # warn about median filter\n if lres_filter == 'median':\n warnings.warn(\"the median filter is very slow...\", RuntimeWarning)\n\n # concatenating pressure, temperature, x-velocity, and z-velocity as a 4 channel array: pbuw\n # shape: (4, 200, 512, 128)\n npdata = np.load(os.path.join(self.data_dir, self.data_filename))\n self.data = np.stack([npdata['p'], npdata['b'], npdata['u'], npdata['w']], axis=0)\n self.data = self.data.astype(np.float32)\n self.data = self.data.transpose(0, 1, 3, 2) # [c, t, z, x]\n nc_data, nt_data, nz_data, nx_data = self.data.shape\n\n # assert nx, nz, nt are viable\n if (nx > nx_data) or (nz > nz_data) or (nt > nt_data):\n raise ValueError('Resolution in each spatial temporal dimension x ({}), z({}), t({})'\n 'must not exceed dataset limits x ({}) z ({}) t ({})'.format(\n nx, nz, nt, nx_data, nz_data, nt_data))\n if (nt % downsamp_t != 0) or (nx % downsamp_xz != 0) or (nz % downsamp_xz != 0):\n raise ValueError('nx, nz and nt must be divisible by downsamp factor.')\n\n self.nx_start_range = np.arange(0, nx_data-nx+1)\n self.nz_start_range = np.arange(0, nz_data-nz+1)\n self.nt_start_range = np.arange(0, nt_data-nt+1)\n self.rand_grid = 
np.stack(np.meshgrid(self.nt_start_range,\n self.nz_start_range,\n self.nx_start_range, indexing='ij'), axis=-1)\n # (xaug, zaug, taug, 3)\n self.rand_start_id = self.rand_grid.reshape([-1, 3])\n self.scale_hres = np.array([self.nt_hres, self.nz_hres, self.nx_hres], dtype=np.int32)\n self.scale_lres = np.array([self.nt_lres, self.nz_lres, self.nx_lres], dtype=np.int32)\n\n # compute channel-wise mean and std\n self._mean = np.mean(self.data, axis=(1, 2, 3))\n self._std = np.std(self.data, axis=(1, 2, 3))\n\n def __len__(self):\n return self.rand_start_id.shape[0]\n\n def filter(self, signal):\n \"\"\"Filter a given signal with a choice of filter type (self.lres_filter).\n \"\"\"\n signal = signal.copy()\n filter_size = [1, self.downsamp_t*2-1, self.downsamp_xz*2-1, self.downsamp_xz*2-1]\n\n if self.lres_filter == 'none' or (not self.lres_filter):\n output = signal\n elif self.lres_filter == 'gaussian':\n sigma = [0, int(self.downsamp_t/2), int(self.downsamp_xz/2), int(self.downsamp_xz/2)]\n output = ndimage.gaussian_filter(signal, sigma=sigma)\n elif self.lres_filter == 'uniform':\n output = ndimage.uniform_filter(signal, size=filter_size)\n elif self.lres_filter == 'median':\n output = ndimage.median_filter(signal, size=filter_size)\n elif self.lres_filter == 'maximum':\n output = ndimage.maximum_filter(signal, size=filter_size)\n else:\n raise NotImplementedError(\n \"lres_filter must be one of none/gaussian/uniform/median/maximum\")\n return output\n\n def __getitem__(self, idx):\n \"\"\"Get the random cutout data cube corresponding to idx.\n\n Args:\n idx: int, index of the crop to return. must be smaller than len(self).\n\n Returns:\n space_time_crop_hres (*optional): array of shape [4, nt_hres, nz_hres, nx_hres],\n where 4 are the phys channels pbuw.\n space_time_crop_lres: array of shape [4, nt_lres, nz_lres, nx_lres], where 4 are the phys\n channels pbuw.\n point_coord: array of shape [n_samp_pts_per_crop, 3], where 3 are the t, x, z dims.\n CAUTION - point_coord are normalized to (0, 1) for the relative window.\n point_value: array of shape [n_samp_pts_per_crop, 4], where 4 are the phys channels pbuw.\n \"\"\"\n t_id, z_id, x_id = self.rand_start_id[idx]\n space_time_crop_hres = self.data[:,\n t_id:t_id+self.nt_hres,\n z_id:z_id+self.nz_hres,\n x_id:x_id+self.nx_hres] # [c, t, z, x]\n\n # create low res grid from hi res space time crop\n # apply filter\n space_time_crop_hres_fil = self.filter(space_time_crop_hres)\n\n interp = RegularGridInterpolator(\n (np.arange(self.nt_hres), np.arange(self.nz_hres), np.arange(self.nx_hres)),\n values=space_time_crop_hres_fil.transpose(1, 2, 3, 0), method=self.lres_interp)\n\n lres_coord = np.stack(np.meshgrid(np.linspace(0, self.nt_hres-1, self.nt_lres),\n np.linspace(0, self.nz_hres-1, self.nz_lres),\n np.linspace(0, self.nx_hres-1, self.nx_lres),\n indexing='ij'), axis=-1)\n space_time_crop_lres = interp(lres_coord).transpose(3, 0, 1, 2) # [c, t, z, x]\n\n # create random point samples within space time crop\n point_coord = np.random.rand(self.n_samp_pts_per_crop, 3) * (self.scale_hres - 1)\n point_value = interp(point_coord)\n point_coord = point_coord / (self.scale_hres - 1)\n\n if self.normalize_output:\n space_time_crop_lres = self.normalize_grid(space_time_crop_lres)\n point_value = self.normalize_points(point_value)\n if self.normalize_hres:\n space_time_crop_hres = self.normalize_grid(space_time_crop_hres)\n\n return_tensors = [space_time_crop_lres, point_coord, point_value]\n\n # cast everything to float32\n return_tensors = 
[t.astype(np.float32) for t in return_tensors]\n\n if self.return_hres:\n return_tensors = [space_time_crop_hres] + return_tensors\n return tuple(return_tensors)\n\n @property\n def channel_mean(self):\n \"\"\"channel-wise mean of dataset.\"\"\"\n return self._mean\n\n @property\n def channel_std(self):\n \"\"\"channel-wise mean of dataset.\"\"\"\n return self._std\n\n @staticmethod\n def _normalize_array(array, mean, std):\n \"\"\"normalize array (np or torch).\"\"\"\n if isinstance(array, torch.Tensor):\n dev = array.device\n std = torch.tensor(std, device=dev)\n mean = torch.tensor(mean, device=dev)\n return (array - mean) / std\n\n @staticmethod\n def _denormalize_array(array, mean, std):\n \"\"\"normalize array (np or torch).\"\"\"\n if isinstance(array, torch.Tensor):\n dev = array.device\n std = torch.tensor(std, device=dev)\n mean = torch.tensor(mean, device=dev)\n return array * std + mean\n\n def normalize_grid(self, grid):\n \"\"\"Normalize grid.\n\n Args:\n grid: np array or torch tensor of shape [4, ...], 4 are the num. of phys channels.\n Returns:\n channel normalized grid of same shape as input.\n \"\"\"\n # reshape mean and std to be broadcastable.\n g_dim = len(grid.shape)\n mean_bc = self.channel_mean[(...,)+(None,)*(g_dim-1)] # unsqueeze from the back\n std_bc = self.channel_std[(...,)+(None,)*(g_dim-1)] # unsqueeze from the back\n return self._normalize_array(grid, mean_bc, std_bc)\n\n\n def normalize_points(self, points):\n \"\"\"Normalize points.\n\n Args:\n points: np array or torch tensor of shape [..., 4], 4 are the num. of phys channels.\n Returns:\n channel normalized points of same shape as input.\n \"\"\"\n # reshape mean and std to be broadcastable.\n g_dim = len(points.shape)\n mean_bc = self.channel_mean[(None,)*(g_dim-1)] # unsqueeze from the front\n std_bc = self.channel_std[(None,)*(g_dim-1)] # unsqueeze from the front\n return self._normalize_array(points, mean_bc, std_bc)\n\n def denormalize_grid(self, grid):\n \"\"\"Denormalize grid.\n\n Args:\n grid: np array or torch tensor of shape [4, ...], 4 are the num. of phys channels.\n Returns:\n channel denormalized grid of same shape as input.\n \"\"\"\n # reshape mean and std to be broadcastable.\n g_dim = len(grid.shape)\n mean_bc = self.channel_mean[(...,)+(None,)*(g_dim-1)] # unsqueeze from the back\n std_bc = self.channel_std[(...,)+(None,)*(g_dim-1)] # unsqueeze from the back\n return self._denormalize_array(grid, mean_bc, std_bc)\n\n\n def denormalize_points(self, points):\n \"\"\"Denormalize points.\n\n Args:\n points: np array or torch tensor of shape [..., 4], 4 are the num. 
of phys channels.\n Returns:\n channel denormalized points of same shape as input.\n \"\"\"\n # reshape mean and std to be broadcastable.\n g_dim = len(points.shape)\n mean_bc = self.channel_mean[(None,)*(g_dim-1)] # unsqueeze from the front\n std_bc = self.channel_std[(None,)*(g_dim-1)] # unsqueeze from the front\n return self._denormalize_array(points, mean_bc, std_bc)\n\n\nif __name__ == '__main__':\n ### example for using the data loader\n data_loader = RB2DataLoader(nt=16, n_samp_pts_per_crop=10000, downsamp_t=4, downsamp_xz=8, return_hres=True)\n # lres_crop, point_coord, point_value = data_loader[61234]\n # import matplotlib.pyplot as plt\n # plt.scatter(point_coord[:, 1], point_coord[:, 2], c=point_value[:, 0])\n # plt.colorbar()\n # plt.show()\n # plt.imshow(lres_crop[0, :, :, 0].T, origin='lower'); plt.show()\n # plt.imshow(lres_crop[1, :, :, 0].T, origin='lower'); plt.show()\n\n data_batches = torch.utils.data.DataLoader(data_loader, batch_size=16, shuffle=True, num_workers=1)\n\n for batch_idx, (hires_input_batch, lowres_input_batch, point_coords, point_values) in enumerate(data_batches):\n print(\"Reading batch #{}:\\t with lowres inputs of size {}, sample coord of size {}, sampe val of size {}\"\n .format(batch_idx+1, list(lowres_input_batch.shape), list(point_coords.shape), list(point_values.shape)))\n if batch_idx > 16:\n break\n import matplotlib.pyplot as plt\n fig = plt.figure()\n ax1 = fig.add_subplots(121)\n ax2 = fig.add_subplots(122)\n ax1.imshow(hires_input_batch[0, 0, 2])\n ax2.imshow(lowres_input_batch[0, 0, 8])\n plt.show()\n"
] |
[
[
"scipy.ndimage.gaussian_filter",
"numpy.meshgrid",
"numpy.linspace",
"numpy.arange",
"torch.utils.data.DataLoader",
"scipy.ndimage.median_filter",
"numpy.stack",
"torch.tensor",
"scipy.ndimage.uniform_filter",
"scipy.ndimage.maximum_filter",
"numpy.std",
"numpy.mean",
"numpy.random.rand",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
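The __main__ demo in the dataloader above calls fig.add_subplots(121), but the Matplotlib Figure method is add_subplot, so the plotting block fails as written. A corrected two-panel sketch, with random arrays standing in for the high-res and low-res crops:

```python
import numpy as np
import matplotlib.pyplot as plt

# Two side-by-side panels, as the demo intends (add_subplot, not add_subplots).
fig = plt.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.imshow(np.random.rand(128, 128))
ax2.imshow(np.random.rand(32, 32))
plt.show()
```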
Gauthams1/smalltrain
|
[
"ac833d58ff2b577277079633da1b20eb50b8d332"
] |
[
"src/smalltrain/utils/tf_log_to_csv.py"
] |
[
"import os\nimport numpy as np\nimport pandas as pd\n\nfrom collections import defaultdict\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\n\n\ndef tabulate_events(dir_path):\n summary_iterators = [EventAccumulator(os.path.join(dir_path, dname)).Reload() for dname in os.listdir(dir_path)]\n\n tags = summary_iterators[0].Tags()['scalars']\n\n for it in summary_iterators:\n assert it.Tags()['scalars'] == tags\n\n out = defaultdict(list)\n steps = []\n\n for tag in tags:\n steps = [e.step for e in summary_iterators[0].Scalars(tag)]\n wall_times = [e.wall_time for e in summary_iterators[0].Scalars(tag)]\n\n for events in zip(*[acc.Scalars(tag) for acc in summary_iterators]):\n assert len(set(e.step for e in events)) == 1\n\n out[tag].append([e.value for e in events])\n\n return out, steps, wall_times\n\n\ndef to_csv(log_dir_path, csv_dir_path):\n dirs = os.listdir(log_dir_path)\n\n d, steps, wall_times = tabulate_events(log_dir_path)\n tags, values = zip(*d.items())\n np_values = np.array(values)\n csv_columns = ['step', 'wall_time']\n csv_columns.extend(dirs)\n print('extend', ['step', 'wall_time'].extend(dirs))\n print('csv_columns', csv_columns)\n\n for index, tag in enumerate(tags):\n # df = pd.DataFrame(np_values[index], index=steps, columns=dirs)\n df = pd.DataFrame(np.vstack((steps, wall_times, np_values[index].T)).T, columns=csv_columns)\n df.to_csv(get_csv_file_path(csv_dir_path, tag), index=False)\n\n\ndef get_csv_file_path(csv_dir_path, tag):\n file_name = tag.replace(\"/\", \"_\") + '.csv'\n folder_path = os.path.join(csv_dir_path, 'csv')\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n return os.path.join(folder_path, file_name)\n\n\nif __name__ == '__main__':\n # example\n train_id = 'SR_1D_CNN_SAMPLE-TRAIN'\n log_dir_path = \"/var/tensorflow/tsp/sample/logs/{}/\".format(train_id)\n csv_dir_path = \"/var/tensorflow/tsp/sample/history/{}/\".format(train_id)\n to_csv(log_dir_path, csv_dir_path)"
] |
[
[
"numpy.array",
"numpy.vstack"
]
] |
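In to_csv above, the print of ['step', 'wall_time'].extend(dirs) shows None, since list.extend mutates in place and returns None; the real columns come from csv_columns. The column-stacking pattern it relies on, with toy numbers:

```python
import numpy as np
import pandas as pd

# Steps, wall-clock times, and one run's scalar values become the columns of
# a single DataFrame via vstack + transpose, as in to_csv above.
steps = [0, 25, 50]
wall_times = [10.0, 12.5, 15.1]
values = np.array([[0.9, 0.7, 0.5]])  # shape: (1 run, 3 logged points)
df = pd.DataFrame(np.vstack((steps, wall_times, values[0])).T,
                  columns=["step", "wall_time", "run_0"])
print(df)
```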
marctimjen/Artefact-Rejection
|
[
"4e850d172fa8c08ba1776c46e760484673d7e7ad"
] |
[
"LoaderPACK/trainer.py"
] |
[
"import neptune.new as neptune\nimport os\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport re\n\nimport sys\nsys.path.append(\"..\") # adds higher directory to python modules path\n\nfrom LoaderPACK.Accuarcy_finder import Accuarcy_find\nfrom LoaderPACK.Accuarcy_upload import Accuarcy_upload\n\n\ndef net_train(device,\n fl,\n it,\n net_name,\n model,\n optimizer,\n lossFunc,\n nEpoch,\n smooth,\n train_loader,\n val_loader,\n run,\n path,\n scheduler = None):\n \"\"\"\n This function is used for training the hyper-optimized networks.\n\n Args:\n device (torch device): whihc device to train the network on.\n fl (tensor type): which type of tensor is used\n (differes depending on device)\n it (tensor type): which type of tensor is used\n (differes depending on device)\n net_name (str): the name of the network\n model (torch model): the network model used\n optimizer (torch optimizor): the optimizor used for the training\n lossFunc (torch loss funciton): the loss function used\n nEpoch (int): amount of epochs used for training\n smooth (float): amount of smoothing used for the recorded loss\n train_loader (torch loader): data loader for training\n val_loader (torch loader): data loader for validation\n run (neptune run instance): to log the information duing training and\n validation\n path (str): path for saving networks\n scheduler = None (torch scheduler): if given the scheduler will be used\n to update the learning rate\n\n Produced files:\n This function creates a folder at path, using the number of the neptune\n run instance. In this folder networks will be saved when new better\n accuracy or loss is achived. Also the final network will be saved in\n this folder.\n \"\"\"\n\n valid_loss, train_loss = [], []\n valid_acc = torch.tensor([]).to(device)\n train_acc = torch.tensor([]).to(device)\n\n first_loss_save = True\n # save the loss of the network before training\n\n try: # test if the optimizor contain momentum\n moment = optimizer.param_groups[0]['momentum']\n moment = True\n except:\n moment = False\n\n # make dir to save the networks in\n str_run = run.get_run_url()\n m = re.match(r\".+-(\\d+)\", str_run) # this correlates the name of the network\n # with the neptune ai name.\n run_nr = m.group(1)\n\n new_path = os.path.join(path, f\"networks_{run_nr}\")\n os.mkdir(new_path)\n\n path = path + f\"networks_{run_nr}/\"\n\n\n\n lowest_val_loss = float(\"inf\") # the best loss obtained during training\n best_acc = -float(\"inf\") # the best accuarcy during training\n\n for iEpoch in range(nEpoch):\n print(f\"Training epoch {iEpoch}\")\n\n run[f\"{net_name}/learning_rate\"].log(optimizer.param_groups[0]['lr'])\n\n if moment:\n run[f\"{net_name}/momentum\"].log(\n optimizer.param_groups[0]['momentum'])\n\n t_mat = torch.zeros(2, 2) # save confusion matrix\n total_pos, total_neg = torch.tensor(0), torch.tensor(0)\n\n for series in train_loader:\n ind, tar, chan = series\n model.zero_grad() # clear the gradients before each instance\n y_pred = model(ind)\n pred = y_pred.transpose(1, 2).reshape(-1, 2).type(fl)\n target = tar.view(-1).type(it)\n loss = lossFunc(pred, target)\n\n if first_loss_save: # save loss before training\n run[f\"{net_name}/train_loss_pr_file\"].log(loss)\n run[f\"{net_name}/smooth_train_loss_pr_file\"].log(loss)\n t_sm_loss = loss.item()\n\n run[f\"{net_name}/validation_loss_pr_file\"].log(loss)\n run[f\"{net_name}/smooth_val_loss_pr_file\"].log(loss)\n v_sm_loss = loss.item()\n first_loss_save = False\n\n 
loss.backward()\n optimizer.step()\n train_loss.append(loss.item())\n\n acc, mat, tot_p_g, tot_n_g = Accuarcy_find(y_pred, tar, device)\n train_acc = torch.cat((train_acc, acc.view(1)))\n t_mat = t_mat + mat\n total_pos = total_pos + tot_p_g\n total_neg = total_neg + tot_n_g\n\n if scheduler: # update the value of the scheduler\n scheduler.step()\n\n # log the mean training loss\n run[f\"{net_name}/train_loss_pr_file\"].log(\n np.mean(np.array(train_loss)))\n\n sm_loss = np.mean(np.array(train_loss)) * smooth \\\n + (1-smooth) * t_sm_loss\n\n t_sm_loss = sm_loss\n # log the smoothed loss\n run[f\"{net_name}/smooth_train_loss_pr_file\"].log(sm_loss)\n\n train_loss = []\n\n run[f\"{net_name}/train_acc_pr_file\"].log(torch.mean(train_acc))\n train_acc = torch.tensor([]).to(device)\n\n run[f\"{net_name}/matrix/train_confusion_matrix_pr_file\"].log(t_mat)\n Accuarcy_upload(run, t_mat, total_pos, total_neg,\n f\"{net_name}\", \"train\")\n\n v_mat = torch.zeros(2,2)\n total_pos, total_neg = torch.tensor(0), torch.tensor(0)\n\n\n for series in val_loader:\n ind, tar, chan = series\n y_pred = model(ind)\n pred = y_pred.transpose(1, 2).reshape(-1, 2).type(fl)\n target = tar.view(-1).type(it)\n loss = lossFunc(pred, target)\n valid_loss.append(loss.item())\n\n acc, mat, tot_p_g, tot_n_g = Accuarcy_find(y_pred, tar, device)\n valid_acc = torch.cat((valid_acc, acc.view(1)))\n v_mat = v_mat + mat\n total_pos = total_pos + tot_p_g\n total_neg = total_neg + tot_n_g\n\n avg_val_loss = np.mean(np.array(valid_loss))\n run[f\"{net_name}/validation_loss_pr_file\"].log(avg_val_loss)\n\n sm_loss = np.mean(np.array(valid_loss)) * smooth \\\n + (1-smooth) * v_sm_loss\n\n v_sm_loss = sm_loss\n\n run[f\"{net_name}/smooth_val_loss_pr_file\"].log(sm_loss)\n valid_loss = []\n\n avg_val_acc = torch.mean(valid_acc)\n run[f\"{net_name}/val_acc_pr_file\"].log(avg_val_acc)\n valid_acc = torch.tensor([]).to(device)\n\n if avg_val_loss < lowest_val_loss and best_acc < avg_val_acc \\\n and iEpoch >= 5:\n # scenario when both the loss and accuarcy is better\n torch.save(model.state_dict(),\n path + f\"{net_name}-epk-{iEpoch}.pt\")\n lowest_val_loss = avg_val_loss\n best_acc = avg_val_acc\n\n elif avg_val_loss < lowest_val_loss and iEpoch >= 5:\n # when only the loss is decreased\n torch.save(model.state_dict(),\n path + f\"{net_name}-loss-epk-{iEpoch}.pt\")\n lowest_val_loss = avg_val_loss\n elif best_acc < avg_val_acc and iEpoch >= 5:\n # when only the accuracy is increased\n torch.save(model.state_dict(),\n path + f\"{net_name}-acc-epk-{iEpoch}.pt\")\n best_acc = avg_val_acc\n\n run[f\"{net_name}/matrix/val_confusion_matrix_pr_file\"].log(v_mat)\n Accuarcy_upload(run, v_mat, total_pos, total_neg, f\"{net_name}\", \"val\")\n\n # save the final network\n torch.save(model.state_dict(), path + f\"final-{net_name}-{run_nr}.pt\")\n run.stop()\n"
] |
[
[
"torch.tensor",
"torch.mean",
"numpy.array",
"torch.zeros"
]
] |
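The smoothed-loss bookkeeping in net_train above is an exponential moving average in which smooth weights the new per-epoch mean. A toy illustration of that update:

```python
import numpy as np

# sm_loss blends each epoch's mean loss with the previous smoothed value.
smooth, sm_loss = 0.3, 1.0
for epoch_losses in ([0.9, 0.8], [0.7, 0.6], [0.5, 0.4]):
    sm_loss = np.mean(np.array(epoch_losses)) * smooth + (1 - smooth) * sm_loss
    print(sm_loss)
```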
qiuxin2012/BigDL
|
[
"e3cd7499c0f850eb003163df8f090e7e92841ad0"
] |
[
"pyspark/bigdl/keras/backend.py"
] |
[
"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\nfrom pyspark.rdd import RDD\n\nfrom bigdl.keras.optimization import *\nfrom bigdl.util.common import get_spark_context\nfrom bigdl.util.common import to_sample_rdd\n\n\nclass KerasModelWrapper():\n\n def __init__(self, kmodel):\n self.bmodel = DefinitionLoader.from_kmodel(kmodel)\n WeightLoader.load_weights_from_kmodel(self.bmodel, kmodel) # share the same weight.\n self.criterion = OptimConverter.to_bigdl_criterion(kmodel.loss)\n self.optim_method = OptimConverter.to_bigdl_optim_method(kmodel.optimizer)\n self.metrics = OptimConverter.to_bigdl_metrics(kmodel.metrics) if kmodel.metrics else None\n\n def evaluate(self, x, y, batch_size=32, sample_weight=None, is_distributed=False):\n \"\"\"\n Evaluate a model by the given metrics.\n :param x: ndarray or list of ndarray for local mode.\n RDD[Sample] for distributed mode\n :param y: ndarray or list of ndarray for local mode and would be None for cluster mode.\n :param batch_size\n :param is_distributed: run in local mode or distributed mode.\n NB: if is_distributed=true, x should be RDD[Sample] and y should be None\n :return:\n \"\"\"\n if sample_weight:\n unsupport_exp(\"sample_weight\")\n if is_distributed:\n if isinstance(x, np.ndarray):\n input = to_sample_rdd(x, y)\n elif isinstance(x, RDD):\n input = x\n if self.metrics:\n sc = get_spark_context()\n return [r.result for r in\n self.bmodel.evaluate(input, batch_size, self.metrics)]\n else:\n raise Exception(\"No Metrics found.\")\n\n raise Exception(\"not supported operation: %s\", is_distributed)\n\n def predict(self, x, batch_size=None, verbose=None, is_distributed=False):\n \"\"\"Generates output predictions for the input samples,\n processing the samples in a batched way.\n\n # Arguments\n x: the input data, as a Numpy array or list of Numpy array for local mode.\n as RDD[Sample] for distributed mode\n is_distributed: used to control run in local or cluster. 
the default value is False\n # Returns\n A Numpy array or RDD[Sample] of predictions.\n \"\"\"\n if batch_size or verbose:\n raise Exception(\"we don't support batch_size or verbose for now\")\n if is_distributed:\n if isinstance(x, np.ndarray):\n input = to_sample_rdd(x, np.zeros([x.shape[0]]))\n # np.asarray(self.bmodel.predict(x_rdd).collect())\n elif isinstance(x, RDD):\n input = x\n return self.bmodel.predict(input)\n else:\n if isinstance(x, np.ndarray):\n return self.bmodel.predict_local(x)\n raise Exception(\"not supported type: %s\" % x)\n\n def fit(self, x, y=None, batch_size=32, nb_epoch=10, verbose=1, callbacks=None,\n validation_split=0., validation_data=None, shuffle=True,\n class_weight=None, sample_weight=None, initial_epoch=0, is_distributed=False):\n \"\"\"Optimize the model by the given options\n\n :param x: ndarray or list of ndarray for local mode.\n RDD[Sample] for distributed mode\n :param y: ndarray or list of ndarray for local mode and would be None for cluster mode.\n is_distributed: used to control run in local or cluster. the default value is False.\n NB: if is_distributed=true, x should be RDD[Sample] and y should be None\n :return:\n A Numpy array or RDD[Sample] of predictions.\n \"\"\"\n if callbacks:\n raise Exception(\"We don't support callbacks in fit for now\")\n if class_weight:\n unsupport_exp(\"class_weight\")\n if sample_weight:\n unsupport_exp(\"sample_weight\")\n if initial_epoch != 0:\n unsupport_exp(\"initial_epoch\")\n if shuffle != True:\n unsupport_exp(\"shuffle\")\n if validation_split != 0.:\n unsupport_exp(\"validation_split\")\n bopt = self.__create_optimizer(x=x,\n y=y,\n batch_size=batch_size,\n nb_epoch=nb_epoch,\n validation_data=validation_data,\n is_distributed=is_distributed)\n bopt.optimize()\n\n def __create_optimizer(self, x=None, y=None, batch_size=32, nb_epoch=10,\n validation_data=None, is_distributed=False):\n if is_distributed:\n if isinstance(x, np.ndarray):\n input = to_sample_rdd(x, y)\n validation_data_rdd = to_sample_rdd(*validation_data)\n elif isinstance(x, RDD):\n input = x\n validation_data_rdd = validation_data\n return self.__create_distributed_optimizer(training_rdd=input,\n batch_size=batch_size,\n nb_epoch=nb_epoch,\n validation_data=validation_data_rdd)\n else:\n if isinstance(x, np.ndarray):\n return self.__create_local_optimizer(x, y,\n batch_size=batch_size,\n nb_epoch=nb_epoch,\n validation_data=validation_data)\n raise Exception(\"not supported type: %s\" % x)\n\n def __create_local_optimizer(self, x, y, batch_size=32, nb_epoch=10, validation_data=None):\n if validation_data:\n raise unsupport_exp(\"validation_data\")\n bopt = boptimizer.LocalOptimizer(\n X=x,\n Y=y,\n model=self.bmodel,\n criterion=self.criterion,\n end_trigger=boptimizer.MaxEpoch(nb_epoch),\n batch_size=batch_size,\n optim_method=self.optim_method,\n cores=None\n )\n # TODO: enable validation for local optimizer.\n return bopt\n\n def __create_distributed_optimizer(self, training_rdd,\n batch_size=32,\n nb_epoch=10,\n validation_data=None):\n sc = get_spark_context()\n bopt = boptimizer.Optimizer(\n model=self.bmodel,\n training_rdd=training_rdd,\n criterion=self.criterion,\n end_trigger=boptimizer.MaxEpoch(nb_epoch),\n batch_size=batch_size,\n optim_method=self.optim_method\n )\n if validation_data:\n bopt.set_validation(batch_size,\n val_rdd=validation_data,\n # TODO: check if keras use the same strategy\n trigger=boptimizer.EveryEpoch(),\n val_method=self.metrics)\n return bopt\n\n\ndef with_bigdl_backend(kmodel):\n 
bcommon.init_engine()\n return KerasModelWrapper(kmodel)\n\n"
] |
[
[
"numpy.zeros"
]
] |
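The API column for this row records only `numpy.zeros`, used in the `predict` path to pair input rows with placeholder zero labels before an RDD of Samples is built. A minimal standalone sketch of that pattern with dummy shapes (no Spark/BigDL dependency assumed; the array sizes are illustrative):

```python
import numpy as np

# Dummy feature matrix standing in for the ndarray `x` passed to predict().
x = np.random.rand(4, 3)                      # 4 samples, 3 features

# Placeholder labels, one zero per sample, mirroring np.zeros([x.shape[0]]).
placeholder_labels = np.zeros([x.shape[0]])

# Pair features with placeholder labels (in the source this pairing feeds RDD[Sample]).
pairs = list(zip(x, placeholder_labels))
print(len(pairs), placeholder_labels.shape)   # 4 (4,)
```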
cancan101/matplotlib
|
[
"9c60c583f63da64bfcb9bcadcf6cf4df6a165714"
] |
[
"lib/matplotlib/legend.py"
] |
[
"\"\"\"\nThe legend module defines the Legend class, which is responsible for\ndrawing legends associated with axes and/or figures.\n\nThe Legend class can be considered as a container of legend handles\nand legend texts. Creation of corresponding legend handles from the\nplot elements in the axes or figures (e.g., lines, patches, etc.) are\nspecified by the handler map, which defines the mapping between the\nplot elements and the legend handlers to be used (the default legend\nhandlers are defined in the :mod:`~matplotlib.legend_handler` module). Note\nthat not all kinds of artist are supported by the legend yet (See\n:ref:`plotting-guide-legend` for more information).\n\"\"\"\nfrom __future__ import division, print_function\nimport warnings\n\nimport numpy as np\n\nfrom matplotlib import rcParams\nfrom matplotlib.artist import Artist, allow_rasterization\nfrom matplotlib.cbook import is_string_like, iterable, silent_list, safezip\nfrom matplotlib.font_manager import FontProperties\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Patch, Rectangle, Shadow, FancyBboxPatch\nfrom matplotlib.collections import LineCollection, RegularPolyCollection, \\\n CircleCollection, PathCollection\nfrom matplotlib.transforms import Bbox, BboxBase, TransformedBbox\nfrom matplotlib.transforms import BboxTransformTo, BboxTransformFrom\n\nfrom matplotlib.offsetbox import HPacker, VPacker, TextArea, DrawingArea\nfrom matplotlib.offsetbox import DraggableOffsetBox\n\nfrom matplotlib.container import ErrorbarContainer, BarContainer, StemContainer\nfrom matplotlib import MatplotlibDeprecationWarning as mplDeprecation\nimport legend_handler\n\n\nclass DraggableLegend(DraggableOffsetBox):\n def __init__(self, legend, use_blit=False, update=\"loc\"):\n \"\"\"\n update : If \"loc\", update *loc* parameter of\n legend upon finalizing. If \"bbox\", update\n *bbox_to_anchor* parameter.\n \"\"\"\n self.legend = legend\n\n if update in [\"loc\", \"bbox\"]:\n self._update = update\n else:\n raise ValueError(\"update parameter '%s' is not supported.\" %\n update)\n\n DraggableOffsetBox.__init__(self, legend, legend._legend_box,\n use_blit=use_blit)\n\n def artist_picker(self, legend, evt):\n return self.legend.contains(evt)\n\n def finalize_offset(self):\n loc_in_canvas = self.get_loc_in_canvas()\n\n if self._update == \"loc\":\n self._update_loc(loc_in_canvas)\n elif self._update == \"bbox\":\n self._update_bbox_to_anchor(loc_in_canvas)\n else:\n raise RuntimeError(\"update parameter '%s' is not supported.\" %\n self.update)\n\n def _update_loc(self, loc_in_canvas):\n bbox = self.legend.get_bbox_to_anchor()\n\n # if bbox has zero width or height, the transformation is\n # ill-defined. Fall back to the defaul bbox_to_anchor.\n if bbox.width == 0 or bbox.height == 0:\n self.legend.set_bbox_to_anchor(None)\n bbox = self.legend.get_bbox_to_anchor()\n\n _bbox_transform = BboxTransformFrom(bbox)\n self.legend._loc = tuple(\n _bbox_transform.transform_point(loc_in_canvas))\n\n def _update_bbox_to_anchor(self, loc_in_canvas):\n\n tr = self.legend.axes.transAxes\n loc_in_bbox = tr.transform_point(loc_in_canvas)\n\n self.legend.set_bbox_to_anchor(loc_in_bbox)\n\n\nclass Legend(Artist):\n \"\"\"\n Place a legend on the axes at location loc. 
Labels are a\n sequence of strings and loc can be a string or an integer\n specifying the legend location\n\n The location codes are::\n\n 'best' : 0, (only implemented for axis legends)\n 'upper right' : 1,\n 'upper left' : 2,\n 'lower left' : 3,\n 'lower right' : 4,\n 'right' : 5,\n 'center left' : 6,\n 'center right' : 7,\n 'lower center' : 8,\n 'upper center' : 9,\n 'center' : 10,\n\n loc can be a tuple of the noramilzed coordinate values with\n respect its parent.\n\n \"\"\"\n codes = {'best': 0, # only implemented for axis legends\n 'upper right': 1,\n 'upper left': 2,\n 'lower left': 3,\n 'lower right': 4,\n 'right': 5,\n 'center left': 6,\n 'center right': 7,\n 'lower center': 8,\n 'upper center': 9,\n 'center': 10,\n }\n\n zorder = 5\n\n def __str__(self):\n return \"Legend\"\n\n def __init__(self, parent, handles, labels,\n loc=None,\n numpoints=None, # the number of points in the legend line\n markerscale=None, # the relative size of legend markers\n # vs. original\n scatterpoints=3, # TODO: may be an rcParam\n scatteryoffsets=None,\n prop=None, # properties for the legend texts\n fontsize=None, # keyword to set font size directly\n\n # the following dimensions are in axes coords\n pad=None, # deprecated; use borderpad\n labelsep=None, # deprecated; use labelspacing\n handlelen=None, # deprecated; use handlelength\n handletextsep=None, # deprecated; use handletextpad\n axespad=None, # deprecated; use borderaxespad\n\n # spacing & pad defined as a fraction of the font-size\n borderpad=None, # the whitespace inside the legend border\n labelspacing=None, # the vertical space between the legend\n # entries\n handlelength=None, # the length of the legend handles\n handleheight=None, # the height of the legend handles\n handletextpad=None, # the pad between the legend handle\n # and text\n borderaxespad=None, # the pad between the axes and legend\n # border\n columnspacing=None, # spacing between columns\n\n ncol=1, # number of columns\n mode=None, # mode for horizontal distribution of columns.\n # None, \"expand\"\n\n fancybox=None, # True use a fancy box, false use a rounded\n # box, none use rc\n shadow=None,\n title=None, # set a title for the legend\n bbox_to_anchor=None, # bbox that the legend will be anchored.\n bbox_transform=None, # transform for the bbox\n frameon=None, # draw frame\n handler_map=None,\n ):\n \"\"\"\n - *parent*: the artist that contains the legend\n - *handles*: a list of artists (lines, patches) to be added to the\n legend\n - *labels*: a list of strings to label the legend\n\n Optional keyword arguments:\n\n ================ ====================================================\n Keyword Description\n ================ ====================================================\n loc a location code\n prop the font property\n fontsize the font size (used only if prop is not specified)\n markerscale the relative size of legend markers vs. 
original\n numpoints the number of points in the legend for line\n scatterpoints the number of points in the legend for scatter plot\n scatteryoffsets a list of yoffsets for scatter symbols in legend\n frameon if True, draw a frame around the legend.\n If None, use rc\n fancybox if True, draw a frame with a round fancybox.\n If None, use rc\n shadow if True, draw a shadow behind legend\n ncol number of columns\n borderpad the fractional whitespace inside the legend border\n labelspacing the vertical space between the legend entries\n handlelength the length of the legend handles\n handleheight the length of the legend handles\n handletextpad the pad between the legend handle and text\n borderaxespad the pad between the axes and legend border\n columnspacing the spacing between columns\n title the legend title\n bbox_to_anchor the bbox that the legend will be anchored.\n bbox_transform the transform for the bbox. transAxes if None.\n ================ ====================================================\n\n\n The pad and spacing parameters are measured in font-size units. E.g.,\n a fontsize of 10 points and a handlelength=5 implies a handlelength of\n 50 points. Values from rcParams will be used if None.\n\n Users can specify any arbitrary location for the legend using the\n *bbox_to_anchor* keyword argument. bbox_to_anchor can be an instance\n of BboxBase(or its derivatives) or a tuple of 2 or 4 floats.\n See :meth:`set_bbox_to_anchor` for more detail.\n\n The legend location can be specified by setting *loc* with a tuple of\n 2 floats, which is interpreted as the lower-left corner of the legend\n in the normalized axes coordinate.\n \"\"\"\n # local import only to avoid circularity\n from matplotlib.axes import Axes\n from matplotlib.figure import Figure\n\n Artist.__init__(self)\n\n if prop is None:\n if fontsize is not None:\n self.prop = FontProperties(size=fontsize)\n else:\n self.prop = FontProperties(size=rcParams[\"legend.fontsize\"])\n elif isinstance(prop, dict):\n self.prop = FontProperties(**prop)\n if \"size\" not in prop:\n self.prop.set_size(rcParams[\"legend.fontsize\"])\n else:\n self.prop = prop\n\n self._fontsize = self.prop.get_size_in_points()\n\n propnames = [\"numpoints\", \"markerscale\", \"shadow\", \"columnspacing\",\n \"scatterpoints\", \"handleheight\"]\n\n self.texts = []\n self.legendHandles = []\n self._legend_title_box = None\n\n self._handler_map = handler_map\n\n localdict = locals()\n\n for name in propnames:\n if localdict[name] is None:\n value = rcParams[\"legend.\" + name]\n else:\n value = localdict[name]\n setattr(self, name, value)\n\n # Take care the deprecated keywords\n deprecated_kwds = {\"pad\": \"borderpad\",\n \"labelsep\": \"labelspacing\",\n \"handlelen\": \"handlelength\",\n \"handletextsep\": \"handletextpad\",\n \"axespad\": \"borderaxespad\"}\n\n # convert values of deprecated keywords (ginve in axes coords)\n # to new vaules in a fraction of the font size\n\n # conversion factor\n bbox = parent.bbox\n axessize_fontsize = min(bbox.width, bbox.height) / self._fontsize\n\n for k, v in deprecated_kwds.iteritems():\n # use deprecated value if not None and if their newer\n # counter part is None.\n if localdict[k] is not None and localdict[v] is None:\n warnings.warn(\"Use '%s' instead of '%s'.\" % (v, k),\n mplDeprecation)\n setattr(self, v, localdict[k] * axessize_fontsize)\n continue\n\n # Otherwise, use new keywords\n if localdict[v] is None:\n setattr(self, v, rcParams[\"legend.\" + v])\n else:\n setattr(self, v, localdict[v])\n\n del 
localdict\n\n handles = list(handles)\n if len(handles) < 2:\n ncol = 1\n self._ncol = ncol\n\n if self.numpoints <= 0:\n raise ValueError(\"numpoints must be > 0; it was %d\" % numpoints)\n\n # introduce y-offset for handles of the scatter plot\n if scatteryoffsets is None:\n self._scatteryoffsets = np.array([3. / 8., 4. / 8., 2.5 / 8.])\n else:\n self._scatteryoffsets = np.asarray(scatteryoffsets)\n reps = int(self.scatterpoints / len(self._scatteryoffsets)) + 1\n self._scatteryoffsets = np.tile(self._scatteryoffsets,\n reps)[:self.scatterpoints]\n\n # _legend_box is an OffsetBox instance that contains all\n # legend items and will be initialized from _init_legend_box()\n # method.\n self._legend_box = None\n\n if isinstance(parent, Axes):\n self.isaxes = True\n self.set_axes(parent)\n self.set_figure(parent.figure)\n elif isinstance(parent, Figure):\n self.isaxes = False\n self.set_figure(parent)\n else:\n raise TypeError(\"Legend needs either Axes or Figure as parent\")\n self.parent = parent\n\n if loc is None:\n loc = rcParams[\"legend.loc\"]\n if not self.isaxes and loc in [0, 'best']:\n loc = 'upper right'\n if is_string_like(loc):\n if loc not in self.codes:\n if self.isaxes:\n warnings.warn('Unrecognized location \"%s\". Falling back '\n 'on \"best\"; valid locations are\\n\\t%s\\n'\n % (loc, '\\n\\t'.join(self.codes.iterkeys())))\n loc = 0\n else:\n warnings.warn('Unrecognized location \"%s\". Falling back '\n 'on \"upper right\"; '\n 'valid locations are\\n\\t%s\\n'\n % (loc, '\\n\\t'.join(self.codes.iterkeys())))\n loc = 1\n else:\n loc = self.codes[loc]\n if not self.isaxes and loc == 0:\n warnings.warn('Automatic legend placement (loc=\"best\") not '\n 'implemented for figure legend. '\n 'Falling back on \"upper right\".')\n loc = 1\n\n self._mode = mode\n self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)\n\n # We use FancyBboxPatch to draw a legend frame. The location\n # and size of the box will be updated during the drawing time.\n\n self.legendPatch = FancyBboxPatch(\n xy=(0.0, 0.0), width=1., height=1.,\n facecolor=rcParams[\"axes.facecolor\"],\n edgecolor=rcParams[\"axes.edgecolor\"],\n mutation_scale=self._fontsize,\n snap=True\n )\n\n # The width and height of the legendPatch will be set (in the\n # draw()) to the length that includes the padding. 
Thus we set\n # pad=0 here.\n if fancybox is None:\n fancybox = rcParams[\"legend.fancybox\"]\n\n if fancybox:\n self.legendPatch.set_boxstyle(\"round\", pad=0,\n rounding_size=0.2)\n else:\n self.legendPatch.set_boxstyle(\"square\", pad=0)\n\n self._set_artist_props(self.legendPatch)\n\n self._drawFrame = frameon\n if frameon is None:\n self._drawFrame = rcParams[\"legend.frameon\"]\n\n # init with null renderer\n self._init_legend_box(handles, labels)\n\n self._loc = loc\n\n self.set_title(title)\n\n self._last_fontsize_points = self._fontsize\n\n self._draggable = None\n\n def _set_artist_props(self, a):\n \"\"\"\n set the boilerplate props for artists added to axes\n \"\"\"\n a.set_figure(self.figure)\n if self.isaxes:\n a.set_axes(self.axes)\n a.set_transform(self.get_transform())\n\n def _set_loc(self, loc):\n # find_offset function will be provided to _legend_box and\n # _legend_box will draw itself at the location of the return\n # value of the find_offset.\n self._loc_real = loc\n if loc == 0:\n _findoffset = self._findoffset_best\n else:\n _findoffset = self._findoffset_loc\n\n #def findoffset(width, height, xdescent, ydescent):\n # return _findoffset(width, height, xdescent, ydescent, renderer)\n\n self._legend_box.set_offset(_findoffset)\n\n self._loc_real = loc\n\n def _get_loc(self):\n return self._loc_real\n\n _loc = property(_get_loc, _set_loc)\n\n def _findoffset_best(self, width, height, xdescent, ydescent, renderer):\n \"Helper function to locate the legend at its best position\"\n ox, oy = self._find_best_position(width, height, renderer)\n return ox + xdescent, oy + ydescent\n\n def _findoffset_loc(self, width, height, xdescent, ydescent, renderer):\n \"Helper function to locate the legend using the location code\"\n\n if iterable(self._loc) and len(self._loc) == 2:\n # when loc is a tuple of axes(or figure) coordinates.\n fx, fy = self._loc\n bbox = self.get_bbox_to_anchor()\n x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy\n else:\n bbox = Bbox.from_bounds(0, 0, width, height)\n x, y = self._get_anchored_bbox(self._loc, bbox,\n self.get_bbox_to_anchor(),\n renderer)\n\n return x + xdescent, y + ydescent\n\n @allow_rasterization\n def draw(self, renderer):\n \"Draw everything that belongs to the legend\"\n if not self.get_visible():\n return\n\n renderer.open_group('legend')\n\n fontsize = renderer.points_to_pixels(self._fontsize)\n\n # if mode == fill, set the width of the legend_box to the\n # width of the paret (minus pads)\n if self._mode in [\"expand\"]:\n pad = 2 * (self.borderaxespad + self.borderpad) * fontsize\n self._legend_box.set_width(self.get_bbox_to_anchor().width - pad)\n\n if self._drawFrame:\n # update the location and size of the legend\n bbox = self._legend_box.get_window_extent(renderer)\n self.legendPatch.set_bounds(bbox.x0, bbox.y0,\n bbox.width, bbox.height)\n\n self.legendPatch.set_mutation_scale(fontsize)\n\n if self.shadow:\n shadow = Shadow(self.legendPatch, 2, -2)\n shadow.draw(renderer)\n\n self.legendPatch.draw(renderer)\n\n self._legend_box.draw(renderer)\n\n renderer.close_group('legend')\n\n def _approx_text_height(self, renderer=None):\n \"\"\"\n Return the approximate height of the text. 
This is used to place\n the legend handle.\n \"\"\"\n if renderer is None:\n return self._fontsize\n else:\n return renderer.points_to_pixels(self._fontsize)\n\n # _default_handler_map defines the default mapping between plot\n # elements and the legend handlers.\n\n _default_handler_map = {\n StemContainer: legend_handler.HandlerStem(),\n ErrorbarContainer: legend_handler.HandlerErrorbar(),\n Line2D: legend_handler.HandlerLine2D(),\n Patch: legend_handler.HandlerPatch(),\n LineCollection: legend_handler.HandlerLineCollection(),\n RegularPolyCollection: legend_handler.HandlerRegularPolyCollection(),\n CircleCollection: legend_handler.HandlerCircleCollection(),\n BarContainer: legend_handler.HandlerPatch(\n update_func=legend_handler.update_from_first_child),\n tuple: legend_handler.HandlerTuple(),\n PathCollection: legend_handler.HandlerPathCollection()\n }\n\n # (get|set|update)_default_handler_maps are public interfaces to\n # modify the defalut handler map.\n\n @classmethod\n def get_default_handler_map(cls):\n \"\"\"\n A class method that returns the default handler map.\n \"\"\"\n return cls._default_handler_map\n\n @classmethod\n def set_default_handler_map(cls, handler_map):\n \"\"\"\n A class method to set the default handler map.\n \"\"\"\n cls._default_handler_map = handler_map\n\n @classmethod\n def update_default_handler_map(cls, handler_map):\n \"\"\"\n A class method to update the default handler map.\n \"\"\"\n cls._default_handler_map.update(handler_map)\n\n def get_legend_handler_map(self):\n \"\"\"\n return the handler map.\n \"\"\"\n\n default_handler_map = self.get_default_handler_map()\n\n if self._handler_map:\n hm = default_handler_map.copy()\n hm.update(self._handler_map)\n return hm\n else:\n return default_handler_map\n\n @staticmethod\n def get_legend_handler(legend_handler_map, orig_handle):\n \"\"\"\n return a legend handler from *legend_handler_map* that\n corresponds to *orig_handler*.\n\n *legend_handler_map* should be a dictionary object (that is\n returned by the get_legend_handler_map method).\n\n It first checks if the *orig_handle* itself is a key in the\n *legend_hanler_map* and return the associated value.\n Otherwise, it checks for each of the classes in its\n method-resolution-order. If no matching key is found, it\n returns None.\n \"\"\"\n legend_handler_keys = legend_handler_map.keys()\n if orig_handle in legend_handler_keys:\n handler = legend_handler_map[orig_handle]\n else:\n\n for handle_type in type(orig_handle).mro():\n if handle_type in legend_handler_map:\n handler = legend_handler_map[handle_type]\n break\n else:\n handler = None\n\n return handler\n\n def _init_legend_box(self, handles, labels):\n \"\"\"\n Initialize the legend_box. The legend_box is an instance of\n the OffsetBox, which is packed with legend handles and\n texts. Once packed, their location is calculated during the\n drawing time.\n \"\"\"\n\n fontsize = self._fontsize\n\n # legend_box is a HPacker, horizontally packed with\n # columns. Each column is a VPacker, vertically packed with\n # legend items. Each legend item is HPacker packed with\n # legend handleBox and labelBox. handleBox is an instance of\n # offsetbox.DrawingArea which contains legend handle. 
labelBox\n # is an instance of offsetbox.TextArea which contains legend\n # text.\n\n text_list = [] # the list of text instances\n handle_list = [] # the list of text instances\n\n label_prop = dict(verticalalignment='baseline',\n horizontalalignment='left',\n fontproperties=self.prop,\n )\n\n labelboxes = []\n handleboxes = []\n\n # The approximate height and descent of text. These values are\n # only used for plotting the legend handle.\n descent = 0.35 * self._approx_text_height() * (self.handleheight - 0.7)\n # 0.35 and 0.7 are just heuristic numbers. this may need to be improbed\n height = self._approx_text_height() * self.handleheight - descent\n # each handle needs to be drawn inside a box of (x, y, w, h) =\n # (0, -descent, width, height). And their coordinates should\n # be given in the display coordinates.\n\n # The transformation of each handle will be automatically set\n # to self.get_trasnform(). If the artist does not uses its\n # default trasnform (eg, Collections), you need to\n # manually set their transform to the self.get_transform().\n\n legend_handler_map = self.get_legend_handler_map()\n\n for orig_handle, lab in zip(handles, labels):\n\n handler = self.get_legend_handler(legend_handler_map, orig_handle)\n\n if handler is None:\n warnings.warn(\n \"Legend does not support %s\\nUse proxy artist \"\n \"instead.\\n\\n\"\n \"http://matplotlib.sourceforge.net/users/legend_guide.html#using-proxy-artist\\n\" %\n (str(orig_handle),))\n handle_list.append(None)\n continue\n\n textbox = TextArea(lab, textprops=label_prop,\n multilinebaseline=True, minimumdescent=True)\n text_list.append(textbox._text)\n\n labelboxes.append(textbox)\n\n handlebox = DrawingArea(width=self.handlelength * fontsize,\n height=height,\n xdescent=0., ydescent=descent)\n\n handle = handler(self, orig_handle,\n #xdescent, ydescent, width, height,\n fontsize,\n handlebox)\n\n handle_list.append(handle)\n\n handleboxes.append(handlebox)\n\n if len(handleboxes) > 0:\n\n # We calculate number of rows in each column. 
The first\n # (num_largecol) columns will have (nrows+1) rows, and remaining\n # (num_smallcol) columns will have (nrows) rows.\n ncol = min(self._ncol, len(handleboxes))\n nrows, num_largecol = divmod(len(handleboxes), ncol)\n num_smallcol = ncol - num_largecol\n\n # starting index of each column and number of rows in it.\n largecol = safezip(range(0,\n num_largecol * (nrows + 1),\n (nrows + 1)),\n [nrows + 1] * num_largecol)\n smallcol = safezip(range(num_largecol * (nrows + 1),\n len(handleboxes),\n nrows),\n [nrows] * num_smallcol)\n else:\n largecol, smallcol = [], []\n\n handle_label = safezip(handleboxes, labelboxes)\n columnbox = []\n for i0, di in largecol + smallcol:\n # pack handleBox and labelBox into itemBox\n itemBoxes = [HPacker(pad=0,\n sep=self.handletextpad * fontsize,\n children=[h, t], align=\"baseline\")\n for h, t in handle_label[i0:i0 + di]]\n # minimumdescent=False for the text of the last row of the column\n itemBoxes[-1].get_children()[1].set_minimumdescent(False)\n\n # pack columnBox\n columnbox.append(VPacker(pad=0,\n sep=self.labelspacing * fontsize,\n align=\"baseline\",\n children=itemBoxes))\n\n if self._mode == \"expand\":\n mode = \"expand\"\n else:\n mode = \"fixed\"\n\n sep = self.columnspacing * fontsize\n\n self._legend_handle_box = HPacker(pad=0,\n sep=sep, align=\"baseline\",\n mode=mode,\n children=columnbox)\n\n self._legend_title_box = TextArea(\"\")\n\n self._legend_box = VPacker(pad=self.borderpad * fontsize,\n sep=self.labelspacing * fontsize,\n align=\"center\",\n children=[self._legend_title_box,\n self._legend_handle_box])\n\n self._legend_box.set_figure(self.figure)\n\n self.texts = text_list\n self.legendHandles = handle_list\n\n def _auto_legend_data(self):\n \"\"\"\n Returns list of vertices and extents covered by the plot.\n\n Returns a two long list.\n\n First element is a list of (x, y) vertices (in\n display-coordinates) covered by all the lines and line\n collections, in the legend's handles.\n\n Second element is a list of bounding boxes for all the patches in\n the legend's handles.\n \"\"\"\n # should always hold because function is only called internally\n assert self.isaxes\n\n ax = self.parent\n vertices = []\n bboxes = []\n lines = []\n\n for handle in ax.lines:\n assert isinstance(handle, Line2D)\n path = handle.get_path()\n trans = handle.get_transform()\n tpath = trans.transform_path(path)\n lines.append(tpath)\n\n for handle in ax.patches:\n assert isinstance(handle, Patch)\n\n if isinstance(handle, Rectangle):\n transform = handle.get_data_transform()\n bboxes.append(handle.get_bbox().transformed(transform))\n else:\n transform = handle.get_transform()\n bboxes.append(handle.get_path().get_extents(transform))\n\n return [vertices, bboxes, lines]\n\n def draw_frame(self, b):\n 'b is a boolean. 
Set draw frame to b'\n self.set_frame_on(b)\n\n def get_children(self):\n 'return a list of child artists'\n children = []\n if self._legend_box:\n children.append(self._legend_box)\n children.extend(self.get_lines())\n children.extend(self.get_patches())\n children.extend(self.get_texts())\n children.append(self.get_frame())\n\n if self._legend_title_box:\n children.append(self.get_title())\n return children\n\n def get_frame(self):\n 'return the Rectangle instance used to frame the legend'\n return self.legendPatch\n\n def get_lines(self):\n 'return a list of lines.Line2D instances in the legend'\n return [h for h in self.legendHandles if isinstance(h, Line2D)]\n\n def get_patches(self):\n 'return a list of patch instances in the legend'\n return silent_list('Patch',\n [h for h in self.legendHandles\n if isinstance(h, Patch)])\n\n def get_texts(self):\n 'return a list of text.Text instance in the legend'\n return silent_list('Text', self.texts)\n\n def set_title(self, title, prop=None):\n \"\"\"\n set the legend title. Fontproperties can be optionally set\n with *prop* parameter.\n \"\"\"\n self._legend_title_box._text.set_text(title)\n\n if prop is not None:\n if isinstance(prop, dict):\n prop = FontProperties(**prop)\n self._legend_title_box._text.set_fontproperties(prop)\n\n if title:\n self._legend_title_box.set_visible(True)\n else:\n self._legend_title_box.set_visible(False)\n\n def get_title(self):\n 'return Text instance for the legend title'\n return self._legend_title_box._text\n\n def get_window_extent(self, *args, **kwargs):\n 'return a extent of the the legend'\n return self.legendPatch.get_window_extent(*args, **kwargs)\n\n def get_frame_on(self):\n \"\"\"\n Get whether the legend box patch is drawn\n \"\"\"\n return self._drawFrame\n\n def set_frame_on(self, b):\n \"\"\"\n Set whether the legend box patch is drawn\n\n ACCEPTS: [ *True* | *False* ]\n \"\"\"\n self._drawFrame = b\n\n def get_bbox_to_anchor(self):\n \"\"\"\n return the bbox that the legend will be anchored\n \"\"\"\n if self._bbox_to_anchor is None:\n return self.parent.bbox\n else:\n return self._bbox_to_anchor\n\n def set_bbox_to_anchor(self, bbox, transform=None):\n \"\"\"\n set the bbox that the legend will be anchored.\n\n *bbox* can be a BboxBase instance, a tuple of [left, bottom,\n width, height] in the given transform (normalized axes\n coordinate if None), or a tuple of [left, bottom] where the\n width and height will be assumed to be zero.\n \"\"\"\n if bbox is None:\n self._bbox_to_anchor = None\n return\n elif isinstance(bbox, BboxBase):\n self._bbox_to_anchor = bbox\n else:\n try:\n l = len(bbox)\n except TypeError:\n raise ValueError(\"Invalid argument for bbox : %s\" % str(bbox))\n\n if l == 2:\n bbox = [bbox[0], bbox[1], 0, 0]\n\n self._bbox_to_anchor = Bbox.from_bounds(*bbox)\n\n if transform is None:\n transform = BboxTransformTo(self.parent.bbox)\n\n self._bbox_to_anchor = TransformedBbox(self._bbox_to_anchor,\n transform)\n\n def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):\n \"\"\"\n Place the *bbox* inside the *parentbbox* according to a given\n location code. Return the (x,y) coordinate of the bbox.\n\n - loc: a location code in range(1, 11).\n This corresponds to the possible values for self._loc, excluding\n \"best\".\n\n - bbox: bbox to be placed, display coodinate units.\n - parentbbox: a parent box which will contain the bbox. 
In\n display coordinates.\n \"\"\"\n assert loc in range(1, 11) # called only internally\n\n BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)\n\n anchor_coefs = {UR: \"NE\",\n UL: \"NW\",\n LL: \"SW\",\n LR: \"SE\",\n R: \"E\",\n CL: \"W\",\n CR: \"E\",\n LC: \"S\",\n UC: \"N\",\n C: \"C\"}\n\n c = anchor_coefs[loc]\n\n fontsize = renderer.points_to_pixels(self._fontsize)\n container = parentbbox.padded(-(self.borderaxespad) * fontsize)\n anchored_box = bbox.anchored(c, container=container)\n return anchored_box.x0, anchored_box.y0\n\n def _find_best_position(self, width, height, renderer, consider=None):\n \"\"\"\n Determine the best location to place the legend.\n\n `consider` is a list of (x, y) pairs to consider as a potential\n lower-left corner of the legend. All are display coords.\n \"\"\"\n # should always hold because function is only called internally\n assert self.isaxes\n\n verts, bboxes, lines = self._auto_legend_data()\n\n bbox = Bbox.from_bounds(0, 0, width, height)\n consider = [self._get_anchored_bbox(x, bbox, self.get_bbox_to_anchor(),\n renderer)\n for x\n in range(1, len(self.codes))]\n\n #tx, ty = self.legendPatch.get_x(), self.legendPatch.get_y()\n\n candidates = []\n for l, b in consider:\n legendBox = Bbox.from_bounds(l, b, width, height)\n badness = 0\n badness = legendBox.count_contains(verts)\n badness += legendBox.count_overlaps(bboxes)\n for line in lines:\n if line.intersects_bbox(legendBox):\n badness += 1\n\n ox, oy = l, b\n if badness == 0:\n return ox, oy\n\n candidates.append((badness, (l, b)))\n\n # rather than use min() or list.sort(), do this so that we are assured\n # that in the case of two equal badnesses, the one first considered is\n # returned.\n # NOTE: list.sort() is stable.But leave as it is for now. -JJL\n minCandidate = candidates[0]\n for candidate in candidates:\n if candidate[0] < minCandidate[0]:\n minCandidate = candidate\n\n ox, oy = minCandidate[1]\n\n return ox, oy\n\n def contains(self, event):\n return self.legendPatch.contains(event)\n\n def draggable(self, state=None, use_blit=False, update=\"loc\"):\n \"\"\"\n Set the draggable state -- if state is\n\n * None : toggle the current state\n\n * True : turn draggable on\n\n * False : turn draggable off\n\n If draggable is on, you can drag the legend on the canvas with\n the mouse. The DraggableLegend helper instance is returned if\n draggable is on.\n\n The update parameter control which parameter of the legend changes\n when dragged. If update is \"loc\", the *loc* paramter of the legend\n is changed. If \"bbox\", the *bbox_to_anchor* parameter is changed.\n \"\"\"\n is_draggable = self._draggable is not None\n\n # if state is None we'll toggle\n if state is None:\n state = not is_draggable\n\n if state:\n if self._draggable is None:\n self._draggable = DraggableLegend(self,\n use_blit,\n update=update)\n else:\n if self._draggable is not None:\n self._draggable.disconnect()\n self._draggable = None\n\n return self._draggable\n"
] |
[
[
"numpy.asarray",
"matplotlib.cbook.iterable",
"matplotlib.offsetbox.DraggableOffsetBox.__init__",
"matplotlib.cbook.is_string_like",
"matplotlib.transforms.Bbox.from_bounds",
"matplotlib.offsetbox.HPacker",
"matplotlib.artist.Artist.__init__",
"matplotlib.offsetbox.DrawingArea",
"matplotlib.transforms.BboxTransformFrom",
"matplotlib.font_manager.FontProperties",
"matplotlib.cbook.silent_list",
"matplotlib.transforms.BboxTransformTo",
"matplotlib.patches.Shadow",
"numpy.array",
"matplotlib.transforms.TransformedBbox",
"numpy.tile",
"matplotlib.cbook.safezip",
"matplotlib.patches.FancyBboxPatch",
"matplotlib.offsetbox.TextArea",
"matplotlib.offsetbox.VPacker"
]
] |
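The `Legend` class in this row is normally reached through `Axes.legend`; a small usage sketch exercising the location code, `bbox_to_anchor`, and title handling through the current public API (modern matplotlib is assumed rather than the Python 2-era module quoted above, and the headless backend plus output file name are assumptions):

```python
import matplotlib
matplotlib.use("Agg")                 # headless backend; assumption for this sketch
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
line1, = ax.plot([0, 1, 2], [0, 1, 4], label="quadratic")
line2, = ax.plot([0, 1, 2], [0, 1, 2], label="linear")

# Location code + anchored bbox + title, all handled by the Legend machinery above.
leg = ax.legend(handles=[line1, line2], loc="upper left",
                bbox_to_anchor=(1.02, 1.0), ncol=1, title="curves")
print(len(leg.get_texts()))           # 2
fig.savefig("legend_demo.png", bbox_inches="tight")
```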
joseppinilla/dwave-system
|
[
"86a1698f15ccd8b0ece0ed868ee49292d3f67f5b"
] |
[
"tests/test_dwave_sampler.py"
] |
[
"# Copyright 2018 D-Wave Systems Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ================================================================================================\nimport unittest\nimport random\nimport warnings\n\nfrom collections import namedtuple\nfrom concurrent.futures import Future\n\nimport numpy as np\n\nimport dimod\nimport dwave_networkx as dnx\n\nfrom dwave.system.samplers import DWaveSampler\n\ntry:\n # py3\n import unittest.mock as mock\nexcept ImportError:\n # py2\n import mock\n\n\nC16 = dnx.chimera_graph(16)\n\n# remove one node from C16 to simulate a not-fully-yielded system\nC16.remove_node(42)\n\nedges = set(tuple(edge) for edge in C16.edges)\nedges.update([(v, u) for u, v in edges]) # solver has bi-directional\n\n\nclass MockSolver():\n nodes = set(range(2048))\n edges = edges\n properties = {'readout_thermalization_range': [0, 10000],\n 'annealing_time_range': [1, 2000],\n 'default_readout_thermalization': 0,\n 'parameters': {'num_spin_reversal_transforms': '',\n 'programming_thermalization': '',\n 'anneal_offsets': '',\n 'num_reads': '',\n 'max_answers': '',\n 'readout_thermalization': '',\n 'beta': \"\",\n 'answer_mode': '',\n 'auto_scale': '',\n 'postprocess': \"\",\n 'anneal_schedule': '',\n 'chains': \"\"},\n 'chip_id': 'MockSolver'}\n\n def sample_ising(self, h, J, **kwargs):\n for key in kwargs:\n if key not in self.properties['parameters']:\n raise ValueError\n result = {'num_variables': 2048,\n 'format': 'qp',\n 'num_occurrences': [1],\n 'active_variables': list(range(2048)),\n 'solutions': [[random.choice((-1, +1)) for __ in range(2048)]],\n 'timing': {'total_real_time': 11511, 'anneal_time_per_run': 20,\n 'post_processing_overhead_time': 2042, 'qpu_sampling_time': 164,\n 'readout_time_per_run': 123,\n 'qpu_delay_time_per_sample': 21,\n 'qpu_anneal_time_per_sample': 20,\n 'total_post_processing_time': 2042,\n 'qpu_programming_time': 8740,\n 'run_time_chip': 164,\n 'qpu_access_time': 11511,\n 'qpu_readout_time_per_sample': 123},\n 'occurrences': [1]}\n result['samples'] = result['solutions']\n result['energies'] = [dimod.ising_energy(sample, h, J) for sample in result['samples']]\n future = Future()\n future.set_result(result)\n return future\n\n def sample_qubo(self, Q, **kwargs):\n for key in kwargs:\n if key not in self.properties['parameters']:\n raise ValueError\n result = {'num_variables': 2048,\n 'format': 'qp',\n 'num_occurrences': [1],\n 'active_variables': list(range(2048)),\n 'solutions': [[random.choice((0, 1)) for __ in range(2048)]],\n 'timing': {'total_real_time': 11511, 'anneal_time_per_run': 20,\n 'post_processing_overhead_time': 2042, 'qpu_sampling_time': 164,\n 'readout_time_per_run': 123,\n 'qpu_delay_time_per_sample': 21,\n 'qpu_anneal_time_per_sample': 20,\n 'total_post_processing_time': 2042,\n 'qpu_programming_time': 8740,\n 'run_time_chip': 164,\n 'qpu_access_time': 11511,\n 'qpu_readout_time_per_sample': 123},\n 'occurrences': [1]}\n result['samples'] = result['solutions']\n result['energies'] = 
[dimod.qubo_energy(sample, Q) for sample in result['samples']]\n future = Future()\n future.set_result(result)\n return future\n\n\nclass TestDwaveSampler(unittest.TestCase):\n @mock.patch('dwave.system.samplers.dwave_sampler.Client')\n def setUp(self, MockClient):\n\n # using the mock\n self.sampler = DWaveSampler()\n\n self.sampler.solver = MockSolver()\n\n @mock.patch('dwave.system.samplers.dwave_sampler.Client')\n def test_solver_init(self, MockClient):\n \"\"\"Deprecation warning is raised for `solver_features` use, but it still works.\"\"\"\n\n # assertWarns not available in py2\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n DWaveSampler(solver_features={'qpu': True})\n self.assertEqual(len(w), 1)\n self.assertTrue(issubclass(w[-1].category, DeprecationWarning))\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n DWaveSampler(solver={'qpu': True})\n self.assertEqual(len(w), 0)\n\n MockClient.reset_mock()\n solver = {'qpu': True, 'num_qubits__gt': 1000}\n sampler = DWaveSampler(solver=solver)\n MockClient.from_config.assert_called_once_with(solver=solver)\n\n def test_sample_ising_variables(self):\n\n sampler = self.sampler\n\n response = sampler.sample_ising({0: -1, 1: 1}, {})\n\n rows, cols = response.record.sample.shape\n\n self.assertEqual(cols, 2)\n\n response = sampler.sample_ising({}, {(0, 1): 1})\n\n rows, cols = response.record.sample.shape\n\n self.assertEqual(cols, 2)\n\n self.assertFalse(np.any(response.record.sample == 0))\n self.assertIs(response.vartype, dimod.SPIN)\n\n self.assertIn('num_occurrences', response.record.dtype.fields)\n self.assertIn('timing', response.info)\n\n def test_sample_qubo_variables(self):\n\n sampler = self.sampler\n\n response = sampler.sample_qubo({(0, 0): -1, (1, 1): 1})\n\n rows, cols = response.record.sample.shape\n\n self.assertEqual(cols, 2)\n\n response = sampler.sample_qubo({(0, 0): -1, (1, 1): 1})\n\n rows, cols = response.record.sample.shape\n\n self.assertEqual(cols, 2)\n\n self.assertTrue(np.all(response.record.sample >= 0))\n self.assertIs(response.vartype, dimod.BINARY)\n\n self.assertIn('num_occurrences', response.record.dtype.fields)\n self.assertIn('timing', response.info)\n\n\nclass TestDWaveSamplerAnnealSchedule(unittest.TestCase):\n def test_typical(self):\n class MockScheduleSampler(DWaveSampler):\n parameters = {'anneal_schedule': ''}\n properties = {'max_anneal_schedule_points': 4,\n 'annealing_time_range': [1, 2000]}\n\n def __init__(self):\n pass\n\n DWaveSampler.validate_anneal_schedule(MockScheduleSampler(), [(0, 1), (55.0, 0.45), (155.0, 0.45), (210.0, 1)])\n"
] |
[
[
"numpy.all",
"numpy.any"
]
] |
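The NumPy calls recorded for this test file are `numpy.all` and `numpy.any`, used to assert that spin samples contain no zeros and binary samples are non-negative. A self-contained sketch of those checks on dummy sample arrays:

```python
import numpy as np

spin_samples = np.array([[-1, 1], [1, -1]])     # dummy SPIN-valued samples
binary_samples = np.array([[0, 1], [1, 1]])     # dummy BINARY-valued samples

assert not np.any(spin_samples == 0)            # spin samples never contain 0
assert np.all(binary_samples >= 0)              # binary samples are non-negative
print("sample checks passed")
```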
IlyaGusev/rudetox
|
[
"e1c6334744bf9d28639efbb61c3605be51642ce9"
] |
[
"rudetox/marker/train.py"
] |
[
"import argparse\nimport json\nimport random\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom transformers import AutoTokenizer, AutoModelForTokenClassification\nfrom transformers import Trainer, TrainingArguments, pipeline, AdamW, get_cosine_schedule_with_warmup\nfrom tqdm import tqdm\nfrom sklearn.metrics import classification_report\n\nfrom rudetox.util.io import read_jsonl\nfrom rudetox.util.dl import gen_batch\nfrom rudetox.marker.util import choose_best_records\n\n\nclass LabeledTokensDataset(Dataset):\n def __init__(self, records, max_tokens, tokenizer, text_field, labels_field):\n self.tokenizer = tokenizer\n self.max_tokens = max_tokens\n self.records = list()\n for r in tqdm(records):\n inputs = self.embed_record(r[text_field])\n true_inputs = [i for i in inputs[\"input_ids\"].tolist() if i != tokenizer.pad_token_id]\n labels = r[labels_field]\n assert len(true_inputs) == len(labels)\n labels = labels[:self.max_tokens] + [0 for _ in range(self.max_tokens - len(labels))]\n inputs[\"labels\"] = labels\n assert len(inputs[\"input_ids\"]) == len(labels)\n self.records.append(inputs)\n\n def embed_record(self, text):\n inputs = self.tokenizer(\n text=text,\n add_special_tokens=True,\n max_length=self.max_tokens,\n padding=\"max_length\",\n truncation=\"longest_first\",\n return_tensors=\"pt\"\n )\n return {key: value.squeeze(0) for key, value in inputs.items()}\n\n def __len__(self):\n return len(self.records)\n\n def __getitem__(self, index):\n return self.records[index]\n\n\ndef main(\n train_path,\n val_path,\n config_path,\n seed,\n out_dir,\n sample_rate,\n text_field,\n labels_field,\n choose_best\n):\n random.seed(seed)\n train_records = list(read_jsonl(train_path, sample_rate))\n val_records = list(read_jsonl(val_path, sample_rate))\n\n if choose_best:\n train_records = choose_best_records(train_records)\n val_records = choose_best_records(val_records)\n\n with open(config_path, \"r\") as r:\n config = json.load(r)\n\n random.shuffle(train_records)\n print(\"Train records: \", len(train_records))\n print(\"Val records: \", len(val_records))\n\n max_tokens = config[\"max_tokens\"]\n model_name = config[\"model_name\"]\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n train_dataset = LabeledTokensDataset(train_records, max_tokens, tokenizer, text_field, labels_field)\n val_dataset = LabeledTokensDataset(val_records, max_tokens, tokenizer, text_field, labels_field)\n\n num_labels = config[\"num_labels\"]\n override_model_params = config.get(\"model_params\", {})\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n model = AutoModelForTokenClassification.from_pretrained(\n model_name,\n num_labels=num_labels,\n **override_model_params\n )\n model = model.to(device)\n model.config.id2label = {int(key): value for key, value in config[\"id2label\"].items()}\n model.config.label2id = config[\"label2id\"]\n\n batch_size = config[\"batch_size\"]\n gradient_accumulation_steps = config[\"gradient_accumulation_steps\"]\n logging_steps = config[\"logging_steps\"]\n eval_steps = config[\"eval_steps\"]\n save_steps = config[\"save_steps\"]\n learning_rate = config[\"learning_rate\"]\n warmup_steps = config[\"warmup_steps\"]\n num_train_epochs = config[\"num_train_epochs\"]\n lr_scheduler_type = config.get(\"lr_scheduler_type\", \"linear\")\n training_args = TrainingArguments(\n output_dir=out_dir,\n evaluation_strategy=\"steps\",\n save_strategy=\"steps\",\n per_device_train_batch_size=batch_size,\n per_device_eval_batch_size=batch_size,\n 
logging_steps=logging_steps,\n save_steps=save_steps,\n eval_steps=eval_steps,\n warmup_steps=warmup_steps,\n learning_rate=learning_rate,\n num_train_epochs=num_train_epochs,\n gradient_accumulation_steps=gradient_accumulation_steps,\n report_to=\"none\",\n load_best_model_at_end=True,\n save_total_limit=1,\n lr_scheduler_type=lr_scheduler_type\n )\n\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=val_dataset\n )\n\n trainer.train()\n model.save_pretrained(out_dir)\n tokenizer.save_pretrained(out_dir)\n\n sample = \"Горит восток зарёю новой! Говно, залупа, пенис, хер, давалка, хуй, блядина, хороший или плохой человек\"\n model = model.to(\"cpu\")\n logits = model(**tokenizer(sample, add_special_tokens=True, return_tensors=\"pt\")).logits.squeeze(0)\n print(sample)\n print(torch.argmax(logits, dim=1).tolist())\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--train-path\", type=str, required=True)\n parser.add_argument(\"--val-path\", type=str, required=True)\n parser.add_argument(\"--out-dir\", type=str, required=True)\n parser.add_argument(\"--config-path\", type=str, required=True)\n parser.add_argument(\"--seed\", type=int, default=42)\n parser.add_argument(\"--sample-rate\", type=float, default=1.0)\n parser.add_argument(\"--text-field\", type=str, required=True)\n parser.add_argument(\"--labels-field\", type=str, required=True)\n parser.add_argument(\"--choose-best\", action=\"store_true\", default=False)\n args = parser.parse_args()\n main(**vars(args))\n"
] |
[
[
"torch.cuda.is_available",
"torch.argmax"
]
] |
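The two Torch calls recorded here are `torch.cuda.is_available` (device selection) and `torch.argmax` (turning per-token logits into label ids). A minimal sketch with dummy logits; the tensor shape is an assumption for illustration:

```python
import torch

# Pick a device the same way the training script does.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Dummy per-token logits: 7 tokens, 3 token-classification labels.
logits = torch.randn(7, 3)
label_ids = torch.argmax(logits, dim=1)
print(device, label_ids.tolist())
```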
ash-aldujaili/blackbox-adv-examples-signhunter
|
[
"9279730522d6127ecb332133a090256e90904f2a"
] |
[
"src/lib/challenges/cifar10_challenge/train.py"
] |
[
"\"\"\"Trains a model, saving checkpoints and tensorboard summaries along\n the way.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\nimport shutil\nfrom datetime import datetime\nfrom timeit import default_timer as timer\n\nimport cifar10_input\nimport numpy as np\nimport tensorflow as tf\nfrom model import Model\nfrom pgd_attack import LinfPGDAttack\n\nwith open('config.json') as config_file:\n config = json.load(config_file)\n\n# seeding randomness\ntf.set_random_seed(config['tf_random_seed'])\nnp.random.seed(config['np_random_seed'])\n\n# Setting up training parameters\nmax_num_training_steps = config['max_num_training_steps']\nnum_output_steps = config['num_output_steps']\nnum_summary_steps = config['num_summary_steps']\nnum_checkpoint_steps = config['num_checkpoint_steps']\nstep_size_schedule = config['step_size_schedule']\nweight_decay = config['weight_decay']\ndata_path = config['data_path']\nmomentum = config['momentum']\nbatch_size = config['training_batch_size']\n\n# Setting up the data and the model\nraw_cifar = cifar10_input.CIFAR10Data(data_path)\nglobal_step = tf.contrib.framework.get_or_create_global_step()\nmodel = Model(mode='train')\n\n# Setting up the optimizer\nboundaries = [int(sss[0]) for sss in step_size_schedule]\nboundaries = boundaries[1:]\nvalues = [sss[1] for sss in step_size_schedule]\nlearning_rate = tf.train.piecewise_constant(\n tf.cast(global_step, tf.int32),\n boundaries,\n values)\ntotal_loss = model.mean_xent + weight_decay * model.weight_decay_loss\ntrain_step = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(\n total_loss,\n global_step=global_step)\n\n# Set up adversary\nattack = LinfPGDAttack(model,\n config['epsilon'],\n config['num_steps'],\n config['step_size'],\n config['random_start'],\n config['loss_func'])\n\n# Setting up the Tensorboard and checkpoint outputs\nmodel_dir = config['model_dir']\nif not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n# We add accuracy and xent twice so we can easily make three types of\n# comparisons in Tensorboard:\n# - train vs eval (for a single run)\n# - train of different runs\n# - eval of different runs\n\nsaver = tf.train.Saver(max_to_keep=3)\ntf.summary.scalar('accuracy adv train', model.accuracy)\ntf.summary.scalar('accuracy adv', model.accuracy)\ntf.summary.scalar('xent adv train', model.xent / batch_size)\ntf.summary.scalar('xent adv', model.xent / batch_size)\ntf.summary.image('images adv train', model.x_input)\nmerged_summaries = tf.summary.merge_all()\n\n# keep the configuration file with the model for reproducibility\nshutil.copy('config.json', model_dir)\n\nwith tf.Session() as sess:\n # initialize data augmentation\n cifar = cifar10_input.AugmentedCIFAR10Data(raw_cifar, sess, model)\n\n # Initialize the summary writer, global variables, and our time counter.\n summary_writer = tf.summary.FileWriter(model_dir, sess.graph)\n sess.run(tf.global_variables_initializer())\n training_time = 0.0\n\n # Main training loop\n for ii in range(max_num_training_steps):\n x_batch, y_batch = cifar.train_data.get_next_batch(batch_size,\n multiple_passes=True)\n\n # Compute Adversarial Perturbations\n start = timer()\n x_batch_adv = attack.perturb(x_batch, y_batch, sess)\n end = timer()\n training_time += end - start\n\n nat_dict = {model.x_input: x_batch,\n model.y_input: y_batch}\n\n adv_dict = {model.x_input: x_batch_adv,\n model.y_input: y_batch}\n\n # Output to stdout\n if ii % num_output_steps 
== 0:\n nat_acc = sess.run(model.accuracy, feed_dict=nat_dict)\n adv_acc = sess.run(model.accuracy, feed_dict=adv_dict)\n print('Step {}: ({})'.format(ii, datetime.now()))\n print(' training nat accuracy {:.4}%'.format(nat_acc * 100))\n print(' training adv accuracy {:.4}%'.format(adv_acc * 100))\n if ii != 0:\n print(' {} examples per second'.format(\n num_output_steps * batch_size / training_time))\n training_time = 0.0\n # Tensorboard summaries\n if ii % num_summary_steps == 0:\n summary = sess.run(merged_summaries, feed_dict=adv_dict)\n summary_writer.add_summary(summary, global_step.eval(sess))\n\n # Write a checkpoint\n if ii % num_checkpoint_steps == 0:\n saver.save(sess,\n os.path.join(model_dir, 'checkpoint'),\n global_step=global_step)\n\n # Actual training step\n start = timer()\n sess.run(train_step, feed_dict=adv_dict)\n end = timer()\n training_time += end - start\n"
] |
[
[
"tensorflow.summary.FileWriter",
"numpy.random.seed",
"tensorflow.summary.image",
"tensorflow.cast",
"tensorflow.global_variables_initializer",
"tensorflow.train.MomentumOptimizer",
"tensorflow.summary.merge_all",
"tensorflow.Session",
"tensorflow.set_random_seed",
"tensorflow.contrib.framework.get_or_create_global_step",
"tensorflow.train.Saver",
"tensorflow.summary.scalar"
]
] |
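The training script in this row is written against TensorFlow 1.x graph mode. A compressed sketch of its summary/optimizer wiring, run through the `tensorflow.compat.v1` layer (the toy loss, log directory, and step count are assumptions, and `tf.contrib` is deliberately not used):

```python
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()
tf.set_random_seed(0)
np.random.seed(0)

# Toy graph: one weight fitted toward a constant target.
x = tf.placeholder(tf.float32, shape=[None, 1])
w = tf.Variable([[0.0]], dtype=tf.float32)
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - 1.0))

global_step = tf.train.get_or_create_global_step()
train_step = tf.train.MomentumOptimizer(0.1, 0.9).minimize(
    loss, global_step=global_step)

tf.summary.scalar("loss", loss)
merged = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter("/tmp/demo_logs", sess.graph)
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        summary, _ = sess.run([merged, train_step],
                              feed_dict={x: np.ones((4, 1), np.float32)})
        writer.add_summary(summary, sess.run(global_step))
    writer.close()
```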
RoyalTS/dstoolbox
|
[
"2b79dd3f70882c90b03c5c898d82f795a0ae7a78"
] |
[
"src/dstoolbox/sklearn/feature_importance.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport shap\n\n\n# FIXME?: This doesn't seem to return quite the same things as shap.summary_plot()\ndef shap_importances(model, X: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Return a dataframe containing the features sorted by Shap importance\n\n Parameters\n ----------\n model : The tree-based model\n X : pd.Dataframe\n training set/test set\n Returns\n -------\n pd.Dataframe\n A pd.DataFrame containing the features sorted by Shap importance\n \"\"\"\n\n explainer = shap.Explainer(model)\n\n shap_values = explainer(X)\n\n vals_abs = np.abs(shap_values.values)\n # average across cases\n val_mean = np.mean(vals_abs, axis=0)\n\n feature_importances = pd.DataFrame(\n list(zip(shap_values.feature_names, val_mean)),\n columns=[\"feature\", \"importance\"],\n ).sort_values(by=[\"importance\"], ascending=False)\n\n return feature_importances\n"
] |
[
[
"numpy.mean",
"numpy.abs"
]
] |
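The helper above reduces SHAP attributions to one importance score per feature with `numpy.abs` followed by `numpy.mean` across rows. A tiny sketch of just that aggregation on dummy attribution values (no `shap` dependency needed):

```python
import numpy as np

# Dummy SHAP-like attributions: 2 samples x 3 features.
attributions = np.array([[0.2, -1.0, 0.1],
                         [-0.4, 0.5, 0.0]])

# Mean absolute attribution per feature, as in shap_importances().
importance = np.mean(np.abs(attributions), axis=0)
print(importance)  # [0.3  0.75 0.05]
```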
markgrobman/hailo_model_zoo
|
[
"2ea72272ed2debd7f6bee7c4a65bd41de57ec9cf"
] |
[
"hailo_model_zoo/datasets/create_d2s_tfrecord.py"
] |
[
"#!/usr/bin/env python\n\nimport os\nimport argparse\nimport tensorflow as tf\nimport numpy as np\nimport json\nimport collections\n\n\ndef _int64_feature(values):\n if not isinstance(values, (tuple, list)):\n values = [values]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))\n\n\ndef _bytes_feature(values):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))\n\n\ndef _float_list_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n\ndef _create_tfrecord(filenames, dataset_dir, name):\n \"\"\"Loop over all the images in filenames and create the TFRecord\n \"\"\"\n tfrecords_filename = os.path.join(dataset_dir, 'D2S_' + name + '.tfrecord')\n writer = tf.io.TFRecordWriter(tfrecords_filename)\n for idx, (img_path, bbox_annotations, img_shape) in enumerate(list(filenames)):\n if idx % 100 == 0:\n print(\"Finished {0}\".format(idx), end=\"\\r\")\n xmin, xmax, ymin, ymax, category_id, is_crowd, area = [], [], [], [], [], [], []\n img_jpeg = open(img_path, 'rb').read()\n image_height = img_shape[0]\n image_width = img_shape[1]\n for object_annotations in bbox_annotations:\n (x, y, width, height) = tuple(object_annotations['bbox'])\n if width <= 0 or height <= 0 or x + width > image_width or y + height > image_height:\n continue\n xmin.append(float(x) / image_width)\n xmax.append(float(x + width) / image_width)\n ymin.append(float(y) / image_height)\n ymax.append(float(y + height) / image_height)\n is_crowd.append(object_annotations['iscrowd'])\n area.append(object_annotations['area'])\n category_id.append(int(object_annotations['category_id']))\n\n # print (\"converting image number \" + str(idx) + \" from \" + name + \" : \" + img_path)\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(image_height),\n 'width': _int64_feature(image_width),\n 'num_boxes': _int64_feature(len(bbox_annotations)),\n 'image_id': _int64_feature(object_annotations['image_id']),\n 'xmin': _float_list_feature(xmin),\n 'xmax': _float_list_feature(xmax),\n 'ymin': _float_list_feature(ymin),\n 'ymax': _float_list_feature(ymax),\n 'area': _float_list_feature(area),\n 'category_id': _int64_feature(category_id),\n 'is_crowd': _int64_feature(is_crowd),\n 'image_name': _bytes_feature(str.encode(os.path.basename(img_path))),\n 'mask': _bytes_feature(np.array(0).tostring()),\n 'image_jpeg': _bytes_feature(img_jpeg)}))\n writer.write(example.SerializeToString())\n writer.close()\n return idx\n\n\ndef get_img_labels_list(dataset_dir, det_file):\n with tf.io.gfile.GFile(det_file, 'r') as fid:\n obj_annotations = json.load(fid)\n\n img_to_obj_annotation = collections.defaultdict(list)\n for annotation in obj_annotations['annotations']:\n image_name = \"D2S_{0}.jpg\".format(str(annotation['image_id']).zfill(6))\n img_to_obj_annotation[image_name].append(annotation)\n\n orig_file_names, det_annotations, imgs_shape = [], [], []\n for img in obj_annotations['images']:\n img_filename = img['file_name']\n det_annotations.append(img_to_obj_annotation[img_filename])\n orig_file_names.append(os.path.join(dataset_dir, img_filename))\n imgs_shape.append((img['height'], img['width']))\n return zip(orig_file_names, det_annotations, imgs_shape)\n\n\ndef run(dataset_dir, det_file, name='test'):\n assert dataset_dir != '', 'no dataset directory'\n assert det_file != '', 'no detection file'\n img_labels_list = get_img_labels_list(dataset_dir, det_file)\n images_num = _create_tfrecord(img_labels_list, dataset_dir, name)\n 
print('\\nDone converting {} images'.format(images_num))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--img', '-img', help=\"images directory\", type=str,\n default='/local/data/MVTec/D2S/images')\n parser.add_argument('--det', '-det', help=\"detection ground truth\", type=str,\n default='/local/data/MVTec/D2S/annotations/D2S_validation.json')\n parser.add_argument('--name', '-name', help=\"file name suffix\", type=str, default='test')\n args = parser.parse_args()\n run(args.img, args.det, args.name)\n\"\"\"\n----------------------------------------------------------------------------\nCMD used to create a D2S_train.tfrecord for D2S training dataset:\n----------------------------------------------------------------------------\npython create_d2s_tfrecord.py\n--img /local/data/MVTec/D2S/images\n--det --det /local/data/MVTec/D2S/annotations/D2S_training.json\n--name train\n\n----------------------------------------------------------------------------\nCMD used to create a D2S_test.tfrecord for D2S validation dataset:\n----------------------------------------------------------------------------\npython create_d2s_tfrecord.py\n--img /local/data/MVTec/D2S/images\n--det /local/data/MVTec/D2S/annotations/D2S_validation.json\n--name test\n\"\"\"\n"
] |
[
[
"tensorflow.io.TFRecordWriter",
"tensorflow.io.gfile.GFile",
"tensorflow.train.FloatList",
"tensorflow.train.BytesList",
"numpy.array",
"tensorflow.train.Int64List"
]
] |
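The converter above serializes each image into a `tf.train.Example` via the `Int64List`/`FloatList`/`BytesList` wrappers listed in the API column. A self-contained sketch building and serializing one such record with dummy values (the field names and sizes are illustrative):

```python
import numpy as np
import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    "height": tf.train.Feature(int64_list=tf.train.Int64List(value=[480])),
    "xmin": tf.train.Feature(float_list=tf.train.FloatList(value=[0.1, 0.4])),
    "image_name": tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[b"D2S_000001.jpg"])),
    "mask": tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[np.array(0).tobytes()])),
}))
serialized = example.SerializeToString()
print(len(serialized) > 0)  # True
```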
sarangi777/DeepStack
|
[
"cba38629ea86d004b0e1ddcf0c9d7997ff78c43b"
] |
[
"deepstack/ensemble.py"
] |
[
"\"\"\"\nModule representing the Meta-Learners, containing an Ensemble of Base-Learners\n\"\"\"\nimport numpy as np\nfrom sklearn import metrics\nimport warnings\nfrom abc import abstractmethod\nfrom sklearn.ensemble import RandomForestRegressor\nimport os\nimport joblib\nimport glob\nfrom deepstack.base import Member\nfrom keras.utils import to_categorical\n\n\nclass Ensemble(object):\n \"\"\"Base Ensemble Definition.\"\"\"\n\n @abstractmethod\n def add_member(self, member):\n \"\"\"\n Adds a model Member to the Ensemble\n\n Args:\n member: the model Member\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def fit(self):\n \"\"\"\n Fit method to provided ensemble members\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def predict(self):\n \"\"\"\n Predict using fitted ensemble members\n \"\"\"\n raise NotImplementedError\n\n\nclass DirichletEnsemble(Ensemble):\n \"\"\"\n Representation of an Dirichlet Ensemble\n It weights the ensemble members optimizing a Metric/Score based on a\n validation dataset. The weight optimization search is performed with\n randomized search based on the dirichlet distribution.\n \"\"\"\n\n def __init__(self, N=10000, metric=None, maximize=True):\n \"\"\"\n Constructor of a Dirichlet Weighted Ensemble\n Args:\n N: the number of times weights should be (randomly) tried out,\n sampled from a dirichlet distribution\n metric: (optional) evaluation metric function.\n Default: `sklearn.metrics.roc_auc_score`\n maximize: if evaluation metric should be maximized (otherwise minimized)\n \"\"\"\n self.n = N\n self.metric = metric\n if metric is None:\n self.metric = metrics.roc_auc_score\n self.maximize = maximize\n # Initialize Parameters:\n self.members = []\n self.bestweights = []\n self.probabilities = None\n self._nmembers = 0\n self.bestscore = float(\"-inf\") if maximize else float(\"inf\")\n self.fitted = False\n\n def add_members(self, members):\n \"\"\"\n Adds Members to the Ensemble\n Args:\n members: a list containing instances of class `Member`\n \"\"\"\n for member in members:\n self.add_member(member)\n\n def add_member(self, member):\n \"\"\"\n Adds a Member (Base-Learner) to the Ensemble\n Args:\n member: an instance of class `Member`\n \"\"\"\n self.members.append(member)\n self._nmembers += 1\n\n def fit(self, verbose=False):\n \"\"\"\n Calculates ensemble weights, optimizing the AUC Binary Classification\n Metric using randomized search with the dirichlet distribution.\n \"\"\"\n assert(len(self.members) > 1)\n val_classes = self.members[0].val_classes\n\n best_ensemble_score = float(\"-inf\") if self.maximize else float(\"inf\")\n rsbest = None\n for i in range(self.n):\n rs = np.random.dirichlet(np.ones(self._nmembers), size=1)[0]\n preds = np.sum(np.array([self.members[i].val_probs * rs[i]\n for i in range(self._nmembers)]), axis=0)\n ensemble_score = _calculate_metric(val_classes, preds, self.metric)\n max_flag = self.maximize and ensemble_score > best_ensemble_score\n min_flag = not(self.maximize) and ensemble_score < best_ensemble_score\n if max_flag or min_flag:\n if verbose:\n print(ensemble_score, i, rs) # TODO: Proper logging\n best_ensemble_score = ensemble_score\n rsbest = rs\n self.bestweights = rsbest\n self.bestscore = best_ensemble_score\n\n def predict(self):\n \"\"\"\n Returns the weighed probabilities of the ensemble members\n Returns:\n the predicted probabilities as np.array\n \"\"\"\n self.probabilities = np.sum(np.array([self.bestweights[i] *\n self.members[i].submission_probs\n for i in 
range(self._nmembers)]),\n axis=0)\n return self.probabilities\n\n def describe(self):\n \"\"\"\n Prints information about the ensemble members and its weights as well\n as single and ensemble AUC performance on validation dataset.\n \"\"\"\n for i in range(self._nmembers):\n member = self.members[i]\n model_score = _calculate_metric(member.val_classes,\n member.val_probs,\n metric=self.metric)\n text = self.members[i].name + \\\n \" - Weight: {:1.4f} - {}: {:1.4f}\".format(\n self.bestweights[i],\n self.metric.__name__,\n model_score)\n print(text)\n print(\"DirichletEnsemble {}: {:1.4f}\".format(\n self.metric.__name__,\n self.bestscore))\n return\n\n\nclass StackEnsemble(Ensemble):\n def __init__(self, model=None):\n \"\"\"\n Constructor of a Stacking Ensemble.\n Args:\n model: ensemble model which should serve as meta-model.\n `sklearn.ensemble.RandomForestRegressor` per default for predicting class probabilities.\n members (list): ensemble Members to add to the Stack\n \"\"\"\n self.model = model\n if model is None:\n self.model = RandomForestRegressor(n_estimators=100, max_depth=3, n_jobs=20)\n # Initialize Parameters:\n self.members = []\n self._nmembers = 0\n self.predictions = None\n self._y_squeezed = False # Flags if labels dimension must be squeezed\n\n def __repr__(self):\n reps = [member.name for member in self.members]\n return \"<StackEnsemble: [\" + \", \".join(reps) + \"]>\"\n\n def __str__(self):\n reps = [member.name for member in self.members]\n return \"StackEnsemble: with\" + \\\n str(self._nmembers) + \" Base-Learners [\" + \", \".join(reps) + \"]\"\n\n def add_members(self, members):\n \"\"\"\n Adds ensemble Members to the Stack\n Args:\n members: a list containing instances of class `Member`\n \"\"\"\n for member in members:\n self.add_member(member)\n self._test()\n\n def add_member(self, member):\n \"\"\"\n Adds a ensemble Member to the Stack\n Args:\n member: an instance of class `Member`\n \"\"\"\n self.members.append(member)\n self._nmembers += 1\n if member.val_probs is None:\n try:\n member.val_probs = member._calculate_val_predictions()\n except Exception as e:\n warnings.warn(str(e))\n if member.train_probs is None:\n try:\n member.train_probs = member._calculate_train_predictions()\n except Exception as e:\n warnings.warn(str(e))\n\n def fit(self, X=None, y=None, kwargs={}):\n \"\"\"\n Trains the meta-model\n Args:\n X: training data for meta-learner\n y: training classes for meta-learner\n kwargs: further arguments for the fit function\n \"\"\"\n assert(len(self.members) > 1)\n # Assumption: all members have same train_batches.classes\n if X is None or y is None:\n return self._fit_train()\n if X.ndim >= 3:\n X = X.reshape(X.shape[0], np.prod(X.shape[1::]))\n try:\n self._y_squeezed = False\n return self.model.fit(X, y, **kwargs)\n except ValueError: # Normally bad input shape for non-multi-output models\n self._y_squeezed = True\n y_flat = np.argmax(y, axis=1)\n return self.model.fit(X, y_flat, **kwargs)\n\n def predict(self, X=None, predict_proba=False, kwargs={}):\n \"\"\"\n Meta-Model prediction for the class' probabilities as a regression\n problem.\n Args:\n X: input data to be predicted\n kwargs: further arguments for prediction function\n predict_proba: if should call method `predict_proba`\n instead of `predict`.\n Returns:\n the predicted probabilities as np.array\n \"\"\"\n if X is None:\n X = self._get_pred_X()\n if X.ndim == 3:\n X = X.reshape(X.shape[0], X.shape[1] * X.shape[2])\n if (predict_proba or self._y_squeezed) and 
hasattr(self.model, 'predict_proba'):\n self.predictions = self.model.predict_proba(X, **kwargs)\n print(\"Calling predict_proba\")\n elif hasattr(self.model, 'predict'):\n self.predictions = self.model.predict(X, **kwargs)\n print(\"Calling predict\") \n else:\n raise ValueError(\"Model has no predict function\")\n return np.array(self.predictions)\n\n def describe(self, probabilities_val=None, metric=None,\n maximize=True):\n \"\"\"\n Prints information about the performance of base and meta learners\n based on validation data.\n Args:\n probabilities_val: (optional) probabilities/prediction on\n validation data\n metric: (optional) evaluation metric function.\n Default: `sklearn.metrics.roc_auc_score`\n maximize: if metric should be maximized (otherwise minimized)\n \"\"\"\n best_score = float(\"-inf\") if maximize else float(\"inf\")\n if metric is None:\n metric = metrics.roc_auc_score\n if probabilities_val is None:\n probabilities_val = self._predict_val()\n # Assumption: all members have same val_classes\n val_classes = self.members[0].val_classes\n for i in range(self._nmembers):\n member = self.members[i]\n model_score = _calculate_metric(member.val_classes, member.val_probs, metric)\n max_flag = maximize and model_score > best_score\n min_flag = not(maximize) and model_score < best_score\n if max_flag or min_flag:\n best_score = model_score\n text = member.name + \" - {}: {:1.4f}\".format(\n metric.__name__, model_score)\n print(text)\n ensemble_score = _calculate_metric(val_classes, probabilities_val, metric)\n print(\"StackEnsemble {}: {:1.4f}\".format(\n metric.__name__, ensemble_score))\n return ensemble_score\n\n def _get_X(self, attrname):\n X = []\n probs = getattr(self.members[0], attrname)\n # Assumption: all members have same train_probs length\n for i in range(len(probs)):\n preds = []\n for member in self.members:\n preds.append(getattr(member, attrname)[i])\n X.append(preds)\n return np.array(X)\n\n def _get_train_X(self):\n return self._get_X(\"train_probs\")\n\n def _get_val_X(self):\n return self._get_X(\"val_probs\")\n\n def _get_pred_X(self):\n return self._get_X(\"submission_probs\")\n\n def _fit_train(self):\n return self.fit(self._get_train_X(), self.members[0].train_classes)\n\n def _fit_submission(self):\n \"\"\"\n Fits model on training and validation data.\n Useful when training the meta-learner for final submission prediction\n \"\"\"\n X1 = self._get_train_X()\n X2 = self._get_val_X()\n y1 = self.members[0].train_classes\n y2 = self.members[0].val_classes\n X = np.concatenate((X1, X2))\n y = np.concatenate((y1, y2))\n return self.fit(X, y)\n\n def _predict_val(self):\n return self.predict(self._get_val_X())\n\n def _test(self):\n \"\"\"\n Test assumption that all members' classes have same shape and values.\n Names should be unique.\n This is an internal condition for class structures.\n \"\"\"\n if self._nmembers < 2:\n return True\n t1 = [(np.array_equal(self.members[i].train_classes,\n self.members[i + 1].train_classes))\n for i in range(self._nmembers - 1)]\n t2 = [(np.array_equal(self.members[i].val_classes,\n self.members[i + 1].val_classes))\n for i in range(self._nmembers - 1)]\n assert(np.sum(t1) == self._nmembers - 1)\n assert(np.sum(t2) == self._nmembers - 1)\n names = [self.members[i].name for i in range(self._nmembers)]\n assert(len(list(names)) == len(set(names)))\n return True\n\n def save(self, folder=\"./premodels/\"):\n \"\"\"\n Saves meta-learner and base-learner of ensemble into folder / directory\n Args:\n folder: the folder where 
models should be saved to.\n Create if not exists.\n \"\"\"\n if not os.path.exists(folder):\n os.mkdir(folder)\n [member.save(folder=folder) for member in self.members]\n temp = self.members\n # Reset base-learners. These are loaded idependently\n self.members = None\n self._nmembers = 0\n joblib.dump(self, os.path.join(folder, \"stackensemble.joblib\"))\n self.members = temp\n self._nmembers = len(self.members)\n return self\n\n @classmethod\n def load(cls, folder=\"./premodels/\"):\n \"\"\"\n Loads meta-learner and base-learners from folder / directory\n Args:\n folder: directory where models should be loaded from\n Returns:\n loaded StackEnsemble with Members\n \"\"\"\n stack = joblib.load(os.path.join(folder, \"stackensemble.joblib\"))\n stack.members = []\n if folder[-1] != os.sep:\n folder += os.sep\n for fn in glob.glob(folder + \"**/\"):\n member = Member.load(fn)\n stack.add_member(member)\n return stack\n\n\ndef _calculate_metric(y_true, y_pred, metric=None): # TODO: Refactor\n if metric is None:\n metric = metrics.roc_auc_score\n try:\n return metric(y_true, y_pred)\n except ValueError:\n pass\n\n try:\n y_true_cat = to_categorical(y_true)\n return metrics.roc_auc_score(y_true_cat, y_pred)\n except ValueError:\n pass\n\n # Classification Task\n y_t = y_true\n if y_true.ndim > 1:\n y_t = np.argmax(y_true, axis=1)\n y_p = y_pred\n if y_pred.ndim > 1:\n y_p = np.argmax(y_pred, axis=1)\n return metric(y_t, y_p)\n"
] |
[
[
"sklearn.ensemble.RandomForestRegressor",
"sklearn.metrics.roc_auc_score",
"numpy.array_equal",
"numpy.ones",
"numpy.concatenate",
"numpy.argmax",
"numpy.prod",
"numpy.array",
"numpy.sum"
]
] |
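The deepstack/ensemble.py row above documents a randomized Dirichlet weight search over base-learner validation probabilities. Below is a minimal sketch of exercising that class, assuming the deepstack package from this row is importable; the mock members (built with SimpleNamespace) and their random data are hypothetical stand-ins for real deepstack.base.Member objects and only carry the attributes the weight search actually reads:

import numpy as np
from types import SimpleNamespace
from deepstack.ensemble import DirichletEnsemble  # module shown in this row

rng = np.random.default_rng(0)
y_val = rng.integers(0, 2, size=200)              # made-up binary validation labels

def mock_member(name):
    # Only the attributes used by the Dirichlet weight search are provided.
    probs = np.clip(y_val * 0.6 + rng.random(200) * 0.4, 0.0, 1.0)
    return SimpleNamespace(name=name, val_classes=y_val,
                           val_probs=probs, submission_probs=probs)

ensemble = DirichletEnsemble(N=2000)              # 2000 random weight draws
ensemble.add_members([mock_member("model_a"), mock_member("model_b")])
ensemble.fit()                                    # randomized dirichlet search on validation AUC
ensemble.describe()                               # per-member AUC and learned weights
weighted_probs = ensemble.predict()               # weighted submission probabilities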
minssoj/Learning_OpenCV-Python
|
[
"63f175985a1d9645191c49e16ab6bb91a4f6b7fb"
] |
[
"Code/26.2DImageHistogram.py"
] |
[
"# =================================================\n# minso.jeong@daum.net\n# 26. 2D 이미지 히스토그램 \n# Reference : samsjang@naver.com\n# =================================================\nimport numpy as np\nimport cv2 as cv\nimport matplotlib.pyplot as plt\n\nhscale = 10\n\ndef hist2D_cv():\n\timg = cv.imread('../Images/32.flower.jpg')\n\thsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n\t# channel [0, 1]: Hue, saturation, Bin 개수 : Hue 180, Saturation 256\n\thist = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])\n\tcv.imshow('hist2D', hist)\n\tcv.waitKey(0)\n\tcv.destroyAllWindows()\n# hist2D_cv()\n\ndef hist2D_plt():\n\timg = cv.imread('../Images/32.flower.jpg')\n\thsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n\t# channel [0, 1]: Hue, saturation, Bin 개수 : Hue 180, Saturation 256\n\thist = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])\n\tplt.imshow(hist, interpolation='nearest')\n\tplt.show()\n#hist2D_plt()\n\ndef HSVmap():\n\thsvmap = np.zeros((180, 256, 3), np.uint8)\n\th, s = np.indices(hsvmap.shape[:2])\n\thsvmap[:,:,0] = h\n\thsvmap[:,:,1] = s\n\thsvmap[:,:,2] = 255\n\thsvmap = cv.cvtColor(hsvmap, cv.COLOR_HSV2BGR)\n\t#cv.imshow('HSVmap', hsvmap)\n\t#cv.waitKey(0)\n\t#cv.destroyAllWindows()\n\treturn hsvmap\n#HSVmap()\n\ndef onChange(x):\n\tglobal hscale\n\thscale = x\n\ndef hist2D():\n\timg = cv.imread('../Images/32.flower.jpg')\n\thsvmap = HSVmap()\n\tcv.namedWindow('hist2D', 0)\n\tcv.createTrackbar('scale', 'hist2D', hscale, 32, onChange)\n\n\twhile True:\n\t\thsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n\t\thist = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])\n\t\t# hist*0.005*hscale : 히스토그램에서 제외되는 픽셀 조절\n\t\thist = np.clip(hist*0.005*hscale, 0, 1)\n\t\t# hist[:, :, np.newaxis] : (180, 256) -> (180, 256, 1)\n\t\thist = hsvmap*hist[:, :, np.newaxis]/255.0\n\t\tcv.imshow('hist2D', hist)\n\t\tk = cv.waitKey(1) & 0xFF\n\t\tif k == 27:\n\t\t\tbreak\n\tcv.destroyAllWindows\nhist2D()"
] |
[
[
"matplotlib.pyplot.imshow",
"numpy.clip",
"numpy.indices",
"matplotlib.pyplot.show",
"numpy.zeros"
]
] |
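As a cross-check on the cv.calcHist call that 26.2DImageHistogram.py above uses for the Hue/Saturation histogram, the same 180x256 count matrix can be reproduced with plain NumPy; the image path below is a placeholder, not one from the row:

import cv2 as cv
import numpy as np

img = cv.imread('flower.jpg')                      # placeholder path; any BGR image works
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)

# OpenCV: 180 Hue bins over [0, 180), 256 Saturation bins over [0, 256)
hist_cv = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])

# Equivalent NumPy 2D histogram of the H and S channels
hist_np, _, _ = np.histogram2d(hsv[:, :, 0].ravel(), hsv[:, :, 1].ravel(),
                               bins=[180, 256], range=[[0, 180], [0, 256]])
assert np.allclose(hist_cv, hist_np)               # both count the same H/S pairs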
aponom84/FARZ
|
[
"db292dfe6555aa7e117f445b4962b4b1df2f4bbf"
] |
[
"src/network_models.py"
] |
[
"import igraph as ig\nimport networkx as nx\nimport numpy as np\n\n\nclass Graph:\n def __init__(self, n, edge_list, directed=False):\n self.edge_list = edge_list \n self.n = n\n self.directed=directed\n # self.C = None\n # self.Cid = None\n self.rewirings_count = 0\n \n self.deg = [0]* n\n self.neigh = [[] for i in range(n)]\n for e in edge_list:\n u , v = e\n self.neigh[u].append(v)\n self.deg[v]+=1 \n if not self.directed: #if directed deg is indegree, outdegree = len(negh)\n self.neigh[v].append(u)\n self.deg[u]+=1 \n return \n\n def add_edge(self, u, v):\n self.edge_list.append((u,v) if u<v else (v,u))\n self.neigh[u].append(v)\n self.deg[v]+=1\n if not self.directed: \n self.neigh[v].append(u)\n self.deg[u]+=1\n self.rewirings_count +=1\n return \n \n def remove_edge(self,u, v):\n self.edge_list.remove((u,v) if u<v else (v,u))\n self.neigh[u].remove(v)\n if not self.directed: \n self.neigh[v].remove(u)\n self.deg[u]-=1\n self.deg[v]-=1 \n self.rewirings_count +=1\n return \n\n def to_nx(self):\n G=nx.Graph()\n G.add_edges_from(self.edge_list)\n return G\n \n def to_ig(self):\n G=ig.Graph()\n G.add_edges(self.edge_list)\n return G \n\n\ndef albert_barabasi(params):\n g = ig.Graph.Barabasi(**params)\n return Graph(g.vcount(), g.get_edgelist())\n\ndef forest_fire(params): \n g = ig.Graph.Forest_Fire(**params)\n return Graph(g.vcount(), g.get_edgelist())\n\ndef erdos_renyi(params): \n g = ig.Graph.Erdos_Renyi(**params)\n return Graph(g.vcount(), g.get_edgelist())\n\n\ndef configuration_model_from_sequence(S, multilinks = False, selfloop = False):\n ''' \n would generate a graph from the given sequence, with the configuration model, i.e. forms edges with uniform probability\n the resulted nodes will have degrees <= given sequence -> hence works with any input, even if not a valid degree sequence\n # todo: add direction and weights\n '''\n edge_list = []\n max_itr = len(S)\n itr = 0\n Q = [i for i in range(0, len(S)) if S[i]!=0 ]\n while len(Q)>(1 if not selfloop else 0):\n itr+=1\n i, j = np.random.choice(Q, size =2, replace = False if not selfloop else True)\n e = (i,j) if i<j else (j,i)\n # woudn't terminate if all pairs in Q are connected in edge_list\n if not e in edge_list or len(Q)*(len(Q)-1) < 2* len(edge_list) or itr>max_itr: \n edge_list.append(e)\n S[i]-=1\n S[j]-=1\n itr = 0\n if S[i]==0: Q.remove(i)\n if S[j]==0: Q.remove(j)\n \n return edge_list if multilinks else list(set(edge_list))\n\n"
] |
[
[
"numpy.random.choice"
]
] |
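A short usage sketch for the configuration-model helper in src/network_models.py above; the degree sequence is invented and the import assumes src/ is on the Python path:

from network_models import Graph, configuration_model_from_sequence

degree_sequence = [3, 3, 2, 2, 1, 1]             # made-up target degrees
# Pass a copy: the helper decrements the sequence in place while wiring edges.
edges = configuration_model_from_sequence(list(degree_sequence))
g = Graph(n=len(degree_sequence), edge_list=edges)
print(g.deg)                                      # realized degrees, at most the targets
print(g.to_nx().number_of_edges())                # hand off to networkx for analysis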
chathumal93/Flood-Detection-ALOS2
|
[
"aa8cc1e9c9cff5c2522287ebae065278964f4dc9"
] |
[
"ALOS/process.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport os\nfrom zipfile import ZipFile\nimport gdal\nimport glob\nimport numpy as np\nimport pathlib\nimport rasterio\nfrom rasterio.warp import calculate_default_transform,reproject, Resampling\nfrom rasterio.merge import merge\nfrom rasterio import Affine\nfrom rasterio.enums import Resampling\n#from rasterio.windows import Window\nfrom shapely.geometry import Point, LineString, Polygon,box\nfrom whitebox.WBT.whitebox_tools import WhiteboxTools\nwbt = WhiteboxTools()\n\n\ndef unzip(in_path,out_path):\n while True:\n polarization = input(\"Polarization(HH/HV):\")\n if polarization not in ('HH','HV'):\n print(\"Not an appropriate choice.\")\n else:\n break\n\n #Unzip files according to polarization\n img_list = np.array([])\n for currentFile in pathlib.Path(in_path).glob(\"*.zip\"):\n with ZipFile(currentFile, 'r') as zipObj:\n listOfFileNames = zipObj.namelist()\n for fileName in listOfFileNames:\n if polarization == \"HH\":\n if fileName.startswith('IMG-HH-ALOS2') and fileName.endswith('.tif'):\n filePath = out_path+'/'+fileName\n print(filePath)\n img_list = np.append(img_list,filePath) \n zipObj.extract(fileName,out_path)\n else:\n if fileName.startswith('IMG-HV-ALOS2') and fileName.endswith('.tif'):\n filePath = out_path+'/'+fileName\n print(filePath)\n img_list = np.append(img_list,filePath) \n zipObj.extract(fileName,out_path) \n return img_list\n\ndef resample(raster_path_list,upscale_factor,dst_crs,outpath):\n #dst_crs = 'EPSG:4326'/'EPSG:3857'\n img_path_list = np.array([])\n \n for n in range(len(raster_path_list)):\n\n with rasterio.open(raster_path_list[n]) as src:\n transform, width, height = calculate_default_transform(\n src.crs, dst_crs, src.width* upscale_factor, src.height* upscale_factor, *src.bounds)\n kwargs = src.meta.copy()\n kwargs.update({\n 'crs': dst_crs,\n 'transform': transform,\n 'width': width,\n 'height': height\n })\n\n with rasterio.open((outpath+'/'+raster_path_list[n].split('/')[-1].split('.tif')[0]+'-CCRS.tif'), 'w', **kwargs) as dst:\n for i in range(1, src.count + 1):\n reproject(\n source=rasterio.band(src, i),\n destination=rasterio.band(dst, i),\n src_transform=src.transform,\n src_crs=src.crs,\n dst_transform=transform,\n dst_crs=dst_crs,\n resampling=Resampling.bilinear)\n \n img_path_list = np.append(img_path_list,str(outpath+'/'+raster_path_list[n].split('/')[-1].split('.tif')[0]+'-CCRS.tif'))\n \n src.close()\n dst.close()\n # Remove unzipped files and carry out with CCRS images\n for item in raster_path_list:\n os.remove(item)\n return img_path_list\n\ndef mosaic(in_list,out_path): \n RasList = []\n for x in range(len(in_list)):\n raster = rasterio.open(in_list[x])\n RasList.append(raster)\n #Mosaicing\n mos_img, out_trans = merge(RasList)\n \n #Metadata gathering from a one image(last image)\n rasterLast = rasterio.open(in_list[len(in_list)-1])\n metaData = rasterLast.meta.copy()\n\n #Update the metadata\n mosaic_meta = metaData\n\n #writing the new mosaic raster\n mosaic_meta.update({\"driver\": \"GTiff\",\n \"height\": mos_img.shape[1],\n \"width\": mos_img.shape[2],\n \"transform\": out_trans,\n \"compress\": 'lzw' })\n\n with rasterio.open(out_path+'/Mosaic.tif', \"w\", **mosaic_meta) as dest:\n dest.write(mos_img)\n dest.close()\n \ndef calibrate(product_path,raster_prefix):\n with rasterio.open(product_path+'/'+'Mosaic.tif') as dataset:\n band1 = dataset.read(1)\n band1 = np.where(band1 == 0, np.nan, band1)\n band1_calib = 20*np.log10(band1)-83\n band1_calib = np.float32(band1_calib)\n\n 
metaCalib = dataset.meta.copy()\n\n #writing the new mosaic raster\n metaCalib.update({\"driver\": \"GTiff\",\n \"height\": dataset.shape[0],\n \"width\": dataset.shape[1], \n 'dtype':'float32', \n \"compress\": 'lzw' })\n \n with rasterio.open(product_path+'/'+raster_prefix+'_Mosaic.tif', \"w\", **metaCalib) as dest:\n\n dest.write(band1_calib,1)\n dest.close() \n dataset.close()\n os.remove(product_path+'/'+'Mosaic.tif')\n \ndef Lee_sigma(in_path,out_path,filter_size): \n def my_callback(value):\n if not \"%\" in value:\n print(value)\n #Speckle filtering\n wbt.lee_sigma_filter(in_path,out_path, \n filterx=filter_size, \n filtery=filter_size, \n sigma=10.0, \n m=5.0, \n callback=my_callback)\n \ndef threshold(in_path,product_path,threshold_value): \n np.warnings.filterwarnings('ignore')\n with rasterio.open(in_path) as dataset: \n band1 = dataset.read(1)\n threshold = np.where(band1 < threshold_value,1,0)\n metaCalib = dataset.meta.copy()\n #writing the new mosaic raster\n metaCalib.update({\"driver\": \"GTiff\",\n \"height\": dataset.shape[0],\n \"width\": dataset.shape[1], \n 'dtype':'int32', \n \"compress\": 'lzw' })\n \n with rasterio.open(product_path, \"w\", **metaCalib) as dest:\n\n dest.write(threshold,1)\n dest.close() \n dataset.close()\n \ndef majority(in_path,product_path,final_path,filter_size): \n # Running status\n def my_callback(value): \n if not \"%\" in value:\n print(value)\n #Majority filter \n wbt.majority_filter(in_path,\n product_path,\n filterx=filter_size,\n filtery=filter_size,\n callback=my_callback)\n \n with rasterio.open(product_path) as dataset:\n band1 = dataset.read(1)\n selection = np.where(band1 == 1,1,0)\n metaCalib = dataset.meta.copy()\n #writing the new mosaic raster\n metaCalib.update({\"driver\": \"GTiff\",\n \"height\": dataset.shape[0],\n \"width\": dataset.shape[1], \n 'dtype': 'int32', \n \"compress\": 'lzw' })\n \n with rasterio.open(final_path, \"w\", **metaCalib) as dest:\n dest.write(selection,1)\n dest.close() \n dataset.close()\n os.remove(product_path)\n \ndef ras2poly(in_path,product_path): \n #Printing the running status\n def my_callback(value): \n if not \"%\" in value:\n print(value)\n\n wbt.raster_to_vector_polygons(in_path,\n product_path,\n callback=my_callback)\n \ndef change_gdal(img01,img02,out_path):\n pre_image = img01\n post_image = img02\n\n raster_pre=gdal.Open(pre_image)\n raster_post=gdal.Open(post_image)\n\n pre_band = raster_pre.GetRasterBand(1)\n post_band = raster_post.GetRasterBand(1)\n\n gtpost =raster_post.GetGeoTransform()\n gtpre =raster_pre.GetGeoTransform()\n\n #Pre and post top(x,y) bottom(x,y) co-ordinates\n post_bound = [gtpost[0], gtpost[3], \n gtpost[0] + (gtpost[1] * raster_post.RasterXSize), gtpost[3] + (gtpost[5] * raster_post.RasterYSize)]\n pre_bound = [gtpre[0] , gtpre[3] , \n gtpre[0] + (gtpre[1] * raster_pre.RasterXSize) , gtpre[3] + (gtpre[5] * raster_pre.RasterYSize)]\n\n #Finding the intersection boundry\n intersection = [max(post_bound[0], pre_bound[0]), \n min(post_bound[1], pre_bound[1]), \n min(post_bound[2], pre_bound[2]),\n max(post_bound[3], pre_bound[3])]\n\n post_bound_pix = [abs(round((gtpost[0]-intersection[0])/gtpost[1])),\n abs(round((gtpost[3]-intersection[1])/gtpost[5])),\n abs(round((gtpost[0]-intersection[2])/gtpost[1])),\n abs(round((gtpost[3]-intersection[3])/gtpost[5]))]\n\n pre_bound_pix = [abs(round((gtpre[0]-intersection[0])/gtpre[1])),\n abs(round((gtpre[3]-intersection[1])/gtpre[5])),\n abs(round((gtpre[0]-intersection[2])/gtpre[1])),\n 
abs(round((gtpre[3]-intersection[3])/gtpre[5]))]\n\n post_intersect = post_band.ReadAsArray(post_bound_pix[0],post_bound_pix[1],post_bound_pix[2] - post_bound_pix[0],\n post_bound_pix[3] - post_bound_pix[1],post_bound_pix[2] - post_bound_pix[0],\n post_bound_pix[3] - post_bound_pix[1],buf_type=gdal.GDT_Float32)\n\n pre_intersect = pre_band.ReadAsArray(pre_bound_pix[0],pre_bound_pix[1],pre_bound_pix[2] - pre_bound_pix[0],\n pre_bound_pix[3] - pre_bound_pix[1],pre_bound_pix[2] - pre_bound_pix[0], \n pre_bound_pix[3] - pre_bound_pix[1],buf_type=gdal.GDT_Float32)\n\n nrows = pre_bound_pix[3] - pre_bound_pix[1]\n ncols = pre_bound_pix[2] - pre_bound_pix[0]\n\n #Getting the change image using the numpy array operations\n change_array = np.subtract(post_intersect,pre_intersect)\n\n\n geotransform=([intersection[0],gtpost[1],gtpost[2],intersection[1],gtpost[2], gtpost[5]]) \n proj = raster_pre.GetProjection()\n\n output_raster = gdal.GetDriverByName('GTiff').Create(out_path+'/'+'Change.tif',ncols, nrows, 1 ,gdal.GDT_Float32)\n output_raster.SetGeoTransform(geotransform) \n output_raster.SetProjection(proj) \n output_raster.GetRasterBand(1).SetNoDataValue(-99)\n output_raster.GetRasterBand(1).WriteArray(change_array) \n output_raster.FlushCache()\n print(\"Change image has been created using the intersection region\") \n \ndef cordsys_check(raster_list): \n crs_list = []\n for x in raster_list:\n with rasterio.open(x) as src:\n crs_list.append(src.crs) \n src.close()\n \n if len(set(crs_list))==1:\n print(\"All images in same co-sys\")\n def_crs = crs_list[0]\n else:\n print(\"Reproject needed for same co-sys\")\n def_crs = crs_list[0]\n return def_crs\n \n"
] |
[
[
"numpy.subtract",
"numpy.append",
"numpy.log10",
"numpy.float32",
"numpy.warnings.filterwarnings",
"numpy.array",
"numpy.where"
]
] |
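The calibrate() and threshold() steps in ALOS/process.py above reduce to a dB conversion plus a cutoff. Here is a self-contained sketch on an in-memory array, where the DN values and the -20 dB threshold are illustrative and only the 20*log10(DN) - 83 calibration factor comes from the row:

import numpy as np

dn = np.array([[0.0, 1200.0, 5300.0],
               [800.0, 15000.0, 65535.0]])   # made-up ALOS-2 digital numbers
dn = np.where(dn == 0, np.nan, dn)           # mask nodata before taking the log
sigma0_db = 20 * np.log10(dn) - 83           # same calibration as calibrate()
flood_mask = np.where(sigma0_db < -20, 1, 0) # low backscatter flagged as water
print(sigma0_db.round(1))
print(flood_mask)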
MauricioSalazare/conditonal-copula
|
[
"68a9be3e0af7e541bca1b5bca28b45848420a583"
] |
[
"models/elliptical_distributions_study.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.stats import multivariate_normal, chi2, norm, t\nfrom scipy.special import gamma, stdtr, stdtridf, stdtrit # x = stdtrit(2, 0.1) == t(df=2).ppf(0.1) // x = t.inv(u)\nfrom scipy import optimize, stats, interpolate\nimport statsmodels.api as sm\nfrom scipy.interpolate import griddata\nimport time\nimport seaborn as sns\nimport scipy.stats as sps\nimport matplotlib.ticker as ticker\nfrom datetime import timedelta\n\n\ndef elli_distribution(data, mean, dim, covariance, nu=None, dist='gaussian'):\n \"\"\"\n Calculate the values of the samples (data) on the probability density function (p.d.f) for a 'gaussian' or\n 't-student' distributions.\n\n The method broadcast the function over the data samples. This makes the calculation a lot of faster for large\n dataset samples, making easier and faster the calculations of the log-likelihood.\n\n The elliptical distribution function follows the notation of Claudia Czado [1]\n\n f(x; mu, Sigma) = k_d |Sigma|^(-1/2) * g( (x-mu)^T · Sigma · (x-mu) )\n\n Sigma: Covariance matrix\n mu: Mean vector\n T: Transpose marker\n\n Where k_d and g(t) are defined as:\n\n 't-student':\n k_d = gamma( (nu + d) / 2) / ( gamma(nu/2) * (nu * d) ^ (d/2) )\n\n g(t) = ( 1 + t/nu )^(-(v + d)/2)\n\n nu: Degrees of freedom\n d: dimensions (number of variables)\n gamma: gamma distribution (generalization of n!)\n\n 'Gaussian':\n k_d = (2 * pi)^(-d/2)\n\n g(t) = exp(-t / 2)\n\n d: dimensions (number of variables)\n\n [1] - Czado, Claudia. \"Analyzing Dependent Data with Vine Copulas.\" Lecture Notes in Statistics, Springer (2019).\n pages 4 - 8.\n\n Input:\n -----\n data: (obj::numpy array): 2D - Array with dimension [dim x N]: 'N' are number of samples an\n 'dim' are the number of variables.\n 3D - Array with dimension [N x N x dim]: This is used for broadcasting a combination\n of variables using the mesh function.\n mean: (obj::numpy array): 2D - Array with dimensions [dim x 1]: 'dim' are number of variables\n dim: (int): The number of dimension/variables. This is for sanity check that the user knows\n how many dimension the problem has.\n covariance: (obj:: numpy array): 2D- Array with dimensions [dim x dim]\n nu: (int): Degrees of Freedom for the multivariate t-student distribution\n dist: (str): The dist of distribution to be calculated. Only 2 options available:\n 'gaussian' or 't'.\n\n Return:\n ------:\n (obj:: numpy array): 1D - Vector with dimension [N,] with the values of the samples evaluated in\n the p.d.f. 
selected.\n \"\"\"\n assert (mean.shape == (dim, 1)), \"Mean matrix has incorrect dimensions\"\n assert (len(data.shape) < 4), \"Data/Samples Matrix needs to have maximum 3-dimensions\"\n assert (dist == 'gaussian' or dist == 't'), \"Select the correct type of distribution\"\n\n if len(data.shape) == 2: # The array is 2D\n x_m = data.reshape(dim, -1) - mean.reshape(dim, 1)\n else:\n x_m = data.reshape(-1, dim).T - mean.reshape(dim, 1)\n\n t_ = np.sum(x_m * np.linalg.solve(covariance, x_m), axis=0)\n\n g_t_ = g_t(t_, dim=dim, nu=nu, dist=dist)\n k_d_ = k_d(dim=dim, nu=nu, dist=dist)\n\n #TODO: If the determinant of the covariance is 0, everything is doomed == singular matrix\n pdf = k_d_ * 1 / np.sqrt(np.linalg.det(covariance)) * g_t_\n\n # determinant = np.linalg.det(covariance)\n #\n # if determinant == 0.0:\n # determinant = -10 ** -200\n #\n # pdf = k_d_ * (1 / np.sqrt(determinant)) * g_t_\n\n if len(data.shape) == 2: # The array is 2D\n return pdf\n else: # The array is 3D\n return pdf.reshape(data.shape[:-1])\n\n\ndef g_t(x, dim=None, nu=None, dist='gaussian'):\n if dist == 'gaussian':\n return np.exp(- x / 2)\n elif dist == 't':\n assert (dim >= 2), \"The dimension should be at least a bivariate problem\"\n assert (dim is not None), \"No scalar in the dimension variable\"\n assert (nu is not None), \"No scalar in 'nu' (degrees of freedom - DoF)\"\n # assert nu >= 2 # Approximation works for a DoF greater than 2\n return np.power(1 + x / nu, -(nu + dim) / 2)\n else:\n raise ValueError('Wrong distribution selected')\n\n\ndef k_d(dim=None, nu=None, dist='gaussian'):\n assert (dim >= 2), \"The dimension should be at least a bivariate problem\"\n assert (dim is not None), \"No scalar in the dimension variable\"\n\n if dist == 'gaussian':\n return np.power(2 * np.pi, -dim / 2)\n elif dist == 't':\n assert (nu is not None), \"You need nu (degrees of freedom - DoF)\"\n # assert (nu >= 2), \"Approximation works for a DoF greater than 2\"\n return gamma((nu + dim) / 2) / (gamma(nu / 2) * np.power(nu * np.pi, dim / 2))\n else:\n raise ValueError('Wrong distribution selected')\n\n\ndef is_pos_def(A):\n \"\"\"\n Check if the matrix A is positive definite:\n https://stackoverflow.com/questions/16266720/find-out-if-matrix-is-positive-definite-with-numpy\n \"\"\"\n if np.array_equal(A, A.T):\n try:\n np.linalg.cholesky(A)\n return True\n except np.linalg.LinAlgError:\n return False\n else:\n return False\n\n\ndef samples_multivariate_t(mean, covariance, nu, n_samples, allow_singular=False):\n \"\"\"\n Multivariate t-Student (MVT) Generator.\n\n [1] - \"On Sampling from the Multivariate t Distribution\" - Marius Hofert. The R Journal Vol. 5/2, December 2013.\n ISSN 2073-4859. Page 131. Equation (3)\n\n X = \\mu + sqrt(W) * A * Z\n\n X: Random samples from a multivariate t-student distribution.\n \\mu: Mean of the probability distribution\n W: nu / Chi-squared (nu > 0, Chi-squared distribution)\n A: Cholesky decomposition (lower triangular) of the scale matrix \\sigma for a multivariate gaussian.\n Z: Multivariate random gaussian with covariance/scale matrix the identity matrix.\n\n In python we can say that Y = A * Z. 
And use the scipy function multivariate normal to do the sampling.\n \"\"\"\n\n dim = covariance.shape[0]\n assert (mean.shape == (dim, 1)), \"Shape should have dimension 2D dimension with size [dim, 1]\"\n # Sanity check, as the optimization should only have solutions for nu > 2, to have a defined covariance.\n assert (nu >= 2), \"The approximation only works for ' v (DoF) > 2' \"\n\n q = chi2(df=nu).rvs(n_samples).reshape(-1, 1) / nu\n y = multivariate_normal(np.zeros(len(covariance)),\n covariance,\n allow_singular=allow_singular).rvs(n_samples)\n\n return np.divide(y, np.sqrt(q)).transpose() + mean\n\n\ndef plot_samples(data_samples):\n \"\"\"\n Plot data_samples for 1, 2, or 3 variables. If data_samples has more than 3 variables, don't use this method.\n \"\"\"\n\n assert (len(data_samples.shape) == 2), \"Array should be 2-D\"\n ax = None\n\n if data_samples.shape[0] == 1: # Univariate\n fig = plt.figure(figsize=(4, 4))\n ax = fig.add_subplot(111)\n ax.hist(data_samples.ravel(), bins=100, histtype='step')\n plt.show()\n\n elif data_samples.shape[0] == 2: # Bivariate case\n fig = plt.figure(figsize=(4, 4))\n ax = fig.add_subplot(111)\n ax.scatter(data_samples[0, :], data_samples[1, :], marker='.', s=5)\n ax.set_title('Data samples')\n plt.show()\n\n elif data_samples.shape[0] == 3: # Trivariate case\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1, projection='3d')\n ax.scatter(data_samples[0, :], data_samples[1, :], data_samples[2, :], marker='.', s=5)\n ax.set_title('Data samples')\n plt.show()\n\n return ax\n\n\ndef conditional_parameters(dim, mean_vector, covariance_kendall, nu=None, copula_type='gaussian', variables={'x2':3}):\n r\"\"\"\n Calculate the conditional parameters: covariance (\\sigma), mean (\\mu) and degrees of freedom (\\nu),\n for the elliptical distributions. The notation is the following:\n\n Covariance block matrix:\n -----------------------\n \\sigma = [[\\sigma_{aa} , \\sigma_{ab}],\n [\\sigma_{ab}^{T}, \\sigma_{bb}]]\n\n \\sigma{ba} == \\sigma{ab}^{T}\n\n Conditional mean:\n -----------------\n \\mu{a|b} = \\mu_{a} + \\sigma_{ab}^{T} * \\sigma_{bb}^{-1} * (x_{b} - \\mu_{b})\n\n Conditional covariance:\n -----------------------\n \\sigma_{a|b} = k_cond * \\sigma_{aa} - \\sigma_{ab}^{T} * \\sigma_{bb}^{-1} * \\sigma_{ba}\n\n k_cond = 1 for 'gaussian'\n k_cond = (\\nu + (x_{b} - \\mu_{b})^{T} * \\sigma_{bb}^{-1} * (x_{b} - \\mu_{b})) / (\\nu + d_{b})\n\n where d_{b}: Dimension of the known variables (e.g. how many variables are conditioned)\n\n Conditional degrees of freedom (nu):\n ------------------------------------\n \\nu_{a|b} = \\nu + d_{b}\n\n\n Return:\n ------\n mu_cond: (obj:: numpy.array)\n 2-D numpy array with dimension [(D - P) x 1]. P: Dimension of known variables.\n (e.g. 
variables={'x2': 3.5, 'x4': 6.9}, then P = 2)\n\n sigma_cond:\n (obj:: numpy.array)\n 2-D numpy array with dimension [(D - P) x (D - P)]\n\n nu_cond:\n (obj:: numpy.array)\n 2-D numpy array with dimension [1 x 1]\n\n \"\"\"\n assert ((len(mean_vector.shape) == 2) and (len(covariance_kendall.shape) == 2)), \"Mean and covariance should be 2-D\"\n assert (mean_vector.shape[0] == covariance_kendall.shape[0]), \"Mean and covariance has wrong dimensions\"\n assert (copula_type.lower() in ['gaussian', 't']), \"Wrong copula type selected\"\n\n known_var_idx = []\n value_var = []\n for key in variables.keys():\n value_var.append(float(variables[key]))\n known_var_idx.append(int(key.replace('x', '')) - 1)\n known_var_idx = np.array(known_var_idx)\n value_var = np.array(value_var)\n\n assert ((dim - known_var_idx.max()) > 0), 'Cond. variables has higher or equal dimension than model'\n assert ((dim - len(known_var_idx)) > 0), 'Number of cond. variables are more than dimensions in the model'\n\n shift_idx = np.array([False] * dim)\n shift_idx[known_var_idx.tolist()] = True\n\n # variables_num = np.linspace(0, dim - 1, dim, dtype=np.int16)\n # variables_num = variables_num[shift_idx]\n\n # for ii, value in enumerate(variables_num):\n # value_var[ii] = self.ecdf[value](value_var[ii]) # Transform the variable value to uniform hyper cube\n #\n # if copula_type == 'gaussian':\n # value_var[ii] = norm.ppf(value_var[ii]) # Transform to the normal space (\\phi^{-1})\n # else: # 't' copula\n # value_var[ii] = t(df=nu).ppf(value_var[ii])\n\n value_var = np.array(value_var).reshape(len(value_var), 1)\n\n # Calculate the conditional covariance, mean and degrees of freedom\n # Pre-locate memory:\n dim_new = dim - len(known_var_idx)\n sigma_cond = np.zeros((dim_new, dim_new))\n mu_cond = np.zeros((dim_new, 1))\n d_B = len(known_var_idx) # Dimensions of the known variables d_{b}\n\n # --------------------------------------\n # SIGMA CONDITIONAL: Sigma_(a|b)\n # --------------------------------------\n # Block A will be the one to marginalize. 
p(x_a | x_b).\n # Meaning: a -> unknowns b -> known, provided, fixed values\n # Covariance matrix will be build as:\n # | A B |\n # | B^T D |\n\n cov_matrix = np.array(covariance_kendall)\n\n sigma_D = cov_matrix[shift_idx, :][:, shift_idx]\n sigma_A = cov_matrix[~shift_idx, :][:, ~shift_idx]\n sigma_B = cov_matrix[~shift_idx, :][:, shift_idx]\n\n # --------------------------------------\n # MEAN CONDITIONAL: Mu_(a|b)\n # --------------------------------------\n # Means organized to follow the same convention\n # | mu_a |\n # | mu_b |\n\n # mean_vector = np.array(np.zeros((dim, 1)))\n\n mu_A = mean_vector[~shift_idx]\n mu_B = mean_vector[shift_idx]\n\n if copula_type == 'gaussian':\n k_cond = 1\n else:\n k_cond = ((nu + np.matmul(np.matmul((value_var - mu_B).T, np.linalg.inv(sigma_D)), (value_var - mu_B)))\n / (nu + d_B))\n\n sigma_cond[:, :] = k_cond * (sigma_A - np.matmul(np.matmul(sigma_B, np.linalg.inv(sigma_D)), sigma_B.T))\n mu_cond[:] = mu_A + np.matmul(np.matmul(sigma_B, np.linalg.inv(sigma_D)), (value_var - mu_B))\n\n if copula_type == 't':\n # --------------------------------------\n # NU (Degrees of Freedom - DoF) CONDITIONAL: Nu_(a|b)\n # --------------------------------------\n # DoF organized to follow the same convention\n # | nu_a |\n # | nu_b |\n\n nu_cond = nu + d_B\n\n else:\n nu_cond = None\n\n unknown_variables_index = ~shift_idx\n\n return mu_cond, sigma_cond, nu_cond, unknown_variables_index\n\n\ndef covariance_kendall_tau(data_samples):\n # assert (data_samples.shape[1] > data_samples.shape[0]), \"Samples should be greater than number of variables\" # TODO: The original file has this uncommented\n\n tau = pd.DataFrame(data_samples).T.corr(method='kendall').values\n spearman_rho = pd.DataFrame(data_samples).T.corr(method='spearman').values\n\n return (np.sin((np.pi * tau) / 2), # Pearson relation with kendall's tau\n tau, # Kendall's tau matrix\n 2 * np.sin((np.pi / 6) * spearman_rho), # Pearson relation with spearman's rho\n spearman_rho) # Spearman rho matrix\n\n\ndef neg_log_likelihood_t_plot(data_samples, mean, covariance, dim, upper_bnd=100, step_size=300):\n start = time.time()\n log_likelihood = []\n nu_range = np.linspace(2, upper_bnd, step_size)\n for nu__ in nu_range:\n ans_t = elli_distribution(data=data_samples, mean=mean, dim=dim,\n covariance=covariance, nu=nu__, dist='t')\n log_likelihood.append(np.sum(-np.log(ans_t)))\n\n log_likelihood = np.array(log_likelihood)\n best_nu = nu_range[np.argmin(log_likelihood)]\n\n print(f'Best nu value: {best_nu}')\n print(f'Time processing: {time.time() - start}')\n\n ans_t = elli_distribution(data=data_samples, mean=mean, dim=dim, covariance=covariance,\n nu=best_nu, dist='t')\n # idx = (ans_t == np.inf) # Clean the values that generates and error\n print(f'Value of the log-likelihood: {np.sum(-np.log(ans_t))}')\n\n plt.figure()\n plt.plot(nu_range, log_likelihood)\n plt.title('negative log-likelihood \"t-student\"')\n plt.xlabel('nu - (degrees of freedom - DoF)')\n plt.ylabel('Neg-Log-likelihood')\n plt.show()\n\n\ndef neg_log_likelihood_t(x, *params):\n \"\"\"\n Wrapper function over the elliptical distribution function to calculate the negative log-likelihood of the data,\n with a parameter 'nu' (Degrees of Freedom)\n \"\"\"\n values = -np.log(elli_distribution(data=params[0],\n mean=params[1],\n dim=params[2],\n covariance=params[3],\n nu=x,\n dist=params[4]))\n # idx = (values == np.inf) # Clean the values that generates and error\n\n return np.sum(values)\n\n\ndef optimize_nu(samples, mean, covariance, dim, 
plot=True):\n n = np.floor(samples.shape[1] * 0.8).astype(np.int)\n nu_bounds = ((0, 200),)\n nu_results = []\n for _ in range(200):\n columns = np.random.randint(samples.shape[1], size=n)\n result = optimize.minimize(neg_log_likelihood_t,\n x0=np.array(3),\n method='SLSQP',\n bounds=nu_bounds,\n args=(samples[:, columns],\n mean,\n dim,\n covariance,\n 't'))\n nu_results.append(result.x)\n nu_results = np.array(nu_results).squeeze()\n low_quantile = np.quantile(nu_results, 0.025)\n high_quantile = np.quantile(nu_results, 0.975)\n\n if plot:\n plt.figure()\n plt.hist(nu_results)\n plt.title('Optimal nu results - Histogram')\n plt.xlabel('nu - Degrees of Freedom (DoF)')\n plt.show()\n\n print('-------------------------')\n print('Stochastic \"nu\" results:')\n print('-------------------------')\n print(f'nu mean: {nu_results.mean().round(3)}')\n print(f'nu low quantile (2.5%): {low_quantile.round(3)}')\n print(f'nu high quantile (97.5%): {high_quantile.round(3)}')\n\n return nu_results.mean().round(3), low_quantile.round(3), high_quantile.round(3)\n\n\ndef pit(X):\n \"\"\"\n Takes a data array X of dimension [M x N], and converts it to a uniform\n random variable using the probability integral transform, U = F(X)\n \"\"\"\n M = X.shape[0]\n N = X.shape[1]\n\n # convert X to U by using the probability integral transform: F(X) = U\n U = np.empty(X.shape)\n for ii in range(0, N):\n x_ii = X[:, ii]\n\n # estimate the empirical cdf\n (xx, pp) = ecdf(x_ii, M)\n f = interpolate.interp1d(xx, pp)\n\n # plug this RV sample into the empirical cdf to get uniform RV\n u_ii = f(x_ii)\n U[:, ii] = u_ii\n\n return U\n\n\ndef ecdf(x_i, npoints):\n \"\"\" Generates an Empirical CDF using the indicator function.\n\n Inputs:\n x_i -- the input data set, should be a numpy array\n npoints -- the number of desired points in the empirical CDF estimate\n\n Outputs:\n y -- the empirical CDF\n \"\"\"\n # define the points over which we will generate the kernel density estimate\n x = np.linspace(min(x_i), max(x_i), npoints)\n n = float(x_i.size)\n y = np.zeros(npoints)\n\n for ii in np.arange(x.size):\n idxs = np.where(x_i <= x[ii])\n y[ii] = np.sum(idxs[0].size) / (n + 1)\n\n return (x, y)\n\n\ndef probability_integral_transform(data, plot=False, variable=1, interpolation='spline', bins=None):\n '''\n Transforms the data to the uniform space, using and empirical distribution function.\n The method also returns a spline model of the ECDF and inverser of ECDF for future data sets.\n\n The empirical distribution function is take from [1]:\n\n \\hat{F}(x) = 1/(n + 1) \\sum_{n}{i=1} 1{x_i <= x} for all x\n\n Where\n 1: The indicator function.\n n: Number of samples.\n 'n + 1' is used instead of 'n' to avoid boundary problems of the estimator \\hat{F}(x).\n\n [1] - Czado, Claudia. \"Analyzing Dependent Data with Vine Copulas.\" Lecture Notes in Statistics, Springer (2019).\n page 3.\n\n\n The output is the linear interpolation between \\hat{F}(x) and \\hat{x}, which \\hat{x} are values equally\n spaced between the minimum and the maximum of 'x'.\n\n Notes on interpolation:\n The spline interpolation in scipy fails if you have repeated values over the x-axis, it should have only\n unique values, which is not the case for real data. 
Therefore, a np.linspace should be done to create an array\n that represents the values in the x-axis for the interpolation.\n\n The most difficult parts to interpolate are around 0 and 1, If the conditional copula is on the limits,\n you can se artifacts in the simulated data, because of the interpolation.\n\n Input:\n ------\n data (obj:: numpy array): The rows are variables and columns instances of the variables.\n plot (bool): Plots for the visual inspection of the transformation.\n\n Returns:\n --------\n uniform_samples (obj:: numpy array): Values within [0,1], which is the transformation of the input\n data into the uniform space.\n ecdf_model (obj:: scipy.interp1d): Model with the spline of the ecdf\n inv_ecdf_model (obj:: scipy.interp1d): Model with the splint of the inverse of the ecdf.\n\n '''\n #%%\n ecdf_models = []\n inv_ecdf_models = []\n uniform_values = []\n\n for ii in range(data.shape[0]):\n '''ECDF Calculation per variable'''\n\n x_data = data[ii, :]\n n_obs = data[ii, :].shape[0]\n _x = np.linspace(data[ii, :].min(), data[ii, :].max(), n_obs)\n # _x.sort()\n\n # #---------------------------------------------------------------------------------\n # # Patch\n # x_ii = data[ii, :]\n # npoints = data[ii, :].shape[0]\n #\n #\n # x = np.linspace(min(x_ii), max(x_ii), npoints) # This is like a sort\n # n = float(x_ii.size)\n # y = np.zeros(npoints)\n #\n # for jj in np.arange(x.size):\n # idxs = np.where(x_ii <= x[jj])\n # y[jj] = np.sum(idxs[0].size) / (n + 1)\n #\n # plt.figure()\n # plt.plot(x, y)\n # plt.show()\n #\n # f = interpolate.interp1d(x, y)\n # f_spline = interpolate.splrep(x, y)\n #\n # # plug this RV sample into the empirical cdf to get uniform RV\n # u_ii = f(x_ii)\n #\n # plt.figure()\n # plt.hist(u_ii)\n # plt.show()\n #\n # # ---------------------------------------------------------------------------------\n\n\n # n_obs = _x.shape[0]\n _y = np.empty(n_obs)\n\n # Avoid boundary problems in the spline and linear model\n for jj in np.arange(n_obs):\n _y[jj] = np.sum(x_data <= _x[jj]) / (n_obs + 1)\n\n # Avoid boundary problems in the linear model\n _x_bnd = np.r_[-np.inf, _x, np.inf]\n _y_bnd = np.r_[0.0, _y, 1.0]\n\n if interpolation == 'linear':\n ecdf_fun = interpolate.interp1d(_x_bnd, _y_bnd)\n inv_ecdf = interpolate.interp1d(_y_bnd, _x_bnd)\n\n ecdf_models.append(ecdf_fun)\n inv_ecdf_models.append(inv_ecdf)\n uniform_values.append(ecdf_fun(data[ii, :]))\n else:\n # smoothing = 0.01\n ecdf_fun_tck = interpolate.splrep(_x, _y)\n # inv_ecdf_tck = interpolate.splrep(_y, _x, s=smoothing)\n inv_ecdf_tck = interpolate.splrep(_y, _x)\n\n ecdf_models.append(ecdf_fun_tck)\n inv_ecdf_models.append(inv_ecdf_tck)\n uniform_values.append(interpolate.splev(data[ii, :], ecdf_fun_tck))\n\n\n uniform_values = np.array(uniform_values)\n\n if plot:\n fig = plt.figure(figsize=(15, 4))\n ax = fig.subplots(1, 4)\n\n if interpolation == 'linear':\n ecdf_x_support = ecdf_models[variable].x\n ecdf_y_support = ecdf_models[variable].y\n\n inv_ecdf_x_support = inv_ecdf_models[variable].x\n inv_ecdf_y_support = inv_ecdf_models[variable].y\n\n uniform_transform = ecdf_models[variable](data[variable, :])\n else:\n ecdf_x_support = ecdf_models[variable][0]\n ecdf_y_support = interpolate.splev(ecdf_models[variable][0], ecdf_models[variable])\n\n inv_ecdf_x_support = inv_ecdf_models[variable][0]\n inv_ecdf_y_support = interpolate.splev(inv_ecdf_models[variable][0], inv_ecdf_models[variable])\n\n uniform_transform = interpolate.splev(data[variable, :], ecdf_models[variable])\n\n\n 
ax[0].hist(data[variable, :], bins=bins, histtype='step', label=variable)\n ax[0].legend()\n\n ax[1].plot(ecdf_x_support, ecdf_y_support, lw=0.5, label='CDF')\n ax[1].legend()\n\n ax[2].plot(inv_ecdf_x_support,inv_ecdf_y_support, lw=0.5, label='Inverse CDF')\n ax[2].legend()\n\n ax[3].hist(uniform_transform, bins=bins, histtype='step',\n label= 'Uniform dist. (Transformed)')\n ax[3].legend(loc='lower center')\n plt.suptitle('Probability Integral Transform (PIT) - Variable: ' + str(variable)\n + '\\nInterpolation method: ' + interpolation)\n plt.show()\n #%%\n return uniform_values, ecdf_models, inv_ecdf_models\n\n\ndef plot_uniform_variables(u_, v_):\n fig = plt.figure(figsize=(10, 5))\n ax = fig.subplots(1, 2)\n sns.kdeplot(u_, v_, ax=ax[0])\n ax[0].set_xlim([0, 1])\n ax[0].set_ylim([0, 1])\n ax[1].scatter(u_, v_, marker='.', s=10)\n ax[1].set_xlim([0, 1])\n ax[1].set_ylim([0, 1])\n plt.show()\n\n\ndef t_copula(uniform_values, covariance, nu, dim):\n \"\"\"\n 't-student' copula density\n \"\"\"\n\n t_student = t(df=nu)\n c_density = elli_distribution(data=t_student.ppf(uniform_values), mean=np.zeros((dim, 1)),\n dim=dim, covariance=covariance, nu=nu, dist='t')\n\n if len(uniform_values.shape) == 2: # 2-D Matrix\n c_normalize = np.ones((1, uniform_values.shape[1]))\n for ii in range(dim):\n c_normalize = c_normalize * t_student.pdf(t_student.ppf(uniform_values[ii, :]))\n\n #TODO: Remove the division by 0\n # c_normalize[c_normalize == 0.0] = -10**-100\n c_normalize[c_normalize == 0.0] = 10**-100\n\n c_copula = c_density / c_normalize\n\n else: # 3-D Matrix (Used to broadcast the data created by mesh-grid)\n c_normalize = np.ones(uniform_values.shape[0:2])\n for ii in range(dim):\n c_normalize = c_normalize * t_student.pdf(t_student.ppf(uniform_values[:, :, ii]))\n\n #TODO: Remove the division by 0\n # c_normalize[c_normalize == 0.0] = -10**-100\n c_normalize[c_normalize == 0.0] = 10**-100\n\n c_copula = c_density / c_normalize\n\n # print('t copula:')\n # print(f'Nan values: {np.sum(np.isnan(c_copula))}')\n # print(f'inf values: {np.sum(c_copula == np.inf)}')\n\n return c_copula\n\n\ndef gaussian_copula(uniform_values, covariance, dim):\n \"\"\"\n Gaussian copula density\n \"\"\"\n\n gaussian = norm(loc=0, scale=1)\n c_density = elli_distribution(data=gaussian.ppf(uniform_values), mean=np.zeros((dim, 1)),\n dim=dim, covariance=covariance, dist='gaussian')\n\n\n if len(uniform_values.shape) == 2: # 2-D Matrix\n c_normalize = np.ones((1, uniform_values.shape[1]))\n for ii in range(dim):\n c_normalize = c_normalize * gaussian.pdf(gaussian.ppf(uniform_values[ii, :]))\n\n #TODO: Remove the division by 0\n # c_normalize[c_normalize == 0.0] = -10**-100\n c_normalize[c_normalize == 0.0] = 10**-100\n\n c_copula = c_density / c_normalize\n\n else: # 3-D Matrix (Used to broadcast the data created by mesh-grid)\n c_normalize = np.ones(uniform_values.shape[0:2])\n for ii in range(dim):\n c_normalize = c_normalize * gaussian.pdf(gaussian.ppf(uniform_values[:, :, ii]))\n\n #TODO: Remove the division by 0\n # c_normalize[c_normalize == 0.0] = -10**-100\n c_normalize[c_normalize == 0.0] = 10**-100\n\n c_copula = c_density / c_normalize\n\n # print('Gaussian copula:')\n # print(f'Nan values: {np.sum(np.isnan(c_copula))}')\n # print(f'inf values: {np.sum(c_copula == np.inf)}')\n\n return c_copula\n\n\ndef neg_log_likelihood_copula_t(x, *params):\n \"\"\"\n Wrapper function over the 't-student' copula function to calculate the negative log-likelihood of the data,\n with a parameter 'nu' (Degrees of Freedom)\n 
\"\"\"\n values = t_copula(uniform_values=params[0],\n covariance=params[1],\n nu=x,\n dim=params[2])\n\n # values = values[~np.isnan(values)] # Remove the nan\n # values = values[~(values == np.inf)] # Remove the division by zero in the copula\n # values[values <= 0.0] = np.finfo(np.float64).eps # Remove the warning for creating np.inf values\n\n #TODO: Remove the negative or 0 values\n values[values <= 0.0] = 10**-100\n\n values = -np.log(values)\n\n return np.nansum(values)\n\n\ndef neg_log_likelihood_copula_t_plot(data_samples, covariance, dim, upper_bnd=100, step_size=300, ax=None,\n legend_on=True, return_values=False):\n nu_range = np.linspace(2, upper_bnd, step_size)\n log_likelihood = []\n for nu__ in nu_range:\n values = t_copula(uniform_values=data_samples,\n covariance=covariance,\n nu=nu__,\n dim=dim)\n\n # values = values[~np.isnan(values)]\n # values = values[~(values == np.inf)] # Remove the division by zero in the copula\n # values[values <= 0.0] = np.finfo(np.float64).eps # Remove the warning for creating np.inf values\n values = -np.log(values)\n\n log_likelihood.append(np.nansum(values))\n\n log_likelihood = np.array(log_likelihood)\n\n log_like_clean = log_likelihood.copy()\n log_like_clean[(log_like_clean == -np.inf)] = np.inf # Remove 0.0 values of the evaluation of copula\n best_nu = nu_range[np.argmin(log_like_clean)]\n\n best_likelihood = t_copula(uniform_values=data_samples,\n covariance=covariance,\n nu=best_nu,\n dim=dim)\n\n # best_likelihood = best_likelihood[~np.isnan(best_likelihood)] # Remove the nan\n # best_likelihood = best_likelihood[~(best_likelihood == np.inf)] # Remove the division by zero in the copula\n # best_likelihood[best_likelihood <= 0.0] = np.finfo(np.float64).eps # Remove the warning for creating np.inf values\n\n\n best_likelihood = -np.log(best_likelihood)\n t_neg_loglike = np.nansum(best_likelihood)\n\n print('\\n')\n print('-------------------------------------------')\n print('\"t-student\" Copula (Linear search)')\n print('-------------------------------------------')\n print(f'Best nu value: {best_nu}')\n print(f'Neg log-likelihood: {t_neg_loglike}')\n\n # Calculate the neg-loglikelihood of the Gaussian copula\n values = gaussian_copula(uniform_values=data_samples, covariance=covariance, dim=dim)\n # values = values[~np.isnan(values)]\n # values = values[~(values == np.inf)] # Remove the division by zero in the copula\n # values[values <= 0.0] = np.finfo(np.float64).eps # Remove the warning for creating np.inf values\n values = -np.log(values)\n\n gauss_neg_loglike = np.nansum(values)\n\n print('\\n')\n print('-------------------------------------------')\n print('Gaussian Copula')\n print('-------------------------------------------')\n print(f'Neg log-likelihood: {gauss_neg_loglike}')\n print('\\n')\n\n if ax == None:\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(1,1,1)\n ax.plot(nu_range, log_likelihood, label='t-Copula')\n # ax.set_title('negative log-likelihood \"t-student\"')\n # ax.set_xlabel(r'$\\nu$ - (degrees of freedom)')\n # ax.set_ylabel('Neg-Log-likelihood')\n ax.scatter(best_nu, t_neg_loglike, s=40, facecolors='none', edgecolors='r', label=r'Optimal $\\nu$')\n ax.axhline(gauss_neg_loglike, linewidth=0.5, color='k', label='Gaussian-Copula')\n if legend_on:\n ax.legend()\n plt.show()\n\n if return_values:\n return (nu_range, log_likelihood, best_nu, t_neg_loglike, gauss_neg_loglike)\n return ax\n\n\ndef initial_guess(data):\n nu = []\n for ii in range(data.shape[0]):\n nu_, _, _ = t.fit(data[ii, :])\n 
nu.append(nu_)\n\n return np.array(nu).mean()\n\n\ndef plot_cdf_2d(samples):\n assert (samples.shape[0] == 2), 'Samples should be in bivariate only'\n\n samples_trans = samples.T.copy()\n n_obs = samples_trans.size\n z = []\n for xx, yy in samples_trans:\n z.append(np.sum((samples_trans <= xx) & (samples_trans <= yy)) / (n_obs + 1))\n z = np.array(z)\n\n bivariate_cdf = np.hstack([samples_trans, np.array(z).reshape(-1, 1)])\n\n\n # Interpolate the data\n pts = 100j\n x_min = np.floor(bivariate_cdf[:, 0].min())\n x_max = np.ceil(bivariate_cdf[:, 0].max())\n y_min = np.floor(bivariate_cdf[:, 1].min())\n y_max = np.ceil(bivariate_cdf[:, 1].max())\n X, Y = np.mgrid[x_min:x_max:pts, y_min:y_max:pts]\n F = griddata(bivariate_cdf[:,0:2], bivariate_cdf[:,2], (X, Y))\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n surf = ax.plot_surface(X, Y, F, cmap=plt.get_cmap('viridis'), norm=plt.Normalize(vmax=np.nanmax(F), vmin=np.nanmin(F)))\n ax.set_zlim([0, 1])\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('ECDF')\n ax.set_title('Bivariate CDF (Empirical CDF)')\n fig.colorbar(surf, shrink=0.5, aspect=5)\n plt.show()\n\n return ax\n\n\ndef normalize_copulas_visualization():\n \"\"\"\n Method to show the 't-student' and 'gaussian' copula in the normalized versions as visual aid\n \"\"\"\n tau = 0.2\n rho = np.sin(tau * np.pi / 2)\n scale = [[1, rho],\n [rho, 1]]\n nu = 4\n\n xx, yy = np.meshgrid(\n np.linspace(-8, 8, 500),\n np.linspace(-8, 8, 500))\n\n # xx_ = norm.cdf(norm.ppf(xx))\n uniform_z_x = t(df=nu).cdf(xx)\n uniform_z_y = t(df=nu).cdf(yy)\n\n z_x = norm.ppf(uniform_z_x)\n z_y = norm.ppf(uniform_z_y)\n pos = np.dstack((z_x, z_y)) # This is Z\n\n values = t_copula(uniform_values=norm.cdf(pos), covariance=np.array(scale), nu=nu, dim=2)\n\n rr_1 = norm.pdf(pos[:, :, 0])\n rr_2 = norm.pdf(pos[:, :, 1])\n\n re_values = values * rr_1 * rr_2\n\n fig = plt.figure(figsize=(10, 5))\n ax = fig.add_subplot(121)\n cs = ax.contour(z_x, z_y, re_values, 10, linewidths=0.8)\n ax.clabel(cs, inline=1, fontsize=8)\n ax.set_xlim([-3, 3])\n ax.set_ylim([-3, 3])\n\n values_gauss = gaussian_copula(uniform_values=norm.cdf(pos), covariance=np.array(scale), dim=2)\n re_values = values_gauss * rr_1 * rr_2\n ax = fig.add_subplot(122)\n cs = ax.contour(z_x, z_y, re_values, 10, linewidths=0.8)\n ax.clabel(cs, inline=1, fontsize=8)\n ax.set_xlim([-3, 3])\n ax.set_ylim([-3, 3])\n\n\ndef plot_pit(data, variable, interpolation='linear', bins=None):\n\n if isinstance(variable, list):\n if len(variable) == 1:\n variable = variable[0]\n probability_integral_transform(data=data,\n plot=True,\n variable=variable,\n interpolation=interpolation,\n bins=bins)\n else:\n for variable_number in variable:\n probability_integral_transform(data=data,\n plot=True,\n variable=variable_number,\n interpolation=interpolation,\n bins=bins)\n elif isinstance(variable, int):\n probability_integral_transform(data=data,\n plot=True,\n variable=variable,\n interpolation=interpolation,\n bins=bins)\n\n else:\n raise Warning('The variable is not a list or a integer number')\n\n\ndef quarter_converter(quarter):\n hour = timedelta(minutes=(quarter) * 15).seconds // 3600\n minutes = (timedelta(minutes=(quarter) * 15).seconds // 60) % 60\n\n if minutes == 0:\n minutes_str = '00'\n else:\n minutes_str = str(minutes)\n\n return str(hour) + ':' + minutes_str\n\n\ndef plot_standarized_samples(samples):\n uniform_samples, _,_ =probability_integral_transform(samples)\n cov_pearson, tau, _, _ = covariance_kendall_tau(samples)\n 
standarized_plots(uniform_samples, [0,1], pearson=cov_pearson, tau=tau)\n\n\n\ndef standarized_plots(uniform_samples, variables, pearson, tau, ax=None):\n\n n_grid = len(variables)\n\n if ax is None:\n fig = plt.figure(figsize=(10, 10))\n ax = fig.subplots(n_grid, n_grid)\n fig.subplots_adjust(wspace=0, hspace=0)\n\n # Lower diagonal\n for col in range(n_grid - 1):\n for row in range(col + 1, n_grid):\n # var_1 = 60\n # var_2 = 70\n uniform_z_x = uniform_samples[variables[row], :]\n uniform_z_y = uniform_samples[variables[col], :]\n\n # z-scale of observations\n z_x = norm.ppf(uniform_z_x)\n z_y = norm.ppf(uniform_z_y)\n z_i = np.array([z_x, z_y])\n\n kde = sps.gaussian_kde(z_i, bw_method=0.5)\n # get a regular grid of points over our region of interest\n xx, yy = np.meshgrid(\n np.linspace(-3, 3, 50),\n np.linspace(-3, 3, 50))\n # calculate probability density on these points\n z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape)\n cs = ax[row, col].contour(xx, yy, z, 6, linewidths=0.5, cmap=plt.get_cmap('plasma'))\n # ax[row, col].clabel(cs, inline=1, fontsize=4)\n ax[row, col].set_xlim([-3, 3])\n ax[row, col].set_ylim([-3, 3])\n ax[row, col].yaxis.set_major_formatter(ticker.NullFormatter())\n ax[row, col].xaxis.set_major_formatter(ticker.NullFormatter())\n ax[row, col].set_xticks([], [])\n ax[row, col].set_yticks([], [])\n\n # Upper-diagonal\n for row in range(n_grid - 1):\n for col in range(row + 1, n_grid):\n # ax[row, col].scatter(uniform_samples[row, :], uniform_samples[col, :], s=5, marker='.', c='#CCCCCC')\n ax[row, col].scatter(uniform_samples[row, :], uniform_samples[col, :], s=2, marker='.', c='k')\n # ax[row, col].text(0.5, 0.5, \"{:.{}f}\".format(tau[row, col], 2),\n # horizontalalignment='center',\n # verticalalignment='center',\n # transform=ax[row, col].transAxes,\n # fontdict={'color': 'red', 'weight': 'bold', 'size': 12},\n # bbox=dict(facecolor='w', edgecolor='w'))\n # ax[row, col].text(0.5, 0.6, \"{:.{}f}\".format(pearson[row, col], 2),\n # horizontalalignment='center',\n # verticalalignment='center',\n # transform=ax[row, col].transAxes,\n # fontdict={'color': 'blue', 'weight': 'bold', 'size': 12})\n ax[row, col].yaxis.set_major_formatter(ticker.NullFormatter())\n ax[row, col].xaxis.set_major_formatter(ticker.NullFormatter())\n ax[row, col].set_xticks([], [])\n ax[row, col].set_yticks([], [])\n\n # Diagonal\n for diag in range(n_grid):\n ax[diag, diag].hist(uniform_samples[diag], density=True, edgecolor='w', fc='#AAAAAA')\n ax[diag, diag].set_ylim([0, 1.5])\n\n if variables[diag] != 96:\n # ax[diag, diag].text(x=0.5, y=0.8, s='quarter.' 
+ str(variables[diag]),\n # horizontalalignment='center',\n # verticalalignment='center',\n # transform=ax[diag, diag].transAxes,\n # fontdict={'color': 'red', 'weight': 'bold'})\n ax[diag, diag].text(x=0.5, y=0.8, s=quarter_converter(variables[diag]),\n horizontalalignment='center',\n verticalalignment='center',\n transform=ax[diag, diag].transAxes,\n fontdict={'color': 'red', 'weight': 'bold', 'size': 9})\n else:\n ax[diag, diag].text(x=0.5, y=0.8, s='energy.year',\n horizontalalignment='center',\n verticalalignment='center',\n transform=ax[diag, diag].transAxes,\n fontdict={'color': 'red', 'weight': 'bold', 'size': 7})\n\n\n ax[diag, diag].hlines(1.0, xmin=ax[diag, diag].get_xlim()[0], xmax=ax[diag, diag].get_xlim()[1],\n linestyles={'dashed'}, linewidths=0.8, colors='k')\n ax[diag, diag].yaxis.set_major_formatter(ticker.NullFormatter())\n ax[diag, diag].xaxis.set_major_formatter(ticker.NullFormatter())\n ax[diag, diag].set_xticks([], [])\n ax[diag, diag].set_yticks([], [])\n\n return ax\n\n\ndef plot_covariance_matrix(covariance, ax=None):\n levels = None\n\n if ax is None:\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(1, 1, 1)\n\n cb_ax = ax.contourf(covariance, levels=levels, cmap=plt.cm.get_cmap('PuOr'), vmin=-1.0, vmax=1.0)\n ax.set_xlabel('Time intervals [quarters]')\n cbar = plt.colorbar(cb_ax, ax=ax)\n cbar.ax.set_ylabel('Kendall\\'s tau Correlation')\n plt.show()\n\n return ax\n\n\ndef plot_time_steps(samples, xlim=None, ylim=None):\n if xlim==None:\n xlim_ = (-0.1, 3)\n else:\n xlim_ = xlim\n\n if ylim == None:\n ylim_ = (-0.1, 3)\n else:\n ylim_ = ylim\n\n return sns.jointplot(samples[0, :], samples[1, :],\n xlim=xlim_, ylim=ylim_,\n s=5, fc='k', ec='k', marker='x').plot_joint(sns.kdeplot,\n n_levels=30,\n linewidths=0.5,\n zorder=1)\n\n\nif __name__ == '__main__':\n #%% Test gaussian distribution against Matlab R2018b\n x_ = np.linspace(-3, 3, 31)\n y_ = np.linspace(-3, 3, 31)\n data_ = np.vstack([x_, y_])\n point_ = data_[:, 10]\n mean_ = np.array([1, 1]).reshape(2, 1)\n covariance_ = np.array([[1, 0.7], [0.7, 1]])\n\n tau = 0.7\n rho = np.sin(tau * np.pi / 2)\n\n covariance_tau_ = np.array([[1, rho], [rho, 1]])\n\n nu_ = 4\n dim_ = 2\n\n # Compare my elliptical gaussian distribution vs scipy multivariate_normal method\n ans = elli_distribution(data=data_, mean=mean_, dim=dim_, covariance=covariance_, nu=nu_, dist='gaussian')\n mvn = multivariate_normal(mean_.squeeze(), covariance_)\n mvn.pdf(point_)\n\n #%% Create surface plots for 't-distribution' and 'gaussian'. 
Compare against Matlab R2018b\n X1, X2 = np.meshgrid(x_, y_)\n data_ = np.vstack([X1.ravel(), X2.ravel()])\n ans_t = elli_distribution(data=data_, mean=mean_, dim=dim_, covariance=covariance_, nu=nu_, dist='t')\n ans_t = ans_t.reshape(len(x_), len(x_))\n\n ans_gauss = elli_distribution(data=data_, mean=mean_, dim=dim_, covariance=covariance_, dist='gaussian')\n ans_gauss = ans_gauss.reshape(len(x_), len(x_))\n\n fig = plt.figure(figsize=(12, 6))\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n ax.plot_surface(X1, X2, ans_t, cmap=plt.cm.get_cmap('viridis'), linewidth=0.2, antialiased=False)\n ax.set_title('t-Student')\n ax = fig.add_subplot(1, 2, 2, projection='3d')\n ax.plot_surface(X1, X2, ans_gauss, cmap=plt.cm.get_cmap('viridis'), linewidth=0.2, antialiased=False)\n ax.set_title('Gaussian')\n plt.show()\n\n #%% Second way to create the 't-student' 3d surface - Broad casting a 3D array\n resolution = 0.2\n X, Y = np.mgrid[-3:3:resolution, -3:3:resolution]\n pos = np.dstack((X, Y))\n ans_t = elli_distribution(data=pos, mean=mean_, dim=dim_, covariance=covariance_, nu=nu_, dist='gaussian')\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(1, 1, 1, projection='3d')\n ax.plot_surface(X, Y, ans_t, cmap=plt.cm.get_cmap('viridis'), linewidth=0.2, antialiased=False)\n ax.set_title('t-Student')\n plt.show()\n\n #%% Copula plots\n\n '''\n NICE COPULA PLOTS\n '''\n\n # 'Gaussian' copula\n resolution = 1/200\n U, V = np.mgrid[0.05:0.95:resolution, 0.05:0.95:resolution]\n pos = np.dstack((U, V))\n gaussian = norm(loc=0, scale=1)\n c_density_gauss = (elli_distribution(data=gaussian.ppf(pos), mean=np.array([0, 0]).reshape(2, 1),\n dim=dim_, covariance=covariance_tau_, dist='gaussian')\n / (gaussian.pdf(gaussian.ppf(U)) * gaussian.pdf(gaussian.ppf(V))))\n\n # 't-student'\n nu_c = 4\n t_student = t(df=nu_c)\n c_density_t = (elli_distribution(data=t_student.ppf(pos), mean=np.array([0, 0]).reshape(2, 1),\n dim=dim_, covariance=covariance_tau_, nu=nu_c, dist='t')\n / (t_student.pdf(t_student.ppf(U)) * t_student.pdf(t_student.ppf(V))))\n\n fig = plt.figure(figsize=(6, 6))\n ax_1 = fig.add_subplot(2, 2, 1, projection='3d')\n ax_2 = fig.add_subplot(2, 2, 2, projection='3d')\n ax_3 = fig.add_subplot(2, 2, 3)\n ax_4 = fig.add_subplot(2, 2, 4)\n ax_1.plot_surface(U, V, c_density_gauss, cmap=plt.cm.get_cmap('viridis'), linewidth=0.2, antialiased=False)\n ax_1.set_title('Gaussian copula')\n ax_2.plot_surface(U, V, c_density_t, cmap=plt.cm.get_cmap('viridis'), linewidth=0.2, antialiased=False)\n ax_2.set_title('t-student copula')\n ax_3.contour(U, V, c_density_gauss, levels=40)\n ax_4.contour(U, V, c_density_t, levels=40)\n plt.show()\n\n #%% Test maximum likelihood\n n_samples_ = 1000\n samples_t = samples_multivariate_t(mean=mean_, covariance=covariance_tau_, nu=4, n_samples=n_samples_)\n samples_gauss = multivariate_normal(mean_.squeeze(), covariance_tau_).rvs(n_samples_).T\n samples_mixed = np.hstack([samples_t, samples_gauss])\n samples = samples_t # Select the sample data set to continue the calculations\n # np.savetxt('data.csv', samples.T, delimiter=',') # For testing in Matlab R2018b\n\n plot_samples(samples)\n covariance_kendall, correlation_kendall, _, _ = covariance_kendall_tau(samples)\n mean_hat = samples.mean(axis=1).reshape(-1, 1)\n\n # uniform_samples = pit(samples.T).T # This code is from internet. 
This is just to check.\n uniform_samples, mod, model = probability_integral_transform(samples, plot=False, variable=1)\n\n u = uniform_samples[0, :]\n v = uniform_samples[1, :]\n plot_uniform_variables(u, v)\n\n cov_uniform, corr_uniform, _, _ = covariance_kendall_tau(uniform_samples)\n\n #%% Copula likelihood - Examples over sampled data - I AM MANUALLY FIXING THE NU!!!!!\n\n # 't-student copula' and 'Gaussian copula' - EXAMPLE PLOTS\n ans_t_copula = t_copula(uniform_samples, cov_uniform, nu=4, dim=2)\n ans_gaussian_copula = gaussian_copula(uniform_samples, cov_uniform, dim=2)\n\n fig = plt.figure(figsize=(12, 5))\n ax_1 = fig.add_subplot(121, projection='3d')\n ax_2 = fig.add_subplot(122, projection='3d')\n\n ax_1.scatter(uniform_samples[0, :], uniform_samples[1, :], ans_t_copula, marker='.', s=10)\n ax_1.set_zlim([0, 7.5])\n ax_1.set_title('t-student Copula')\n ax_2.scatter(uniform_samples[0, :], uniform_samples[1, :], ans_gaussian_copula, marker='.', s=10)\n ax_2.set_zlim([0, 7.5])\n ax_2.set_title('Gaussian Copula')\n plt.show()\n\n\n #%% Plot the z-scale of the data\n '''\n Visualization proposed by Czado - Analyzing Dependent Data with Vine Copulas\n z-scale: Marginal normalized scale (Z_1, Z_2), where Z_i := \\Phi^{-1} (U_i) = \\Phi^{-1}(F_i(X_i)) \n F_i: Pseudo-inverse \n \n g(z_1, z_2) = c( \\Phi(z_1), \\Phi(z_2)) * \\phi(z_1) * \\phi(z_2) \n \n '''\n\n z_i = norm.ppf(uniform_samples) # Uniform_samples has already applied the pseudo-inverse\n\n g = t_copula(norm.cdf(z_i), covariance=covariance_kendall, nu=4, dim=2) * norm.pdf(z_i[0, :]) * norm.pdf(z_i[1, :])\n\n tau = 0.7\n rho = np.sin(tau * np.pi / 2)\n\n\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(111)\n sns.kdeplot(z_i[0, :], z_i[1, :], ax=ax)\n ax.set_xlim([-3, 3])\n ax.set_ylim([-3, 3])\n # plot_samples(uniform_samples)\n # plot_samples(samples)\n\n # g = t_copula(uniform_samples, nu=3, covariance=covariance_kendall, dim=2) * norm.pdf(norm.ppf(uniform_samples[0,:])) * norm.pdf(norm.pdf(uniform_samples[1,:]))\n #\n # import matplotlib.tri as tri\n # import matplotlib.pyplot as plt\n #\n # plt.tricontour(x, y, z, 15, linewidths=0.5, colors='k')\n # plt.tricontourf(x, y, z, 15)\n #\n # plt.tricontour(norm.ppf(uniform_samples[0,:]).ravel(), norm.ppf(uniform_samples[1,:]).ravel(), g.ravel(), 15, linewidths=0.5, colors='k')\n # plt.tricontourf(x, y, z, 15)\n\n\n\n\n\n\n\n\n\n\n #%% Optimization by scipy - Over the copula - Finds the best 'nu'\n '''\n COPULA\n '''\n # Bound the degrees of freedom for the t-distribution\n nu_bounds = ((0, 200),)\n x0 = initial_guess(samples)\n result_2 = optimize.minimize(neg_log_likelihood_copula_t,\n x0=np.array(x0),\n method='SLSQP',\n bounds=nu_bounds,\n args=(uniform_samples,\n cov_uniform,\n dim_),\n options={'disp': True})\n\n print(f'Best nu value: {result_2.x}')\n print(f'Results:\\n {result_2}')\n\n # Log-likelihood gaussian\n values = -np.log(gaussian_copula(uniform_values=uniform_samples, covariance=cov_uniform, dim=dim_))\n neg_loglike = np.nansum(values)\n print(f'Gaussian neg Log-likelihood: {neg_loglike}')\n\n #%% Linear sweep (Very slow) - But you have the graph\n # neg_log_likelihood_t_plot(data_samples=samples, mean=mean_hat, covariance=covariance_kendall, dim=dim_)\n neg_log_likelihood_copula_t_plot(data_samples=uniform_samples, covariance=cov_uniform, dim=dim_, upper_bnd=100)\n\n #%% Optimization by scipy - Over the original samples (t-student fitting)\n '''\n SAMPLES - This doesn't fit a copula, just find the NU that fits the data (Common parameter fitting over 
data)\n '''\n # Bound the degrees of freedom for the t-distribution\n nu_bounds = ((0, 200),)\n result_2 = optimize.minimize(neg_log_likelihood_t,\n x0=np.array(3),\n method='SLSQP',\n bounds=nu_bounds,\n args=(samples,\n mean_hat,\n dim_,\n covariance_kendall,\n 't'),\n options={'disp': True})\n\n print(f'Best nu value: {result_2.x}')\n print(f'Results:\\n {result_2}')\n\n # Log-likelihood gaussian\n values = -np.log(elli_distribution(data=samples,\n mean=mean_hat,\n dim=dim_,\n covariance=covariance_,\n dist='gaussian'))\n neg_loglike = np.nansum(values)\n\n print(f'Gaussian neg Log-likelihood: {neg_loglike}')\n\n #%% Stochastic Optimization by scipy - Re-sampling to create a confidence interval for the nu\n nu_hat, nu_low_ci, nu_high_ci = optimize_nu(samples, mean_hat, covariance_kendall, dim_)\n\n #%% Empirical CDF\n plot_cdf_2d(samples)\n\n #%%\n\n\n\n\n\n"
] |
[
[
"numpy.nanmax",
"scipy.stats.norm.ppf",
"scipy.stats.norm.cdf",
"numpy.linspace",
"numpy.sqrt",
"numpy.vstack",
"numpy.nanmin",
"matplotlib.pyplot.get_cmap",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"scipy.stats.gaussian_kde",
"numpy.argmin",
"scipy.interpolate.griddata",
"numpy.exp",
"numpy.where",
"numpy.random.randint",
"numpy.hstack",
"scipy.stats.t.fit",
"scipy.stats.chi2",
"numpy.arange",
"numpy.sin",
"numpy.linalg.det",
"numpy.nansum",
"scipy.interpolate.interp1d",
"scipy.stats.t",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.log",
"scipy.special.gamma",
"matplotlib.pyplot.title",
"numpy.power",
"matplotlib.pyplot.cm.get_cmap",
"numpy.linalg.inv",
"numpy.quantile",
"scipy.interpolate.splev",
"matplotlib.ticker.NullFormatter",
"numpy.floor",
"numpy.linalg.cholesky",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.meshgrid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.hist",
"scipy.interpolate.splrep",
"numpy.linalg.solve",
"numpy.array_equal",
"scipy.stats.norm.pdf",
"numpy.dstack",
"numpy.ones",
"scipy.stats.norm",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlabel",
"numpy.empty"
]
] |
Xiang-cd/realsafe
|
[
"39f632e950562fa00ac26d34d13b2691c9c5f013",
"39f632e950562fa00ac26d34d13b2691c9c5f013"
] |
[
"realsafe/defense/bit_depth_reduction.py",
"realsafe/benchmark/attack_cli.py"
] |
[
"''' The bit depth reduction defense method. '''\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom realsafe.defense.input_transformation import input_transformation\n\n\ndef bit_depth_reduce(xs, x_min, x_max, step_num, alpha=1e6):\n ''' Run bit depth reduce on xs.\n\n :param xs: A batch of images to apply bit depth reduction.\n :param x_min: The minimum value of xs.\n :param x_max: The maximum value of xs.\n :param step_num: Step number for bit depth reduction.\n :param alpha: Alpha for bit depth reduction.\n :return: Bit depth reduced xs.\n '''\n # due to tf.custom_gradient's limitation, we need a wrapper here\n @tf.custom_gradient\n def bit_depth_reduce_op(xs_tf):\n steps = x_min + np.arange(1, step_num, dtype=np.float32) / (step_num / (x_max - x_min))\n steps = steps.reshape([1, 1, 1, 1, step_num-1])\n tf_steps = tf.constant(steps, dtype=tf.float32)\n\n inputs = tf.expand_dims(xs_tf, 4)\n quantized_inputs = x_min + tf.reduce_sum(tf.sigmoid(alpha * (inputs - tf_steps)), axis=4)\n quantized_inputs = quantized_inputs / ((step_num-1) / (x_max - x_min))\n\n def bit_depth_reduce_grad(d_output):\n return d_output\n\n return quantized_inputs, bit_depth_reduce_grad\n\n return bit_depth_reduce_op(xs)\n\n\ndef bit_depth_reduction(step_num, alpha=1e6):\n ''' A decorator to add bit depth reduce input transformation to a Classifier or a ClassifierWithLogits.\n\n It would leave the original classifier's logits' gradient untouched, so that white box attacks could still be\n applied to the new classifier.\n\n :param step_num: Step number for bit depth reduction.\n :param alpha: Alpha for bit depth reduction.\n '''\n def args_fn(model):\n return (model.x_min, model.x_max, step_num, alpha)\n\n def kwargs_fn(_):\n return {}\n\n return lambda rs_class: input_transformation(rs_class, bit_depth_reduce, args_fn, kwargs_fn)\n",
"''' Provide a command line tool to call AttackBenchmark directly. '''\n\nif __name__ == '__main__':\n import argparse\n import numpy as np\n import tensorflow as tf\n\n from realsafe import CrossEntropyLoss, CWLoss\n from realsafe.model.loader import load_model_from_path\n from realsafe.benchmark.attack import AttackBenchmark\n\n PARSER = argparse.ArgumentParser(description='Run attack on a classifier.')\n\n PARSER.add_argument(\n '--method', help='Attack method.', required=True,\n choices=['fgsm', 'bim', 'pgd', 'mim', 'cw', 'deepfool', 'nes', 'spsa', 'nattack', 'boundary', 'evolutionary'],\n )\n PARSER.add_argument('--dataset', help='Dataset for this model.', choices=['cifar10', 'imagenet'], required=True)\n PARSER.add_argument('--offset', help='Dataset offset.', type=int, required=True)\n PARSER.add_argument('--count', help='Number of examples to attack.', type=int, required=True)\n\n PARSER.add_argument('model', help='Path to the model\\'s python source file.')\n\n # Attack method initialization parameters\n PARSER.add_argument('--goal', help='Attack goal.', required=True, choices=['t', 'tm', 'ut'])\n PARSER.add_argument('--distance-metric', help='Attack\\' distance metric.', required=True, choices=['l_2', 'l_inf'])\n PARSER.add_argument('--batch-size', help='Batch size hint for attack method.', type=int, required=True)\n PARSER.add_argument('--learning-rate', help='Learning rate in CW attack.', type=float)\n PARSER.add_argument('--cw-loss-c', help='CWLoss\\'s c value in CW attack.', type=float)\n PARSER.add_argument('--samples-per-draw', type=int)\n PARSER.add_argument('--init-distortion', type=float)\n PARSER.add_argument('--dimension-reduction-height', type=int)\n PARSER.add_argument('--dimension-reduction-width', type=int)\n\n PARSER.add_argument('--iteration', type=int)\n PARSER.add_argument('--max-queries', type=int)\n PARSER.add_argument('--magnitude', type=float)\n PARSER.add_argument('--alpha', type=float)\n PARSER.add_argument('--rand-init-magnitude', type=float)\n PARSER.add_argument('--decay-factor', type=float)\n PARSER.add_argument('--cs', type=float)\n PARSER.add_argument('--search-steps', type=int)\n PARSER.add_argument('--binsearch-steps', type=int)\n PARSER.add_argument('--overshot', type=float)\n PARSER.add_argument('--sigma', type=float)\n PARSER.add_argument('--lr', type=float)\n PARSER.add_argument('--min-lr', type=float)\n PARSER.add_argument('--lr-tuning', action='store_true', default=False)\n PARSER.add_argument('--plateau-length', type=int)\n PARSER.add_argument('--max-directions', type=int)\n PARSER.add_argument('--spherical-step', type=float)\n PARSER.add_argument('--source-step', type=float)\n PARSER.add_argument('--step-adaptation', type=float)\n PARSER.add_argument('--mu', type=float)\n PARSER.add_argument('--c', type=float)\n PARSER.add_argument('--maxprocs', type=int)\n PARSER.add_argument('--logger', action='store_true', default=False)\n\n args = PARSER.parse_args()\n\n config_kwargs = dict()\n for kwarg in ('iteration', 'max_queries', 'magnitude', 'alpha', 'rand_init_magnitude', 'decay_factor', 'cs',\n 'search_steps', 'binsearch_steps', 'overshot', 'sigma', 'lr', 'min_lr', 'lr_tuning', 'plateau_length',\n 'max_directions', 'spherical_step', 'source_step', 'step_adaptation', 'mu', 'c', 'maxprocs'):\n attr = getattr(args, kwarg)\n if attr is not None:\n config_kwargs[kwarg] = attr\n\n logger = tf.get_logger()\n logger.setLevel(tf.logging.INFO)\n if args.logger:\n config_kwargs['logger'] = logger\n\n print('Loading tensorflow session...')\n config = 
tf.ConfigProto()\n config.gpu_options.allow_growth = True\n session = tf.Session(config=config)\n\n print('Loading model...')\n model = load_model_from_path(args.model).load(session)\n\n print('Loading dataset...')\n if args.dataset == 'cifar10':\n from realsafe.dataset import cifar10\n dataset = cifar10.load_dataset_for_classifier(model, offset=args.offset, load_target=True)\n else:\n from realsafe.dataset import imagenet\n dataset = imagenet.load_dataset_for_classifier(model, offset=args.offset, load_target=True)\n dataset = dataset.take(args.count)\n\n print('Loading attack...')\n attack_name, batch_size, dataset_name = args.method, args.batch_size, args.dataset\n goal, distance_metric = args.goal, args.distance_metric\n\n kwargs = dict()\n for kwarg in ('learning_rate', 'cw_loss_c', 'samples_per_draw', 'init_distortion'):\n attr = getattr(args, kwarg)\n if attr is not None:\n kwargs[kwarg] = attr\n if args.dimension_reduction_height is not None and args.dimension_reduction_width is not None:\n kwargs['dimension_reduction'] = (args.dimension_reduction_height, args.dimension_reduction_width)\n if attack_name in ('fgsm', 'bim', 'pgd', 'mim'):\n kwargs['loss'] = CrossEntropyLoss(model)\n elif attack_name in ('nes', 'spsa', 'nattack'):\n kwargs['loss'] = CWLoss(model)\n\n benchmark = AttackBenchmark(attack_name, model, batch_size, dataset_name, goal, distance_metric, session, **kwargs)\n\n print('Configuring attack...')\n benchmark.config(**config_kwargs)\n\n print('Running benchmark...')\n acc, acc_adv, total, succ, dist = benchmark.run(dataset, logger)\n print('n={}, acc={:3f}, adv_acc={:3f}, succ={:3f}, dist_mean={:3f}'.format(\n args.count,\n np.mean(acc.astype(np.float)), np.mean(acc_adv.astype(np.float)),\n np.sum(succ.astype(np.float)) / np.sum(total.astype(np.float)), np.mean(dist)\n ))\n"
] |
[
[
"numpy.arange",
"tensorflow.sigmoid",
"tensorflow.constant",
"tensorflow.expand_dims"
],
[
"tensorflow.ConfigProto",
"tensorflow.Session",
"numpy.mean",
"tensorflow.get_logger"
]
] |
GMadorell/programming-challenges
|
[
"b4fd6cf9bc4a61a6f3efc2c5ab2be43743044df8"
] |
[
"tuenti/tuenti_challenge_4/qualification/8_tuenti_restructuration/tuenti_restructuration.py"
] |
[
"#!/usr/bin/env python\n\"\"\"\nProblem description.\n\"\"\"\n\nfrom __future__ import division\nfrom Queue import PriorityQueue\nimport sys\nimport math\nimport numpy\nfrom pprintpp import pprint\nfrom scipy.spatial.distance import cityblock\n\n\nclass TuentiRestructurationSolver(object):\n def __init__(self, output_file=sys.stdout):\n self.__output_file = output_file\n self.solutions = []\n\n def solve(self, instances):\n solutions = []\n for i, instance in enumerate(instances, start=1):\n solutions.append(self.solve_instance(instance, i))\n\n for i, solution in enumerate(solutions, start=1):\n newline_needed = True if i != len(solutions) else False\n self.__output_file.write(\"{1}{2}\".format(i, solution, \"\\n\" if newline_needed else \"\"))\n\n def solve_instance(self, instance, instance_id):\n \"\"\"\n Where the magic happens.\n This method should return the solution (as a string) of the given instance.\n \"\"\"\n initial = instance.initial_table\n objective = instance.objective\n\n with open(\"DEBUG.txt\", \"a\") as fd:\n fd.write(\"Instance: {0}\\n {1}\\n{2}\\n------\\n\".format(instance_id, initial, objective))\n\n translator = self.get_translator(initial)\n initial = self.to_array(initial, translator)\n try:\n objective = self.to_array(objective, translator)\n except KeyError:\n return -1\n\n with open(\"DEBUG.txt\", \"a\") as fd:\n fd.write(\"Instance: {0}\\n {1}\\n{2}\\n------\\n\".format(instance_id, initial, objective))\n\n hard_heuristic_sol = \\\n self.solve_heuristic(initial, objective, lambda actual, target: euclidean(actual, target) / 1.4)\n return \"{0}\".format(hard_heuristic_sol)\n\n def get_translator(self, initial):\n translations = {}\n count = 1\n for row in initial:\n for item in row:\n if item:\n translations[item] = count\n count += 1\n else:\n translations[item] = 0\n return translations\n\n def to_array(self, initial, translator):\n as_array = numpy.zeros((3, 3), dtype=numpy.int8)\n\n for i in range(len(initial)):\n for j in range(len(initial)):\n as_array[i][j] = translator[initial[i][j]]\n\n return as_array\n\n def solve_heuristic(self, initial, objective, heuristic):\n initial_state = State(initial, 0, None, heuristic)\n\n # print(\"Initial heuristic: {0}\".format(initial_state.calculate_heuristic(objective)))\n\n q = StatePriorityQueue(objective)\n q.add_state(initial_state)\n\n while True:\n state = q.consume()\n if state.is_final(objective):\n break\n\n for new_state in state.expand():\n q.add_state(new_state)\n\n # print(\"\\n-----\\n\".join(map(lambda st: str(st), state.get_way_until_this_state())))\n\n return state.n_moves_made\n\n\nclass State(object):\n def __init__(self, position=None, n_moves_made=None, previous_state=None, heuristic=None):\n self.position = position\n self.n_moves_made = n_moves_made\n self.previous_state = previous_state\n self.h = heuristic\n\n def calculate_heuristic(self, target):\n return self.h(self.position, target) + self.n_moves_made\n # return self.n_moves_made\n\n def is_final(self, target):\n return manhattan_2d(self.position, target) == 0\n\n def expand(self):\n \"\"\" Returns the new states we can reach from this state. 
\"\"\"\n # print self.position\n states = []\n # Top left corner.\n states.append(self.create_modification((0, 0), (0, 1)))\n states.append(self.create_modification((0, 0), (1, 0)))\n\n # Top right corner.\n states.append(self.create_modification((0, 2), (0, 1)))\n states.append(self.create_modification((0, 2), (1, 2)))\n\n # Bot left corner.\n states.append(self.create_modification((2, 0), (2, 1)))\n states.append(self.create_modification((2, 0), (1, 0)))\n\n # Bot right corner.\n states.append(self.create_modification((2, 2), (2, 1)))\n states.append(self.create_modification((2, 2), (1, 2)))\n\n # Middle cells - clockwise starting from north.\n states.append(self.create_modification((0, 1), (1, 1)))\n states.append(self.create_modification((1, 2), (1, 1)))\n states.append(self.create_modification((2, 1), (1, 1)))\n states.append(self.create_modification((1, 0), (1, 1)))\n\n # pprint(map(lambda state: state.position, states))\n return states\n\n def create_modification(self, from_coords, to_coords):\n new_pos = self.position.copy()\n x1, y1 = from_coords\n x2, y2 = to_coords\n new_pos[x1, y1], new_pos[x2, y2] = new_pos[x2, y2], new_pos[x1, y1]\n\n return State(new_pos, self.n_moves_made + 1, self, self.h)\n\n def get_way_until_this_state(self):\n \"\"\" Debug method. \"\"\"\n if self.previous_state is None:\n return [self.position]\n return self.previous_state.get_way_until_this_state() + [self.position]\n\n\nclass StateHistory(object):\n def __init__(self):\n self.history = []\n\n def add(self, array):\n self.history.append(array)\n\n def __contains__(self, check_state):\n for state in self.history:\n if not numpy.array_equal(state.position, check_state.position):\n return False\n return True\n\n def find_equal_position_state(self, check_state):\n for state in self.history:\n if numpy.array_equal(state.position, check_state.position):\n return state\n raise LookupError(\"Not found\")\n\n def delete(self, state):\n self.history.remove(state)\n\n\nclass StatePriorityQueue(object):\n def __init__(self, objective):\n self.q = PriorityQueue()\n self.objective = objective\n\n def consume(self):\n assert not self.q.empty()\n return self.q.get()[1]\n\n def add_state(self, state):\n self.q.put((state.calculate_heuristic(self.objective), state))\n\n\ndef manhattan_2d(first, second):\n manh = 0\n for i in range(len(second)):\n for j in range(len(second)):\n # if second[i, j] != 0: # Remove 'if' if you want a full calculation.\n x, y = numpy.nonzero(first == second[i, j])\n #\n # # print x, y, i, j\n # print abs(x[0] - i) + abs(y[0] - j)\n manh += abs(x[0] - i) + abs(y[0] - j)\n\n return manh\n\n\ndef euclidean(first, second):\n euc = 0\n for i in range(len(second)):\n for j in range(len(second)):\n # if second[i, j] != 0: # Remove 'if' if you want a full calculation.\n x, y = numpy.nonzero(first == second[i, j])\n euc += math.sqrt((x[0] - i) ** 2 + (y[0] - j) ** 2)\n return euc\n\n\nclass TuentiRestructurationInstance(object):\n def __init__(self):\n self.initial_table = None\n self.objective = None\n\n\nclass TuentiRestructurationParser(object):\n def __init__(self):\n data = sys.stdin.readlines()\n data = map(lambda s: s.strip(), data)\n\n self.amount_samples = int(data[0][0])\n self.data = data[1:]\n self.instances = []\n\n self.parse()\n\n def parse(self):\n \"\"\"\n This method should populate the instances list.\n \"\"\"\n for i in range(0, len(self.data), 8):\n instance = TuentiRestructurationInstance()\n\n initial_table = []\n initial_table.append(map(lambda s: s.strip(), self.data[i + 
1].split(\",\")))\n initial_table.append(map(lambda s: s.strip(), self.data[i + 2].split(\",\")))\n initial_table.append(map(lambda s: s.strip(), self.data[i + 3].split(\",\")))\n instance.initial_table = initial_table\n\n objective_table = []\n objective_table.append(map(lambda s: s.strip(), self.data[i + 5].split(\",\")))\n objective_table.append(map(lambda s: s.strip(), self.data[i + 6].split(\",\")))\n objective_table.append(map(lambda s: s.strip(), self.data[i + 7].split(\",\")))\n instance.objective = objective_table\n\n self.instances.append(instance)\n\n def get_data_as_type(self, type_):\n return map(lambda row: map(lambda element: type_(element), row), self.data)\n\n\nif __name__ == \"__main__\":\n parser = TuentiRestructurationParser()\n solver = TuentiRestructurationSolver()\n solver.solve(parser.instances)"
] |
[
[
"numpy.nonzero",
"numpy.zeros",
"numpy.array_equal"
]
] |
timgates42/hyperopt
|
[
"63b5b9bf379fc55f6a158e17c400c1d8bb780fff"
] |
[
"hyperopt/tests/test_fmin.py"
] |
[
"import unittest\nimport numpy as np\nimport nose.tools\nfrom timeit import default_timer as timer\nimport time\nfrom hyperopt.early_stop import no_progress_loss\nfrom hyperopt.fmin import generate_trials_to_calculate\n\nfrom hyperopt import (\n fmin,\n rand,\n tpe,\n hp,\n Trials,\n exceptions,\n space_eval,\n STATUS_FAIL,\n STATUS_OK,\n)\nfrom hyperopt.base import JOB_STATE_ERROR\n\n\ndef test_quadratic1_rand():\n trials = Trials()\n\n argmin = fmin(\n fn=lambda x: (x - 3) ** 2,\n space=hp.uniform(\"x\", -5, 5),\n algo=rand.suggest,\n max_evals=500,\n trials=trials,\n )\n\n assert len(trials) == 500\n assert abs(argmin[\"x\"] - 3.0) < 0.25\n\n\ndef test_quadratic1_tpe(trials=Trials()):\n\n argmin = fmin(\n fn=lambda x: (x - 3) ** 2,\n space=hp.uniform(\"x\", -5, 5),\n algo=tpe.suggest,\n max_evals=50,\n trials=trials,\n )\n\n assert len(trials) == 50, len(trials)\n assert abs(argmin[\"x\"] - 3.0) < 0.25, argmin\n\n\ndef test_quadratic1_anneal():\n trials = Trials()\n import hyperopt.anneal\n\n N = 30\n\n def fn(x):\n return (x - 3) ** 2\n\n argmin = fmin(\n fn=fn,\n space=hp.uniform(\"x\", -5, 5),\n algo=hyperopt.anneal.suggest,\n max_evals=N,\n trials=trials,\n )\n\n print(argmin)\n\n assert len(trials) == N\n assert abs(argmin[\"x\"] - 3.0) < 0.25\n\n\n@nose.tools.raises(exceptions.DuplicateLabel)\ndef test_duplicate_label_is_error():\n trials = Trials()\n\n def fn(xy):\n x, y = xy\n return x ** 2 + y ** 2\n\n fmin(\n fn=fn,\n space=[hp.uniform(\"x\", -5, 5), hp.uniform(\"x\", -5, 5)],\n algo=rand.suggest,\n max_evals=500,\n trials=trials,\n )\n\n\ndef test_space_eval():\n space = hp.choice(\n \"a\",\n [\n (\"case 1\", 1 + hp.lognormal(\"c1\", 0, 1)),\n (\"case 2\", hp.uniform(\"c2\", -10, 10)),\n ],\n )\n\n assert space_eval(space, {\"a\": 0, \"c1\": 1.0}) == (\"case 1\", 2.0)\n assert space_eval(space, {\"a\": 1, \"c2\": 3.5}) == (\"case 2\", 3.5)\n\n\ndef test_set_fmin_rstate():\n def lossfn(x):\n return (x - 3) ** 2\n\n trials_seed0 = Trials()\n argmin_seed0 = fmin(\n fn=lossfn,\n space=hp.uniform(\"x\", -5, 5),\n algo=rand.suggest,\n max_evals=1,\n trials=trials_seed0,\n rstate=np.random.RandomState(0),\n )\n assert len(trials_seed0) == 1\n trials_seed1 = Trials()\n argmin_seed1 = fmin(\n fn=lossfn,\n space=hp.uniform(\"x\", -5, 5),\n algo=rand.suggest,\n max_evals=1,\n trials=trials_seed1,\n rstate=np.random.RandomState(1),\n )\n assert len(trials_seed1) == 1\n assert argmin_seed0 != argmin_seed1\n\n\ndef test_fmin_return_argmin():\n def fn(x):\n return x\n\n space = hp.choice(\"x\", [100, 5, 10])\n # With return_argmin=False it should return the\n # best parameter values\n best_parameter = fmin(\n fn=fn,\n space=space,\n max_evals=10,\n algo=rand.suggest,\n return_argmin=False,\n rstate=np.random.RandomState(0),\n )\n assert best_parameter == 5\n\n # With return_argmin=True it should return the\n # optimal point in the sample space\n best_args = fmin(\n fn=fn,\n space=space,\n max_evals=10,\n algo=rand.suggest,\n return_argmin=True,\n rstate=np.random.RandomState(0),\n )\n assert best_args[\"x\"] == 1\n\n\nclass TestFmin(unittest.TestCase):\n class SomeError(Exception):\n # XXX also test domain.exceptions mechanism that actually catches this\n pass\n\n def eval_fn(self, space):\n raise TestFmin.SomeError()\n\n def setUp(self):\n self.trials = Trials()\n\n def test_catch_eval_exceptions_True(self):\n\n # -- should go to max_evals, catching all exceptions, so all jobs\n # should have JOB_STATE_ERROR\n fmin(\n self.eval_fn,\n space=hp.uniform(\"x\", 0, 1),\n algo=rand.suggest,\n 
trials=self.trials,\n max_evals=2,\n catch_eval_exceptions=True,\n return_argmin=False,\n )\n trials = self.trials\n assert len(trials) == 0\n assert len(trials._dynamic_trials) == 2\n assert trials._dynamic_trials[0][\"state\"] == JOB_STATE_ERROR\n assert trials._dynamic_trials[0][\"misc\"][\"error\"] != None\n assert trials._dynamic_trials[1][\"state\"] == JOB_STATE_ERROR\n assert trials._dynamic_trials[1][\"misc\"][\"error\"] != None\n\n def test_catch_eval_exceptions_False(self):\n with self.assertRaises(TestFmin.SomeError):\n fmin(\n self.eval_fn,\n space=hp.uniform(\"x\", 0, 1),\n algo=rand.suggest,\n trials=self.trials,\n max_evals=2,\n catch_eval_exceptions=False,\n )\n print(len(self.trials))\n assert len(self.trials) == 0\n assert len(self.trials._dynamic_trials) == 1\n\n\ndef test_status_fail_tpe():\n trials = Trials()\n\n argmin = fmin(\n fn=lambda x: (\n {\"loss\": (x - 3) ** 2, \"status\": STATUS_OK}\n if (x < 0)\n else {\"status\": STATUS_FAIL}\n ),\n space=hp.uniform(\"x\", -5, 5),\n algo=tpe.suggest,\n max_evals=50,\n trials=trials,\n )\n\n assert len(trials) == 50, len(trials)\n assert argmin[\"x\"] < 0, argmin\n assert \"loss\" in trials.best_trial[\"result\"], \"loss\" in trials.best_trial[\"result\"]\n assert trials.best_trial[\"result\"][\"loss\"] >= 9, trials.best_trial[\"result\"][\"loss\"]\n\n\nclass TestGenerateTrialsToCalculate(unittest.TestCase):\n def test_generate_trials_to_calculate(self):\n points = [{\"x\": 0.0, \"y\": 0.0}, {\"x\": 1.0, \"y\": 1.0}]\n best = fmin(\n fn=lambda space: space[\"x\"] ** 2 + space[\"y\"] ** 2,\n space={\"x\": hp.uniform(\"x\", -10, 10), \"y\": hp.uniform(\"y\", -10, 10)},\n algo=tpe.suggest,\n max_evals=10,\n points_to_evaluate=points,\n )\n assert best[\"x\"] == 0.0\n assert best[\"y\"] == 0.0\n\n\ndef test_timeout():\n def fn(x):\n return [time.sleep(1), x][1]\n\n space = hp.choice(\"x\", range(20))\n\n start_time_1 = timer()\n fmin(\n fn=fn,\n space=space,\n max_evals=10,\n timeout=1,\n algo=rand.suggest,\n return_argmin=False,\n rstate=np.random.RandomState(0),\n )\n end_time_1 = timer()\n assert (end_time_1 - start_time_1) < 2\n assert (end_time_1 - start_time_1) > 0.9\n\n start_time_5 = timer()\n fmin(\n fn=fn,\n space=space,\n max_evals=10,\n timeout=5,\n algo=rand.suggest,\n return_argmin=False,\n rstate=np.random.RandomState(0),\n )\n end_time_5 = timer()\n assert (end_time_5 - start_time_5) < 6\n assert (end_time_5 - start_time_5) > 4.9\n\n\ndef test_invalid_timeout():\n def fn(x):\n return [time.sleep(1), x][1]\n\n space = hp.choice(\"x\", range(20))\n\n for wrong_timeout in [-1, True]:\n expected_message = \"The timeout argument should be None or a positive value. 
Given value: {m}\".format(\n m=wrong_timeout\n )\n try:\n fmin(\n fn=fn,\n space=space,\n max_evals=10,\n timeout=wrong_timeout,\n algo=rand.suggest,\n return_argmin=False,\n rstate=np.random.RandomState(0),\n )\n except Exception as e:\n assert str(e) == expected_message\n\n\ndef test_loss_threshold():\n loss_threshold = 0.001\n hypopt_trials = Trials()\n fmin(\n fn=lambda x: x ** 2,\n space=hp.uniform(\"x\", -10, 10),\n loss_threshold=loss_threshold,\n algo=rand.suggest,\n trials=hypopt_trials,\n rstate=np.random.RandomState(0),\n )\n best_loss = hypopt_trials.best_trial[\"result\"][\"loss\"]\n assert best_loss <= loss_threshold\n assert len(hypopt_trials) > 0\n\n\ndef test_invalid_loss_threshold():\n def fn(x):\n return [time.sleep(1), x][1]\n\n space = hp.choice(\"x\", range(20))\n\n for wrong_loss_threshold in [\"a\", True]:\n expected_message = (\n \"The loss_threshold argument should be None \"\n \"or a numeric value. Given value: {m}\".format(m=wrong_loss_threshold)\n )\n try:\n fmin(\n fn=fn,\n space=space,\n max_evals=10,\n loss_threshold=wrong_loss_threshold,\n algo=rand.suggest,\n return_argmin=False,\n rstate=np.random.RandomState(0),\n )\n except Exception as e:\n assert str(e) == expected_message\n\n\ndef test_early_stop():\n trials = Trials()\n\n # basic stop after 100 trials\n def stop(trial, count=0):\n return count + 1 >= 100, [count + 1]\n\n fmin(\n fn=lambda x: x,\n space=hp.uniform(\"x\", -5, 5),\n algo=rand.suggest,\n max_evals=500,\n trials=trials,\n early_stop_fn=stop,\n )\n\n assert len(trials) == 100\n\n\ndef test_early_stop_no_progress_loss():\n trials = generate_trials_to_calculate([{\"x\": -100}])\n fmin(\n fn=lambda x: x,\n space=hp.uniform(\"x\", -5, 5),\n algo=rand.suggest,\n max_evals=500,\n trials=trials,\n early_stop_fn=no_progress_loss(10),\n )\n\n assert len(trials) == 10\n"
] |
[
[
"numpy.random.RandomState"
]
] |
photoszzt/cupy
|
[
"05b7a50815b7f43ccfb504cf8c8b104a7093f9eb"
] |
[
"cupy/__init__.py"
] |
[
"import functools as _functools\nimport sys as _sys\nimport warnings as _warnings\n\nimport numpy as _numpy\n\nfrom cupy import _environment\nfrom cupy import _version\n\n\n_environment._detect_duplicate_installation() # NOQA\n_environment._setup_win32_dll_directory() # NOQA\n_environment._preload_libraries() # NOQA\n\n\ntry:\n from cupy import _core # NOQA\nexcept ImportError as e:\n # _core is a c-extension module.\n # When a user cannot import _core, it represents that CuPy is not correctly\n # built.\n _exc_info = _sys.exc_info()\n _msg = ('''\\\nCuPy is not correctly installed.\n\nIf you are using wheel distribution (cupy-cudaXX), make sure that the version of CuPy you installed matches with the version of CUDA on your host.\nAlso, confirm that only one CuPy package is installed:\n $ pip freeze\n\nIf you are building CuPy from source, please check your environment, uninstall CuPy and reinstall it with:\n $ pip install cupy --no-cache-dir -vvvv\n\nCheck the Installation Guide for details:\n https://docs.cupy.dev/en/latest/install.html\n\noriginal error: {}'''.format(_exc_info[1])) # NOQA\n\n raise ImportError(_msg) from e\n\n\nfrom cupy import cuda # NOQA\n# Do not make `cupy.cupyx` available because it is confusing.\nimport cupyx as _cupyx # NOQA\n\n\ndef is_available():\n return cuda.is_available()\n\n\n__version__ = _version.__version__\n\n\nfrom cupy import fft # NOQA\nfrom cupy import linalg # NOQA\nfrom cupy import polynomial # NOQA\nfrom cupy import random # NOQA\n# `cupy.sparse` is deprecated in v8\nfrom cupy import sparse # NOQA\nfrom cupy import testing # NOQA # NOQA\n\n\n# import class and function\nfrom cupy._core import ndarray # NOQA\nfrom cupy._core import ufunc # NOQA\n\n\n# =============================================================================\n# Constants (borrowed from NumPy)\n# =============================================================================\nfrom numpy import e # NOQA\nfrom numpy import euler_gamma # NOQA\nfrom numpy import Inf # NOQA\nfrom numpy import inf # NOQA\nfrom numpy import Infinity # NOQA\nfrom numpy import infty # NOQA\nfrom numpy import NAN # NOQA\nfrom numpy import NaN # NOQA\nfrom numpy import nan # NOQA\nfrom numpy import newaxis # == None # NOQA\nfrom numpy import NINF # NOQA\nfrom numpy import NZERO # NOQA\nfrom numpy import pi # NOQA\nfrom numpy import PINF # NOQA\nfrom numpy import PZERO # NOQA\n\n\n# =============================================================================\n# Data types (borrowed from NumPy)\n#\n# The order of these declarations are borrowed from the NumPy document:\n# https://docs.scipy.org/doc/numpy/reference/arrays.scalars.html\n# =============================================================================\n\n# -----------------------------------------------------------------------------\n# Generic types\n# -----------------------------------------------------------------------------\nfrom numpy import complexfloating # NOQA\nfrom numpy import floating # NOQA\nfrom numpy import generic # NOQA\nfrom numpy import inexact # NOQA\nfrom numpy import integer # NOQA\nfrom numpy import number # NOQA\nfrom numpy import signedinteger # NOQA\nfrom numpy import unsignedinteger # NOQA\n\n# Not supported by CuPy:\n# from numpy import flexible\n# from numpy import character\n\n# -----------------------------------------------------------------------------\n# Booleans\n# -----------------------------------------------------------------------------\nfrom numpy import bool_ # NOQA\n\nfrom numpy import bool8 # NOQA\n\n# 
-----------------------------------------------------------------------------\n# Integers\n# -----------------------------------------------------------------------------\nfrom numpy import byte # NOQA\n\nfrom numpy import short # NOQA\n\nfrom numpy import intc # NOQA\n\nfrom numpy import int_ # NOQA\n\nfrom numpy import longlong # NOQA\n\nfrom numpy import intp # NOQA\n\nfrom numpy import int8 # NOQA\n\nfrom numpy import int16 # NOQA\n\nfrom numpy import int32 # NOQA\n\nfrom numpy import int64 # NOQA\n\n# -----------------------------------------------------------------------------\n# Unsigned integers\n# -----------------------------------------------------------------------------\nfrom numpy import ubyte # NOQA\n\nfrom numpy import ushort # NOQA\n\nfrom numpy import uintc # NOQA\n\nfrom numpy import uint # NOQA\n\nfrom numpy import ulonglong # NOQA\n\nfrom numpy import uintp # NOQA\n\nfrom numpy import uint8 # NOQA\n\nfrom numpy import uint16 # NOQA\n\nfrom numpy import uint32 # NOQA\n\nfrom numpy import uint64 # NOQA\n\n# -----------------------------------------------------------------------------\n# Floating-point numbers\n# -----------------------------------------------------------------------------\nfrom numpy import half # NOQA\n\nfrom numpy import single # NOQA\n\nfrom numpy import double # NOQA\n\nfrom numpy import float_ # NOQA\n\nfrom numpy import longfloat # NOQA\n\nfrom numpy import float16 # NOQA\n\nfrom numpy import float32 # NOQA\n\nfrom numpy import float64 # NOQA\n\n# Not supported by CuPy:\n# from numpy import float96\n# from numpy import float128\n\n# -----------------------------------------------------------------------------\n# Complex floating-point numbers\n# -----------------------------------------------------------------------------\nfrom numpy import csingle # NOQA\n\nfrom numpy import complex_ # NOQA\n\nfrom numpy import complex64 # NOQA\n\nfrom numpy import complex128 # NOQA\n\n# Not supported by CuPy:\n# from numpy import complex192\n# from numpy import complex256\n# from numpy import clongfloat\n\n# -----------------------------------------------------------------------------\n# Any Python object\n# -----------------------------------------------------------------------------\n\n# Not supported by CuPy:\n# from numpy import object_\n# from numpy import bytes_\n# from numpy import unicode_\n# from numpy import void\n\n# -----------------------------------------------------------------------------\n# Built-in Python types\n# -----------------------------------------------------------------------------\n\n# =============================================================================\n# Routines\n#\n# The order of these declarations are borrowed from the NumPy document:\n# https://docs.scipy.org/doc/numpy/reference/routines.html\n# =============================================================================\n\n# -----------------------------------------------------------------------------\n# Array creation routines\n# -----------------------------------------------------------------------------\nfrom cupy._creation.basic import empty # NOQA\nfrom cupy._creation.basic import empty_like # NOQA\nfrom cupy._creation.basic import eye # NOQA\nfrom cupy._creation.basic import full # NOQA\nfrom cupy._creation.basic import full_like # NOQA\nfrom cupy._creation.basic import identity # NOQA\nfrom cupy._creation.basic import ones # NOQA\nfrom cupy._creation.basic import ones_like # NOQA\nfrom cupy._creation.basic import zeros # NOQA\nfrom cupy._creation.basic import 
zeros_like # NOQA\n\nfrom cupy._creation.from_data import copy # NOQA\nfrom cupy._creation.from_data import array # NOQA\nfrom cupy._creation.from_data import asanyarray # NOQA\nfrom cupy._creation.from_data import asarray # NOQA\nfrom cupy._creation.from_data import ascontiguousarray # NOQA\nfrom cupy._creation.from_data import fromfile # NOQA\n\nfrom cupy._creation.ranges import arange # NOQA\nfrom cupy._creation.ranges import linspace # NOQA\nfrom cupy._creation.ranges import logspace # NOQA\nfrom cupy._creation.ranges import meshgrid # NOQA\nfrom cupy._creation.ranges import mgrid # NOQA\nfrom cupy._creation.ranges import ogrid # NOQA\n\nfrom cupy._creation.matrix import diag # NOQA\nfrom cupy._creation.matrix import diagflat # NOQA\nfrom cupy._creation.matrix import tri # NOQA\nfrom cupy._creation.matrix import tril # NOQA\nfrom cupy._creation.matrix import triu # NOQA\n\n# -----------------------------------------------------------------------------\n# Functional routines\n# -----------------------------------------------------------------------------\nfrom cupy._functional.piecewise import piecewise # NOQA\nfrom cupy._functional.vectorize import vectorize # NOQA\nfrom cupy.lib._shape_base import apply_along_axis # NOQA\n\n# -----------------------------------------------------------------------------\n# Array manipulation routines\n# -----------------------------------------------------------------------------\nfrom cupy._manipulation.basic import copyto # NOQA\n\nfrom cupy._manipulation.shape import shape # NOQA\nfrom cupy._manipulation.shape import ravel # NOQA\nfrom cupy._manipulation.shape import reshape # NOQA\n\nfrom cupy._manipulation.transpose import moveaxis # NOQA\nfrom cupy._manipulation.transpose import rollaxis # NOQA\nfrom cupy._manipulation.transpose import swapaxes # NOQA\nfrom cupy._manipulation.transpose import transpose # NOQA\n\nfrom cupy._manipulation.dims import atleast_1d # NOQA\nfrom cupy._manipulation.dims import atleast_2d # NOQA\nfrom cupy._manipulation.dims import atleast_3d # NOQA\nfrom cupy._manipulation.dims import broadcast # NOQA\nfrom cupy._manipulation.dims import broadcast_arrays # NOQA\nfrom cupy._manipulation.dims import broadcast_to # NOQA\nfrom cupy._manipulation.dims import expand_dims # NOQA\nfrom cupy._manipulation.dims import squeeze # NOQA\n\nfrom cupy._manipulation.join import column_stack # NOQA\nfrom cupy._manipulation.join import concatenate # NOQA\nfrom cupy._manipulation.join import dstack # NOQA\nfrom cupy._manipulation.join import hstack # NOQA\nfrom cupy._manipulation.join import stack # NOQA\nfrom cupy._manipulation.join import vstack # NOQA\n\nfrom cupy._manipulation.kind import asfortranarray # NOQA\nfrom cupy._manipulation.kind import require # NOQA\n\nfrom cupy._manipulation.split import array_split # NOQA\nfrom cupy._manipulation.split import dsplit # NOQA\nfrom cupy._manipulation.split import hsplit # NOQA\nfrom cupy._manipulation.split import split # NOQA\nfrom cupy._manipulation.split import vsplit # NOQA\n\nfrom cupy._manipulation.tiling import repeat # NOQA\nfrom cupy._manipulation.tiling import tile # NOQA\n\nfrom cupy._manipulation.add_remove import append # NOQA\nfrom cupy._manipulation.add_remove import resize # NOQA\nfrom cupy._manipulation.add_remove import unique # NOQA\nfrom cupy._manipulation.add_remove import trim_zeros # NOQA\n\nfrom cupy._manipulation.rearrange import flip # NOQA\nfrom cupy._manipulation.rearrange import fliplr # NOQA\nfrom cupy._manipulation.rearrange import flipud # NOQA\nfrom 
cupy._manipulation.rearrange import roll # NOQA\nfrom cupy._manipulation.rearrange import rot90 # NOQA\n\n# -----------------------------------------------------------------------------\n# Binary operations\n# -----------------------------------------------------------------------------\nfrom cupy._binary.elementwise import bitwise_and # NOQA\nfrom cupy._binary.elementwise import bitwise_or # NOQA\nfrom cupy._binary.elementwise import bitwise_xor # NOQA\nfrom cupy._binary.elementwise import bitwise_not # NOQA\nfrom cupy._binary.elementwise import invert # NOQA\nfrom cupy._binary.elementwise import left_shift # NOQA\nfrom cupy._binary.elementwise import right_shift # NOQA\n\nfrom cupy._binary.packing import packbits # NOQA\nfrom cupy._binary.packing import unpackbits # NOQA\n\n\ndef binary_repr(num, width=None):\n \"\"\"Return the binary representation of the input number as a string.\n\n .. seealso:: :func:`numpy.binary_repr`\n \"\"\"\n return _numpy.binary_repr(num, width)\n\n\n# -----------------------------------------------------------------------------\n# Data type routines (borrowed from NumPy)\n# -----------------------------------------------------------------------------\ndef can_cast(from_, to, casting='safe'):\n \"\"\"Returns True if cast between data types can occur according to the\n casting rule. If from is a scalar or array scalar, also returns True if the\n scalar value can be cast without overflow or truncation to an integer.\n\n .. seealso:: :func:`numpy.can_cast`\n \"\"\"\n from_ = from_.dtype if isinstance(from_, ndarray) else from_\n return _numpy.can_cast(from_, to, casting=casting)\n\n\ndef common_type(*arrays):\n \"\"\"Return a scalar type which is common to the input arrays.\n\n .. seealso:: :func:`numpy.common_type`\n \"\"\"\n if len(arrays) == 0:\n return _numpy.float16\n\n default_float_dtype = _numpy.dtype('float64')\n dtypes = []\n for a in arrays:\n if a.dtype.kind == 'b':\n raise TypeError('can\\'t get common type for non-numeric array')\n elif a.dtype.kind in 'iu':\n dtypes.append(default_float_dtype)\n else:\n dtypes.append(a.dtype)\n\n return _functools.reduce(_numpy.promote_types, dtypes).type\n\n\ndef result_type(*arrays_and_dtypes):\n \"\"\"Returns the type that results from applying the NumPy type promotion\n rules to the arguments.\n\n .. 
seealso:: :func:`numpy.result_type`\n \"\"\"\n dtypes = [a.dtype if isinstance(a, ndarray)\n else a for a in arrays_and_dtypes]\n return _numpy.result_type(*dtypes)\n\n\nfrom numpy import min_scalar_type # NOQA\nfrom numpy import obj2sctype # NOQA\nfrom numpy import promote_types # NOQA\n\nfrom numpy import dtype # NOQA\nfrom numpy import format_parser # NOQA\n\nfrom numpy import finfo # NOQA\nfrom numpy import iinfo # NOQA\nfrom numpy import MachAr # NOQA\n\nfrom numpy import find_common_type # NOQA\nfrom numpy import issctype # NOQA\nfrom numpy import issubclass_ # NOQA\nfrom numpy import issubdtype # NOQA\nfrom numpy import issubsctype # NOQA\n\nfrom numpy import mintypecode # NOQA\nfrom numpy import sctype2char # NOQA\nfrom numpy import typename # NOQA\n\n# -----------------------------------------------------------------------------\n# Optionally Scipy-accelerated routines\n# -----------------------------------------------------------------------------\n# TODO(beam2d): Implement it\n\n# -----------------------------------------------------------------------------\n# Discrete Fourier Transform\n# -----------------------------------------------------------------------------\n# TODO(beam2d): Implement it\n\n# -----------------------------------------------------------------------------\n# Indexing routines\n# -----------------------------------------------------------------------------\nfrom cupy._indexing.generate import c_ # NOQA\nfrom cupy._indexing.generate import indices # NOQA\nfrom cupy._indexing.generate import ix_ # NOQA\nfrom cupy._indexing.generate import r_ # NOQA\nfrom cupy._indexing.generate import ravel_multi_index # NOQA\nfrom cupy._indexing.generate import unravel_index # NOQA\n\nfrom cupy._indexing.indexing import choose # NOQA\nfrom cupy._indexing.indexing import compress # NOQA\nfrom cupy._indexing.indexing import diagonal # NOQA\nfrom cupy._indexing.indexing import extract # NOQA\nfrom cupy._indexing.indexing import select # NOQA\nfrom cupy._indexing.indexing import take # NOQA\nfrom cupy._indexing.indexing import take_along_axis # NOQA\n\nfrom cupy._indexing.insert import place # NOQA\nfrom cupy._indexing.insert import put # NOQA\nfrom cupy._indexing.insert import putmask # NOQA\nfrom cupy._indexing.insert import fill_diagonal # NOQA\nfrom cupy._indexing.insert import diag_indices # NOQA\nfrom cupy._indexing.insert import diag_indices_from # NOQA\n\nfrom cupy._indexing.iterate import flatiter # NOQA\n\n# -----------------------------------------------------------------------------\n# Input and output\n# -----------------------------------------------------------------------------\nfrom cupy._io.npz import load # NOQA\nfrom cupy._io.npz import save # NOQA\nfrom cupy._io.npz import savez # NOQA\nfrom cupy._io.npz import savez_compressed # NOQA\n\nfrom cupy._io.formatting import array_repr # NOQA\nfrom cupy._io.formatting import array_str # NOQA\n\n\ndef base_repr(number, base=2, padding=0): # NOQA (needed to avoid redefinition of `number`)\n \"\"\"Return a string representation of a number in the given base system.\n\n .. 
seealso:: :func:`numpy.base_repr`\n \"\"\"\n return _numpy.base_repr(number, base, padding)\n\n\n# -----------------------------------------------------------------------------\n# Linear algebra\n# -----------------------------------------------------------------------------\nfrom cupy.linalg._einsum import einsum # NOQA\n\nfrom cupy.linalg._product import cross # NOQA\nfrom cupy.linalg._product import dot # NOQA\nfrom cupy.linalg._product import inner # NOQA\nfrom cupy.linalg._product import kron # NOQA\nfrom cupy.linalg._product import matmul # NOQA\nfrom cupy.linalg._product import outer # NOQA\nfrom cupy.linalg._product import tensordot # NOQA\nfrom cupy.linalg._product import vdot # NOQA\n\nfrom cupy.linalg._norms import trace # NOQA\n\n# -----------------------------------------------------------------------------\n# Logic functions\n# -----------------------------------------------------------------------------\nfrom cupy._logic.comparison import allclose # NOQA\nfrom cupy._logic.comparison import array_equal # NOQA\nfrom cupy._logic.comparison import isclose # NOQA\n\nfrom cupy._logic.content import isfinite # NOQA\nfrom cupy._logic.content import isinf # NOQA\nfrom cupy._logic.content import isnan # NOQA\n\nfrom cupy._logic.truth import in1d # NOQA\nfrom cupy._logic.truth import isin # NOQA\n\nfrom cupy._logic.type_test import iscomplex # NOQA\nfrom cupy._logic.type_test import iscomplexobj # NOQA\nfrom cupy._logic.type_test import isfortran # NOQA\nfrom cupy._logic.type_test import isreal # NOQA\nfrom cupy._logic.type_test import isrealobj # NOQA\n\nfrom cupy._logic.truth import in1d # NOQA\nfrom cupy._logic.truth import isin # NOQA\n\n\ndef isscalar(element):\n \"\"\"Returns True if the type of num is a scalar type.\n\n .. seealso:: :func:`numpy.isscalar`\n \"\"\"\n return _numpy.isscalar(element)\n\n\nfrom cupy._logic.ops import logical_and # NOQA\nfrom cupy._logic.ops import logical_not # NOQA\nfrom cupy._logic.ops import logical_or # NOQA\nfrom cupy._logic.ops import logical_xor # NOQA\n\nfrom cupy._logic.comparison import equal # NOQA\nfrom cupy._logic.comparison import greater # NOQA\nfrom cupy._logic.comparison import greater_equal # NOQA\nfrom cupy._logic.comparison import less # NOQA\nfrom cupy._logic.comparison import less_equal # NOQA\nfrom cupy._logic.comparison import not_equal # NOQA\n\nfrom cupy._logic.truth import all # NOQA\nfrom cupy._logic.truth import any # NOQA\n\n# ------------------------------------------------------------------------------\n# Polynomial functions\n# ------------------------------------------------------------------------------\nfrom cupy.lib._polynomial import poly1d # NOQA\nfrom cupy.lib._routines_poly import polyadd # NOQA\nfrom cupy.lib._routines_poly import polysub # NOQA\nfrom cupy.lib._routines_poly import polymul # NOQA\nfrom cupy.lib._routines_poly import polyfit # NOQA\nfrom cupy.lib._routines_poly import polyval # NOQA\nfrom cupy.lib._routines_poly import roots # NOQA\n\n# -----------------------------------------------------------------------------\n# Mathematical functions\n# -----------------------------------------------------------------------------\nfrom cupy._math.trigonometric import arccos # NOQA\nfrom cupy._math.trigonometric import arcsin # NOQA\nfrom cupy._math.trigonometric import arctan # NOQA\nfrom cupy._math.trigonometric import arctan2 # NOQA\nfrom cupy._math.trigonometric import cos # NOQA\nfrom cupy._math.trigonometric import deg2rad # NOQA\nfrom cupy._math.trigonometric import degrees # NOQA\nfrom 
cupy._math.trigonometric import hypot # NOQA\nfrom cupy._math.trigonometric import rad2deg # NOQA\nfrom cupy._math.trigonometric import radians # NOQA\nfrom cupy._math.trigonometric import sin # NOQA\nfrom cupy._math.trigonometric import tan # NOQA\nfrom cupy._math.trigonometric import unwrap # NOQA\n\nfrom cupy._math.hyperbolic import arccosh # NOQA\nfrom cupy._math.hyperbolic import arcsinh # NOQA\nfrom cupy._math.hyperbolic import arctanh # NOQA\nfrom cupy._math.hyperbolic import cosh # NOQA\nfrom cupy._math.hyperbolic import sinh # NOQA\nfrom cupy._math.hyperbolic import tanh # NOQA\n\nfrom cupy._math.rounding import around # NOQA\nfrom cupy._math.rounding import ceil # NOQA\nfrom cupy._math.rounding import fix # NOQA\nfrom cupy._math.rounding import floor # NOQA\nfrom cupy._math.rounding import rint # NOQA\nfrom cupy._math.rounding import round_ # NOQA\nfrom cupy._math.rounding import round_ as round # NOQA\nfrom cupy._math.rounding import trunc # NOQA\n\nfrom cupy._math.sumprod import prod # NOQA\nfrom cupy._math.sumprod import sum # NOQA\nfrom cupy._math.sumprod import cumprod # NOQA\nfrom cupy._math.sumprod import cumsum # NOQA\nfrom cupy._math.sumprod import nancumprod # NOQA\nfrom cupy._math.sumprod import nancumsum # NOQA\nfrom cupy._math.sumprod import nansum # NOQA\nfrom cupy._math.sumprod import nanprod # NOQA\nfrom cupy._math.sumprod import diff # NOQA\nfrom cupy._math.sumprod import gradient # NOQA\nfrom cupy._math.window import bartlett # NOQA\nfrom cupy._math.window import blackman # NOQA\nfrom cupy._math.window import hamming # NOQA\nfrom cupy._math.window import hanning # NOQA\nfrom cupy._math.window import kaiser # NOQA\n\nfrom cupy._math.explog import exp # NOQA\nfrom cupy._math.explog import exp2 # NOQA\nfrom cupy._math.explog import expm1 # NOQA\nfrom cupy._math.explog import log # NOQA\nfrom cupy._math.explog import log10 # NOQA\nfrom cupy._math.explog import log1p # NOQA\nfrom cupy._math.explog import log2 # NOQA\nfrom cupy._math.explog import logaddexp # NOQA\nfrom cupy._math.explog import logaddexp2 # NOQA\n\nfrom cupy._math.special import i0 # NOQA\nfrom cupy._math.special import sinc # NOQA\n\nfrom cupy._math.floating import copysign # NOQA\nfrom cupy._math.floating import frexp # NOQA\nfrom cupy._math.floating import ldexp # NOQA\nfrom cupy._math.floating import nextafter # NOQA\nfrom cupy._math.floating import signbit # NOQA\n\nfrom cupy._math.rational import gcd # NOQA\nfrom cupy._math.rational import lcm # NOQA\n\nfrom cupy._math.arithmetic import add # NOQA\nfrom cupy._math.arithmetic import divide # NOQA\nfrom cupy._math.arithmetic import divmod # NOQA\nfrom cupy._math.arithmetic import floor_divide # NOQA\nfrom cupy._math.arithmetic import fmod # NOQA\nfrom cupy._math.arithmetic import modf # NOQA\nfrom cupy._math.arithmetic import multiply # NOQA\nfrom cupy._math.arithmetic import negative # NOQA\nfrom cupy._math.arithmetic import power # NOQA\nfrom cupy._math.arithmetic import reciprocal # NOQA\nfrom cupy._math.arithmetic import remainder # NOQA\nfrom cupy._math.arithmetic import remainder as mod # NOQA\nfrom cupy._math.arithmetic import subtract # NOQA\nfrom cupy._math.arithmetic import true_divide # NOQA\n\nfrom cupy._math.arithmetic import angle # NOQA\nfrom cupy._math.arithmetic import conjugate as conj # NOQA\nfrom cupy._math.arithmetic import conjugate # NOQA\nfrom cupy._math.arithmetic import imag # NOQA\nfrom cupy._math.arithmetic import real # NOQA\n\nfrom cupy._math.misc import absolute as abs # NOQA\nfrom cupy._math.misc import absolute # 
NOQA\nfrom cupy._math.misc import cbrt # NOQA\nfrom cupy._math.misc import clip # NOQA\nfrom cupy._math.misc import fmax # NOQA\nfrom cupy._math.misc import fmin # NOQA\nfrom cupy._math.misc import interp # NOQA\nfrom cupy._math.misc import maximum # NOQA\nfrom cupy._math.misc import minimum # NOQA\nfrom cupy._math.misc import nan_to_num # NOQA\nfrom cupy._math.misc import sign # NOQA\nfrom cupy._math.misc import sqrt # NOQA\nfrom cupy._math.misc import square # NOQA\nfrom cupy._math.misc import convolve # NOQA\n\n# -----------------------------------------------------------------------------\n# Miscellaneous routines\n# -----------------------------------------------------------------------------\nfrom cupy._misc.memory_ranges import may_share_memory # NOQA\nfrom cupy._misc.memory_ranges import shares_memory # NOQA\nfrom cupy._misc.who import who # NOQA\n\n\n# -----------------------------------------------------------------------------\n# Padding\n# -----------------------------------------------------------------------------\nfrom cupy._padding.pad import pad # NOQA\n\n\n# -----------------------------------------------------------------------------\n# Sorting, searching, and counting\n# -----------------------------------------------------------------------------\nfrom cupy._sorting.count import count_nonzero # NOQA\n\nfrom cupy._sorting.search import argmax # NOQA\nfrom cupy._sorting.search import argmin # NOQA\nfrom cupy._sorting.search import argwhere # NOQA\nfrom cupy._sorting.search import flatnonzero # NOQA\nfrom cupy._sorting.search import nanargmax # NOQA\nfrom cupy._sorting.search import nanargmin # NOQA\nfrom cupy._sorting.search import nonzero # NOQA\nfrom cupy._sorting.search import searchsorted # NOQA\nfrom cupy._sorting.search import where # NOQA\n\nfrom cupy._sorting.sort import argpartition # NOQA\nfrom cupy._sorting.sort import argsort # NOQA\nfrom cupy._sorting.sort import lexsort # NOQA\nfrom cupy._sorting.sort import msort # NOQA\nfrom cupy._sorting.sort import sort_complex # NOQA\nfrom cupy._sorting.sort import partition # NOQA\nfrom cupy._sorting.sort import sort # NOQA\n\n# -----------------------------------------------------------------------------\n# Statistics\n# -----------------------------------------------------------------------------\nfrom cupy._statistics.correlation import corrcoef # NOQA\nfrom cupy._statistics.correlation import cov # NOQA\nfrom cupy._statistics.correlation import correlate # NOQA\n\nfrom cupy._statistics.order import amax # NOQA\nfrom cupy._statistics.order import amax as max # NOQA\nfrom cupy._statistics.order import amin # NOQA\nfrom cupy._statistics.order import amin as min # NOQA\nfrom cupy._statistics.order import nanmax # NOQA\nfrom cupy._statistics.order import nanmin # NOQA\nfrom cupy._statistics.order import percentile # NOQA\nfrom cupy._statistics.order import ptp # NOQA\nfrom cupy._statistics.order import quantile # NOQA\n\nfrom cupy._statistics.meanvar import median # NOQA\nfrom cupy._statistics.meanvar import average # NOQA\nfrom cupy._statistics.meanvar import mean # NOQA\nfrom cupy._statistics.meanvar import std # NOQA\nfrom cupy._statistics.meanvar import var # NOQA\nfrom cupy._statistics.meanvar import nanmedian # NOQA\nfrom cupy._statistics.meanvar import nanmean # NOQA\nfrom cupy._statistics.meanvar import nanstd # NOQA\nfrom cupy._statistics.meanvar import nanvar # NOQA\n\nfrom cupy._statistics.histogram import bincount # NOQA\nfrom cupy._statistics.histogram import digitize # NOQA\nfrom 
cupy._statistics.histogram import histogram # NOQA\nfrom cupy._statistics.histogram import histogram2d # NOQA\nfrom cupy._statistics.histogram import histogramdd # NOQA\n\n# -----------------------------------------------------------------------------\n# Undocumented functions\n# -----------------------------------------------------------------------------\nfrom cupy._core import size # NOQA\n\n\ndef ndim(a):\n \"\"\"Returns the number of dimensions of an array.\n\n Args:\n a (array-like): If it is not already an `cupy.ndarray`, a conversion\n via :func:`numpy.asarray` is attempted.\n\n Returns:\n (int): The number of dimensions in `a`.\n\n \"\"\"\n try:\n return a.ndim\n except AttributeError:\n return _numpy.ndim(a)\n\n\n# -----------------------------------------------------------------------------\n# CuPy specific functions\n# -----------------------------------------------------------------------------\n\nfrom cupy._util import clear_memo # NOQA\nfrom cupy._util import memoize # NOQA\n\nfrom cupy._core import ElementwiseKernel # NOQA\nfrom cupy._core import RawKernel # NOQA\nfrom cupy._core import RawModule # NOQA\nfrom cupy._core._reduction import ReductionKernel # NOQA\n\n# -----------------------------------------------------------------------------\n# DLPack\n# -----------------------------------------------------------------------------\n\nfrom cupy._core import fromDlpack # NOQA\nfrom cupy._core import from_dlpack # NOQA\n\n\ndef asnumpy(a, stream=None, order='C', out=None):\n \"\"\"Returns an array on the host memory from an arbitrary source array.\n\n Args:\n a: Arbitrary object that can be converted to :class:`numpy.ndarray`.\n stream (cupy.cuda.Stream): CUDA stream object. If it is specified, then\n the device-to-host copy runs asynchronously. Otherwise, the copy is\n synchronous. Note that if ``a`` is not a :class:`cupy.ndarray`\n object, then this argument has no effect.\n order ({'C', 'F', 'A'}): The desired memory layout of the host\n array. When ``order`` is 'A', it uses 'F' if ``a`` is\n fortran-contiguous and 'C' otherwise.\n out (numpy.ndarray): The output array to be written to. It must have\n compatible shape and dtype with those of ``a``'s.\n\n Returns:\n numpy.ndarray: Converted array on the host memory.\n\n \"\"\"\n if isinstance(a, ndarray):\n return a.get(stream=stream, order=order, out=out)\n elif hasattr(a, \"__cuda_array_interface__\"):\n return array(a).get(stream=stream, order=order, out=out)\n else:\n temp = _numpy.asarray(a, order=order)\n if out is not None:\n out[...] = temp\n else:\n out = temp\n return out\n\n\n_cupy = _sys.modules[__name__]\n\n\ndef get_array_module(*args):\n \"\"\"Returns the array module for arguments.\n\n This function is used to implement CPU/GPU generic code. If at least one of\n the arguments is a :class:`cupy.ndarray` object, the :mod:`cupy` module is\n returned.\n\n Args:\n args: Values to determine whether NumPy or CuPy should be used.\n\n Returns:\n module: :mod:`cupy` or :mod:`numpy` is returned based on the types of\n the arguments.\n\n .. admonition:: Example\n\n A NumPy/CuPy generic function can be written as follows\n\n >>> def softplus(x):\n ... xp = cupy.get_array_module(x)\n ... 
return xp.maximum(0, x) + xp.log1p(xp.exp(-abs(x)))\n\n \"\"\"\n for arg in args:\n if isinstance(arg, (ndarray, _cupyx.scipy.sparse.spmatrix,\n _core.fusion._FusionVarArray,\n _core.new_fusion._ArrayProxy)):\n return _cupy\n return _numpy\n\n\nfuse = _core.fusion.fuse\n\ndisable_experimental_feature_warning = False\n\n\n# set default allocator\n_default_memory_pool = cuda.MemoryPool()\n_default_pinned_memory_pool = cuda.PinnedMemoryPool()\n\ncuda.set_allocator(_default_memory_pool.malloc)\ncuda.set_pinned_memory_allocator(_default_pinned_memory_pool.malloc)\n\n\ndef get_default_memory_pool():\n \"\"\"Returns CuPy default memory pool for GPU memory.\n\n Returns:\n cupy.cuda.MemoryPool: The memory pool object.\n\n .. note::\n If you want to disable memory pool, please use the following code.\n\n >>> cupy.cuda.set_allocator(None)\n\n \"\"\"\n return _default_memory_pool\n\n\ndef get_default_pinned_memory_pool():\n \"\"\"Returns CuPy default memory pool for pinned memory.\n\n Returns:\n cupy.cuda.PinnedMemoryPool: The memory pool object.\n\n .. note::\n If you want to disable memory pool, please use the following code.\n\n >>> cupy.cuda.set_pinned_memory_allocator(None)\n\n \"\"\"\n return _default_pinned_memory_pool\n\n\ndef show_config(*, _full=False):\n \"\"\"Prints the current runtime configuration to standard output.\"\"\"\n _sys.stdout.write(str(_cupyx.get_runtime_info(full=_full)))\n _sys.stdout.flush()\n\n\nif _sys.version_info >= (3, 7):\n _deprecated_attrs = {\n 'int': (int, 'cupy.int_'),\n 'bool': (bool, 'cupy.bool_'),\n 'float': (float, 'cupy.float_'),\n 'complex': (complex, 'cupy.complex_'),\n }\n\n def __getattr__(name):\n value = _deprecated_attrs.get(name)\n if value is None:\n raise AttributeError(\n f\"module 'cupy' has no attribute {name!r}\")\n attr, eq_attr = value\n _warnings.warn(\n f'`cupy.{name}` is a deprecated alias for the Python scalar type '\n f'`{name}`. Please use the builtin `{name}` or its corresponding '\n f'NumPy scalar type `{eq_attr}` instead.',\n DeprecationWarning, stacklevel=2\n )\n return attr\nelse:\n # Does not emit warnings.\n from builtins import int\n from builtins import bool\n from builtins import float\n from builtins import complex\n"
] |
[
[
"numpy.can_cast",
"numpy.asarray",
"numpy.dtype",
"numpy.binary_repr",
"numpy.result_type",
"numpy.base_repr",
"numpy.ndim",
"numpy.isscalar"
]
] |
Goliath-Research/Computational-Statistics
|
[
"0eee6231da2f203c5cd393f8429177cc9c1e27cf"
] |
[
"GoliathResearch/goliath_research/bootstrap.py"
] |
[
"import numpy as np\nimport pandas as pd\nfrom scipy import stats as st\nfrom statsmodels.distributions.empirical_distribution import ECDF\nimport matplotlib.pyplot as plt\nimport seaborn as sns;\nfrom typing import Callable\n\nsns.set_style(\"whitegrid\") \n\nclass Bootstrap(object):\n '''\n\n '''\n\n def __init__(self, sample_data, num_samples=10000, sample_size=100):\n ''' \n It creates the sample distribution using the bootstrap method (resampling WITH replacement)\n sample_data: sample data (must be representative of the population)\n num_samples: number of samples to generate for the bootstrap method\n sample_size: size of each of num_samples samples to generate\n '''\n self._sample_data = sample_data\n self._num_samples = num_samples\n self._sample_size = sample_size\n\n self.generateSamples()\n\n @property\n def sample_data(self):\n '''Sample of the original population (list or np.ndarray)'''\n return self._sample_data\n\n @property\n def num_samples(self):\n return self._num_samples\n\n @property\n def sample_size(self):\n return self._sample_size\n\n @property\n def samples(self):\n '''DataFrame with num_samples samples of size sample_size'''\n if self._samples.empty:\n self.generateSamples()\n return self._samples\n\n def generateSamples(self):\n '''\n It returns a DataFrame where each column (num_samples columns) is a sample with replacement.\n '''\n self._samples = pd.DataFrame()\n for k in range(self.num_samples):\n sample = np.random.choice(self.sample_data, size=self.sample_size, replace=True)\n column_name = 'Sample' + str(k)\n self._samples[column_name] = sample\n self._samples = self._samples.copy()\n\n# Method for generating a sample distribution from a DataFrame with sample columns\n# This method receives all samples (a Pandas DataFrame) and the statistical function\n# of interest (that is, a function that takes a sample and returns a statistic)\n\nStatistics = Callable[np.ndarray, float]\n\ndef calcSampleDistribution(samples : pd.DataFrame, func : Statistics) -> np.ndarray :\n '''\n It creates the samples distribution using func.\n '''\n return np.array(samples.apply(func))\n\ndef getConfidenceInterval(d : np.ndarray, confidence: float = 95) -> (float, float):\n alpha = 100 - confidence\n lower_percentile = alpha / 2.0 \n lower = np.percentile(d, lower_percentile)\n upper_percentile = lower_percentile + confidence\n upper = np.percentile(d, upper_percentile)\n return (lower, upper) \n\ndef graphSampleDistribution(d : np.ndarray):\n '''\n It graph the sample distribution\n '''\n sns.displot(d, height=3.5, aspect=1.5).set(title='Sample Distribution') \n\ndef graphConfidenceInterval(d : np.ndarray, confidence: float = 95):\n '''\n It returns a confidence% confident interval and presents the result graphically.\n '''\n lower, upper = getConfidenceInterval(d, confidence)\n sns.displot(d, kde=True, height=3.5, aspect=1.5)\n plt.title('Sample Distribution\\n %i%% CI: (%.2f, %.2f)'%(confidence, lower, upper))\n plt.axvline(x = lower, ymin=0, ymax=0.5, color='orangered', linewidth=2)\n plt.axvline(x = upper, ymin=0, ymax=0.5, color='orangered', linewidth=2)\n return (lower, upper) \n\n\"\"\" def createSampleDistributionHo(self, pop_value):\n '''\n It creates the samples distribution centered at pop_value (true under Ho).\n '''\n self.sample_distribution_Ho = self.sample_distribution - self.sample_distribution.mean() + pop_value\n self.sample_mean_Ho = np.round(np.mean(self.sample_distribution_Ho), 2)\n \"\"\" \n\n\nclass OneSampleHT(Bootstrap):\n\n def __init__(self, 
sample_data, num_samples=10000, sample_size=100, func=np.mean):\n super().__init__(sample_data, num_samples, sample_size)\n\n self.sample_distribution = calcSampleDistribution(self._samples, func)\n self.sample_mean = np.round(np.mean(self.sample_distribution), 2)\n\n def createSampleDistributionHo(self, pop_value):\n '''\n It creates the samples distribution centered at pop_value (true under Ho).\n '''\n self.sample_distribution_Ho = self.sample_distribution - self.sample_distribution.mean() + pop_value\n self.sample_mean_Ho = np.round(np.mean(self.sample_distribution_Ho), 2)\n \n def getpValue(self, obs_value, alpha=0.05, alternative='two-sided'):\n '''\n It calculates the p-value for one-sample HT\n obs_value: obs_value: observed value \n alpha: significance level\n alternative: one of the three values: 'two-sided', 'smaller', or 'larger' \n '''\n ecdf = ECDF(self.sample_distribution_Ho) \n if alternative == 'two-sided':\n if obs_value < self.sample_mean_Ho: \n pValue = 2 * ecdf(obs_value)\n else: \n pValue = 2 * (1 - ecdf(obs_value)) \n elif alternative == 'smaller':\n pValue = ecdf(obs_value)\n else:\n pValue = 1 - ecdf(obs_value)\n return pValue\n\n def graphpValue(self, obs_value, alpha=0.05, alternative='two-sided'):\n '''\n It calculates the p-value for one-sample HT, and also graph the sample distribution, \n the critical region, and the obs_value\n obs_value: obs_value: observed value\n alpha: significance level\n alternative: one of the three values: 'two-sided', 'smaller', or 'larger' \n '''\n ecdf = ECDF(self.sample_distribution_Ho) \n pValue = self.getpValue(obs_value, alpha, alternative) \n ax = sns.kdeplot(x=self.sample_distribution_Ho, color='lightskyblue', shade=True, alpha=0.4)\n plt.axvline(x=obs_value, ymin=0, ymax= 0.02, color='black', linewidth=6)\n plt.title('Sampling Distribution')\n if alternative == 'two-sided':\n cv1 = np.round(np.percentile(self.sample_distribution_Ho, (alpha/2)*100), 2) \n cv2 = np.round(np.percentile(self.sample_distribution_Ho, (1-alpha)*100), 2) \n plt.axvline(x = cv1, ymin=0, ymax=0.5, color='orangered', linewidth=2)\n plt.axvline(x = cv2, ymin=0, ymax=0.5, color='orangered', linewidth=2); \n elif alternative == 'smaller':\n cv1 = np.round(np.percentile(self.sample_distribution_Ho, alpha*100), 2) \n plt.axvline(x = cv1, ymin=0, ymax=0.5, color='orangered', linewidth=2)\n else:\n cv2 = np.round(np.percentile(self.sample_distribution_Ho, (1-alpha)*100), 2) \n plt.axvline(x = cv2, ymin=0, ymax=0.5, color='orangered', linewidth=2)\n return pValue\n\n def oneSampleHT(self, pop_value, alpha=0.05, alternative='two-sided'):\n '''\n It computes the bootstrap one-sample test.\n obs_value: observed value\n alpha: significance level\n alternative: one of the three values: 'two-sided', 'smaller', or 'larger' \n '''\n #sigHo = {'two-sided':' =', 'smaller':'>=', 'larger':'<='}\n sigHa = {'two-sided':'!=', 'smaller':'< ', 'larger':'> '}\n print('--- Bootstrapping Method ---')\n print(' Ho: measure =', np.round(pop_value, 2)) \n print(' Ha: measure' , sigHa[alternative], np.round(pop_value, 2)) \n self.createSampleDistributionHo(pop_value)\n obs_value = self.sample_mean\n print(' Sample mean = %.2f' %(self.sample_mean))\n p_val = self.getpValue(obs_value, alpha, alternative)\n print(' p-value = ' + str(np.round(p_val,4)))\n \n def graphOneSampleHT(self, pop_value, alpha=0.05, alternative='two-sided'):\n '''\n It computes the bootstrap one-sample test and gets graphical results.\n obs_value: observed value\n alpha: significance level\n alternative: one of 
the three values: 'two-sided', 'smaller', or 'larger' \n '''\n #sigHo = {'two-sided':' =', 'smaller':'>=', 'larger':'<='}\n sigHa = {'two-sided':'!=', 'smaller':'< ', 'larger':'> '}\n print('--- Bootstrapping Method ---')\n #print(' Ho: measure', sigHo[alternative], np.round(test_value,2)) \n print(' Ho: measure =', np.round(pop_value,2)) \n print(' Ha: measure', sigHa[alternative], np.round(pop_value, 2)) \n self.createSampleDistributionHo(pop_value) \n print(' Sample mean = %.2f' %(self.sample_mean))\n p_val = self.graphpValue(self.sample_mean, alpha, alternative)\n print(' p-value = ' + str(np.round(p_val, 4)))\n\nif __name__ == \"__main__\":\n # Generate a data sample\n\n np.random.seed(10)\n data = np.random.randint(158, 175, 50)\n\n # Create a bootstrap object based on the previous sample\n # The object will generate multiple samples following the\n # bootstrap method (sampling with replacement)\n\n b = Bootstrap(data)\n\n # Once we have the bootstrap object, we can use it for\n # creating a sampling distribution of any \"statistics\"\n\n d = calcSampleDistribution(b.samples, np.mean)\n\n # Sample Distribution Histogram\n\n plt.hist(d)\n #input()\n\n # 95% confidence interval according to the Sample Distribution\n\n lower, upper = getConfidenceInterval(d, 95)\n print(lower, upper)\n\n # Graphical View\n\n graphSampleDistribution(d)\n #input()\n\n graphConfidenceInterval(d, 95)\n #input()\n\n # Testing now OneSampleHT\n\n My1HT = OneSampleHT(data, sample_size=40)\n\n graphSampleDistribution(My1HT.sample_distribution)\n\n pop_value = 160\n My1HT.createSampleDistributionHo(pop_value)\n\n obs_value = 165\n pValue = My1HT.getpValue(obs_value, 0.05, 'two-sided')\n print(\"pValue(%5.2f) = %3.2f\" % (obs_value, pValue))\n\n My1HT.graphOneSampleHT(165)\n\n"
] |
[
[
"matplotlib.pyplot.axvline",
"numpy.random.seed",
"matplotlib.pyplot.title",
"numpy.random.choice",
"numpy.percentile",
"pandas.DataFrame",
"numpy.round",
"numpy.mean",
"matplotlib.pyplot.hist",
"numpy.random.randint"
]
] |
shreyas-bk/TPN
|
[
"f761af1e61086733a882cc37e0556cb47116f574"
] |
[
"mmaction/models/tenons/necks/tpn.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import xavier_init\nfrom mmcv import Config\nimport numpy as np\n\nfrom ...registry import NECKS\n\n\nclass Identity(nn.Module):\n\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n\nclass ConvModule(nn.Module):\n def __init__(\n self,\n inplanes,\n planes,\n kernel_size,\n stride,\n padding,\n bias=False,\n groups=1,\n ):\n super(ConvModule, self).__init__()\n self.conv = nn.Conv3d(inplanes, planes, kernel_size, stride, padding, bias=bias, groups=groups)\n self.bn = nn.BatchNorm3d(planes)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n out = self.relu(self.bn(self.conv(x)))\n return out\n\n\nclass AuxHead(nn.Module):\n def __init__(\n self,\n inplanes,\n planes,\n loss_weight=0.5\n ):\n super(AuxHead, self).__init__()\n self.convs = \\\n ConvModule(inplanes, inplanes * 2, kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1), bias=False)\n self.loss_weight = loss_weight\n self.dropout = nn.Dropout(p=0.5)\n self.fc = nn.Linear(inplanes * 2, planes)\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n if isinstance(m, nn.Conv3d):\n xavier_init(m, distribution='uniform')\n if isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.fill_(0)\n\n def forward(self, x, target=None):\n if target is None:\n return None\n loss = dict()\n x = self.convs(x)\n x = F.adaptive_avg_pool3d(x, 1).squeeze(-1).squeeze(-1).squeeze(-1)\n x = self.dropout(x)\n x = self.fc(x)\n\n loss['loss_aux'] = self.loss_weight * F.cross_entropy(x, target)\n return loss\n\n\nclass TemporalModulation(nn.Module):\n def __init__(self,\n inplanes,\n planes,\n downsample_scale=8,\n ):\n super(TemporalModulation, self).__init__()\n\n self.conv = nn.Conv3d(inplanes, planes, (3, 1, 1), (1, 1, 1), (1, 0, 0), bias=False, groups=32)\n self.pool = nn.MaxPool3d((downsample_scale, 1, 1), (downsample_scale, 1, 1), (0, 0, 0), ceil_mode=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.pool(x)\n return x\n\n\nclass Upsampling(nn.Module):\n def __init__(self,\n scale=(2, 1, 1),\n ):\n super(Upsampling, self).__init__()\n self.scale = scale\n\n def forward(self, x):\n x = F.interpolate(x, scale_factor=self.scale, mode='nearest')\n return x\n\n\nclass Downampling(nn.Module):\n def __init__(self,\n inplanes,\n planes,\n kernel_size=(3, 1, 1),\n stride=(1, 1, 1),\n padding=(1, 0, 0),\n bias=False,\n groups=1,\n norm=False,\n activation=False,\n downsample_position='after',\n downsample_scale=(1, 2, 2),\n ):\n super(Downampling, self).__init__()\n\n self.conv = nn.Conv3d(inplanes, planes, kernel_size, stride, padding, bias=bias, groups=groups)\n self.norm = nn.BatchNorm3d(planes) if norm else None\n self.relu = nn.ReLU(inplace=True) if activation else None\n assert (downsample_position in ['before', 'after'])\n self.downsample_position = downsample_position\n self.pool = nn.MaxPool3d(downsample_scale, downsample_scale, (0, 0, 0), ceil_mode=True)\n\n def forward(self, x):\n if self.downsample_position == 'before':\n x = self.pool(x)\n x = self.conv(x)\n if self.norm is not None:\n x = self.norm(x)\n if self.relu is not None:\n x = self.relu(x)\n if self.downsample_position == 'after':\n x = self.pool(x)\n\n return x\n\n\nclass LevelFusion(nn.Module):\n def __init__(self,\n in_channels=[1024, 1024],\n mid_channels=[1024, 1024],\n out_channels=2048,\n ds_scales=[(1, 1, 1), 
(1, 1, 1)],\n ):\n super(LevelFusion, self).__init__()\n self.ops = nn.ModuleList()\n num_ins = len(in_channels)\n for i in range(num_ins):\n op = Downampling(in_channels[i], mid_channels[i], kernel_size=(1, 1, 1), stride=(1, 1, 1),\n padding=(0, 0, 0), bias=False, groups=32, norm=True, activation=True,\n downsample_position='before', downsample_scale=ds_scales[i])\n self.ops.append(op)\n\n in_dims = np.sum(mid_channels)\n self.fusion_conv = nn.Sequential(\n nn.Conv3d(in_dims, out_channels, 1, 1, 0, bias=False),\n nn.BatchNorm3d(out_channels),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, inputs):\n out = [self.ops[i](feature) for i, feature in enumerate(inputs)]\n out = torch.cat(out, 1)\n out = self.fusion_conv(out)\n return out\n\n\nclass SpatialModulation(nn.Module):\n def __init__(\n self,\n inplanes=[1024, 2048],\n planes=2048,\n ):\n super(SpatialModulation, self).__init__()\n\n self.spatial_modulation = nn.ModuleList()\n for i, dim in enumerate(inplanes):\n op = nn.ModuleList()\n ds_factor = planes // dim\n ds_num = int(np.log2(ds_factor))\n if ds_num < 1:\n op = Identity()\n else:\n for dsi in range(ds_num):\n in_factor = 2 ** dsi\n out_factor = 2 ** (dsi + 1)\n op.append(ConvModule(dim * in_factor, dim * out_factor, kernel_size=(1, 3, 3), stride=(1, 2, 2),\n padding=(0, 1, 1), bias=False))\n self.spatial_modulation.append(op)\n\n def forward(self, inputs):\n out = []\n for i, feature in enumerate(inputs):\n if isinstance(self.spatial_modulation[i], nn.ModuleList):\n out_ = inputs[i]\n for III, op in enumerate(self.spatial_modulation[i]):\n out_ = op(out_)\n out.append(out_)\n else:\n out.append(self.spatial_modulation[i](inputs[i]))\n return out\n\n\n@NECKS.register_module\nclass TPN(nn.Module):\n\n def __init__(self,\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n spatial_modulation_config=None,\n temporal_modulation_config=None,\n upsampling_config=None,\n downsampling_config=None,\n level_fusion_config=None,\n aux_head_config=None,\n ):\n super(TPN, self).__init__()\n assert isinstance(in_channels, list)\n assert isinstance(out_channels, int)\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.num_ins = len(in_channels)\n\n spatial_modulation_config = Config(spatial_modulation_config) if isinstance(spatial_modulation_config,\n dict) else spatial_modulation_config\n temporal_modulation_config = Config(temporal_modulation_config) if isinstance(temporal_modulation_config,\n dict) else temporal_modulation_config\n upsampling_config = Config(upsampling_config) if isinstance(upsampling_config, dict) else upsampling_config\n downsampling_config = Config(downsampling_config) if isinstance(downsampling_config,\n dict) else downsampling_config\n aux_head_config = Config(aux_head_config) if isinstance(aux_head_config, dict) else aux_head_config\n level_fusion_config = Config(level_fusion_config) if isinstance(level_fusion_config,\n dict) else level_fusion_config\n\n self.temporal_modulation_ops = nn.ModuleList()\n self.upsampling_ops = nn.ModuleList()\n self.downsampling_ops = nn.ModuleList()\n self.level_fusion_op = LevelFusion(**level_fusion_config)\n self.spatial_modulation = SpatialModulation(**spatial_modulation_config)\n for i in range(0, self.num_ins, 1):\n inplanes = in_channels[-1]\n planes = out_channels\n\n if temporal_modulation_config is not None:\n # overwrite the temporal_modulation_config\n temporal_modulation_config.param.downsample_scale = temporal_modulation_config.scales[i]\n temporal_modulation_config.param.inplanes = inplanes\n 
temporal_modulation_config.param.planes = planes\n temporal_modulation = TemporalModulation(**temporal_modulation_config.param)\n self.temporal_modulation_ops.append(temporal_modulation)\n\n if i < self.num_ins - 1:\n if upsampling_config is not None:\n # overwrite the upsampling_config\n upsampling = Upsampling(**upsampling_config)\n self.upsampling_ops.append(upsampling)\n\n if downsampling_config is not None:\n # overwrite the downsampling_config\n downsampling_config.param.inplanes = planes\n downsampling_config.param.planes = planes\n downsampling_config.param.downsample_scale = downsampling_config.scales\n downsampling = Downampling(**downsampling_config.param)\n self.downsampling_ops.append(downsampling)\n\n out_dims = level_fusion_config.out_channels\n\n # Two pyramids\n self.level_fusion_op2 = LevelFusion(**level_fusion_config)\n\n self.pyramid_fusion_op = nn.Sequential(\n nn.Conv3d(out_dims * 2, 2048, 1, 1, 0, bias=False),\n nn.BatchNorm3d(2048),\n nn.ReLU(inplace=True)\n )\n\n # overwrite aux_head_config\n if aux_head_config is not None:\n aux_head_config.inplanes = self.in_channels[-2]\n self.aux_head = AuxHead(**aux_head_config)\n else:\n self.aux_head = None\n\n # default init_weights for conv(msra) and norm in ConvModule\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n xavier_init(m, distribution='uniform')\n if isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.fill_(0)\n\n if self.aux_head is not None:\n self.aux_head.init_weights()\n\n def forward(self, inputs, target=None):\n loss = None\n\n # Auxiliary loss\n if self.aux_head is not None:\n loss = self.aux_head(inputs[-2], target)\n\n # Spatial Modulation\n outs = self.spatial_modulation(inputs)\n\n # Temporal Modulation\n outs = [temporal_modulation(outs[i]) for i, temporal_modulation in enumerate(self.temporal_modulation_ops)]\n\n temporal_modulation_outs = outs\n\n # Build top-down flow - upsampling operation\n if self.upsampling_ops is not None:\n for i in range(self.num_ins - 1, 0, -1):\n outs[i - 1] = outs[i - 1] + self.upsampling_ops[i - 1](outs[i])\n\n # Get top-down outs\n topdownouts = self.level_fusion_op2(outs)\n outs = temporal_modulation_outs\n\n # Build bottom-up flow - downsampling operation\n if self.downsampling_ops is not None:\n for i in range(0, self.num_ins - 1, 1):\n outs[i + 1] = outs[i + 1] + self.downsampling_ops[i](outs[i])\n\n # Get bottom-up outs\n outs = self.level_fusion_op(outs)\n\n # fuse two pyramid outs\n outs = self.pyramid_fusion_op(torch.cat([topdownouts, outs], 1))\n\n return outs, loss\n"
] |
[
[
"torch.nn.Dropout",
"numpy.log2",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.adaptive_avg_pool3d",
"torch.nn.Linear",
"torch.nn.Conv3d",
"torch.nn.MaxPool3d",
"torch.nn.init.normal_",
"torch.nn.functional.interpolate",
"torch.nn.ReLU",
"torch.nn.BatchNorm3d",
"numpy.sum"
]
] |
allenai/scruples
|
[
"9a43459c507e57d89ab8442a4f3985cedecb8710",
"9a43459c507e57d89ab8442a4f3985cedecb8710"
] |
[
"src/scruples/baselines/train.py",
"src/scruples/baselines/utils.py"
] |
[
"\"\"\"Fine-tune pre-trained LMs on the scruples datasets.\"\"\"\n\nimport gc\nimport json\nimport logging\nimport math\nimport os\nimport shutil\nfrom typing import (\n Any,\n Dict,\n List,\n Optional)\n\nimport numpy as np\nfrom transformers import (\n AdamW,\n WarmupLinearSchedule)\nfrom scipy.special import softmax\nimport tensorboardX\nimport torch\nfrom torch.utils.data import DataLoader\nimport tqdm\n\nfrom . import resource, corpus\nfrom .. import settings, utils\nfrom ..baselines.loss import (\n SoftCrossEntropyLoss,\n DirichletMultinomialLoss)\nfrom ..data.labels import Label\nfrom ..dataset.readers import (\n ScruplesResourceDataset,\n ScruplesCorpusDataset)\n\n\ndef train_lm(\n data_dir: str,\n model_dir: str,\n dataset: str,\n baseline: str,\n hyper_params: Dict[str, Any],\n loss_type: str,\n compute_train_batch_size: int,\n predict_batch_size: int,\n gpu_ids: Optional[List[int]],\n logger: Optional[logging.Logger] = None\n) -> None:\n \"\"\"Fine-tune a pre-trained LM baseline on a scruples dataset.\n\n Fine-tune ``baseline`` on ``dataset``, writing all results and\n artifacts to ``model_dir``. Return the best calibrated xentropy achieved on\n dev after any epoch.\n\n Parameters\n ----------\n data_dir : str\n The path to the directory containing the dataset.\n model_dir : str\n The path to the directory in which to save results.\n dataset : str\n The dataset to use when fine-tuning ``baseline``. Must be either\n \"resource\" or \"corpus\".\n baseline : str\n The pre-trained LM to fine-tune. Should be one of the keys for\n ``scruples.baselines.$dataset.FINE_TUNE_LM_BASELINES`` where\n ``$dataset`` corresponds to the ``dataset`` argument to this\n function.\n hyper_params : Dict[str, Any]\n The dictionary of hyper-parameters for the model.\n loss_type : str\n The type of loss to use. Should be one of ``\"xentropy-hard\"``,\n ``\"xentropy-soft\"``, ``\"xentropy-full\"`` or\n ``\"dirichlet-multinomial\"``.\n compute_train_batch_size : int\n The largest batch size that will fit on the hardware during\n training. Gradient accumulation will be used to make sure the\n actual size of the batch on the hardware respects this limit.\n predict_batch_size : int\n The number of instances to use in a predicting batch.\n gpu_ids : Optional[List[int]]\n A list of IDs for GPUs to use.\n logger : Optional[logging.Logger], optional (default=None)\n The logger to use when logging messages. If ``None``, then no\n messages will be logged.\n\n Returns\n -------\n float\n The best calibrated xentropy on dev achieved after any epoch.\n bool\n ``True`` if the training loss diverged, ``False`` otherwise.\n \"\"\"\n gc.collect()\n # collect any garbage to make sure old torch objects are cleaned up (and\n # their memory is freed from the GPU). Otherwise, old tensors can hang\n # around on the GPU, causing CUDA out-of-memory errors.\n\n if loss_type not in settings.LOSS_TYPES:\n raise ValueError(\n f'Unrecognized loss type: {loss_type}. 
Please use one of'\n f' \"xentropy-hard\", \"xentropy-soft\", \"xentropy-full\" or'\n f' \"dirichlet-multinomial\".')\n\n # Step 1: Manage and construct paths.\n\n if logger is not None:\n logger.info('Creating the model directory.')\n\n checkpoints_dir = os.path.join(model_dir, 'checkpoints')\n tensorboard_dir = os.path.join(model_dir, 'tensorboard')\n os.makedirs(model_dir)\n os.makedirs(checkpoints_dir)\n os.makedirs(tensorboard_dir)\n\n config_file_path = os.path.join(model_dir, 'config.json')\n log_file_path = os.path.join(model_dir, 'log.txt')\n best_checkpoint_path = os.path.join(\n checkpoints_dir, 'best.checkpoint.pkl')\n last_checkpoint_path = os.path.join(\n checkpoints_dir, 'last.checkpoint.pkl')\n\n # Step 2: Setup the log file.\n\n if logger is not None:\n logger.info('Configuring log files.')\n\n log_file_handler = logging.FileHandler(log_file_path)\n log_file_handler.setLevel(logging.DEBUG)\n log_file_handler.setFormatter(logging.Formatter(settings.LOG_FORMAT))\n logging.root.addHandler(log_file_handler)\n\n # Step 3: Record the script's arguments.\n\n if logger is not None:\n logger.info(f'Writing arguments to {config_file_path}.')\n\n with open(config_file_path, 'w') as config_file:\n json.dump({\n 'data_dir': data_dir,\n 'model_dir': model_dir,\n 'dataset': dataset,\n 'baseline': baseline,\n 'hyper_params': hyper_params,\n 'loss_type': loss_type,\n 'compute_train_batch_size': compute_train_batch_size,\n 'predict_batch_size': predict_batch_size,\n 'gpu_ids': gpu_ids\n }, config_file)\n\n # Step 4: Configure GPUs.\n\n if gpu_ids:\n if logger is not None:\n logger.info(\n f'Configuring environment to use {len(gpu_ids)} GPUs:'\n f' {\", \".join(str(gpu_id) for gpu_id in gpu_ids)}.')\n\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, gpu_ids))\n\n if not torch.cuda.is_available():\n raise EnvironmentError('CUDA must be available to use GPUs.')\n\n device = torch.device('cuda')\n else:\n if logger is not None:\n logger.info('Configuring environment to use CPU.')\n\n device = torch.device('cpu')\n\n # Step 5: Fetch the baseline information and training loop parameters.\n\n if logger is not None:\n logger.info('Retrieving baseline and related parameters.')\n\n if dataset == 'resource':\n Model, baseline_config, _, make_transform =\\\n resource.FINE_TUNE_LM_BASELINES[baseline]\n elif dataset == 'corpus':\n Model, baseline_config, _, make_transform =\\\n corpus.FINE_TUNE_LM_BASELINES[baseline]\n else:\n raise ValueError(\n f'dataset must be either \"resource\" or \"corpus\", not'\n f' {dataset}.')\n\n n_epochs = hyper_params['n_epochs']\n train_batch_size = hyper_params['train_batch_size']\n n_gradient_accumulation = math.ceil(\n train_batch_size / (compute_train_batch_size * len(gpu_ids)))\n\n # Step 6: Load the dataset.\n\n if logger is not None:\n logger.info(f'Loading the dataset from {data_dir}.')\n\n featurize = make_transform(**baseline_config['transform'])\n if dataset == 'resource':\n Dataset = ScruplesResourceDataset\n labelize = None\n labelize_scores = lambda scores: np.array(scores).astype(float)\n elif dataset == 'corpus':\n Dataset = ScruplesCorpusDataset\n labelize = lambda s: getattr(Label, s).index\n labelize_scores = lambda scores: np.array([\n score\n for _, score in sorted(\n scores.items(),\n key=lambda t: labelize(t[0]))\n ]).astype(float)\n else:\n raise ValueError(\n f'dataset must be either \"resource\" or \"corpus\", not'\n f' {dataset}.')\n\n train = Dataset(\n data_dir=data_dir,\n split='train',\n transform=featurize,\n 
label_transform=labelize,\n label_scores_transform=labelize_scores)\n dev = Dataset(\n data_dir=data_dir,\n split='dev',\n transform=featurize,\n label_transform=labelize,\n label_scores_transform=labelize_scores)\n\n train_loader = DataLoader(\n dataset=train,\n batch_size=train_batch_size // n_gradient_accumulation,\n shuffle=True,\n num_workers=len(gpu_ids),\n pin_memory=bool(gpu_ids))\n dev_loader = DataLoader(\n dataset=dev,\n batch_size=predict_batch_size,\n shuffle=False,\n num_workers=len(gpu_ids),\n pin_memory=bool(gpu_ids))\n\n # Step 7: Create the model, optimizer, and loss.\n\n if logger is not None:\n logger.info('Initializing the model.')\n\n model = Model(**baseline_config['model'])\n model.to(device)\n\n n_optimization_steps = n_epochs * math.ceil(len(train) / train_batch_size)\n parameter_groups = [\n {\n 'params': [\n param\n for name, param in model.named_parameters()\n if 'bias' in name\n or 'LayerNorm.bias' in name\n or 'LayerNorm.weight' in name\n ],\n 'weight_decay': 0\n },\n {\n 'params': [\n param\n for name, param in model.named_parameters()\n if 'bias' not in name\n and 'LayerNorm.bias' not in name\n and 'LayerNorm.weight' not in name\n ],\n 'weight_decay': hyper_params['weight_decay']\n }\n ]\n optimizer = AdamW(parameter_groups, lr=hyper_params['lr'])\n\n if loss_type == 'xentropy-hard':\n loss = torch.nn.CrossEntropyLoss()\n elif loss_type == 'xentropy-soft':\n loss = SoftCrossEntropyLoss()\n elif loss_type == 'xentropy-full':\n loss = SoftCrossEntropyLoss()\n elif loss_type == 'dirichlet-multinomial':\n loss = DirichletMultinomialLoss()\n\n xentropy = SoftCrossEntropyLoss()\n\n scheduler = WarmupLinearSchedule(\n optimizer=optimizer,\n warmup_steps=int(\n hyper_params['warmup_proportion']\n * n_optimization_steps\n ),\n t_total=n_optimization_steps)\n\n # add data parallelism support\n model = torch.nn.DataParallel(model)\n\n # Step 8: Run training.\n\n n_train_batches_per_epoch = math.ceil(len(train) / train_batch_size)\n n_dev_batch_per_epoch = math.ceil(len(dev) / predict_batch_size)\n\n writer = tensorboardX.SummaryWriter(log_dir=tensorboard_dir)\n\n best_dev_calibrated_xentropy = math.inf\n for epoch in range(n_epochs):\n # set the model to training mode\n model.train()\n\n # run training for the epoch\n epoch_train_loss = 0\n epoch_train_xentropy = 0\n for i, (_, features, labels, label_scores) in tqdm.tqdm(\n enumerate(train_loader),\n total=n_gradient_accumulation * n_train_batches_per_epoch,\n **settings.TQDM_KWARGS\n ):\n # move the data onto the device\n features = {k: v.to(device) for k, v in features.items()}\n\n # create the targets\n if loss_type == 'xentropy-hard':\n targets = labels\n elif loss_type == 'xentropy-soft':\n targets = label_scores / torch.unsqueeze(\n torch.sum(label_scores, dim=-1), dim=-1)\n elif loss_type == 'xentropy-full':\n targets = label_scores\n elif loss_type == 'dirichlet-multinomial':\n targets = label_scores\n # create the soft labels\n soft_labels = label_scores / torch.unsqueeze(\n torch.sum(label_scores, dim=-1), dim=-1)\n\n # move the targets and soft labels to the device\n targets = targets.to(device)\n soft_labels = soft_labels.to(device)\n\n # make predictions\n logits = model(**features)[0]\n\n batch_loss = loss(logits, targets)\n batch_xentropy = xentropy(logits, soft_labels)\n\n # update training statistics\n epoch_train_loss = (\n batch_loss.item() + i * epoch_train_loss\n ) / (i + 1)\n epoch_train_xentropy = (\n batch_xentropy.item() + i * epoch_train_xentropy\n ) / (i + 1)\n\n # update the network\n 
batch_loss.backward()\n\n if (i + 1) % n_gradient_accumulation == 0:\n optimizer.step()\n optimizer.zero_grad()\n\n scheduler.step()\n\n # write training statistics to tensorboard\n\n step = n_train_batches_per_epoch * epoch + (\n (i + 1) // n_gradient_accumulation)\n if step % 100 == 0 and (i + 1) % n_gradient_accumulation == 0:\n writer.add_scalar('train/loss', epoch_train_loss, step)\n writer.add_scalar('train/xentropy', epoch_train_xentropy, step)\n\n # run evaluation\n with torch.no_grad():\n # set the model to evaluation mode\n model.eval()\n\n # run validation for the epoch\n epoch_dev_loss = 0\n epoch_dev_soft_labels = []\n epoch_dev_logits = []\n for i, (_, features, labels, label_scores) in tqdm.tqdm(\n enumerate(dev_loader),\n total=n_dev_batch_per_epoch,\n **settings.TQDM_KWARGS):\n # move the data onto the device\n features = {k: v.to(device) for k, v in features.items()}\n\n # create the targets\n if loss_type == 'xentropy-hard':\n targets = labels\n elif loss_type == 'xentropy-soft':\n targets = label_scores / torch.unsqueeze(\n torch.sum(label_scores, dim=-1), dim=-1)\n elif loss_type == 'xentropy-full':\n targets = label_scores\n elif loss_type == 'dirichlet-multinomial':\n targets = label_scores\n\n # move the targets to the device\n targets = targets.to(device)\n\n # make predictions\n logits = model(**features)[0]\n\n batch_loss = loss(logits, targets)\n\n # update validation statistics\n epoch_dev_loss = (\n batch_loss.item() + i * epoch_dev_loss\n ) / (i + 1)\n epoch_dev_soft_labels.extend(\n (\n label_scores\n / torch.unsqueeze(torch.sum(label_scores, dim=-1), dim=-1)\n ).cpu().numpy().tolist()\n )\n epoch_dev_logits.extend(logits.cpu().numpy().tolist())\n\n # compute validation statistics\n epoch_dev_soft_labels = np.array(epoch_dev_soft_labels)\n epoch_dev_logits = np.array(epoch_dev_logits)\n\n calibration_factor = utils.calibration_factor(\n logits=epoch_dev_logits,\n targets=epoch_dev_soft_labels)\n\n epoch_dev_xentropy = utils.xentropy(\n y_true=epoch_dev_soft_labels,\n y_pred=softmax(epoch_dev_logits, axis=-1))\n epoch_dev_calibrated_xentropy = utils.xentropy(\n y_true=epoch_dev_soft_labels,\n y_pred=softmax(epoch_dev_logits / calibration_factor, axis=-1))\n\n # write validation statistics to tensorboard\n writer.add_scalar('dev/loss', epoch_dev_loss, step)\n writer.add_scalar('dev/xentropy', epoch_dev_xentropy, step)\n writer.add_scalar(\n 'dev/calibrated-xentropy', epoch_dev_calibrated_xentropy, step)\n\n if logger is not None:\n logger.info(\n f'\\n\\n'\n f' epoch {epoch}:\\n'\n f' train loss : {epoch_train_loss:.4f}\\n'\n f' train xentropy : {epoch_train_xentropy:.4f}\\n'\n f' dev loss : {epoch_dev_loss:.4f}\\n'\n f' dev xentropy : {epoch_dev_xentropy:.4f}\\n'\n f' dev calibrated xentropy : {epoch_dev_calibrated_xentropy:.4f}\\n'\n f' calibration factor : {calibration_factor:.4f}\\n')\n\n # update checkpoints\n\n torch.save(\n {\n 'epoch': epoch,\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'calibration_factor': calibration_factor\n },\n last_checkpoint_path)\n\n # update the current best model\n if epoch_dev_calibrated_xentropy < best_dev_calibrated_xentropy:\n shutil.copyfile(last_checkpoint_path, best_checkpoint_path)\n best_dev_calibrated_xentropy = epoch_dev_calibrated_xentropy\n\n # exit early if the training loss has diverged\n if math.isnan(epoch_train_loss):\n logger.info('Training loss has diverged. Exiting early.')\n\n return best_dev_calibrated_xentropy, True\n\n logger.info(\n f'Training complete. 
Best dev calibrated xentropy was'\n f' {best_dev_calibrated_xentropy:.4f}.')\n\n return best_dev_calibrated_xentropy, False\n",
"\"\"\"Utilities for baselines on scruples.\"\"\"\n\nfrom typing import Any, Dict\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.special import gammaln\nfrom sklearn.base import (\n BaseEstimator,\n TransformerMixin)\nfrom sklearn.utils.validation import check_is_fitted\n\n\n# functions\n\ndef concat_title_and_text(features: pd.DataFrame) -> np.ndarray:\n \"\"\"Return the concatenation of the title and text features.\n\n Parameters\n ----------\n features : pd.DataFrame\n The features for the scruples dataset.\n\n Returns\n -------\n np.ndarray\n The concatentation of the title and text strings separated by a\n newline character, in a numpy array.\n \"\"\"\n return (features['title'] + '\\n' + features['text']).values\n\n\ndef dirichlet_multinomial(log_alphas: np.ndarray) -> np.ndarray:\n \"\"\"Return class probabilities from a dirichlet-multinomial model.\n\n Parameters\n ----------\n log_alphas : np.ndarray\n An n x k dimensional numpy array where n is the number of\n samples and k is the number of classes. The values of the array\n should correspond to the log of the alpha parameters for the\n predicted dirichlet distribution corresponding to each instance.\n\n Returns\n -------\n np.ndarray\n An n x k dimensional array giving the class probabilities\n corresponding to ``log_alphas`` for each sample.\n \"\"\"\n alphas = np.exp(log_alphas)\n return alphas / np.expand_dims(np.sum(alphas, axis=-1), -1)\n\n\n# classes\n\nclass ResourceTransformer(BaseEstimator, TransformerMixin):\n \"\"\"Featurize the action pairs from the scruples resource.\n\n ``ResourceTransformer`` applies the same featurization pipeline\n (``self.transformer``) to both actions in an instance from the\n scruples resource and then takes the difference of their\n features.\n\n You can set parameters on the ``self.transformer`` attribute by\n prefixing parameters to ``ResourceTransformer`` with\n ``transformer__``.\n\n ``ResourceTransformer`` is particularly useful in front of linear\n models like logistic regression, since applying the model to the\n difference of the features is the same as taking the difference of\n the final scores.\n\n Attributes\n ----------\n See `Parameters`_.\n\n Parameters\n ----------\n transformer : Transformer\n The transformer to apply to the actions.\n \"\"\"\n def __init__(\n self,\n transformer: TransformerMixin\n ) -> None:\n self.transformer = transformer\n\n def set_params(\n self,\n **params: Dict[str, Any]\n ) -> 'ResourceTransformer':\n self_params = {}\n transformer_params = {}\n for param, value in params.items():\n if param.startswith('transformer__'):\n transformer_params[param[13:]] = value\n else:\n self_params[param] = value\n # set the parameters on this instance\n super().set_params(**self_params)\n # set the parameters on the transformer attribute\n self.transformer.set_params(**transformer_params)\n\n return self\n\n def fit(\n self,\n X: pd.DataFrame,\n y: np.ndarray = None\n ) -> 'ResourceTransformer':\n \"\"\"Fit the instance to ``X``.\n\n Fitting an instance of ``ResourceTransformer`` fits its\n ``self.transformer`` attribute to the data. 
The ``y`` argument\n is ignored.\n\n Parameters\n ----------\n X : pd.DataFrame\n The data to fit.\n y : None\n An ignored argument.\n\n Returns\n -------\n self : object\n The instance.\n \"\"\"\n X_ = pd.concat([X['action0'], X['action1']])\n\n self.transformer.fit(X_)\n\n self._fitted = True\n\n return self\n\n def transform(\n self,\n X: pd.DataFrame\n ) -> Any:\n \"\"\"Transform ``X``.\n\n Parameters\n ----------\n X : pd.DataFrame\n The data to transform.\n\n Returns\n -------\n Any\n The difference of the features for the actions derived by\n applying ``self.transformer`` to them.\n \"\"\"\n check_is_fitted(self, '_fitted')\n\n return (\n self.transformer.transform(X['action1'])\n - self.transformer.transform(X['action0'])\n )\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.sum",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"torch.nn.DataParallel",
"numpy.array",
"scipy.special.softmax"
],
[
"pandas.concat",
"numpy.exp",
"sklearn.utils.validation.check_is_fitted",
"numpy.sum"
]
] |
fakegit/RestoreGAN
|
[
"eb64d65da1dba289349530960eafdbcebfa0e9a8"
] |
[
"train.py"
] |
[
"import logging\nfrom functools import partial\n\nimport cv2\nimport torch\nimport torch.optim as optim\nimport tqdm\nimport yaml\nfrom joblib import cpu_count\nfrom torch.utils.data import DataLoader\n\nfrom adversarial_trainer import GANFactory\nfrom dataset import PairedDataset\nfrom metric_counter import MetricCounter\nfrom models.losses import get_loss\nfrom models.models import get_model\nfrom models.networks import get_nets\nfrom schedulers import LinearDecay, WarmRestart\n\ncv2.setNumThreads(0)\n\n\nclass Trainer:\n def __init__(self, config, train: DataLoader, val: DataLoader):\n self.config = config\n self.train_dataset = train\n self.val_dataset = val\n self.adv_lambda = config['model']['adv_lambda']\n self.metric_counter = MetricCounter(config['experiment_desc'])\n self.warmup_epochs = config['warmup_num']\n\n def train(self):\n self._init_params()\n for epoch in range(0, config['num_epochs']):\n if (epoch == self.warmup_epochs) and not (self.warmup_epochs == 0):\n self.netG.module.unfreeze()\n self.optimizer_G = self._get_optim(self.netG.parameters())\n self.scheduler_G = self._get_scheduler(self.optimizer_G)\n self._run_epoch(epoch)\n self._validate(epoch)\n self.scheduler_G.step()\n self.scheduler_D.step()\n\n if self.metric_counter.update_best_model():\n torch.save({\n 'model': self.netG.state_dict()\n }, 'best_{}.h5'.format(self.config['experiment_desc']))\n torch.save({\n 'model': self.netG.state_dict()\n }, 'last_{}.h5'.format(self.config['experiment_desc']))\n print(self.metric_counter.loss_message())\n logging.debug(\"Experiment Name: %s, Epoch: %d, Loss: %s\" % (\n self.config['experiment_desc'], epoch, self.metric_counter.loss_message()))\n\n def _run_epoch(self, epoch):\n self.metric_counter.clear()\n for param_group in self.optimizer_G.param_groups:\n lr = param_group['lr']\n\n epoch_size = config.get('train_batches_per_epoch') or len(self.train_dataset)\n tq = tqdm.tqdm(self.train_dataset, total=epoch_size)\n tq.set_description('Epoch {}, lr {}'.format(epoch, lr))\n i = 0\n for data in tq:\n inputs, targets = self.model.get_input(data)\n outputs = self.netG(inputs)\n loss_D = self._update_d(outputs, targets)\n self.optimizer_G.zero_grad()\n loss_content = self.criterionG(outputs, targets)\n loss_adv = self.adv_trainer.loss_g(outputs, targets)\n loss_G = loss_content + self.adv_lambda * loss_adv\n loss_G.backward()\n self.optimizer_G.step()\n self.metric_counter.add_losses(loss_G.item(), loss_content.item(), loss_D)\n curr_psnr, curr_ssim, img_for_vis = self.model.get_images_and_metrics(inputs, outputs, targets)\n self.metric_counter.add_metrics(curr_psnr, curr_ssim)\n tq.set_postfix(loss=self.metric_counter.loss_message())\n if not i:\n self.metric_counter.add_image(img_for_vis, tag='train')\n i += 1\n if i > epoch_size:\n break\n tq.close()\n self.metric_counter.write_to_tensorboard(epoch)\n\n def _validate(self, epoch):\n self.metric_counter.clear()\n epoch_size = config.get('val_batches_per_epoch') or len(self.val_dataset)\n tq = tqdm.tqdm(self.val_dataset, total=epoch_size)\n tq.set_description('Validation')\n i = 0\n for data in tq:\n inputs, targets = self.model.get_input(data)\n outputs = self.netG(inputs)\n loss_content = self.criterionG(outputs, targets)\n loss_adv = self.adv_trainer.loss_g(outputs, targets)\n loss_G = loss_content + self.adv_lambda * loss_adv\n self.metric_counter.add_losses(loss_G.item(), loss_content.item())\n curr_psnr, curr_ssim, img_for_vis = self.model.get_images_and_metrics(inputs, outputs, targets)\n 
self.metric_counter.add_metrics(curr_psnr, curr_ssim)\n if not i:\n self.metric_counter.add_image(img_for_vis, tag='val')\n i += 1\n if i > epoch_size:\n break\n tq.close()\n self.metric_counter.write_to_tensorboard(epoch, validation=True)\n\n def _update_d(self, outputs, targets):\n if self.config['model']['d_name'] == 'no_gan':\n return 0\n self.optimizer_D.zero_grad()\n loss_D = self.adv_lambda * self.adv_trainer.loss_d(outputs, targets)\n loss_D.backward(retain_graph=True)\n self.optimizer_D.step()\n return loss_D.item()\n\n def _get_optim(self, params):\n if self.config['optimizer']['name'] == 'adam':\n optimizer = optim.Adam(params, lr=self.config['optimizer']['lr'])\n elif self.config['optimizer']['name'] == 'sgd':\n optimizer = optim.SGD(params, lr=self.config['optimizer']['lr'])\n elif self.config['optimizer']['name'] == 'adadelta':\n optimizer = optim.Adadelta(params, lr=self.config['optimizer']['lr'])\n else:\n raise ValueError(\"Optimizer [%s] not recognized.\" % self.config['optimizer']['name'])\n return optimizer\n\n def _get_scheduler(self, optimizer):\n if self.config['scheduler']['name'] == 'plateau':\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,\n mode='min',\n patience=self.config['scheduler']['patience'],\n factor=self.config['scheduler']['factor'],\n min_lr=self.config['scheduler']['min_lr'])\n elif self.config['optimizer']['name'] == 'sgdr':\n scheduler = WarmRestart(optimizer)\n elif self.config['scheduler']['name'] == 'linear':\n scheduler = LinearDecay(optimizer,\n min_lr=self.config['scheduler']['min_lr'],\n num_epochs=self.config['num_epochs'],\n start_epoch=self.config['scheduler']['start_epoch'])\n else:\n raise ValueError(\"Scheduler [%s] not recognized.\" % self.config['scheduler']['name'])\n return scheduler\n\n @staticmethod\n def _get_adversarial_trainer(d_name, net_d, criterion_d):\n if d_name == 'no_gan':\n return GANFactory.create_model('NoGAN')\n elif d_name == 'patch_gan' or d_name == 'multi_scale':\n return GANFactory.create_model('SingleGAN', net_d, criterion_d)\n elif d_name == 'double_gan':\n return GANFactory.create_model('DoubleGAN', net_d, criterion_d)\n else:\n raise ValueError(\"Discriminator Network [%s] not recognized.\" % d_name)\n\n def _init_params(self):\n self.criterionG, criterionD = get_loss(self.config['model'])\n self.netG, netD = get_nets(self.config['model'])\n self.netG.cuda()\n self.adv_trainer = self._get_adversarial_trainer(self.config['model']['d_name'], netD, criterionD)\n self.model = get_model(self.config['model'])\n self.optimizer_G = self._get_optim(filter(lambda p: p.requires_grad, self.netG.parameters()))\n self.optimizer_D = self._get_optim(self.adv_trainer.get_params())\n self.scheduler_G = self._get_scheduler(self.optimizer_G)\n self.scheduler_D = self._get_scheduler(self.optimizer_D)\n\n\nif __name__ == '__main__':\n with open('config/config.yaml', 'r') as f:\n config = yaml.load(f)\n\n batch_size = config.pop('batch_size')\n get_dataloader = partial(DataLoader, batch_size=batch_size, num_workers=cpu_count(), shuffle=True, drop_last=True)\n datasets = map(config.pop, ('train', 'val'))\n datasets = map(PairedDataset.from_config, datasets)\n train, val = map(get_dataloader, datasets)\n trainer = Trainer(config, train=train, val=val)\n trainer.train()\n"
] |
[
[
"torch.optim.Adam",
"torch.optim.Adadelta",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.optim.SGD"
]
] |
Algogator/posthog
|
[
"764e10696b6ee9cba927b38e0789ed896f5d67dd"
] |
[
"posthog/queries/sessions.py"
] |
[
"import datetime\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport pandas as pd\nfrom dateutil.relativedelta import relativedelta\nfrom django.db import connection\nfrom django.db.models import F, Q, QuerySet\nfrom django.db.models.expressions import Window\nfrom django.db.models.functions import Lag\nfrom django.utils.timezone import now\n\nfrom posthog.api.element import ElementSerializer\nfrom posthog.models import ElementGroup, Event, Filter, Team\nfrom posthog.queries.base import BaseQuery, determine_compared_filter\nfrom posthog.utils import append_data, dict_from_cursor_fetchall, friendly_time\n\n\nclass Sessions(BaseQuery):\n def run(self, filter: Filter, team: Team, *args, **kwargs) -> List[Dict[str, Any]]:\n events = (\n Event.objects.filter(team=team)\n .filter(filter.properties_to_Q(team_id=team.pk))\n .add_person_id(team.pk)\n .order_by(\"-timestamp\")\n )\n\n session_type = kwargs.get(\"session_type\", None)\n offset = kwargs.get(\"offset\", 0)\n\n if not filter.date_to:\n filter._date_to = now().isoformat()\n calculated = []\n\n # get compared period\n if filter.compare and filter._date_from != \"all\" and session_type == \"avg\":\n calculated = self.calculate_sessions(\n events.filter(filter.date_filter_Q), session_type, filter, team, offset\n )\n calculated = self._convert_to_comparison(calculated, \"current\")\n\n compare_filter = determine_compared_filter(filter)\n compared_calculated = self.calculate_sessions(\n events.filter(compare_filter.date_filter_Q), session_type, compare_filter, team, offset\n )\n converted_compared_calculated = self._convert_to_comparison(compared_calculated, \"previous\")\n calculated.extend(converted_compared_calculated)\n else:\n # if session_type is None, it's a list of sessions which shouldn't have any date filtering\n if session_type is not None:\n events = events.filter(filter.date_filter_Q)\n calculated = self.calculate_sessions(events, session_type, filter, team, offset)\n\n return calculated\n\n def calculate_sessions(\n self, events: QuerySet, session_type: Optional[str], filter: Filter, team: Team, offset: int\n ) -> List[Dict[str, Any]]:\n\n # format date filter for session view\n _date_gte = Q()\n if session_type is None:\n # if _date_from is not explicitely set we only want to get the last day worth of data\n # otherwise the query is very slow\n if filter._date_from and filter.date_to:\n _date_gte = Q(timestamp__gte=filter.date_from, timestamp__lte=filter.date_to + relativedelta(days=1),)\n else:\n dt = now()\n dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)\n _date_gte = Q(timestamp__gte=dt, timestamp__lte=dt + relativedelta(days=1))\n else:\n if not filter.date_from:\n filter._date_from = (\n Event.objects.filter(team_id=team)\n .order_by(\"timestamp\")[0]\n .timestamp.replace(hour=0, minute=0, second=0, microsecond=0)\n .isoformat()\n )\n\n sessions = (\n events.filter(_date_gte)\n .annotate(\n previous_timestamp=Window(\n expression=Lag(\"timestamp\", default=None),\n partition_by=F(\"distinct_id\"),\n order_by=F(\"timestamp\").asc(),\n )\n )\n .annotate(\n previous_event=Window(\n expression=Lag(\"event\", default=None), partition_by=F(\"distinct_id\"), order_by=F(\"timestamp\").asc(),\n )\n )\n )\n\n sessions_sql, sessions_sql_params = sessions.query.sql_with_params()\n all_sessions = \"\\\n SELECT *,\\\n SUM(new_session) OVER (ORDER BY distinct_id, timestamp) AS global_session_id,\\\n SUM(new_session) OVER (PARTITION BY distinct_id ORDER BY timestamp) AS user_session_id\\\n FROM (SELECT id, 
distinct_id, event, elements_hash, timestamp, properties, CASE WHEN EXTRACT('EPOCH' FROM (timestamp - previous_timestamp)) >= (60 * 30)\\\n OR previous_timestamp IS NULL \\\n THEN 1 ELSE 0 END AS new_session \\\n FROM ({}) AS inner_sessions\\\n ) AS outer_sessions\".format(\n sessions_sql\n )\n\n result: List = []\n if session_type == \"avg\":\n result = self._session_avg(all_sessions, sessions_sql_params, filter)\n elif session_type == \"dist\":\n result = self._session_dist(all_sessions, sessions_sql_params)\n else:\n result = self._session_list(all_sessions, sessions_sql_params, team, filter, offset)\n\n return result\n\n def _session_list(\n self, base_query: str, params: Tuple[Any, ...], team: Team, filter: Filter, offset: int\n ) -> List[Dict[str, Any]]:\n session_list = \"SELECT * FROM (SELECT global_session_id, properties, start_time, length, sessions.distinct_id, event_count, events from\\\n (SELECT\\\n global_session_id,\\\n count(1) as event_count,\\\n MAX(distinct_id) as distinct_id,\\\n EXTRACT('EPOCH' FROM (MAX(timestamp) - MIN(timestamp))) AS length,\\\n MIN(timestamp) as start_time,\\\n array_agg(json_build_object( 'id', id, 'event', event, 'timestamp', timestamp, 'properties', properties, 'elements_hash', elements_hash) ORDER BY timestamp) as events\\\n FROM ({}) as count GROUP BY 1) as sessions\\\n LEFT OUTER JOIN posthog_persondistinctid ON posthog_persondistinctid.distinct_id = sessions.distinct_id\\\n LEFT OUTER JOIN posthog_person ON posthog_person.id = posthog_persondistinctid.person_id\\\n ORDER BY start_time DESC) as ordered_sessions OFFSET %s LIMIT 50\".format(\n base_query\n )\n\n with connection.cursor() as cursor:\n params = params + (offset,)\n cursor.execute(session_list, params)\n sessions = dict_from_cursor_fetchall(cursor)\n\n hash_ids = []\n for session in sessions:\n for event in session[\"events\"]:\n if event.get(\"elements_hash\"):\n hash_ids.append(event[\"elements_hash\"])\n\n groups = self._prefetch_elements(hash_ids, team)\n\n for session in sessions:\n for event in session[\"events\"]:\n try:\n event.update(\n {\n \"elements\": ElementSerializer(\n [group for group in groups if group.hash == event[\"elements_hash\"]][0]\n .element_set.all()\n .order_by(\"order\"),\n many=True,\n ).data\n }\n )\n except IndexError:\n event.update({\"elements\": []})\n return sessions\n\n def _session_avg(self, base_query: str, params: Tuple[Any, ...], filter: Filter) -> List[Dict[str, Any]]:\n def _determineInterval(interval):\n if interval == \"minute\":\n return (\n \"minute\",\n \"min\",\n )\n elif interval == \"hour\":\n return \"hour\", \"H\"\n elif interval == \"week\":\n return \"week\", \"W\"\n elif interval == \"month\":\n return \"month\", \"M\"\n else:\n return \"day\", \"D\"\n\n interval, interval_freq = _determineInterval(filter.interval)\n\n average_length_time = \"SELECT date_trunc('{interval}', timestamp) as start_time,\\\n AVG(length) AS average_session_length_per_day,\\\n SUM(length) AS total_session_length_per_day, \\\n COUNT(1) as num_sessions_per_day\\\n FROM (SELECT global_session_id, EXTRACT('EPOCH' FROM (MAX(timestamp) - MIN(timestamp)))\\\n AS length,\\\n MIN(timestamp) as timestamp FROM ({}) as count GROUP BY 1) as agg group by 1 order by start_time\".format(\n base_query, interval=interval\n )\n\n cursor = connection.cursor()\n cursor.execute(average_length_time, params)\n time_series_avg = cursor.fetchall()\n if len(time_series_avg) == 0:\n return []\n\n date_range = pd.date_range(filter.date_from, filter.date_to, freq=interval_freq,)\n 
df = pd.DataFrame([{\"date\": a[0], \"count\": a[1], \"breakdown\": \"Total\"} for a in time_series_avg])\n if interval == \"week\":\n df[\"date\"] = df[\"date\"].apply(lambda x: x - pd.offsets.Week(weekday=6))\n elif interval == \"month\":\n df[\"date\"] = df[\"date\"].apply(lambda x: x - pd.offsets.MonthEnd(n=0))\n\n df_dates = pd.DataFrame(df.groupby(\"date\").mean(), index=date_range)\n df_dates = df_dates.fillna(0)\n values = [(key, round(value[0])) if len(value) > 0 else (key, 0) for key, value in df_dates.iterrows()]\n\n time_series_data = append_data(values, interval=filter.interval, math=None)\n # calculate average\n totals = [sum(x) for x in list(zip(*time_series_avg))[2:4]]\n overall_average = (totals[0] / totals[1]) if totals else 0\n avg_formatted = friendly_time(overall_average)\n avg_split = avg_formatted.split(\" \")\n\n time_series_data.update(\n {\"label\": \"Average Duration of Session ({})\".format(avg_split[1]), \"count\": int(avg_split[0]),}\n )\n time_series_data.update({\"chartLabel\": \"Average Duration of Session (seconds)\"})\n result = [time_series_data]\n return result\n\n def _session_dist(self, base_query: str, params: Tuple[Any, ...]) -> List[Dict[str, Any]]:\n distribution = \"SELECT COUNT(CASE WHEN length = 0 THEN 1 ELSE NULL END) as first,\\\n COUNT(CASE WHEN length > 0 AND length <= 3 THEN 1 ELSE NULL END) as second,\\\n COUNT(CASE WHEN length > 3 AND length <= 10 THEN 1 ELSE NULL END) as third,\\\n COUNT(CASE WHEN length > 10 AND length <= 30 THEN 1 ELSE NULL END) as fourth,\\\n COUNT(CASE WHEN length > 30 AND length <= 60 THEN 1 ELSE NULL END) as fifth,\\\n COUNT(CASE WHEN length > 60 AND length <= 180 THEN 1 ELSE NULL END) as sixth,\\\n COUNT(CASE WHEN length > 180 AND length <= 600 THEN 1 ELSE NULL END) as seventh,\\\n COUNT(CASE WHEN length > 600 AND length <= 1800 THEN 1 ELSE NULL END) as eighth,\\\n COUNT(CASE WHEN length > 1800 AND length <= 3600 THEN 1 ELSE NULL END) as ninth,\\\n COUNT(CASE WHEN length > 3600 THEN 1 ELSE NULL END) as tenth\\\n FROM (SELECT global_session_id, EXTRACT('EPOCH' FROM (MAX(timestamp) - MIN(timestamp)))\\\n AS length FROM ({}) as count GROUP BY 1) agg\".format(\n base_query\n )\n\n dist_labels = [\n \"0 seconds (1 event)\",\n \"0-3 seconds\",\n \"3-10 seconds\",\n \"10-30 seconds\",\n \"30-60 seconds\",\n \"1-3 minutes\",\n \"3-10 minutes\",\n \"10-30 minutes\",\n \"30-60 minutes\",\n \"1+ hours\",\n ]\n cursor = connection.cursor()\n cursor.execute(distribution, params)\n calculated = cursor.fetchall()\n result = [{\"label\": dist_labels[index], \"count\": calculated[0][index]} for index in range(len(dist_labels))]\n return result\n\n def _convert_to_comparison(self, trend_entity: List[Dict[str, Any]], label: str) -> List[Dict[str, Any]]:\n for entity in trend_entity:\n days = [i for i in range(len(entity[\"days\"]))]\n labels = [\"{} {}\".format(\"Day\", i) for i in range(len(entity[\"labels\"]))]\n entity.update(\n {\n \"labels\": labels,\n \"days\": days,\n \"chartLabel\": \"{} - {}\".format(entity[\"label\"], label),\n \"dates\": entity[\"days\"],\n \"compare\": True,\n }\n )\n return trend_entity\n\n def _prefetch_elements(self, hash_ids: List[str], team: Team) -> QuerySet:\n groups = ElementGroup.objects.none()\n if len(hash_ids) > 0:\n groups = ElementGroup.objects.filter(team=team, hash__in=hash_ids).prefetch_related(\"element_set\")\n return groups\n"
] |
[
[
"pandas.offsets.MonthEnd",
"pandas.offsets.Week",
"pandas.DataFrame",
"pandas.date_range"
]
] |
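The apis column of this record tags four pandas calls (`pandas.date_range`, `pandas.DataFrame`, `pandas.offsets.Week`, `pandas.offsets.MonthEnd`) that `_session_avg` combines to turn sparse per-bucket averages into a gap-free time series. Below is a minimal, self-contained sketch of that pattern with made-up dates and counts; it is not the PostHog code itself, and `numeric_only=True` is added only so the toy frame's string column is ignored by the mean.

```python
# Minimal sketch (made-up data, not the PostHog implementation) of the pandas
# pattern used by _session_avg above: snap buckets to week starts, then
# reindex onto a complete date_range so missing weeks become 0.
import pandas as pd

raw = [  # hypothetical (bucket_start, average_session_length) rows
    ("2020-03-02", 12.0),
    ("2020-03-09", 30.0),
    ("2020-03-23", 18.0),   # the week of 2020-03-15 is missing on purpose
]

date_range = pd.date_range("2020-03-01", "2020-03-31", freq="W")  # Sundays

df = pd.DataFrame(
    [{"date": pd.Timestamp(d), "count": c, "breakdown": "Total"} for d, c in raw]
)
# Roll each date back to the Sunday that starts its week, mirroring
# `x - pd.offsets.Week(weekday=6)` in the record above; monthly data would use
# `x - pd.offsets.MonthEnd(n=0)` instead.
df["date"] = df["date"].apply(lambda x: x - pd.offsets.Week(weekday=6))

# Re-align onto the full range; weeks with no data show up as 0, not as gaps.
df_dates = pd.DataFrame(
    df.groupby("date").mean(numeric_only=True), index=date_range
).fillna(0)
print(df_dates["count"].to_dict())
```

The reindex-and-fill step is what lets the SQL buckets above be charted against a fixed set of interval labels even when some intervals contain no sessions.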
pcmoritz/flow
|
[
"bc97132e9e2d05262bb6bbad5bda173fd9f4ae92"
] |
[
"flow/benchmarks/baselines/merge012.py"
] |
[
"\"\"\"Evaluates the baseline performance of merge without RL control.\n\nBaseline is no AVs.\n\"\"\"\n\nfrom flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \\\n InFlows\nfrom flow.scenarios.merge.scenario import ADDITIONAL_NET_PARAMS\nfrom flow.core.vehicles import Vehicles\nfrom flow.core.experiment import SumoExperiment\nfrom flow.controllers import SumoCarFollowingController\nfrom flow.scenarios.merge.scenario import MergeScenario\nfrom flow.scenarios.merge.gen import MergeGenerator\nfrom flow.envs.merge import WaveAttenuationMergePOEnv\nimport numpy as np\n\n# time horizon of a single rollout\nHORIZON = int(750*(0.5/0.2))\n# inflow rate at the highway\nFLOW_RATE = 2000\n# percent of autonomous vehicles\nRL_PENETRATION = 0.1\n# num_rl term (see ADDITIONAL_ENV_PARAMs)\nNUM_RL = 5\n\n\ndef merge_baseline(num_runs, sumo_binary=\"sumo-gui\"):\n \"\"\"Run script for all merge baselines.\n\n Parameters\n ----------\n num_runs : int\n number of rollouts the performance of the environment is evaluated\n over\n sumo_binary: str, optional\n specifies whether to use sumo's gui during execution\n\n Returns\n -------\n SumoExperiment\n class needed to run simulations\n \"\"\"\n # We consider a highway network with an upstream merging lane producing\n # shockwaves\n additional_net_params = ADDITIONAL_NET_PARAMS.copy()\n additional_net_params[\"merge_lanes\"] = 1\n additional_net_params[\"highway_lanes\"] = 1\n additional_net_params[\"pre_merge_length\"] = 500\n\n # RL vehicles constitute 5% of the total number of vehicles\n vehicles = Vehicles()\n vehicles.add(veh_id=\"human\",\n acceleration_controller=(SumoCarFollowingController, {}),\n speed_mode=\"no_collide\",\n num_vehicles=5)\n\n # Vehicles are introduced from both sides of merge, with RL vehicles\n # entering from the highway portion as well\n inflow = InFlows()\n inflow.add(veh_type=\"human\", edge=\"inflow_highway\",\n vehs_per_hour=FLOW_RATE,\n departLane=\"free\", departSpeed=10)\n inflow.add(veh_type=\"human\", edge=\"inflow_merge\", vehs_per_hour=100,\n departLane=\"free\", departSpeed=7.5)\n\n sumo_params = SumoParams(\n restart_instance=False,\n sim_step=0.2, # time step decreased to prevent occasional crashes\n sumo_binary=sumo_binary,\n )\n\n env_params = EnvParams(\n horizon=HORIZON,\n sims_per_step=5, # value raised to ensure sec/step match experiment\n warmup_steps=0,\n evaluate=True, # Set to True to evaluate traffic metric performance\n additional_params={\n \"max_accel\": 1.5,\n \"max_decel\": 1.5,\n \"target_velocity\": 20,\n \"num_rl\": NUM_RL,\n },\n )\n\n initial_config = InitialConfig()\n\n net_params = NetParams(\n in_flows=inflow,\n no_internal_links=False,\n additional_params=additional_net_params,\n )\n\n scenario = MergeScenario(name=\"merge\",\n generator_class=MergeGenerator,\n vehicles=vehicles,\n net_params=net_params,\n initial_config=initial_config)\n\n env = WaveAttenuationMergePOEnv(env_params, sumo_params, scenario)\n\n exp = SumoExperiment(env, scenario)\n\n results = exp.run(num_runs, HORIZON)\n avg_speed = np.mean(results[\"mean_returns\"])\n\n return avg_speed\n\n\nif __name__ == \"__main__\":\n runs = 2 # number of simulations to average over\n res = merge_baseline(num_runs=runs)\n\n print('---------')\n print('The average speed across {} runs is {}'.format(runs, res))\n"
] |
[
[
"numpy.mean"
]
] |
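The only API tagged for the merge baseline above is `numpy.mean`, which the script applies to the per-run returns coming back from `exp.run()`. A tiny sketch of that final aggregation step, with a made-up `results` dict standing in for what `SumoExperiment.run` would return:

```python
# Minimal sketch (hypothetical numbers, not an actual Flow/SUMO run): average
# the per-rollout returns the way merge_baseline does with numpy.mean.
import numpy as np

results = {"mean_returns": [812.4, 790.1]}  # pretend output of exp.run(2, HORIZON)
avg = np.mean(results["mean_returns"])
print("The average speed across {} runs is {}".format(len(results["mean_returns"]), avg))
```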
makailove123/tensor2tensor
|
[
"dde1661ab04149d02fb74ee62d0c82157f5e046a",
"f5d73746f7a46dc18fdd541b1f9265c7f3ad2918"
] |
[
"tensor2tensor/data_generators/problem.py",
"tensor2tensor/layers/common_layers_test.py"
] |
[
"# coding=utf-8\n# Copyright 2020 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base class for problem/dataset definitions.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nimport os\nimport random\nimport six\n\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.utils import contrib\nfrom tensor2tensor.utils import data_reader\nfrom tensor2tensor.utils import hparam\nfrom tensor2tensor.utils import metrics\nfrom tensor2tensor.utils import mlperf_log\n\nimport tensorflow.compat.v1 as tf\n# pylint: disable=g-import-not-at-top\ntry:\n from tensorflow.contrib.tpu.python.tpu import tpu_config\nexcept ImportError:\n # TF 2.0 doesn't ship with contrib.\n tpu_config = None\n# pylint: enable=g-import-not-at-top\n\n\n\nclass DatasetSplit(object):\n TRAIN = tf.estimator.ModeKeys.TRAIN\n EVAL = tf.estimator.ModeKeys.EVAL\n TEST = \"test\"\n\n\nclass SpaceID(object):\n \"\"\"Input and target space ids. Add more as needed.\"\"\"\n # Generic / unknown output space (default)\n GENERIC = 0\n # Image labels\n IMAGE_LABEL = 1\n # English characters\n EN_CHR = 2\n # English tokens\n EN_TOK = 3\n # English bpe tokens\n EN_BPE_TOK = 4\n # French characters\n FR_CHR = 5\n # French tokens\n FR_TOK = 6\n # German characters\n DE_CHR = 7\n # German tokens\n DE_TOK = 8\n # German bpe tokens\n DE_BPE_TOK = 9\n # Digit cipher lexicon 0\n DIGIT_0 = 10\n # Digit cipher lexicon 1\n DIGIT_1 = 11\n # Audio waveform domain\n AUDIO_WAV = 12\n # Audio spectral domain\n AUDIO_SPECTRAL = 13\n # Parse characters\n PARSE_CHR = 14\n # Parse tokens\n PARSE_TOK = 15\n # Chinese tokens\n ZH_TOK = 16\n # Icelandic characters\n ICE_CHAR = 17\n # Icelandic tokens\n ICE_TOK = 18\n # Icelandic parse tokens\n ICE_PARSE_TOK = 19\n # Macedonian tokens\n MK_TOK = 20\n # Czech tokens\n CS_TOK = 21\n # Czech characters\n CS_CHR = 22\n # Genetic bases (ACTG)\n DNA = 23\n # Real numbers\n REAL = 24\n # Images\n IMAGE = 25\n # Peptide\n PEPTIDE = 26\n # Python\n PY_TOK = 27\n # C++\n CPP_TOK = 28\n # Strokes\n STROKES = 29\n # Pickled Python\n PICKLED_PYTHON = 30\n\n\nclass TaskID(object):\n \"\"\"Problem specific task ids. 
Add more as needed.\"\"\"\n # English characters\n EN_CHR = 2\n # English characters sentiment\n EN_CHR_SENT = 3\n # English Premise Hypothesis pair\n EN_PR_HYP = 4\n # English NLI\n EN_NLI = 5\n # COLA\n COLA = 6\n # Enligh Question Context pair\n EN_Q_CONT = 7\n # English similarity task\n EN_SIM = 8\n # English sentence pair\n EN_SENT_PAIR = 9\n # 3 class NLI\n THREE_CL_NLI = 10\n\n\ndef default_model_hparams():\n return hparam.HParams(\n max_input_seq_length=0,\n max_target_seq_length=0,\n prepend_mode=\"none\",\n split_to_length=0,\n data_dir=None)\n\n\ndef preprocess_example_common(example, mode, hparams):\n \"\"\"Preprocessing steps common to all models.\"\"\"\n if \"inputs\" in example and hparams.max_input_seq_length > 0:\n example[\"inputs\"] = example[\"inputs\"][:hparams.max_input_seq_length]\n if hparams.prepend_mode != \"none\":\n if mode == tf.estimator.ModeKeys.PREDICT:\n example[\"partial_targets\"] = tf.concat([example[\"inputs\"], [0]], 0)\n else:\n example[\"targets\"] = tf.concat(\n [example[\"inputs\"], [0], example[\"targets\"]], 0)\n if \"targets\" in example and hparams.max_target_seq_length > 0:\n example[\"targets\"] = example[\"targets\"][:hparams.max_target_seq_length]\n if hparams.split_to_length:\n new_example = {}\n for k, v in six.iteritems(example):\n if k == \"targets\" or k == \"inputs\":\n new_example[k] = tf.reshape(v, [-1, hparams.split_to_length, 1, 1])\n else:\n tf.logging.warning(\"Dropping feature %s\" % k)\n return tf.data.Dataset.from_tensor_slices(new_example)\n return example\n\n\nclass Problem(object):\n \"\"\"Problem base class. Specifies a T2T problem.\n\n Problems unify the specification of a problem for data generation, training,\n and inference.\n\n New problems are specified by the following methods:\n\n Data generation:\n * generate_data(data_dir, tmp_dir)\n - Generate training and dev datasets into data_dir.\n - Additional files, e.g. vocabulary files, should also be written to\n data_dir. Vocab files are newline-separated files with each line\n containing a token. The standard convention for the filename is to\n set it to be\n ${Problem.vocab_filename}.${Problem.targeted_vocab_size}\n - Downloads and other files can be written to tmp_dir\n - If you have a training and dev generator, you can generate the\n training and dev datasets with\n generator_utils.generate_dataset_and_shuffle.\n - Use the self.training_filepaths and self.dev_filepaths functions to\n get sharded filenames. 
If shuffled=False, the filenames will contain\n an \"unshuffled\" suffix; you should then shuffle the data\n shard-by-shard with generator_utils.shuffle_dataset.\n - Allows to specify the number of shards, optionally (can be omitted).\n - Subclasses must override\n * dataset_filename()\n - Base filename for problem.\n - Defaults to registered name (self.name).\n\n Training:\n * hparams(defaults, model_hparams)\n - Specify the problem hyperparameters (see _default_hparams)\n - Mutate defaults as needed\n * example_reading_spec\n - Specify the names and types of the features on disk.\n - Specify tf.contrib.slim.tfexample_decoder\n * preprocess_example(example, mode, hparams)\n - Preprocess the example feature dict from feature name to Tensor or\n SparseTensor.\n - Used in training, eval, and inference (specified by mode).\n\n Eval:\n * eval_metrics\n - Specify the set of evaluation metrics for this problem.\n * eval_hooks\n - Specify the set of evalueation hooks for this problem.\n\n Inference:\n * feature_encoders(data_dir)\n - Return a dict of <feature name, TextEncoder> for encoding and decoding\n inference input/output.\n - Defaults to TextEncoder for inputs and targets.\n \"\"\"\n\n # ============================================================================\n # BEGIN SUBCLASS INTERFACE\n # ============================================================================\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n raise NotImplementedError()\n\n @property\n def multiprocess_generate(self):\n \"\"\"Whether to generate the data in multiple parallel processes.\"\"\"\n return False\n\n @property\n def num_generate_tasks(self):\n \"\"\"Needed if multiprocess_generate is True.\"\"\"\n raise NotImplementedError()\n\n @property\n def num_training_examples(self):\n \"\"\"Used when mixing problems - how many examples are in the dataset.\"\"\"\n raise NotImplementedError()\n\n def prepare_to_generate(self, data_dir, tmp_dir):\n \"\"\"Prepare to generate data in parallel on different processes.\n\n This function is called if multiprocess_generate is True.\n\n Some things that might need to be done once are downloading the data\n if it is not yet downloaded, and building the vocabulary.\n\n Args:\n data_dir: a string\n tmp_dir: a string\n \"\"\"\n raise NotImplementedError()\n\n def hparams(self, defaults, model_hparams):\n pass\n\n def max_length(self, model_hparams):\n \"\"\"Maximum sequence length.\n\n Problems with fixed length should override.\n\n Args:\n model_hparams: model hyperparameters\n Returns:\n an integer\n \"\"\"\n return (model_hparams.split_to_length or model_hparams.max_length or\n model_hparams.batch_size)\n\n def tpu_batch_size_per_shard(self, model_hparams):\n \"\"\"Batch size in examples per TPU core.\n\n Args:\n model_hparams: model hyperparameters\n Returns:\n an integer\n \"\"\"\n if self.batch_size_means_tokens and not model_hparams.use_fixed_batch_size:\n return model_hparams.batch_size // self.max_length(model_hparams)\n else:\n return model_hparams.batch_size\n\n @property\n def batch_size_means_tokens(self):\n \"\"\"Do we specify hparams.batch_size in tokens per datashard per batch.\n\n This is generally done for text problems.\n\n If False, we assume that batch sizes are specified in examples per\n datashard per batch.\n\n TODO(noam): we should be more explicit and replace the hyperparameter\n batch size with two hyperparameters:\n hparams.examples_per_batch_per_datashard\n hparams.tokens_per_batch_per_datashard\n\n Returns:\n a boolean\n \"\"\"\n 
return False\n\n @property\n def skip_random_fraction_when_training(self):\n \"\"\"Skip a random number of examples at the beginning of training.\"\"\"\n # Skip a random fraction at the beginning of the stream. The skip is\n # essential for synchronous highly-parallel training to avoid multiple\n # replicas reading the same data in lock-step. So keep this true unless\n # you have a very specific setting in which it needs to be turned off.\n return True\n\n def dataset_filename(self):\n return self.name\n\n def feature_encoders(self, data_dir):\n del data_dir\n return {\n \"inputs\": text_encoder.TextEncoder(),\n \"targets\": text_encoder.TextEncoder()\n }\n\n def example_reading_spec(self):\n \"\"\"Define how data is serialized to file and read back.\n\n Returns:\n data_fields: A dictionary mapping data names to its feature type.\n data_items_to_decoders: A dictionary mapping data names to TF Example\n decoders, to be used when reading back TF examples from disk.\n \"\"\"\n data_fields = {\n \"inputs\": tf.VarLenFeature(tf.int64),\n \"targets\": tf.VarLenFeature(tf.int64)\n }\n data_items_to_decoders = None\n return (data_fields, data_items_to_decoders)\n\n def preprocess_example(self, example, mode, hparams):\n \"\"\"Runtime preprocessing.\n\n Return a dict or a tf.data.Dataset.from_tensor_slices (if you want each\n example to turn into multiple).\n\n Args:\n example: dict, features\n mode: tf.estimator.ModeKeys\n hparams: HParams, model hyperparameters\n\n Returns:\n dict or Dataset\n \"\"\"\n return preprocess_example_common(example, mode, hparams)\n\n def eval_metrics(self):\n return [\n metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,\n metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY\n ]\n\n @property\n def all_metrics_fns(self):\n return metrics.METRICS_FNS\n\n def eval_metric_fns(self, model_hparams):\n del model_hparams\n metric_names = self.eval_metrics()\n if not all([m in self.all_metrics_fns for m in metric_names]):\n error_str = (\"Unrecognized metric. Problem %s specified metrics \"\n \"%s. 
Recognized metrics are %s.\")\n raise ValueError(error_str % (self.name,\n metric_names,\n list(self.all_metrics_fns.keys())))\n return {\n metric_name: self.all_metrics_fns[metric_name]\n for metric_name in metric_names\n }\n\n def eval_hooks(self, features, logits, hparams):\n del features, logits, hparams\n return []\n\n @property\n def task_id(self):\n if self._task_id == -1 and hasattr(self, \"global_task_id\"):\n self._task_id = self.global_task_id()\n return self._task_id\n\n def set_task_id(self, new_task_id):\n self._task_id = new_task_id\n\n # ============================================================================\n # END SUBCLASS INTERFACE\n # ============================================================================\n\n def preprocess(self, dataset, mode, hparams, interleave=True):\n \"\"\"Runtime preprocessing on the whole dataset.\n\n Return a tf.data.Dataset -- the preprocessed version of the given one.\n By default this function calls preprocess_example.\n\n Args:\n dataset: the Dataset of already decoded but not yet preprocessed features.\n mode: tf.estimator.ModeKeys\n hparams: HParams, model hyperparameters\n interleave: bool, whether to use parallel_interleave, which is faster\n but will alter the order of samples non-deterministically, or flat_map,\n which is slower but will preserve the sample order.\n\n Returns:\n a Dataset\n \"\"\"\n def _preprocess(example):\n examples = self.preprocess_example(example, mode, hparams)\n if not isinstance(examples, tf.data.Dataset):\n examples = tf.data.Dataset.from_tensors(examples)\n return examples\n\n if interleave:\n dataset = dataset.apply(\n tf.data.experimental.parallel_interleave(\n _preprocess, sloppy=True, cycle_length=8))\n else:\n dataset = dataset.flat_map(_preprocess)\n\n return dataset\n\n def training_filepaths(self, data_dir, num_shards, shuffled):\n file_basename = self.dataset_filename()\n if not shuffled:\n file_basename += generator_utils.UNSHUFFLED_SUFFIX\n return generator_utils.train_data_filenames(file_basename, data_dir,\n num_shards)\n\n def dev_filepaths(self, data_dir, num_shards, shuffled):\n file_basename = self.dataset_filename()\n if not shuffled:\n file_basename += generator_utils.UNSHUFFLED_SUFFIX\n return generator_utils.dev_data_filenames(file_basename, data_dir,\n num_shards)\n\n def test_filepaths(self, data_dir, num_shards, shuffled):\n file_basename = self.dataset_filename()\n if not shuffled:\n file_basename += generator_utils.UNSHUFFLED_SUFFIX\n return generator_utils.test_data_filenames(file_basename, data_dir,\n num_shards)\n\n def data_filepaths(self, split, output_dir, num_shards, shuffled):\n if split == DatasetSplit.TRAIN:\n return self.training_filepaths(output_dir, num_shards, shuffled)\n elif split == DatasetSplit.EVAL:\n return self.dev_filepaths(output_dir, num_shards, shuffled)\n elif split == DatasetSplit.TEST:\n return self.test_filepaths(output_dir, num_shards, shuffled)\n else:\n raise ValueError(\"Unknown value for split: %s\" % split)\n\n def filepattern(self, data_dir, mode, shard=None):\n \"\"\"Get filepattern for data files for mode.\n\n Matches mode to a suffix.\n * DatasetSplit.TRAIN: train\n * DatasetSplit.EVAL: dev\n * DatasetSplit.TEST: test\n * tf.estimator.ModeKeys.PREDICT: dev\n\n Args:\n data_dir: str, data directory.\n mode: DatasetSplit\n shard: int, if provided, will only read data from the specified shard.\n\n Returns:\n filepattern str\n \"\"\"\n if not os.path.isdir(data_dir):\n ret = data_dir.split(\",\")\n if len(ret) > 1:\n return ret\n return 
data_dir\n path = os.path.join(data_dir, self.dataset_filename())\n shard_str = \"-%05d\" % shard if shard is not None else \"\"\n if mode == DatasetSplit.TRAIN:\n suffix = \"train\"\n elif mode in [DatasetSplit.EVAL, tf.estimator.ModeKeys.PREDICT]:\n suffix = \"dev\"\n else:\n assert mode == DatasetSplit.TEST\n suffix = \"test\"\n\n return \"%s-%s%s*\" % (path, suffix, shard_str)\n\n def __init__(self, was_reversed=False, was_copy=False):\n \"\"\"Create a Problem.\n\n Args:\n was_reversed: bool, whether to reverse inputs and targets.\n was_copy: bool, whether to copy inputs to targets. Can be composed with\n was_reversed so that if both are true, the targets become the inputs,\n which are then copied to targets so that the task is targets->targets.\n \"\"\"\n self._was_reversed = was_reversed\n self._was_copy = was_copy\n self._encoders = None\n self._hparams = None\n self._feature_info = None\n self._task_id = -1\n\n @property\n def was_reversed(self):\n \"\"\"Whether the problem was reversed.\"\"\"\n return self._was_reversed\n\n def get_feature_encoders(self, data_dir=None):\n if self._encoders is None:\n self._encoders = self.feature_encoders(data_dir)\n return self._encoders\n\n def get_hparams(self, model_hparams=None):\n \"\"\"Returns problem_hparams.\"\"\"\n if self._hparams is not None:\n return self._hparams\n\n if model_hparams is None:\n model_hparams = default_model_hparams()\n\n if self._encoders is None:\n data_dir = (model_hparams and hasattr(model_hparams, \"data_dir\") and\n model_hparams.data_dir) or None\n self.get_feature_encoders(data_dir)\n\n hp = _default_hparams()\n ret = self.hparams(hp, model_hparams)\n if ret is not None:\n raise ValueError(\"The Problem subclass hparams function should mutate \"\n \"the defaults passed in and return None.\")\n\n hp.add_hparam(\"vocabulary\", self._encoders)\n hp.add_hparam(\"was_reversed\", self._was_reversed)\n hp.add_hparam(\"was_copy\", self._was_copy)\n\n if self._was_reversed:\n _reverse_problem_hparams(hp)\n if self._was_copy:\n _copy_problem_hparams(hp)\n\n self._hparams = hp\n return self._hparams\n\n def maybe_reverse_features(self, feature_map):\n \"\"\"Reverse features between inputs and targets if the problem is '_rev'.\"\"\"\n if not self._was_reversed:\n return\n inputs = feature_map.pop(\"inputs\", None)\n targets = feature_map.pop(\"targets\", None)\n inputs_seg = feature_map.pop(\"inputs_segmentation\", None)\n targets_seg = feature_map.pop(\"targets_segmentation\", None)\n inputs_pos = feature_map.pop(\"inputs_position\", None)\n targets_pos = feature_map.pop(\"targets_position\", None)\n if inputs is not None:\n feature_map[\"targets\"] = inputs\n if targets is not None:\n feature_map[\"inputs\"] = targets\n if inputs_seg is not None:\n feature_map[\"targets_segmentation\"] = inputs_seg\n if targets_seg is not None:\n feature_map[\"inputs_segmentation\"] = targets_seg\n if inputs_pos is not None:\n feature_map[\"targets_position\"] = inputs_pos\n if targets_pos is not None:\n feature_map[\"inputs_position\"] = targets_pos\n\n def maybe_copy_features(self, feature_map):\n if not self._was_copy:\n return\n feature_map[\"targets\"] = feature_map[\"inputs\"]\n if (\"inputs_segmentation\" in feature_map and\n \"targets_segmentation\" not in feature_map):\n feature_map[\"targets_segmentation\"] = feature_map[\"inputs_segmentation\"]\n if (\"inputs_position\" in feature_map and\n \"targets_position\" not in feature_map):\n feature_map[\"targets_position\"] = feature_map[\"inputs_position\"]\n\n def 
maybe_reverse_and_copy(self, example):\n self.maybe_reverse_features(example)\n self.maybe_copy_features(example)\n return example\n\n def dataset(self,\n mode,\n data_dir=None,\n num_threads=None,\n output_buffer_size=None,\n shuffle_files=None,\n hparams=None,\n preprocess=True,\n dataset_split=None,\n shard=None,\n partition_id=0,\n num_partitions=1,\n shuffle_buffer_size=1024,\n max_records=-1):\n \"\"\"Build a Dataset for this problem.\n\n Args:\n mode: tf.estimator.ModeKeys; determines which files to read from.\n data_dir: directory that contains data files.\n num_threads: int, number of threads to use for decode and preprocess\n Dataset.map calls.\n output_buffer_size: int, how many elements to prefetch at end of pipeline.\n shuffle_files: whether to shuffle input files. Default behavior (i.e. when\n shuffle_files=None) is to shuffle if mode == TRAIN.\n hparams: HParams; hparams to be passed to\n Problem.preprocess_example and Problem.hparams. If None, will use a\n default set that is a no-op.\n preprocess: bool, whether to map the Dataset through\n Problem.preprocess_example.\n dataset_split: DatasetSplit, which split to read data\n from (TRAIN:\"-train\", EVAL:\"-dev\", \"test\":\"-test\"). Defaults to mode.\n shard: int, if provided, will only read data from the specified shard.\n partition_id: integer - which partition of the dataset to read from\n num_partitions: how many partitions in the dataset\n shuffle_buffer_size: if shuffle_files is True, this is the buffer size\n used to shuffle records.\n max_records: int, number of records to truncate to.\n\n Returns:\n Dataset containing dict<feature name, Tensor>.\n\n Raises:\n ValueError: if num_partitions is greater than the number of data files.\n \"\"\"\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n shuffle_files = shuffle_files or shuffle_files is None and is_training\n\n dataset_split = dataset_split or mode\n assert data_dir\n\n if hparams is None:\n hparams = default_model_hparams()\n\n if not hasattr(hparams, \"data_dir\"):\n hparams.add_hparam(\"data_dir\", data_dir)\n if not hparams.data_dir:\n hparams.data_dir = data_dir\n # Construct the Problem's hparams so that items within it are accessible\n _ = self.get_hparams(hparams)\n\n data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard)\n tf.logging.info(\"Reading data files from %s\", data_filepattern)\n data_files = sorted(\n contrib.slim().parallel_reader.get_data_files(data_filepattern))\n\n # Functions used in dataset transforms below. 
`filenames` can be either a\n # `tf.string` tensor or `tf.data.Dataset` containing one or more filenames.\n def _load_records_and_preprocess(filenames):\n \"\"\"Reads files from a string tensor or a dataset of filenames.\"\"\"\n # Load records from file(s) with an 8MiB read buffer.\n dataset = tf.data.TFRecordDataset(filenames, buffer_size=8 * 1024 * 1024)\n # Decode.\n dataset = dataset.map(self.decode_example, num_parallel_calls=num_threads)\n # Preprocess if requested.\n # Note that preprocessing should happen per-file as order may matter.\n if preprocess:\n dataset = self.preprocess(dataset, mode, hparams,\n interleave=shuffle_files)\n return dataset\n\n if len(data_files) < num_partitions:\n raise ValueError(\n \"number of data files (%d) must be at least the number of hosts (%d)\"\n % (len(data_files), num_partitions))\n data_files = [f for (i, f) in enumerate(data_files)\n if i % num_partitions == partition_id]\n tf.logging.info(\n \"partition: %d num_data_files: %d\" % (partition_id, len(data_files)))\n if shuffle_files:\n mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER)\n random.shuffle(data_files)\n\n dataset = tf.data.Dataset.from_tensor_slices(tf.constant(data_files))\n # Create data-set from files by parsing, pre-processing and interleaving.\n if shuffle_files:\n dataset = dataset.apply(\n tf.data.experimental.parallel_interleave(\n _load_records_and_preprocess, sloppy=True, cycle_length=8))\n else:\n dataset = _load_records_and_preprocess(dataset)\n\n dataset = dataset.map(\n self.maybe_reverse_and_copy, num_parallel_calls=num_threads)\n dataset = dataset.take(max_records)\n\n ## Shuffle records only for training examples.\n if shuffle_files and is_training:\n dataset = dataset.shuffle(shuffle_buffer_size)\n if hparams.get(\"pack_dataset\", False):\n dataset = generator_utils.pack_dataset(\n dataset, hparams.max_length, keys=[\"inputs\", \"targets\"],\n use_custom_ops=hparams.get(\"use_custom_ops\", False))\n if output_buffer_size:\n dataset = dataset.prefetch(output_buffer_size)\n\n return dataset\n\n def decode_example(self, serialized_example):\n \"\"\"Return a dict of Tensors from a serialized tensorflow.Example.\"\"\"\n data_fields, data_items_to_decoders = self.example_reading_spec()\n # Necessary to rejoin examples in the correct order with the Cloud ML Engine\n # batch prediction API.\n data_fields[\"batch_prediction_key\"] = tf.FixedLenFeature([1], tf.int64, 0)\n\n if getattr(self._hparams, \"sampling_method\", \"\") == \"random_per_example\":\n data_fields[\"sampling_temp\"] = tf.FixedLenFeature(\n [1], tf.float32, getattr(self._hparams, \"sampling_temp\", 1.0))\n data_fields[\"sampling_keep_top_k\"] = tf.FixedLenFeature(\n [1], tf.int64, getattr(self._hparams, \"sampling_keep_top_k\", -1))\n\n if data_items_to_decoders is None:\n data_items_to_decoders = {\n field: contrib.slim().tfexample_decoder.Tensor(field)\n for field in data_fields\n }\n\n decoder = contrib.slim().tfexample_decoder.TFExampleDecoder(\n data_fields, data_items_to_decoders)\n\n decode_items = list(sorted(data_items_to_decoders))\n decoded = decoder.decode(serialized_example, items=decode_items)\n return dict(zip(decode_items, decoded))\n\n @property\n def decode_hooks(self):\n \"\"\"List of functions to be run after full decodes have been produced.\n\n Returns:\n List of functions. 
Each function should expect a single argument, an\n instance of decoding.DecodeHookArgs and optionally return a list of\n tf.Summary.Value objects.\n \"\"\"\n return []\n\n @property\n def has_inputs(self):\n return \"inputs\" in self.get_feature_encoders()\n\n @property\n def feature_info(self):\n \"\"\"Retrieve dict<feature name, FeatureInfo>.\n\n Must first call Problem.get_hparams or Problem.dataset to have the problem's\n internal hparams already constructed.\n\n Returns:\n dict<feature name, FeatureInfo>\n \"\"\"\n if self._feature_info is not None:\n return self._feature_info\n\n assert self._hparams is not None\n\n hp = self.get_hparams()\n if self.has_inputs:\n in_id = hp.input_space_id\n out_id = hp.target_space_id\n\n features = collections.defaultdict(FeatureInfo)\n for feature_name, modality_cls in six.iteritems(hp.modality):\n finfo = features[feature_name]\n finfo.modality = modality_cls\n finfo.vocab_size = hp.vocab_size[feature_name]\n\n vocabs = hp.vocabulary\n for name, encoder in six.iteritems(vocabs):\n features[name].encoder = encoder\n\n if self.has_inputs:\n features[\"inputs\"].space_id = in_id\n features[\"targets\"].space_id = out_id\n\n self._feature_info = features\n return features\n\n def make_estimator_input_fn(self,\n mode,\n hparams,\n data_dir=None,\n force_repeat=False,\n prevent_repeat=False,\n dataset_kwargs=None):\n \"\"\"Return input_fn wrapped for Estimator.\"\"\"\n\n def estimator_input_fn(params, config):\n return self.input_fn(\n mode,\n hparams,\n data_dir=data_dir,\n params=params,\n config=config,\n force_repeat=force_repeat,\n prevent_repeat=prevent_repeat,\n dataset_kwargs=dataset_kwargs)\n\n return estimator_input_fn\n\n def _dataset_partition(self, mode, config, params):\n \"\"\"Which part of the training data to read.\n\n If there are multiple parallel calls to input_fn (multiple TPU hosts),\n then we want each one to read from a separate partition of the training\n data.\n\n Args:\n mode: tf.estimator.ModeKeys\n config: RunConfig\n params: A dict that contains parameters.\n Returns:\n partition_id: an integer\n num_partitions: an integer\n \"\"\"\n if mode != tf.estimator.ModeKeys.TRAIN or not hasattr(config, \"tpu_config\"):\n # Reset in the case when using TPU but alternating TRAIN and EVAL.\n self._next_partition_id = 0\n return 0, 1\n phift = config.tpu_config.per_host_input_for_training\n # This is the mesh-tensorflow case.\n if (hasattr(tpu_config.InputPipelineConfig, \"BROADCAST\") and\n phift == tpu_config.InputPipelineConfig.BROADCAST):\n return 0, 1\n if phift:\n num_hosts = (params[\"context\"].num_hosts if \"context\" in params\n else config.tpu_config.num_shards // 8)\n num_partitions = max(num_hosts, 1)\n else:\n num_partitions = config.tpu_config.num_shards\n partition_id = getattr(self, \"_next_partition_id\", 0)\n self._next_partition_id = partition_id + 1\n tf.logging.info(\"num_partitions = %d partition_id = %d\" %\n (num_partitions, partition_id))\n assert partition_id < num_partitions\n return partition_id, num_partitions\n\n def input_fn(self,\n mode,\n hparams,\n data_dir=None,\n params=None,\n config=None,\n force_repeat=False,\n prevent_repeat=False,\n dataset_kwargs=None):\n \"\"\"Builds input pipeline for problem.\n\n Args:\n mode: tf.estimator.ModeKeys\n hparams: HParams, model hparams\n data_dir: str, data directory; if None, will use hparams.data_dir\n params: dict, may include \"batch_size\"\n config: RunConfig; should have the data_parallelism attribute if not using\n TPU\n force_repeat: bool, whether to 
repeat the data even if not training\n prevent_repeat: bool, whether to not repeat when in training mode.\n Overrides force_repeat.\n dataset_kwargs: dict, if passed, will pass as kwargs to self.dataset\n method when called\n\n Returns:\n (features_dict<str name, Tensor feature>, Tensor targets)\n \"\"\"\n partition_id, num_partitions = self._dataset_partition(mode, config, params)\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n if config and config.use_tpu:\n num_threads = 64\n else:\n num_threads = data_reader.cpu_count() if is_training else 1\n data_dir = data_dir or (hasattr(hparams, \"data_dir\") and hparams.data_dir)\n dataset_kwargs = dataset_kwargs or {}\n dataset_kwargs.update({\n \"mode\": mode,\n \"data_dir\": data_dir,\n \"num_threads\": num_threads,\n \"hparams\": hparams,\n \"partition_id\": partition_id,\n \"num_partitions\": num_partitions,\n })\n return data_reader.input_fn(\n self.dataset(**dataset_kwargs),\n self.filepattern(data_dir, mode),\n self.skip_random_fraction_when_training,\n self.batch_size_means_tokens,\n self.get_hparams().batch_size_multiplier,\n self.max_length(hparams),\n mode,\n hparams,\n data_dir=data_dir,\n params=params,\n config=config,\n force_repeat=force_repeat,\n prevent_repeat=prevent_repeat)\n\n @property\n def export_assets(self):\n \"\"\"Assets to export with the model.\n\n This property contains a dictionary of assets, such as vocabulary files,\n that should be exported together with the model, or None if no assets\n are needed.\n \"\"\"\n\n return None\n\n def serving_input_fn(self, hparams, decode_hparams=None, use_tpu=False):\n \"\"\"Input fn for serving export, starting from serialized example.\"\"\"\n self._hparams = hparams\n mode = tf.estimator.ModeKeys.PREDICT\n serialized_example = tf.placeholder(\n dtype=tf.string, shape=[None], name=\"serialized_example\")\n dataset = tf.data.Dataset.from_tensor_slices(serialized_example)\n dataset = dataset.map(self.decode_example)\n dataset = dataset.map(lambda ex: self.preprocess_example(ex, mode, hparams))\n dataset = dataset.map(data_reader.cast_ints_to_int32)\n\n if use_tpu:\n padded_shapes = data_reader.pad_for_tpu(dataset.output_shapes, hparams,\n hparams.max_length)\n batch_size = 1 if not decode_hparams else getattr(decode_hparams,\n \"batch_size\", 1)\n dataset = dataset.padded_batch(\n batch_size, padded_shapes, drop_remainder=False)\n dataset = dataset.map(\n functools.partial(data_reader.pad_batch, batch_multiple=batch_size))\n else:\n dataset = dataset.padded_batch(\n tf.shape(serialized_example, out_type=tf.int64)[0],\n dataset.output_shapes)\n\n dataset = dataset.map(data_reader.standardize_shapes)\n features = tf.data.experimental.get_single_element(dataset)\n\n if self.has_inputs:\n features.pop(\"targets\", None)\n\n return tf.estimator.export.ServingInputReceiver(\n features=features, receiver_tensors=serialized_example)\n\n\nclass FeatureInfo(object):\n \"\"\"Encapsulates information about a feature.\"\"\"\n\n def __init__(self,\n encoder=None,\n modality=None,\n vocab_size=None,\n space_id=None):\n self.encoder = encoder\n self.modality = modality\n self.vocab_size = vocab_size\n self.space_id = space_id\n\n\ndef _copy_problem_hparams(p_hparams):\n \"\"\"Use input modality, vocab, and space id for target.\"\"\"\n p = p_hparams\n # Duplicate input modality.\n p.modality[\"targets\"] = p.modality[\"inputs\"]\n # Duplicate input vocab size.\n p.vocab_size[\"targets\"] = p.vocab_size[\"inputs\"]\n # Duplicate input vocabulary.\n p.vocabulary[\"targets\"] = 
p.vocabulary[\"inputs\"]\n # Duplicate input space ids.\n p.target_space_id = p.input_space_id\n # Mark that p was reversed.\n p.was_copy = True\n\n\ndef _reverse_problem_hparams(p_hparams):\n \"\"\"Swap input/output modalities, vocab, and space ids.\"\"\"\n p = p_hparams\n\n # Swap modalities.\n # TODO(trandustin): Note this assumes target modalities have feature name\n # 'target', and each intended feature to swap has feature name 'input'.\n # In the future, remove need for this behavior.\n reversed_modality = {}\n for feature_name in p.modality:\n # Copy feature as-is.\n if \"target\" not in feature_name and \"input\" not in feature_name:\n reversed_modality[feature_name] = p.modality[feature_name]\n else:\n # Change \"target\" to \"input\" and vice-versa for this feature.\n if \"target\" in feature_name:\n reversed_feature_name = feature_name.replace(\"target\", \"input\")\n else:\n assert \"input\" in feature_name, feature_name\n reversed_feature_name = feature_name.replace(\"input\", \"target\")\n reversed_modality[reversed_feature_name] = p.modality[feature_name]\n\n p.modality = reversed_modality\n\n # Swap vocab sizes.\n reversed_vocab_size = {}\n for feature_name in p.vocab_size:\n reversed_feature_name = feature_name.replace(\"target\", \"input\")\n if \"target\" in feature_name and reversed_feature_name in p.vocab_size:\n reversed_vocab_size[feature_name] = p.vocab_size[reversed_feature_name]\n reversed_vocab_size[reversed_feature_name] = p.vocab_size[feature_name]\n\n p.vocab_size = reversed_vocab_size\n\n # Swap vocabularies.\n input_vocabulary = p.vocabulary.pop(\"inputs\", None)\n target_vocabulary = p.vocabulary.pop(\"targets\", None)\n if input_vocabulary is not None:\n p.vocabulary[\"targets\"] = input_vocabulary\n if target_vocabulary is not None:\n p.vocabulary[\"inputs\"] = target_vocabulary\n\n # Swap input/target space ids.\n input_space_id = p.input_space_id\n target_space_id = p.target_space_id\n if input_space_id is not None:\n p.target_space_id = input_space_id\n else:\n p.target_space_id = SpaceID.GENERIC\n if target_space_id is not None:\n p.input_space_id = target_space_id\n else:\n p.input_space_id = SpaceID.GENERIC\n\n # Mark that p was reversed.\n p.was_reversed = True\n\n\ndef _default_hparams():\n \"\"\"A set of basic model hyperparameters.\"\"\"\n return hparam.HParams(\n # Use this parameter to get comparable perplexity numbers with different\n # tokenizations. This value should be set to the ratio of the number of\n # tokens in the test set according to the tokenization used to the number\n # of tokens in the test set in the \"official\" tokenization. For\n # example, if we are using a word-piece based model and we want to\n # compute per-word perplexity, then we set loss_multiplier to the number\n # of wordpieces per word in the test set.\n loss_multiplier=1.0,\n\n # Use this parameter to allow for larger sequences in the batch. Without\n # the use of this parameter, the size of the inner two dimensions will\n # be used to judge the sequence length.\n batch_size_multiplier=1,\n\n # During inference for autoregressive problems, if the batch_size is 1,\n # the inference will stop when the model predict a text_encoder.EOS_ID\n # token.\n stop_at_eos=False,\n\n # Modalities used to map from features to a space compatible with\n # chosen model architecture. It comprises key-value pairs of a feature\n # name (str) and its modality type.\n modality={},\n vocab_size={},\n\n # Identifiers used to tell the model which input/target space will be\n # expected. 
For example, it can tell that we expect French as characters\n # as output, or Spanish as sound. Spaces defined as constants in SpaceID\n # class.\n input_space_id=SpaceID.GENERIC,\n target_space_id=SpaceID.GENERIC)\n\n\ndef problem_hparams_to_features(problem_hparams):\n input_space_id, target_space_id = 0, 0\n if problem_hparams:\n input_space_id = problem_hparams.input_space_id\n target_space_id = problem_hparams.target_space_id\n return {\n \"input_space_id\": input_space_id,\n \"target_space_id\": target_space_id,\n }\n",
"# coding=utf-8\n# Copyright 2020 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for common layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport kfac\nimport numpy as np\n\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.utils import test_utils\n\nimport tensorflow.compat.v1 as tf\n\ntf.enable_eager_execution()\n\n\nclass CommonLayersTest(parameterized.TestCase, tf.test.TestCase):\n\n @test_utils.run_in_graph_and_eager_modes()\n def testIndexLastDimWithIndices(self):\n x = np.array([[2., 3., 4., 5.],\n [6., 7., 8., 9.]])\n indices = np.array([2, 0])\n x_idx = common_layers.index_last_dim_with_indices(x, indices)\n\n expected = np.array([4., 6.])\n self.assertAllEqual(expected, self.evaluate(x_idx))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testSaturatingSigmoid(self):\n x = np.array([-120.0, -100.0, 0.0, 100.0, 120.0], dtype=np.float32)\n y = common_layers.saturating_sigmoid(tf.constant(x))\n res = self.evaluate(y)\n self.assertAllClose(res, [0.0, 0.0, 0.5, 1.0, 1.0])\n\n @test_utils.run_in_graph_and_eager_modes()\n def testFlatten4D3D(self):\n x = np.random.randint(1, high=9, size=(3, 5, 2))\n y = common_layers.flatten4d3d(common_layers.embedding(x, 10, 7))\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (3, 5 * 2, 7))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testEmbedding(self):\n x = np.random.randint(1, high=9, size=(3, 5))\n y = common_layers.embedding(x, 10, 16)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (3, 5, 16))\n\n @test_utils.run_in_graph_mode_only()\n def testShakeShake(self):\n x = np.random.rand(5, 7)\n with self.test_session() as session:\n x = tf.constant(x, dtype=tf.float32)\n y = common_layers.shakeshake([x, x, x, x, x])\n inp, res = session.run([x, y])\n self.assertAllClose(res, inp)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testConv(self):\n x = np.random.rand(5, 7, 1, 11)\n y = common_layers.conv(tf.constant(x, dtype=tf.float32), 13, (3, 1))\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 5, 1, 13))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testConv1d(self):\n x = np.random.rand(5, 7, 11)\n y = common_layers.conv1d(tf.constant(x, dtype=tf.float32), 13, 1)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 7, 13))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testSeparableConv(self):\n x = np.random.rand(5, 7, 1, 11)\n y = common_layers.separable_conv(\n tf.constant(x, dtype=tf.float32), 13, (3, 1))\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 5, 1, 13))\n\n @test_utils.run_in_graph_and_eager_modes()\n def 
testSubSeparableConv(self):\n for sep in [0, 1, 2, 4]:\n x = np.random.rand(5, 7, 1, 12)\n with tf.variable_scope(\"sep_%d\" % sep):\n y = common_layers.subseparable_conv(\n tf.constant(x, dtype=tf.float32), 16, (3, 1), separability=sep)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 5, 1, 16))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testConvBlock(self):\n x = np.random.rand(5, 7, 1, 11)\n y = common_layers.conv_block(\n tf.constant(x, dtype=tf.float32),\n 13, [(1, (3, 3)), (1, (3, 3))],\n padding=\"SAME\",\n normalizer_fn=common_layers.noam_norm)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 7, 1, 13))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testSeparableConvBlock(self):\n x = np.random.rand(5, 7, 1, 11)\n y = common_layers.separable_conv_block(\n tf.constant(x, dtype=tf.float32),\n 13, [(1, (3, 3)), (1, (3, 3))],\n padding=\"SAME\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 7, 1, 13))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testSubSeparableConvBlock(self):\n for sep in [0, 1, 2, 4]:\n x = np.random.rand(5, 7, 1, 12)\n with tf.variable_scope(\"sep_%d\" % sep):\n y = common_layers.subseparable_conv_block(\n tf.constant(x, dtype=tf.float32),\n 16, [(1, (3, 3)), (1, (3, 3))],\n padding=\"SAME\",\n separability=sep)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 7, 1, 16))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testPool(self):\n x = np.random.rand(5, 8, 1, 11)\n y = common_layers.pool(\n tf.constant(x, dtype=tf.float32), (2, 2), \"AVG\", \"SAME\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 8, 1, 11))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testConvBlockDownsample(self):\n x = np.random.rand(5, 7, 1, 11)\n y = common_layers.conv_block_downsample(\n tf.constant(x, dtype=tf.float32), (3, 1), (2, 1), \"SAME\")\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 4, 1, 27))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testGetTimingSignal(self):\n length = 7\n num_timescales = 10\n a = common_layers.get_timing_signal(length, num_timescales=num_timescales)\n res = self.evaluate(a)\n self.assertEqual(res.shape, (length, 2 * num_timescales))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testAddTimingSignal(self):\n batch = 5\n length = 7\n height = 3\n depth = 35\n x = np.random.rand(batch, length, height, depth)\n a = common_layers.add_timing_signal(tf.constant(x, dtype=tf.float32))\n res = self.evaluate(a)\n self.assertEqual(res.shape, (batch, length, height, depth))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testConvGRU(self):\n x = np.random.rand(5, 7, 3, 11)\n y = common_layers.conv_gru(tf.constant(x, dtype=tf.float32), (1, 3), 11)\n z = common_layers.conv_gru(\n tf.constant(x, dtype=tf.float32), (1, 3), 11, padding=\"LEFT\")\n self.evaluate(tf.global_variables_initializer())\n res1 = self.evaluate(y)\n res2 = self.evaluate(z)\n self.assertEqual(res1.shape, (5, 7, 3, 11))\n self.assertEqual(res2.shape, (5, 7, 3, 11))\n\n @test_utils.run_in_graph_mode_only\n def testSRU(self):\n x = np.random.rand(5, 7, 3, 11)\n with self.test_session() as session:\n y = common_layers.sru(tf.constant(x, dtype=tf.float32))\n 
session.run(tf.global_variables_initializer())\n res = session.run(y)\n self.assertEqual(res.shape, (5, 7, 3, 11))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testLayerNorm(self):\n x = np.random.rand(5, 7, 11)\n y = common_layers.layer_norm(tf.constant(x, dtype=tf.float32), 11)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 7, 11))\n\n\n @test_utils.run_in_graph_and_eager_modes()\n def testGroupNorm(self):\n x = np.random.rand(5, 7, 3, 16)\n y = common_layers.group_norm(tf.constant(x, dtype=tf.float32))\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 7, 3, 16))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testConvLSTM(self):\n x = np.random.rand(5, 7, 11, 13)\n y = common_layers.conv_lstm(tf.constant(x, dtype=tf.float32), (1, 3), 13)\n self.evaluate(tf.global_variables_initializer())\n res = self.evaluate(y)\n self.assertEqual(res.shape, (5, 7, 11, 13))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testPadToSameLength(self):\n x1 = np.random.rand(5, 7, 11)\n x2 = np.random.rand(5, 9, 11)\n a, b = common_layers.pad_to_same_length(\n tf.constant(x1, dtype=tf.float32), tf.constant(x2, dtype=tf.float32))\n c, d = common_layers.pad_to_same_length(\n tf.constant(x1, dtype=tf.float32),\n tf.constant(x2, dtype=tf.float32),\n final_length_divisible_by=4)\n res1, res2 = self.evaluate([a, b])\n res1a, res2a = self.evaluate([c, d])\n self.assertEqual(res1.shape, (5, 9, 11))\n self.assertEqual(res2.shape, (5, 9, 11))\n self.assertEqual(res1a.shape, (5, 12, 11))\n self.assertEqual(res2a.shape, (5, 12, 11))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testShiftLeft(self):\n x1 = np.zeros((5, 7, 1, 11))\n x1[:, 0, :] = np.ones_like(x1[:, 0, :])\n expected = np.zeros((5, 7, 1, 11))\n expected[:, 1, :] = np.ones_like(expected[:, 1, :])\n a = common_layers.shift_right(tf.constant(x1, dtype=tf.float32))\n actual = self.evaluate(a)\n self.assertAllEqual(actual, expected)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testConvStride2MultiStep(self):\n x1 = np.random.rand(5, 32, 16, 11)\n a = common_layers.conv_stride2_multistep(\n tf.constant(x1, dtype=tf.float32), 4, 16)\n self.evaluate(tf.global_variables_initializer())\n actual = self.evaluate(a[0])\n self.assertEqual(actual.shape, (5, 2, 1, 16))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testDeconvStride2MultiStep(self):\n x1 = np.random.rand(5, 2, 1, 11)\n a = common_layers.deconv_stride2_multistep(\n tf.constant(x1, dtype=tf.float32), 4, 16)\n self.evaluate(tf.global_variables_initializer())\n actual = self.evaluate(a)\n self.assertEqual(actual.shape, (5, 32, 1, 16))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testApplyNormLayer(self):\n x1 = np.random.rand(5, 2, 1, 11)\n x2 = common_layers.apply_norm(\n tf.constant(x1, dtype=tf.float32), \"layer\", depth=11, epsilon=1e-6)\n self.evaluate(tf.global_variables_initializer())\n actual = self.evaluate(x2)\n self.assertEqual(actual.shape, (5, 2, 1, 11))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testApplyNormNoam(self):\n x1 = np.random.rand(5, 2, 1, 11)\n x2 = common_layers.apply_norm(\n tf.constant(x1, dtype=tf.float32), \"noam\", depth=11, epsilon=1e-6)\n self.evaluate(tf.global_variables_initializer())\n actual = self.evaluate(x2)\n self.assertEqual(actual.shape, (5, 2, 1, 11))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testApplyNormBatch(self):\n x1 = np.random.rand(5, 2, 1, 11)\n x2 = 
common_layers.apply_norm(\n tf.constant(x1, dtype=tf.float32), \"batch\", depth=11, epsilon=1e-6)\n self.evaluate(tf.global_variables_initializer())\n actual = self.evaluate(x2)\n self.assertEqual(actual.shape, (5, 2, 1, 11))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testApplyNormNone(self):\n x1 = np.random.rand(5, 2, 1, 11)\n x2 = common_layers.apply_norm(\n tf.constant(x1, dtype=tf.float32), \"none\", depth=11, epsilon=1e-6)\n self.evaluate(tf.global_variables_initializer())\n actual = self.evaluate(x2)\n self.assertEqual(actual.shape, (5, 2, 1, 11))\n self.assertAllClose(actual, x1, atol=1e-03)\n\n\n @test_utils.run_in_graph_mode_only()\n def testDenseWithLayerCollection(self):\n with tf.variable_scope(\"test_layer_collection\"):\n x1 = tf.zeros([3, 4], tf.float32)\n layer_collection = kfac.LayerCollection()\n common_layers.dense(\n x1, units=10, layer_collection=layer_collection, name=\"y1\")\n self.assertLen(layer_collection.get_blocks(), 1)\n\n # 3D inputs.\n x2 = tf.zeros([3, 4, 5], tf.float32)\n common_layers.dense(\n x2, units=10, layer_collection=layer_collection, name=\"y2\")\n self.assertLen(layer_collection.get_blocks(), 2)\n\n def testGlobalPool1d(self):\n x1 = np.random.rand(5, 4, 11)\n no_mask = np.ones((5, 4))\n full_mask = np.zeros((5, 4))\n\n x1_ = tf.Variable(x1, dtype=tf.float32)\n no_mask_ = tf.Variable(no_mask, dtype=tf.float32)\n full_mask_ = tf.Variable(full_mask, dtype=tf.float32)\n\n none_mask_max = common_layers.global_pool_1d(x1_)\n no_mask_max = common_layers.global_pool_1d(x1_, mask=no_mask_)\n result1 = tf.reduce_sum(none_mask_max - no_mask_max)\n\n full_mask_max = common_layers.global_pool_1d(x1_, mask=full_mask_)\n result2 = tf.reduce_sum(full_mask_max)\n\n none_mask_avr = common_layers.global_pool_1d(x1_, \"AVR\")\n no_mask_avr = common_layers.global_pool_1d(x1_, \"AVR\", no_mask_)\n result3 = tf.reduce_sum(none_mask_avr - no_mask_avr)\n\n full_mask_avr = common_layers.global_pool_1d(x1_, \"AVR\", full_mask_)\n result4 = tf.reduce_sum(full_mask_avr)\n\n self.evaluate(tf.global_variables_initializer())\n actual = self.evaluate([result1, result2, result3, result4])\n self.assertAllEqual(actual[:3], [0.0, 0.0, 0.0])\n\n def testLinearSetLayer(self):\n x1 = np.random.rand(5, 4, 11)\n cont = np.random.rand(5, 13)\n x1_ = tf.Variable(x1, dtype=tf.float32)\n cont_ = tf.Variable(cont, dtype=tf.float32)\n\n simple_ff = common_layers.linear_set_layer(32, x1_)\n cont_ff = common_layers.linear_set_layer(32, x1_, context=cont_)\n\n self.evaluate(tf.global_variables_initializer())\n actual = self.evaluate([simple_ff, cont_ff])\n self.assertEqual(actual[0].shape, (5, 4, 32))\n self.assertEqual(actual[1].shape, (5, 4, 32))\n\n def testRavanbakhshSetLayer(self):\n x1 = np.random.rand(5, 4, 11)\n x1_ = tf.Variable(x1, dtype=tf.float32)\n layer = common_layers.ravanbakhsh_set_layer(32, x1_)\n self.evaluate(tf.global_variables_initializer())\n actual = self.evaluate(layer)\n self.assertEqual(actual.shape, (5, 4, 32))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testTopKthIterativeShape(self):\n x = np.random.rand(5, 2, 1, 12)\n y = common_layers.top_kth_iterative(tf.constant(x, dtype=tf.float32), 3)\n actual = self.evaluate(y)\n self.assertEqual(actual.shape, (5, 2, 1, 1))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testTopKthIterativeValue(self):\n x = [1.0, 2.0, 3.0, 4.0]\n y = common_layers.top_kth_iterative(tf.constant(x, dtype=tf.float32), 3)\n actual = self.evaluate(y)\n self.assertEqual(int(actual[0]), 2.0)\n\n 
@test_utils.run_in_graph_and_eager_modes()\n def testBReLU(self):\n x = np.random.rand(5, 2, 1, 12)\n y = common_layers.brelu(tf.constant(x, dtype=tf.float32))\n actual = self.evaluate(y)\n self.assertEqual(actual.shape, (5, 2, 1, 12))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testBELU(self):\n x = np.random.rand(5, 2, 1, 12)\n y = common_layers.belu(tf.constant(x, dtype=tf.float32))\n actual = self.evaluate(y)\n self.assertEqual(actual.shape, (5, 2, 1, 12))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testNAC(self):\n x = np.random.rand(5, 2, 1, 12)\n y = common_layers.nac(tf.constant(x, dtype=tf.float32), 14)\n self.evaluate(tf.global_variables_initializer())\n actual = self.evaluate(y)\n self.assertEqual(actual.shape, (5, 2, 1, 14))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testNALU(self):\n x = np.random.rand(5, 2, 1, 12)\n y = common_layers.nalu(tf.constant(x, dtype=tf.float32), 14)\n self.evaluate(tf.global_variables_initializer())\n actual = self.evaluate(y)\n self.assertEqual(actual.shape, (5, 2, 1, 14))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testNALUzeros(self):\n x = np.random.rand(5, 2, 1, 12)\n y = common_layers.nalu(tf.zeros_like(x, dtype=tf.float32), 14)\n self.evaluate(tf.global_variables_initializer())\n actual = self.evaluate(y)\n self.assertTrue(np.all(np.isfinite(actual)))\n self.assertEqual(actual.shape, (5, 2, 1, 14))\n\n @test_utils.run_in_graph_mode_only\n def testPaddingCrossEntropyFactored(self):\n vocab_size = 19\n rows = 5\n cols = 4\n depth = 11\n label_smoothing = 0.1\n features = np.random.rand(rows, cols, depth)\n weights = np.random.rand(vocab_size, depth)\n labels = np.random.randint(0, vocab_size - 1, size=(rows, cols))\n with self.test_session() as session:\n features = tf.to_float(features)\n weights = tf.to_float(weights)\n labels = tf.to_int32(labels)\n logits = tf.matmul(\n tf.reshape(features, [rows * cols, depth]), weights, transpose_b=True)\n logits = tf.reshape(logits, [rows, cols, vocab_size])\n loss_num, loss_den = common_layers.padded_cross_entropy(\n logits, labels, label_smoothing=label_smoothing, reduce_sum=False)\n factored_logits = common_layers.FactoredTensor(features, weights)\n loss_num_f, loss_den_f = common_layers.padded_cross_entropy_factored(\n factored_logits,\n labels=labels,\n label_smoothing=label_smoothing,\n reduce_sum=False)\n num, den, num_f, den_f = session.run(\n [loss_num, loss_den, loss_num_f, loss_den_f])\n self.assertEqual(num.shape, (rows, cols))\n self.assertEqual(den.shape, (rows, cols))\n self.assertEqual(num_f.shape, (rows, cols))\n self.assertEqual(den_f.shape, (rows, cols))\n self.assertAllClose(num, num_f)\n self.assertAllClose(den, den_f)\n\n @test_utils.run_in_graph_mode_only\n def testPaddingCrossEntropyFactoredGrad(self):\n vocab_size = 19\n rows = 5\n cols = 4\n depth = 11\n label_smoothing = 0.1\n features = np.random.rand(rows, cols, depth)\n weights = np.random.rand(vocab_size, depth)\n labels = np.random.randint(0, vocab_size - 1, size=(rows, cols))\n with self.test_session() as session:\n features = tf.to_float(features)\n weights = tf.to_float(weights)\n labels = tf.to_int32(labels)\n logits = tf.matmul(\n tf.reshape(features, [rows * cols, depth]), weights, transpose_b=True)\n logits = tf.reshape(logits, [rows, cols, vocab_size])\n loss_num, loss_den = common_layers.padded_cross_entropy(\n logits, labels, label_smoothing=label_smoothing, reduce_sum=False)\n factored_logits = common_layers.FactoredTensor(features, weights)\n loss_num_factored, 
loss_den_factored = (\n common_layers.padded_cross_entropy_factored(\n factored_logits,\n labels=labels,\n label_smoothing=label_smoothing,\n reduce_sum=False))\n df, dw = tf.gradients(ys=[loss_num, loss_den], xs=[features, weights])\n df_factored, dw_factored = tf.gradients(\n ys=[loss_num_factored, loss_den_factored], xs=[features, weights])\n actual_df, actual_dw, actual_df_factored, actual_dw_factored = (\n session.run([df, dw, df_factored, dw_factored]))\n self.assertEqual(actual_df.shape, (rows, cols, depth))\n self.assertEqual(actual_dw.shape, (vocab_size, depth))\n self.assertEqual(actual_df_factored.shape, (rows, cols, depth))\n self.assertEqual(actual_dw_factored.shape, (vocab_size, depth))\n self.assertAllClose(actual_df, actual_df_factored)\n self.assertAllClose(actual_dw, actual_dw_factored)\n\n @parameterized.parameters(\n (2, 4, 4, 5, True),\n (2, 4, 4, 5, False),\n (1, 16, 16, 1, True),\n (1, 16, 16, 1, False),\n )\n def testDmlLoss(self, batch, height, width, num_mixtures, reduce_sum):\n channels = 3\n pred = tf.random_normal([batch, height, width, num_mixtures * 10])\n labels = tf.random_uniform([batch, height, width, channels],\n minval=0, maxval=256, dtype=tf.int32)\n actual_loss_num, actual_loss_den = common_layers.dml_loss(\n pred=pred, labels=labels, reduce_sum=reduce_sum)\n actual_loss = actual_loss_num / actual_loss_den\n\n real_labels = common_layers.convert_rgb_to_symmetric_real(labels)\n expected_loss = common_layers.discretized_mix_logistic_loss(\n pred=pred, labels=real_labels) / channels\n if reduce_sum:\n expected_loss = tf.reduce_mean(expected_loss)\n\n actual_loss_val, expected_loss_val = self.evaluate(\n [actual_loss, expected_loss])\n self.assertAllClose(actual_loss_val, expected_loss_val)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testWeightsMultiProblemAll(self):\n labels = tf.constant(np.array([[12, 15, 1, 20, 100],\n [67, 1, 34, 45, 124],\n [78, 2, 34, 18, 29],\n [78, 123, 55, 1, 33],\n [1, 18, 22, 36, 59]]), dtype=tf.int32)\n taskid = 1\n expected_mask = np.array([[1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1]])\n actual_mask = common_layers.weights_multi_problem_all(labels, taskid)\n actual_mask_eval = self.evaluate(actual_mask)\n self.assertAllClose(expected_mask, actual_mask_eval)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testWeightsMultiProblem(self):\n labels = tf.constant(np.array([[12, 15, 1, 20, 100],\n [67, 1, 34, 45, 124],\n [78, 2, 34, 18, 29],\n [78, 123, 55, 1, 33],\n [1, 18, 22, 36, 59]]), dtype=tf.int32)\n taskid = 1\n expected_mask = np.array([[0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1],\n [0, 1, 1, 1, 1]])\n actual_mask = common_layers.weights_multi_problem(labels, taskid)\n actual_mask_eval = self.evaluate(actual_mask)\n self.assertAllClose(expected_mask, actual_mask_eval)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testDiscretizedMixLogisticLoss(self):\n batch = 2\n height = 4\n width = 4\n channels = 3\n num_mixtures = 5\n logits = tf.concat( # assign all probability mass to first component\n [tf.ones([batch, height, width, 1]) * 1e8,\n tf.zeros([batch, height, width, num_mixtures - 1])],\n axis=-1)\n locs = tf.random_uniform([batch, height, width, num_mixtures * 3],\n minval=-.9, maxval=.9)\n log_scales = tf.random_uniform([batch, height, width, num_mixtures * 3],\n minval=-1., maxval=1.)\n coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))\n pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)\n\n 
# Test labels that don't satisfy edge cases where 8-bit value is 0 or 255.\n labels = tf.random_uniform([batch, height, width, channels],\n minval=-.9, maxval=.9)\n locs_0 = locs[..., :3]\n log_scales_0 = log_scales[..., :3]\n centered_labels = labels - locs_0\n inv_stdv = tf.exp(-log_scales_0)\n plus_in = inv_stdv * (centered_labels + 1. / 255.)\n min_in = inv_stdv * (centered_labels - 1. / 255.)\n cdf_plus = tf.nn.sigmoid(plus_in)\n cdf_min = tf.nn.sigmoid(min_in)\n expected_loss = -tf.reduce_sum(tf.log(cdf_plus - cdf_min), axis=-1)\n\n actual_loss = common_layers.discretized_mix_logistic_loss(\n pred=pred, labels=labels)\n actual_loss_val, expected_loss_val = self.evaluate(\n [actual_loss, expected_loss])\n self.assertAllClose(actual_loss_val, expected_loss_val, rtol=1e-5)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testSampleFromDiscretizedMixLogistic(self):\n batch = 2\n height = 4\n width = 4\n num_mixtures = 5\n seed = 42\n logits = tf.concat( # assign all probability mass to first component\n [tf.ones([batch, height, width, 1]) * 1e8,\n tf.zeros([batch, height, width, num_mixtures - 1])],\n axis=-1)\n locs = tf.random_uniform([batch, height, width, num_mixtures * 3],\n minval=-.9, maxval=.9)\n log_scales = tf.ones([batch, height, width, num_mixtures * 3]) * -1e8\n coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))\n pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)\n\n locs_0 = locs[..., :3]\n expected_sample = tf.clip_by_value(locs_0, -1., 1.)\n\n actual_sample = common_layers.sample_from_discretized_mix_logistic(\n pred, seed=seed)\n actual_sample_val, expected_sample_val = self.evaluate(\n [actual_sample, expected_sample])\n # Use a low tolerance: samples numerically differ, as the actual\n # implementation clips log-scales so they always contribute to sampling.\n self.assertAllClose(actual_sample_val, expected_sample_val, atol=1e-2)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testFactoredTensorImplicitConversion(self):\n a = np.random.rand(3, 4, 5)\n b = np.random.rand(6, 5)\n c = np.random.rand(3, 4, 6)\n # a factored representation of a Tensor of shape (3, 4, 6)\n factored = common_layers.FactoredTensor(tf.to_float(a), tf.to_float(b))\n # implicitly converts factored to a Tensor (performing the matmul)\n d = factored + tf.to_float(c)\n out = self.evaluate(d)\n self.assertEqual(out.shape, (3, 4, 6))\n\n @test_utils.run_in_graph_mode_only()\n def testConvHiddenReluMemoryEfficient(self):\n batch = 3\n length = 23\n io_size = 16\n filter_size = 7\n x = np.random.rand(batch, length, io_size)\n dy = np.random.rand(batch, length, io_size)\n with self.test_session() as session:\n x = tf.to_float(x)\n dy = tf.to_float(dy)\n f1 = tf.get_variable(\"f1\", [1, io_size, filter_size])\n f2 = tf.get_variable(\"f2\", [1, filter_size, io_size])\n norm_scale, norm_bias = common_layers.layer_norm_vars(io_size)\n y = common_layers.conv_hidden_relu_memory_efficient(\n x, filter_size, forget=False,\n test_vars=(f1, f2, norm_scale, norm_bias))\n y_forget = common_layers.conv_hidden_relu_memory_efficient(\n x, filter_size, forget=True,\n test_vars=(f1, f2, norm_scale, norm_bias))\n dx, df1, df2, dnorm_scale, dnorm_bias = tf.gradients(\n ys=[y], xs=[x, f1, f2, norm_scale, norm_bias], grad_ys=[dy])\n dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f = tf.gradients(\n ys=[y_forget], xs=[x, f1, f2, norm_scale, norm_bias], grad_ys=[dy])\n session.run(tf.global_variables_initializer())\n (y, y_forget,\n dx, df1, df2, dnorm_scale, dnorm_bias,\n dx_f, df1_f, 
df2_f, dnorm_scale_f, dnorm_bias_f) = session.run(\n [y, y_forget,\n dx, df1, df2, dnorm_scale, dnorm_bias,\n dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f])\n self.assertAllClose(y, y_forget)\n self.assertAllClose(df2, df2_f, rtol=2e-6, atol=2e-6)\n self.assertAllClose(df1, df1_f, rtol=2e-6, atol=2e-6)\n self.assertAllClose(dnorm_scale, dnorm_scale_f)\n self.assertAllClose(dnorm_bias, dnorm_bias_f)\n self.assertAllClose(dx, dx_f)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testTopk(self):\n batch_size = 3\n seq_len = 5\n vocab_size = 7\n\n top_k = [3, 2, -1]\n logits = np.random.rand(batch_size, seq_len, 1, 1, vocab_size) + 0.001\n topk_logits = common_layers._select_top_k(logits, top_k)\n\n self.evaluate(tf.global_variables_initializer())\n topk_logits = self.evaluate(topk_logits)\n\n for i, k in enumerate(top_k):\n for j in range(seq_len):\n self.assertEqual((topk_logits[i, j, 0, 0, :] > -1e6).sum(),\n k if k != -1 else vocab_size)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testSampleTemperaturePerExample(self):\n batch_size = 3\n seq_len = 5\n vocab_size = 7\n\n logits = np.random.randn(batch_size, seq_len, 1, 1, vocab_size)\n temperature = np.random.rand(batch_size)\n\n out = common_layers.sample_temperature_per_example(logits, temperature)\n\n self.assertAllEqual(\n self.evaluate(tf.shape(out)), [batch_size, seq_len, 1, 1])\n\n @test_utils.run_in_graph_and_eager_modes()\n def testCycleGANUpsampleNnUpsampleConv(self):\n batch = 8\n height = 32\n width = 32\n num_channels = 3\n output_filters = 10\n stride = [2, 3] # we want height to be x2 and width to be x3\n random_input = np.random.rand(batch, height, width, num_channels).astype(\n np.float32)\n\n # nn_upsample_conv gives exactly the shapes we'd expect.\n upsampled_output = common_layers.cyclegan_upsample(\n random_input, output_filters, stride, \"nn_upsample_conv\")\n upsampled_output_shape = tf.shape(upsampled_output)\n self.evaluate(tf.global_variables_initializer())\n self.assertAllEqual(\n [batch, height * stride[0], width * stride[1], output_filters],\n self.evaluate(upsampled_output_shape))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testCycleGANUpsampleBilinearUpsampleConv(self):\n batch = 8\n height = 32\n width = 32\n num_channels = 3\n output_filters = 10\n stride = [2, 3] # we want height to be x2 and width to be x3\n random_input = np.random.rand(batch, height, width, num_channels).astype(\n np.float32)\n\n # bilinear_upsample_conv gives exactly the shapes we'd expect.\n upsampled_output = common_layers.cyclegan_upsample(\n random_input, output_filters, stride, \"bilinear_upsample_conv\")\n upsampled_output_shape = tf.shape(upsampled_output)\n self.evaluate(tf.global_variables_initializer())\n self.assertAllEqual(\n [batch, height * stride[0], width * stride[1], output_filters],\n self.evaluate(upsampled_output_shape))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testCycleGANUpsampleConv2dTranspose(self):\n batch = 8\n height = 32\n width = 32\n num_channels = 3\n output_filters = 10\n stride = [2, 3] # we want height to be x2 and width to be x3\n random_input = tf.convert_to_tensor(\n np.random.rand(batch, height, width, num_channels), dtype=tf.float32)\n\n # conv2d_transpose is a little tricky.\n # height_new = (height_old - 1) * stride + kernel - 2*padding - correction\n # here kernel = 3, padding = 0, correction = 1\n upsampled_height = (height - 1) * stride[0] + 3 - 2*0 - 1\n upsampled_width = (width - 1) * stride[1] + 3 - 2*0 - 1\n upsampled_output = 
common_layers.cyclegan_upsample(random_input,\n output_filters, stride,\n \"conv2d_transpose\")\n upsampled_output_shape = tf.shape(upsampled_output)\n self.evaluate(tf.global_variables_initializer())\n self.assertAllEqual(\n [batch, upsampled_height, upsampled_width, output_filters],\n self.evaluate(upsampled_output_shape))\n\n def testSpectralNorm(self):\n # Test that after 20 calls to apply_spectral_norm, the spectral\n # norm of the normalized matrix is close to 1.0\n with tf.Graph().as_default():\n weights = tf.get_variable(\"w\", dtype=tf.float32, shape=[2, 3, 50, 100])\n weights = tf.multiply(weights, 10.0)\n normed_weight, assign_op = common_layers.apply_spectral_norm(weights)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for _ in range(20):\n sess.run(assign_op)\n normed_weight, assign_op = common_layers.apply_spectral_norm(\n weights)\n normed_weight = sess.run(normed_weight).reshape(-1, 100)\n _, s, _ = np.linalg.svd(normed_weight)\n self.assertTrue(np.allclose(s[0], 1.0, rtol=0.1))\n\n\nclass FnWithCustomGradTest(tf.test.TestCase):\n\n @test_utils.run_in_graph_mode_only()\n def testCorrectness(self):\n\n w = tf.random_uniform([6, 10])\n\n def fn(a, b, c):\n return tf.layers.dense(\n a,\n 10,\n use_bias=False,\n kernel_initializer=lambda shape, dtype, partition_info: w\n ) + tf.matmul(b, c)\n\n def grad_fn(inputs, variables, outputs, grad_outputs):\n outputs = outputs[0]\n grad_outputs = grad_outputs[0]\n grad_inputs = tf.gradients(outputs, inputs, grad_ys=grad_outputs)\n grad_vars = tf.gradients(outputs, variables, grad_ys=grad_outputs)\n return grad_inputs, grad_vars\n\n custom_fn = common_layers.fn_with_custom_grad(grad_fn)(fn)\n\n a = tf.random_uniform([11, 6])\n b = tf.random_uniform([11, 7])\n c = tf.random_uniform([7, 10])\n\n out = fn(a, b, c)\n custom_out = custom_fn(a, b, c)\n self.assertEqual(out.get_shape().as_list(),\n custom_out.get_shape().as_list())\n\n loss = tf.reduce_mean(out)\n custom_loss = tf.reduce_mean(custom_out)\n\n grads = tf.gradients(loss, [a, b, c] + [tf.trainable_variables()[0]])\n custom_grads = tf.gradients(custom_loss,\n [a, b, c] + [tf.trainable_variables()[1]])\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n out_val, custom_out_val, grads_val, custom_grads_val = sess.run(\n [out, custom_out, grads, custom_grads])\n self.assertAllClose(out_val, custom_out_val)\n for g1, g2 in zip(grads_val, custom_grads_val):\n self.assertAllClose(g1, g2)\n\n @test_utils.run_in_graph_mode_only()\n def testCustomGrad(self):\n\n def fn(a, b, c):\n return tf.layers.dense(a, 10, use_bias=False) + tf.matmul(b, c)\n\n def grad_fn(inputs, variables, unused_outputs, unused_grad_outputs):\n grad_inputs = [tf.ones_like(t) * (i + 1.) for i, t in enumerate(inputs)]\n grad_vars = [\n tf.ones_like(t) * (i + len(inputs) + 1.)\n for i, t in enumerate(variables)\n ]\n return grad_inputs, grad_vars\n\n a = tf.random_uniform([11, 6])\n b = tf.random_uniform([11, 7])\n c = tf.random_uniform([7, 10])\n w = tf.random_uniform([6, 10])\n out = common_layers.fn_with_custom_grad(grad_fn)(fn)(a, b, c)\n loss = tf.reduce_mean(out)\n grads = tf.gradients(loss, [a, b, c, tf.trainable_variables()[0]])\n expected_grads = [\n tf.ones_like(t) * (i + 1.) 
for i, t in enumerate([a, b, c, w])\n ]\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n g_val, eg_val = sess.run([grads, expected_grads])\n for g1, g2 in zip(g_val, eg_val):\n self.assertAllClose(g1, g2)\n\n\nclass RecomputeTest(tf.test.TestCase):\n\n @test_utils.run_in_graph_mode_only()\n def testRecompute(self):\n\n def layer(x, name=None):\n with tf.variable_scope(name, default_name=\"layer\"):\n x = common_layers.layer_norm(x)\n x = tf.layers.conv1d(\n x,\n 10,\n 1,\n use_bias=False,\n kernel_initializer=tf.constant_initializer(42.42))\n x = tf.nn.relu(x)\n return x\n\n def fn(x):\n out = x\n for _ in range(3):\n out = layer(out)\n return out\n\n @common_layers.recompute_grad\n def fn_recompute(x):\n return fn(x)\n\n x = tf.random_uniform((3, 1, 3))\n recompute_vars = None\n with tf.variable_scope(\"recompute\") as vs:\n out1 = tf.reduce_sum(fn_recompute(x))\n recompute_vars = vs.trainable_variables()\n reg_vars = None\n with tf.variable_scope(\"regular\") as vs:\n out2 = tf.reduce_sum(fn(x))\n reg_vars = vs.trainable_variables()\n\n grad1 = tf.gradients(out1, recompute_vars)\n grad2 = tf.gradients(out2, reg_vars)\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n outs = sess.run([out1, out2, grad1, grad2])\n self.assertAllClose(outs[0], outs[1])\n for g1, g2 in zip(outs[2], outs[3]):\n self.assertAllClose(g1, g2)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] |
[
[
"tensorflow.compat.v1.data.TFRecordDataset",
"tensorflow.compat.v1.estimator.export.ServingInputReceiver",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.data.Dataset.from_tensors",
"tensorflow.compat.v1.data.experimental.parallel_interleave",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.VarLenFeature",
"tensorflow.compat.v1.logging.warning",
"tensorflow.compat.v1.FixedLenFeature",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.data.Dataset.from_tensor_slices",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.data.experimental.get_single_element",
"tensorflow.compat.v1.constant"
],
[
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.test.main",
"tensorflow.compat.v1.random_normal",
"tensorflow.compat.v1.enable_eager_execution",
"tensorflow.compat.v1.gradients",
"tensorflow.compat.v1.shape",
"numpy.random.randn",
"tensorflow.compat.v1.constant",
"numpy.random.randint",
"numpy.linalg.svd",
"tensorflow.compat.v1.to_int32",
"numpy.ones_like",
"tensorflow.compat.v1.nn.sigmoid",
"tensorflow.compat.v1.ones",
"numpy.allclose",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.multiply",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.constant_initializer",
"tensorflow.compat.v1.random_uniform",
"tensorflow.compat.v1.zeros_like",
"numpy.zeros",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.exp",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.nn.relu",
"tensorflow.compat.v1.Graph",
"numpy.random.rand",
"numpy.array",
"tensorflow.compat.v1.layers.dense",
"tensorflow.compat.v1.log",
"tensorflow.compat.v1.Variable",
"numpy.isfinite",
"numpy.ones",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.to_float",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.matmul"
]
] |
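The `testPaddingCrossEntropyFactored` test in the `common_layers_test.py` record above checks that the loss computed from a `FactoredTensor(features, weights)` matches the loss computed from explicitly materialized logits. A NumPy-only sketch of just that materialization step is below; the shapes mirror the test's constants, while the variable names and standalone-script form are illustrative rather than part of the record.

```python
# NumPy sketch of the logits materialization that the factored cross-entropy
# test compares against: logits = reshape(features) @ weights^T.
# Shapes mirror the test's constants; names and script form are illustrative.
import numpy as np

rows, cols, depth, vocab_size = 5, 4, 11, 19
features = np.random.rand(rows, cols, depth)
weights = np.random.rand(vocab_size, depth)

# Materialized logits, as in the non-factored branch of the test.
logits = features.reshape(rows * cols, depth) @ weights.T
logits = logits.reshape(rows, cols, vocab_size)
assert logits.shape == (rows, cols, vocab_size)

# A FactoredTensor keeps (features, weights) and defers this matmul; the test
# asserts that both paths produce the same per-position loss numerator and
# denominator, and the companion test checks that the gradients agree as well.
```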
sudo-michael/optimized_dp
|
[
"da4bfdd15c0dc91e4f62e6036f6de77b4a99c40c"
] |
[
"drafts/6d_graph.py"
] |
[
"import heterocl as hcl\nimport numpy as np\nimport time\nimport plotly.graph_objects as go\nfrom gridProcessing import Grid\nfrom shape_functions import *\nfrom custom_graph_functions import *\nfrom Humannoid6D_sys1 import *\nfrom argparse import ArgumentParser\n\nimport scipy.io as sio\n\nimport math\n\n\"\"\" USER INTERFACES\n- Define grid\n\n- Generate initial values for grid using shape functions\n\n- Time length for computations\n\n- Run\n\"\"\"\n\n# Grid field in this order: x, x_dot, z, z_dot, theta, theta_dot\n\ng = grid(np.array([-0.5, -1.0, 0.5, -2.0, -math.pi/2, -8.0]), np.array([0.5,1.0, 1.5, 2.0, math.pi/2, 8.0]), 6, np.array([29,29,29, 29, 29, 29])) # Leave out periodic field\n# Define my object\nmy_humanoid = Humanoid_6D()\n\n# Use the grid to initialize initial value function\nmy_shape = Rectangle6D(g)\n\n# Constraint function --- g function\n# cstraint_values = np.zeros((29, 29, 29, 29, 29, 29))\n# cstraint_values = cstraint_values + np.power(g.vs[0], 2) + np.power(g.vs[2], 2) - np.power(my_humanoid.L,2)\n\n# Initialization -- V_0 = max{l(x_1, t_0), g(x_1, t_0)}; it's called my_shape for now\n# my_shape = np.maximum(my_shape, cstraint_values)\n\n\n# Look-back lenght and time step\nlookback_length = 2.0\nt_step = 0.05\n\ntau = np.arange(start = 0, stop = lookback_length + t_step, step = t_step)\nprint(\"I'm here \\n\")\n\ncompMethod = \"Nothing\"\nmy_object = my_humanoid\n\n#parameters = { \"object\": myROV_6D}\n\n# Note that t has 2 elements t1, t2\ndef graph_6D(V_new, V_init, deriv_diff1, deriv_diff2, deriv_diff3, deriv_diff4, deriv_diff5, deriv_diff6,\n x1, x2, x3, x4, x5, x6 ,t , l0, obstacle):\n # Maximum derivative for each dim\n max_deriv1 = hcl.scalar(-1e9, \"max_deriv1\")\n max_deriv2 = hcl.scalar(-1e9, \"max_deriv2\")\n max_deriv3 = hcl.scalar(-1e9, \"max_deriv3\")\n max_deriv4 = hcl.scalar(-1e9, \"max_deriv4\")\n max_deriv5 = hcl.scalar(-1e9, \"max_deriv5\")\n max_deriv6 = hcl.scalar(-1e9, \"max_deriv6\")\n \n # Min derivative for each dim\n min_deriv1 = hcl.scalar(1e9, \"min_deriv1\")\n min_deriv2 = hcl.scalar(1e9, \"min_deriv2\")\n min_deriv3 = hcl.scalar(1e9, \"min_deriv3\")\n min_deriv4 = hcl.scalar(1e9, \"min_deriv4\")\n min_deriv5 = hcl.scalar(1e9, \"min_deriv5\")\n min_deriv6 = hcl.scalar(1e9, \"min_deriv6\")\n \n # These variables are used to dissipation calculation\n max_alpha1 = hcl.scalar(-1e9, \"max_alpha1\")\n max_alpha2 = hcl.scalar(-1e9, \"max_alpha2\")\n max_alpha3 = hcl.scalar(-1e9, \"max_alpha3\")\n max_alpha4 = hcl.scalar(-1e9, \"max_alpha4\")\n max_alpha5 = hcl.scalar(-1e9, \"max_alpha5\")\n max_alpha6 = hcl.scalar(-1e9, \"max_alpha6\")\n \n def step_bound(): # Function to calculate time step\n stepBoundInv = hcl.scalar(0, \"stepBoundInv\")\n stepBound = hcl.scalar(0, \"stepBound\")\n stepBoundInv[0] = max_alpha1[0]/g.dx[0] + max_alpha2[0]/g.dx[1] + max_alpha3[0]/g.dx[2] + max_alpha4[0]/g.dx[3] \\\n + max_alpha5[0]/g.dx[4] + max_alpha6[0]/g.dx[5]\n\n stepBound[0] = 0.8/stepBoundInv[0]\n with hcl.if_(stepBound > t[1] - t[0]):\n stepBound[0] = t[1] - t[0]\n\n # Update the lower time ranges\n t[0] = t[0] + stepBound[0]\n #t[0] = min_deriv2[0]\n return stepBound[0]\n\n def maxVWithV0(i, j, k, l, m, n): # Take the max\n with hcl.if_(V_new[i, j, k, l, m, n] < l0[i, j, k, l, m, n]):\n V_new[i, j, k, l, m, n] = l0[i, j, k, l, m, n]\n\n # Max(V, g )\n def maxVWithCStraint(i, j, k, l, m, n):\n with hcl.if_(V_new[i, j, k, l, m, n] < obstacle[i, j, k, l, m, n]):\n V_new[i, j, k, l, m, n] = obstacle[i, j, k, l, m, n]\n \n # Calculate Hamiltonian for every 
grid point in V_init\n with hcl.Stage(\"Hamiltonian\"):\n with hcl.for_(0, V_init.shape[0], name=\"i\") as i:\n with hcl.for_(0, V_init.shape[1], name=\"j\") as j:\n with hcl.for_(0, V_init.shape[2], name=\"k\") as k:\n with hcl.for_(0, V_init.shape[3], name=\"l\") as l:\n with hcl.for_(0, V_init.shape[4], name=\"m\") as m:\n with hcl.for_(0, V_init.shape[5], name=\"n\") as n:\n #Variables to calculate dV_dx\n dV_dx1_L = hcl.scalar(0, \"dV_dx1_L\")\n dV_dx1_R = hcl.scalar(0, \"dV_dx1_R\")\n dV_dx1 = hcl.scalar(0, \"dV_dx1\")\n dV_dx2_L = hcl.scalar(0, \"dV_dx2_L\")\n dV_dx2_R = hcl.scalar(0, \"dV_dx2_R\")\n dV_dx2 = hcl.scalar(0, \"dV_dx2\")\n dV_dx3_L = hcl.scalar(0, \"dV_dx3_L\")\n dV_dx3_R = hcl.scalar(0, \"dV_dx3_R\")\n dV_dx3 = hcl.scalar(0, \"dV_dx3\")\n dV_dx4_L = hcl.scalar(0, \"dV_dx4_L\")\n dV_dx4_R = hcl.scalar(0, \"dV_dx4_R\")\n dV_dx4 = hcl.scalar(0, \"dV_dx4\")\n dV_dx5_L = hcl.scalar(0, \"dV_dx5_L\")\n dV_dx5_R = hcl.scalar(0, \"dV_dx5_R\")\n dV_dx5 = hcl.scalar(0, \"dV_dx5\")\n dV_dx6_L = hcl.scalar(0, \"dV_dx6_L\")\n dV_dx6_R = hcl.scalar(0, \"dV_dx6_R\")\n dV_dx6 = hcl.scalar(0, \"dV_dx6\")\n\n # No tensor slice operation\n #dV_dx_L[0], dV_dx_R[0] = spa_derivX(i, j, k)\n dV_dx1_L[0], dV_dx1_R[0] = spa_derivX6_6d(i, j, k, l, m, n, V_init, g)\n dV_dx2_L[0], dV_dx2_R[0] = spa_derivX5_6d(i, j, k, l, m, n, V_init, g)\n dV_dx3_L[0], dV_dx3_R[0] = spa_derivX4_6d(i, j, k, l, m, n, V_init, g)\n dV_dx4_L[0], dV_dx4_R[0] = spa_derivX3_6d(i, j, k, l, m, n, V_init, g)\n dV_dx5_L[0], dV_dx5_R[0] = spa_derivX2_6d(i, j, k, l, m, n, V_init, g)\n dV_dx6_L[0], dV_dx6_R[0] = spa_derivX1_6d(i, j, k, l, m, n, V_init, g)\n\n # Saves spatial derivative diff into tables\n deriv_diff1[i, j, k, l, m, n] = dV_dx1_R[0] - dV_dx1_L[0]\n deriv_diff2[i, j, k, l, m, n] = dV_dx2_R[0] - dV_dx2_L[0]\n deriv_diff3[i, j, k, l, m, n] = dV_dx3_R[0] - dV_dx3_L[0]\n deriv_diff4[i, j, k, l, m, n] = dV_dx4_R[0] - dV_dx4_L[0]\n deriv_diff5[i, j, k, l, m, n] = dV_dx5_R[0] - dV_dx5_L[0]\n deriv_diff6[i, j, k, l, m, n] = dV_dx6_R[0] - dV_dx6_L[0]\n\n #Calculate average gradient\n dV_dx1[0] = (dV_dx1_L + dV_dx1_R) / 2\n dV_dx2[0] = (dV_dx2_L + dV_dx2_R) / 2\n dV_dx3[0] = (dV_dx3_L + dV_dx3_R) / 2\n dV_dx4[0] = (dV_dx4_L + dV_dx4_R) / 2\n dV_dx5[0] = (dV_dx5_L + dV_dx5_R) / 2\n dV_dx6[0] = (dV_dx6_L + dV_dx6_R) / 2\n\n\n # Find optimal control\n uOpt = my_object.opt_ctrl(t,(x1[i], x2[j], x3[k], x4[l], x5[m], x6[n]), (dV_dx1[0], dV_dx2[0], dV_dx3[0], dV_dx4[0], dV_dx5[0], dV_dx6[0]))\n # Find optimal disturbance\n dOpt = my_object.optDstb((dV_dx1[0], dV_dx2[0], dV_dx3[0], dV_dx4[0], dV_dx5[0], dV_dx6[0]))\n\n # Find rates of changes based on dynamics equation\n dx1_dt, dx2_dt, dx3_dt, dx4_dt, dx5_dt, dx6_dt = my_object.dynamics(t, (x1[i], x2[j], x3[k], x4[l], x5[m], x6[n]), uOpt, dOpt)\n\n # Calculate Hamiltonian terms:\n V_new[i, j, k, l, m, n] = -(dx1_dt * dV_dx1[0] + dx2_dt * dV_dx2[0] + dx3_dt * dV_dx3[0] + dx4_dt * dV_dx4[0] + dx5_dt * dV_dx5[0] + dx6_dt * dV_dx6[0])\n \n # Get derivMin\n with hcl.if_(dV_dx1_L[0] < min_deriv1[0]):\n min_deriv1[0] = dV_dx1_L[0]\n with hcl.if_(dV_dx1_R[0] < min_deriv1[0]):\n min_deriv1[0] = dV_dx1_R[0]\n\n with hcl.if_(dV_dx2_L[0] < min_deriv2[0]):\n min_deriv2[0] = dV_dx2_L[0]\n with hcl.if_(dV_dx2_R[0] < min_deriv2[0]):\n min_deriv2[0] = dV_dx2_R[0]\n\n with hcl.if_(dV_dx3_L[0] < min_deriv3[0]):\n min_deriv3[0] = dV_dx3_L[0]\n with hcl.if_(dV_dx3_R[0] < min_deriv3[0]):\n min_deriv3[0] = dV_dx3_R[0]\n\n with hcl.if_(dV_dx4_L[0] < min_deriv4[0]):\n min_deriv4[0] = dV_dx4_L[0]\n with 
hcl.if_(dV_dx4_R[0] < min_deriv4[0]):\n min_deriv4[0] = dV_dx4_R[0]\n\n with hcl.if_(dV_dx5_L[0] < min_deriv5[0]):\n min_deriv5[0] = dV_dx5_L[0]\n with hcl.if_(dV_dx5_R[0] < min_deriv5[0]):\n min_deriv5[0] = dV_dx5_R[0]\n\n with hcl.if_(dV_dx6_L[0] < min_deriv6[0]):\n min_deriv6[0] = dV_dx6_L[0]\n with hcl.if_(dV_dx6_R[0] < min_deriv6[0]):\n min_deriv6[0] = dV_dx6_R[0]\n\n # Get derivMax\n with hcl.if_(dV_dx1_L[0] > max_deriv1[0]):\n max_deriv1[0] = dV_dx1_L[0]\n with hcl.if_(dV_dx1_R[0] > max_deriv1[0]):\n max_deriv1[0] = dV_dx1_R[0]\n\n with hcl.if_(dV_dx2_L[0] > max_deriv2[0]):\n max_deriv2[0] = dV_dx2_L[0]\n with hcl.if_(dV_dx2_R[0] > max_deriv2[0]):\n max_deriv2[0] = dV_dx2_R[0]\n\n with hcl.if_(dV_dx3_L[0] > max_deriv3[0]):\n max_deriv3[0] = dV_dx3_L[0]\n with hcl.if_(dV_dx3_R[0] > max_deriv3[0]):\n max_deriv3[0] = dV_dx3_R[0]\n\n with hcl.if_(dV_dx4_L[0] > max_deriv4[0]):\n max_deriv4[0] = dV_dx4_L[0]\n with hcl.if_(dV_dx4_R[0] > max_deriv4[0]):\n max_deriv4[0] = dV_dx4_R[0]\n\n with hcl.if_(dV_dx5_L[0] > max_deriv5[0]):\n max_deriv5[0] = dV_dx5_L[0]\n with hcl.if_(dV_dx5_R[0] > max_deriv5[0]):\n max_deriv5[0] = dV_dx5_R[0]\n\n with hcl.if_(dV_dx6_L[0] > max_deriv6[0]):\n max_deriv6[0] = dV_dx6_L[0]\n with hcl.if_(dV_dx6_R[0] > max_deriv6[0]):\n max_deriv6[0] = dV_dx6_R[0]\n\n # Calculate dissipation amount\n with hcl.Stage(\"Dissipation\"):\n uOptL1 = hcl.scalar(0, \"uOptL1\")\n uOptL2 = hcl.scalar(0, \"uOptL2\")\n uOptL3 = hcl.scalar(0, \"uOptL3\")\n uOptL4 = hcl.scalar(0, \"uOptL4\")\n\n\n uOptU1 = hcl.scalar(0, \"uOptU1\")\n uOptU2 = hcl.scalar(0, \"uOptU2\")\n uOptU3 = hcl.scalar(0, \"uOptU3\")\n uOptU4 = hcl.scalar(0, \"uOptU4\")\n\n dOptL1 = hcl.scalar(0, \"dOptL1\")\n dOptL2 = hcl.scalar(0, \"dOptL2\")\n dOptL3 = hcl.scalar(0, \"dOptL3\")\n dOptL4 = hcl.scalar(0, \"dOptL4\")\n\n dOptU1 = hcl.scalar(0, \"dOptU1\")\n dOptU2 = hcl.scalar(0, \"dOptU2\")\n dOptU3 = hcl.scalar(0, \"dOptU3\")\n dOptU4 = hcl.scalar(0, \"dOptU4\")\n\n # Storing alphas\n alpha1 = hcl.scalar(0, \"alpha1\")\n alpha2 = hcl.scalar(0, \"alpha2\")\n alpha3 = hcl.scalar(0, \"alpha3\")\n alpha4 = hcl.scalar(0, \"alpha4\")\n alpha5 = hcl.scalar(0, \"alpha5\")\n alpha6 = hcl.scalar(0, \"alpha6\")\n\n # Find LOWER BOUND optimal disturbance\n dOptL = my_object.optDstb((min_deriv1[0], min_deriv2[0], min_deriv3[0], min_deriv4[0], min_deriv5[0], min_deriv6[0]))\n # Find UPPER BOUND optimal disturbance\n dOptU = my_object.optDstb((max_deriv1[0], max_deriv2[0], max_deriv3[0], max_deriv4[0], min_deriv5[0], min_deriv6[0]))\n with hcl.for_(0, V_init.shape[0], name=\"i\") as i:\n with hcl.for_(0, V_init.shape[1], name=\"j\") as j:\n with hcl.for_(0, V_init.shape[2], name=\"k\") as k:\n with hcl.for_(0, V_init.shape[3], name=\"l\") as l:\n with hcl.for_(0, V_init.shape[4], name=\"m\") as m:\n with hcl.for_(0, V_init.shape[5], name=\"n\") as n:\n dx_LL1 = hcl.scalar(0, \"dx_LL1\")\n dx_LL2 = hcl.scalar(0, \"dx_LL2\")\n dx_LL3 = hcl.scalar(0, \"dx_LL3\")\n dx_LL4 = hcl.scalar(0, \"dx_LL4\")\n dx_LL5 = hcl.scalar(0, \"dx_LL5\")\n dx_LL6 = hcl.scalar(0, \"dx_LL6\")\n\n dx_UL1 = hcl.scalar(0, \"dx_UL1\")\n dx_UL2 = hcl.scalar(0, \"dx_UL2\")\n dx_UL3 = hcl.scalar(0, \"dx_UL3\")\n dx_UL4 = hcl.scalar(0, \"dx_UL4\")\n dx_UL5 = hcl.scalar(0, \"dx_UL5\")\n dx_UL6 = hcl.scalar(0, \"dx_UL6\")\n #\n dx_LU1 = hcl.scalar(0, \"dx_LU1\")\n dx_LU2 = hcl.scalar(0, \"dx_LU2\")\n dx_LU3 = hcl.scalar(0, \"dx_LU3\")\n dx_LU4 = hcl.scalar(0, \"dx_LU4\")\n dx_LU5 = hcl.scalar(0, \"dx_LU5\")\n dx_LU6 = hcl.scalar(0, \"dx_LU6\")\n\n dx_UU1 = 
hcl.scalar(0, \"dx_UU1\")\n dx_UU2 = hcl.scalar(0, \"dx_UU2\")\n dx_UU3 = hcl.scalar(0, \"dx_UU3\")\n dx_UU4 = hcl.scalar(0, \"dx_UU4\")\n dx_UU5 = hcl.scalar(0, \"dx_UU5\")\n dx_UU6 = hcl.scalar(0, \"dx_UU6\")\n\n # Find LOWER BOUND optimal control\n uOptL = my_object.opt_ctrl(t, (x1[i], x2[j], x3[k], x4[l], x5[m], x6[n]), (min_deriv1[0], min_deriv2[0], min_deriv3[0], min_deriv4[0], min_deriv5[0], min_deriv6[0]))\n # Find UPPER BOUND optimal control\n uOptU = my_object.opt_ctrl(t, (x1[i], x2[j], x3[k], x4[l], x5[m], x6[n]), (max_deriv1[0], max_deriv2[0], max_deriv3[0], max_deriv4[0], max_deriv5[0], max_deriv6[0]))\n\n # Get upper bound and lower bound rates of changes\n dx_LL1[0], dx_LL2[0], dx_LL3[0], dx_LL4[0], dx_LL5[0], dx_LL6[0] = my_object.dynamics(t, (x1[i], x2[j], x3[k], x4[l], x5[m], x6[n]), uOptL, dOptL)\n # Get absolute value of each\n dx_LL1[0] = my_abs(dx_LL1[0])\n dx_LL2[0] = my_abs(dx_LL2[0])\n dx_LL3[0] = my_abs(dx_LL3[0])\n dx_LL4[0] = my_abs(dx_LL4[0])\n dx_LL5[0] = my_abs(dx_LL5[0])\n dx_LL6[0] = my_abs(dx_LL6[0])\n\n dx_UL1[0], dx_UL2[0], dx_UL3[0], dx_UL4[0], dx_UL5[0], dx_UL6[0] = my_object.dynamics(t, (x1[i], x2[j], x3[k], x4[l], x5[m], x6[n]), uOptU, dOptL)\n # Get absolute value of each\n dx_UL1[0] = my_abs(dx_UL1[0])\n dx_UL2[0] = my_abs(dx_UL2[0])\n dx_UL3[0] = my_abs(dx_UL3[0])\n dx_UL4[0] = my_abs(dx_UL4[0])\n dx_UL5[0] = my_abs(dx_UL5[0])\n dx_UL6[0] = my_abs(dx_UL6[0])\n\n # Set maximum alphas\n alpha1[0] = my_max(dx_UL1[0], dx_LL1[0])\n alpha2[0] = my_max(dx_UL2[0], dx_LL2[0])\n alpha3[0] = my_max(dx_UL3[0], dx_LL3[0])\n alpha4[0] = my_max(dx_UL4[0], dx_LL4[0])\n alpha5[0] = my_max(dx_UL5[0], dx_LL5[0])\n alpha6[0] = my_max(dx_UL6[0], dx_LL6[0])\n\n dx_LU1[0], dx_LU2[0], dx_LU3[0], dx_LU4[0], dx_LU5[0], dx_LU6[0] = my_object.dynamics(t, (x1[i], x2[j], x3[k], x4[l], x5[m], x6[n]), uOptL, dOptU)\n # Get absolute value of each\n dx_LU1[0] = my_abs(dx_LU1[0])\n dx_LU2[0] = my_abs(dx_LU2[0])\n dx_LU3[0] = my_abs(dx_LU3[0])\n dx_LU4[0] = my_abs(dx_LU4[0])\n dx_LU5[0] = my_abs(dx_LU5[0])\n dx_LU6[0] = my_abs(dx_LU6[0])\n\n alpha1[0] = my_max(alpha1[0], dx_LU1[0])\n alpha2[0] = my_max(alpha2[0], dx_LU2[0])\n alpha3[0] = my_max(alpha3[0], dx_LU3[0])\n alpha4[0] = my_max(alpha4[0], dx_LU4[0])\n alpha5[0] = my_max(alpha5[0], dx_LU5[0])\n alpha6[0] = my_max(alpha6[0], dx_LU6[0])\n\n dx_UU1[0], dx_UU2[0], dx_UU3[0], dx_UU4[0], dx_UU5[0], dx_UU6[0] = my_object.dynamics(t, (x1[i], x2[j], x3[k], x4[l], x5[m], x6[n]), uOptU, dOptU)\n dx_UU1[0] = my_abs(dx_UU1[0])\n dx_UU2[0] = my_abs(dx_UU2[0])\n dx_UU3[0] = my_abs(dx_UU3[0])\n dx_UU4[0] = my_abs(dx_UU4[0])\n dx_UU5[0] = my_abs(dx_UU5[0])\n dx_UU6[0] = my_abs(dx_UU6[0])\n\n alpha1[0] = my_max(alpha1[0], dx_UU1[0])\n alpha2[0] = my_max(alpha2[0], dx_UU2[0])\n alpha3[0] = my_max(alpha3[0], dx_UU3[0])\n alpha4[0] = my_max(alpha4[0], dx_UU4[0])\n alpha5[0] = my_max(alpha5[0], dx_UU5[0])\n alpha6[0] = my_max(alpha6[0], dx_UU6[0])\n\n diss = hcl.scalar(0, \"diss\")\n diss[0] = 0.5*(deriv_diff1[i, j, k, l, m, n]*alpha1[0] + deriv_diff2[i, j, k, l, m, n]*alpha2[0] \\\n + deriv_diff3[i, j, k, l, m, n]* alpha3[0] + deriv_diff4[i, j, k, l, m, n]* alpha4[0] \\\n + deriv_diff5[i, j, k, l, m, n]* alpha5[0] + deriv_diff6[i, j, k, l, m, n]* alpha6[0])\n\n # Finally\n V_new[i, j, k, l, m, n] = -(V_new[i, j, k, l, m, n] - diss[0])\n # Get maximum alphas in each dimension\n\n # Calculate alphas\n with hcl.if_(alpha1 > max_alpha1):\n max_alpha1[0] = alpha1[0]\n with hcl.if_(alpha2 > max_alpha2):\n max_alpha2[0] = alpha2[0]\n with 
hcl.if_(alpha3 > max_alpha3):\n max_alpha3[0] = alpha3[0]\n with hcl.if_(alpha4 > max_alpha4):\n max_alpha4[0] = alpha4[0]\n with hcl.if_(alpha5 > max_alpha5):\n max_alpha5[0] = alpha5[0]\n with hcl.if_(alpha6 > max_alpha6):\n max_alpha6[0] = alpha6[0]\n\n\n # Determine time step\n delta_t = hcl.compute((1,), lambda x: step_bound(), name=\"delta_t\")\n #hcl.update(t, lambda x: t[x] + delta_t[x])\n\n # Integrate\n #if compMethod == 'HJ_PDE':\n result = hcl.update(V_new, lambda i, j, k, l, m, n: V_init[i, j, k, l, m, n] + V_new[i, j, k, l, m, n] * delta_t[0])\n if compMethod == 'maxVWithV0':\n result = hcl.update(V_new, lambda i, j, k, l, m, n: maxVWithV0(i, j, k, l, m, n))\n if compMethod == 'maxVWithCStraint':\n result = hcl.update(V_new, lambda i, j, k, l, m, n: maxVWithCStraint(i, j, k, l, m, n))\n # Copy V_new to V_init\n hcl.update(V_init, lambda i, j, k, l, m, n: V_new[i, j, k, l, m, n])\n return result\n\ndef main():\n ################### PARSING ARGUMENTS FROM USERS #####################\n\n parser = ArgumentParser()\n parser.add_argument(\"-p\", \"--plot\", default=False, type=bool)\n # Print out LLVM option only\n parser.add_argument(\"-l\", \"--llvm\", default=False, type=bool)\n args = parser.parse_args()\n\n hcl.init()\n hcl.config.init_dtype = hcl.Float()\n\n ################## DATA SHAPE PREPARATION FOR GRAPH FORMATION ####################\n V_f = hcl.placeholder(tuple(g.pts_each_dim), name=\"V_f\", dtype = hcl.Float())\n V_init = hcl.placeholder(tuple(g.pts_each_dim), name=\"V_init\", dtype=hcl.Float())\n l0 = hcl.placeholder(tuple(g.pts_each_dim), name=\"l0\", dtype=hcl.Float())\n #x = hcl.placeholder((6, g.pts_each_dim[0]), name=\"x\", dtype=hcl.Float())\n t = hcl.placeholder((2,), name=\"t\", dtype=hcl.Float())\n\n # Deriv diff tensor\n deriv_diff1 = hcl.placeholder((tuple(g.pts_each_dim)), name=\"deriv_diff1\")\n deriv_diff2 = hcl.placeholder((tuple(g.pts_each_dim)), name=\"deriv_diff2\")\n deriv_diff3 = hcl.placeholder((tuple(g.pts_each_dim)), name=\"deriv_diff3\")\n deriv_diff4 = hcl.placeholder((tuple(g.pts_each_dim)), name=\"deriv_diff4\")\n deriv_diff5 = hcl.placeholder((tuple(g.pts_each_dim)), name=\"deriv_diff5\")\n deriv_diff6 = hcl.placeholder((tuple(g.pts_each_dim)), name=\"deriv_diff6\")\n\n # Positions vector\n x1 = hcl.placeholder((g.pts_each_dim[0],), name=\"x1\", dtype=hcl.Float())\n x2 = hcl.placeholder((g.pts_each_dim[1],), name=\"x2\", dtype=hcl.Float())\n x3 = hcl.placeholder((g.pts_each_dim[2],), name=\"x3\", dtype=hcl.Float())\n x4 = hcl.placeholder((g.pts_each_dim[3],), name=\"x4\", dtype=hcl.Float())\n x5 = hcl.placeholder((g.pts_each_dim[4],), name=\"x5\", dtype=hcl.Float())\n x6 = hcl.placeholder((g.pts_each_dim[5],), name=\"x6\", dtype=hcl.Float())\n\n # Obstacle placeholder\n obstacle = hcl.placeholder((tuple(g.pts_each_dim)), name=\"obstacle\")\n \n ##################### CREATE SCHEDULE##############\n\n # Create schedule\n s = hcl.create_schedule(\n [V_f, V_init, deriv_diff1, deriv_diff2, deriv_diff3, deriv_diff4, deriv_diff5, deriv_diff6, x1, x2, x3, x4, x5, x6, t, l0, obstacle], graph_6D)\n\n # Inspect the LLVM code\n print(hcl.lower(s))\n\n\n\n ################# INITIALIZE DATA TO BE INPUT INTO GRAPH ##########################\n\n print(\"Initializing\\n\")\n\n V_0 = hcl.asarray(my_shape)\n V_1 = hcl.asarray(np.zeros(tuple(g.pts_each_dim)))\n l0 = hcl.asarray(my_shape)\n obstacle = hcl.asarray(cstraint_values)\n\n list_x1 = np.reshape(g.vs[0], g.pts_each_dim[0])\n list_x2 = np.reshape(g.vs[1], g.pts_each_dim[1])\n list_x3 = 
np.reshape(g.vs[2], g.pts_each_dim[2])\n list_x4 = np.reshape(g.vs[3], g.pts_each_dim[3])\n list_x5 = np.reshape(g.vs[4], g.pts_each_dim[4])\n list_x6 = np.reshape(g.vs[5], g.pts_each_dim[5])\n\n # Convert to hcl array type\n list_x1 = hcl.asarray(list_x1)\n list_x2 = hcl.asarray(list_x2)\n list_x3 = hcl.asarray(list_x3)\n list_x4 = hcl.asarray(list_x4)\n list_x5 = hcl.asarray(list_x5)\n list_x6 = hcl.asarray(list_x6)\n\n # Initialize deriv diff tensor\n deriv_diff1 = hcl.asarray(np.zeros(tuple(g.pts_each_dim)))\n deriv_diff2 = hcl.asarray(np.zeros(tuple(g.pts_each_dim)))\n deriv_diff3 = hcl.asarray(np.zeros(tuple(g.pts_each_dim)))\n deriv_diff4 = hcl.asarray(np.zeros(tuple(g.pts_each_dim)))\n deriv_diff5 = hcl.asarray(np.zeros(tuple(g.pts_each_dim)))\n deriv_diff6 = hcl.asarray(np.zeros(tuple(g.pts_each_dim)))\n\n\n ##################### CODE OPTIMIZATION HERE ###########################\n print(\"Optimizing\\n\")\n\n # Accessing the hamiltonian stage\n s_H = graph_6D.Hamiltonian\n s_D = graph_6D.Dissipation\n\n #\n s[s_H].parallel(s_H.i)\n s[s_D].parallel(s_D.i)\n\n # Inspect IR\n #if args.llvm:\n # print(hcl.lower(s))\n\n ################ GET EXECUTABLE AND USE THE EXECUTABLE ############\n print(\"Running\\n\")\n\n # Get executable\n solve_pde = hcl.build(s)\n\n # Variables used for timing\n execution_time = 0\n lookback_time = 0\n\n tNow = tau[0]\n for i in range (1, len(tau)):\n #tNow = tau[i-1]\n t_minh= hcl.asarray(np.array((tNow, tau[i])))\n while tNow <= tau[i] - 1e-4:\n # Start timing\n start = time.time()\n\n # Run the execution and pass input into graph\n solve_pde(V_1, V_0, deriv_diff1, deriv_diff2, deriv_diff3, deriv_diff4, deriv_diff5, deriv_diff6,\n list_x1, list_x2, list_x3, list_x4, list_x5, list_x6, t_minh, l0, obstacle)\n\n tNow = np.asscalar((t_minh.asnumpy())[0])\n\n if lookback_time != 0: # Exclude first time of the computation\n execution_time += time.time() - start\n\n # Some information printing\n print(t_minh)\n print(\"Computational time to integrate (s): {:.5f}\".format(time.time() - start))\n # Saving data into disk\n if tNow >= tau[i] - 1e-4:\n print(\"Saving files\\n\")\n sio.savemat('/local-scratch/Humannoid/humannoid_v_{:d}.mat'.format(i), {'V_array': V_1.asnumpy()})\n \n #print(V_1.asnumpy())\n #\n # V_1 = V_1.asnumpy()\n # # V_1 = np.swapaxes(V_1, 0,2)\n # #V = np.swapaxes(V, 1,2)\n # #probe = probe.asnumpy()\n # #probe = np.swapaxes(probe, 0, 2)\n # #probe = np.swapaxes(probe, 1, 2)\n # #print(V)\n # #V_1 = V_1.asnumpy()\n #\n #\n # Time info printing\n print(\"Total kernel time (s): {:.5f}\".format(execution_time))\n print(\"Finished solving\\n\")\n\n ##################### PLOTTING #####################\n if args.plot:\n print(\"Plotting beautiful plots. Please wait\\n\")\n fig = go.Figure(data=go.Isosurface(\n x=g.mg_X.flatten(),\n y=g.mg_Y.flatten(),\n z=g.mg_T.flatten(),\n value=V_1.flatten(),\n colorscale='jet',\n isomin=0,\n surface_count=1,\n isomax=0,\n caps=dict(x_show=True, y_show=True)\n ))\n fig.show()\n print(\"Please check the plot on your browser.\")\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.reshape",
"numpy.arange",
"numpy.array"
]
] |
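The `6d_graph.py` record above implements, inside HeteroCL stages, a Lax-Friedrichs-style dissipation term (`0.5 * sum_i (dV/dx_i|_R - dV/dx_i|_L) * alpha_i`) and a CFL-like time step bound (`0.8 / sum_i (alpha_i / dx_i)`, capped at the remaining interval `t[1] - t[0]`). A plain-NumPy sketch of just those two formulas follows; it uses per-dimension scalar alphas and made-up array names for brevity, so it illustrates the arithmetic rather than replacing the kernel.

```python
# Plain-NumPy sketch of the dissipation and time-step-bound formulas used in
# the HeteroCL kernel above. Array and variable names are illustrative, and
# alphas are per-dimension scalars here rather than per-grid-point values.
import numpy as np


def lax_friedrichs_dissipation(deriv_diffs, alphas):
    """0.5 * sum_i (dV/dx_i|_R - dV/dx_i|_L) * alpha_i, elementwise on the grid."""
    diss = np.zeros_like(deriv_diffs[0])
    for diff, alpha in zip(deriv_diffs, alphas):
        diss += 0.5 * diff * alpha
    return diss


def cfl_step_bound(max_alphas, dx, t_remaining, safety=0.8):
    """safety / sum_i (alpha_i / dx_i), capped at the remaining time interval."""
    step_bound_inv = sum(a / d for a, d in zip(max_alphas, dx))
    return min(safety / step_bound_inv, t_remaining)


if __name__ == "__main__":
    shape = (5, 5)  # small stand-in for the 29^6 grid in the record
    diffs = [np.random.rand(*shape) for _ in range(6)]
    alphas = [1.0, 2.0, 0.5, 1.5, 3.0, 0.75]
    dx = [0.1] * 6
    print(lax_friedrichs_dissipation(diffs, alphas).shape)
    print(cfl_step_bound(alphas, dx, t_remaining=0.05))
```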
harirakul/PlagiarismDetection
|
[
"f6ddff2392590fde85d1958068ebc3ff5acc8cee"
] |
[
"similarity.py"
] |
[
"import nltk\nimport websearch\nfrom difflib import SequenceMatcher\nimport pandas as pd\n\nnltk.download('stopwords')\nnltk.download('punkt')\nstop_words = set(nltk.corpus.stopwords.words('english')) \n\ndef purifyText(string):\n words = nltk.word_tokenize(string)\n return (\" \".join([word for word in words if word not in stop_words]))\n\ndef webVerify(string, results_per_sentence):\n sentences = nltk.sent_tokenize(string)\n matching_sites = []\n for url in websearch.searchBing(query=string, num=results_per_sentence):\n matching_sites.append(url)\n for sentence in sentences:\n for url in websearch.searchBing(query = sentence, num = results_per_sentence):\n matching_sites.append(url)\n\n return (list(set(matching_sites)))\n\ndef similarity(str1, str2):\n return (SequenceMatcher(None,str1,str2).ratio())*100\n\ndef report(text):\n\n matching_sites = webVerify(purifyText(text), 2)\n matches = {}\n\n for i in range(len(matching_sites)):\n matches[matching_sites[i]] = similarity(text, websearch.extractText(matching_sites[i]))\n\n matches = {k: v for k, v in sorted(matches.items(), key=lambda item: item[1], reverse=True)}\n\n return matches\n\n\ndef returnTable(dictionary):\n\n df = pd.DataFrame({'Similarity (%)': dictionary})\n #df = df.fillna(' ').T\n #df = df.transpose()\n return df.to_html()\n\nif __name__ == '__main__':\n report('This is a pure test')\n"
] |
[
[
"pandas.DataFrame"
]
] |
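The `similarity.py` record above scores each candidate page with `difflib.SequenceMatcher(None, a, b).ratio() * 100`, sorts the results in descending order, and renders them with pandas. A small offline sketch of that scoring and ranking step is below; it skips the `websearch` and NLTK parts, and the candidate texts are made-up placeholders.

```python
# Offline sketch of the scoring and ranking step from similarity.py above:
# SequenceMatcher ratio scaled to a percentage, sorted descending, rendered
# with pandas. The candidate texts are made-up placeholders.
from difflib import SequenceMatcher

import pandas as pd


def similarity(str1, str2):
    """Percentage similarity, as defined in the record above."""
    return SequenceMatcher(None, str1, str2).ratio() * 100


def rank_candidates(query, candidates):
    """Return {source: similarity%} sorted from most to least similar."""
    scores = {name: similarity(query, text) for name, text in candidates.items()}
    return dict(sorted(scores.items(), key=lambda item: item[1], reverse=True))


if __name__ == "__main__":
    query = "This is a pure test"
    candidates = {
        "page_a": "This is a pure test of the ranking step",
        "page_b": "Completely unrelated text",
    }
    ranked = rank_candidates(query, candidates)
    print(pd.DataFrame({"Similarity (%)": ranked}).to_html())
```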
matsavage/models
|
[
"634309ac537bbfc5198197b92096a59b52b0bb45",
"42f98218d7b0ee54077d4e07658442bc7ae0e661"
] |
[
"official/recommendation/data_async_generation.py",
"research/object_detection/predictors/convolutional_keras_box_predictor_test.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Asynchronously generate TFRecords files for NCF.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport atexit\nimport contextlib\nimport datetime\nimport gc\nimport multiprocessing\nimport json\nimport os\nimport pickle\nimport signal\nimport sys\nimport tempfile\nimport time\nimport timeit\nimport traceback\nimport typing\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom absl import app as absl_app\nfrom absl import flags\n\nfrom official.datasets import movielens\nfrom official.recommendation import constants as rconst\nfrom official.recommendation import stat_utils\nfrom official.recommendation import popen_helper\nfrom official.utils.logs import mlperf_helper\n\n\n_log_file = None\n\n\ndef log_msg(msg):\n \"\"\"Include timestamp info when logging messages to a file.\"\"\"\n if flags.FLAGS.use_tf_logging:\n tf.logging.info(msg)\n return\n\n if flags.FLAGS.redirect_logs:\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n print(\"[{}] {}\".format(timestamp, msg), file=_log_file)\n else:\n print(msg, file=_log_file)\n if _log_file:\n _log_file.flush()\n\n\ndef get_cycle_folder_name(i):\n return \"cycle_{}\".format(str(i).zfill(5))\n\n\ndef _process_shard(args):\n # type: ((str, int, int, int, bool)) -> (np.ndarray, np.ndarray, np.ndarray)\n \"\"\"Read a shard of training data and return training vectors.\n\n Args:\n shard_path: The filepath of the positive instance training shard.\n num_items: The cardinality of the item set.\n num_neg: The number of negatives to generate per positive example.\n seed: Random seed to be used when generating negatives.\n is_training: Generate training (True) or eval (False) data.\n match_mlperf: Match the MLPerf reference behavior\n \"\"\"\n shard_path, num_items, num_neg, seed, is_training, match_mlperf = args\n np.random.seed(seed)\n\n # The choice to store the training shards in files rather than in memory\n # is motivated by the fact that multiprocessing serializes arguments,\n # transmits them to map workers, and then deserializes them. 
By storing the\n # training shards in files, the serialization work only needs to be done once.\n #\n # A similar effect could be achieved by simply holding pickled bytes in\n # memory, however the processing is not I/O bound and is therefore\n # unnecessary.\n with tf.gfile.Open(shard_path, \"rb\") as f:\n shard = pickle.load(f)\n\n users = shard[rconst.TRAIN_KEY][movielens.USER_COLUMN]\n items = shard[rconst.TRAIN_KEY][movielens.ITEM_COLUMN]\n\n if not is_training:\n # For eval, there is one positive which was held out from the training set.\n test_positive_dict = dict(zip(\n shard[rconst.EVAL_KEY][movielens.USER_COLUMN],\n shard[rconst.EVAL_KEY][movielens.ITEM_COLUMN]))\n\n delta = users[1:] - users[:-1]\n boundaries = ([0] + (np.argwhere(delta)[:, 0] + 1).tolist() +\n [users.shape[0]])\n\n user_blocks = []\n item_blocks = []\n label_blocks = []\n for i in range(len(boundaries) - 1):\n assert len(set(users[boundaries[i]:boundaries[i+1]])) == 1\n current_user = users[boundaries[i]]\n\n positive_items = items[boundaries[i]:boundaries[i+1]]\n positive_set = set(positive_items)\n if positive_items.shape[0] != len(positive_set):\n raise ValueError(\"Duplicate entries detected.\")\n\n if is_training:\n n_pos = len(positive_set)\n negatives = stat_utils.sample_with_exclusion(\n num_items, positive_set, n_pos * num_neg, replacement=True)\n\n else:\n if not match_mlperf:\n # The mlperf reference allows the holdout item to appear as a negative.\n # Including it in the positive set makes the eval more stringent,\n # because an appearance of the test item would be removed by\n # deduplication rules. (Effectively resulting in a minute reduction of\n # NUM_EVAL_NEGATIVES)\n positive_set.add(test_positive_dict[current_user])\n\n negatives = stat_utils.sample_with_exclusion(\n num_items, positive_set, num_neg, replacement=match_mlperf)\n positive_set = [test_positive_dict[current_user]]\n n_pos = len(positive_set)\n assert n_pos == 1\n\n user_blocks.append(current_user * np.ones(\n (n_pos * (1 + num_neg),), dtype=np.int32))\n item_blocks.append(\n np.array(list(positive_set) + negatives, dtype=np.uint16))\n labels_for_user = np.zeros((n_pos * (1 + num_neg),), dtype=np.int8)\n labels_for_user[:n_pos] = 1\n label_blocks.append(labels_for_user)\n\n users_out = np.concatenate(user_blocks)\n items_out = np.concatenate(item_blocks)\n labels_out = np.concatenate(label_blocks)\n\n assert users_out.shape == items_out.shape == labels_out.shape\n return users_out, items_out, labels_out\n\n\ndef _construct_record(users, items, labels=None, dupe_mask=None):\n \"\"\"Convert NumPy arrays into a TFRecords entry.\"\"\"\n feature_dict = {\n movielens.USER_COLUMN: tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[memoryview(users).tobytes()])),\n movielens.ITEM_COLUMN: tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[memoryview(items).tobytes()])),\n }\n if labels is not None:\n feature_dict[\"labels\"] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[memoryview(labels).tobytes()]))\n\n if dupe_mask is not None:\n feature_dict[rconst.DUPLICATE_MASK] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[memoryview(dupe_mask).tobytes()]))\n\n return tf.train.Example(\n features=tf.train.Features(feature=feature_dict)).SerializeToString()\n\n\ndef sigint_handler(signal, frame):\n log_msg(\"Shutting down worker.\")\n\n\ndef init_worker():\n signal.signal(signal.SIGINT, sigint_handler)\n\n\ndef _construct_records(\n is_training, # type: bool\n train_cycle, # type: typing.Optional[int]\n 
num_workers, # type: int\n cache_paths, # type: rconst.Paths\n num_readers, # type: int\n num_neg, # type: int\n num_positives, # type: int\n num_items, # type: int\n epochs_per_cycle, # type: int\n batch_size, # type: int\n training_shards, # type: typing.List[str]\n deterministic=False, # type: bool\n match_mlperf=False # type: bool\n ):\n \"\"\"Generate false negatives and write TFRecords files.\n\n Args:\n is_training: Are training records (True) or eval records (False) created.\n train_cycle: Integer of which cycle the generated data is for.\n num_workers: Number of multiprocessing workers to use for negative\n generation.\n cache_paths: Paths object with information of where to write files.\n num_readers: The number of reader datasets in the input_fn. This number is\n approximate; fewer shards will be created if not all shards are assigned\n batches. This can occur due to discretization in the assignment process.\n num_neg: The number of false negatives per positive example.\n num_positives: The number of positive examples. This value is used\n to pre-allocate arrays while the imap is still running. (NumPy does not\n allow dynamic arrays.)\n num_items: The cardinality of the item set.\n epochs_per_cycle: The number of epochs worth of data to construct.\n batch_size: The expected batch size used during training. This is used\n to properly batch data when writing TFRecords.\n training_shards: The picked positive examples from which to generate\n negatives.\n \"\"\"\n st = timeit.default_timer()\n\n if is_training:\n mlperf_helper.ncf_print(key=mlperf_helper.TAGS.INPUT_STEP_TRAIN_NEG_GEN)\n mlperf_helper.ncf_print(\n key=mlperf_helper.TAGS.INPUT_HP_NUM_NEG, value=num_neg)\n\n # set inside _process_shard()\n mlperf_helper.ncf_print(\n key=mlperf_helper.TAGS.INPUT_HP_SAMPLE_TRAIN_REPLACEMENT, value=True)\n\n else:\n # Later logic assumes that all items for a given user are in the same batch.\n assert not batch_size % (rconst.NUM_EVAL_NEGATIVES + 1)\n assert num_neg == rconst.NUM_EVAL_NEGATIVES\n\n mlperf_helper.ncf_print(key=mlperf_helper.TAGS.INPUT_STEP_EVAL_NEG_GEN)\n\n mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_HP_NUM_USERS,\n value=num_positives)\n\n assert epochs_per_cycle == 1 or is_training\n num_workers = min([num_workers, len(training_shards) * epochs_per_cycle])\n\n num_pts = num_positives * (1 + num_neg)\n\n # Equivalent to `int(ceil(num_pts / batch_size)) * batch_size`, but without\n # precision concerns\n num_pts_with_padding = (num_pts + batch_size - 1) // batch_size * batch_size\n num_padding = num_pts_with_padding - num_pts\n\n # We choose a different random seed for each process, so that the processes\n # will not all choose the same random numbers.\n process_seeds = [stat_utils.random_int32()\n for _ in training_shards * epochs_per_cycle]\n map_args = [\n (shard, num_items, num_neg, process_seeds[i], is_training, match_mlperf)\n for i, shard in enumerate(training_shards * epochs_per_cycle)]\n\n with popen_helper.get_pool(num_workers, init_worker) as pool:\n map_fn = pool.imap if deterministic else pool.imap_unordered # pylint: disable=no-member\n data_generator = map_fn(_process_shard, map_args)\n data = [\n np.zeros(shape=(num_pts_with_padding,), dtype=np.int32) - 1,\n np.zeros(shape=(num_pts_with_padding,), dtype=np.uint16),\n np.zeros(shape=(num_pts_with_padding,), dtype=np.int8),\n ]\n\n # Training data is shuffled. 
Evaluation data MUST not be shuffled.\n # Downstream processing depends on the fact that evaluation data for a given\n # user is grouped within a batch.\n if is_training:\n index_destinations = np.random.permutation(num_pts)\n mlperf_helper.ncf_print(key=mlperf_helper.TAGS.INPUT_ORDER)\n else:\n index_destinations = np.arange(num_pts)\n\n start_ind = 0\n for data_segment in data_generator:\n n_in_segment = data_segment[0].shape[0]\n dest = index_destinations[start_ind:start_ind + n_in_segment]\n start_ind += n_in_segment\n for i in range(3):\n data[i][dest] = data_segment[i]\n\n assert np.sum(data[0] == -1) == num_padding\n\n if is_training:\n if num_padding:\n # In order to have a full batch, randomly include points from earlier in\n # the batch.\n\n mlperf_helper.ncf_print(key=mlperf_helper.TAGS.INPUT_ORDER)\n pad_sample_indices = np.random.randint(\n low=0, high=num_pts, size=(num_padding,))\n dest = np.arange(start=start_ind, stop=start_ind + num_padding)\n start_ind += num_padding\n for i in range(3):\n data[i][dest] = data[i][pad_sample_indices]\n else:\n # For Evaluation, padding is all zeros. The evaluation input_fn knows how\n # to interpret and discard the zero padded entries.\n data[0][num_pts:] = 0\n\n # Check that no points were overlooked.\n assert not np.sum(data[0] == -1)\n\n if is_training:\n # The number of points is slightly larger than num_pts due to padding.\n mlperf_helper.ncf_print(key=mlperf_helper.TAGS.INPUT_SIZE,\n value=int(data[0].shape[0]))\n mlperf_helper.ncf_print(key=mlperf_helper.TAGS.INPUT_BATCH_SIZE,\n value=batch_size)\n else:\n # num_pts is logged instead of int(data[0].shape[0]), because the size\n # of the data vector includes zero pads which are ignored.\n mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_SIZE, value=num_pts)\n\n batches_per_file = np.ceil(num_pts_with_padding / batch_size / num_readers)\n current_file_id = -1\n current_batch_id = -1\n batches_by_file = [[] for _ in range(num_readers)]\n\n while True:\n current_batch_id += 1\n if (current_batch_id % batches_per_file) == 0:\n current_file_id += 1\n\n start_ind = current_batch_id * batch_size\n end_ind = start_ind + batch_size\n if end_ind > num_pts_with_padding:\n if start_ind != num_pts_with_padding:\n raise ValueError(\"Batch padding does not line up\")\n break\n batches_by_file[current_file_id].append(current_batch_id)\n\n # Drop shards which were not assigned batches\n batches_by_file = [i for i in batches_by_file if i]\n num_readers = len(batches_by_file)\n\n if is_training:\n # Empirically it is observed that placing the batch with repeated values at\n # the start rather than the end improves convergence.\n mlperf_helper.ncf_print(key=mlperf_helper.TAGS.INPUT_ORDER)\n batches_by_file[0][0], batches_by_file[-1][-1] = \\\n batches_by_file[-1][-1], batches_by_file[0][0]\n\n if is_training:\n template = rconst.TRAIN_RECORD_TEMPLATE\n record_dir = os.path.join(cache_paths.train_epoch_dir,\n get_cycle_folder_name(train_cycle))\n tf.gfile.MakeDirs(record_dir)\n else:\n template = rconst.EVAL_RECORD_TEMPLATE\n record_dir = cache_paths.eval_data_subdir\n\n batch_count = 0\n for i in range(num_readers):\n fpath = os.path.join(record_dir, template.format(i))\n log_msg(\"Writing {}\".format(fpath))\n with tf.python_io.TFRecordWriter(fpath) as writer:\n for j in batches_by_file[i]:\n start_ind = j * batch_size\n end_ind = start_ind + batch_size\n record_kwargs = dict(\n users=data[0][start_ind:end_ind],\n items=data[1][start_ind:end_ind],\n )\n\n if is_training:\n record_kwargs[\"labels\"] 
= data[2][start_ind:end_ind]\n else:\n record_kwargs[\"dupe_mask\"] = stat_utils.mask_duplicates(\n record_kwargs[\"items\"].reshape(-1, num_neg + 1),\n axis=1).flatten().astype(np.int8)\n\n batch_bytes = _construct_record(**record_kwargs)\n\n writer.write(batch_bytes)\n batch_count += 1\n\n # We write to a temp file then atomically rename it to the final file, because\n # writing directly to the final file can cause the main process to read a\n # partially written JSON file.\n ready_file_temp = os.path.join(record_dir, rconst.READY_FILE_TEMP)\n with tf.gfile.Open(ready_file_temp, \"w\") as f:\n json.dump({\n \"batch_size\": batch_size,\n \"batch_count\": batch_count,\n }, f)\n ready_file = os.path.join(record_dir, rconst.READY_FILE)\n tf.gfile.Rename(ready_file_temp, ready_file)\n\n if is_training:\n log_msg(\"Cycle {} complete. Total time: {:.1f} seconds\"\n .format(train_cycle, timeit.default_timer() - st))\n else:\n log_msg(\"Eval construction complete. Total time: {:.1f} seconds\"\n .format(timeit.default_timer() - st))\n\n\ndef _generation_loop(num_workers, # type: int\n cache_paths, # type: rconst.Paths\n num_readers, # type: int\n num_neg, # type: int\n num_train_positives, # type: int\n num_items, # type: int\n num_users, # type: int\n epochs_per_cycle, # type: int\n train_batch_size, # type: int\n eval_batch_size, # type: int\n deterministic, # type: bool\n match_mlperf # type: bool\n ):\n # type: (...) -> None\n \"\"\"Primary run loop for data file generation.\"\"\"\n\n log_msg(\"Entering generation loop.\")\n tf.gfile.MakeDirs(cache_paths.train_epoch_dir)\n tf.gfile.MakeDirs(cache_paths.eval_data_subdir)\n\n training_shards = [os.path.join(cache_paths.train_shard_subdir, i) for i in\n tf.gfile.ListDirectory(cache_paths.train_shard_subdir)]\n\n shared_kwargs = dict(\n num_workers=multiprocessing.cpu_count(), cache_paths=cache_paths,\n num_readers=num_readers, num_items=num_items,\n training_shards=training_shards, deterministic=deterministic,\n match_mlperf=match_mlperf\n )\n\n # Training blocks on the creation of the first epoch, so the num_workers\n # limit is not respected for this invocation\n train_cycle = 0\n _construct_records(\n is_training=True, train_cycle=train_cycle, num_neg=num_neg,\n num_positives=num_train_positives, epochs_per_cycle=epochs_per_cycle,\n batch_size=train_batch_size, **shared_kwargs)\n\n # Construct evaluation set.\n shared_kwargs[\"num_workers\"] = num_workers\n _construct_records(\n is_training=False, train_cycle=None, num_neg=rconst.NUM_EVAL_NEGATIVES,\n num_positives=num_users, epochs_per_cycle=1, batch_size=eval_batch_size,\n **shared_kwargs)\n\n wait_count = 0\n start_time = time.time()\n while True:\n ready_epochs = tf.gfile.ListDirectory(cache_paths.train_epoch_dir)\n if len(ready_epochs) >= rconst.CYCLES_TO_BUFFER:\n wait_count += 1\n sleep_time = max([0, wait_count * 5 - (time.time() - start_time)])\n time.sleep(sleep_time)\n\n if (wait_count % 10) == 0:\n log_msg(\"Waited {} times for data to be consumed.\"\n .format(wait_count))\n\n if time.time() - start_time > rconst.TIMEOUT_SECONDS:\n log_msg(\"Waited more than {} seconds. 
Concluding that this \"\n \"process is orphaned and exiting gracefully.\"\n .format(rconst.TIMEOUT_SECONDS))\n sys.exit()\n\n continue\n\n train_cycle += 1\n _construct_records(\n is_training=True, train_cycle=train_cycle, num_neg=num_neg,\n num_positives=num_train_positives, epochs_per_cycle=epochs_per_cycle,\n batch_size=train_batch_size, **shared_kwargs)\n\n wait_count = 0\n start_time = time.time()\n gc.collect()\n\n\ndef wait_for_path(fpath):\n start_time = time.time()\n while not tf.gfile.Exists(fpath):\n if time.time() - start_time > rconst.TIMEOUT_SECONDS:\n log_msg(\"Waited more than {} seconds. Concluding that this \"\n \"process is orphaned and exiting gracefully.\"\n .format(rconst.TIMEOUT_SECONDS))\n sys.exit()\n time.sleep(1)\n\ndef _parse_flagfile(flagfile):\n \"\"\"Fill flags with flagfile written by the main process.\"\"\"\n tf.logging.info(\"Waiting for flagfile to appear at {}...\"\n .format(flagfile))\n wait_for_path(flagfile)\n tf.logging.info(\"flagfile found.\")\n\n # `flags` module opens `flagfile` with `open`, which does not work on\n # google cloud storage etc.\n _, flagfile_temp = tempfile.mkstemp()\n tf.gfile.Copy(flagfile, flagfile_temp, overwrite=True)\n\n flags.FLAGS([__file__, \"--flagfile\", flagfile_temp])\n tf.gfile.Remove(flagfile_temp)\n\n\ndef write_alive_file(cache_paths):\n \"\"\"Write file to signal that generation process started correctly.\"\"\"\n wait_for_path(cache_paths.cache_root)\n\n log_msg(\"Signaling that I am alive.\")\n with tf.gfile.Open(cache_paths.subproc_alive, \"w\") as f:\n f.write(\"Generation subproc has started.\")\n\n @atexit.register\n def remove_alive_file():\n try:\n tf.gfile.Remove(cache_paths.subproc_alive)\n except tf.errors.NotFoundError:\n return # Main thread has already deleted the entire cache dir.\n\n\ndef main(_):\n # Note: The async process must execute the following two steps in the\n # following order BEFORE doing anything else:\n # 1) Write the alive file\n # 2) Wait for the flagfile to be written.\n global _log_file\n cache_paths = rconst.Paths(\n data_dir=flags.FLAGS.data_dir, cache_id=flags.FLAGS.cache_id)\n write_alive_file(cache_paths=cache_paths)\n\n flagfile = os.path.join(cache_paths.cache_root, rconst.FLAGFILE)\n _parse_flagfile(flagfile)\n\n redirect_logs = flags.FLAGS.redirect_logs\n\n log_file_name = \"data_gen_proc_{}.log\".format(cache_paths.cache_id)\n log_path = os.path.join(cache_paths.data_dir, log_file_name)\n if log_path.startswith(\"gs://\") and redirect_logs:\n fallback_log_file = os.path.join(tempfile.gettempdir(), log_file_name)\n print(\"Unable to log to {}. 
Falling back to {}\"\n .format(log_path, fallback_log_file))\n log_path = fallback_log_file\n\n # This server is generally run in a subprocess.\n if redirect_logs:\n print(\"Redirecting output of data_async_generation.py process to {}\"\n .format(log_path))\n _log_file = open(log_path, \"wt\") # Note: not tf.gfile.Open().\n try:\n log_msg(\"sys.argv: {}\".format(\" \".join(sys.argv)))\n\n if flags.FLAGS.seed is not None:\n np.random.seed(flags.FLAGS.seed)\n\n with mlperf_helper.LOGGER(\n enable=flags.FLAGS.output_ml_perf_compliance_logging):\n mlperf_helper.set_ncf_root(os.path.split(os.path.abspath(__file__))[0])\n _generation_loop(\n num_workers=flags.FLAGS.num_workers,\n cache_paths=cache_paths,\n num_readers=flags.FLAGS.num_readers,\n num_neg=flags.FLAGS.num_neg,\n num_train_positives=flags.FLAGS.num_train_positives,\n num_items=flags.FLAGS.num_items,\n num_users=flags.FLAGS.num_users,\n epochs_per_cycle=flags.FLAGS.epochs_per_cycle,\n train_batch_size=flags.FLAGS.train_batch_size,\n eval_batch_size=flags.FLAGS.eval_batch_size,\n deterministic=flags.FLAGS.seed is not None,\n match_mlperf=flags.FLAGS.ml_perf,\n )\n except KeyboardInterrupt:\n log_msg(\"KeyboardInterrupt registered.\")\n except:\n traceback.print_exc(file=_log_file)\n raise\n finally:\n log_msg(\"Shutting down generation subprocess.\")\n sys.stdout.flush()\n sys.stderr.flush()\n if redirect_logs:\n _log_file.close()\n\n\ndef define_flags():\n \"\"\"Construct flags for the server.\"\"\"\n flags.DEFINE_integer(name=\"num_workers\", default=multiprocessing.cpu_count(),\n help=\"Size of the negative generation worker pool.\")\n flags.DEFINE_string(name=\"data_dir\", default=None,\n help=\"The data root. (used to construct cache paths.)\")\n flags.DEFINE_string(name=\"cache_id\", default=None,\n help=\"The cache_id generated in the main process.\")\n flags.DEFINE_integer(name=\"num_readers\", default=4,\n help=\"Number of reader datasets in training. This sets\"\n \"how the epoch files are sharded.\")\n flags.DEFINE_integer(name=\"num_neg\", default=None,\n help=\"The Number of negative instances to pair with a \"\n \"positive instance.\")\n flags.DEFINE_integer(name=\"num_train_positives\", default=None,\n help=\"The number of positive training examples.\")\n flags.DEFINE_integer(name=\"num_items\", default=None,\n help=\"Number of items from which to select negatives.\")\n flags.DEFINE_integer(name=\"num_users\", default=None,\n help=\"The number of unique users. Used for evaluation.\")\n flags.DEFINE_integer(name=\"epochs_per_cycle\", default=1,\n help=\"The number of epochs of training data to produce\"\n \"at a time.\")\n flags.DEFINE_integer(name=\"train_batch_size\", default=None,\n help=\"The batch size with which training TFRecords will \"\n \"be chunked.\")\n flags.DEFINE_integer(name=\"eval_batch_size\", default=None,\n help=\"The batch size with which evaluation TFRecords \"\n \"will be chunked.\")\n flags.DEFINE_boolean(name=\"redirect_logs\", default=False,\n help=\"Catch logs and write them to a file. \"\n \"(Useful if this is run as a subprocess)\")\n flags.DEFINE_boolean(name=\"use_tf_logging\", default=False,\n help=\"Use tf.logging instead of log file.\")\n flags.DEFINE_integer(name=\"seed\", default=None,\n help=\"NumPy random seed to set at startup. If not \"\n \"specified, a seed will not be set.\")\n flags.DEFINE_boolean(name=\"ml_perf\", default=None,\n help=\"Match MLPerf. 
See ncf_main.py for details.\")\n flags.DEFINE_bool(name=\"output_ml_perf_compliance_logging\", default=None,\n help=\"Output the MLPerf compliance logging. See \"\n \"ncf_main.py for details.\")\n\n flags.mark_flags_as_required([\"data_dir\", \"cache_id\"])\n\nif __name__ == \"__main__\":\n define_flags()\n absl_app.run(main)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.predictors.convolutional_keras_box_predictor.\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\nfrom object_detection.builders import box_predictor_builder\nfrom object_detection.builders import hyperparams_builder\nfrom object_detection.predictors import convolutional_keras_box_predictor as box_predictor\nfrom object_detection.protos import hyperparams_pb2\nfrom object_detection.utils import test_case\n\n\nclass ConvolutionalKerasBoxPredictorTest(test_case.TestCase):\n\n def _build_conv_hyperparams(self):\n conv_hyperparams = hyperparams_pb2.Hyperparams()\n conv_hyperparams_text_proto = \"\"\"\n activation: RELU_6\n regularizer {\n l2_regularizer {\n }\n }\n initializer {\n truncated_normal_initializer {\n }\n }\n \"\"\"\n text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)\n return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)\n\n def test_get_boxes_for_five_aspect_ratios_per_location(self):\n def graph_fn(image_features):\n conv_box_predictor = (\n box_predictor_builder.build_convolutional_keras_box_predictor(\n is_training=False,\n num_classes=0,\n conv_hyperparams=self._build_conv_hyperparams(),\n freeze_batchnorm=False,\n inplace_batchnorm_update=False,\n num_predictions_per_location_list=[5],\n min_depth=0,\n max_depth=32,\n num_layers_before_predictor=1,\n use_dropout=True,\n dropout_keep_prob=0.8,\n kernel_size=1,\n box_code_size=4\n ))\n box_predictions = conv_box_predictor([image_features])\n box_encodings = tf.concat(\n box_predictions[box_predictor.BOX_ENCODINGS], axis=1)\n objectness_predictions = tf.concat(\n box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],\n axis=1)\n return (box_encodings, objectness_predictions)\n image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)\n (box_encodings, objectness_predictions) = self.execute(graph_fn,\n [image_features])\n self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])\n self.assertAllEqual(objectness_predictions.shape, [4, 320, 1])\n\n def test_get_boxes_for_one_aspect_ratio_per_location(self):\n def graph_fn(image_features):\n conv_box_predictor = (\n box_predictor_builder.build_convolutional_keras_box_predictor(\n is_training=False,\n num_classes=0,\n conv_hyperparams=self._build_conv_hyperparams(),\n freeze_batchnorm=False,\n inplace_batchnorm_update=False,\n num_predictions_per_location_list=[1],\n min_depth=0,\n max_depth=32,\n num_layers_before_predictor=1,\n use_dropout=True,\n dropout_keep_prob=0.8,\n kernel_size=1,\n box_code_size=4\n ))\n box_predictions = conv_box_predictor([image_features])\n box_encodings = tf.concat(\n box_predictions[box_predictor.BOX_ENCODINGS], axis=1)\n objectness_predictions = tf.concat(box_predictions[\n box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)\n return 
(box_encodings, objectness_predictions)\n image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)\n (box_encodings, objectness_predictions) = self.execute(graph_fn,\n [image_features])\n self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4])\n self.assertAllEqual(objectness_predictions.shape, [4, 64, 1])\n\n def test_get_multi_class_predictions_for_five_aspect_ratios_per_location(\n self):\n num_classes_without_background = 6\n image_features = np.random.rand(4, 8, 8, 64).astype(np.float32)\n def graph_fn(image_features):\n conv_box_predictor = (\n box_predictor_builder.build_convolutional_keras_box_predictor(\n is_training=False,\n num_classes=num_classes_without_background,\n conv_hyperparams=self._build_conv_hyperparams(),\n freeze_batchnorm=False,\n inplace_batchnorm_update=False,\n num_predictions_per_location_list=[5],\n min_depth=0,\n max_depth=32,\n num_layers_before_predictor=1,\n use_dropout=True,\n dropout_keep_prob=0.8,\n kernel_size=1,\n box_code_size=4\n ))\n box_predictions = conv_box_predictor([image_features])\n box_encodings = tf.concat(\n box_predictions[box_predictor.BOX_ENCODINGS], axis=1)\n class_predictions_with_background = tf.concat(\n box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],\n axis=1)\n return (box_encodings, class_predictions_with_background)\n (box_encodings,\n class_predictions_with_background) = self.execute(graph_fn,\n [image_features])\n self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4])\n self.assertAllEqual(class_predictions_with_background.shape,\n [4, 320, num_classes_without_background+1])\n\n def test_get_predictions_with_feature_maps_of_dynamic_shape(\n self):\n image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64])\n conv_box_predictor = (\n box_predictor_builder.build_convolutional_keras_box_predictor(\n is_training=False,\n num_classes=0,\n conv_hyperparams=self._build_conv_hyperparams(),\n freeze_batchnorm=False,\n inplace_batchnorm_update=False,\n num_predictions_per_location_list=[5],\n min_depth=0,\n max_depth=32,\n num_layers_before_predictor=1,\n use_dropout=True,\n dropout_keep_prob=0.8,\n kernel_size=1,\n box_code_size=4\n ))\n box_predictions = conv_box_predictor([image_features])\n box_encodings = tf.concat(\n box_predictions[box_predictor.BOX_ENCODINGS], axis=1)\n objectness_predictions = tf.concat(\n box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],\n axis=1)\n init_op = tf.global_variables_initializer()\n\n resolution = 32\n expected_num_anchors = resolution*resolution*5\n with self.test_session() as sess:\n sess.run(init_op)\n (box_encodings_shape,\n objectness_predictions_shape) = sess.run(\n [tf.shape(box_encodings), tf.shape(objectness_predictions)],\n feed_dict={image_features:\n np.random.rand(4, resolution, resolution, 64)})\n actual_variable_set = set(\n [var.op.name for var in tf.trainable_variables()])\n self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4])\n self.assertAllEqual(objectness_predictions_shape,\n [4, expected_num_anchors, 1])\n expected_variable_set = set([\n 'BoxPredictor/PreHeadConvolutions_0/Conv2d_0_1x1_32/bias',\n 'BoxPredictor/PreHeadConvolutions_0/Conv2d_0_1x1_32/kernel',\n 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias',\n 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel',\n 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias',\n 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel'])\n self.assertEqual(expected_variable_set, actual_variable_set)\n\n # 
TODO(kaftan): Remove conditional after CMLE moves to TF 1.10\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.gfile.Exists",
"numpy.concatenate",
"tensorflow.gfile.MakeDirs",
"numpy.random.randint",
"numpy.arange",
"tensorflow.python_io.TFRecordWriter",
"numpy.ceil",
"tensorflow.gfile.Remove",
"numpy.zeros",
"tensorflow.gfile.ListDirectory",
"tensorflow.gfile.Open",
"tensorflow.logging.info",
"tensorflow.train.Features",
"numpy.sum",
"numpy.random.seed",
"tensorflow.gfile.Copy",
"numpy.ones",
"numpy.argwhere",
"tensorflow.gfile.Rename",
"numpy.random.permutation"
],
[
"tensorflow.concat",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.test.main",
"tensorflow.global_variables_initializer",
"numpy.random.rand",
"tensorflow.trainable_variables"
]
] |
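The `data_async_generation.py` cell above hands freshly generated TFRecord epochs to the training process through a small JSON "ready" file that is written under a temporary name and then renamed, so the consumer can never observe a half-written file. A minimal sketch of that publish-by-rename handshake, assuming plain local files and `os.replace` in place of `tf.gfile`; the `ready.json` name here is only a stand-in for the `rconst.READY_FILE` constant, which is not shown in the cell:

```python
import json
import os
import tempfile

def publish_ready_file(record_dir, batch_size, batch_count):
    """Write metadata to a temp file, then atomically rename it into place.

    A reader polling for 'ready.json' either sees the complete file or
    nothing at all, never a partially written one.
    """
    fd, temp_path = tempfile.mkstemp(dir=record_dir, suffix=".tmp")
    with os.fdopen(fd, "w") as f:
        json.dump({"batch_size": batch_size, "batch_count": batch_count}, f)
    # os.replace is atomic when source and destination live on the same
    # filesystem, which holds for a temp file created inside record_dir.
    os.replace(temp_path, os.path.join(record_dir, "ready.json"))
```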
DrowseyDevelopers/create-spectrograms
|
[
"889cd93fc6fd86c7e691b74083b8595d59632d60"
] |
[
"__main__.py"
] |
[
"#!/usr/bin/env python3\n\"\"\"\n Module to take in .mat MatLab files and generate spectrogram images via Short Time Fourier Transform\n ---------- ------------------------------ --------------------\n | Data.mat | -> | Short-Time Fourier Transform | -> | Spectrogram Images |\n ---------- ------------------------------ --------------------\n\"\"\"\n\nfrom scipy import signal # imports to make spectrogram images\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport shutil\nimport numpy as np\nimport os\nimport scipy.io\nimport argparse\nimport glob\nimport math\n\nnp.seterr(divide='raise')\n\nKEYS = ['id', 'tag', 'nS', 'sampFreq', 'marker', 'timestamp', 'data', 'trials']\nCWD = os.path.dirname(os.path.realpath(__file__))\n\n# Ranges of data points representing a certain mental state e.g. focused, unfocused or drowsy\nFOCUSED_DATA = [0, 76801]\nUNFOCUSED_DATA = [76801, 153600]\nDROWSY_DATA = [153601, 230400]\n\nDATA_FILES_PATH = os.path.join(CWD, 'data') # constant representing directory path to data files\nSTATE_DATA_OUTPUT = os.path.join(CWD, 'state-data')\nCHANNELS = [4, 5, 8, 9, 10, 11, 16]\n\nMAT = '.mat' # suffix of input files\nFREQUENCY = 128 # frequency rate is 128Hz\nM = 64\nMAX_AMP = 2 # Max amplitude for short-time fourier transform graph\n\n\ndef handle_arguments():\n \"\"\"\n Function used to set the arguments that can be passed to the script\n :return: the Parsed arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Split EEG data preprocess and create spectrograms')\n\n parser.add_argument('-s', '--split', action='store_true', default=False, dest='split_data',\n help='Flag used to split the data: Focused, Unfocused, and Drowsy data sets')\n\n parser.add_argument('-i', '--images', dest='state', choices=['FOCUSED', 'UNFOCUSED', 'DROWSY', 'ALL'],\n help='Flag used to determine what mental state we want to create spectrogram images for')\n\n return parser.parse_args()\n\n\ndef handle_split_data(input_files, channels):\n \"\"\"\n Function used to handle the split of data by mental state\n :return:\n \"\"\"\n # create directory where we will output split data\n create_output_directory(STATE_DATA_OUTPUT)\n\n for data_file in input_files:\n # data from a single file\n data = load_data_from_file(data_file)\n\n # name of the output image file\n output_basename = os.path.basename(data_file)\n output_basename = output_basename.split('.')[0]\n\n # full path location of directory we want to create for data file we are analyzing\n output_dirpath = os.path.join(STATE_DATA_OUTPUT, output_basename)\n\n # make a directory for data file being analyzed in order to generate images for all channels of data file.\n # e.g. 
./output/eeg_record2/\n os.mkdir(output_dirpath)\n\n for channel in channels:\n channel_dir = os.path.join(output_dirpath, str(channel))\n os.mkdir(channel_dir)\n\n output_data_to_csv(channel_dir, data[:, channel], FOCUSED_DATA, 'FOCUSED')\n output_data_to_csv(channel_dir, data[:, channel], UNFOCUSED_DATA, 'UNFOCUSED')\n output_data_to_csv(channel_dir, data[:, channel], DROWSY_DATA, 'DROWSY')\n\n\ndef handle_create_spectrograms(state):\n \"\"\"\n Function used to determine what what state (e.g., FOCUSED, UNFOCUSED, DROWSY, or ALL) spectrogram\n images to create\n :param state:\n :return None:\n \"\"\"\n states = []\n\n if state == 'ALL':\n states = ['FOCUSED', 'UNFOCUSED', 'DROWSY']\n else:\n states = [state]\n\n # need to check if state-data directory exists in path\n if not os.path.isdir(STATE_DATA_OUTPUT):\n print('Error: Directory \\'{0}\\' with raw input data doesnt exists!'.format(STATE_DATA_OUTPUT))\n exit(1)\n\n # iterate through states that we need to generate spectrogram images for\n for curr_state in states:\n output_root = os.path.join(CWD, curr_state)\n\n create_output_directory(output_root)\n\n path_to_search = os.path.join(STATE_DATA_OUTPUT, '**', curr_state)\n state_data_files = glob.glob(path_to_search, recursive=True)\n\n for filename in state_data_files:\n output_subpath = filename.replace(STATE_DATA_OUTPUT, '')\n output_subpath = output_subpath.replace(curr_state, '')\n output_filepath = '{0}{1}'.format(output_root, output_subpath)\n\n os.makedirs(output_filepath)\n\n # need to get data from file\n data = load_raw_state_data(filename)\n\n output_image = os.path.join(output_filepath, curr_state)\n\n # 128, 256, 10mins, ./FOCUSED/eeg_record7/10/FOCUSED\n interate_data(FREQUENCY, M, data, output_image)\n\n\ndef get_all_data_files():\n \"\"\"\n Function used to get string values of all files in a directory e.g.\n '/create-spectrograms/data/eeg_record1.mat',\n '/create-spectrograms/data/eeg_record2.mat', etc.\n :return all_files: list of string values of all files in a directory\n \"\"\"\n all_files = []\n\n for dirname, _, filenames in os.walk(DATA_FILES_PATH):\n for filename in filenames:\n\n # ignore anything that is not a .mat file\n if MAT in filename:\n # Example: complete_path_to_file = /create-spectrograms/data/eeg_record1.mat\n complete_path_to_file = os.path.join(dirname, filename)\n all_files.append(complete_path_to_file)\n\n return all_files\n\n\ndef load_raw_state_data(path_to_file):\n \"\"\"\n Function to load raw state data from a csv file\n :param path_to_file: the path to file we want to read\n :return data: raw data from file\n \"\"\"\n data = np.genfromtxt(path_to_file)\n\n return data\n\n\ndef load_data_from_file(path_to_file):\n \"\"\"\n Function used to get data from a .mat file\n :param path_to_file: path to file we want to read e.g. /create-spectrograms/data/eeg_record2.mat\n :return data: numpy 2-D array 25x308868 to represent all data points gathered in 25 channels\n \"\"\"\n raw_file = scipy.io.loadmat(path_to_file)\n raw_data = raw_file['o'][0, 0]\n\n data = raw_data[6]\n\n return data\n\n\ndef generate_stft_from_data(channel, fs, m, max_amp, data, output_filepath):\n \"\"\"\n Function used to generate the Fast-Time Fourier Transform (stft) from data\n :param channel: which channel of the data we are analyzing. Integer value between 0 - 24\n :param fs: frequency sample rate e.g. 128 Hz\n :param m: total number of points in window e.g. 
1920\n :param max_amp: max amplitude for stft plot\n :param data: complete dataset from input file\n :param output_filepath: path to export file of short time fourier transform plot of data\n :return None:\n \"\"\"\n f, t, Zxx = signal.stft(data[:, channel], fs, window='blackman', nperseg=m)\n\n plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=max_amp)\n plt.title('STFT Magnitude')\n plt.ylabel('Frequency [Hz]')\n plt.xlabel('Time [sec]')\n\n plt.savefig(output_filepath)\n\n\ndef generate_spectrogram_from_data(fs, m, data, output_filepath):\n \"\"\"\n Function used to generate Spectrogram images\n :param fs: frequency sample rate e.g. 128 Hz\n :param m: total number of points in window e.g. 128\n :param data: complete dataset from an input file\n :param output_filepath: path to export file of spectrogram\n :return None:\n \"\"\"\n overlap = math.floor(m * 0.9)\n\n f, t, Sxx = signal.spectrogram(data, fs, noverlap=overlap, window=signal.tukey(m, 0.25))\n\n try:\n plt.pcolormesh(t, f, np.log10(Sxx))\n plt.set_cmap('jet')\n plt.axis('off')\n\n plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0, dpi=35)\n plt.clf()\n except FloatingPointError as e:\n print('Caught divide by 0 error: {0}'.format(output_filepath))\n return\n\n\ndef generate_graph_from_data(channel, data, output_filepath):\n \"\"\"\n Function used to generate time domain graph from channel data\n :param channel: specific channel lead we are analyzing\n :param data: complete dataset from an input file\n :param output_filepath: path to export file of time domain data\n :return None:\n \"\"\"\n x = np.linspace(0, len(data[:, channel]) / 512., len(data[:, channel]))\n y = data[:, channel]\n\n plt.plot(x, y, color='blue')\n plt.title('Lead: {}'.format(str(channel)))\n plt.xlabel('Time [secs]')\n plt.ylabel('MicroVolts [muV]')\n\n plt.savefig(output_filepath)\n\n\ndef interate_data(fs, m, data, output_file):\n \"\"\"\n Function used to interate through data and generate spectrogram images\n :param fs:\n :param m:\n :param data:\n :param output_file:\n :return:\n \"\"\"\n move = 128\n i = 0\n j = 256\n counter = 1\n\n while j < len(data):\n sub_data = data[i:j]\n\n # FOCUSED/eeg_record7/10/FOCUSED_1\n sub_output_file = '{0}_{1}'.format(output_file, str(counter))\n\n generate_spectrogram_from_data(fs, m, sub_data, sub_output_file)\n\n i += move\n j += move\n counter += 1\n\n\ndef create_output_directory(output_path):\n \"\"\"\n Function used to create the output directory for Short-Time Fourier Transform\n images created for all input files and each channel of an input file.\n if output directory exists, we delete it and recreate it.\n :param output_path: path of the output files we want to create e.g. 
'./output'\n :return None:\n \"\"\"\n if os.path.isdir(output_path):\n shutil.rmtree(output_path, ignore_errors=True)\n\n os.mkdir(output_path)\n\n\ndef output_data_to_csv(output_dir, data, state, filename):\n \"\"\"\n Function used to parse out focused data and output it into csv files\n :param output_dir: directory to output data\n :param data: to output to csv\n :param state: state we are outputting to csv e.g., focused, unfocused or drowsy\n :param filename: name of file we are writing data to\n :return None:\n \"\"\"\n\n output_path = os.path.join(output_dir, filename)\n\n try:\n parsed_data = np.array(data[range(state[0], state[1])])\n except IndexError as e:\n print('File: {0}'.format(output_path))\n print('Size: {0}'.format(len(data)))\n return\n\n np.savetxt(output_path, parsed_data, delimiter=',')\n\n\ndef main():\n \"\"\"\n Main Entrance of program\n :return None:\n \"\"\"\n args = handle_arguments()\n\n all_files = get_all_data_files()\n\n if args.split_data:\n handle_split_data(all_files, CHANNELS)\n\n if args.state:\n handle_create_spectrograms(args.state)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.abs",
"matplotlib.pyplot.title",
"scipy.signal.stft",
"matplotlib.use",
"scipy.signal.tukey",
"matplotlib.pyplot.savefig",
"numpy.genfromtxt",
"matplotlib.pyplot.plot",
"numpy.seterr",
"matplotlib.pyplot.set_cmap",
"matplotlib.pyplot.clf",
"numpy.log10",
"matplotlib.pyplot.axis",
"numpy.savetxt",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] |
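The spectrogram script above slides a 256-sample window over each EEG channel and calls `scipy.signal.spectrogram` with a Tukey window and 90% overlap, then saves `log10` power via `pcolormesh`. A self-contained sketch of that core call on synthetic data; the 128 Hz rate and 64-point segment mirror the `FREQUENCY` and `M` constants in the cell, while the sine-plus-noise input is made up for illustration:

```python
import numpy as np
from scipy import signal
import matplotlib
matplotlib.use("Agg")  # headless backend, as in the original script
import matplotlib.pyplot as plt

fs = 128                     # sampling rate [Hz]
m = 64                       # points per segment
t = np.arange(0, 2, 1 / fs)  # two seconds of fake EEG
x = np.sin(2 * np.pi * 10 * t) + 0.1 * np.random.randn(t.size)

# Tukey window with 25% taper and 90% overlap, as in generate_spectrogram_from_data
f, tt, Sxx = signal.spectrogram(x, fs, window=("tukey", 0.25),
                                nperseg=m, noverlap=int(m * 0.9))
plt.pcolormesh(tt, f, np.log10(Sxx))
plt.axis("off")
plt.savefig("example_spectrogram.png", bbox_inches="tight", pad_inches=0)
```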
phil-hoang/general-object-detector
|
[
"a59fcfd4cf237dda7bde370b947d0d3096631d56"
] |
[
"detr/detr.py"
] |
[
"import torchvision.transforms as T\nimport torch\n\n\"\"\"\nFunctions for the detr object detection model\n\n\"\"\"\n\ndef detr_load():\n \"\"\"\n Loads the detr model using resnet50\n\n Returns: the detr model pretrained on COCO dataset\n \"\"\"\n\n model = torch.hub.load('facebookresearch/detr', 'detr_resnet50', pretrained=True)\n model.eval()\n\n return model\n\n\ndef detr_predict(model, image, thresh=0.95):\n \"\"\"\n Function used to preprocess the image, feed it into the detr model, and prepare the output draw bounding boxes.\n Outputs are thresholded.\n Related functions: detr_load, draw_boxes in coco.py\n\n Args: \n model -- the detr model from detr_load()\n image -- Array the original image from openCV [width, height, channels]\n\n Returns: \n boxes -- Torch tensor of coordinates of the top left and bottom right of the bounding box ordered as [(x1, y1, x2, y2)]\n labels -- Torch tensor of index labels for each bounding box [<label indices>]\n scores -- Torch tensor of class confidence scores for each bounding box [<class scores>]. For COCO, expects 91 different classes \n \"\"\"\n\n def box_cxcywh_to_xyxy(x):\n # Converts bounding boxes to (x1, y1, x2, y2) coordinates of top left and bottom right corners\n\n # (center_x, center_y, h, w)\n x_c, y_c, w, h = x.unbind(1)\n b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\n (x_c + 0.5 * w), (y_c + 0.5 * h)]\n return torch.stack(b, dim=1)\n\n def rescale_bboxes(out_bbox, size):\n # Scale the bounding boxes to the image size\n img_w, img_h = size\n b = box_cxcywh_to_xyxy(out_bbox)\n b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)\n return b\n\n # Preprocess image\n transform = T.Compose([\n T.ToPILImage(),\n T.Resize(800),\n T.ToTensor(),\n T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n t_image = transform(image).unsqueeze(0)\n\n # output is a dict containing \"pred_logits\" of [batch_size x num_queries x (num_classes + 1)]\n # and \"pred_boxes\" of shape (center_x, center_y, height, width) normalized to be between [0, 1]\n output = model(t_image)\n\n # Scale the class probabilities to add up to 1\n probas = output['pred_logits'].softmax(-1)[0,:,:-1]\n\n # Create outputs\n boxes = rescale_bboxes(output['pred_boxes'][0], (image.shape[1], image.shape[0])).detach()\n labels = probas.max(-1).indices\n conf = probas.max(-1).values.detach()\n\n ### Threshold scores\n conf = conf.detach()\n keep = conf > thresh\n\n # Filter out scores, boxes, and labels using threshold\n conf = conf[keep]\n boxes = boxes.detach()[keep]\n labels = labels.detach()[keep]\n\n return boxes, labels, conf\n\n\n\n \n\n\n"
] |
[
[
"torch.stack",
"torch.hub.load",
"torch.tensor"
]
] |
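`detr_predict` above converts DETR's normalized `(center_x, center_y, width, height)` boxes into absolute `(x1, y1, x2, y2)` corners before thresholding; the conversion is plain tensor arithmetic. A standalone sketch of just that step (the function name and the 640x480 example image size are illustrative, not from the cell):

```python
import torch

def cxcywh_to_xyxy(boxes, img_w, img_h):
    """Convert normalized (cx, cy, w, h) boxes to absolute corner coordinates.

    boxes: float tensor [N, 4] with values in [0, 1]
    returns: float tensor [N, 4] as (x1, y1, x2, y2) in pixels
    """
    cx, cy, w, h = boxes.unbind(1)
    corners = torch.stack(
        [cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h], dim=1)
    return corners * torch.tensor([img_w, img_h, img_w, img_h],
                                  dtype=torch.float32)

# A box centered in a 640x480 image covering half of each dimension:
print(cxcywh_to_xyxy(torch.tensor([[0.5, 0.5, 0.5, 0.5]]), 640, 480))
# tensor([[160., 120., 480., 360.]])
```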
P1R/cinves
|
[
"8251acfa00a9a26d9b0665e1897316b6664fb9bb",
"8251acfa00a9a26d9b0665e1897316b6664fb9bb"
] |
[
"TrabajoFinal/PortadoraVariableModuladaFija/TvsFreq-FM.py",
"TrabajoFinal/PortadoraVariableModuladaFija/AM/TvsFrqRate-AM-pawn50.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\n#la frecuencia de la modulada FM es de 50 hz en todas las variaciones de la portadora\nFreq=np.array([20,30,40,50,60,70,80,90,100,110,120,130,140,150,160]);\nDeltaTemp=np.array([0.5,1.2,3.2,4.1,2.3,2.0,1.8,0.8,0.2,1.2,2.3,4.1,8.5,3.4,0.1])\nTempT1=np.array([20.8,20.3,18.8,17.9,23.2,22.8,22.7,22.9,21.2,20.3,19.9,19.2,17.1,19.8,22.1])\nTempT2=np.array([21.3,21.5,22.0,22.0,20.9,20.8,20.9,22.1,21.4,21.5,22.2,23.3,25.6,23.2,22.2])\nTempAmb=np.array([21.0,21.0,21.0,21.0,21.1,21.3,21.3,21.4,20.9,21.0,21.1,21.4,21.5,21.6,21.8])\n\nplt.xlabel('Frecuencia')\nplt.ylabel('Temperatura')\nplt.title('Temperatura vs Frecuencia modulada en FM')\n#for i in range(len(Freq)):\n#\tplt.text(Freq[i],Db[i], r'$Freq=%f, \\ Db=%f$' % (Freq[i], Db[i]))\nplt.axis([0, 200, 0, 30])\nplt.plot(Freq,TempT1,'bo',Freq,TempT1,'k')\nplt.plot(Freq,TempT2,'r^',Freq,TempT2,'r')\nplt.plot(Freq,DeltaTemp,'ko',Freq,DeltaTemp,'k')\nplt.plot(Freq,TempAmb,'yo',Freq,TempAmb,'y')\nplt.grid(True)\nplt.show()\n\n",
"import numpy as np\nimport matplotlib.pyplot as plt\n'''\n\nLa Frecuencia Base es de 50 Hz y las variaciones en frecuencia de rate de 30 a 200\neste ejemplo es con un pawn de 50% en AM.\n\npara este experimento los valores son: \n\ttiempo de medicion: 2 minutos\n\tvoltaje de generador: 0.3 volts\n\ttubo de prueba: cobre 350 cm\n\tSIN STREAMING.\n'''\nFreq=np.array([30,40,50,60,70,80,90,100,200])\nDeltaTemp=np.array([2.2,1.9,2.0,2.0,2.7,2.6,2.7,2.8,2.6])\nTempT1=np.array([21.2,21.5,21.5,21.5,21.1,21.1,21.1,21.0,21.2])\nTempT2=np.array([23.4,23.4,23.5,23.5,23.8,23.7,23.8,23.8,23.8])\nTempAmb=np.array([22.4,22.4,22.4,22.4,22.5,22.5,22.5,22.5,22.6])\n\nplt.xlabel('Rate')\nplt.ylabel('Temperatura')\nplt.title('Temperatura vs Rate en AM con pawn de 50%')\n#for i in range(len(Freq)):\n#\tplt.text(Freq[i],Db[i], r'$Freq=%f, \\ Db=%f$' % (Freq[i], Db[i]))\nplt.axis([0, 220, 0, 30])\nplt.plot(Freq,TempT1,'bo',Freq,TempT1,'k')\nplt.plot(Freq,TempT2,'r^',Freq,TempT2,'r')\nplt.plot(Freq,DeltaTemp,'ko',Freq,DeltaTemp,'k')\nplt.plot(Freq,TempAmb,'yo',Freq,TempAmb,'y')\nplt.grid(True)\nplt.show()\n\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
nsnmsak/graphillion_tutorial
|
[
"d5446b15f8a59784b37ef1786d1150ee59fe4a3a"
] |
[
"ja/tutorial_util.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\nfrom graphillion import GraphSet\nfrom graphviz import Digraph\nimport networkx as nx\nimport json\nimport matplotlib.pyplot as plt\nfrom IPython.display import Image\n\ndef zdd_size(graph_set):\n zdd = dump2zdd(graph_set.dumps().split(\"\\n\"))\n return len(zdd)\n\n\ndef draw_zdd(graph_set, universe=None):\n if not universe:\n universe = GraphSet.universe()\n zdd = dump2zdd(graph_set.dumps().split(\"\\n\"))\n\n return draw(zdd, universe)\n\ndef draw(zdd, labels):\n dot_str_lines = []\n dot_str_lines.append(\"digraph top {\")\n dot_str_lines.append('node[ colorscheme = \"rdylgn11\", color = 3];')\n\n dot = Digraph()\n\n same_label_nodes = {}\n for nid in zdd:\n vals = zdd[nid]\n label = vals['label']\n lo = vals['lo']\n hi = vals['hi']\n if label not in same_label_nodes:\n same_label_nodes[label] = []\n same_label_nodes[label].append(nid)\n dot.node(nid, str(labels[int(label)-1]))\n dot.edge(nid, lo, style='dashed')\n dot.edge(nid, hi, style='solid')\n\n dot.node('T', '1', shape='square')\n dot.node('B', '0', shape='square')\n for labels in same_label_nodes.values():\n with dot.subgraph() as c:\n c.body.append(\"{rank= same;\" + \"; \".join(labels) + \";}\")\n return dot\n\ndef dump2zdd(arr):\n nodes = {}\n\n for elem in arr:\n elems = elem.split()\n if len(elems) != 4: \n continue\n nid, label, lo, hi = elems\n nodes[nid] = {'label': label, 'lo': lo, 'hi': hi}\n\n return nodes\n\ndef _encode_digit(val):\n if isinstance(val, int):\n return '_int' + str(val)\n return val\n \ndef _decode_digit(val):\n if isinstance(val, str) and val.startswith('_int'):\n return int(val[4:])\n return val\n\n\ndef _graph2nx_layout(graph):\n dot = Digraph()\n for u, v in graph.edges:\n u = _encode_digit(u)\n v = _encode_digit(v)\n dot.edge(u, v)\n json_obj = json.loads(dot.pipe(format='json'))\n positions = {}\n for node in json_obj['objects']:\n name = _decode_digit(node['name'])\n pos_pair = tuple(float(x) for x in node['pos'].split(','))\n positions[name] = pos_pair\n\n return positions\n\ndef draw_universe(universe=None):\n draw_subgraph(None, universe)\n\n\n\ndef draw_subgraph(subgraph=None, universe=None):\n if not universe:\n universe = GraphSet.universe()\n g = nx.Graph(sorted(universe))\n \n if not subgraph:\n subgraph = set([])\n else:\n subgraph = set(subgraph)\n\n pos = _graph2nx_layout(g)\n nx.draw_networkx_nodes(g, pos, node_color='#FFFFFF', edgecolors='#000000')\n edge_weights = []\n edge_colors = []\n for edge in g.edges():\n if edge in subgraph or (edge[1], edge[0]) in subgraph:\n edge_weights.append(5)\n edge_colors.append('#FF0000')\n else:\n edge_weights.append(1)\n edge_colors.append('#000000')\n\n\n nx.draw_networkx_labels(g, pos)\n nx.draw_networkx_edges(g, pos, edge_color=edge_colors, width=edge_weights)\n\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.show"
]
] |
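`tutorial_util.py` above renders the ZDD behind a `GraphSet` by parsing `GraphSet.dumps()` output (one `nid label lo hi` line per node). A short hedged sketch of how those helpers would typically be driven, assuming the standard graphillion calls `GraphSet.set_universe` and `GraphSet.paths`, which are not shown in the cell:

```python
from graphillion import GraphSet

# A 2x2 grid universe: four vertices, four edges.
GraphSet.set_universe([(1, 2), (1, 3), (2, 4), (3, 4)])

# The family of all simple paths between opposite corners.
paths = GraphSet.paths(1, 4)
print(len(paths))  # 2 subgraphs: 1-2-4 and 1-3-4

# The helpers defined above then operate on any GraphSet:
#   zdd_size(paths)  -> number of ZDD nodes in its decision diagram
#   draw_zdd(paths)  -> graphviz Digraph visualising that diagram
```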
manmanCover/OCNet.pytorch
|
[
"8484daaac4fab5b513a45e56b1b04cdebc620116"
] |
[
"utils/loss.py"
] |
[
"import pdb\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nimport cv2\n\nclass CrossEntropy2d(nn.Module):\n\n def __init__(self, size_average=True, ignore_label=255, use_weight=True):\n super(CrossEntropy2d, self).__init__()\n self.size_average = size_average\n self.ignore_label = ignore_label\n self.use_weight = use_weight\n if self.use_weight:\n self.weight = torch.FloatTensor([0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529, 1.0507]).cuda()\n print('CrossEntropy2d weights : {}'.format(self.weight))\n else:\n self.weight = None\n\n\n def forward(self, predict, target, weight=None):\n\n \"\"\"\n Args:\n predict:(n, c, h, w)\n target:(n, h, w)\n weight (Tensor, optional): a manual rescaling weight given to each class.\n If given, has to be a Tensor of size \"nclasses\"\n \"\"\"\n # Variable(torch.randn(2,10)\n if self.use_weight:\n print('target size {}'.format(target.shape))\n freq = np.zeros(19)\n for k in range(19):\n mask = (target[:, :, :] == k)\n freq[k] = torch.sum(mask)\n print('{}th frequency {}'.format(k, freq[k]))\n weight = freq / np.sum(freq)\n print(weight)\n self.weight = torch.FloatTensor(weight)\n print('Online class weight: {}'.format(self.weight))\n else:\n self.weight = None\n\n\n criterion = torch.nn.CrossEntropyLoss(weight=self.weight, ignore_index=self.ignore_label)\n # torch.FloatTensor([2.87, 13.19, 5.11, 37.98, 35.14, 30.9, 26.23, 40.24, 6.66, 32.07, 21.08, 28.14, 46.01, 10.35, 44.25, 44.9, 44.25, 47.87, 40.39])\n #weight = Variable(torch.FloatTensor([1, 1.49, 1.28, 1.62, 1.62, 1.62, 1.64, 1.62, 1.49, 1.62, 1.43, 1.62, 1.64, 1.43, 1.64, 1.64, 1.64, 1.64, 1.62]), requires_grad=False).cuda()\n assert not target.requires_grad\n assert predict.dim() == 4\n assert target.dim() == 3\n assert predict.size(0) == target.size(0), \"{0} vs {1} \".format(predict.size(0), target.size(0))\n assert predict.size(2) == target.size(1), \"{0} vs {1} \".format(predict.size(2), target.size(1))\n assert predict.size(3) == target.size(2), \"{0} vs {1} \".format(predict.size(3), target.size(3))\n n, c, h, w = predict.size()\n target_mask = (target >= 0) * (target != self.ignore_label)\n target = target[target_mask]\n if not target.data.dim():\n return Variable(torch.zeros(1))\n predict = predict.transpose(1, 2).transpose(2, 3).contiguous()\n predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)\n loss = criterion(predict, target)\n return loss\n\n\nclass OhemCrossEntropy2d(nn.Module):\n def __init__(self, ignore_label=255, thresh=0.6, min_kept=0, use_weight=True):\n super(OhemCrossEntropy2d, self).__init__()\n self.ignore_label = ignore_label\n self.thresh = float(thresh)\n self.min_kept = int(min_kept)\n if use_weight:\n print(\"w/ class balance\")\n weight = torch.FloatTensor([0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, 1.0865, 1.1529, 1.0507])\n self.criterion = torch.nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_label)\n else:\n print(\"w/o class balance\")\n self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label)\n\n def forward(self, predict, target, weight=None):\n \"\"\"\n Args:\n predict:(n, c, h, w)\n target:(n, h, w)\n weight (Tensor, optional): a manual rescaling weight given to each class.\n If given, has to be a Tensor of size \"nclasses\"\n \"\"\"\n assert not 
target.requires_grad\n assert predict.dim() == 4\n assert target.dim() == 3\n assert predict.size(0) == target.size(0), \"{0} vs {1} \".format(predict.size(0), target.size(0))\n assert predict.size(2) == target.size(1), \"{0} vs {1} \".format(predict.size(2), target.size(1))\n assert predict.size(3) == target.size(2), \"{0} vs {1} \".format(predict.size(3), target.size(3))\n\n n, c, h, w = predict.size()\n input_label = target.data.cpu().numpy().ravel().astype(np.int32)\n x = np.rollaxis(predict.data.cpu().numpy(), 1).reshape((c, -1))\n input_prob = np.exp(x - x.max(axis=0).reshape((1, -1)))\n input_prob /= input_prob.sum(axis=0).reshape((1, -1))\n\n valid_flag = input_label != self.ignore_label\n valid_inds = np.where(valid_flag)[0]\n label = input_label[valid_flag]\n num_valid = valid_flag.sum()\n if self.min_kept >= num_valid:\n print('Labels: {}'.format(num_valid))\n elif num_valid > 0:\n prob = input_prob[:,valid_flag]\n pred = prob[label, np.arange(len(label), dtype=np.int32)]\n threshold = self.thresh\n if self.min_kept > 0:\n index = pred.argsort()\n threshold_index = index[ min(len(index), self.min_kept) - 1 ]\n if pred[threshold_index] > self.thresh:\n threshold = pred[threshold_index]\n kept_flag = pred <= threshold\n valid_inds = valid_inds[kept_flag]\n # print('hard ratio: {} = {} / {} '.format(round(len(valid_inds)/num_valid, 4), len(valid_inds), num_valid))\n\n label = input_label[valid_inds].copy()\n input_label.fill(self.ignore_label)\n input_label[valid_inds] = label\n valid_flag_new = input_label != self.ignore_label\n # print(np.sum(valid_flag_new))\n target = Variable(torch.from_numpy(input_label.reshape(target.size())).long().cuda())\n\n return self.criterion(predict, target)"
] |
[
[
"torch.nn.CrossEntropyLoss",
"numpy.sum",
"torch.zeros",
"torch.sum",
"torch.FloatTensor",
"numpy.zeros",
"numpy.where"
]
] |
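`OhemCrossEntropy2d` above keeps only the hardest pixels: it reads off each pixel's predicted probability for its ground-truth class and retains pixels below a threshold, raising the threshold when fewer than `min_kept` pixels would otherwise survive. The selection step itself is plain NumPy; a condensed sketch (the helper name and the toy 3-class example are illustrative):

```python
import numpy as np

def select_hard_pixels(prob, label, thresh=0.6, min_kept=100):
    """Return indices of 'hard' pixels, mirroring the OHEM selection above.

    prob:  [num_classes, num_pixels] softmax probabilities
    label: [num_pixels] ground-truth class id per pixel
    """
    # Probability each pixel assigns to its own ground-truth class.
    pred = prob[label, np.arange(label.size)]
    threshold = thresh
    if min_kept > 0:
        order = pred.argsort()               # ascending: hardest pixels first
        k = min(len(order), min_kept) - 1
        # If the k-th hardest pixel is already confident, raise the threshold
        # so that at least min_kept pixels survive the cut.
        if pred[order[k]] > thresh:
            threshold = pred[order[k]]
    return np.where(pred <= threshold)[0]

prob = np.array([[0.7, 0.2, 0.1, 0.5, 0.9],
                 [0.2, 0.5, 0.3, 0.3, 0.05],
                 [0.1, 0.3, 0.6, 0.2, 0.05]])
label = np.array([0, 1, 2, 0, 0])
print(select_hard_pixels(prob, label, thresh=0.6, min_kept=2))  # [1 2 3]
```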
K-ona/--------
|
[
"1bae093758c61e4863ca0b150195286e189af591"
] |
[
"mtl.py"
] |
[
"import matplotlib.pyplot as plt\nplt.style.use('ggplot')\nimport pandas as pd\nimport numpy as np\n\n#随机生成两个dataframe\nd1 = pd.DataFrame(columns=['x', 'y'])\nd1['x'] = np.random.normal(0, 1, 100)\nd1['y'] = np.random.normal(0, 1, 100)\nd2 = pd.DataFrame(columns=['x', 'y'])\nd2['x'] = np.random.normal(2, 1, 100)\nd2['y'] = np.random.normal(2, 1, 100)\nprint(d1.values)\nprint(d2.values)\n\n#分别画出scatter图,但是设置不同的颜色\nplt.scatter(d1['x'], d1['y'], color='blue', label='d1 points')\nplt.scatter(d2['x'], d2['y'], color='green', label='d2 points')\n\n#设置图例\nplt.legend(loc=(1, 0))\n\n#显示图片\nplt.show()\n\n\n# positive_index = np.nonzero( testLabelArr == 1 )\n# testDateArr_positive = testDateArr[positive_index]\n# negative_index = np.nonzero( testLabelArr == 0 )\n# testDateArr_negative = testDateArr[negative_index]\n# fig = plt.figure()\n# ax = fig.add_subplot(111)\n\n# ax.scatter(testDateArr_positive[:, 1], testDateArr_positive[:, 2], c='red')\n# ax.scatter(testDateArr_negative[:, 1], testDateArr_negative[:, 2], c='black')\n# ax.plot(np.arange(0,10),(-np.arange(0,10)*weight_vector[1]-weight_vector[0])/weight_vector[2])\n\n# plt.xlabel('x', fontsize=10)\n# plt.ylabel('y', fontsize=10)\n# plt.show()"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"numpy.random.normal",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use"
]
] |
rebeccadavidsson/covid19-sir
|
[
"ca7a408c5fcf87e4857edd14a9276cae0b6737cf"
] |
[
"covsirphy/cleaning/pcr_data.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom pathlib import Path\nimport numpy as np\nimport pandas as pd\nfrom dask import dataframe as dd\nfrom covsirphy.util.plotting import line_plot\nfrom covsirphy.util.error import PCRIncorrectPreconditionError, SubsetNotFoundError\nfrom covsirphy.cleaning.cbase import CleaningBase\nfrom covsirphy.cleaning.country_data import CountryData\n\n\nclass PCRData(CleaningBase):\n \"\"\"\n Data cleaning of PCR dataset.\n\n Args:\n filename (str or None): CSV filename of the dataset\n interval (int): expected update interval of the number of confirmed cases and tests [days]\n min_pcr_tests (int): minimum number of valid daily tests performed in order to calculate positive rate\n citation (str): citation\n \"\"\"\n # Column names\n PCR_VALUE_COLUMNS = [CleaningBase.TESTS, CleaningBase.C]\n PCR_NLOC_COLUMNS = [CleaningBase.DATE, *PCR_VALUE_COLUMNS]\n PCR_COLUMNS = [*CleaningBase.STR_COLUMNS, *PCR_VALUE_COLUMNS]\n # Daily values\n T_DIFF = \"Tests_diff\"\n C_DIFF = \"Confirmed_diff\"\n PCR_RATE = \"Test_positive_rate\"\n\n def __init__(self, filename, interval=2, min_pcr_tests=100, citation=None):\n if filename is None:\n self._raw = pd.DataFrame()\n self._cleaned_df = pd.DataFrame(columns=self.PCR_COLUMNS)\n else:\n self._raw = dd.read_csv(\n filename, dtype={\"Province/State\": \"object\"}\n ).compute()\n self._cleaned_df = self._cleaning()\n self.interval = self.ensure_natural_int(interval, name=\"interval\")\n self.min_pcr_tests = self.ensure_natural_int(\n min_pcr_tests, name=\"min_pcr_tests\")\n self._citation = citation or \"\"\n # Cleaned dataset of \"Our World In Data\"\n self._cleaned_df_owid = pd.DataFrame()\n\n def cleaned(self):\n \"\"\"\n Return the cleaned dataset of PCRData with tests and confirmed data.\n\n Returns:\n pandas.DataFrame\n Index:\n reset index\n Columns:\n - Date (pd.TimeStamp): Observation date\n - Country (pandas.Category): country/region name\n - Province (pandas.Category): province/prefecture/state name\n - Tests (int): the number of total tests performed\n - Confirmed (int): the number of confirmed cases\n\n Note:\n Cleaning method is defined by self._cleaning() method.\n \"\"\"\n return self._cleaned_df.loc[:, self.PCR_COLUMNS]\n\n def _cleaning(self):\n \"\"\"\n Perform data cleaning of the raw data.\n This method overwrites super()._cleaning() method.\n\n Returns:\n pandas.DataFrame\n Index:\n reset index\n Columns:\n - Date (pd.TimeStamp): Observation date\n - ISO3 (str): ISO3 code\n - Country (pandas.Category): country/region name\n - Province (pandas.Category): province/prefecture/state name\n - Tests (int): the number of total tests performed\n - Confirmed (int): the number of confirmed cases\n \"\"\"\n df = super()._cleaning()\n # Rename the columns\n df = df.rename(\n {\n \"ObservationDate\": self.DATE,\n \"Country/Region\": self.COUNTRY,\n \"Province/State\": self.PROVINCE,\n },\n axis=1\n )\n # Confirm the expected columns are in raw data\n self.ensure_dataframe(\n df, name=\"the raw data\", columns=self.PCR_COLUMNS)\n # Datetime columns\n df[self.DATE] = pd.to_datetime(df[self.DATE])\n # Country\n df[self.COUNTRY] = df[self.COUNTRY].replace(\n {\n # COD\n \"Congo, the Democratic Republic of the\": \"Democratic Republic of the Congo\",\n # COG\n \"Congo\": \"Republic of the Congo\",\n # South Korea\n \"Korea, South\": \"South Korea\",\n }\n )\n # Province\n df[self.PROVINCE] = df[self.PROVINCE].fillna(self.UNKNOWN)\n df.loc[df[self.COUNTRY] == \"Diamond Princess\", [\n self.COUNTRY, self.PROVINCE]] = 
[\"Others\", \"Diamond Princess\"]\n # Values\n df = df.fillna(method=\"ffill\").fillna(0)\n df[self.TESTS] = df[self.TESTS].astype(np.int64)\n df[self.C] = df[self.C].astype(np.int64)\n df = df.loc[:, [self.ISO3, *self.PCR_COLUMNS]].reset_index(drop=True)\n # Update data types to reduce memory\n df[self.AREA_ABBR_COLS] = df[self.AREA_ABBR_COLS].astype(\"category\")\n return df\n\n @classmethod\n def from_dataframe(cls, dataframe):\n \"\"\"\n Create PCRData instance using a pandas dataframe.\n\n Args:\n dataframe (pd.DataFrame): cleaned dataset\n\n Returns:\n covsirphy.PCRData: PCR dataset\n \"\"\"\n instance = cls(filename=None)\n instance._cleaned_df = cls.ensure_dataframe(\n dataframe, name=\"dataframe\", columns=cls.PCR_COLUMNS)\n return instance\n\n def _download_ourworldindata(self, filename):\n \"\"\"\n Download the dataset (ISO code/date/the number of tests) from \"Our World In Data\" site.\n https://github.com/owid/covid-19-data/tree/master/public/data\n https://ourworldindata.org/coronavirus\n\n Args:\n filename (str): CSV filename to save the datasetretrieved from \"Our World In Data\"\n \"\"\"\n url = \"https://covid.ourworldindata.org/data/testing/covid-testing-all-observations.csv\"\n col_dict = {\n \"ISO code\": self.ISO3,\n \"Date\": self.DATE,\n \"Cumulative total\": self.TESTS,\n \"Daily change in cumulative total\": self.T_DIFF,\n }\n # Download the dataset\n df = self.load(url, columns=list(col_dict))\n # Data cleaning\n df = df.rename(col_dict, axis=1)\n df[self.TESTS] = pd.to_numeric(df[self.TESTS], errors=\"coerce\")\n df[self.TESTS] = df[self.TESTS].fillna(method=\"ffill\").astype(np.int64)\n # Calculate cumulative values if necessary\n df[self.T_DIFF] = df[self.T_DIFF].fillna(0).astype(np.int64)\n na_last_df = df.loc[\n (df[self.TESTS].isna()) & (df[self.DATE] == df[self.DATE].max())]\n re_countries_set = set(na_last_df[self.ISO3].unique())\n df[\"cumsum\"] = df.groupby(self.ISO3)[self.T_DIFF].cumsum()\n df[self.TESTS] = df[[self.ISO3, self.TESTS, \"cumsum\"]].apply(\n lambda x: x[1] if x[0] in re_countries_set else x[2], axis=1)\n df = df.drop(\"cumsum\", axis=1)\n # Drop duplicated records\n df = df.drop_duplicates(subset=[self.ISO3, self.DATE])\n # Save as CSV file\n df.to_csv(filename, index=False)\n return df\n\n def use_ourworldindata(self, filename, force=False):\n \"\"\"\n Set the cleaned dataset retrieved from \"Our World In Data\" site.\n https://github.com/owid/covid-19-data/tree/master/public/data\n https://ourworldindata.org/coronavirus\n\n Args:\n filename (str): CSV filename to save the datasetretrieved from \"Our World In Data\"\n force (bool): if True, always download the dataset from \"Our World In Data\"\n \"\"\"\n # Retrieve dataset from \"Our World In Data\" if necessary\n Path(filename).parent.mkdir(exist_ok=True, parents=True)\n if Path(filename).exists() and not force:\n df = self.load(filename, dtype={self.TESTS: np.int64})\n else:\n df = self._download_ourworldindata(filename)\n # Add \"Country\" and \"Confirmed\" column using \"COVID-19 Data Hub\" dataset\n df[self.COUNTRY] = None\n df[self.C] = None\n df.index = df[self.ISO3].str.cat(df[self.DATE], sep=\"_\")\n series = df.loc[:, self.TESTS]\n hub_df = self._cleaned_df.copy()\n hub_df = hub_df.loc[hub_df[self.PROVINCE] == self.UNKNOWN]\n hub_df.index = hub_df[self.ISO3].str.cat(\n hub_df[self.DATE].astype(str), sep=\"_\")\n df.update(hub_df)\n df[self.TESTS] = series\n df = df.dropna().reset_index(drop=True)\n # Add \"Province\" column (Unknown because not)\n df[self.PROVINCE] = 
self.UNKNOWN\n # Data types\n df[self.DATE] = pd.to_datetime(df[self.DATE])\n df[self.COUNTRY] = df[self.COUNTRY].astype(\"category\")\n df[self.PROVINCE] = df[self.PROVINCE].astype(\"category\")\n df[self.C] = df[self.C].astype(np.int64)\n # Save the dataframe as the cleaned dataset\n self._cleaned_df_owid = df.reset_index(drop=True)\n # Update citation\n self._citation += \"\\nHasell, J., Mathieu, E., Beltekian, D. et al.\" \\\n \" A cross-country database of COVID-19 testing. Sci Data 7, 345 (2020).\" \\\n \" https://doi.org/10.1038/s41597-020-00688-8\"\n return self\n\n def replace(self, country_data):\n \"\"\"\n Replace a part of cleaned dataset with a dataframe.\n\n Args:\n country_data (covsirphy.CountryData): dataset object of the country\n Index: reset index\n Columns:\n - Date (pd.TimeStamp): Observation date\n - Province (pandas.Category): province name\n - Tests (int): the number of total tests performed\n - Confirmed (int): the number of confirmed cases\n - The other columns will be ignored\n\n Returns:\n covsirphy.PCRData: self\n \"\"\"\n self.ensure_instance(country_data, CountryData, name=\"country_data\")\n # Read new dataset\n country = country_data.country\n new = self.ensure_dataframe(\n country_data.cleaned(), name=\"the raw data\", columns=self.PCR_COLUMNS)\n new = new.loc[:, self.PCR_COLUMNS]\n new[self.ISO3] = self.country_to_iso3(country)\n # Remove the data in the country from the current datset\n df = self._cleaned_df.copy()\n df = df.loc[df[self.COUNTRY] != country]\n # Add the new data\n df = pd.concat([df, new], axis=0, sort=False)\n # Update data types to reduce memory\n df[self.AREA_ABBR_COLS] = df[self.AREA_ABBR_COLS].astype(\"category\")\n self._cleaned_df = df.copy()\n # Citation\n self._citation += f\"\\n{country_data.citation}\"\n return self\n\n @staticmethod\n def _pcr_monotonic(df, variable):\n \"\"\"\n Force the variable show monotonic increasing.\n\n Args:\n df (pandas.DataFrame):\n Index: Date (pandas.TimeStamp)\n Columns: Tests, Confirmed\n variable (str): variable name to show monotonic increasing\n\n Returns:\n pandas.DataFrame: complemented records\n Index: Date (pandas.TimeStamp)\n Columns: Tests, Confirmed\n \"\"\"\n # Whether complement is necessary or not\n if df[variable].is_monotonic_increasing:\n return df\n # Complement\n decreased_dates = df[df[variable].diff() < 0].index.tolist()\n for date in decreased_dates:\n # Raw value on the decreased date\n raw_last = df.loc[date, variable]\n # Extrapolated value on the date\n series = df.loc[:date, variable]\n series.iloc[-1] = None\n series.interpolate(method=\"spline\", order=1, inplace=True)\n series.fillna(method=\"ffill\", inplace=True)\n # Reduce values to the previous date\n df.loc[:date, variable] = series * raw_last / series.iloc[-1]\n df[variable] = df[variable].fillna(0).astype(np.int64)\n return df\n\n def _pcr_check_complement(self, df, variable):\n \"\"\"\n If variable values do not change for more than applied 'self.interval' days,\n indicate compliment action is needed.\n\n Args:\n df (pandas.DataFrame):\n Index: Date (pandas.TimeStamp)\n Columns: Tests, Confirmed, Tests_diff, C_diff\n variable: the desired column to use\n\n Returns:\n bool: whether complement is necessary or not\n \"\"\"\n max_frequency = df[variable].value_counts().max()\n return max_frequency > self.interval or not df.loc[df.index[-1], variable]\n\n def _pcr_partial_complement_ending(self, df, window):\n \"\"\"\n If ending test values do not change daily, while there are new cases,\n apply previous diff() 
only to these ending unupdated values\n and keep the previous valid ones.\n\n Args:\n df (pandas.DataFrame):\n Index: Date (pandas.TimeStamp)\n Columns: Tests, Confirmed\n window (int): window of moving average, >= 1\n\n Returns:\n pandas.DataFrame: complemented test records\n Index: Date (pandas.TimeStamp)\n Columns: Tests, Confirmed\n \"\"\"\n # Whether complement is necessary or not\n tests_max = df[self.TESTS].max()\n check_tests_ending = (df[self.TESTS] == tests_max).sum() > self.interval\n last_new_C = df[self.C].diff().rolling(window).mean().iloc[-1]\n check_C = last_new_C > self.min_pcr_tests\n if not (check_tests_ending and check_C):\n return df\n # Complement any ending unupdated test values\n # that are not updated daily, by keeping and\n # propagating forward previous valid diff()\n # min_index: index for first ending max test reoccurrence\n min_index = df[self.TESTS].idxmax() + 1\n first_value = df.loc[min_index, self.TESTS]\n df_ending = df.copy()\n df_ending.loc[df_ending.duplicated(\n [self.TESTS], keep=\"first\"), self.TESTS] = None\n diff_series = df_ending[self.TESTS].diff(\n ).ffill().fillna(0).astype(np.int64)\n diff_series.loc[diff_series.duplicated(keep=\"last\")] = None\n diff_series.interpolate(\n method=\"linear\", inplace=True, limit_direction=\"both\")\n df.loc[min_index:, self.TESTS] = first_value + \\\n diff_series.loc[min_index:].cumsum()\n return df\n\n def _pcr_partial_complement(self, before_df, variable):\n \"\"\"\n If there are missing values in variable column,\n apply partial compliment (bfill, ffill) to all columns.\n\n Args:\n before_df (pandas.DataFrame):\n Index: Date (pandas.TimeStamp)\n Columns: Tests, Confirmed, Tests_diff, C_diff\n variable: the desired column to use\n\n Returns:\n pandas.DataFrame: complemented records\n Index: Date (pandas.TimeStamp)\n Columns: Tests, Confirmed, Tests_diff, C_diff\n\n Note:\n Filling NA with 0 will be always applied.\n \"\"\"\n df = before_df.copy()\n df[self.TESTS].fillna(0, inplace=True)\n if self.T_DIFF in df.columns:\n df[self.T_DIFF].fillna(0, inplace=True)\n if not self._pcr_check_complement(df, variable):\n return df\n for col in df:\n df[col].replace(0, np.nan, inplace=True)\n df[col].fillna(method=\"ffill\", inplace=True)\n df[col].fillna(method=\"bfill\", inplace=True)\n return df\n\n def _pcr_processing(self, before_df, window):\n \"\"\"\n Return the processed pcr data\n\n Args:\n before_df (pandas.DataFrame):\n Index: reset index\n Columns: Date (pandas.TimeStamp), Tests, Confirmed\n window (int): window of moving average, >= 1\n\n Returns:\n tuple (pandas.DataFrame, bool):\n pandas.DataFrame\n Index: reset index\n Columns:\n - Date (pd.TimeStamp): Observation date\n - Tests (int): the number of total tests performed\n - Confirmed (int): the number of confirmed cases\n - Tests_diff (int): daily tests performed\n - Confirmed_diff (int): daily confirmed cases\n bool: True if complement is needed or False\n \"\"\"\n df = before_df.copy()\n df[self.TESTS].fillna(method=\"ffill\", inplace=True)\n # Confirmed must show monotonic increasing\n df = self._pcr_monotonic(df, self.C)\n df = self._pcr_partial_complement(df, self.TESTS)\n # If Tests values are all valid, with no missing values in-between,\n # they must be monotonically increasing as well\n compare_df = df.copy()\n df = self._pcr_monotonic(df, self.TESTS)\n # Complement any ending unupdated test records\n df = self._pcr_partial_complement_ending(df, window)\n # Complemented or not\n is_complemented = not df.equals(compare_df)\n # Calculate daily 
values for tests and confirmed (with window=1)\n df[self.T_DIFF] = df[self.TESTS].diff()\n df[self.C_DIFF] = df[self.C].diff()\n # Ensure that tests > confirmed in daily basis\n df.loc[\n df[self.T_DIFF].abs() < df[self.C_DIFF].abs(), self.T_DIFF] = None\n # Keep valid non-zero values by ignoring zeros at the beginning\n df = df.replace(0, np.nan)\n non_zero_index_start = df[self.T_DIFF].first_valid_index()\n df = df.loc[non_zero_index_start:].reset_index(drop=True)\n non_zero_index_end = df[self.T_DIFF].last_valid_index()\n # Keep valid non-zero values by complementing zeros at the end\n if non_zero_index_end < (len(df) - 1):\n df.loc[non_zero_index_end + 1:, self.T_DIFF] = None\n df = self._pcr_partial_complement(df, self.T_DIFF)\n # Use rolling window for averaging tests and confirmed\n df[self.T_DIFF] = df[self.T_DIFF].rolling(window).mean()\n df[self.C_DIFF] = df[self.C_DIFF].rolling(window).mean()\n df = self._pcr_partial_complement(df, self.T_DIFF)\n # Remove first zero lines due to window\n df = df.replace(0, np.nan)\n non_zero_index_start = df[self.T_DIFF].first_valid_index()\n df = df.loc[non_zero_index_start:].reset_index(drop=True)\n return (df, is_complemented)\n\n def _pcr_check_preconditions(self, df):\n \"\"\"\n Check preconditions in order to proceed with PCR data processing.\n\n Args:\n df (pandas.DataFrame):\n Index: Date (pandas.TimeStamp)\n Columns: Tests, Confirmed\n\n Return:\n bool: whether the dataset has sufficient data or not\n \"\"\"\n df[self.TESTS].fillna(0, inplace=True)\n if self.T_DIFF in df.columns:\n df[self.T_DIFF].fillna(0, inplace=True)\n # Check if the values are zero or nan\n check_zero = df[self.TESTS].max()\n # Check if the number of the missing values\n # is more than 50% of the total values\n check_missing = (df[self.TESTS] == 0).mean() < 0.5\n # Check if the number of the positive unique values\n # is less than 1% of the total values\n positive_df = df.loc[df[self.TESTS] > 0, self.TESTS]\n try:\n check_unique = (positive_df.nunique() / positive_df.size) >= 0.01\n except ZeroDivisionError:\n return False\n # Result\n return check_zero and check_missing and check_unique\n\n def _subset_by_area(self, country, province, dataset=\"COVID-19 Data Hub\"):\n \"\"\"\n Return the subset of \"Our World In Data\".\n\n Args:\n country (str): country name\n province (str): province name or \"-\"\n dataset (str): 'COVID-19 Data Hub' or 'Our World In Data'\n \"\"\"\n dataset_dict = {\n \"COVID-19 Data Hub\": self._cleaned_df,\n \"Our World In Data\": self._cleaned_df_owid,\n }\n df = dataset_dict[dataset].copy()\n return df.loc[(df[self.COUNTRY] == country) & (df[self.PROVINCE] == province)]\n\n def _subset_select(self, country, province):\n \"\"\"\n When only \"Our World In Data\" has sufficient data, the subset of this dataset will be returned.\n If not, \"COVID-19 Data Hub\" will be selected.\n\n Args:\n country (str): country name\n province (str): province name or \"-\"\n \"\"\"\n # If 'COVID-19 Data Hub' has sufficient data for the area, it will be used\n hub_df = self._subset_by_area(\n country, province, dataset=\"COVID-19 Data Hub\")\n if self._pcr_check_preconditions(hub_df):\n return hub_df\n # If 'Our World In Data' has sufficient data for the area, it will be used\n owid_df = self._subset_by_area(\n country, province, dataset=\"Our World In Data\")\n if self._pcr_check_preconditions(owid_df):\n return owid_df\n # Failed in retrieving sufficient data\n raise PCRIncorrectPreconditionError(\n country=country, province=province, message=\"Too many 
missing Tests records\")\n\n def positive_rate(self, country, province=None, window=7, show_figure=True, filename=None):\n \"\"\"\n Return the PCR rate of a country as a dataframe.\n\n Args:\n country(str): country name or ISO3 code\n province(str or None): province name\n window (int): window of moving average, >= 1\n show_figure (bool): if True, show the records as a line-plot.\n filename (str): filename of the figure, or None (display figure)\n\n Raises:\n PCRIncorrectPreconditionError: the dataset has too many missing values\n\n Returns:\n pandas.DataFrame\n Columns:\n - Date (pd.TimeStamp): Observation date\n - Tests (int): the number of total tests performed\n - Confirmed (int): the number of confirmed cases\n - Tests_diff (int): daily tests performed\n - Confirmed_diff (int): daily confirmed cases\n - Test_positive_rate (float): positive rate (%) of the daily cases over the total daily tests performed\n\n Note:\n If non monotonic records were found for either confirmed cases or tests,\n \"with partially complemented tests data\" will be added to the title of the figure.\n \"\"\"\n window = self.ensure_natural_int(window, name=\"window\")\n # Subset with area\n country_alias = self.ensure_country_name(country)\n province = province or self.UNKNOWN\n try:\n subset_df = self._subset_select(country_alias, province)\n except PCRIncorrectPreconditionError:\n raise PCRIncorrectPreconditionError(\n country=country, province=province, message=\"Too many missing Tests records\") from None\n # Process PCR data\n df, is_complemented = self._pcr_processing(subset_df, window)\n # Calculate PCR values\n df[self.PCR_RATE] = df[[self.C_DIFF, self.T_DIFF]].apply(\n lambda x: x[0] / x[1] * 100 if x[1] > self.min_pcr_tests else 0, axis=1)\n if not show_figure:\n return df\n # Create figure\n area = self.area_name(country, province=province)\n comp_status = \"\\nwith partially complemented tests data\" if is_complemented else \"\"\n line_plot(\n df.set_index(self.DATE)[self.PCR_RATE],\n title=f\"{area}: Test positive rate (%) over time {comp_status}\",\n ylabel=\"Test positive rate (%)\",\n y_integer=True,\n filename=filename,\n show_legend=False,\n )\n return df\n\n def subset(self, country, province=None, start_date=None, end_date=None, dataset=\"COVID-19 Data Hub\"):\n \"\"\"\n Return subset of the country/province and start/end date.\n\n Args:\n country (str): country name or ISO3 code\n province (str or None): province name\n start_date (str or None): start date, like 22Jan2020\n end_date (str or None): end date, like 01Feb2020\n dataset (str): 'COVID-19 Data Hub' or 'Our World In Data'\n\n Returns:\n pandas.DataFrame\n Index: reset index\n Columns:\n - Date (pd.TimeStamp): Observation date\n - Tests (int): the number of total tests performed\n - Confirmed (int): the number of confirmed cases\n \"\"\"\n country_alias = self.ensure_country_name(country)\n df = self._subset_by_area(\n country=country_alias, province=province, dataset=dataset)\n df = df.drop(\n [self.COUNTRY, self.ISO3, self.PROVINCE], axis=1)\n # Subset with Start/end date\n if start_date is None and end_date is None:\n return df.reset_index(drop=True)\n series = df[self.DATE].copy()\n start_obj = self.date_obj(date_str=start_date, default=series.min())\n end_obj = self.date_obj(date_str=end_date, default=series.max())\n df = df.loc[(start_obj <= series) & (series <= end_obj), :]\n if df.empty:\n raise SubsetNotFoundError(\n country=country, country_alias=country_alias, province=province,\n start_date=start_date, end_date=end_date)\n 
return df.reset_index(drop=True)\n"
] |
[
[
"pandas.concat",
"pandas.to_datetime",
"pandas.to_numeric",
"pandas.DataFrame"
]
] |
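The `positive_rate` method in the record above derives `Test_positive_rate` from cumulative `Tests` and `Confirmed` counts: daily differences are taken, smoothed over a moving `window`, and the confirmed-to-tests ratio is expressed as a percentage. The following is a minimal standalone pandas sketch of that calculation, not the library's own implementation; the example counts and the `window` value are invented for illustration.

```python
import pandas as pd

# Hypothetical cumulative records: total tests and confirmed cases per day.
df = pd.DataFrame({
    "Date": pd.date_range("2020-04-01", periods=10, freq="D"),
    "Tests": [100, 250, 430, 640, 900, 1200, 1550, 1930, 2350, 2800],
    "Confirmed": [5, 14, 25, 40, 61, 85, 112, 140, 171, 205],
})

window = 3  # moving-average window, analogous to the `window` argument above

# Daily increments of the cumulative columns, smoothed over the window.
df["Tests_diff"] = df["Tests"].diff().rolling(window).mean()
df["Confirmed_diff"] = df["Confirmed"].diff().rolling(window).mean()

# Positive rate (%): daily confirmed cases over daily tests performed.
df["Test_positive_rate"] = df["Confirmed_diff"] / df["Tests_diff"] * 100

print(df.dropna().round(2))
```

With real data the record's zero-division guard (`min_pcr_tests`) would also be needed; the toy counts here keep `Tests_diff` strictly positive.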
TheCheeseToast/fooof
|
[
"f3f8422af7d87fa73772e083deaf8439ca59908d"
] |
[
"fooof/synth.py"
] |
[
"\"\"\"Synthesis functions for generating model components and synthetic power spectra.\"\"\"\n\nimport numpy as np\n\nfrom fooof.core.funcs import gaussian_function, get_bg_func, infer_bg_func\n\n###################################################################################################\n###################################################################################################\n\ndef gen_freqs(freq_range, freq_res):\n \"\"\"Generate a frequency vector, from the frequency range and resolution.\n\n Parameters\n ----------\n freq_range : list of [float, float]\n Frequency range of desired frequency vector, as [f_low, f_high].\n freq_res : float\n Frequency resolution of desired frequency vector.\n\n Returns\n -------\n 1d array\n Frequency values (linear).\n \"\"\"\n\n return np.arange(freq_range[0], freq_range[1]+freq_res, freq_res)\n\n\ndef gen_power_spectrum(freq_range, background_params, gauss_params, nlv=0.005, freq_res=0.5):\n \"\"\"Generate a synthetic power spectrum.\n\n Parameters\n ----------\n freq_range : list of [float, float]\n Minimum and maximum values of the desired frequency vector.\n background_params : list of float\n Parameters to create the background of a power spectrum.\n gauss_params : list of list of float\n Parameters to create peaks. Length of n_peaks * 3.\n nlv : float, optional\n Noise level to add to generated power spectrum. Default: 0.005\n freq_res : float, optional\n Frequency resolution for the synthetic power spectra.\n\n Returns\n -------\n xs : 1d array\n Frequency values (linear).\n ys : 1d array\n Power values (linear).\n\n Notes\n -----\n - The type of background process to use is inferred from the provided parameters.\n - If length of 2, 'fixed' background is used, if length of 3, 'knee' is used.\n \"\"\"\n\n xs = gen_freqs(freq_range, freq_res)\n ys = _gen_power_vals(xs, background_params, gauss_params, nlv)\n\n return xs, ys\n\n\ndef gen_group_power_spectra(n_spectra, freq_range, bgp_opts, gauss_opts, nlv=0.005, freq_res=0.5):\n \"\"\"Generate a group of synthetic power spectra.\n\n Parameters\n ----------\n n_spectra : int\n The number of power spectra to generate in the matrix.\n freq_range : list of [float, float]\n Minimum and maximum values of the desired frequency vector.\n background_opts : list of list of float\n Group of parameter sets to create the background of power spectrum.\n gauss_opts : list of of list of float\n Group of parameters sets to create peaks. Length of n_peaks * 3.\n nlv : float, optional\n Noise level to add to generated power spectrum. default: 0.005\n freq_res : float, optional\n Frequency resolution for the synthetic power spectra. 
default: 0.5\n\n Returns\n -------\n xs : 1d array\n Frequency values (linear).\n ys : 2d array\n Matrix of power values (linear).\n\n Notes\n -----\n - Paramaters options can contain more than one parameter description.\n - If so, for each power spectrum, parameters are randomly chosen from the options.\n - The type of background process to use is inferred from the provided parameters.\n - If length of 2, 'fixed' background is used, if length of 3, 'knee' is used.\n \"\"\"\n\n xs = gen_freqs(freq_range, freq_res)\n\n ys = np.zeros([n_spectra, len(xs)])\n\n for ind in range(n_spectra):\n\n # Randomly select parameters from options to use for power spectrum\n bg_params = bgp_opts[np.random.randint(0, len(bgp_opts))]\n gauss_params = gauss_opts[np.random.randint(0, len(gauss_opts))]\n\n ys[ind, :] = _gen_power_vals(xs, bg_params, gauss_params, nlv)\n\n return xs, ys\n\n\ndef gen_background(xs, background_params, background_mode=None):\n \"\"\"Generate background values, from parameter definition.\n\n Parameters\n ----------\n xs : 1d array\n Frequency vector to create background from.\n background_params : list of float\n Paramters that define the background process.\n background_mode : {'fixed', 'knee'}, optional\n Which kind of background to generate power spectra with.\n If not provided, is infered from the parameters.\n\n Returns\n -------\n 1d array\n Generated background values\n \"\"\"\n\n if not background_mode:\n background_mode = infer_bg_func(background_params)\n\n bg_func = get_bg_func(background_mode)\n\n return bg_func(xs, *background_params)\n\n\ndef gen_peaks(xs, gauss_params):\n \"\"\"Generate peaks values, from parameter definition.\n\n Parameters\n ----------\n xs : 1d array\n Frequency vector to create peak values from.\n gauss_params : list of list of float\n Parameters to create peaks. Length of n_peaks * 3.\n\n Returns\n -------\n 1d array\n Generated background values.\n \"\"\"\n\n return gaussian_function(xs, *gauss_params)\n\n\ndef _gen_power_vals(xs, bg_params, gauss_params, nlv):\n \"\"\"Generate power values for a power spectrum.\n\n Parameters\n ----------\n xs : 1d array\n Frequency vector to create power values from.\n background_params : list of float\n Parameters to create the background of power spectrum.\n gauss_params : list of float\n Parameters to create peaks. Length of n_peaks * 3.\n nlv : float\n Noise level to add to generated power spectrum.\n\n Returns\n -------\n ys : 1d vector\n Power values (linear).\n \"\"\"\n\n background = gen_background(xs, bg_params, infer_bg_func(bg_params))\n peaks = gen_peaks(xs, gauss_params)\n noise = np.random.normal(0, nlv, len(xs))\n\n ys = np.power(10, background + peaks + noise)\n\n return ys\n"
] |
[
[
"numpy.arange",
"numpy.power"
]
] |
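The `fooof/synth.py` record builds synthetic spectra by summing a background, Gaussian peaks, and noise in log-power space and exponentiating back to linear power. Below is a condensed, self-contained sketch of that recipe, assuming the two-parameter 'fixed' background form `offset - log10(f^exponent)`; the helper name `synth_spectrum` and the parameter values are illustrative, not part of the package.

```python
import numpy as np

def gen_freqs(freq_range, freq_res):
    # Linear frequency vector spanning [f_low, f_high] at the given resolution.
    return np.arange(freq_range[0], freq_range[1] + freq_res, freq_res)

def synth_spectrum(freq_range=(3, 40), bg=(1.0, 1.5), peaks=((10, 0.5, 2),),
                   nlv=0.005, freq_res=0.5):
    """Background + Gaussian peaks + noise in log power, then back to linear."""
    xs = gen_freqs(freq_range, freq_res)
    offset, exponent = bg
    background = offset - np.log10(xs ** exponent)   # assumed 'fixed' background form
    peak_vals = np.zeros_like(xs)
    for ctr, hgt, wid in peaks:                      # one Gaussian per (center, height, width)
        peak_vals += hgt * np.exp(-(xs - ctr) ** 2 / (2 * wid ** 2))
    noise = np.random.normal(0, nlv, xs.size)
    return xs, np.power(10, background + peak_vals + noise)

xs, ys = synth_spectrum()
print(xs.shape, ys.shape, ys[:3])
```

The original delegates the background and peak shapes to `fooof.core.funcs`; the inline formulas above are stand-ins so the sketch runs without the package.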
deniskamazur/hm-debug
|
[
"cf31951504c38a1ea5e868e607ea74691092561a"
] |
[
"tests/test_p2p_daemon.py"
] |
[
"import asyncio\nimport multiprocessing as mp\nimport subprocess\nfrom contextlib import closing\nfrom functools import partial\nfrom typing import List\n\nimport numpy as np\nimport pytest\nfrom multiaddr import Multiaddr\n\nfrom hivemind.p2p import P2P, P2PDaemonError, P2PHandlerError\nfrom hivemind.proto import dht_pb2\nfrom hivemind.utils.networking import get_free_port\nfrom hivemind.utils.serializer import MSGPackSerializer\n\n\ndef is_process_running(pid: int) -> bool:\n return subprocess.run([\"ps\", \"-p\", str(pid)], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode == 0\n\n\nasync def replicate_if_needed(p2p: P2P, replicate: bool) -> P2P:\n return await P2P.replicate(p2p.daemon_listen_maddr) if replicate else p2p\n\n\n@pytest.mark.asyncio\nasync def test_daemon_killed_on_del():\n p2p_daemon = await P2P.create()\n\n child_pid = p2p_daemon._child.pid\n assert is_process_running(child_pid)\n\n await p2p_daemon.shutdown()\n assert not is_process_running(child_pid)\n\n\n@pytest.mark.asyncio\nasync def test_startup_error_message():\n with pytest.raises(P2PDaemonError, match=r\"Failed to connect to bootstrap peers\"):\n await P2P.create(\n initial_peers=[f\"/ip4/127.0.0.1/tcp/{get_free_port()}/p2p/QmdaK4LUeQaKhqSFPRu9N7MvXUEWDxWwtCvPrS444tCgd1\"]\n )\n\n with pytest.raises(P2PDaemonError, match=r\"Daemon failed to start in .+ seconds\"):\n await P2P.create(startup_timeout=0.01) # Test that startup_timeout works\n\n\n@pytest.mark.parametrize(\n \"host_maddrs\",\n [\n [Multiaddr(\"/ip4/127.0.0.1/tcp/0\")],\n [Multiaddr(\"/ip4/127.0.0.1/udp/0/quic\")],\n [Multiaddr(\"/ip4/127.0.0.1/tcp/0\"), Multiaddr(\"/ip4/127.0.0.1/udp/0/quic\")],\n ],\n)\n@pytest.mark.asyncio\nasync def test_transports(host_maddrs: List[Multiaddr]):\n server = await P2P.create(quic=True, host_maddrs=host_maddrs)\n peers = await server.list_peers()\n assert len(peers) == 0\n\n client = await P2P.create(quic=True, host_maddrs=host_maddrs, initial_peers=await server.get_visible_maddrs())\n await client.wait_for_at_least_n_peers(1)\n\n peers = await client.list_peers()\n assert len(peers) == 1\n peers = await server.list_peers()\n assert len(peers) == 1\n\n\n@pytest.mark.asyncio\nasync def test_daemon_replica_does_not_affect_primary():\n p2p_daemon = await P2P.create()\n p2p_replica = await P2P.replicate(p2p_daemon.daemon_listen_maddr)\n\n child_pid = p2p_daemon._child.pid\n assert is_process_running(child_pid)\n\n await p2p_replica.shutdown()\n assert is_process_running(child_pid)\n\n await p2p_daemon.shutdown()\n assert not is_process_running(child_pid)\n\n\n@pytest.mark.parametrize(\n \"should_cancel,replicate\",\n [\n (True, False),\n (True, True),\n (False, False),\n (False, True),\n ],\n)\n@pytest.mark.asyncio\nasync def test_call_protobuf_handler(should_cancel, replicate, handle_name=\"handle\"):\n handler_cancelled = False\n server_primary = await P2P.create()\n server = await replicate_if_needed(server_primary, replicate)\n\n async def ping_handler(request, context):\n try:\n await asyncio.sleep(2)\n except asyncio.CancelledError:\n nonlocal handler_cancelled\n handler_cancelled = True\n return dht_pb2.PingResponse(peer=dht_pb2.NodeInfo(node_id=server.peer_id.to_bytes()), available=True)\n\n server_pid = server_primary._child.pid\n await server.add_protobuf_handler(handle_name, ping_handler, dht_pb2.PingRequest)\n assert is_process_running(server_pid)\n\n client_primary = await P2P.create(initial_peers=await server.get_visible_maddrs())\n client = await replicate_if_needed(client_primary, 
replicate)\n client_pid = client_primary._child.pid\n assert is_process_running(client_pid)\n await client.wait_for_at_least_n_peers(1)\n\n ping_request = dht_pb2.PingRequest(peer=dht_pb2.NodeInfo(node_id=client.peer_id.to_bytes()), validate=True)\n expected_response = dht_pb2.PingResponse(peer=dht_pb2.NodeInfo(node_id=server.peer_id.to_bytes()), available=True)\n\n if should_cancel:\n call_task = asyncio.create_task(\n client.call_protobuf_handler(server.peer_id, handle_name, ping_request, dht_pb2.PingResponse)\n )\n await asyncio.sleep(0.25)\n\n call_task.cancel()\n\n await asyncio.sleep(0.25)\n assert handler_cancelled\n else:\n actual_response = await client.call_protobuf_handler(\n server.peer_id, handle_name, ping_request, dht_pb2.PingResponse\n )\n assert actual_response == expected_response\n assert not handler_cancelled\n\n await server.shutdown()\n await server_primary.shutdown()\n assert not is_process_running(server_pid)\n\n await client_primary.shutdown()\n assert not is_process_running(client_pid)\n\n\n@pytest.mark.asyncio\nasync def test_call_protobuf_handler_error(handle_name=\"handle\"):\n async def error_handler(request, context):\n raise ValueError(\"boom\")\n\n server = await P2P.create()\n server_pid = server._child.pid\n await server.add_protobuf_handler(handle_name, error_handler, dht_pb2.PingRequest)\n assert is_process_running(server_pid)\n\n client = await P2P.create(initial_peers=await server.get_visible_maddrs())\n client_pid = client._child.pid\n assert is_process_running(client_pid)\n await client.wait_for_at_least_n_peers(1)\n\n ping_request = dht_pb2.PingRequest(peer=dht_pb2.NodeInfo(node_id=client.peer_id.to_bytes()), validate=True)\n\n with pytest.raises(P2PHandlerError) as excinfo:\n await client.call_protobuf_handler(server.peer_id, handle_name, ping_request, dht_pb2.PingResponse)\n assert \"boom\" in str(excinfo.value)\n\n await server.shutdown()\n await client.shutdown()\n\n\nasync def handle_square_stream(_, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:\n with closing(writer):\n while True:\n try:\n x = MSGPackSerializer.loads(await P2P.receive_raw_data(reader))\n except asyncio.IncompleteReadError:\n break\n\n result = x ** 2\n\n await P2P.send_raw_data(MSGPackSerializer.dumps(result), writer)\n\n\nasync def validate_square_stream(reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:\n with closing(writer):\n for _ in range(10):\n x = np.random.randint(100)\n\n await P2P.send_raw_data(MSGPackSerializer.dumps(x), writer)\n result = MSGPackSerializer.loads(await P2P.receive_raw_data(reader))\n\n assert result == x ** 2\n\n\n@pytest.mark.asyncio\nasync def test_call_peer_single_process():\n server = await P2P.create()\n server_pid = server._child.pid\n assert is_process_running(server_pid)\n\n handler_name = \"square\"\n await server.add_binary_stream_handler(handler_name, handle_square_stream)\n\n client = await P2P.create(initial_peers=await server.get_visible_maddrs())\n client_pid = client._child.pid\n assert is_process_running(client_pid)\n\n await client.wait_for_at_least_n_peers(1)\n\n _, reader, writer = await client.call_binary_stream_handler(server.peer_id, handler_name)\n await validate_square_stream(reader, writer)\n\n await server.shutdown()\n assert not is_process_running(server_pid)\n\n await client.shutdown()\n assert not is_process_running(client_pid)\n\n\nasync def run_server(handler_name, server_side, response_received):\n server = await P2P.create()\n server_pid = server._child.pid\n assert 
is_process_running(server_pid)\n\n await server.add_binary_stream_handler(handler_name, handle_square_stream)\n\n server_side.send(server.peer_id)\n server_side.send(await server.get_visible_maddrs())\n while response_received.value == 0:\n await asyncio.sleep(0.5)\n\n await server.shutdown()\n assert not is_process_running(server_pid)\n\n\ndef server_target(handler_name, server_side, response_received):\n asyncio.run(run_server(handler_name, server_side, response_received))\n\n\n@pytest.mark.asyncio\nasync def test_call_peer_different_processes():\n handler_name = \"square\"\n\n server_side, client_side = mp.Pipe()\n response_received = mp.Value(np.ctypeslib.as_ctypes_type(np.int32))\n response_received.value = 0\n\n proc = mp.Process(target=server_target, args=(handler_name, server_side, response_received))\n proc.start()\n\n peer_id = client_side.recv()\n peer_maddrs = client_side.recv()\n\n client = await P2P.create(initial_peers=peer_maddrs)\n client_pid = client._child.pid\n assert is_process_running(client_pid)\n\n await client.wait_for_at_least_n_peers(1)\n\n _, reader, writer = await client.call_binary_stream_handler(peer_id, handler_name)\n await validate_square_stream(reader, writer)\n\n response_received.value = 1\n\n await client.shutdown()\n assert not is_process_running(client_pid)\n\n proc.join()\n assert proc.exitcode == 0\n\n\n@pytest.mark.asyncio\nasync def test_error_closes_connection():\n async def handle_raising_error(_, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:\n with closing(writer):\n command = await P2P.receive_raw_data(reader)\n if command == b\"raise_error\":\n raise Exception(\"The handler has failed\")\n else:\n await P2P.send_raw_data(b\"okay\", writer)\n\n server = await P2P.create()\n server_pid = server._child.pid\n assert is_process_running(server_pid)\n\n handler_name = \"handler\"\n await server.add_binary_stream_handler(handler_name, handle_raising_error)\n\n client = await P2P.create(initial_peers=await server.get_visible_maddrs())\n client_pid = client._child.pid\n assert is_process_running(client_pid)\n\n await client.wait_for_at_least_n_peers(1)\n\n _, reader, writer = await client.call_binary_stream_handler(server.peer_id, handler_name)\n with closing(writer):\n await P2P.send_raw_data(b\"raise_error\", writer)\n with pytest.raises(asyncio.IncompleteReadError): # Means that the connection is closed\n await P2P.receive_raw_data(reader)\n\n # Despite the handler raised an exception, the server did not crash and ready for next requests\n assert is_process_running(server_pid)\n\n _, reader, writer = await client.call_binary_stream_handler(server.peer_id, handler_name)\n with closing(writer):\n await P2P.send_raw_data(b\"behave_normally\", writer)\n assert await P2P.receive_raw_data(reader) == b\"okay\"\n\n await server.shutdown()\n assert not is_process_running(server_pid)\n\n await client.shutdown()\n assert not is_process_running(client_pid)\n\n\n@pytest.mark.asyncio\nasync def test_handlers_on_different_replicas():\n async def handler(_, reader: asyncio.StreamReader, writer: asyncio.StreamWriter, key: str) -> None:\n with closing(writer):\n await P2P.send_raw_data(key, writer)\n\n server_primary = await P2P.create()\n server_id = server_primary.peer_id\n await server_primary.add_binary_stream_handler(\"handle_primary\", partial(handler, key=b\"primary\"))\n\n server_replica1 = await replicate_if_needed(server_primary, True)\n await server_replica1.add_binary_stream_handler(\"handle1\", partial(handler, 
key=b\"replica1\"))\n\n server_replica2 = await replicate_if_needed(server_primary, True)\n await server_replica2.add_binary_stream_handler(\"handle2\", partial(handler, key=b\"replica2\"))\n\n client = await P2P.create(initial_peers=await server_primary.get_visible_maddrs())\n await client.wait_for_at_least_n_peers(1)\n\n for name, expected_key in [(\"handle_primary\", b\"primary\"), (\"handle1\", b\"replica1\"), (\"handle2\", b\"replica2\")]:\n _, reader, writer = await client.call_binary_stream_handler(server_id, name)\n with closing(writer):\n assert await P2P.receive_raw_data(reader) == expected_key\n\n await server_replica1.shutdown()\n await server_replica2.shutdown()\n\n # Primary does not handle replicas protocols after their shutdown\n\n for name in [\"handle1\", \"handle2\"]:\n _, reader, writer = await client.call_binary_stream_handler(server_id, name)\n with pytest.raises(asyncio.IncompleteReadError), closing(writer):\n await P2P.receive_raw_data(reader)\n\n await server_primary.shutdown()\n await client.shutdown()\n"
] |
[
[
"numpy.ctypeslib.as_ctypes_type",
"numpy.random.randint"
]
] |
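Most assertions in the `test_p2p_daemon.py` record hinge on the `is_process_running` helper, which shells out to `ps` to confirm that the daemon child is alive or has exited. A minimal sketch of that liveness check in isolation is shown below (POSIX only, and without the hivemind daemon itself); a short-lived `sleep` child stands in for the daemon process.

```python
import os
import subprocess

def is_process_running(pid: int) -> bool:
    # Same `ps -p <pid>` liveness check used by the tests (POSIX only).
    result = subprocess.run(["ps", "-p", str(pid)],
                            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    return result.returncode == 0

# The current interpreter is obviously alive.
assert is_process_running(os.getpid())

# A short-lived child stands in for the p2p daemon process.
child = subprocess.Popen(["sleep", "0.1"])
assert is_process_running(child.pid)
child.wait()  # reaps the child, so ps no longer reports the pid
assert not is_process_running(child.pid)
print("liveness checks passed")
```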
OmnesRes/ATGC2
|
[
"53ee01e60fc6f180b590f5acc5f083155581c96c",
"53ee01e60fc6f180b590f5acc5f083155581c96c"
] |
[
"figures/tmb/tcga/nonsyn_table/VICC_01_R2/analysis.py",
"figures/controls/samples/sim_data/regression/experiment_3/sim_run_instance.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nimport pandas as pd\nfrom model.Sample_MIL import InstanceModels, RaggedModels\nfrom model.KerasLayers import Losses, Metrics\nfrom model import DatasetsUtils\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import r2_score\nimport pickle\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[-1], True)\ntf.config.experimental.set_visible_devices(physical_devices[-1], 'GPU')\n\nimport pathlib\npath = pathlib.Path.cwd()\nif path.stem == 'ATGC2':\n cwd = path\nelse:\n cwd = list(path.parents)[::-1][path.parts.index('ATGC2')]\n import sys\n sys.path.append(str(cwd))\n\n\nD, samples, maf, sample_df = pickle.load(open(cwd / 'figures' / 'tmb' / 'tcga' / 'nonsyn_table' / 'VICC_01_R2' / 'data' / 'data.pkl', 'rb'))\npanels = pickle.load(open(cwd / 'files' / 'tcga_panel_table.pkl', 'rb'))\n\n\n##bin position\ndef pos_one_hot(pos):\n one_pos = int(pos * 100)\n return one_pos, (pos * 100) - one_pos\n\nresult = np.apply_along_axis(pos_one_hot, -1, D['pos_float'][:, np.newaxis])\n\nD['pos_bin'] = np.stack(result[:, 0]) + 1\nD['pos_loc'] = np.stack(result[:, 1])\n\nindexes = [np.where(D['sample_idx'] == idx) for idx in range(sample_df.shape[0])]\n\nones_loader = DatasetsUtils.Map.FromNumpy(np.array([np.ones_like(D['pos_loc'])[i] for i in indexes], dtype='object'), tf.float32)\n\n\nloaders = [\n [ones_loader],\n]\n\n\n# set y label\ny_label = np.log(sample_df['non_syn_counts'].values/(panels.loc[panels['Panel'] == 'Agilent_kit']['cds'].values[0]/1e6) + 1)[:, np.newaxis]\ny_strat = np.argmax(samples['histology'], axis=-1)\n\nlosses = [Losses.QuantileLoss()]\nmetrics = [Metrics.QuantileLoss()]\n\nencoders = [InstanceModels.PassThrough(shape=(1,)),\n ]\n\nall_weights = [\n pickle.load(open(cwd / 'figures' / 'tmb' / 'tcga' / 'nonsyn_table' / 'VICC_01_R2' / 'results' / 'run_naive.pkl', 'rb'))\n ]\n\nresults = {}\n\nfor encoder, loaders, weights, name in zip(encoders, loaders, all_weights, ['naive']):\n\n mil = RaggedModels.MIL(instance_encoders=[encoder.model], output_dim=1, pooling='sum', mil_hidden=(64, 32, 16), output_type='quantiles', regularization=0)\n mil.model.compile(loss=losses,\n metrics=metrics,\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.001))\n ##test eval\n test_idx = []\n predictions = []\n\n for index, (idx_train, idx_test) in enumerate(StratifiedKFold(n_splits=8, random_state=0, shuffle=True).split(y_strat, y_strat)):\n mil.model.set_weights(weights[index])\n\n ds_test = tf.data.Dataset.from_tensor_slices((idx_test, y_label[idx_test]))\n ds_test = ds_test.batch(len(idx_test), drop_remainder=False)\n ds_test = ds_test.map(lambda x, y: (tuple([i(x, ragged_output=True) for i in loaders]),\n y,\n ))\n predictions.append(mil.model.predict(ds_test))\n test_idx.append(idx_test)\n\n #mse\n print(round(np.mean((y_label[:, 0][np.concatenate(test_idx)] - np.concatenate(predictions)[:, 1])**2), 4))\n #mae\n print(round(np.mean(np.absolute(y_label[:, 0][np.concatenate(test_idx)] - np.concatenate(predictions)[:, 1])), 4))\n #r2\n print(round(r2_score(y_label[:, 0][np.concatenate(test_idx)], np.concatenate(predictions)[:, 1]), 4))\n print()\n results[name] = np.concatenate(predictions)",
"import numpy as np\nimport tensorflow as tf\nfrom model.Instance_MIL import InstanceModels, RaggedModels\nfrom model import DatasetsUtils\nfrom sklearn.model_selection import StratifiedShuffleSplit\nimport pickle\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[-1], True)\ntf.config.experimental.set_visible_devices(physical_devices[-1], 'GPU')\nimport pathlib\npath = pathlib.Path.cwd()\n\nif path.stem == 'ATGC2':\n cwd = path\nelse:\n cwd = list(path.parents)[::-1][path.parts.index('ATGC2')]\n import sys\n sys.path.append(str(cwd))\n\n##load the instance and sample data\nD, samples = pickle.load(open(cwd / 'figures' / 'controls' / 'samples' / 'sim_data' / 'regression' / 'experiment_3' / 'sim_data.pkl', 'rb'))\n\n##perform embeddings with a zero vector for index 0\nstrand_emb_mat = np.concatenate([np.zeros(2)[np.newaxis, :], np.diag(np.ones(2))], axis=0)\nD['strand_emb'] = strand_emb_mat[D['strand']]\n\nindexes = [np.where(D['sample_idx'] == idx) for idx in range(len(samples['classes']))]\n\nfive_p = np.array([D['seq_5p'][i] for i in indexes], dtype='object')\nthree_p = np.array([D['seq_3p'][i] for i in indexes], dtype='object')\nref = np.array([D['seq_ref'][i] for i in indexes], dtype='object')\nalt = np.array([D['seq_alt'][i] for i in indexes], dtype='object')\nstrand = np.array([D['strand_emb'][i] for i in indexes], dtype='object')\n\nfive_p_loader = DatasetsUtils.Map.FromNumpy(five_p, tf.int32)\nthree_p_loader = DatasetsUtils.Map.FromNumpy(three_p, tf.int32)\nref_loader = DatasetsUtils.Map.FromNumpy(ref, tf.int32)\nalt_loader = DatasetsUtils.Map.FromNumpy(alt, tf.int32)\nstrand_loader = DatasetsUtils.Map.FromNumpy(strand, tf.float32)\n\ny_label = np.log(np.array(samples['values']) + 1)[:, np.newaxis]\ny_strat = np.ones_like(y_label)\n\nidx_train, idx_test = next(StratifiedShuffleSplit(random_state=0, n_splits=1, test_size=200).split(y_strat, y_strat))\nidx_train, idx_valid = [idx_train[idx] for idx in list(StratifiedShuffleSplit(n_splits=1, test_size=300, random_state=0).split(np.zeros_like(y_strat)[idx_train], y_strat[idx_train]))[0]]\n\nds_train = tf.data.Dataset.from_tensor_slices((idx_train, y_label[idx_train], y_strat[idx_train]))\nds_train = ds_train.apply(DatasetsUtils.Apply.StratifiedMinibatch(batch_size=100, ds_size=len(idx_train)))\nds_train = ds_train.map(lambda x, y: ((five_p_loader(x, ragged_output=True),\n three_p_loader(x, ragged_output=True),\n ref_loader(x, ragged_output=True),\n alt_loader(x, ragged_output=True),\n strand_loader(x, ragged_output=True)),\n y))\n\nds_valid = tf.data.Dataset.from_tensor_slices((idx_valid, y_label[idx_valid]))\nds_valid = ds_valid.batch(len(idx_valid), drop_remainder=False)\nds_valid = ds_valid.map(lambda x, y: ((five_p_loader(x, ragged_output=True),\n three_p_loader(x, ragged_output=True),\n ref_loader(x, ragged_output=True),\n alt_loader(x, ragged_output=True),\n strand_loader(x, ragged_output=True)),\n y))\n\nds_test = tf.data.Dataset.from_tensor_slices((idx_test, y_label[idx_test]))\nds_test = ds_test.batch(len(idx_test), drop_remainder=False)\nds_test = ds_test.map(lambda x, y: ((five_p_loader(x, ragged_output=True),\n three_p_loader(x, ragged_output=True),\n ref_loader(x, ragged_output=True),\n alt_loader(x, ragged_output=True),\n strand_loader(x, ragged_output=True)),\n y))\n\nhistories = []\nevaluations = []\nweights = []\nfor i in range(3):\n tile_encoder = InstanceModels.VariantSequence(6, 4, 2, [16, 16, 8, 8])\n mil = 
RaggedModels.MIL(instance_encoders=[tile_encoder.model], output_dim=1, pooling='mean', output_type='regression')\n losses = ['mse']\n mil.model.compile(loss=losses,\n metrics=['mse'],\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.001,\n )\n )\n callbacks = [tf.keras.callbacks.EarlyStopping(monitor='val_mse', min_delta=0.001, patience=20, mode='min', restore_best_weights=True)]\n history = mil.model.fit(ds_train, steps_per_epoch=10, validation_data=ds_valid, epochs=100000, callbacks=callbacks)\n evaluation = mil.model.evaluate(ds_test)\n histories.append(history.history)\n evaluations.append(evaluation)\n weights.append(mil.model.get_weights())\n\n\nwith open(cwd / 'figures' / 'controls' / 'samples' / 'sim_data' / 'regression' / 'experiment_3' / 'instance_model_mean.pkl', 'wb') as f:\n pickle.dump([evaluations, histories, weights], f)"
] |
[
[
"numpy.log",
"numpy.ones_like",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.stack",
"sklearn.model_selection.StratifiedKFold",
"numpy.concatenate",
"numpy.apply_along_axis",
"numpy.argmax",
"tensorflow.keras.optimizers.Adam",
"tensorflow.config.experimental.set_visible_devices",
"numpy.where"
],
[
"numpy.ones_like",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.callbacks.EarlyStopping",
"numpy.ones",
"tensorflow.keras.optimizers.Adam",
"numpy.zeros_like",
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.array",
"tensorflow.config.experimental.set_visible_devices",
"numpy.where",
"numpy.zeros"
]
] |
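The first ATGC2 script above gathers out-of-fold predictions with an 8-fold `StratifiedKFold` and then reports MSE, MAE, and R² on the concatenated folds. The sketch below reproduces that evaluation pattern with a plain `LinearRegression` standing in for the ragged MIL model; the toy features, target, and stratification label are invented for illustration.

```python
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import StratifiedKFold

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=200)   # toy regression target
y_strat = (y > np.median(y)).astype(int)              # stand-in stratification label

test_idx, predictions = [], []
splitter = StratifiedKFold(n_splits=8, random_state=0, shuffle=True)
for idx_train, idx_test in splitter.split(y_strat, y_strat):
    model = LinearRegression().fit(X[idx_train], y[idx_train])  # stand-in for the MIL model
    predictions.append(model.predict(X[idx_test]))
    test_idx.append(idx_test)

y_true = y[np.concatenate(test_idx)]
y_pred = np.concatenate(predictions)
print("mse", round(float(np.mean((y_true - y_pred) ** 2)), 4))
print("mae", round(float(np.mean(np.abs(y_true - y_pred))), 4))
print("r2 ", round(r2_score(y_true, y_pred), 4))
```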
ReinardKuroi/worldgen
|
[
"4cd6cca4547579ca89e8e5ccdcb2d360796efc4c"
] |
[
"worldgen/marching_cubes/cube.py"
] |
[
"import numpy\n\n\ndef random_cube():\n return numpy.random.randint(low=0, high=2, size=(2, 2, 2))\n\n\ndef iterate_as_cube_data(data: numpy.ndarray) -> numpy.ndarray:\n for x in range(data.size):\n yield random_cube()\n\n\ndef check_hash(cube, cube_hash):\n check = int(''.join([str(s) for s in reversed(cube.flatten())]), 2)\n assert check == cube_hash, f'{check} != {cube_hash}'\n\n\ndef calculate_hash(cube: numpy.ndarray):\n cube_hash = 0\n for i, v in enumerate(cube.flat):\n if v == 1:\n cube_hash += 1 << i\n check_hash(cube, cube_hash)\n return cube_hash\n\n\ndef march(data: numpy.ndarray):\n for cube in iterate_as_cube_data(data):\n cube_hash = calculate_hash(cube)\n print(f'{cube.flatten()} hash: {cube_hash}')\n\n\nmarch(numpy.zeros((128*128,)))\n"
] |
[
[
"numpy.zeros",
"numpy.random.randint"
]
] |
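In the worldgen record, `calculate_hash` folds the eight corner occupancies of a 2x2x2 cube into a single integer by setting bit *i* for corner *i*, which is the usual marching-cubes case index. A minimal sketch of the same bit-packing on a hand-built cube follows; the function name `cube_index` is illustrative.

```python
import numpy as np

def cube_index(cube: np.ndarray) -> int:
    # Corner i contributes bit i, mirroring calculate_hash in the record above.
    return int(sum(1 << i for i, v in enumerate(cube.flat) if v == 1))

# Hand-built 2x2x2 occupancy cube with only the first and last corners set.
cube = np.zeros((2, 2, 2), dtype=int)
cube.flat[0] = 1
cube.flat[7] = 1

idx = cube_index(cube)
assert idx == (1 << 0) + (1 << 7) == 129
print(f"{cube.flatten()} -> case index {idx}")
```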
Zhe-Cai/pyCM
|
[
"15823c9812ce779d453b65c31be7b1a0ee13c9ee"
] |
[
"pyCM/align_average.py"
] |
[
"#!/usr/bin/env python\n'''\nUses VTK python to allow for editing point clouds associated with the contour \nmethod. Full interaction requires a 3-button mouse and keyboard.\n-------------------------------------------------------------------------------\nCurrent mapping is as follows:\nLMB - rotate about point cloud centroid.\nMMB - pan\nRMB - zoom/refresh window extents\n1 - view 1, default, looks down z axis onto xy plane\n2 - view 2, looks down x axis onto zy plane\n3 - view 3, looks down y axis onto zx plane\nz - increase z-aspect ratio\nx - decrease z-aspect ratio\nc - return to default z-aspect\nf - flip colors from white on dark to dark on white\ni - save output to .png in current working directory\na - remove compass/axes\no - hide/restore outlines\nl - load a results file\n-------------------------------------------------------------------------------\nver 19-01-08\n1.1 - Initial release\n1.2 - Refactored to use PyQt interface and eliminated global variables\n1.3 - Refactored to use PyQt5, Python 3\n1.4 - added option to remove start at centroid\n1.5 - added additional tools for alignment\n'''\n__author__ = \"M.J. Roy\"\n__version__ = \"1.5\"\n__email__ = \"matthew.roy@manchester.ac.uk\"\n__status__ = \"Experimental\"\n__copyright__ = \"(c) M. J. Roy, 2014-2019\"\n\nimport sys\nimport os.path\nimport vtk\nimport vtk.util.numpy_support as VN\nimport numpy as np\nimport numpy.matlib\nimport scipy.io as sio\nfrom scipy.interpolate import griddata\nfrom scipy.spatial.distance import pdist, squareform\nfrom matplotlib import path\nfrom vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom pyCM.pyCMcommon import *\nfrom pkg_resources import Requirement, resource_filename\n\n\ndef aa_def(*args,**kwargs):\n \"\"\"\n Main function, builds qt interaction\n \"\"\" \n app = QtWidgets.QApplication.instance()\n if app is None:\n app = QtWidgets.QApplication(sys.argv)\n \n spl_fname=resource_filename(\"pyCM\",\"meta/pyCM_logo.png\")\n splash_pix = QtGui.QPixmap(spl_fname,'PNG')\n splash = QtWidgets.QSplashScreen(splash_pix)\n splash.setMask(splash_pix.mask())\n\n splash.show()\n app.processEvents()\n \n window = aa_interactor(None)\n\n if len(args)==1: \n aa_interactor.get_input_data(window,args[0])\n else: \n aa_interactor.get_input_data(window,None)\n\n window.show()\n splash.finish(window)\n window.iren.Initialize() # Need this line to actually show the render inside Qt\n\n ret = app.exec_()\n \n if sys.stdin.isatty() and not hasattr(sys,'ps1'):\n sys.exit(ret)\n else:\n return window\n\nclass ali_avg(object):\n \"\"\"\n Class to build qt interaction, including VTK widget\n setupUi builds, initialize starts VTK widget\n \"\"\"\n \n def setupUi(self, MainWindow):\n MainWindow.setWindowTitle(\"pyCM - Alignment and averaging tool v%s\" %__version__)\n MainWindow.setWindowIcon(QtGui.QIcon(resource_filename(\"pyCM\",\"meta/pyCM_icon.png\")))\n self.centralWidget = QtWidgets.QWidget(MainWindow)\n if hasattr(MainWindow,'setCentralWidget'):\n MainWindow.setCentralWidget(self.centralWidget)\n else:\n self.centralWidget=MainWindow\n self.mainlayout=QtWidgets.QGridLayout(self.centralWidget)\n \n self.vtkWidget = QVTKRenderWindowInteractor(self.centralWidget)\n \n mainUiBox = QtWidgets.QGridLayout()\n \n self.vtkWidget.setMinimumSize(QtCore.QSize(1050, 600))\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)\n sizePolicy.setHorizontalStretch(10)\n 
sizePolicy.setVerticalStretch(10)\n sizePolicy.setHeightForWidth(self.vtkWidget.sizePolicy().hasHeightForWidth())\n self.vtkWidget.setSizePolicy(sizePolicy)\n \n self.statLabel=QtWidgets.QLabel(\"Idle\")\n self.statLabel.setWordWrap(True)\n self.statLabel.setFont(QtGui.QFont(\"Helvetica\",italic=True))\n self.statLabel.setMinimumWidth(100)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.statLabel.sizePolicy().hasHeightForWidth())\n self.statLabel.setSizePolicy(sizePolicy)\n\n headFont=QtGui.QFont(\"Helvetica [Cronyx]\",weight=QtGui.QFont.Bold)\n\n \n # #define buttons/widgets\n # self.reloadButton = QtWidgets.QPushButton('Load')\n scalingLabel=QtWidgets.QLabel(\"Active axis for scaling\")\n scalingLabel.setFont(headFont)\n self.xsButton=QtWidgets.QRadioButton(\"x\")\n self.ysButton=QtWidgets.QRadioButton(\"y\")\n self.zsButton=QtWidgets.QRadioButton(\"z\")\n self.zsButton.setChecked(True)\n self.scalingButtonGroup = QtWidgets.QButtonGroup()\n self.scalingButtonGroup.addButton(self.xsButton)\n self.scalingButtonGroup.addButton(self.ysButton)\n self.scalingButtonGroup.addButton(self.zsButton)\n self.scalingButtonGroup.setExclusive(True)\n scaleBoxlayout = QtWidgets.QGridLayout()\n scaleBoxlayout.addWidget(self.xsButton,1,1)\n scaleBoxlayout.addWidget(self.ysButton,1,2)\n scaleBoxlayout.addWidget(self.zsButton,1,3)\n \n horizLine1=QtWidgets.QFrame()\n horizLine1.setFrameStyle(QtWidgets.QFrame.HLine)\n mirrorLabel=QtWidgets.QLabel(\"Mirroring\")\n mirrorLabel.setFont(headFont)\n self.mirrorXbutton = QtWidgets.QPushButton('ZY')\n self.mirrorYbutton = QtWidgets.QPushButton('ZX')\n\n horizLine2=QtWidgets.QFrame()\n horizLine2.setFrameStyle(QtWidgets.QFrame.HLine)\n alignLabel=QtWidgets.QLabel(\"Alignment\")\n alignLabel.setFont(headFont)\n self.centRefButton=QtWidgets.QPushButton(\"Move reference centroid to origin\")\n self.centFloatButton=QtWidgets.QPushButton(\"Move float centroid to origin\")\n\n self.transXlabel=QtWidgets.QLabel(\"Translate x:\")\n self.transX = QtWidgets.QDoubleSpinBox()\n self.transX.setValue(0)\n self.transX.setMaximum(300)\n self.transX.setMinimum(-300)\n self.transYlabel=QtWidgets.QLabel(\"Translate y:\")\n self.transY = QtWidgets.QDoubleSpinBox()\n self.transY.setValue(0)\n self.transY.setMaximum(300)\n self.transY.setMinimum(-300)\n self.rotateZlabel=QtWidgets.QLabel(\"Rotate about z (deg):\")\n self.rotateZ= QtWidgets.QDoubleSpinBox()\n self.rotateZ.setValue(0)\n self.rotateZ.setMaximum(180)\n self.rotateZ.setMinimum(-180)\n self.transButton=QtWidgets.QPushButton('Transform floating')\n self.numPntsOutline = QtWidgets.QSpinBox()\n self.numPntsOutline.setMaximum(10000)\n self.reduceOutlineButton = QtWidgets.QPushButton('Decimate outlines')\n\n \n alignAlgoButtonGroup = QtWidgets.QButtonGroup()\n self.useVTKalignButton=QtWidgets.QRadioButton(\"VTK ICP\")\n self.useICPalignButton=QtWidgets.QRadioButton(\"K-neighbour ICP\")\n self.useVTKalignButton.setChecked(True)\n alignAlgoButtonGroup.addButton(self.useVTKalignButton)\n alignAlgoButtonGroup.addButton(self.useICPalignButton)\n alignAlgoButtonGroup.setExclusive(True)\n self.X180Button = QtWidgets.QPushButton(\"Flip X\")\n self.Y180Button = QtWidgets.QPushButton(\"Flip Y\")\n self.alignButton = QtWidgets.QPushButton(\"Align\")\n self.acceptAlignButton = QtWidgets.QPushButton(\"Accept\")\n \n self.alignButton.setStyleSheet(\"background-color : None \")\n \n 
horizLine3=QtWidgets.QFrame()\n horizLine3.setFrameStyle(QtWidgets.QFrame.HLine)\n averageLabel=QtWidgets.QLabel(\"Averaging\")\n averageLabel.setFont(headFont)\n \n #widgets for setting grid\n gridLabel=QtWidgets.QLabel(\"Grid spacing:\")\n self.gridInd = QtWidgets.QDoubleSpinBox()\n self.gridInd.setValue(0)\n self.gridInd.setMaximum(5)\n self.gridInd.setMinimum(0.001)\n \n self.averageButton = QtWidgets.QPushButton('Average')\n self.averageButton.setStyleSheet(\"background-color : None \")\n \n horizLine4=QtWidgets.QFrame()\n horizLine4.setFrameStyle(QtWidgets.QFrame.HLine)\n self.writeButton=QtWidgets.QPushButton('Write')\n \n horizLine5=QtWidgets.QFrame()\n horizLine5.setFrameStyle(QtWidgets.QFrame.HLine)\n\n\n #add widgets to ui\n mainUiBox.addWidget(scalingLabel,0,0,1,2)\n mainUiBox.addLayout(scaleBoxlayout,1,0,1,2)\n mainUiBox.addWidget(horizLine1,2,0,1,2)\n mainUiBox.addWidget(mirrorLabel,3,0,1,2)\n mainUiBox.addWidget(self.mirrorYbutton,4,0,1,1)\n mainUiBox.addWidget(self.mirrorXbutton,4,1,1,1)\n mainUiBox.addWidget(horizLine2,5,0,1,2)\n mainUiBox.addWidget(alignLabel,6,0,1,2)\n mainUiBox.addWidget(self.centRefButton,7,0,1,2)\n mainUiBox.addWidget(self.centFloatButton,8,0,1,2)\n mainUiBox.addWidget(self.transXlabel,9,0,1,1)\n mainUiBox.addWidget(self.transX,9,1,1,1)\n mainUiBox.addWidget(self.transYlabel,10,0,1,1)\n mainUiBox.addWidget(self.transY,10,1,1,1)\n mainUiBox.addWidget(self.rotateZlabel,11,0,1,1)\n mainUiBox.addWidget(self.rotateZ,11,1,1,1)\n mainUiBox.addWidget(self.transButton,12,0,1,2)\n mainUiBox.addWidget(self.X180Button,13,0,1,1)\n mainUiBox.addWidget(self.Y180Button,13,1,1,1)\n mainUiBox.addWidget(self.numPntsOutline,14,0,1,1)\n mainUiBox.addWidget(self.reduceOutlineButton,14,1,1,1)\n \n mainUiBox.addWidget(self.useVTKalignButton,15,0,1,1)\n mainUiBox.addWidget(self.useICPalignButton,15,1,1,1)\n\n mainUiBox.addWidget(self.alignButton,16,0,1,1)\n mainUiBox.addWidget(self.acceptAlignButton,16,1,1,1)\n mainUiBox.addWidget(horizLine3,17,0,1,2)\n mainUiBox.addWidget(averageLabel,18,0,1,2)\n mainUiBox.addWidget(gridLabel,19,0,1,1)\n mainUiBox.addWidget(self.gridInd,19,1,1,1)\n mainUiBox.addWidget(self.averageButton,20,0,1,2)\n mainUiBox.addWidget(horizLine4,21,0,1,2)\n mainUiBox.addWidget(self.writeButton,22,0,1,2)\n mainUiBox.addWidget(horizLine5,23,0,1,2)\n # mainUiBox.addWidget(self.statusLabel,18,0,1,2)\n\n mainUiBox.setColumnMinimumWidth(0,mainUiBox.columnMinimumWidth(0))\n mainUiBox.setColumnMinimumWidth(1,mainUiBox.columnMinimumWidth(0))\n \n lvLayout=QtWidgets.QVBoxLayout()\n lhLayout=QtWidgets.QHBoxLayout()\n lvLayout.addLayout(mainUiBox)\n lvLayout.addStretch(1)\n lhLayout.addLayout(lvLayout)\n lhLayout.addStretch(2)\n\n\n\n self.mainlayout.addWidget(self.vtkWidget,0,0,1,1)\n self.mainlayout.addLayout(lhLayout,0,1,1,1)\n self.mainlayout.addWidget(self.statLabel,1,0,1,2)\n \n def initialize(self):\n self.vtkWidget.start()\n\nclass aa_interactor(QtWidgets.QWidget):\n \"\"\"\n Sets up the main VTK window, reads file and sets connections between UI and interactor\n \"\"\"\n def __init__(self, parent):\n super(aa_interactor,self).__init__(parent)\n self.ui = ali_avg()\n self.ui.setupUi(self)\n self.ren = vtk.vtkRenderer()\n self.ren.SetBackground(0.1, 0.2, 0.4)\n self.ui.vtkWidget.GetRenderWindow().AddRenderer(self.ren)\n self.iren = self.ui.vtkWidget.GetRenderWindow().GetInteractor()\n style=vtk.vtkInteractorStyleTrackballCamera()\n style.AutoAdjustCameraClippingRangeOn()\n self.iren.SetInteractorStyle(style)\n self.ren.GetActiveCamera().ParallelProjectionOn()\n 
self.cp=self.ren.GetActiveCamera().GetPosition()\n self.fp=self.ren.GetActiveCamera().GetFocalPoint()\n self.iren.AddObserver(\"KeyPressEvent\", self.keypress)\n \n self.PointSize=2\n self.LineWidth=1\n self.Zaspect=1.0\n self.limits=np.empty(6)\n self.picking=False\n self.Offset=0\n self.mirrored=False\n self.aligned=False\n \n # self.ui.reloadButton.clicked.connect(lambda: self.get_input_data(None))\n self.ui.mirrorXbutton.clicked.connect(lambda: self.flipside('x'))\n self.ui.mirrorYbutton.clicked.connect(lambda: self.flipside('y'))\n self.ui.centRefButton.clicked.connect(lambda: self.zero_pos('ref'))\n self.ui.centFloatButton.clicked.connect(lambda: self.zero_pos('float'))\n self.ui.transButton.clicked.connect(lambda: self.shift())\n self.ui.X180Button.clicked.connect(lambda: self.flip('x'))\n self.ui.Y180Button.clicked.connect(lambda: self.flip('y'))\n self.ui.alignButton.clicked.connect(lambda: self.align())\n self.ui.acceptAlignButton.clicked.connect(lambda: self.accept_align())\n self.ui.averageButton.clicked.connect(lambda: self.average())\n self.ui.writeButton.clicked.connect(lambda: self.write())\n self.ui.reduceOutlineButton.clicked.connect(lambda: self.reduce_outline())\n \n def update_float(self):\n \n if hasattr(self,'fActor'):\n self.ren.RemoveActor(self.fActor)\n self.ren.RemoveActor(self.fOutlineActor)\n \n color=(255, 205, 52)\n self.fPC, self.fActor, _, = gen_point_cloud(self.flp,color,self.PointSize)\n self.ren.AddActor(self.fActor)\n \n self.fOutlineActor, self.fOPC = gen_outline(self.fO_local,color,self.PointSize)\n self.ren.AddActor(self.fOutlineActor)\n \n def update_limits(self):\n\n self.limits = get_limits(np.vstack((self.flp,self.rp,self.rO_local,self.fO_local)))\n \n s,nl,axs=self.get_scale()\n\n self.fActor.SetScale(s)\n self.fActor.Modified()\n\n #add axes\n try: self.ren.RemoveActor(self.axisActor)\n except: pass\n self.axisActor = add_axis(self.ren,nl,axs)\n\n #update\n self.ui.vtkWidget.update()\n self.ui.vtkWidget.setFocus() \n \n def zero_pos(self,p):\n '''\n Moves the outline and point cloud from the centroidof the outline of p to 0,0,0 \n '''\n self.unsaved_changes=True\n \n if self.averaged == True: #then set it false and change the button\n self.averaged = False\n self.ui.averageButton.setStyleSheet(\"background-color : None \")\n \n if self.aligned == True: #then set it false and change the button\n self.aligned = False\n self.ui.alignButton.setStyleSheet(\"background-color : None \")\n \n local_trans=np.identity(4)\n if p == \"ref\":\n self.ren.RemoveActor(self.rActor)\n #perform move on datasets\n centroid = np.mean(self.rO, axis = 0)\n #update the homogeneous transformation matrix\n local_trans[0:3,3]=-centroid\n self.refTrans.append(local_trans)\n self.rp = self.rp - centroid\n self.rO = self.rO - centroid\n self.rO_local = self.rO_local - centroid\n\n color=(242, 101, 34)\n self.ren.RemoveActor(self.rOutlineActor)\n self.rPC, self.rActor, _, = gen_point_cloud(self.rp,color,self.PointSize)\n self.ren.AddActor(self.rActor)\n self.rOutlineActor, self.rOPC = gen_outline(self.rO_local,color,self.PointSize)\n self.ren.AddActor(self.rOutlineActor)\n \n if p == \"float\":\n \n # perform move on datasets\n centroid = np.mean(self.fO, axis = 0)\n #update homogeneous transformation matrix\n local_trans[0:3,3]=-centroid\n self.floatTrans.append(local_trans)\n self.flp = self.flp - centroid\n self.fO = self.fO - centroid\n self.fO_local = self.fO_local - centroid\n self.update_float()\n\n self.update_limits()\n self.ren.ResetCamera()\n \n def 
reduce_outline(self):\n '''\n Decimates alias outlines used for alignment using reduce_equally.\n '''\n\n self.ren.RemoveActor(self.rOutlineActor)\n \n if self.ui.numPntsOutline.value() > len(self.rO_local):\n self.rO_local=self.rO\n self.fO_local=self.fO\n \n #Do reference first\n color=(242, 101, 34)\n\n X = respace_equally(self.rO_local,self.ui.numPntsOutline.value())[0]\n self.rO_local=np.zeros((self.ui.numPntsOutline.value(),3))\n self.rO_local[:,:-1]=X\n self.rOutlineActor, self.rOPC = gen_outline(self.rO_local,color,self.PointSize)\n self.ren.AddActor(self.rOutlineActor)\n \n color=(255, 205, 52)\n\n X = respace_equally(self.fO_local,self.ui.numPntsOutline.value())[0]\n self.fO_local=np.zeros((self.ui.numPntsOutline.value(),3))\n self.fO_local[:,:-1]=X\n \n self.update_float()\n self.update_limits()\n \n def shift(self):\n '''\n Applies rigid body transformations to the floating dataset\n '''\n self.unsaved_changes=True\n \n if self.averaged == True: #then set it false and change the button\n self.averaged = False\n self.ui.averageButton.setStyleSheet(\"background-color : None \")\n \n if self.aligned == True: #then set it false and change the button\n self.aligned = False\n self.ui.alignButton.setStyleSheet(\"background-color : None \")\n\n #get x and y translations and z rotation and update float transformation matrix\n local_trans = np.identity(4)\n a=np.deg2rad(float(self.ui.rotateZ.value()))\n local_trans[0:2,0:2]=np.array([[np.cos(a),-np.sin(a)],[np.sin(a),np.cos(a)]])\n \n local_trans[0,-1]=float(self.ui.transX.value())\n local_trans[1,-1]=float(self.ui.transY.value())\n self.floatTrans.append(local_trans)\n \n #apply operation\n self.flp=apply_trans(self.flp,local_trans)\n self.fO=apply_trans(self.fO,local_trans)\n self.fO_local=apply_trans(self.fO_local,local_trans)\n\n self.update_float()\n self.update_limits()\n \n def flip(self,axis):\n '''\n Applies a rotation of 180 degrees about 'axis'\n '''\n self.unsaved_changes=True\n \n if self.averaged == True: #then set it false and change the button\n self.averaged = False\n self.ui.averageButton.setStyleSheet(\"background-color : None \")\n \n if self.aligned == True: #then set it false and change the button\n self.aligned = False\n self.ui.alignButton.setStyleSheet(\"background-color : None \")\n\n \n local_trans = np.identity(4)\n if axis == 'x':\n local_trans[1,1] = -1\n local_trans[2,2] = -1\n if axis == 'y':\n local_trans[0,0] = -1\n local_trans[2,2] = -1\n #update overall homogeneous transformation matrix\n self.floatTrans.append(local_trans)\n \n #apply operation\n self.flp=np.dot(self.flp,local_trans[0:3,0:3])\n self.fO=np.dot(self.fO,local_trans[0:3,0:3])\n self.fO_local=np.dot(self.fO_local,local_trans[0:3,0:3])\n \n \n self.update_float()\n self.update_limits()\n \n \n def write(self):\n \n mat_vars=sio.whosmat(self.fileo)\n if not set(['aa', 'trans']).isdisjoint([item for sublist in mat_vars for item in sublist]): #tell the user that they might overwrite their data\n ret=QtWidgets.QMessageBox.warning(self, \"pyCM Warning\", \\\n \"There is already data associated with this analysis step saved. 
Overwrite and invalidate subsequent steps?\", \\\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)\n if ret == QtWidgets.QMessageBox.No: #don't overwrite\n return\n else:\n #delete fitting parameters with pyCMcommon helper function, which negates FEA pre-processing as well.\n clear_mat(self.fileo,['x_out','aa_mask','spline_x']) \n\n mat_contents=sio.loadmat(self.fileo)\n \n new={'trans': {'ref':self.refTrans, 'float':self.floatTrans},'aa': {'pnts': self.ap, 'gsize': self.gsize}}\n \n mat_contents.update(new) #update the dictionary\n \n sio.savemat(self.fileo,mat_contents) \n self.ui.statLabel.setText(\"Wrote data.\")\n self.unsaved_changes=False\n \n def average(self):\n \n self.unsaved_changes=True\n \n if hasattr(self,'aActor'):\n self.ren.RemoveActor(self.aActor)\n \n self.ui.statLabel.setText(\"Averaging, applying grid . . .\")\n QtWidgets.QApplication.processEvents()\n \n #temporarily shift all data such that it appears in the first cartesian quadrant\n tT=np.amin(self.rO,axis=0)\n self.rO, self.fO, self.rp, self.flp=self.rO-tT, self.fO-tT, self.rp-tT, self.flp-tT\n \n #use max to get a 'window' for assessing grid spacing\n RefMax=np.amax(self.rO,axis=0)\n RefMin=np.amin(self.rO,axis=0)\n windowVerts=np.matrix([[0.25*RefMin[0], 0.25*RefMin[1]],\n [0.25*RefMin[0], 0.25*(RefMax[1])],\n [0.25*(RefMax[1]), 0.25*(RefMax[1])],\n [0.25*(RefMax[0]), 0.25*(RefMin[1])]]);\n \n p=path.Path(windowVerts)\n inWindow=p.contains_points(self.rp[:,:2]) #first 2 columns of RefPoints is x and y\n \n windowed=self.rp[inWindow,:2]\n \n #populate grid size if attribute doesn't exist\n if not hasattr(self,'gsize'):\n gs=squareform(pdist(windowed,'euclidean')) \n self.gsize = np.mean(np.sort(gs)[:,1])\n self.ui.gridInd.setValue(self.gsize)\n else:\n self.gsize=self.ui.gridInd.value()\n \n #grid the reference based on gsize, bumping out the grid by 10% in either direction\n grid_x, grid_y = np.meshgrid(\n np.linspace(1.1*RefMin[0],1.1*RefMax[0],int((1.1*RefMax[0]-1.1*RefMin[0])/self.gsize)),\n np.linspace(1.1*RefMin[1],1.1*RefMax[1],int((1.1*RefMax[1]-1.1*RefMin[1])/self.gsize)), \n indexing='xy')\n \n #apply the grid to the reference data\n grid_Ref=griddata(self.rp[:,:2],self.rp[:,-1],(grid_x,grid_y),method='linear')\n \n #apply the grid to the aligned data\n grid_Align=griddata(self.flp[:,:2],self.flp[:,-1],(grid_x,grid_y),method='linear')\n \n self.ui.statLabel.setText(\"Averaging using grid . . .\")\n QtWidgets.QApplication.processEvents()\n \n #average z values\n grid_Avg=(grid_Ref+grid_Align)/2\n \n #make sure that there isn't anything averaged outside the floating outline\n p=path.Path(self.rO[:,:2])\n inTest=np.hstack((np.ravel(grid_x.T)[np.newaxis].T,np.ravel(grid_y.T)[np.newaxis].T))\n inOutline=p.contains_points(inTest)\n \n #averaged points\n self.ap = np.hstack((inTest[inOutline,:], \\\n np.ravel(grid_Avg.T)[np.newaxis].T[inOutline]))\n \n #move everything back to original location\n self.rO, self.fO, self.rp, self.flp, self.ap = \\\n self.rO+tT, self.fO+tT, self.rp+tT, self.flp+tT, self.ap+tT\n \n self.ui.statLabel.setText(\"Rendering . . 
.\")\n QtWidgets.QApplication.processEvents()\n \n #show it\n color=(int(0.2784*255),int(0.6745*255),int(0.6941*255))\n _, self.aActor, _, = gen_point_cloud(self.ap,color,self.PointSize)\n self.ren.AddActor(self.aActor)\n \n s,nl,axs=self.get_scale()\n\n self.aActor.SetScale(s)\n self.aActor.Modified()\n \n #update\n self.ui.vtkWidget.update()\n self.ui.vtkWidget.setFocus()\n self.ui.statLabel.setText(\"Averaging complete.\")\n self.averaged=True\n self.ui.averageButton.setStyleSheet(\"background-color :rgb(77, 209, 97);\")\n \n \n def flipside(self,flipDirection):\n self.ui.statLabel.setText(\"Starting mirroring . . .\")\n self.ui.vtkWidget.update()\n local_trans=np.identity(4)\n\n self.unsaved_changes=True\n \n if self.averaged == True: #then set it false and change the button\n self.averaged = False\n self.ui.averageButton.setStyleSheet(\"background-color : None \")\n \n if flipDirection == \"x\":\n local_trans[0,0]=-1\n self.flp[:,0]=-self.flp[:,0]\n # self.fO[:,0]=-self.fO[:,0]\n self.fO_local[:,0]=-self.fO_local[:,0]\n \n if not self.mirrored:\n self.ui.mirrorXbutton.setStyleSheet(\"background-color :rgb(77, 209, 97);\")\n self.mirrored=True\n self.mirror_plane=\"x\"\n elif self.mirror_plane == \"x\":\n self.ui.mirrorXbutton.setStyleSheet(\"background-color: None\")\n self.mirrored=False\n elif self.mirror_plane == \"y\":\n self.flp[:,1]=-self.flp[:,1]\n # self.fO[:,1]=-self.fO[:,1]\n self.fO_local[:,1]=-self.fO_local[:,1]\n self.mirrored=True\n self.mirror_plane=\"x\"\n self.ui.mirrorYbutton.setStyleSheet(\"background-color: None\")\n self.ui.mirrorXbutton.setStyleSheet(\"background-color :rgb(77, 209, 97);\")\n \n elif flipDirection == \"y\":\n local_trans[1,1]=-1\n self.flp[:,1]=-self.flp[:,1]\n # self.fO[:,1]=-self.fO[:,1]\n self.fO_local[:,1]=-self.fO_local[:,1]\n if not self.mirrored:\n self.ui.mirrorYbutton.setStyleSheet(\"background-color :rgb(77, 209, 97);\")\n self.mirrored=True\n self.mirror_plane=\"y\"\n elif self.mirror_plane == \"y\":\n self.ui.mirrorYbutton.setStyleSheet(\"background-color: None\")\n self.mirrored=False\n elif self.mirror_plane == \"x\":\n self.flp[:,0]=-self.flp[:,0]\n # self.fO[:,0]=-self.fO[:,0]\n self.fO_local[:,0]=-self.fO_local[:,0]\n self.mirrored=True\n self.mirror_plane=\"y\"\n self.ui.mirrorXbutton.setStyleSheet(\"background-color: None\")\n self.ui.mirrorYbutton.setStyleSheet(\"background-color :rgb(77, 209, 97);\")\n\n self.floatTrans.append(local_trans)\n self.update_float()\n self.update_limits()\n self.ren.ResetCamera()\n self.ui.statLabel.setText(\"Mirror operation complete.\")\n \n def accept_align(self):\n '''\n Accepts the current alignment and allows analysis to proceed if the profile has not been algorithmically aligned with the align button being pressed.\n '''\n self.aligned = True\n self.ui.alignButton.setStyleSheet(\"background-color :rgb(77, 209, 97);\")\n \n def align(self):\n '''\n Uses the built-in icp landmark transformation provided by vtk to outline actors, and then updates the renderer and the stored homogeneous transformation matrix transM\n '''\n self.unsaved_changes=True\n \n if self.averaged == True: #then set it false and change the button\n self.averaged = False\n self.ui.averageButton.setStyleSheet(\"background-color : None \")\n \n if self.aligned == True: #then set it false and change the button\n self.aligned = False\n self.ui.alignButton.setStyleSheet(\"background-color : None \")\n\n \n self.ui.statLabel.setText(\"Starting alignment . . 
.\")\n QtWidgets.QApplication.processEvents()\n \n \n \n if self.ui.useVTKalignButton.isChecked():\n icp_trans=vtk.vtkIterativeClosestPointTransform()\n icp_trans.SetSource(self.fOPC)\n icp_trans.SetTarget(self.rOPC)\n\n\n icp_trans.StartByMatchingCentroidsOn()\n icp_trans.GetLandmarkTransform().SetModeToRigidBody()\n icp_trans.SetMeanDistanceModeToRMS()\n icp_trans.CheckMeanDistanceOn()\n icp_trans.SetMeanDistanceModeToAbsoluteValue()\n # icp_trans.SetMaximumNumberOfLandmarks(200)\n icp_trans.DebugOn()\n icp_trans.Modified()\n icp_trans.Update()\n icp_trans.Inverse()\n \n T=np.ones(shape=(4,4))\n for i in range(4):\n for j in range(4):\n T[i,j]=icp_trans.GetMatrix().GetElement(i, j)\n T=np.linalg.inv(T)\n\n if self.ui.useICPalignButton.isChecked():\n self.reduce_outline()\n T,_,_ = icp(self.fO_local,self.rO_local)\n \n \n \n \n #apply operation\n self.flp=apply_trans(self.flp,T)\n self.fO_local=apply_trans(self.fO_local,T)\n self.floatTrans.append(T)\n\n \n self.update_float()\n self.update_limits()\n \n \n if self.mirrored==False:\n self.ui.statLabel.setText(\"WARNING alignment proceeded without a mirror operation. Alignment complete.\")\n else:\n self.ui.statLabel.setText(\"Alignment complete.\")\n \n \n def get_input_data(self,filem):\n \"\"\"\n Loads the content of a *.mat file pertaining to this particular step\n \"\"\"\n \n if hasattr(self,'rActor'): #then remove everything\n self.ren.RemoveActor(self.rActor)\n self.ren.RemoveActor(self.fActor)\n self.ren.RemoveActor(self.rOutlineActor)\n self.ren.RemoveActor(self.fOutlineActor)\n \n if hasattr(self,'aActor'):\n self.ren.RemoveActor(self.aActor)\n \n if filem == None:\n filem, _, =get_file('*.mat')\n \n if filem: #check variables\n mat_contents = sio.loadmat(filem)\n self.fileo=filem\n if 'aa' in mat_contents:\n\n \n #draw floating and reference datasets\n \n self.rp=mat_contents['ref']['rawPnts'][0][0]\n ind=mat_contents['ref']['mask'][0][0][0]\n self.rO=mat_contents['ref']['x_out'][0][0]\n self.rO_local=self.rO\n \n self.refTrans=mat_contents['trans']['ref'][0][0]\n \n \n self.rp=self.rp[np.where(ind)]\n \n #apply the transform with post multiplication\n \n for transformation in self.refTrans:\n self.rp=apply_trans(self.rp,transformation)\n self.rO=apply_trans(self.rO,transformation)\n \n self.rO_local=self.rO\n\n \n color=(242, 101, 34)\n self.rPC, self.rActor, _, = gen_point_cloud(self.rp,color,self.PointSize)\n self.ren.AddActor(self.rActor)\n self.rOutlineActor, self.rOPC = gen_outline(self.rO_local,color,self.PointSize)\n self.ren.AddActor(self.rOutlineActor)\n \n s,nl,axs=self.get_scale()\n \n self.rActor.SetScale(s)\n self.rActor.Modified()\n \n #do other one, but with transformed floating points\n self.flp=mat_contents['float']['rawPnts'][0][0]\n ind=mat_contents['float']['mask'][0][0][0]\n self.fO=mat_contents['float']['x_out'][0][0]\n self.fO_local = self.fO\n \n self.flp=self.flp[np.where(ind)]\n\n\n self.floatTrans=mat_contents['trans']['float'][0][0]\n #read in as np array\n \n for transformation in self.floatTrans:\n self.flp=apply_trans(self.flp,transformation)\n self.fO=apply_trans(self.fO,transformation)\n \n \n self.fO_local=self.fO\n \n self.update_float()\n \n #after applied, convert transformation arrays to lists\n self.refTrans = self.refTrans.tolist()\n self.floatTrans = self.floatTrans.tolist()\n \n #show aligned and averaged data\n self.ap=mat_contents['aa']['pnts'][0][0]\n self.gsize=mat_contents['aa']['gsize'][0][0]\n\n #do grid\n self.ui.gridInd.setValue(self.gsize)\n\n \n \n 
color=(int(0.2784*255),int(0.6745*255),int(0.6941*255))\n _, self.aActor, _, = gen_point_cloud(self.ap,color,self.PointSize)\n self.ren.AddActor(self.aActor)\n\n self.aActor.SetScale(s)\n self.aActor.Modified()\n \n self.limits = get_limits(np.vstack((self.flp,self.rp)))\n \n self.update_limits()\n self.ren.ResetCamera()\n\n self.ui.numPntsOutline.setValue(np.min([len(self.rO),len(self.fO)]))\n \n self.ui.statLabel.setText(\"This dataset has already been aligned and averaged.\")\n self.aligned = True\n self.ui.alignButton.setStyleSheet(\"background-color :rgb(77, 209, 97);\")\n self.mirrored=True\n self.averaged=True\n self.ui.averageButton.setStyleSheet(\"background-color :rgb(77, 209, 97);\")\n else:\n self.ui.statLabel.setText(\"This dataset has not been previously aligned.\")\n self.averaged = False\n\n try:\n self.rp=mat_contents['ref']['rawPnts'][0][0]\n ind=mat_contents['ref']['mask'][0][0][0]\n self.rO=mat_contents['ref']['x_out'][0][0]\n self.rO_local=self.rO\n \n \n self.rp=self.rp[np.where(ind)]\n \n color=(242, 101, 34)\n self.rPC, self.rActor, _, = gen_point_cloud(self.rp,color,self.PointSize)\n self.ren.AddActor(self.rActor)\n self.rOutlineActor, self.rOPC = gen_outline(self.rO_local,color,self.PointSize)\n self.ren.AddActor(self.rOutlineActor)\n \n #do other one\n self.flp=mat_contents['float']['rawPnts'][0][0]\n ind=mat_contents['float']['mask'][0][0][0]\n self.fO=mat_contents['float']['x_out'][0][0]\n self.fO_local=self.fO\n \n \n self.flp=self.flp[np.where(ind)]\n \n #populate outline\n self.ui.numPntsOutline.setValue(np.min([len(self.rO),len(self.fO)]))\n \n\n \n color=(255, 205, 52)\n self.fPC, self.fActor, _, = gen_point_cloud(self.flp,color,self.PointSize)\n self.ren.AddActor(self.fActor)\n self.fOutlineActor, self.fOPC = gen_outline(self.fO_local,color,self.PointSize)\n self.ren.AddActor(self.fOutlineActor)\n \n self.limits = get_limits(np.vstack((self.flp,self.rp,self.fO_local,self.rO_local)))\n\n #add axes\n try: self.ren.RemoveActor(self.axisActor)\n except: pass\n self.axisActor = add_axis(self.ren,self.limits,[1,1,1])\n \n #initialize both transformation matrices\n self.refTrans=[]\n self.floatTrans=[]\n\n except Exception as e:\n print(\"Couldn't read in both sets of data.\")\n print(e)\n \n else:\n print(\"Invalid *.mat file\")\n return\n \n self.unsaved_changes=False\n #update\n self.ren.ResetCamera()\n self.ui.vtkWidget.update()\n self.ui.vtkWidget.setFocus()\n \n def keypress(self,obj,event):\n key = obj.GetKeyCode()\n\n if key ==\"1\":\n xyview(self.ren, self.ren.GetActiveCamera(),self.cp,self.fp)\n elif key ==\"2\":\n yzview(self.ren, self.ren.GetActiveCamera(),self.cp,self.fp)\n elif key ==\"3\":\n xzview(self.ren, self.ren.GetActiveCamera(),self.cp,self.fp)\n elif key==\"z\":\n self.Zaspect=self.Zaspect*2\n s,nl,axs=self.get_scale()\n if hasattr(self,'pointActor'):\n self.pointActor.SetScale(s)\n self.pointActor.Modified()\n if hasattr(self,'rActor'):\n self.rActor.SetScale(s)\n self.rActor.Modified()\n if hasattr(self,'fActor'):\n self.fActor.SetScale(s)\n self.fActor.Modified()\n if hasattr(self,'aActor'):\n self.aActor.SetScale(s)\n self.aActor.Modified()\n \n self.ren.RemoveActor(self.axisActor)\n self.axisActor = add_axis(self.ren,nl,axs)\n\n elif key==\"x\":\n self.Zaspect=self.Zaspect*0.5\n s,nl,axs=self.get_scale()\n if hasattr(self,'pointActor'):\n self.pointActor.SetScale(s)\n if hasattr(self,'rActor'):\n self.rActor.SetScale(s)\n self.rActor.Modified()\n if hasattr(self,'fActor'):\n self.fActor.SetScale(s)\n self.fActor.Modified()\n if 
hasattr(self,'aActor'):\n self.aActor.SetScale(s)\n self.aActor.Modified()\n\n self.ren.RemoveActor(self.axisActor)\n self.axisActor = add_axis(self.ren,nl,axs)\n\n\n elif key==\"c\":\n self.Zaspect=1.0\n s,_,_,=self.get_scale()\n if hasattr(self,'pointActor'):\n self.pointActor.SetScale(s)\n if hasattr(self,'rActor'):\n self.rActor.SetScale(s)\n self.rActor.Modified()\n if hasattr(self,'fActor'):\n self.fActor.SetScale(s)\n self.fActor.Modified()\n if hasattr(self,'aActor'):\n # self.fActor.SetScale(1,1,self.Zaspect)\n self.aActor.SetScale(s)\n self.aActor.Modified()\n self.ren.RemoveActor(self.axisActor)\n self.axisActor = add_axis(self.ren,self.limits,[1,1,1])\n self.ren.ResetCamera()\n\n elif key==\"i\":\n im = vtk.vtkWindowToImageFilter()\n writer = vtk.vtkPNGWriter()\n im.SetInput(self.ui.vtkWidget._RenderWindow)\n im.Update()\n writer.SetInputConnection(im.GetOutputPort())\n writer.SetFileName(\"Avg_aligned.png\")\n writer.Write()\n print(\"Screen output saved to %s\" %os.path.join(os.getcwd(),'Avg_aligned.png'))\n\n elif key==\"a\":\n flip_visible(self.axisActor)\n \n elif key == \"o\":\n flip_visible(self.outlineActor)\n \n elif key == \"f\":\n flip_colors(self.ren,self.axisActor)\n \n \n elif key==\"l\":\n self.get_input_data(None)\n \n self.ui.vtkWidget.update()\n self.ui.vtkWidget.setFocus()\n\n def get_scale(self):\n '''\n Returns array for the keypress function based on what radio button is selected.\n '''\n if self.ui.xsButton.isChecked():\n s=np.array([self.Zaspect,1,1])\n nl=np.append([self.limits[0]*self.Zaspect,self.limits[1]*self.Zaspect],self.limits[2:])\n axs=np.array([1/self.Zaspect,1,1])\n \n elif self.ui.ysButton.isChecked():\n s=np.array([1,self.Zaspect,1])\n nl=np.append(self.limits[0:2],([self.limits[2]*self.Zaspect,self.limits[3]*self.Zaspect],self.limits[4:]))\n axs=np.array([1,1/self.Zaspect,1])\n else:\n s=np.array([1,1,self.Zaspect])\n nl=np.append(self.limits[0:4],([self.limits[-2]*self.Zaspect,self.limits[-1]*self.Zaspect]))\n axs=np.array([1,1,1/self.Zaspect])\n return s,nl,axs\n\n\ndef apply_trans(P,T):\n '''\n Apply rotation/reflection and translation in homogeneous matrix T on a discrete basis to a Nx3 point cloud P\n '''\n\n return np.dot(P,T[0:3,0:3])+T[0:3,-1]\n\n\nif __name__ == '__main__':\n if len(sys.argv)>1:\n aa_def(sys.argv[1])\n else:\n aa_def()"
] |
[
[
"numpy.matrix",
"numpy.dot",
"numpy.amax",
"scipy.io.whosmat",
"numpy.mean",
"scipy.interpolate.griddata",
"numpy.where",
"scipy.io.loadmat",
"numpy.sin",
"numpy.ravel",
"numpy.amin",
"numpy.linalg.inv",
"matplotlib.path.Path",
"numpy.append",
"numpy.identity",
"scipy.io.savemat",
"numpy.array",
"numpy.cos",
"numpy.empty",
"numpy.ones",
"numpy.sort",
"scipy.spatial.distance.pdist",
"numpy.vstack"
]
] |
chorng/eo-learn
|
[
"a1a3c6fa5568d398f5e43f5ad5aecdfeb05e8d3c"
] |
[
"features/eolearn/tests/test_doubly_logistic_approximation.py"
] |
[
"\"\"\"\nCredits:\nCopyright (c) 2020 Beno Šircelj (Josef Stefan Institute)\nCopyright (c) 2017-2022 Matej Aleksandrov, Žiga Lukšič (Sinergise)\n\nThis source code is licensed under the MIT license found in the LICENSE\nfile in the root directory of this source tree.\n\"\"\"\n\nfrom pytest import approx\nimport numpy as np\n\nfrom eolearn.core import EOPatch, FeatureType\nfrom eolearn.features.doubly_logistic_approximation import DoublyLogisticApproximationTask\n\n\ndef test_double_logistic_approximation(example_eopatch):\n\n data = example_eopatch.data[\"NDVI\"]\n timestamps = example_eopatch.timestamp\n mask = example_eopatch.mask[\"IS_VALID\"]\n indices = list(np.nonzero([t.year == 2016 for t in timestamps])[0])\n start, stop = indices[0], indices[-1] + 2\n\n eopatch = EOPatch()\n eopatch.timestamp = timestamps[start:stop]\n eopatch.data[\"TEST\"] = np.reshape(data[start:stop, 0, 0, 0], (-1, 1, 1, 1))\n eopatch.mask[\"IS_VALID\"] = np.reshape(mask[start:stop, 0, 0, 0], (-1, 1, 1, 1))\n eopatch = DoublyLogisticApproximationTask(\n feature=(FeatureType.DATA, \"TEST\"),\n valid_mask=(FeatureType.MASK, \"IS_VALID\"),\n new_feature=(FeatureType.DATA_TIMELESS, \"TEST_OUT\"),\n ).execute(eopatch)\n\n names = \"c1\", \"c2\", \"a1\", \"a2\", \"a3\", \"a4\", \"a5\"\n values = eopatch.data_timeless[\"TEST_OUT\"].squeeze()\n expected_values = 0.207, 0.464, 0.686, 0.222, 1.204, 0.406, 15.701\n delta = 0.1\n\n for name, value, expected_value in zip(names, values, expected_values):\n assert value == approx(expected_value, abs=delta), f\"Missmatch in value of {name}\"\n"
] |
[
[
"numpy.reshape",
"numpy.nonzero"
]
] |
chrisdonlan/pandas
|
[
"af4e2ce19c9c0b89db4bc06d7730b68068c6aeae"
] |
[
"pandas/core/indexes/base.py"
] |
[
"from datetime import datetime\nimport operator\nfrom textwrap import dedent\nfrom typing import Dict, FrozenSet, Hashable, Optional, Union\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import algos as libalgos, index as libindex, lib\nimport pandas._libs.join as libjoin\nfrom pandas._libs.lib import is_datetime_array\nfrom pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp\nfrom pandas._libs.tslibs.period import IncompatibleFrequency\nfrom pandas._libs.tslibs.timezones import tz_compare\nfrom pandas.compat import set_function_name\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import Appender, Substitution, cache_readonly\n\nfrom pandas.core.dtypes import concat as _concat\nfrom pandas.core.dtypes.cast import maybe_cast_to_integer_array\nfrom pandas.core.dtypes.common import (\n ensure_categorical,\n ensure_int64,\n ensure_object,\n ensure_platform_int,\n is_bool,\n is_bool_dtype,\n is_categorical,\n is_categorical_dtype,\n is_datetime64_any_dtype,\n is_datetime64tz_dtype,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float,\n is_float_dtype,\n is_hashable,\n is_integer,\n is_integer_dtype,\n is_interval_dtype,\n is_iterator,\n is_list_like,\n is_object_dtype,\n is_period_dtype,\n is_scalar,\n is_signed_integer_dtype,\n is_timedelta64_dtype,\n is_unsigned_integer_dtype,\n)\nfrom pandas.core.dtypes.concat import concat_compat\nfrom pandas.core.dtypes.generic import (\n ABCCategorical,\n ABCDataFrame,\n ABCDatetimeArray,\n ABCDatetimeIndex,\n ABCIndexClass,\n ABCIntervalIndex,\n ABCMultiIndex,\n ABCPandasArray,\n ABCPeriodIndex,\n ABCSeries,\n ABCTimedeltaIndex,\n)\nfrom pandas.core.dtypes.missing import array_equivalent, isna\n\nfrom pandas.core import ops\nfrom pandas.core.accessor import CachedAccessor\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.base import IndexOpsMixin, PandasObject\nimport pandas.core.common as com\nfrom pandas.core.construction import extract_array\nfrom pandas.core.indexers import maybe_convert_indices\nfrom pandas.core.indexes.frozen import FrozenList\nimport pandas.core.missing as missing\nfrom pandas.core.ops import get_op_result_name\nfrom pandas.core.ops.invalid import make_invalid_op\nfrom pandas.core.strings import StringMethods\n\nfrom pandas.io.formats.printing import (\n default_pprint,\n format_object_attrs,\n format_object_summary,\n pprint_thing,\n)\n\n__all__ = [\"Index\"]\n\n_unsortable_types = frozenset((\"mixed\", \"mixed-integer\"))\n\n_index_doc_kwargs = dict(\n klass=\"Index\",\n inplace=\"\",\n target_klass=\"Index\",\n raises_section=\"\",\n unique=\"Index\",\n duplicated=\"np.ndarray\",\n)\n_index_shared_docs = dict()\n\n\ndef _make_comparison_op(op, cls):\n def cmp_method(self, other):\n if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)):\n if other.ndim > 0 and len(self) != len(other):\n raise ValueError(\"Lengths must match to compare\")\n\n if is_object_dtype(self) and isinstance(other, ABCCategorical):\n left = type(other)(self._values, dtype=other.dtype)\n return op(left, other)\n elif is_object_dtype(self) and isinstance(other, ExtensionArray):\n # e.g. 
PeriodArray\n with np.errstate(all=\"ignore\"):\n result = op(self.values, other)\n\n elif is_object_dtype(self) and not isinstance(self, ABCMultiIndex):\n # don't pass MultiIndex\n with np.errstate(all=\"ignore\"):\n result = ops.comp_method_OBJECT_ARRAY(op, self.values, other)\n\n else:\n with np.errstate(all=\"ignore\"):\n result = op(self.values, np.asarray(other))\n\n if is_bool_dtype(result):\n return result\n return ops.invalid_comparison(self, other, op)\n\n name = f\"__{op.__name__}__\"\n return set_function_name(cmp_method, name, cls)\n\n\ndef _make_arithmetic_op(op, cls):\n def index_arithmetic_method(self, other):\n if isinstance(other, (ABCSeries, ABCDataFrame, ABCTimedeltaIndex)):\n return NotImplemented\n\n from pandas import Series\n\n result = op(Series(self), other)\n if isinstance(result, tuple):\n return (Index(result[0]), Index(result[1]))\n return Index(result)\n\n name = f\"__{op.__name__}__\"\n # TODO: docstring?\n return set_function_name(index_arithmetic_method, name, cls)\n\n\nclass InvalidIndexError(Exception):\n pass\n\n\n_o_dtype = np.dtype(object)\n_Identity = object\n\n\ndef _new_Index(cls, d):\n \"\"\"\n This is called upon unpickling, rather than the default which doesn't\n have arguments and breaks __new__.\n \"\"\"\n # required for backward compat, because PI can't be instantiated with\n # ordinals through __new__ GH #13277\n if issubclass(cls, ABCPeriodIndex):\n from pandas.core.indexes.period import _new_PeriodIndex\n\n return _new_PeriodIndex(cls, **d)\n\n if issubclass(cls, ABCMultiIndex):\n if \"labels\" in d and \"codes\" not in d:\n # GH#23752 \"labels\" kwarg has been replaced with \"codes\"\n d[\"codes\"] = d.pop(\"labels\")\n\n return cls.__new__(cls, **d)\n\n\nclass Index(IndexOpsMixin, PandasObject):\n \"\"\"\n Immutable ndarray implementing an ordered, sliceable set. 
The basic object\n storing axis labels for all pandas objects.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype (default: object)\n If dtype is None, we find the dtype that best fits the data.\n If an actual dtype is provided, we coerce to that dtype if it's safe.\n Otherwise, an error will be raised.\n copy : bool\n Make a copy of input ndarray.\n name : object\n Name to be stored in the index.\n tupleize_cols : bool (default: True)\n When True, attempt to create a MultiIndex if possible.\n\n See Also\n --------\n RangeIndex : Index implementing a monotonic integer range.\n CategoricalIndex : Index of :class:`Categorical` s.\n MultiIndex : A multi-level, or hierarchical, Index.\n IntervalIndex : An Index of :class:`Interval` s.\n DatetimeIndex, TimedeltaIndex, PeriodIndex\n Int64Index, UInt64Index, Float64Index\n\n Notes\n -----\n An Index instance can **only** contain hashable objects\n\n Examples\n --------\n >>> pd.Index([1, 2, 3])\n Int64Index([1, 2, 3], dtype='int64')\n\n >>> pd.Index(list('abc'))\n Index(['a', 'b', 'c'], dtype='object')\n \"\"\"\n\n # tolist is not actually deprecated, just suppressed in the __dir__\n _deprecations: FrozenSet[str] = (\n PandasObject._deprecations\n | IndexOpsMixin._deprecations\n | frozenset([\"contains\", \"set_value\"])\n )\n\n # To hand over control to subclasses\n _join_precedence = 1\n\n # Cython methods; see github.com/cython/cython/issues/2647\n # for why we need to wrap these instead of making them class attributes\n # Moreover, cython will choose the appropriate-dtyped sub-function\n # given the dtypes of the passed arguments\n def _left_indexer_unique(self, left, right):\n return libjoin.left_join_indexer_unique(left, right)\n\n def _left_indexer(self, left, right):\n return libjoin.left_join_indexer(left, right)\n\n def _inner_indexer(self, left, right):\n return libjoin.inner_join_indexer(left, right)\n\n def _outer_indexer(self, left, right):\n return libjoin.outer_join_indexer(left, right)\n\n _typ = \"index\"\n _data: Union[ExtensionArray, np.ndarray]\n _id = None\n _name: Optional[Hashable] = None\n # MultiIndex.levels previously allowed setting the index name. We\n # don't allow this anymore, and raise if it happens rather than\n # failing silently.\n _no_setting_name: bool = False\n _comparables = [\"name\"]\n _attributes = [\"name\"]\n _is_numeric_dtype = False\n _can_hold_na = True\n\n # would we like our indexing holder to defer to us\n _defer_to_indexing = False\n\n # prioritize current class for _shallow_copy_with_infer,\n # used to infer integers as datetime-likes\n _infer_as_myclass = False\n\n _engine_type = libindex.ObjectEngine\n # whether we support partial string indexing. 
Overridden\n # in DatetimeIndex and PeriodIndex\n _supports_partial_string_indexing = False\n\n _accessors = {\"str\"}\n\n str = CachedAccessor(\"str\", StringMethods)\n\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls, data=None, dtype=None, copy=False, name=None, tupleize_cols=True, **kwargs,\n ) -> \"Index\":\n\n from .range import RangeIndex\n from pandas import PeriodIndex, DatetimeIndex, TimedeltaIndex\n from .numeric import Float64Index, Int64Index, UInt64Index\n from .interval import IntervalIndex\n from .category import CategoricalIndex\n\n name = maybe_extract_name(name, data, cls)\n\n if isinstance(data, ABCPandasArray):\n # ensure users don't accidentally put a PandasArray in an index.\n data = data.to_numpy()\n\n # range\n if isinstance(data, RangeIndex):\n return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)\n elif isinstance(data, range):\n return RangeIndex.from_range(data, dtype=dtype, name=name)\n\n # categorical\n elif is_categorical_dtype(data) or is_categorical_dtype(dtype):\n return CategoricalIndex(data, dtype=dtype, copy=copy, name=name, **kwargs)\n\n # interval\n elif is_interval_dtype(data) or is_interval_dtype(dtype):\n closed = kwargs.pop(\"closed\", None)\n if is_dtype_equal(_o_dtype, dtype):\n return IntervalIndex(\n data, name=name, copy=copy, closed=closed, **kwargs\n ).astype(object)\n return IntervalIndex(\n data, dtype=dtype, name=name, copy=copy, closed=closed, **kwargs\n )\n\n elif (\n is_datetime64_any_dtype(data)\n or is_datetime64_any_dtype(dtype)\n or \"tz\" in kwargs\n ):\n if is_dtype_equal(_o_dtype, dtype):\n # GH#23524 passing `dtype=object` to DatetimeIndex is invalid,\n # will raise in the where `data` is already tz-aware. So\n # we leave it out of this step and cast to object-dtype after\n # the DatetimeIndex construction.\n # Note we can pass copy=False because the .astype below\n # will always make a copy\n return DatetimeIndex(data, copy=False, name=name, **kwargs).astype(\n object\n )\n else:\n return DatetimeIndex(data, copy=copy, name=name, dtype=dtype, **kwargs)\n\n elif is_timedelta64_dtype(data) or is_timedelta64_dtype(dtype):\n if is_dtype_equal(_o_dtype, dtype):\n # Note we can pass copy=False because the .astype below\n # will always make a copy\n return TimedeltaIndex(data, copy=False, name=name, **kwargs).astype(\n object\n )\n else:\n return TimedeltaIndex(data, copy=copy, name=name, dtype=dtype, **kwargs)\n\n elif is_period_dtype(data) or is_period_dtype(dtype):\n if is_dtype_equal(_o_dtype, dtype):\n return PeriodIndex(data, copy=False, name=name, **kwargs).astype(object)\n return PeriodIndex(data, dtype=dtype, copy=copy, name=name, **kwargs)\n\n # extension dtype\n elif is_extension_array_dtype(data) or is_extension_array_dtype(dtype):\n if not (dtype is None or is_object_dtype(dtype)):\n # coerce to the provided dtype\n ea_cls = dtype.construct_array_type()\n data = ea_cls._from_sequence(data, dtype=dtype, copy=False)\n else:\n data = np.asarray(data, dtype=object)\n\n # coerce to the object dtype\n data = data.astype(object)\n return Index(data, dtype=object, copy=copy, name=name, **kwargs)\n\n # index-like\n elif isinstance(data, (np.ndarray, Index, ABCSeries)):\n if dtype is not None:\n # we need to avoid having numpy coerce\n # things that look like ints/floats to ints unless\n # they are actually ints, e.g. 
'0' and 0.0\n # should not be coerced\n # GH 11836\n data = _maybe_cast_with_dtype(data, dtype, copy)\n dtype = data.dtype # TODO: maybe not for object?\n\n # maybe coerce to a sub-class\n if is_signed_integer_dtype(data.dtype):\n return Int64Index(data, copy=copy, dtype=dtype, name=name)\n elif is_unsigned_integer_dtype(data.dtype):\n return UInt64Index(data, copy=copy, dtype=dtype, name=name)\n elif is_float_dtype(data.dtype):\n return Float64Index(data, copy=copy, dtype=dtype, name=name)\n elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):\n subarr = data.astype(\"object\")\n else:\n subarr = com.asarray_tuplesafe(data, dtype=object)\n\n # asarray_tuplesafe does not always copy underlying data,\n # so need to make sure that this happens\n if copy:\n subarr = subarr.copy()\n\n if dtype is None:\n new_data, new_dtype = _maybe_cast_data_without_dtype(subarr)\n if new_dtype is not None:\n return cls(\n new_data, dtype=new_dtype, copy=False, name=name, **kwargs\n )\n\n if kwargs:\n raise TypeError(f\"Unexpected keyword arguments {repr(set(kwargs))}\")\n return cls._simple_new(subarr, name, **kwargs)\n\n elif hasattr(data, \"__array__\"):\n return Index(np.asarray(data), dtype=dtype, copy=copy, name=name, **kwargs)\n elif data is None or is_scalar(data):\n raise cls._scalar_data_error(data)\n else:\n if tupleize_cols and is_list_like(data):\n # GH21470: convert iterable to list before determining if empty\n if is_iterator(data):\n data = list(data)\n\n if data and all(isinstance(e, tuple) for e in data):\n # we must be all tuples, otherwise don't construct\n # 10697\n from .multi import MultiIndex\n\n return MultiIndex.from_tuples(\n data, names=name or kwargs.get(\"names\")\n )\n # other iterable of some kind\n subarr = com.asarray_tuplesafe(data, dtype=object)\n return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)\n\n \"\"\"\n NOTE for new Index creation:\n\n - _simple_new: It returns new Index with the same type as the caller.\n All metadata (such as name) must be provided by caller's responsibility.\n Using _shallow_copy is recommended because it fills these metadata\n otherwise specified.\n\n - _shallow_copy: It returns new Index with the same type (using\n _simple_new), but fills caller's metadata otherwise specified. Passed\n kwargs will overwrite corresponding metadata.\n\n - _shallow_copy_with_infer: It returns new Index inferring its type\n from passed values. It fills caller's metadata otherwise specified as the\n same as _shallow_copy.\n\n See each method's docstring.\n \"\"\"\n\n @property\n def asi8(self):\n \"\"\"\n Integer representation of the values.\n\n Returns\n -------\n ndarray\n An ndarray with int64 dtype.\n \"\"\"\n return None\n\n @classmethod\n def _simple_new(cls, values, name=None, dtype=None):\n \"\"\"\n We require that we have a dtype compat for the values. If we are passed\n a non-dtype compat, then coerce using the constructor.\n\n Must be careful not to recurse.\n \"\"\"\n if isinstance(values, (ABCSeries, ABCIndexClass)):\n # Index._data must always be an ndarray.\n # This is no-copy for when _values is an ndarray,\n # which should be always at this point.\n values = np.asarray(values._values)\n\n result = object.__new__(cls)\n result._data = values\n # _index_data is a (temporary?) fix to ensure that the direct data\n # manipulation we do in `_libs/reduction.pyx` continues to work.\n # We need access to the actual ndarray, since we're messing with\n # data buffers and strides. 
We don't re-use `_ndarray_values`, since\n # we actually set this value too.\n result._index_data = values\n result._name = name\n\n return result._reset_identity()\n\n @cache_readonly\n def _constructor(self):\n return type(self)\n\n # --------------------------------------------------------------------\n # Index Internals Methods\n\n def _get_attributes_dict(self):\n \"\"\"\n Return an attributes dict for my class.\n \"\"\"\n return {k: getattr(self, k, None) for k in self._attributes}\n\n _index_shared_docs[\n \"_shallow_copy\"\n ] = \"\"\"\n Create a new Index with the same class as the caller, don't copy the\n data, use the same object attributes with passed in attributes taking\n precedence.\n\n *this is an internal non-public method*\n\n Parameters\n ----------\n values : the values to create the new Index, optional\n kwargs : updates the default attributes for this Index\n \"\"\"\n\n @Appender(_index_shared_docs[\"_shallow_copy\"])\n def _shallow_copy(self, values=None, **kwargs):\n if values is None:\n values = self.values\n attributes = self._get_attributes_dict()\n attributes.update(kwargs)\n if not len(values) and \"dtype\" not in kwargs:\n attributes[\"dtype\"] = self.dtype\n\n # _simple_new expects an the type of self._data\n values = getattr(values, \"_values\", values)\n if isinstance(values, ABCDatetimeArray):\n # `self.values` returns `self` for tz-aware, so we need to unwrap\n # more specifically\n values = values.asi8\n\n return self._simple_new(values, **attributes)\n\n def _shallow_copy_with_infer(self, values, **kwargs):\n \"\"\"\n Create a new Index inferring the class with passed value, don't copy\n the data, use the same object attributes with passed in attributes\n taking precedence.\n\n *this is an internal non-public method*\n\n Parameters\n ----------\n values : the values to create the new Index, optional\n kwargs : updates the default attributes for this Index\n \"\"\"\n attributes = self._get_attributes_dict()\n attributes.update(kwargs)\n attributes[\"copy\"] = False\n if not len(values) and \"dtype\" not in kwargs:\n attributes[\"dtype\"] = self.dtype\n if self._infer_as_myclass:\n try:\n return self._constructor(values, **attributes)\n except (TypeError, ValueError):\n pass\n return Index(values, **attributes)\n\n def _update_inplace(self, result, **kwargs):\n # guard when called from IndexOpsMixin\n raise TypeError(\"Index can't be updated inplace\")\n\n def is_(self, other) -> bool:\n \"\"\"\n More flexible, faster check like ``is`` but that works through views.\n\n Note: this is *not* the same as ``Index.identical()``, which checks\n that metadata is also the same.\n\n Parameters\n ----------\n other : object\n other object to compare against.\n\n Returns\n -------\n True if both have same underlying data, False otherwise : bool\n \"\"\"\n # use something other than None to be clearer\n return self._id is getattr(other, \"_id\", Ellipsis) and self._id is not None\n\n def _reset_identity(self):\n \"\"\"\n Initializes or resets ``_id`` attribute with new object.\n \"\"\"\n self._id = _Identity()\n return self\n\n def _cleanup(self):\n self._engine.clear_mapping()\n\n @cache_readonly\n def _engine(self):\n # property, for now, slow to look up\n\n # to avoid a reference cycle, bind `_ndarray_values` to a local variable, so\n # `self` is not passed into the lambda.\n _ndarray_values = self._ndarray_values\n return self._engine_type(lambda: _ndarray_values, len(self))\n\n # --------------------------------------------------------------------\n # Array-Like 
Methods\n\n # ndarray compat\n def __len__(self) -> int:\n \"\"\"\n Return the length of the Index.\n \"\"\"\n return len(self._data)\n\n def __array__(self, dtype=None):\n \"\"\"\n The array interface, return my values.\n \"\"\"\n return np.asarray(self._data, dtype=dtype)\n\n def __array_wrap__(self, result, context=None):\n \"\"\"\n Gets called after a ufunc.\n \"\"\"\n result = lib.item_from_zerodim(result)\n if is_bool_dtype(result) or lib.is_scalar(result):\n return result\n\n attrs = self._get_attributes_dict()\n return Index(result, **attrs)\n\n @cache_readonly\n def dtype(self):\n \"\"\"\n Return the dtype object of the underlying data.\n \"\"\"\n return self._data.dtype\n\n def ravel(self, order=\"C\"):\n \"\"\"\n Return an ndarray of the flattened values of the underlying data.\n\n Returns\n -------\n numpy.ndarray\n Flattened array.\n\n See Also\n --------\n numpy.ndarray.ravel\n \"\"\"\n return self._ndarray_values.ravel(order=order)\n\n def view(self, cls=None):\n\n # we need to see if we are subclassing an\n # index type here\n if cls is not None and not hasattr(cls, \"_typ\"):\n result = self._data.view(cls)\n else:\n result = self._shallow_copy()\n if isinstance(result, Index):\n result._id = self._id\n return result\n\n _index_shared_docs[\n \"astype\"\n ] = \"\"\"\n Create an Index with values cast to dtypes. The class of a new Index\n is determined by dtype. When conversion is impossible, a ValueError\n exception is raised.\n\n Parameters\n ----------\n dtype : numpy dtype or pandas type\n Note that any signed integer `dtype` is treated as ``'int64'``,\n and any unsigned integer `dtype` is treated as ``'uint64'``,\n regardless of the size.\n copy : bool, default True\n By default, astype always returns a newly allocated object.\n If copy is set to False and internal requirements on dtype are\n satisfied, the original data is used to create a new Index\n or the original Index is returned.\n\n Returns\n -------\n Index\n Index with values cast to specified dtype.\n \"\"\"\n\n @Appender(_index_shared_docs[\"astype\"])\n def astype(self, dtype, copy=True):\n if is_dtype_equal(self.dtype, dtype):\n return self.copy() if copy else self\n\n elif is_categorical_dtype(dtype):\n from .category import CategoricalIndex\n\n return CategoricalIndex(self.values, name=self.name, dtype=dtype, copy=copy)\n\n elif is_extension_array_dtype(dtype):\n return Index(np.asarray(self), dtype=dtype, copy=copy)\n\n try:\n return Index(\n self.values.astype(dtype, copy=copy), name=self.name, dtype=dtype\n )\n except (TypeError, ValueError):\n raise TypeError(f\"Cannot cast {type(self).__name__} to dtype {dtype}\")\n\n _index_shared_docs[\n \"take\"\n ] = \"\"\"\n Return a new %(klass)s of the values selected by the indices.\n\n For internal compatibility with numpy arrays.\n\n Parameters\n ----------\n indices : list\n Indices to be taken.\n axis : int, optional\n The axis over which to select values, always 0.\n allow_fill : bool, default True\n fill_value : bool, default None\n If allow_fill=True and fill_value is not None, indices specified by\n -1 is regarded as NA. 
If Index doesn't hold NA, raise ValueError.\n\n Returns\n -------\n numpy.ndarray\n Elements of given indices.\n\n See Also\n --------\n numpy.ndarray.take\n \"\"\"\n\n @Appender(_index_shared_docs[\"take\"] % _index_doc_kwargs)\n def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):\n if kwargs:\n nv.validate_take(tuple(), kwargs)\n indices = ensure_platform_int(indices)\n if self._can_hold_na:\n taken = self._assert_take_fillable(\n self.values,\n indices,\n allow_fill=allow_fill,\n fill_value=fill_value,\n na_value=self._na_value,\n )\n else:\n if allow_fill and fill_value is not None:\n cls_name = type(self).__name__\n raise ValueError(\n f\"Unable to fill values because {cls_name} cannot contain NA\"\n )\n taken = self.values.take(indices)\n return self._shallow_copy(taken)\n\n def _assert_take_fillable(\n self, values, indices, allow_fill=True, fill_value=None, na_value=np.nan\n ):\n \"\"\"\n Internal method to handle NA filling of take.\n \"\"\"\n indices = ensure_platform_int(indices)\n\n # only fill if we are passing a non-None fill_value\n if allow_fill and fill_value is not None:\n if (indices < -1).any():\n raise ValueError(\n \"When allow_fill=True and fill_value is not None, \"\n \"all indices must be >= -1\"\n )\n taken = algos.take(\n values, indices, allow_fill=allow_fill, fill_value=na_value\n )\n else:\n taken = values.take(indices)\n return taken\n\n _index_shared_docs[\n \"repeat\"\n ] = \"\"\"\n Repeat elements of a %(klass)s.\n\n Returns a new %(klass)s where each element of the current %(klass)s\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int or array of ints\n The number of repetitions for each element. This should be a\n non-negative integer. Repeating 0 times will return an empty\n %(klass)s.\n axis : None\n Must be ``None``. Has no effect but is accepted for compatibility\n with numpy.\n\n Returns\n -------\n repeated_index : %(klass)s\n Newly created %(klass)s with repeated elements.\n\n See Also\n --------\n Series.repeat : Equivalent function for Series.\n numpy.repeat : Similar method for :class:`numpy.ndarray`.\n\n Examples\n --------\n >>> idx = pd.Index(['a', 'b', 'c'])\n >>> idx\n Index(['a', 'b', 'c'], dtype='object')\n >>> idx.repeat(2)\n Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object')\n >>> idx.repeat([1, 2, 3])\n Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object')\n \"\"\"\n\n @Appender(_index_shared_docs[\"repeat\"] % _index_doc_kwargs)\n def repeat(self, repeats, axis=None):\n repeats = ensure_platform_int(repeats)\n nv.validate_repeat(tuple(), dict(axis=axis))\n return self._shallow_copy(self._values.repeat(repeats))\n\n # --------------------------------------------------------------------\n # Copying Methods\n\n _index_shared_docs[\n \"copy\"\n ] = \"\"\"\n Make a copy of this object. 
Name and dtype sets those attributes on\n the new object.\n\n Parameters\n ----------\n name : str, optional\n deep : bool, default False\n dtype : numpy dtype or pandas type\n\n Returns\n -------\n copy : Index\n\n Notes\n -----\n In most cases, there should be no functional difference from using\n ``deep``, but if ``deep`` is passed it will attempt to deepcopy.\n \"\"\"\n\n @Appender(_index_shared_docs[\"copy\"])\n def copy(self, name=None, deep=False, dtype=None, **kwargs):\n if deep:\n new_index = self._shallow_copy(self._data.copy())\n else:\n new_index = self._shallow_copy()\n\n names = kwargs.get(\"names\")\n names = self._validate_names(name=name, names=names, deep=deep)\n new_index = new_index.set_names(names)\n\n if dtype:\n new_index = new_index.astype(dtype)\n return new_index\n\n def __copy__(self, **kwargs):\n return self.copy(**kwargs)\n\n def __deepcopy__(self, memo=None):\n \"\"\"\n Parameters\n ----------\n memo, default None\n Standard signature. Unused\n \"\"\"\n return self.copy(deep=True)\n\n # --------------------------------------------------------------------\n # Rendering Methods\n\n def __repr__(self):\n \"\"\"\n Return a string representation for this object.\n \"\"\"\n klass_name = type(self).__name__\n data = self._format_data()\n attrs = self._format_attrs()\n space = self._format_space()\n attrs_str = [f\"{k}={v}\" for k, v in attrs]\n prepr = f\",{space}\".join(attrs_str)\n\n # no data provided, just attributes\n if data is None:\n data = \"\"\n\n res = f\"{klass_name}({data}{prepr})\"\n\n return res\n\n def _format_space(self):\n\n # using space here controls if the attributes\n # are line separated or not (the default)\n\n # max_seq_items = get_option('display.max_seq_items')\n # if len(self) > max_seq_items:\n # space = \"\\n%s\" % (' ' * (len(klass) + 1))\n return \" \"\n\n @property\n def _formatter_func(self):\n \"\"\"\n Return the formatter function.\n \"\"\"\n return default_pprint\n\n def _format_data(self, name=None):\n \"\"\"\n Return the formatted data as a unicode string.\n \"\"\"\n\n # do we want to justify (only do so for non-objects)\n is_justify = not (\n self.inferred_type in (\"string\", \"unicode\")\n or (\n self.inferred_type == \"categorical\" and is_object_dtype(self.categories)\n )\n )\n\n return format_object_summary(\n self, self._formatter_func, is_justify=is_justify, name=name\n )\n\n def _format_attrs(self):\n \"\"\"\n Return a list of tuples of the (attr,formatted_value).\n \"\"\"\n return format_object_attrs(self)\n\n def _mpl_repr(self):\n # how to represent ourselves to matplotlib\n return self.values\n\n def format(self, name=False, formatter=None, **kwargs):\n \"\"\"\n Render a string representation of the Index.\n \"\"\"\n header = []\n if name:\n header.append(\n pprint_thing(self.name, escape_chars=(\"\\t\", \"\\r\", \"\\n\"))\n if self.name is not None\n else \"\"\n )\n\n if formatter is not None:\n return header + list(self.map(formatter))\n\n return self._format_with_header(header, **kwargs)\n\n def _format_with_header(self, header, na_rep=\"NaN\", **kwargs):\n values = self.values\n\n from pandas.io.formats.format import format_array\n\n if is_categorical_dtype(values.dtype):\n values = np.array(values)\n\n elif is_object_dtype(values.dtype):\n values = lib.maybe_convert_objects(values, safe=1)\n\n if is_object_dtype(values.dtype):\n result = [pprint_thing(x, escape_chars=(\"\\t\", \"\\r\", \"\\n\")) for x in values]\n\n # could have nans\n mask = isna(values)\n if mask.any():\n result = np.array(result)\n 
result[mask] = na_rep\n result = result.tolist()\n\n else:\n result = _trim_front(format_array(values, None, justify=\"left\"))\n return header + result\n\n def to_native_types(self, slicer=None, **kwargs):\n \"\"\"\n Format specified values of `self` and return them.\n\n Parameters\n ----------\n slicer : int, array-like\n An indexer into `self` that specifies which values\n are used in the formatting process.\n kwargs : dict\n Options for specifying how the values should be formatted.\n These options include the following:\n\n 1) na_rep : str\n The value that serves as a placeholder for NULL values\n 2) quoting : bool or None\n Whether or not there are quoted values in `self`\n 3) date_format : str\n The format used to represent date-like values.\n\n Returns\n -------\n numpy.ndarray\n Formatted values.\n \"\"\"\n\n values = self\n if slicer is not None:\n values = values[slicer]\n return values._format_native_types(**kwargs)\n\n def _format_native_types(self, na_rep=\"\", quoting=None, **kwargs):\n \"\"\"\n Actually format specific types of the index.\n \"\"\"\n mask = isna(self)\n if not self.is_object() and not quoting:\n values = np.asarray(self).astype(str)\n else:\n values = np.array(self, dtype=object, copy=True)\n\n values[mask] = na_rep\n return values\n\n def _summary(self, name=None):\n \"\"\"\n Return a summarized representation.\n\n Parameters\n ----------\n name : str\n name to use in the summary representation\n\n Returns\n -------\n String with a summarized representation of the index\n \"\"\"\n if len(self) > 0:\n head = self[0]\n if hasattr(head, \"format\") and not isinstance(head, str):\n head = head.format()\n tail = self[-1]\n if hasattr(tail, \"format\") and not isinstance(tail, str):\n tail = tail.format()\n index_summary = f\", {head} to {tail}\"\n else:\n index_summary = \"\"\n\n if name is None:\n name = type(self).__name__\n return f\"{name}: {len(self)} entries{index_summary}\"\n\n # --------------------------------------------------------------------\n # Conversion Methods\n\n def to_flat_index(self):\n \"\"\"\n Identity method.\n\n .. versionadded:: 0.24.0\n\n This is implemented for compatibility with subclass implementations\n when chaining.\n\n Returns\n -------\n pd.Index\n Caller.\n\n See Also\n --------\n MultiIndex.to_flat_index : Subclass implementation.\n \"\"\"\n return self\n\n def to_series(self, index=None, name=None):\n \"\"\"\n Create a Series with both index and values equal to the index keys.\n\n Useful with map for returning an indexer based on an index.\n\n Parameters\n ----------\n index : Index, optional\n Index of resulting Series. If None, defaults to original index.\n name : str, optional\n Dame of resulting Series. If None, defaults to name of original\n index.\n\n Returns\n -------\n Series\n The dtype will be based on the type of the Index values.\n \"\"\"\n\n from pandas import Series\n\n if index is None:\n index = self._shallow_copy()\n if name is None:\n name = self.name\n\n return Series(self.values.copy(), index=index, name=name)\n\n def to_frame(self, index=True, name=None):\n \"\"\"\n Create a DataFrame with a column containing the Index.\n\n .. 
versionadded:: 0.24.0\n\n Parameters\n ----------\n index : bool, default True\n Set the index of the returned DataFrame as the original Index.\n\n name : object, default None\n The passed name should substitute for the index name (if it has\n one).\n\n Returns\n -------\n DataFrame\n DataFrame containing the original Index data.\n\n See Also\n --------\n Index.to_series : Convert an Index to a Series.\n Series.to_frame : Convert Series to DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')\n >>> idx.to_frame()\n animal\n animal\n Ant Ant\n Bear Bear\n Cow Cow\n\n By default, the original Index is reused. To enforce a new Index:\n\n >>> idx.to_frame(index=False)\n animal\n 0 Ant\n 1 Bear\n 2 Cow\n\n To override the name of the resulting column, specify `name`:\n\n >>> idx.to_frame(index=False, name='zoo')\n zoo\n 0 Ant\n 1 Bear\n 2 Cow\n \"\"\"\n\n from pandas import DataFrame\n\n if name is None:\n name = self.name or 0\n result = DataFrame({name: self._values.copy()})\n\n if index:\n result.index = self\n return result\n\n # --------------------------------------------------------------------\n # Name-Centric Methods\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n if self._no_setting_name:\n # Used in MultiIndex.levels to avoid silently ignoring name updates.\n raise RuntimeError(\n \"Cannot set name on a level of a MultiIndex. Use \"\n \"'MultiIndex.set_names' instead.\"\n )\n maybe_extract_name(value, None, type(self))\n self._name = value\n\n def _validate_names(self, name=None, names=None, deep=False):\n \"\"\"\n Handles the quirks of having a singular 'name' parameter for general\n Index and plural 'names' parameter for MultiIndex.\n \"\"\"\n from copy import deepcopy\n\n if names is not None and name is not None:\n raise TypeError(\"Can only provide one of `names` and `name`\")\n elif names is None and name is None:\n return deepcopy(self.names) if deep else self.names\n elif names is not None:\n if not is_list_like(names):\n raise TypeError(\"Must pass list-like as `names`.\")\n return names\n else:\n if not is_list_like(name):\n return [name]\n return name\n\n def _get_names(self):\n return FrozenList((self.name,))\n\n def _set_names(self, values, level=None):\n \"\"\"\n Set new names on index. Each name has to be a hashable type.\n\n Parameters\n ----------\n values : str or sequence\n name(s) to set\n level : int, level name, or sequence of int/level names (default None)\n If the index is a MultiIndex (hierarchical), level(s) to set (None\n for all levels). Otherwise level must be None\n\n Raises\n ------\n TypeError if each name is not hashable.\n \"\"\"\n if not is_list_like(values):\n raise ValueError(\"Names must be a list-like\")\n if len(values) != 1:\n raise ValueError(f\"Length of new names must be 1, got {len(values)}\")\n\n # GH 20527\n # All items in 'name' need to be hashable:\n for name in values:\n if not is_hashable(name):\n raise TypeError(f\"{type(self).__name__}.name must be a hashable type\")\n self._name = values[0]\n\n names = property(fset=_set_names, fget=_get_names)\n\n def set_names(self, names, level=None, inplace=False):\n \"\"\"\n Set Index or MultiIndex name.\n\n Able to set new names partially and by level.\n\n Parameters\n ----------\n names : label or list of label\n Name(s) to set.\n level : int, label or list of int or label, optional\n If the index is a MultiIndex, level(s) to set (None for all\n levels). 
Otherwise level must be None.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Index or\n MultiIndex.\n\n Returns\n -------\n Index\n The same type as the caller or None if inplace is True.\n\n See Also\n --------\n Index.rename : Able to set new names without level.\n\n Examples\n --------\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx\n Int64Index([1, 2, 3, 4], dtype='int64')\n >>> idx.set_names('quarter')\n Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')\n\n >>> idx = pd.MultiIndex.from_product([['python', 'cobra'],\n ... [2018, 2019]])\n >>> idx\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n )\n >>> idx.set_names(['kind', 'year'], inplace=True)\n >>> idx\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['kind', 'year'])\n >>> idx.set_names('species', level=0)\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['species', 'year'])\n \"\"\"\n\n if level is not None and not isinstance(self, ABCMultiIndex):\n raise ValueError(\"Level must be None for non-MultiIndex\")\n\n if level is not None and not is_list_like(level) and is_list_like(names):\n raise TypeError(\"Names must be a string when a single level is provided.\")\n\n if not is_list_like(names) and level is None and self.nlevels > 1:\n raise TypeError(\"Must pass list-like as `names`.\")\n\n if not is_list_like(names):\n names = [names]\n if level is not None and not is_list_like(level):\n level = [level]\n\n if inplace:\n idx = self\n else:\n idx = self._shallow_copy()\n idx._set_names(names, level=level)\n if not inplace:\n return idx\n\n def rename(self, name, inplace=False):\n \"\"\"\n Alter Index or MultiIndex name.\n\n Able to set new names without level. Defaults to returning new index.\n Length of names must match number of levels in MultiIndex.\n\n Parameters\n ----------\n name : label or list of labels\n Name(s) to set.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Index or\n MultiIndex.\n\n Returns\n -------\n Index\n The same type as the caller or None if inplace is True.\n\n See Also\n --------\n Index.set_names : Able to set new names partially and by level.\n\n Examples\n --------\n >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score')\n >>> idx.rename('grade')\n Index(['A', 'C', 'A', 'B'], dtype='object', name='grade')\n\n >>> idx = pd.MultiIndex.from_product([['python', 'cobra'],\n ... [2018, 2019]],\n ... 
names=['kind', 'year'])\n >>> idx\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['kind', 'year'])\n >>> idx.rename(['species', 'year'])\n MultiIndex([('python', 2018),\n ('python', 2019),\n ( 'cobra', 2018),\n ( 'cobra', 2019)],\n names=['species', 'year'])\n >>> idx.rename('species')\n Traceback (most recent call last):\n TypeError: Must pass list-like as `names`.\n \"\"\"\n return self.set_names([name], inplace=inplace)\n\n # --------------------------------------------------------------------\n # Level-Centric Methods\n\n @property\n def nlevels(self) -> int:\n \"\"\"\n Number of levels.\n \"\"\"\n return 1\n\n def _sort_levels_monotonic(self):\n \"\"\"\n Compat with MultiIndex.\n \"\"\"\n return self\n\n def _validate_index_level(self, level):\n \"\"\"\n Validate index level.\n\n For single-level Index getting level number is a no-op, but some\n verification must be done like in MultiIndex.\n\n \"\"\"\n if isinstance(level, int):\n if level < 0 and level != -1:\n raise IndexError(\n \"Too many levels: Index has only 1 level, \"\n f\"{level} is not a valid level number\"\n )\n elif level > 0:\n raise IndexError(\n f\"Too many levels: Index has only 1 level, not {level + 1}\"\n )\n elif level != self.name:\n raise KeyError(\n f\"Requested level ({level}) does not match index name ({self.name})\"\n )\n\n def _get_level_number(self, level):\n self._validate_index_level(level)\n return 0\n\n def sortlevel(self, level=None, ascending=True, sort_remaining=None):\n \"\"\"\n For internal compatibility with with the Index API.\n\n Sort the Index. This is for compat with MultiIndex\n\n Parameters\n ----------\n ascending : bool, default True\n False to sort in descending order\n\n level, sort_remaining are compat parameters\n\n Returns\n -------\n Index\n \"\"\"\n return self.sort_values(return_indexer=True, ascending=ascending)\n\n def _get_level_values(self, level):\n \"\"\"\n Return an Index of values for requested level.\n\n This is primarily useful to get an individual level of values from a\n MultiIndex, but is provided on Index as well for compatibility.\n\n Parameters\n ----------\n level : int or str\n It is either the integer position or the name of the level.\n\n Returns\n -------\n Index\n Calling object, as there is only one level in the Index.\n\n See Also\n --------\n MultiIndex.get_level_values : Get values for a level of a MultiIndex.\n\n Notes\n -----\n For Index, level should be 0, since there are no multiple levels.\n\n Examples\n --------\n\n >>> idx = pd.Index(list('abc'))\n >>> idx\n Index(['a', 'b', 'c'], dtype='object')\n\n Get level values by supplying `level` as integer:\n\n >>> idx.get_level_values(0)\n Index(['a', 'b', 'c'], dtype='object')\n \"\"\"\n self._validate_index_level(level)\n return self\n\n get_level_values = _get_level_values\n\n def droplevel(self, level=0):\n \"\"\"\n Return index with requested level(s) removed.\n\n If resulting index has only 1 level left, the result will be\n of Index type, not MultiIndex.\n\n .. 
versionadded:: 0.23.1 (support for non-MultiIndex)\n\n Parameters\n ----------\n level : int, str, or list-like, default 0\n If a string is given, must be the name of a level\n If list-like, elements must be names or indexes of levels.\n\n Returns\n -------\n Index or MultiIndex\n \"\"\"\n if not isinstance(level, (tuple, list)):\n level = [level]\n\n levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]\n\n if len(level) == 0:\n return self\n if len(level) >= self.nlevels:\n raise ValueError(\n f\"Cannot remove {len(level)} levels from an index with {self.nlevels} \"\n \"levels: at least one level must be left.\"\n )\n # The two checks above guarantee that here self is a MultiIndex\n\n new_levels = list(self.levels)\n new_codes = list(self.codes)\n new_names = list(self.names)\n\n for i in levnums:\n new_levels.pop(i)\n new_codes.pop(i)\n new_names.pop(i)\n\n if len(new_levels) == 1:\n\n # set nan if needed\n mask = new_codes[0] == -1\n result = new_levels[0].take(new_codes[0])\n if mask.any():\n result = result.putmask(mask, np.nan)\n\n result._name = new_names[0]\n return result\n else:\n from .multi import MultiIndex\n\n return MultiIndex(\n levels=new_levels,\n codes=new_codes,\n names=new_names,\n verify_integrity=False,\n )\n\n _index_shared_docs[\n \"_get_grouper_for_level\"\n ] = \"\"\"\n Get index grouper corresponding to an index level\n\n Parameters\n ----------\n mapper: Group mapping function or None\n Function mapping index values to groups\n level : int or None\n Index level\n\n Returns\n -------\n grouper : Index\n Index of values to group on.\n labels : ndarray of int or None\n Array of locations in level_index.\n uniques : Index or None\n Index of unique values for level.\n \"\"\"\n\n @Appender(_index_shared_docs[\"_get_grouper_for_level\"])\n def _get_grouper_for_level(self, mapper, level=None):\n assert level is None or level == 0\n if mapper is None:\n grouper = self\n else:\n grouper = self.map(mapper)\n\n return grouper, None, None\n\n # --------------------------------------------------------------------\n # Introspection Methods\n\n @property\n def is_monotonic(self) -> bool:\n \"\"\"\n Alias for is_monotonic_increasing.\n \"\"\"\n return self.is_monotonic_increasing\n\n @property\n def is_monotonic_increasing(self):\n \"\"\"\n Return if the index is monotonic increasing (only equal or\n increasing) values.\n\n Examples\n --------\n >>> Index([1, 2, 3]).is_monotonic_increasing\n True\n >>> Index([1, 2, 2]).is_monotonic_increasing\n True\n >>> Index([1, 3, 2]).is_monotonic_increasing\n False\n \"\"\"\n return self._engine.is_monotonic_increasing\n\n @property\n def is_monotonic_decreasing(self) -> bool:\n \"\"\"\n Return if the index is monotonic decreasing (only equal or\n decreasing) values.\n\n Examples\n --------\n >>> Index([3, 2, 1]).is_monotonic_decreasing\n True\n >>> Index([3, 2, 2]).is_monotonic_decreasing\n True\n >>> Index([3, 1, 2]).is_monotonic_decreasing\n False\n \"\"\"\n return self._engine.is_monotonic_decreasing\n\n @property\n def _is_strictly_monotonic_increasing(self) -> bool:\n \"\"\"\n Return if the index is strictly monotonic increasing\n (only increasing) values.\n\n Examples\n --------\n >>> Index([1, 2, 3])._is_strictly_monotonic_increasing\n True\n >>> Index([1, 2, 2])._is_strictly_monotonic_increasing\n False\n >>> Index([1, 3, 2])._is_strictly_monotonic_increasing\n False\n \"\"\"\n return self.is_unique and self.is_monotonic_increasing\n\n @property\n def _is_strictly_monotonic_decreasing(self) -> bool:\n \"\"\"\n 
Return if the index is strictly monotonic decreasing\n (only decreasing) values.\n\n Examples\n --------\n >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing\n True\n >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing\n False\n >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing\n False\n \"\"\"\n return self.is_unique and self.is_monotonic_decreasing\n\n @cache_readonly\n def is_unique(self) -> bool:\n \"\"\"\n Return if the index has unique values.\n \"\"\"\n return self._engine.is_unique\n\n @property\n def has_duplicates(self) -> bool:\n return not self.is_unique\n\n def is_boolean(self) -> bool:\n return self.inferred_type in [\"boolean\"]\n\n def is_integer(self) -> bool:\n return self.inferred_type in [\"integer\"]\n\n def is_floating(self) -> bool:\n return self.inferred_type in [\"floating\", \"mixed-integer-float\", \"integer-na\"]\n\n def is_numeric(self) -> bool:\n return self.inferred_type in [\"integer\", \"floating\"]\n\n def is_object(self) -> bool:\n return is_object_dtype(self.dtype)\n\n def is_categorical(self) -> bool:\n \"\"\"\n Check if the Index holds categorical data.\n\n Returns\n -------\n boolean\n True if the Index is categorical.\n\n See Also\n --------\n CategoricalIndex : Index for categorical data.\n\n Examples\n --------\n >>> idx = pd.Index([\"Watermelon\", \"Orange\", \"Apple\",\n ... \"Watermelon\"]).astype(\"category\")\n >>> idx.is_categorical()\n True\n\n >>> idx = pd.Index([1, 3, 5, 7])\n >>> idx.is_categorical()\n False\n\n >>> s = pd.Series([\"Peter\", \"Victor\", \"Elisabeth\", \"Mar\"])\n >>> s\n 0 Peter\n 1 Victor\n 2 Elisabeth\n 3 Mar\n dtype: object\n >>> s.index.is_categorical()\n False\n \"\"\"\n return self.inferred_type in [\"categorical\"]\n\n def is_interval(self) -> bool:\n return self.inferred_type in [\"interval\"]\n\n def is_mixed(self) -> bool:\n return self.inferred_type in [\"mixed\"]\n\n def holds_integer(self):\n \"\"\"\n Whether the type is an integer type.\n \"\"\"\n return self.inferred_type in [\"integer\", \"mixed-integer\"]\n\n @cache_readonly\n def inferred_type(self):\n \"\"\"\n Return a string of the type inferred from the values.\n \"\"\"\n return lib.infer_dtype(self, skipna=False)\n\n @cache_readonly\n def is_all_dates(self) -> bool:\n return is_datetime_array(ensure_object(self.values))\n\n # --------------------------------------------------------------------\n # Pickle Methods\n\n def __reduce__(self):\n d = dict(data=self._data)\n d.update(self._get_attributes_dict())\n return _new_Index, (type(self), d), None\n\n def __setstate__(self, state):\n \"\"\"\n Necessary for making this object picklable.\n \"\"\"\n\n if isinstance(state, dict):\n self._data = state.pop(\"data\")\n for k, v in state.items():\n setattr(self, k, v)\n\n elif isinstance(state, tuple):\n\n if len(state) == 2:\n nd_state, own_state = state\n data = np.empty(nd_state[1], dtype=nd_state[2])\n np.ndarray.__setstate__(data, nd_state)\n self._name = own_state[0]\n\n else: # pragma: no cover\n data = np.empty(state)\n np.ndarray.__setstate__(data, state)\n\n self._data = data\n self._reset_identity()\n else:\n raise Exception(\"invalid pickle state\")\n\n _unpickle_compat = __setstate__\n\n # --------------------------------------------------------------------\n # Null Handling Methods\n\n _na_value = np.nan\n \"\"\"The expected NA value to use with this index.\"\"\"\n\n @cache_readonly\n def _isnan(self):\n \"\"\"\n Return if each value is NaN.\n \"\"\"\n if self._can_hold_na:\n return isna(self)\n else:\n # shouldn't reach to this 
condition by checking hasnans beforehand\n values = np.empty(len(self), dtype=np.bool_)\n values.fill(False)\n return values\n\n @cache_readonly\n def _nan_idxs(self):\n if self._can_hold_na:\n return self._isnan.nonzero()[0]\n else:\n return np.array([], dtype=np.int64)\n\n @cache_readonly\n def hasnans(self):\n \"\"\"\n Return if I have any nans; enables various perf speedups.\n \"\"\"\n if self._can_hold_na:\n return bool(self._isnan.any())\n else:\n return False\n\n def isna(self):\n \"\"\"\n Detect missing values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get\n mapped to ``True`` values.\n Everything else get mapped to ``False`` values. Characters such as\n empty strings `''` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n\n Returns\n -------\n numpy.ndarray\n A boolean array of whether my values are NA.\n\n See Also\n --------\n Index.notna : Boolean inverse of isna.\n Index.dropna : Omit entries with missing values.\n isna : Top-level isna.\n Series.isna : Detect missing values in Series object.\n\n Examples\n --------\n Show which entries in a pandas.Index are NA. The result is an\n array.\n\n >>> idx = pd.Index([5.2, 6.0, np.NaN])\n >>> idx\n Float64Index([5.2, 6.0, nan], dtype='float64')\n >>> idx.isna()\n array([False, False, True], dtype=bool)\n\n Empty strings are not considered NA values. None is considered an NA\n value.\n\n >>> idx = pd.Index(['black', '', 'red', None])\n >>> idx\n Index(['black', '', 'red', None], dtype='object')\n >>> idx.isna()\n array([False, False, False, True], dtype=bool)\n\n For datetimes, `NaT` (Not a Time) is considered as an NA value.\n\n >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),\n ... pd.Timestamp(''), None, pd.NaT])\n >>> idx\n DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],\n dtype='datetime64[ns]', freq=None)\n >>> idx.isna()\n array([False, True, True, True], dtype=bool)\n \"\"\"\n return self._isnan\n\n isnull = isna\n\n def notna(self):\n \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to ``True``. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``\n values.\n\n Returns\n -------\n numpy.ndarray\n Boolean array to indicate which entries are not NA.\n\n See Also\n --------\n Index.notnull : Alias of notna.\n Index.isna: Inverse of notna.\n notna : Top-level notna.\n\n Examples\n --------\n Show which entries in an Index are not NA. The result is an\n array.\n\n >>> idx = pd.Index([5.2, 6.0, np.NaN])\n >>> idx\n Float64Index([5.2, 6.0, nan], dtype='float64')\n >>> idx.notna()\n array([ True, True, False])\n\n Empty strings are not considered NA values. None is considered a NA\n value.\n\n >>> idx = pd.Index(['black', '', 'red', None])\n >>> idx\n Index(['black', '', 'red', None], dtype='object')\n >>> idx.notna()\n array([ True, True, True, False])\n \"\"\"\n return ~self.isna()\n\n notnull = notna\n\n _index_shared_docs[\n \"fillna\"\n ] = \"\"\"\n Fill NA/NaN values with the specified value.\n\n Parameters\n ----------\n value : scalar\n Scalar value to use to fill holes (e.g. 
0).\n This value cannot be a list-likes.\n downcast : dict, default is None\n a dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n filled : Index\n \"\"\"\n\n @Appender(_index_shared_docs[\"fillna\"])\n def fillna(self, value=None, downcast=None):\n self._assert_can_do_op(value)\n if self.hasnans:\n result = self.putmask(self._isnan, value)\n if downcast is None:\n # no need to care metadata other than name\n # because it can't have freq if\n return Index(result, name=self.name)\n return self._shallow_copy()\n\n _index_shared_docs[\n \"dropna\"\n ] = \"\"\"\n Return Index without NA/NaN values.\n\n Parameters\n ----------\n how : {'any', 'all'}, default 'any'\n If the Index is a MultiIndex, drop the value when any or all levels\n are NaN.\n\n Returns\n -------\n valid : Index\n \"\"\"\n\n @Appender(_index_shared_docs[\"dropna\"])\n def dropna(self, how=\"any\"):\n if how not in (\"any\", \"all\"):\n raise ValueError(f\"invalid how option: {how}\")\n\n if self.hasnans:\n return self._shallow_copy(self._values[~self._isnan])\n return self._shallow_copy()\n\n # --------------------------------------------------------------------\n # Uniqueness Methods\n\n _index_shared_docs[\n \"index_unique\"\n ] = \"\"\"\n Return unique values in the index. Uniques are returned in order\n of appearance, this does NOT sort.\n\n Parameters\n ----------\n level : int or str, optional, default None\n Only return values from specified level (for MultiIndex).\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n Index without duplicates\n\n See Also\n --------\n unique\n Series.unique\n \"\"\"\n\n @Appender(_index_shared_docs[\"index_unique\"] % _index_doc_kwargs)\n def unique(self, level=None):\n if level is not None:\n self._validate_index_level(level)\n result = super().unique()\n return self._shallow_copy(result)\n\n def drop_duplicates(self, keep=\"first\"):\n \"\"\"\n Return Index with duplicate values removed.\n\n Parameters\n ----------\n keep : {'first', 'last', ``False``}, default 'first'\n - 'first' : Drop duplicates except for the first occurrence.\n - 'last' : Drop duplicates except for the last occurrence.\n - ``False`` : Drop all duplicates.\n\n Returns\n -------\n deduplicated : Index\n\n See Also\n --------\n Series.drop_duplicates : Equivalent method on Series.\n DataFrame.drop_duplicates : Equivalent method on DataFrame.\n Index.duplicated : Related method on Index, indicating duplicate\n Index values.\n\n Examples\n --------\n Generate an pandas.Index with duplicate values.\n\n >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])\n\n The `keep` parameter controls which duplicate values are removed.\n The value 'first' keeps the first occurrence for each\n set of duplicated entries. 
The default value of keep is 'first'.\n\n >>> idx.drop_duplicates(keep='first')\n Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')\n\n The value 'last' keeps the last occurrence for each set of duplicated\n entries.\n\n >>> idx.drop_duplicates(keep='last')\n Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')\n\n The value ``False`` discards all sets of duplicated entries.\n\n >>> idx.drop_duplicates(keep=False)\n Index(['cow', 'beetle', 'hippo'], dtype='object')\n \"\"\"\n return super().drop_duplicates(keep=keep)\n\n def duplicated(self, keep=\"first\"):\n \"\"\"\n Indicate duplicate index values.\n\n Duplicated values are indicated as ``True`` values in the resulting\n array. Either all duplicates, all except the first, or all except the\n last occurrence of duplicates can be indicated.\n\n Parameters\n ----------\n keep : {'first', 'last', False}, default 'first'\n The value or values in a set of duplicates to mark as missing.\n\n - 'first' : Mark duplicates as ``True`` except for the first\n occurrence.\n - 'last' : Mark duplicates as ``True`` except for the last\n occurrence.\n - ``False`` : Mark all duplicates as ``True``.\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n Series.duplicated : Equivalent method on pandas.Series.\n DataFrame.duplicated : Equivalent method on pandas.DataFrame.\n Index.drop_duplicates : Remove duplicate values from Index.\n\n Examples\n --------\n By default, for each set of duplicated values, the first occurrence is\n set to False and all others to True:\n\n >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])\n >>> idx.duplicated()\n array([False, False, True, False, True])\n\n which is equivalent to\n\n >>> idx.duplicated(keep='first')\n array([False, False, True, False, True])\n\n By using 'last', the last occurrence of each set of duplicated values\n is set on False and all others on True:\n\n >>> idx.duplicated(keep='last')\n array([ True, False, True, False, False])\n\n By setting keep on ``False``, all duplicates are True:\n\n >>> idx.duplicated(keep=False)\n array([ True, False, True, False, True])\n \"\"\"\n return super().duplicated(keep=keep)\n\n def _get_unique_index(self, dropna=False):\n \"\"\"\n Returns an index containing unique values.\n\n Parameters\n ----------\n dropna : bool\n If True, NaN values are dropped.\n\n Returns\n -------\n uniques : index\n \"\"\"\n if self.is_unique and not dropna:\n return self\n\n values = self.values\n\n if not self.is_unique:\n values = self.unique()\n\n if dropna:\n try:\n if self.hasnans:\n values = values[~isna(values)]\n except NotImplementedError:\n pass\n\n return self._shallow_copy(values)\n\n # --------------------------------------------------------------------\n # Arithmetic & Logical Methods\n\n def __add__(self, other):\n if isinstance(other, (ABCSeries, ABCDataFrame)):\n return NotImplemented\n from pandas import Series\n\n return Index(Series(self) + other)\n\n def __radd__(self, other):\n from pandas import Series\n\n return Index(other + Series(self))\n\n def __iadd__(self, other):\n # alias for __add__\n return self + other\n\n def __sub__(self, other):\n return Index(np.array(self) - other)\n\n def __rsub__(self, other):\n # wrap Series to ensure we pin name correctly\n from pandas import Series\n\n return Index(other - Series(self))\n\n def __and__(self, other):\n return self.intersection(other)\n\n def __or__(self, other):\n return self.union(other)\n\n def __xor__(self, other):\n return self.symmetric_difference(other)\n\n def 
__nonzero__(self):\n raise ValueError(\n f\"The truth value of a {type(self).__name__} is ambiguous. \"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\"\n )\n\n __bool__ = __nonzero__\n\n # --------------------------------------------------------------------\n # Set Operation Methods\n\n def _get_reconciled_name_object(self, other):\n \"\"\"\n If the result of a set operation will be self,\n return self, unless the name changes, in which\n case make a shallow copy of self.\n \"\"\"\n name = get_op_result_name(self, other)\n if self.name != name:\n return self._shallow_copy(name=name)\n return self\n\n def _union_incompatible_dtypes(self, other, sort):\n \"\"\"\n Casts this and other index to object dtype to allow the formation\n of a union between incompatible types.\n\n Parameters\n ----------\n other : Index or array-like\n sort : False or None, default False\n Whether to sort the resulting index.\n\n * False : do not sort the result.\n * None : sort the result, except when `self` and `other` are equal\n or when the values cannot be compared.\n\n Returns\n -------\n Index\n \"\"\"\n this = self.astype(object, copy=False)\n # cast to Index for when `other` is list-like\n other = Index(other).astype(object, copy=False)\n return Index.union(this, other, sort=sort).astype(object, copy=False)\n\n def _is_compatible_with_other(self, other):\n \"\"\"\n Check whether this and the other dtype are compatible with each other.\n Meaning a union can be formed between them without needing to be cast\n to dtype object.\n\n Parameters\n ----------\n other : Index or array-like\n\n Returns\n -------\n bool\n \"\"\"\n return type(self) is type(other) and is_dtype_equal(self.dtype, other.dtype)\n\n def _validate_sort_keyword(self, sort):\n if sort not in [None, False]:\n raise ValueError(\n \"The 'sort' keyword only takes the values of \"\n f\"None or False; {sort} was passed.\"\n )\n\n def union(self, other, sort=None):\n \"\"\"\n Form the union of two Index objects.\n\n If the Index objects are incompatible, both Index objects will be\n cast to dtype('object') first.\n\n .. versionchanged:: 0.25.0\n\n Parameters\n ----------\n other : Index or array-like\n sort : bool or None, default None\n Whether to sort the resulting Index.\n\n * None : Sort the result, except when\n\n 1. `self` and `other` are equal.\n 2. `self` or `other` has length 0.\n 3. Some values in `self` or `other` cannot be compared.\n A RuntimeWarning is issued in this case.\n\n * False : do not sort the result.\n\n .. versionadded:: 0.24.0\n\n .. versionchanged:: 0.24.1\n\n Changed the default value from ``True`` to ``None``\n (without change in behaviour).\n\n Returns\n -------\n union : Index\n\n Examples\n --------\n\n Union matching dtypes\n\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.union(idx2)\n Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')\n\n Union mismatched dtypes\n\n >>> idx1 = pd.Index(['a', 'b', 'c', 'd'])\n >>> idx2 = pd.Index([1, 2, 3, 4])\n >>> idx1.union(idx2)\n Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object')\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n\n if not self._is_compatible_with_other(other):\n return self._union_incompatible_dtypes(other, sort=sort)\n\n return self._union(other, sort=sort)\n\n def _union(self, other, sort):\n \"\"\"\n Specific union logic should go here. 
In subclasses, union behavior\n should be overwritten here rather than in `self.union`.\n\n Parameters\n ----------\n other : Index or array-like\n sort : False or None, default False\n Whether to sort the resulting index.\n\n * False : do not sort the result.\n * None : sort the result, except when `self` and `other` are equal\n or when the values cannot be compared.\n\n Returns\n -------\n Index\n \"\"\"\n\n if not len(other) or self.equals(other):\n return self._get_reconciled_name_object(other)\n\n if not len(self):\n return other._get_reconciled_name_object(self)\n\n # TODO(EA): setops-refactor, clean all this up\n if is_period_dtype(self) or is_datetime64tz_dtype(self):\n lvals = self._ndarray_values\n else:\n lvals = self._values\n if is_period_dtype(other) or is_datetime64tz_dtype(other):\n rvals = other._ndarray_values\n else:\n rvals = other._values\n\n if sort is None and self.is_monotonic and other.is_monotonic:\n try:\n result = self._outer_indexer(lvals, rvals)[0]\n except TypeError:\n # incomparable objects\n result = list(lvals)\n\n # worth making this faster? a very unusual case\n value_set = set(lvals)\n result.extend([x for x in rvals if x not in value_set])\n else:\n # find indexes of things in \"other\" that are not in \"self\"\n if self.is_unique:\n indexer = self.get_indexer(other)\n indexer = (indexer == -1).nonzero()[0]\n else:\n indexer = algos.unique1d(self.get_indexer_non_unique(other)[1])\n\n if len(indexer) > 0:\n other_diff = algos.take_nd(rvals, indexer, allow_fill=False)\n result = concat_compat((lvals, other_diff))\n\n else:\n result = lvals\n\n if sort is None:\n try:\n result = algos.safe_sort(result)\n except TypeError as err:\n warnings.warn(\n f\"{err}, sort order is undefined for incomparable objects\",\n RuntimeWarning,\n stacklevel=3,\n )\n\n # for subclasses\n return self._wrap_setop_result(other, result)\n\n def _wrap_setop_result(self, other, result):\n return self._constructor(result, name=get_op_result_name(self, other))\n\n _index_shared_docs[\n \"intersection\"\n ] = \"\"\"\n Form the intersection of two Index objects.\n\n This returns a new Index with elements common to the index and `other`.\n\n Parameters\n ----------\n other : Index or array-like\n sort : False or None, default False\n Whether to sort the resulting index.\n\n * False : do not sort the result.\n * None : sort the result, except when `self` and `other` are equal\n or when the values cannot be compared.\n\n .. versionadded:: 0.24.0\n\n .. 
versionchanged:: 0.24.1\n\n Changed the default from ``True`` to ``False``, to match\n the behaviour of 0.23.4 and earlier.\n\n Returns\n -------\n intersection : Index\n\n Examples\n --------\n\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.intersection(idx2)\n Int64Index([3, 4], dtype='int64')\n \"\"\"\n\n # TODO: standardize return type of non-union setops type(self vs other)\n @Appender(_index_shared_docs[\"intersection\"])\n def intersection(self, other, sort=False):\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other = ensure_index(other)\n\n if self.equals(other):\n return self._get_reconciled_name_object(other)\n\n if not is_dtype_equal(self.dtype, other.dtype):\n this = self.astype(\"O\")\n other = other.astype(\"O\")\n return this.intersection(other, sort=sort)\n\n # TODO(EA): setops-refactor, clean all this up\n lvals = self._values\n rvals = other._values\n\n if self.is_monotonic and other.is_monotonic:\n try:\n result = self._inner_indexer(lvals, rvals)[0]\n return self._wrap_setop_result(other, result)\n except TypeError:\n pass\n\n try:\n indexer = Index(rvals).get_indexer(lvals)\n indexer = indexer.take((indexer != -1).nonzero()[0])\n except (InvalidIndexError, IncompatibleFrequency):\n # InvalidIndexError raised by get_indexer if non-unique\n # IncompatibleFrequency raised by PeriodIndex.get_indexer\n indexer = algos.unique1d(Index(rvals).get_indexer_non_unique(lvals)[0])\n indexer = indexer[indexer != -1]\n\n taken = other.take(indexer)\n res_name = get_op_result_name(self, other)\n\n if sort is None:\n taken = algos.safe_sort(taken.values)\n return self._shallow_copy(taken, name=res_name)\n\n taken.name = res_name\n return taken\n\n def difference(self, other, sort=None):\n \"\"\"\n Return a new Index with elements from the index that are not in\n `other`.\n\n This is the set difference of two Index objects.\n\n Parameters\n ----------\n other : Index or array-like\n sort : False or None, default None\n Whether to sort the resulting index. By default, the\n values are attempted to be sorted, but any TypeError from\n incomparable elements is caught by pandas.\n\n * None : Attempt to sort the result, but catch any TypeErrors\n from comparing incomparable elements.\n * False : Do not sort the result.\n\n .. versionadded:: 0.24.0\n\n .. 
versionchanged:: 0.24.1\n\n Changed the default value from ``True`` to ``None``\n (without change in behaviour).\n\n Returns\n -------\n difference : Index\n\n Examples\n --------\n\n >>> idx1 = pd.Index([2, 1, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.difference(idx2)\n Int64Index([1, 2], dtype='int64')\n >>> idx1.difference(idx2, sort=False)\n Int64Index([2, 1], dtype='int64')\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n\n if self.equals(other):\n # pass an empty np.ndarray with the appropriate dtype\n return self._shallow_copy(self._data[:0])\n\n other, result_name = self._convert_can_do_setop(other)\n\n this = self._get_unique_index()\n\n indexer = this.get_indexer(other)\n indexer = indexer.take((indexer != -1).nonzero()[0])\n\n label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)\n the_diff = this.values.take(label_diff)\n if sort is None:\n try:\n the_diff = algos.safe_sort(the_diff)\n except TypeError:\n pass\n\n return this._shallow_copy(the_diff, name=result_name)\n\n def symmetric_difference(self, other, result_name=None, sort=None):\n \"\"\"\n Compute the symmetric difference of two Index objects.\n\n Parameters\n ----------\n other : Index or array-like\n result_name : str\n sort : False or None, default None\n Whether to sort the resulting index. By default, the\n values are attempted to be sorted, but any TypeError from\n incomparable elements is caught by pandas.\n\n * None : Attempt to sort the result, but catch any TypeErrors\n from comparing incomparable elements.\n * False : Do not sort the result.\n\n .. versionadded:: 0.24.0\n\n .. versionchanged:: 0.24.1\n\n Changed the default value from ``True`` to ``None``\n (without change in behaviour).\n\n Returns\n -------\n symmetric_difference : Index\n\n Notes\n -----\n ``symmetric_difference`` contains elements that appear in either\n ``idx1`` or ``idx2`` but not both. 
Equivalent to the Index created by\n ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates\n dropped.\n\n Examples\n --------\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([2, 3, 4, 5])\n >>> idx1.symmetric_difference(idx2)\n Int64Index([1, 5], dtype='int64')\n\n You can also use the ``^`` operator:\n\n >>> idx1 ^ idx2\n Int64Index([1, 5], dtype='int64')\n \"\"\"\n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name_update = self._convert_can_do_setop(other)\n if result_name is None:\n result_name = result_name_update\n\n this = self._get_unique_index()\n other = other._get_unique_index()\n indexer = this.get_indexer(other)\n\n # {this} minus {other}\n common_indexer = indexer.take((indexer != -1).nonzero()[0])\n left_indexer = np.setdiff1d(\n np.arange(this.size), common_indexer, assume_unique=True\n )\n left_diff = this._values.take(left_indexer)\n\n # {other} minus {this}\n right_indexer = (indexer == -1).nonzero()[0]\n right_diff = other._values.take(right_indexer)\n\n the_diff = concat_compat([left_diff, right_diff])\n if sort is None:\n try:\n the_diff = algos.safe_sort(the_diff)\n except TypeError:\n pass\n\n attribs = self._get_attributes_dict()\n attribs[\"name\"] = result_name\n if \"freq\" in attribs:\n attribs[\"freq\"] = None\n return self._shallow_copy_with_infer(the_diff, **attribs)\n\n def _assert_can_do_setop(self, other):\n if not is_list_like(other):\n raise TypeError(\"Input must be Index or array-like\")\n return True\n\n def _convert_can_do_setop(self, other):\n if not isinstance(other, Index):\n other = Index(other, name=self.name)\n result_name = self.name\n else:\n result_name = get_op_result_name(self, other)\n return other, result_name\n\n # --------------------------------------------------------------------\n # Indexing Methods\n\n _index_shared_docs[\n \"get_loc\"\n ] = \"\"\"\n Get integer location, slice or boolean mask for requested label.\n\n Parameters\n ----------\n key : label\n method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional\n * default: exact matches only.\n * pad / ffill: find the PREVIOUS index value if no exact match.\n * backfill / bfill: use NEXT index value if no exact match\n * nearest: use the NEAREST index value if no exact match. Tied\n distances are broken by preferring the larger index value.\n tolerance : int or float, optional\n Maximum distance from index value for inexact matches. The value of\n the index at the matching location most satisfy the equation\n ``abs(index[loc] - key) <= tolerance``.\n\n .. 
versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n loc : int if unique index, slice if monotonic index, else mask\n\n Examples\n --------\n >>> unique_index = pd.Index(list('abc'))\n >>> unique_index.get_loc('b')\n 1\n\n >>> monotonic_index = pd.Index(list('abbc'))\n >>> monotonic_index.get_loc('b')\n slice(1, 3, None)\n\n >>> non_monotonic_index = pd.Index(list('abcb'))\n >>> non_monotonic_index.get_loc('b')\n array([False, True, False, True], dtype=bool)\n \"\"\"\n\n @Appender(_index_shared_docs[\"get_loc\"])\n def get_loc(self, key, method=None, tolerance=None):\n if method is None:\n if tolerance is not None:\n raise ValueError(\n \"tolerance argument only valid if using pad, \"\n \"backfill or nearest lookups\"\n )\n try:\n return self._engine.get_loc(key)\n except KeyError:\n return self._engine.get_loc(self._maybe_cast_indexer(key))\n indexer = self.get_indexer([key], method=method, tolerance=tolerance)\n if indexer.ndim > 1 or indexer.size > 1:\n raise TypeError(\"get_loc requires scalar valued input\")\n loc = indexer.item()\n if loc == -1:\n raise KeyError(key)\n return loc\n\n _index_shared_docs[\n \"get_indexer\"\n ] = \"\"\"\n Compute indexer and mask for new index given the current index. The\n indexer should be then used as an input to ndarray.take to align the\n current data to the new index.\n\n Parameters\n ----------\n target : %(target_klass)s\n method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional\n * default: exact matches only.\n * pad / ffill: find the PREVIOUS index value if no exact match.\n * backfill / bfill: use NEXT index value if no exact match\n * nearest: use the NEAREST index value if no exact match. Tied\n distances are broken by preferring the larger index value.\n limit : int, optional\n Maximum number of consecutive labels in ``target`` to match for\n inexact matches.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n indexer : ndarray of int\n Integers from 0 to n - 1 indicating that the index at these\n positions matches the corresponding target values. Missing values\n in the target are marked by -1.\n %(raises_section)s\n Examples\n --------\n >>> index = pd.Index(['c', 'a', 'b'])\n >>> index.get_indexer(['a', 'b', 'x'])\n array([ 1, 2, -1])\n\n Notice that the return value is an array of locations in ``index``\n and ``x`` is marked by -1, as it is not in ``index``.\n \"\"\"\n\n @Appender(_index_shared_docs[\"get_indexer\"] % _index_doc_kwargs)\n def get_indexer(self, target, method=None, limit=None, tolerance=None):\n method = missing.clean_reindex_fill_method(method)\n target = ensure_index(target)\n if tolerance is not None:\n tolerance = self._convert_tolerance(tolerance, target)\n\n # Treat boolean labels passed to a numeric index as not found. 
Without\n # this fix False and True would be treated as 0 and 1 respectively.\n # (GH #16877)\n if target.is_boolean() and self.is_numeric():\n return ensure_platform_int(np.repeat(-1, target.size))\n\n pself, ptarget = self._maybe_promote(target)\n if pself is not self or ptarget is not target:\n return pself.get_indexer(\n ptarget, method=method, limit=limit, tolerance=tolerance\n )\n\n if not is_dtype_equal(self.dtype, target.dtype):\n this = self.astype(object)\n target = target.astype(object)\n return this.get_indexer(\n target, method=method, limit=limit, tolerance=tolerance\n )\n\n if not self.is_unique:\n raise InvalidIndexError(\n \"Reindexing only valid with uniquely valued Index objects\"\n )\n\n if method == \"pad\" or method == \"backfill\":\n indexer = self._get_fill_indexer(target, method, limit, tolerance)\n elif method == \"nearest\":\n indexer = self._get_nearest_indexer(target, limit, tolerance)\n else:\n if tolerance is not None:\n raise ValueError(\n \"tolerance argument only valid if doing pad, \"\n \"backfill or nearest reindexing\"\n )\n if limit is not None:\n raise ValueError(\n \"limit argument only valid if doing pad, \"\n \"backfill or nearest reindexing\"\n )\n\n indexer = self._engine.get_indexer(target._ndarray_values)\n\n return ensure_platform_int(indexer)\n\n def _convert_tolerance(self, tolerance, target):\n # override this method on subclasses\n tolerance = np.asarray(tolerance)\n if target.size != tolerance.size and tolerance.size > 1:\n raise ValueError(\"list-like tolerance size must match target index size\")\n return tolerance\n\n def _get_fill_indexer(self, target, method, limit=None, tolerance=None):\n if self.is_monotonic_increasing and target.is_monotonic_increasing:\n method = (\n self._engine.get_pad_indexer\n if method == \"pad\"\n else self._engine.get_backfill_indexer\n )\n indexer = method(target._ndarray_values, limit)\n else:\n indexer = self._get_fill_indexer_searchsorted(target, method, limit)\n if tolerance is not None:\n indexer = self._filter_indexer_tolerance(\n target._ndarray_values, indexer, tolerance\n )\n return indexer\n\n def _get_fill_indexer_searchsorted(self, target, method, limit=None):\n \"\"\"\n Fallback pad/backfill get_indexer that works for monotonic decreasing\n indexes and non-monotonic targets.\n \"\"\"\n if limit is not None:\n raise ValueError(\n f\"limit argument for {repr(method)} method only well-defined \"\n \"if index and target are monotonic\"\n )\n\n side = \"left\" if method == \"pad\" else \"right\"\n\n # find exact matches first (this simplifies the algorithm)\n indexer = self.get_indexer(target)\n nonexact = indexer == -1\n indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side)\n if side == \"left\":\n # searchsorted returns \"indices into a sorted array such that,\n # if the corresponding elements in v were inserted before the\n # indices, the order of a would be preserved\".\n # Thus, we need to subtract 1 to find values to the left.\n indexer[nonexact] -= 1\n # This also mapped not found values (values of 0 from\n # np.searchsorted) to -1, which conveniently is also our\n # sentinel for missing values\n else:\n # Mark indices to the right of the largest value as not found\n indexer[indexer == len(self)] = -1\n return indexer\n\n def _get_nearest_indexer(self, target, limit, tolerance):\n \"\"\"\n Get the indexer for the nearest index labels; requires an index with\n values that can be subtracted from each other (e.g., not strings or\n tuples).\n \"\"\"\n left_indexer = 
self.get_indexer(target, \"pad\", limit=limit)\n right_indexer = self.get_indexer(target, \"backfill\", limit=limit)\n\n target = np.asarray(target)\n left_distances = abs(self.values[left_indexer] - target)\n right_distances = abs(self.values[right_indexer] - target)\n\n op = operator.lt if self.is_monotonic_increasing else operator.le\n indexer = np.where(\n op(left_distances, right_distances) | (right_indexer == -1),\n left_indexer,\n right_indexer,\n )\n if tolerance is not None:\n indexer = self._filter_indexer_tolerance(target, indexer, tolerance)\n return indexer\n\n def _filter_indexer_tolerance(self, target, indexer, tolerance):\n distance = abs(self.values[indexer] - target)\n indexer = np.where(distance <= tolerance, indexer, -1)\n return indexer\n\n # --------------------------------------------------------------------\n # Indexer Conversion Methods\n\n _index_shared_docs[\n \"_convert_scalar_indexer\"\n ] = \"\"\"\n Convert a scalar indexer.\n\n Parameters\n ----------\n key : label of the slice bound\n kind : {'ix', 'loc', 'getitem', 'iloc'} or None\n \"\"\"\n\n @Appender(_index_shared_docs[\"_convert_scalar_indexer\"])\n def _convert_scalar_indexer(self, key, kind=None):\n assert kind in [\"ix\", \"loc\", \"getitem\", \"iloc\", None]\n\n if kind == \"iloc\":\n return self._validate_indexer(\"positional\", key, kind)\n\n if len(self) and not isinstance(self, ABCMultiIndex):\n\n # we can raise here if we are definitive that this\n # is positional indexing (eg. .ix on with a float)\n # or label indexing if we are using a type able\n # to be represented in the index\n\n if kind in [\"getitem\", \"ix\"] and is_float(key):\n if not self.is_floating():\n return self._invalid_indexer(\"label\", key)\n\n elif kind in [\"loc\"] and is_float(key):\n\n # we want to raise KeyError on string/mixed here\n # technically we *could* raise a TypeError\n # on anything but mixed though\n if self.inferred_type not in [\n \"floating\",\n \"mixed-integer-float\",\n \"integer-na\",\n \"string\",\n \"unicode\",\n \"mixed\",\n ]:\n self._invalid_indexer(\"label\", key)\n\n elif kind in [\"loc\"] and is_integer(key):\n if not self.holds_integer():\n self._invalid_indexer(\"label\", key)\n\n return key\n\n _index_shared_docs[\n \"_convert_slice_indexer\"\n ] = \"\"\"\n Convert a slice indexer.\n\n By definition, these are labels unless 'iloc' is passed in.\n Floats are not allowed as the start, step, or stop of the slice.\n\n Parameters\n ----------\n key : label of the slice bound\n kind : {'ix', 'loc', 'getitem', 'iloc'} or None\n \"\"\"\n\n @Appender(_index_shared_docs[\"_convert_slice_indexer\"])\n def _convert_slice_indexer(self, key: slice, kind=None):\n assert kind in [\"ix\", \"loc\", \"getitem\", \"iloc\", None]\n\n # validate iloc\n if kind == \"iloc\":\n return slice(\n self._validate_indexer(\"slice\", key.start, kind),\n self._validate_indexer(\"slice\", key.stop, kind),\n self._validate_indexer(\"slice\", key.step, kind),\n )\n\n # potentially cast the bounds to integers\n start, stop, step = key.start, key.stop, key.step\n\n # figure out if this is a positional indexer\n def is_int(v):\n return v is None or is_integer(v)\n\n is_null_slicer = start is None and stop is None\n is_index_slice = is_int(start) and is_int(stop)\n is_positional = is_index_slice and not (\n self.is_integer() or self.is_categorical()\n )\n\n if kind == \"getitem\":\n \"\"\"\n called from the getitem slicers, validate that we are in fact\n integers\n \"\"\"\n if self.is_integer() or is_index_slice:\n return slice(\n 
self._validate_indexer(\"slice\", key.start, kind),\n self._validate_indexer(\"slice\", key.stop, kind),\n self._validate_indexer(\"slice\", key.step, kind),\n )\n\n # convert the slice to an indexer here\n\n # if we are mixed and have integers\n try:\n if is_positional and self.is_mixed():\n # Validate start & stop\n if start is not None:\n self.get_loc(start)\n if stop is not None:\n self.get_loc(stop)\n is_positional = False\n except KeyError:\n if self.inferred_type in [\"mixed-integer-float\", \"integer-na\"]:\n raise\n\n if is_null_slicer:\n indexer = key\n elif is_positional:\n indexer = key\n else:\n indexer = self.slice_indexer(start, stop, step, kind=kind)\n\n return indexer\n\n def _convert_listlike_indexer(self, keyarr, kind=None):\n \"\"\"\n Parameters\n ----------\n keyarr : list-like\n Indexer to convert.\n\n Returns\n -------\n indexer : numpy.ndarray or None\n Return an ndarray or None if cannot convert.\n keyarr : numpy.ndarray\n Return tuple-safe keys.\n \"\"\"\n if isinstance(keyarr, Index):\n keyarr = self._convert_index_indexer(keyarr)\n else:\n keyarr = self._convert_arr_indexer(keyarr)\n\n indexer = self._convert_list_indexer(keyarr, kind=kind)\n return indexer, keyarr\n\n _index_shared_docs[\n \"_convert_arr_indexer\"\n ] = \"\"\"\n Convert an array-like indexer to the appropriate dtype.\n\n Parameters\n ----------\n keyarr : array-like\n Indexer to convert.\n\n Returns\n -------\n converted_keyarr : array-like\n \"\"\"\n\n @Appender(_index_shared_docs[\"_convert_arr_indexer\"])\n def _convert_arr_indexer(self, keyarr):\n keyarr = com.asarray_tuplesafe(keyarr)\n return keyarr\n\n _index_shared_docs[\n \"_convert_index_indexer\"\n ] = \"\"\"\n Convert an Index indexer to the appropriate dtype.\n\n Parameters\n ----------\n keyarr : Index (or sub-class)\n Indexer to convert.\n\n Returns\n -------\n converted_keyarr : Index (or sub-class)\n \"\"\"\n\n @Appender(_index_shared_docs[\"_convert_index_indexer\"])\n def _convert_index_indexer(self, keyarr):\n return keyarr\n\n _index_shared_docs[\n \"_convert_list_indexer\"\n ] = \"\"\"\n Convert a list-like indexer to the appropriate dtype.\n\n Parameters\n ----------\n keyarr : Index (or sub-class)\n Indexer to convert.\n kind : iloc, ix, loc, optional\n\n Returns\n -------\n positional indexer or None\n \"\"\"\n\n @Appender(_index_shared_docs[\"_convert_list_indexer\"])\n def _convert_list_indexer(self, keyarr, kind=None):\n if (\n kind in [None, \"iloc\", \"ix\"]\n and is_integer_dtype(keyarr)\n and not self.is_floating()\n and not isinstance(keyarr, ABCPeriodIndex)\n ):\n\n if self.inferred_type == \"mixed-integer\":\n indexer = self.get_indexer(keyarr)\n if (indexer >= 0).all():\n return indexer\n # missing values are flagged as -1 by get_indexer and negative\n # indices are already converted to positive indices in the\n # above if-statement, so the negative flags are changed to\n # values outside the range of indices so as to trigger an\n # IndexError in maybe_convert_indices\n indexer[indexer < 0] = len(self)\n\n return maybe_convert_indices(indexer, len(self))\n\n elif not self.inferred_type == \"integer\":\n keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)\n return keyarr\n\n return None\n\n def _invalid_indexer(self, form, key):\n \"\"\"\n Consistent invalid indexer message.\n \"\"\"\n raise TypeError(\n f\"cannot do {form} indexing on {type(self)} with these \"\n f\"indexers [{key}] of {type(key)}\"\n )\n\n # --------------------------------------------------------------------\n # Reindex Methods\n\n 
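    # Illustrative sketch (editor's addition, not part of the pandas
    # source): how the positions returned by ``get_indexer`` above are
    # typically consumed. The index and data values are hypothetical;
    # behaviour follows the docstring: exact matching by default, 'pad'
    # takes the previous label, 'nearest' honours ``tolerance``, and
    # labels with no match are marked -1. The resulting positions can
    # then be fed to ``ndarray.take`` to align data to the new labels.
    #
    # >>> import numpy as np, pandas as pd
    # >>> idx = pd.Index([10, 20, 30, 40])
    # >>> data = np.array([1.0, 2.0, 3.0, 4.0])
    # >>> idx.get_indexer([20, 35])                  # exact: 35 not found
    # array([ 1, -1])
    # >>> idx.get_indexer([20, 35], method='pad')    # previous label, 30
    # array([1, 2])
    # >>> idx.get_indexer([20, 34], method='nearest', tolerance=5)
    # array([1, 2])
    # >>> data.take(idx.get_indexer([20, 30]))       # align data to labels
    # array([2., 3.])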
def _can_reindex(self, indexer):\n \"\"\"\n Check if we are allowing reindexing with this particular indexer.\n\n Parameters\n ----------\n indexer : an integer indexer\n\n Raises\n ------\n ValueError if its a duplicate axis\n \"\"\"\n\n # trying to reindex on an axis with duplicates\n if not self.is_unique and len(indexer):\n raise ValueError(\"cannot reindex from a duplicate axis\")\n\n def reindex(self, target, method=None, level=None, limit=None, tolerance=None):\n \"\"\"\n Create index with target's values (move/add/delete values\n as necessary).\n\n Parameters\n ----------\n target : an iterable\n\n Returns\n -------\n new_index : pd.Index\n Resulting index.\n indexer : np.ndarray or None\n Indices of output values in original index.\n \"\"\"\n # GH6552: preserve names when reindexing to non-named target\n # (i.e. neither Index nor Series).\n preserve_names = not hasattr(target, \"name\")\n\n # GH7774: preserve dtype/tz if target is empty and not an Index.\n target = _ensure_has_len(target) # target may be an iterator\n\n if not isinstance(target, Index) and len(target) == 0:\n attrs = self._get_attributes_dict()\n attrs.pop(\"freq\", None) # don't preserve freq\n values = self._data[:0] # appropriately-dtyped empty array\n target = self._simple_new(values, dtype=self.dtype, **attrs)\n else:\n target = ensure_index(target)\n\n if level is not None:\n if method is not None:\n raise TypeError(\"Fill method not supported if level passed\")\n _, indexer, _ = self._join_level(\n target, level, how=\"right\", return_indexers=True\n )\n else:\n if self.equals(target):\n indexer = None\n else:\n # check is_overlapping for IntervalIndex compat\n if self.is_unique and not getattr(self, \"is_overlapping\", False):\n indexer = self.get_indexer(\n target, method=method, limit=limit, tolerance=tolerance\n )\n else:\n if method is not None or limit is not None:\n raise ValueError(\n \"cannot reindex a non-unique index \"\n \"with a method or limit\"\n )\n indexer, missing = self.get_indexer_non_unique(target)\n\n if preserve_names and target.nlevels == 1 and target.name != self.name:\n target = target.copy()\n target.name = self.name\n\n return target, indexer\n\n def _reindex_non_unique(self, target):\n \"\"\"\n Create a new index with target's values (move/add/delete values as\n necessary) use with non-unique Index and a possibly non-unique target.\n\n Parameters\n ----------\n target : an iterable\n\n Returns\n -------\n new_index : pd.Index\n Resulting index.\n indexer : np.ndarray or None\n Indices of output values in original index.\n\n \"\"\"\n\n target = ensure_index(target)\n indexer, missing = self.get_indexer_non_unique(target)\n check = indexer != -1\n new_labels = self.take(indexer[check])\n new_indexer = None\n\n if len(missing):\n length = np.arange(len(indexer))\n\n missing = ensure_platform_int(missing)\n missing_labels = target.take(missing)\n missing_indexer = ensure_int64(length[~check])\n cur_labels = self.take(indexer[check]).values\n cur_indexer = ensure_int64(length[check])\n\n new_labels = np.empty(tuple([len(indexer)]), dtype=object)\n new_labels[cur_indexer] = cur_labels\n new_labels[missing_indexer] = missing_labels\n\n # a unique indexer\n if target.is_unique:\n\n # see GH5553, make sure we use the right indexer\n new_indexer = np.arange(len(indexer))\n new_indexer[cur_indexer] = np.arange(len(cur_labels))\n new_indexer[missing_indexer] = -1\n\n # we have a non_unique selector, need to use the original\n # indexer here\n else:\n\n # need to retake to have the same size 
as the indexer\n indexer[~check] = -1\n\n # reset the new indexer to account for the new size\n new_indexer = np.arange(len(self.take(indexer)))\n new_indexer[~check] = -1\n\n new_index = self._shallow_copy_with_infer(new_labels)\n return new_index, indexer, new_indexer\n\n # --------------------------------------------------------------------\n # Join Methods\n\n _index_shared_docs[\n \"join\"\n ] = \"\"\"\n Compute join_index and indexers to conform data\n structures to the new index.\n\n Parameters\n ----------\n other : Index\n how : {'left', 'right', 'inner', 'outer'}\n level : int or level name, default None\n return_indexers : bool, default False\n sort : bool, default False\n Sort the join keys lexicographically in the result Index. If False,\n the order of the join keys depends on the join type (how keyword).\n\n Returns\n -------\n join_index, (left_indexer, right_indexer)\n \"\"\"\n\n @Appender(_index_shared_docs[\"join\"])\n def join(self, other, how=\"left\", level=None, return_indexers=False, sort=False):\n self_is_mi = isinstance(self, ABCMultiIndex)\n other_is_mi = isinstance(other, ABCMultiIndex)\n\n # try to figure out the join level\n # GH3662\n if level is None and (self_is_mi or other_is_mi):\n\n # have the same levels/names so a simple join\n if self.names == other.names:\n pass\n else:\n return self._join_multi(other, how=how, return_indexers=return_indexers)\n\n # join on the level\n if level is not None and (self_is_mi or other_is_mi):\n return self._join_level(\n other, level, how=how, return_indexers=return_indexers\n )\n\n other = ensure_index(other)\n\n if len(other) == 0 and how in (\"left\", \"outer\"):\n join_index = self._shallow_copy()\n if return_indexers:\n rindexer = np.repeat(-1, len(join_index))\n return join_index, None, rindexer\n else:\n return join_index\n\n if len(self) == 0 and how in (\"right\", \"outer\"):\n join_index = other._shallow_copy()\n if return_indexers:\n lindexer = np.repeat(-1, len(join_index))\n return join_index, lindexer, None\n else:\n return join_index\n\n if self._join_precedence < other._join_precedence:\n how = {\"right\": \"left\", \"left\": \"right\"}.get(how, how)\n result = other.join(\n self, how=how, level=level, return_indexers=return_indexers\n )\n if return_indexers:\n x, y, z = result\n result = x, z, y\n return result\n\n if not is_dtype_equal(self.dtype, other.dtype):\n this = self.astype(\"O\")\n other = other.astype(\"O\")\n return this.join(other, how=how, return_indexers=return_indexers)\n\n _validate_join_method(how)\n\n if not self.is_unique and not other.is_unique:\n return self._join_non_unique(\n other, how=how, return_indexers=return_indexers\n )\n elif not self.is_unique or not other.is_unique:\n if self.is_monotonic and other.is_monotonic:\n return self._join_monotonic(\n other, how=how, return_indexers=return_indexers\n )\n else:\n return self._join_non_unique(\n other, how=how, return_indexers=return_indexers\n )\n elif self.is_monotonic and other.is_monotonic:\n try:\n return self._join_monotonic(\n other, how=how, return_indexers=return_indexers\n )\n except TypeError:\n pass\n\n if how == \"left\":\n join_index = self\n elif how == \"right\":\n join_index = other\n elif how == \"inner\":\n # TODO: sort=False here for backwards compat. It may\n # be better to use the sort parameter passed into join\n join_index = self.intersection(other, sort=False)\n elif how == \"outer\":\n # TODO: sort=True here for backwards compat. 
It may\n # be better to use the sort parameter passed into join\n join_index = self.union(other)\n\n if sort:\n join_index = join_index.sort_values()\n\n if return_indexers:\n if join_index is self:\n lindexer = None\n else:\n lindexer = self.get_indexer(join_index)\n if join_index is other:\n rindexer = None\n else:\n rindexer = other.get_indexer(join_index)\n return join_index, lindexer, rindexer\n else:\n return join_index\n\n def _join_multi(self, other, how, return_indexers=True):\n from .multi import MultiIndex\n from pandas.core.reshape.merge import _restore_dropped_levels_multijoin\n\n # figure out join names\n self_names = set(com.not_none(*self.names))\n other_names = set(com.not_none(*other.names))\n overlap = self_names & other_names\n\n # need at least 1 in common\n if not overlap:\n raise ValueError(\"cannot join with no overlapping index names\")\n\n self_is_mi = isinstance(self, MultiIndex)\n other_is_mi = isinstance(other, MultiIndex)\n\n if self_is_mi and other_is_mi:\n\n # Drop the non-matching levels from left and right respectively\n ldrop_names = list(self_names - overlap)\n rdrop_names = list(other_names - overlap)\n\n # if only the order differs\n if not len(ldrop_names + rdrop_names):\n self_jnlevels = self\n other_jnlevels = other.reorder_levels(self.names)\n else:\n self_jnlevels = self.droplevel(ldrop_names)\n other_jnlevels = other.droplevel(rdrop_names)\n\n # Join left and right\n # Join on same leveled multi-index frames is supported\n join_idx, lidx, ridx = self_jnlevels.join(\n other_jnlevels, how, return_indexers=True\n )\n\n # Restore the dropped levels\n # Returned index level order is\n # common levels, ldrop_names, rdrop_names\n dropped_names = ldrop_names + rdrop_names\n\n levels, codes, names = _restore_dropped_levels_multijoin(\n self, other, dropped_names, join_idx, lidx, ridx\n )\n\n # Re-create the multi-index\n multi_join_idx = MultiIndex(\n levels=levels, codes=codes, names=names, verify_integrity=False\n )\n\n multi_join_idx = multi_join_idx.remove_unused_levels()\n\n return multi_join_idx, lidx, ridx\n\n jl = list(overlap)[0]\n\n # Case where only one index is multi\n # make the indices into mi's that match\n flip_order = False\n if self_is_mi:\n self, other = other, self\n flip_order = True\n # flip if join method is right or left\n how = {\"right\": \"left\", \"left\": \"right\"}.get(how, how)\n\n level = other.names.index(jl)\n result = self._join_level(\n other, level, how=how, return_indexers=return_indexers\n )\n\n if flip_order:\n if isinstance(result, tuple):\n return result[0], result[2], result[1]\n return result\n\n def _join_non_unique(self, other, how=\"left\", return_indexers=False):\n from pandas.core.reshape.merge import _get_join_indexers\n\n left_idx, right_idx = _get_join_indexers(\n [self._ndarray_values], [other._ndarray_values], how=how, sort=True\n )\n\n left_idx = ensure_platform_int(left_idx)\n right_idx = ensure_platform_int(right_idx)\n\n join_index = np.asarray(self._ndarray_values.take(left_idx))\n mask = left_idx == -1\n np.putmask(join_index, mask, other._ndarray_values.take(right_idx))\n\n join_index = self._wrap_joined_index(join_index, other)\n\n if return_indexers:\n return join_index, left_idx, right_idx\n else:\n return join_index\n\n def _join_level(\n self, other, level, how=\"left\", return_indexers=False, keep_order=True\n ):\n \"\"\"\n The join method *only* affects the level of the resulting\n MultiIndex. 
Otherwise it just exactly aligns the Index data to the\n labels of the level in the MultiIndex.\n\n If ```keep_order == True```, the order of the data indexed by the\n MultiIndex will not be changed; otherwise, it will tie out\n with `other`.\n \"\"\"\n from .multi import MultiIndex\n\n def _get_leaf_sorter(labels):\n \"\"\"\n Returns sorter for the inner most level while preserving the\n order of higher levels.\n \"\"\"\n if labels[0].size == 0:\n return np.empty(0, dtype=\"int64\")\n\n if len(labels) == 1:\n lab = ensure_int64(labels[0])\n sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max())\n return sorter\n\n # find indexers of beginning of each set of\n # same-key labels w.r.t all but last level\n tic = labels[0][:-1] != labels[0][1:]\n for lab in labels[1:-1]:\n tic |= lab[:-1] != lab[1:]\n\n starts = np.hstack(([True], tic, [True])).nonzero()[0]\n lab = ensure_int64(labels[-1])\n return lib.get_level_sorter(lab, ensure_int64(starts))\n\n if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):\n raise TypeError(\"Join on level between two MultiIndex objects is ambiguous\")\n\n left, right = self, other\n\n flip_order = not isinstance(self, MultiIndex)\n if flip_order:\n left, right = right, left\n how = {\"right\": \"left\", \"left\": \"right\"}.get(how, how)\n\n level = left._get_level_number(level)\n old_level = left.levels[level]\n\n if not right.is_unique:\n raise NotImplementedError(\n \"Index._join_level on non-unique index is not implemented\"\n )\n\n new_level, left_lev_indexer, right_lev_indexer = old_level.join(\n right, how=how, return_indexers=True\n )\n\n if left_lev_indexer is None:\n if keep_order or len(left) == 0:\n left_indexer = None\n join_index = left\n else: # sort the leaves\n left_indexer = _get_leaf_sorter(left.codes[: level + 1])\n join_index = left[left_indexer]\n\n else:\n left_lev_indexer = ensure_int64(left_lev_indexer)\n rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level))\n\n new_lev_codes = algos.take_nd(\n rev_indexer, left.codes[level], allow_fill=False\n )\n\n new_codes = list(left.codes)\n new_codes[level] = new_lev_codes\n\n new_levels = list(left.levels)\n new_levels[level] = new_level\n\n if keep_order: # just drop missing values. o.w. 
keep order\n left_indexer = np.arange(len(left), dtype=np.intp)\n mask = new_lev_codes != -1\n if not mask.all():\n new_codes = [lab[mask] for lab in new_codes]\n left_indexer = left_indexer[mask]\n\n else: # tie out the order with other\n if level == 0: # outer most level, take the fast route\n ngroups = 1 + new_lev_codes.max()\n left_indexer, counts = libalgos.groupsort_indexer(\n new_lev_codes, ngroups\n )\n\n # missing values are placed first; drop them!\n left_indexer = left_indexer[counts[0] :]\n new_codes = [lab[left_indexer] for lab in new_codes]\n\n else: # sort the leaves\n mask = new_lev_codes != -1\n mask_all = mask.all()\n if not mask_all:\n new_codes = [lab[mask] for lab in new_codes]\n\n left_indexer = _get_leaf_sorter(new_codes[: level + 1])\n new_codes = [lab[left_indexer] for lab in new_codes]\n\n # left_indexers are w.r.t masked frame.\n # reverse to original frame!\n if not mask_all:\n left_indexer = mask.nonzero()[0][left_indexer]\n\n join_index = MultiIndex(\n levels=new_levels,\n codes=new_codes,\n names=left.names,\n verify_integrity=False,\n )\n\n if right_lev_indexer is not None:\n right_indexer = algos.take_nd(\n right_lev_indexer, join_index.codes[level], allow_fill=False\n )\n else:\n right_indexer = join_index.codes[level]\n\n if flip_order:\n left_indexer, right_indexer = right_indexer, left_indexer\n\n if return_indexers:\n left_indexer = (\n None if left_indexer is None else ensure_platform_int(left_indexer)\n )\n right_indexer = (\n None if right_indexer is None else ensure_platform_int(right_indexer)\n )\n return join_index, left_indexer, right_indexer\n else:\n return join_index\n\n def _join_monotonic(self, other, how=\"left\", return_indexers=False):\n if self.equals(other):\n ret_index = other if how == \"right\" else self\n if return_indexers:\n return ret_index, None, None\n else:\n return ret_index\n\n sv = self._ndarray_values\n ov = other._ndarray_values\n\n if self.is_unique and other.is_unique:\n # We can perform much better than the general case\n if how == \"left\":\n join_index = self\n lidx = None\n ridx = self._left_indexer_unique(sv, ov)\n elif how == \"right\":\n join_index = other\n lidx = self._left_indexer_unique(ov, sv)\n ridx = None\n elif how == \"inner\":\n join_index, lidx, ridx = self._inner_indexer(sv, ov)\n join_index = self._wrap_joined_index(join_index, other)\n elif how == \"outer\":\n join_index, lidx, ridx = self._outer_indexer(sv, ov)\n join_index = self._wrap_joined_index(join_index, other)\n else:\n if how == \"left\":\n join_index, lidx, ridx = self._left_indexer(sv, ov)\n elif how == \"right\":\n join_index, ridx, lidx = self._left_indexer(ov, sv)\n elif how == \"inner\":\n join_index, lidx, ridx = self._inner_indexer(sv, ov)\n elif how == \"outer\":\n join_index, lidx, ridx = self._outer_indexer(sv, ov)\n join_index = self._wrap_joined_index(join_index, other)\n\n if return_indexers:\n lidx = None if lidx is None else ensure_platform_int(lidx)\n ridx = None if ridx is None else ensure_platform_int(ridx)\n return join_index, lidx, ridx\n else:\n return join_index\n\n def _wrap_joined_index(self, joined, other):\n name = get_op_result_name(self, other)\n return Index(joined, name=name)\n\n # --------------------------------------------------------------------\n # Uncategorized Methods\n\n @property\n def values(self):\n \"\"\"\n Return an array representing the data in the Index.\n\n .. 
warning::\n\n We recommend using :attr:`Index.array` or\n :meth:`Index.to_numpy`, depending on whether you need\n a reference to the underlying data or a NumPy array.\n\n Returns\n -------\n array: numpy.ndarray or ExtensionArray\n\n See Also\n --------\n Index.array : Reference to the underlying data.\n Index.to_numpy : A NumPy array representing the underlying data.\n \"\"\"\n return self._data.view(np.ndarray)\n\n @property\n def _values(self) -> Union[ExtensionArray, ABCIndexClass, np.ndarray]:\n # TODO(EA): remove index types as they become extension arrays\n \"\"\"\n The best array representation.\n\n This is an ndarray, ExtensionArray, or Index subclass. This differs\n from ``_ndarray_values``, which always returns an ndarray.\n\n Both ``_values`` and ``_ndarray_values`` are consistent between\n ``Series`` and ``Index``.\n\n It may differ from the public '.values' method.\n\n index | values | _values | _ndarray_values |\n ----------------- | --------------- | ------------- | --------------- |\n Index | ndarray | ndarray | ndarray |\n CategoricalIndex | Categorical | Categorical | ndarray[int] |\n DatetimeIndex | ndarray[M8ns] | ndarray[M8ns] | ndarray[M8ns] |\n DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |\n PeriodIndex | ndarray[object] | PeriodArray | ndarray[int] |\n IntervalIndex | IntervalArray | IntervalArray | ndarray[object] |\n\n See Also\n --------\n values\n _ndarray_values\n \"\"\"\n return self._data\n\n def _internal_get_values(self):\n \"\"\"\n Return `Index` data as an `numpy.ndarray`.\n\n Returns\n -------\n numpy.ndarray\n A one-dimensional numpy array of the `Index` values.\n\n See Also\n --------\n Index.values : The attribute that _internal_get_values wraps.\n\n Examples\n --------\n Getting the `Index` values of a `DataFrame`:\n\n >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n ... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])\n >>> df\n A B C\n a 1 2 3\n b 4 5 6\n c 7 8 9\n >>> df.index._internal_get_values()\n array(['a', 'b', 'c'], dtype=object)\n\n Standalone `Index` values:\n\n >>> idx = pd.Index(['1', '2', '3'])\n >>> idx._internal_get_values()\n array(['1', '2', '3'], dtype=object)\n\n `MultiIndex` arrays also have only one dimension:\n\n >>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],\n ... 
names=('number', 'letter'))\n >>> midx._internal_get_values()\n array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)\n >>> midx._internal_get_values().ndim\n 1\n \"\"\"\n return self.values\n\n @Appender(IndexOpsMixin.memory_usage.__doc__)\n def memory_usage(self, deep=False):\n result = super().memory_usage(deep=deep)\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result\n\n _index_shared_docs[\n \"where\"\n ] = \"\"\"\n Return an Index of same shape as self and whose corresponding\n entries are from self where cond is True and otherwise are from\n other.\n\n Parameters\n ----------\n cond : bool array-like with the same length as self\n other : scalar, or array-like\n\n Returns\n -------\n Index\n \"\"\"\n\n @Appender(_index_shared_docs[\"where\"])\n def where(self, cond, other=None):\n if other is None:\n other = self._na_value\n\n dtype = self.dtype\n values = self.values\n\n if is_bool(other) or is_bool_dtype(other):\n\n # bools force casting\n values = values.astype(object)\n dtype = None\n\n values = np.where(cond, values, other)\n\n if self._is_numeric_dtype and np.any(isna(values)):\n # We can't coerce to the numeric dtype of \"self\" (unless\n # it's float) if there are NaN values in our output.\n dtype = None\n\n return self._shallow_copy_with_infer(values, dtype=dtype)\n\n # construction helpers\n @classmethod\n def _scalar_data_error(cls, data):\n # We return the TypeError so that we can raise it from the constructor\n # in order to keep mypy happy\n return TypeError(\n f\"{cls.__name__}(...) must be called with a collection of some \"\n f\"kind, {repr(data)} was passed\"\n )\n\n @classmethod\n def _string_data_error(cls, data):\n raise TypeError(\n \"String dtype not supported, you may need \"\n \"to explicitly cast to a numeric type\"\n )\n\n def _coerce_scalar_to_index(self, item):\n \"\"\"\n We need to coerce a scalar to a compat for our index type.\n\n Parameters\n ----------\n item : scalar item to coerce\n \"\"\"\n dtype = self.dtype\n\n if self._is_numeric_dtype and isna(item):\n # We can't coerce to the numeric dtype of \"self\" (unless\n # it's float) if there are NaN values in our output.\n dtype = None\n\n return Index([item], dtype=dtype, **self._get_attributes_dict())\n\n def _to_safe_for_reshape(self):\n \"\"\"\n Convert to object if we are a categorical.\n \"\"\"\n return self\n\n def _convert_for_op(self, value):\n \"\"\"\n Convert value to be insertable to ndarray.\n \"\"\"\n return value\n\n def _assert_can_do_op(self, value):\n \"\"\"\n Check value is valid for scalar op.\n \"\"\"\n if not is_scalar(value):\n raise TypeError(f\"'value' must be a scalar, passed: {type(value).__name__}\")\n\n def _is_memory_usage_qualified(self) -> bool:\n \"\"\"\n Return a boolean if we need a qualified .info display.\n \"\"\"\n return self.is_object()\n\n def is_type_compatible(self, kind) -> bool:\n \"\"\"\n Whether the index type is compatible with the provided type.\n \"\"\"\n return kind == self.inferred_type\n\n _index_shared_docs[\n \"contains\"\n ] = \"\"\"\n Return a boolean indicating whether the provided key is in the index.\n\n Parameters\n ----------\n key : label\n The key to check if it is present in the index.\n\n Returns\n -------\n bool\n Whether the key search is in the index.\n\n See Also\n --------\n Index.isin : Returns an ndarray of boolean dtype indicating whether the\n list-like key is in the index.\n\n Examples\n --------\n >>> idx = pd.Index([1, 2, 3, 4])\n >>> idx\n Int64Index([1, 2, 3, 4], dtype='int64')\n\n 
>>> 2 in idx\n True\n >>> 6 in idx\n False\n \"\"\"\n\n @Appender(_index_shared_docs[\"contains\"] % _index_doc_kwargs)\n def __contains__(self, key) -> bool:\n hash(key)\n try:\n return key in self._engine\n except (OverflowError, TypeError, ValueError):\n return False\n\n def __hash__(self):\n raise TypeError(f\"unhashable type: {repr(type(self).__name__)}\")\n\n def __setitem__(self, key, value):\n raise TypeError(\"Index does not support mutable operations\")\n\n def __getitem__(self, key):\n \"\"\"\n Override numpy.ndarray's __getitem__ method to work as desired.\n\n This function adds lists and Series as valid boolean indexers\n (ndarrays only supports ndarray with dtype=bool).\n\n If resulting ndim != 1, plain ndarray is returned instead of\n corresponding `Index` subclass.\n\n \"\"\"\n # There's no custom logic to be implemented in __getslice__, so it's\n # not overloaded intentionally.\n getitem = self._data.__getitem__\n promote = self._shallow_copy\n\n if is_scalar(key):\n key = com.cast_scalar_indexer(key)\n return getitem(key)\n\n if isinstance(key, slice):\n # This case is separated from the conditional above to avoid\n # pessimization of basic indexing.\n return promote(getitem(key))\n\n if com.is_bool_indexer(key):\n key = np.asarray(key, dtype=bool)\n\n key = com.values_from_object(key)\n result = getitem(key)\n if not is_scalar(result):\n return promote(result)\n else:\n return result\n\n def _can_hold_identifiers_and_holds_name(self, name) -> bool:\n \"\"\"\n Faster check for ``name in self`` when we know `name` is a Python\n identifier (e.g. in NDFrame.__getattr__, which hits this to support\n . key lookup). For indexes that can't hold identifiers (everything\n but object & categorical) we just return False.\n\n https://github.com/pandas-dev/pandas/issues/19764\n \"\"\"\n if self.is_object() or self.is_categorical():\n return name in self\n return False\n\n def append(self, other):\n \"\"\"\n Append a collection of Index options together.\n\n Parameters\n ----------\n other : Index or list/tuple of indices\n\n Returns\n -------\n appended : Index\n \"\"\"\n\n to_concat = [self]\n\n if isinstance(other, (list, tuple)):\n to_concat = to_concat + list(other)\n else:\n to_concat.append(other)\n\n for obj in to_concat:\n if not isinstance(obj, Index):\n raise TypeError(\"all inputs must be Index\")\n\n names = {obj.name for obj in to_concat}\n name = None if len(names) > 1 else self.name\n\n return self._concat(to_concat, name)\n\n def _concat(self, to_concat, name):\n\n typs = _concat.get_dtype_kinds(to_concat)\n\n if len(typs) == 1:\n return self._concat_same_dtype(to_concat, name=name)\n return Index._concat_same_dtype(self, to_concat, name=name)\n\n def _concat_same_dtype(self, to_concat, name):\n \"\"\"\n Concatenate to_concat which has the same class.\n \"\"\"\n # must be overridden in specific classes\n klasses = (\n ABCDatetimeIndex,\n ABCTimedeltaIndex,\n ABCPeriodIndex,\n ExtensionArray,\n ABCIntervalIndex,\n )\n to_concat = [\n x.astype(object) if isinstance(x, klasses) else x for x in to_concat\n ]\n\n self = to_concat[0]\n attribs = self._get_attributes_dict()\n attribs[\"name\"] = name\n\n to_concat = [x._values if isinstance(x, Index) else x for x in to_concat]\n\n return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs)\n\n def putmask(self, mask, value):\n \"\"\"\n Return a new Index of the values set with the mask.\n\n Returns\n -------\n Index\n\n See Also\n --------\n numpy.ndarray.putmask\n \"\"\"\n values = self.values.copy()\n try:\n 
np.putmask(values, mask, self._convert_for_op(value))\n return self._shallow_copy(values)\n except (ValueError, TypeError) as err:\n if is_object_dtype(self):\n raise err\n\n # coerces to object\n return self.astype(object).putmask(mask, value)\n\n def equals(self, other) -> bool:\n \"\"\"\n Determine if two Index objects contain the same elements.\n\n Returns\n -------\n bool\n True if \"other\" is an Index and it has the same elements as calling\n index; False otherwise.\n \"\"\"\n if self.is_(other):\n return True\n\n if not isinstance(other, Index):\n return False\n\n if is_object_dtype(self) and not is_object_dtype(other):\n # if other is not object, use other's logic for coercion\n return other.equals(self)\n\n if isinstance(other, ABCMultiIndex):\n # d-level MultiIndex can equal d-tuple Index\n if not is_object_dtype(self.dtype):\n if self.nlevels != other.nlevels:\n return False\n\n return array_equivalent(\n com.values_from_object(self), com.values_from_object(other)\n )\n\n def identical(self, other) -> bool:\n \"\"\"\n Similar to equals, but check that other comparable attributes are\n also equal.\n\n Returns\n -------\n bool\n If two Index objects have equal elements and same type True,\n otherwise False.\n \"\"\"\n return (\n self.equals(other)\n and all(\n (\n getattr(self, c, None) == getattr(other, c, None)\n for c in self._comparables\n )\n )\n and type(self) == type(other)\n )\n\n def asof(self, label):\n \"\"\"\n Return the label from the index, or, if not present, the previous one.\n\n Assuming that the index is sorted, return the passed index label if it\n is in the index, or return the previous index label if the passed one\n is not in the index.\n\n Parameters\n ----------\n label : object\n The label up to which the method returns the latest index label.\n\n Returns\n -------\n object\n The passed label if it is in the index. The previous label if the\n passed label is not in the sorted index or `NaN` if there is no\n such label.\n\n See Also\n --------\n Series.asof : Return the latest value in a Series up to the\n passed index.\n merge_asof : Perform an asof merge (similar to left join but it\n matches on nearest key rather than equal key).\n Index.get_loc : An `asof` is a thin wrapper around `get_loc`\n with method='pad'.\n\n Examples\n --------\n `Index.asof` returns the latest index label up to the passed label.\n\n >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03'])\n >>> idx.asof('2014-01-01')\n '2013-12-31'\n\n If the label is in the index, the method returns the passed label.\n\n >>> idx.asof('2014-01-02')\n '2014-01-02'\n\n If all of the labels in the index are later than the passed label,\n NaN is returned.\n\n >>> idx.asof('1999-01-02')\n nan\n\n If the index is not sorted, an error is raised.\n\n >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02',\n ... 
'2014-01-03'])\n >>> idx_not_sorted.asof('2013-12-31')\n Traceback (most recent call last):\n ValueError: index must be monotonic increasing or decreasing\n \"\"\"\n try:\n loc = self.get_loc(label, method=\"pad\")\n except KeyError:\n return self._na_value\n else:\n if isinstance(loc, slice):\n loc = loc.indices(len(self))[-1]\n return self[loc]\n\n def asof_locs(self, where, mask):\n \"\"\"\n Find the locations (indices) of the labels from the index for\n every entry in the `where` argument.\n\n As in the `asof` function, if the label (a particular entry in\n `where`) is not in the index, the latest index label up to the\n passed label is chosen and its index returned.\n\n If all of the labels in the index are later than a label in `where`,\n -1 is returned.\n\n `mask` is used to ignore NA values in the index during calculation.\n\n Parameters\n ----------\n where : Index\n An Index consisting of an array of timestamps.\n mask : array-like\n Array of booleans denoting where values in the original\n data are not NA.\n\n Returns\n -------\n numpy.ndarray\n An array of locations (indices) of the labels from the Index\n which correspond to the return values of the `asof` function\n for every element in `where`.\n \"\"\"\n locs = self.values[mask].searchsorted(where.values, side=\"right\")\n locs = np.where(locs > 0, locs - 1, 0)\n\n result = np.arange(len(self))[mask].take(locs)\n\n first = mask.argmax()\n result[(locs == 0) & (where.values < self.values[first])] = -1\n\n return result\n\n def sort_values(self, return_indexer=False, ascending=True):\n \"\"\"\n Return a sorted copy of the index.\n\n Return a sorted copy of the index, and optionally return the indices\n that sorted the index itself.\n\n Parameters\n ----------\n return_indexer : bool, default False\n Should the indices that would sort the index be returned.\n ascending : bool, default True\n Should the index values be sorted in an ascending order.\n\n Returns\n -------\n sorted_index : pandas.Index\n Sorted copy of the index.\n indexer : numpy.ndarray, optional\n The indices that the index itself was sorted by.\n\n See Also\n --------\n Series.sort_values : Sort values of a Series.\n DataFrame.sort_values : Sort values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([10, 100, 1, 1000])\n >>> idx\n Int64Index([10, 100, 1, 1000], dtype='int64')\n\n Sort values in ascending order (default behavior).\n\n >>> idx.sort_values()\n Int64Index([1, 10, 100, 1000], dtype='int64')\n\n Sort values in descending order, and also get the indices `idx` was\n sorted by.\n\n >>> idx.sort_values(ascending=False, return_indexer=True)\n (Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))\n \"\"\"\n _as = self.argsort()\n if not ascending:\n _as = _as[::-1]\n\n sorted_index = self.take(_as)\n\n if return_indexer:\n return sorted_index, _as\n else:\n return sorted_index\n\n def sort(self, *args, **kwargs):\n \"\"\"\n Use sort_values instead.\n \"\"\"\n raise TypeError(\"cannot sort an Index object in-place, use sort_values instead\")\n\n def shift(self, periods=1, freq=None):\n \"\"\"\n Shift index by desired number of time frequency increments.\n\n This method is for shifting the values of datetime-like indexes\n by a specified time increment a given number of times.\n\n Parameters\n ----------\n periods : int, default 1\n Number of periods (or increments) to shift by,\n can be positive or negative.\n freq : pandas.DateOffset, pandas.Timedelta or str, optional\n Frequency increment to shift by.\n If None, the index is 
shifted by its own `freq` attribute.\n Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.\n\n Returns\n -------\n pandas.Index\n Shifted index.\n\n See Also\n --------\n Series.shift : Shift values of Series.\n\n Notes\n -----\n This method is only implemented for datetime-like index classes,\n i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.\n\n Examples\n --------\n Put the first 5 month starts of 2011 into an index.\n\n >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')\n >>> month_starts\n DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',\n '2011-05-01'],\n dtype='datetime64[ns]', freq='MS')\n\n Shift the index by 10 days.\n\n >>> month_starts.shift(10, freq='D')\n DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',\n '2011-05-11'],\n dtype='datetime64[ns]', freq=None)\n\n The default value of `freq` is the `freq` attribute of the index,\n which is 'MS' (month start) in this example.\n\n >>> month_starts.shift(10)\n DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',\n '2012-03-01'],\n dtype='datetime64[ns]', freq='MS')\n \"\"\"\n raise NotImplementedError(f\"Not supported for type {type(self).__name__}\")\n\n def argsort(self, *args, **kwargs):\n \"\"\"\n Return the integer indices that would sort the index.\n\n Parameters\n ----------\n *args\n Passed to `numpy.ndarray.argsort`.\n **kwargs\n Passed to `numpy.ndarray.argsort`.\n\n Returns\n -------\n numpy.ndarray\n Integer indices that would sort the index if used as\n an indexer.\n\n See Also\n --------\n numpy.argsort : Similar method for NumPy arrays.\n Index.sort_values : Return sorted copy of Index.\n\n Examples\n --------\n >>> idx = pd.Index(['b', 'a', 'd', 'c'])\n >>> idx\n Index(['b', 'a', 'd', 'c'], dtype='object')\n\n >>> order = idx.argsort()\n >>> order\n array([1, 0, 3, 2])\n\n >>> idx[order]\n Index(['a', 'b', 'c', 'd'], dtype='object')\n \"\"\"\n result = self.asi8\n if result is None:\n result = np.array(self)\n return result.argsort(*args, **kwargs)\n\n _index_shared_docs[\n \"get_value\"\n ] = \"\"\"\n Fast lookup of value from 1-dimensional ndarray. Only use this if you\n know what you're doing.\n\n Returns\n -------\n scalar\n A value in the Series with the index of the key value in self.\n \"\"\"\n\n @Appender(_index_shared_docs[\"get_value\"] % _index_doc_kwargs)\n def get_value(self, series, key):\n\n # if we have something that is Index-like, then\n # use this, e.g. 
DatetimeIndex\n # Things like `Series._get_value` (via .at) pass the EA directly here.\n s = extract_array(series, extract_numpy=True)\n if isinstance(s, ExtensionArray):\n if is_scalar(key):\n # GH 20882, 21257\n # First try to convert the key to a location\n # If that fails, raise a KeyError if an integer\n # index, otherwise, see if key is an integer, and\n # try that\n try:\n iloc = self.get_loc(key)\n return s[iloc]\n except KeyError:\n if len(self) > 0 and (self.holds_integer() or self.is_boolean()):\n raise\n elif is_integer(key):\n return s[key]\n else:\n # if key is not a scalar, directly raise an error (the code below\n # would convert to numpy arrays and raise later any way) - GH29926\n raise InvalidIndexError(key)\n\n s = com.values_from_object(series)\n k = com.values_from_object(key)\n\n k = self._convert_scalar_indexer(k, kind=\"getitem\")\n try:\n return self._engine.get_value(s, k, tz=getattr(series.dtype, \"tz\", None))\n except KeyError as e1:\n if len(self) > 0 and (self.holds_integer() or self.is_boolean()):\n raise\n\n try:\n return libindex.get_value_at(s, key)\n except IndexError:\n raise\n except TypeError:\n # generator/iterator-like\n if is_iterator(key):\n raise InvalidIndexError(key)\n else:\n raise e1\n except Exception:\n raise e1\n except TypeError:\n # e.g. \"[False] is an invalid key\"\n if is_scalar(key):\n raise IndexError(key)\n raise InvalidIndexError(key)\n\n def set_value(self, arr, key, value):\n \"\"\"\n Fast lookup of value from 1-dimensional ndarray.\n\n .. deprecated:: 1.0\n\n Notes\n -----\n Only use this if you know what you're doing.\n \"\"\"\n warnings.warn(\n (\n \"The 'set_value' method is deprecated, and \"\n \"will be removed in a future version.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n self._engine.set_value(\n com.values_from_object(arr), com.values_from_object(key), value\n )\n\n _index_shared_docs[\n \"get_indexer_non_unique\"\n ] = \"\"\"\n Compute indexer and mask for new index given the current index. The\n indexer should be then used as an input to ndarray.take to align the\n current data to the new index.\n\n Parameters\n ----------\n target : %(target_klass)s\n\n Returns\n -------\n indexer : ndarray of int\n Integers from 0 to n - 1 indicating that the index at these\n positions matches the corresponding target values. 
Missing values\n in the target are marked by -1.\n missing : ndarray of int\n An indexer into the target of the values not found.\n These correspond to the -1 in the indexer array.\n \"\"\"\n\n @Appender(_index_shared_docs[\"get_indexer_non_unique\"] % _index_doc_kwargs)\n def get_indexer_non_unique(self, target):\n target = ensure_index(target)\n pself, ptarget = self._maybe_promote(target)\n if pself is not self or ptarget is not target:\n return pself.get_indexer_non_unique(ptarget)\n\n if is_categorical(target):\n tgt_values = np.asarray(target)\n elif self.is_all_dates and target.is_all_dates: # GH 30399\n tgt_values = target.asi8\n else:\n tgt_values = target._ndarray_values\n\n indexer, missing = self._engine.get_indexer_non_unique(tgt_values)\n return ensure_platform_int(indexer), missing\n\n def get_indexer_for(self, target, **kwargs):\n \"\"\"\n Guaranteed return of an indexer even when non-unique.\n\n This dispatches to get_indexer or get_indexer_non_unique\n as appropriate.\n\n Returns\n -------\n numpy.ndarray\n List of indices.\n \"\"\"\n if self.is_unique:\n return self.get_indexer(target, **kwargs)\n indexer, _ = self.get_indexer_non_unique(target, **kwargs)\n return indexer\n\n def _maybe_promote(self, other):\n # A hack, but it works\n\n if self.inferred_type == \"date\" and isinstance(other, ABCDatetimeIndex):\n return type(other)(self), other\n elif self.inferred_type == \"boolean\":\n if not is_object_dtype(self.dtype):\n return self.astype(\"object\"), other.astype(\"object\")\n return self, other\n\n def groupby(self, values) -> Dict[Hashable, np.ndarray]:\n \"\"\"\n Group the index labels by a given array of values.\n\n Parameters\n ----------\n values : array\n Values used to determine the groups.\n\n Returns\n -------\n dict\n {group name -> group labels}\n \"\"\"\n\n # TODO: if we are a MultiIndex, we can do better\n # that converting to tuples\n if isinstance(values, ABCMultiIndex):\n values = values.values\n values = ensure_categorical(values)\n result = values._reverse_indexer()\n\n # map to the label\n result = {k: self.take(v) for k, v in result.items()}\n\n return result\n\n def map(self, mapper, na_action=None):\n \"\"\"\n Map values using input correspondence (a dict, Series, or function).\n\n Parameters\n ----------\n mapper : function, dict, or Series\n Mapping correspondence.\n na_action : {None, 'ignore'}\n If 'ignore', propagate NA values, without passing them to the\n mapping correspondence.\n\n Returns\n -------\n applied : Union[Index, MultiIndex], inferred\n The output of the mapping function applied to the index.\n If the function returns a tuple with more than one element\n a MultiIndex will be returned.\n \"\"\"\n\n from .multi import MultiIndex\n\n new_values = super()._map_values(mapper, na_action=na_action)\n\n attributes = self._get_attributes_dict()\n\n # we can return a MultiIndex\n if new_values.size and isinstance(new_values[0], tuple):\n if isinstance(self, MultiIndex):\n names = self.names\n elif attributes.get(\"name\"):\n names = [attributes.get(\"name\")] * len(new_values[0])\n else:\n names = None\n return MultiIndex.from_tuples(new_values, names=names)\n\n attributes[\"copy\"] = False\n if not new_values.size:\n # empty\n attributes[\"dtype\"] = self.dtype\n\n return Index(new_values, **attributes)\n\n def isin(self, values, level=None):\n \"\"\"\n Return a boolean array where the index values are in `values`.\n\n Compute boolean array of whether each index value is found in the\n passed set of values. 
The length of the returned boolean array matches\n the length of the index.\n\n Parameters\n ----------\n values : set or list-like\n Sought values.\n level : str or int, optional\n Name or position of the index level to use (if the index is a\n `MultiIndex`).\n\n Returns\n -------\n is_contained : ndarray\n NumPy array of boolean values.\n\n See Also\n --------\n Series.isin : Same for Series.\n DataFrame.isin : Same method for DataFrames.\n\n Notes\n -----\n In the case of `MultiIndex` you must either specify `values` as a\n list-like object containing tuples that are the same length as the\n number of levels, or specify `level`. Otherwise it will raise a\n ``ValueError``.\n\n If `level` is specified:\n\n - if it is the name of one *and only one* index level, use that level;\n - otherwise it should be a number indicating level position.\n\n Examples\n --------\n >>> idx = pd.Index([1,2,3])\n >>> idx\n Int64Index([1, 2, 3], dtype='int64')\n\n Check whether each index value in a list of values.\n >>> idx.isin([1, 4])\n array([ True, False, False])\n\n >>> midx = pd.MultiIndex.from_arrays([[1,2,3],\n ... ['red', 'blue', 'green']],\n ... names=('number', 'color'))\n >>> midx\n MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],\n codes=[[0, 1, 2], [2, 0, 1]],\n names=['number', 'color'])\n\n Check whether the strings in the 'color' level of the MultiIndex\n are in a list of colors.\n\n >>> midx.isin(['red', 'orange', 'yellow'], level='color')\n array([ True, False, False])\n\n To check across the levels of a MultiIndex, pass a list of tuples:\n\n >>> midx.isin([(1, 'red'), (3, 'red')])\n array([ True, False, False])\n\n For a DatetimeIndex, string values in `values` are converted to\n Timestamps.\n\n >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']\n >>> dti = pd.to_datetime(dates)\n >>> dti\n DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],\n dtype='datetime64[ns]', freq=None)\n\n >>> dti.isin(['2000-03-11'])\n array([ True, False, False])\n \"\"\"\n if level is not None:\n self._validate_index_level(level)\n return algos.isin(self, values)\n\n def _get_string_slice(self, key, use_lhs=True, use_rhs=True):\n # this is for partial string indexing,\n # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex\n raise NotImplementedError\n\n def slice_indexer(self, start=None, end=None, step=None, kind=None):\n \"\"\"\n For an ordered or unique index, compute the slice indexer for input\n labels and step.\n\n Parameters\n ----------\n start : label, default None\n If None, defaults to the beginning.\n end : label, default None\n If None, defaults to the end.\n step : int, default None\n kind : str, default None\n\n Returns\n -------\n indexer : slice\n\n Raises\n ------\n KeyError : If key does not exist, or key is not unique and index is\n not ordered.\n\n Notes\n -----\n This function assumes that the data is sorted, so use at your own peril\n\n Examples\n --------\n This is a method on all index types. 
For example you can do:\n\n >>> idx = pd.Index(list('abcd'))\n >>> idx.slice_indexer(start='b', end='c')\n slice(1, 3)\n\n >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])\n >>> idx.slice_indexer(start='b', end=('c', 'g'))\n slice(1, 3)\n \"\"\"\n start_slice, end_slice = self.slice_locs(start, end, step=step, kind=kind)\n\n # return a slice\n if not is_scalar(start_slice):\n raise AssertionError(\"Start slice bound is non-scalar\")\n if not is_scalar(end_slice):\n raise AssertionError(\"End slice bound is non-scalar\")\n\n return slice(start_slice, end_slice, step)\n\n def _maybe_cast_indexer(self, key):\n \"\"\"\n If we have a float key and are not a floating index, then try to cast\n to an int if equivalent.\n \"\"\"\n\n if is_float(key) and not self.is_floating():\n try:\n ckey = int(key)\n if ckey == key:\n key = ckey\n except (OverflowError, ValueError, TypeError):\n pass\n return key\n\n def _validate_indexer(self, form, key, kind):\n \"\"\"\n If we are positional indexer, validate that we have appropriate\n typed bounds must be an integer.\n \"\"\"\n assert kind in [\"ix\", \"loc\", \"getitem\", \"iloc\"]\n\n if key is None:\n pass\n elif is_integer(key):\n pass\n elif kind in [\"iloc\", \"getitem\"]:\n self._invalid_indexer(form, key)\n return key\n\n _index_shared_docs[\n \"_maybe_cast_slice_bound\"\n ] = \"\"\"\n This function should be overloaded in subclasses that allow non-trivial\n casting on label-slice bounds, e.g. datetime-like indices allowing\n strings containing formatted datetimes.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'ix', 'loc', 'getitem'}\n\n Returns\n -------\n label : object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n \"\"\"\n\n @Appender(_index_shared_docs[\"_maybe_cast_slice_bound\"])\n def _maybe_cast_slice_bound(self, label, side, kind):\n assert kind in [\"ix\", \"loc\", \"getitem\", None]\n\n # We are a plain index here (sub-class override this method if they\n # wish to have special treatment for floats/ints, e.g. 
Float64Index and\n # datetimelike Indexes\n # reject them\n if is_float(label):\n if not (kind in [\"ix\"] and (self.holds_integer() or self.is_floating())):\n self._invalid_indexer(\"slice\", label)\n\n # we are trying to find integer bounds on a non-integer based index\n # this is rejected (generally .loc gets you here)\n elif is_integer(label):\n self._invalid_indexer(\"slice\", label)\n\n return label\n\n def _searchsorted_monotonic(self, label, side=\"left\"):\n if self.is_monotonic_increasing:\n return self.searchsorted(label, side=side)\n elif self.is_monotonic_decreasing:\n # np.searchsorted expects ascending sort order, have to reverse\n # everything for it to work (element ordering, search side and\n # resulting value).\n pos = self[::-1].searchsorted(\n label, side=\"right\" if side == \"left\" else \"left\"\n )\n return len(self) - pos\n\n raise ValueError(\"index must be monotonic increasing or decreasing\")\n\n def get_slice_bound(self, label, side, kind):\n \"\"\"\n Calculate slice bound that corresponds to given label.\n\n Returns leftmost (one-past-the-rightmost if ``side=='right'``) position\n of given label.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'ix', 'loc', 'getitem'}\n\n Returns\n -------\n int\n Index of label.\n \"\"\"\n assert kind in [\"ix\", \"loc\", \"getitem\", None]\n\n if side not in (\"left\", \"right\"):\n raise ValueError(\n f\"Invalid value for side kwarg, must be either\"\n f\" 'left' or 'right': {side}\"\n )\n\n original_label = label\n\n # For datetime indices label may be a string that has to be converted\n # to datetime boundary according to its resolution.\n label = self._maybe_cast_slice_bound(label, side, kind)\n\n # we need to look up the label\n try:\n slc = self.get_loc(label)\n except KeyError as err:\n try:\n return self._searchsorted_monotonic(label, side)\n except ValueError:\n # raise the original KeyError\n raise err\n\n if isinstance(slc, np.ndarray):\n # get_loc may return a boolean array or an array of indices, which\n # is OK as long as they are representable by a slice.\n if is_bool_dtype(slc):\n slc = lib.maybe_booleans_to_slice(slc.view(\"u1\"))\n else:\n slc = lib.maybe_indices_to_slice(slc.astype(\"i8\"), len(self))\n if isinstance(slc, np.ndarray):\n raise KeyError(\n f\"Cannot get {side} slice bound for non-unique \"\n f\"label: {repr(original_label)}\"\n )\n\n if isinstance(slc, slice):\n if side == \"left\":\n return slc.start\n else:\n return slc.stop\n else:\n if side == \"right\":\n return slc + 1\n else:\n return slc\n\n def slice_locs(self, start=None, end=None, step=None, kind=None):\n \"\"\"\n Compute slice locations for input labels.\n\n Parameters\n ----------\n start : label, default None\n If None, defaults to the beginning.\n end : label, default None\n If None, defaults to the end.\n step : int, defaults None\n If None, defaults to 1.\n kind : {'ix', 'loc', 'getitem'} or None\n\n Returns\n -------\n start, end : int\n\n See Also\n --------\n Index.get_loc : Get location for a single label.\n\n Notes\n -----\n This method only works if the index is monotonic or unique.\n\n Examples\n --------\n >>> idx = pd.Index(list('abcd'))\n >>> idx.slice_locs(start='b', end='c')\n (1, 3)\n \"\"\"\n inc = step is None or step >= 0\n\n if not inc:\n # If it's a reverse slice, temporarily swap bounds.\n start, end = end, start\n\n # GH 16785: If start and end happen to be date strings with UTC offsets\n # attempt to parse and check that the offsets are the same\n if isinstance(start, 
(str, datetime)) and isinstance(end, (str, datetime)):\n try:\n ts_start = Timestamp(start)\n ts_end = Timestamp(end)\n except (ValueError, TypeError):\n pass\n else:\n if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):\n raise ValueError(\"Both dates must have the same UTC offset\")\n\n start_slice = None\n if start is not None:\n start_slice = self.get_slice_bound(start, \"left\", kind)\n if start_slice is None:\n start_slice = 0\n\n end_slice = None\n if end is not None:\n end_slice = self.get_slice_bound(end, \"right\", kind)\n if end_slice is None:\n end_slice = len(self)\n\n if not inc:\n # Bounds at this moment are swapped, swap them back and shift by 1.\n #\n # slice_locs('B', 'A', step=-1): s='B', e='A'\n #\n # s='A' e='B'\n # AFTER SWAP: | |\n # v ------------------> V\n # -----------------------------------\n # | | |A|A|A|A| | | | | |B|B| | | | |\n # -----------------------------------\n # ^ <------------------ ^\n # SHOULD BE: | |\n # end=s-1 start=e-1\n #\n end_slice, start_slice = start_slice - 1, end_slice - 1\n\n # i == -1 triggers ``len(self) + i`` selection that points to the\n # last element, not before-the-first one, subtracting len(self)\n # compensates that.\n if end_slice == -1:\n end_slice -= len(self)\n if start_slice == -1:\n start_slice -= len(self)\n\n return start_slice, end_slice\n\n def delete(self, loc):\n \"\"\"\n Make new Index with passed location(-s) deleted.\n\n Returns\n -------\n new_index : Index\n \"\"\"\n return self._shallow_copy(np.delete(self._data, loc))\n\n def insert(self, loc, item):\n \"\"\"\n Make new Index inserting new item at location.\n\n Follows Python list.append semantics for negative values.\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n new_index : Index\n \"\"\"\n _self = np.asarray(self)\n item = self._coerce_scalar_to_index(item)._ndarray_values\n idx = np.concatenate((_self[:loc], item, _self[loc:]))\n return self._shallow_copy_with_infer(idx)\n\n def drop(self, labels, errors=\"raise\"):\n \"\"\"\n Make new Index with passed list of labels deleted.\n\n Parameters\n ----------\n labels : array-like\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n Returns\n -------\n dropped : Index\n\n Raises\n ------\n KeyError\n If not all of the labels are found in the selected axis\n \"\"\"\n arr_dtype = \"object\" if self.dtype == \"object\" else None\n labels = com.index_labels_to_array(labels, dtype=arr_dtype)\n indexer = self.get_indexer(labels)\n mask = indexer == -1\n if mask.any():\n if errors != \"ignore\":\n raise KeyError(f\"{labels[mask]} not found in axis\")\n indexer = indexer[~mask]\n return self.delete(indexer)\n\n # --------------------------------------------------------------------\n # Generated Arithmetic, Comparison, and Unary Methods\n\n @classmethod\n def _add_comparison_methods(cls):\n \"\"\"\n Add in comparison methods.\n \"\"\"\n cls.__eq__ = _make_comparison_op(operator.eq, cls)\n cls.__ne__ = _make_comparison_op(operator.ne, cls)\n cls.__lt__ = _make_comparison_op(operator.lt, cls)\n cls.__gt__ = _make_comparison_op(operator.gt, cls)\n cls.__le__ = _make_comparison_op(operator.le, cls)\n cls.__ge__ = _make_comparison_op(operator.ge, cls)\n\n @classmethod\n def _add_numeric_methods_add_sub_disabled(cls):\n \"\"\"\n Add in the numeric add/sub methods to disable.\n \"\"\"\n cls.__add__ = make_invalid_op(\"__add__\")\n cls.__radd__ = make_invalid_op(\"__radd__\")\n cls.__iadd__ = make_invalid_op(\"__iadd__\")\n 
cls.__sub__ = make_invalid_op(\"__sub__\")\n cls.__rsub__ = make_invalid_op(\"__rsub__\")\n cls.__isub__ = make_invalid_op(\"__isub__\")\n\n @classmethod\n def _add_numeric_methods_disabled(cls):\n \"\"\"\n Add in numeric methods to disable other than add/sub.\n \"\"\"\n cls.__pow__ = make_invalid_op(\"__pow__\")\n cls.__rpow__ = make_invalid_op(\"__rpow__\")\n cls.__mul__ = make_invalid_op(\"__mul__\")\n cls.__rmul__ = make_invalid_op(\"__rmul__\")\n cls.__floordiv__ = make_invalid_op(\"__floordiv__\")\n cls.__rfloordiv__ = make_invalid_op(\"__rfloordiv__\")\n cls.__truediv__ = make_invalid_op(\"__truediv__\")\n cls.__rtruediv__ = make_invalid_op(\"__rtruediv__\")\n cls.__mod__ = make_invalid_op(\"__mod__\")\n cls.__divmod__ = make_invalid_op(\"__divmod__\")\n cls.__neg__ = make_invalid_op(\"__neg__\")\n cls.__pos__ = make_invalid_op(\"__pos__\")\n cls.__abs__ = make_invalid_op(\"__abs__\")\n cls.__inv__ = make_invalid_op(\"__inv__\")\n\n @classmethod\n def _add_numeric_methods_binary(cls):\n \"\"\"\n Add in numeric methods.\n \"\"\"\n cls.__add__ = _make_arithmetic_op(operator.add, cls)\n cls.__radd__ = _make_arithmetic_op(ops.radd, cls)\n cls.__sub__ = _make_arithmetic_op(operator.sub, cls)\n cls.__rsub__ = _make_arithmetic_op(ops.rsub, cls)\n cls.__rpow__ = _make_arithmetic_op(ops.rpow, cls)\n cls.__pow__ = _make_arithmetic_op(operator.pow, cls)\n\n cls.__truediv__ = _make_arithmetic_op(operator.truediv, cls)\n cls.__rtruediv__ = _make_arithmetic_op(ops.rtruediv, cls)\n\n # TODO: rmod? rdivmod?\n cls.__mod__ = _make_arithmetic_op(operator.mod, cls)\n cls.__floordiv__ = _make_arithmetic_op(operator.floordiv, cls)\n cls.__rfloordiv__ = _make_arithmetic_op(ops.rfloordiv, cls)\n cls.__divmod__ = _make_arithmetic_op(divmod, cls)\n cls.__mul__ = _make_arithmetic_op(operator.mul, cls)\n cls.__rmul__ = _make_arithmetic_op(ops.rmul, cls)\n\n @classmethod\n def _add_numeric_methods_unary(cls):\n \"\"\"\n Add in numeric unary methods.\n \"\"\"\n\n def _make_evaluate_unary(op, opstr):\n def _evaluate_numeric_unary(self):\n\n attrs = self._get_attributes_dict()\n return Index(op(self.values), **attrs)\n\n _evaluate_numeric_unary.__name__ = opstr\n return _evaluate_numeric_unary\n\n cls.__neg__ = _make_evaluate_unary(operator.neg, \"__neg__\")\n cls.__pos__ = _make_evaluate_unary(operator.pos, \"__pos__\")\n cls.__abs__ = _make_evaluate_unary(np.abs, \"__abs__\")\n cls.__inv__ = _make_evaluate_unary(lambda x: -x, \"__inv__\")\n\n @classmethod\n def _add_numeric_methods(cls):\n cls._add_numeric_methods_unary()\n cls._add_numeric_methods_binary()\n\n @classmethod\n def _add_logical_methods(cls):\n \"\"\"\n Add in logical methods.\n \"\"\"\n _doc = \"\"\"\n %(desc)s\n\n Parameters\n ----------\n *args\n These parameters will be passed to numpy.%(outname)s.\n **kwargs\n These parameters will be passed to numpy.%(outname)s.\n\n Returns\n -------\n %(outname)s : bool or array_like (if axis is specified)\n A single element array_like may be converted to bool.\"\"\"\n\n _index_shared_docs[\"index_all\"] = dedent(\n \"\"\"\n\n See Also\n --------\n Index.any : Return whether any element in an Index is True.\n Series.any : Return whether any element in a Series is True.\n Series.all : Return whether all elements in a Series are True.\n\n Notes\n -----\n Not a Number (NaN), positive infinity and negative infinity\n evaluate to True because these are not equal to zero.\n\n Examples\n --------\n **all**\n\n True, because nonzero integers are considered True.\n\n >>> pd.Index([1, 2, 3]).all()\n True\n\n False, 
because ``0`` is considered False.\n\n >>> pd.Index([0, 1, 2]).all()\n False\n\n **any**\n\n True, because ``1`` is considered True.\n\n >>> pd.Index([0, 0, 1]).any()\n True\n\n False, because ``0`` is considered False.\n\n >>> pd.Index([0, 0, 0]).any()\n False\n \"\"\"\n )\n\n _index_shared_docs[\"index_any\"] = dedent(\n \"\"\"\n\n See Also\n --------\n Index.all : Return whether all elements are True.\n Series.all : Return whether all elements are True.\n\n Notes\n -----\n Not a Number (NaN), positive infinity and negative infinity\n evaluate to True because these are not equal to zero.\n\n Examples\n --------\n >>> index = pd.Index([0, 1, 2])\n >>> index.any()\n True\n\n >>> index = pd.Index([0, 0, 0])\n >>> index.any()\n False\n \"\"\"\n )\n\n def _make_logical_function(name, desc, f):\n @Substitution(outname=name, desc=desc)\n @Appender(_index_shared_docs[\"index_\" + name])\n @Appender(_doc)\n def logical_func(self, *args, **kwargs):\n result = f(self.values)\n if (\n isinstance(result, (np.ndarray, ABCSeries, Index))\n and result.ndim == 0\n ):\n # return NumPy type\n return result.dtype.type(result.item())\n else: # pragma: no cover\n return result\n\n logical_func.__name__ = name\n return logical_func\n\n cls.all = _make_logical_function(\n \"all\", \"Return whether all elements are True.\", np.all\n )\n cls.any = _make_logical_function(\n \"any\", \"Return whether any element is True.\", np.any\n )\n\n @classmethod\n def _add_logical_methods_disabled(cls):\n \"\"\"\n Add in logical methods to disable.\n \"\"\"\n cls.all = make_invalid_op(\"all\")\n cls.any = make_invalid_op(\"any\")\n\n @property\n def shape(self):\n \"\"\"\n Return a tuple of the shape of the underlying data.\n \"\"\"\n # not using \"(len(self), )\" to return \"correct\" shape if the values\n # consists of a >1 D array (see GH-27775)\n # overridden in MultiIndex.shape to avoid materializing the values\n return self._values.shape\n\n\nIndex._add_numeric_methods_disabled()\nIndex._add_logical_methods()\nIndex._add_comparison_methods()\n\n\ndef ensure_index_from_sequences(sequences, names=None):\n \"\"\"\n Construct an index from sequences of data.\n\n A single sequence returns an Index. 
Many sequences returns a\n MultiIndex.\n\n Parameters\n ----------\n sequences : sequence of sequences\n names : sequence of str\n\n Returns\n -------\n index : Index or MultiIndex\n\n Examples\n --------\n >>> ensure_index_from_sequences([[1, 2, 3]], names=['name'])\n Int64Index([1, 2, 3], dtype='int64', name='name')\n\n >>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']],\n names=['L1', 'L2'])\n MultiIndex([('a', 'a'),\n ('a', 'b')],\n names=['L1', 'L2'])\n\n See Also\n --------\n ensure_index\n \"\"\"\n from .multi import MultiIndex\n\n if len(sequences) == 1:\n if names is not None:\n names = names[0]\n return Index(sequences[0], name=names)\n else:\n return MultiIndex.from_arrays(sequences, names=names)\n\n\ndef ensure_index(index_like, copy=False):\n \"\"\"\n Ensure that we have an index from some index-like object.\n\n Parameters\n ----------\n index : sequence\n An Index or other sequence\n copy : bool\n\n Returns\n -------\n index : Index or MultiIndex\n\n Examples\n --------\n >>> ensure_index(['a', 'b'])\n Index(['a', 'b'], dtype='object')\n\n >>> ensure_index([('a', 'a'), ('b', 'c')])\n Index([('a', 'a'), ('b', 'c')], dtype='object')\n\n >>> ensure_index([['a', 'a'], ['b', 'c']])\n MultiIndex([('a', 'b'),\n ('a', 'c')],\n dtype='object')\n )\n\n See Also\n --------\n ensure_index_from_sequences\n \"\"\"\n if isinstance(index_like, Index):\n if copy:\n index_like = index_like.copy()\n return index_like\n if hasattr(index_like, \"name\"):\n return Index(index_like, name=index_like.name, copy=copy)\n\n if is_iterator(index_like):\n index_like = list(index_like)\n\n # must check for exactly list here because of strict type\n # check in clean_index_list\n if isinstance(index_like, list):\n if type(index_like) != list:\n index_like = list(index_like)\n\n converted, all_arrays = lib.clean_index_list(index_like)\n\n if len(converted) > 0 and all_arrays:\n from .multi import MultiIndex\n\n return MultiIndex.from_arrays(converted)\n else:\n index_like = converted\n else:\n # clean_index_list does the equivalent of copying\n # so only need to do this if not list instance\n if copy:\n from copy import copy\n\n index_like = copy(index_like)\n\n return Index(index_like)\n\n\ndef _ensure_has_len(seq):\n \"\"\"\n If seq is an iterator, put its values into a list.\n \"\"\"\n try:\n len(seq)\n except TypeError:\n return list(seq)\n else:\n return seq\n\n\ndef _trim_front(strings):\n \"\"\"\n Trims zeros and decimal points.\n \"\"\"\n trimmed = strings\n while len(strings) > 0 and all(x[0] == \" \" for x in trimmed):\n trimmed = [x[1:] for x in trimmed]\n return trimmed\n\n\ndef _validate_join_method(method):\n if method not in [\"left\", \"right\", \"inner\", \"outer\"]:\n raise ValueError(f\"do not recognize join method {method}\")\n\n\ndef default_index(n):\n from pandas.core.indexes.range import RangeIndex\n\n return RangeIndex(0, n, name=None)\n\n\ndef maybe_extract_name(name, obj, cls) -> Optional[Hashable]:\n \"\"\"\n If no name is passed, then extract it from data, validating hashability.\n \"\"\"\n if name is None and isinstance(obj, (Index, ABCSeries)):\n # Note we don't just check for \"name\" attribute since that would\n # pick up e.g. 
dtype.name\n name = obj.name\n\n # GH#29069\n if not is_hashable(name):\n raise TypeError(f\"{cls.__name__}.name must be a hashable type\")\n\n return name\n\n\ndef _maybe_cast_with_dtype(data: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:\n \"\"\"\n If a dtype is passed, cast to the closest matching dtype that is supported\n by Index.\n\n Parameters\n ----------\n data : np.ndarray\n dtype : np.dtype\n copy : bool\n\n Returns\n -------\n np.ndarray\n \"\"\"\n # we need to avoid having numpy coerce\n # things that look like ints/floats to ints unless\n # they are actually ints, e.g. '0' and 0.0\n # should not be coerced\n # GH 11836\n if is_integer_dtype(dtype):\n inferred = lib.infer_dtype(data, skipna=False)\n if inferred == \"integer\":\n data = maybe_cast_to_integer_array(data, dtype, copy=copy)\n elif inferred in [\"floating\", \"mixed-integer-float\"]:\n if isna(data).any():\n raise ValueError(\"cannot convert float NaN to integer\")\n\n if inferred == \"mixed-integer-float\":\n data = maybe_cast_to_integer_array(data, dtype)\n\n # If we are actually all equal to integers,\n # then coerce to integer.\n try:\n data = _try_convert_to_int_array(data, copy, dtype)\n except ValueError:\n data = np.array(data, dtype=np.float64, copy=copy)\n\n elif inferred == \"string\":\n pass\n else:\n data = data.astype(dtype)\n elif is_float_dtype(dtype):\n inferred = lib.infer_dtype(data, skipna=False)\n if inferred == \"string\":\n pass\n else:\n data = data.astype(dtype)\n else:\n data = np.array(data, dtype=dtype, copy=copy)\n\n return data\n\n\ndef _maybe_cast_data_without_dtype(subarr):\n \"\"\"\n If we have an arraylike input but no passed dtype, try to infer\n a supported dtype.\n\n Parameters\n ----------\n subarr : np.ndarray, Index, or Series\n\n Returns\n -------\n converted : np.ndarray or ExtensionArray\n dtype : np.dtype or ExtensionDtype\n \"\"\"\n # Runtime import needed bc IntervalArray imports Index\n from pandas.core.arrays import (\n IntervalArray,\n PeriodArray,\n DatetimeArray,\n TimedeltaArray,\n )\n\n inferred = lib.infer_dtype(subarr, skipna=False)\n\n if inferred == \"integer\":\n try:\n data = _try_convert_to_int_array(subarr, False, None)\n return data, data.dtype\n except ValueError:\n pass\n\n return subarr, object\n\n elif inferred in [\"floating\", \"mixed-integer-float\", \"integer-na\"]:\n # TODO: Returns IntegerArray for integer-na case in the future\n return subarr, np.float64\n\n elif inferred == \"interval\":\n try:\n data = IntervalArray._from_sequence(subarr, copy=False)\n return data, data.dtype\n except ValueError:\n # GH27172: mixed closed Intervals --> object dtype\n pass\n elif inferred == \"boolean\":\n # don't support boolean explicitly ATM\n pass\n elif inferred != \"string\":\n if inferred.startswith(\"datetime\"):\n try:\n data = DatetimeArray._from_sequence(subarr, copy=False)\n return data, data.dtype\n except (ValueError, OutOfBoundsDatetime):\n # GH 27011\n # If we have mixed timezones, just send it\n # down the base constructor\n pass\n\n elif inferred.startswith(\"timedelta\"):\n data = TimedeltaArray._from_sequence(subarr, copy=False)\n return data, data.dtype\n elif inferred == \"period\":\n try:\n data = PeriodArray._from_sequence(subarr)\n return data, data.dtype\n except IncompatibleFrequency:\n pass\n\n return subarr, subarr.dtype\n\n\ndef _try_convert_to_int_array(\n data: np.ndarray, copy: bool, dtype: np.dtype\n) -> np.ndarray:\n \"\"\"\n Attempt to convert an array of data into an integer array.\n\n Parameters\n ----------\n 
data : The data to convert.\n copy : bool\n Whether to copy the data or not.\n dtype : np.dtype\n\n Returns\n -------\n int_array : data converted to either an ndarray[int64] or ndarray[uint64]\n\n Raises\n ------\n ValueError if the conversion was not successful.\n \"\"\"\n\n if not is_unsigned_integer_dtype(dtype):\n # skip int64 conversion attempt if uint-like dtype is passed, as\n # this could return Int64Index when UInt64Index is what's desired\n try:\n res = data.astype(\"i8\", copy=False)\n if (res == data).all():\n return res # TODO: might still need to copy\n except (OverflowError, TypeError, ValueError):\n pass\n\n # Conversion to int64 failed (possibly due to overflow) or was skipped,\n # so let's try now with uint64.\n try:\n res = data.astype(\"u8\", copy=False)\n if (res == data).all():\n return res # TODO: might still need to copy\n except (OverflowError, TypeError, ValueError):\n pass\n\n raise ValueError\n"
] |
[
[
"pandas.PeriodIndex",
"pandas.core.indexes.range.RangeIndex",
"pandas.core.dtypes.common.ensure_object",
"numpy.where",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.core.common.cast_scalar_indexer",
"pandas.core.arrays.PeriodArray._from_sequence",
"pandas.core.dtypes.common.is_iterator",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas._libs.join.outer_join_indexer",
"pandas.core.dtypes.common.is_list_like",
"numpy.delete",
"numpy.array",
"pandas.core.algorithms.take",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.TimedeltaIndex",
"pandas.core.arrays.DatetimeArray._from_sequence",
"pandas.core.dtypes.missing.isna",
"pandas.io.formats.printing.pprint_thing",
"pandas.Series",
"pandas._libs.tslibs.Timestamp",
"numpy.asarray",
"pandas._libs.join.inner_join_indexer",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"numpy.concatenate",
"pandas._libs.lib.clean_index_list",
"pandas.core.dtypes.common.is_unsigned_integer_dtype",
"pandas.core.common.asarray_tuplesafe",
"pandas.io.formats.format.format_array",
"pandas.core.algorithms.take_nd",
"pandas.core.algorithms.safe_sort",
"numpy.errstate",
"pandas._libs.algos.groupsort_indexer",
"pandas.core.arrays.TimedeltaArray._from_sequence",
"pandas.core.dtypes.common.is_integer",
"pandas._libs.lib.infer_dtype",
"numpy.ndarray.__setstate__",
"pandas._libs.lib.item_from_zerodim",
"numpy.empty",
"pandas.core.dtypes.cast.maybe_cast_to_integer_array",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.dtypes.common.ensure_categorical",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas._libs.lib.is_scalar",
"pandas.core.indexes.frozen.FrozenList",
"pandas.core.indexes.range.RangeIndex.from_range",
"pandas.core.reshape.merge._get_join_indexers",
"pandas.core.common.values_from_object",
"pandas._libs.index.get_value_at",
"numpy.hstack",
"pandas.util._decorators.Substitution",
"pandas.core.common.not_none",
"pandas.DatetimeIndex",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.concat.concat_compat",
"numpy.repeat",
"pandas._libs.tslibs.timezones.tz_compare",
"pandas.core.ops.invalid.make_invalid_op",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas._libs.join.left_join_indexer_unique",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.is_categorical",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.dtypes.common.is_hashable",
"pandas.core.dtypes.common.is_period_dtype",
"pandas.core.indexes.period._new_PeriodIndex",
"pandas.core.reshape.merge._restore_dropped_levels_multijoin",
"pandas.core.arrays.IntervalArray._from_sequence",
"pandas.core.ops.invalid_comparison",
"pandas.core.dtypes.common.is_bool",
"pandas.core.ops.comp_method_OBJECT_ARRAY",
"pandas.core.algorithms.isin",
"pandas.core.common.is_bool_indexer",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.accessor.CachedAccessor",
"pandas.io.formats.printing.format_object_summary",
"numpy.dtype",
"pandas.core.dtypes.common.is_signed_integer_dtype",
"numpy.arange",
"pandas.core.ops.get_op_result_name",
"pandas.compat.set_function_name",
"pandas.core.dtypes.common.is_float",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.io.formats.printing.format_object_attrs",
"pandas.core.dtypes.concat.get_dtype_kinds",
"pandas._libs.join.left_join_indexer",
"pandas.core.dtypes.common.is_object_dtype",
"pandas._libs.lib.maybe_convert_objects",
"pandas.core.common.index_labels_to_array",
"pandas.core.construction.extract_array"
]
] |
akvelon/Bitcoin-Transaction-Optimization
|
[
"e3740fe37869a0b84a472b19dbc5d879ec857837"
] |
[
"predictor-trainer/trainer/predictor_trainer.py"
] |
[
"\"\"\"\r\nCopyright 2019 Akvelon Inc.\r\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at \r\n\r\n http://www.apache.org/licenses/LICENSE-2.0\r\n\r\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\r\n\"\"\"\r\r\nimport time\r\nimport os\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom pathlib import Path\r\nimport tensorflow as tf\r\nfrom sklearn.preprocessing import RobustScaler\r\nfrom sklearn.externals import joblib\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow import keras\r\n\r\n\r\nclass PredictorTrainer:\r\n DATA_PATH = 'data/training.csv'\r\n MODEL_PATH = 'data/models/'\r\n SCALER_PATH = 'data/models/scaler.pkl'\r\n TRAINED_MODEL_PATH = 'data/models/fee-predictor-model.h5'\r\n BATCH_SIZE = 256\r\n TRAIN_STEPS = 10\r\n TRAIN_DATA_PERCENT = 0.9\r\n\r\n def __init__(self, batch_size=BATCH_SIZE, train_steps=TRAIN_STEPS):\r\n self.initialize_scaler()\r\n\r\n def initialize_scaler(self):\r\n path = Path(PredictorTrainer.SCALER_PATH)\r\n\r\n if not path.is_file():\r\n print('Scaler model not found. Initializing.')\r\n #self.scaler = MinMaxScaler(feature_range=(0, 1))\r\n self.scaler = RobustScaler()\r\n data = self.load_data()\r\n self.scaler.fit(data.values[:, 1:])\r\n path.parent.mkdir(parents=True, exist_ok=True)\r\n joblib.dump(self.scaler, PredictorTrainer.SCALER_PATH)\r\n print('Scaler initialized and saved.')\r\n else:\r\n print('Found scaler model. Loading.')\r\n self.scaler = joblib.load(PredictorTrainer.SCALER_PATH)\r\n print('Scaler loaded.')\r\n\r\n def scale_data(self, data):\r\n return self.scaler.transform(data)\r\n\r\n # splits the data onto training and test set\r\n def split_data(self, data, n):\r\n train_start = 0\r\n train_end = int(np.floor(0.8 * n))\r\n test_start = train_end + 1\r\n test_end = n\r\n return data[train_start:train_end], data[test_start:test_end]\r\n\r\n # loads the file with default data\r\n def load_file(self):\r\n return pd.read_csv(PredictorTrainer.DATA_PATH)\r\n\r\n # there are helper fields in data, this function left only ones which needed to train the model\r\n def get_learning_data(self, dataframe):\r\n return dataframe.drop(['block_median_fee_per_byte', 'block_id'], axis='columns')\r\n\r\n # sometimes fee_per_byte is enormous, so we take care of having the normal one here\r\n def filter_out_outliners(self, dataframe):\r\n return dataframe.query('fee_per_byte < block_median_fee_per_byte')\r\n\r\n # do all transformation needed to get info suitable for training\r\n def load_data(self):\r\n data = self.load_file()\r\n data = self.filter_out_outliners(data)\r\n return self.get_learning_data(data)\r\n\r\n def train(self):\r\n data = self.load_data()\r\n n = data.shape[0]\r\n data = data.values\r\n\r\n data_train, data_test = self.split_data(data, n)\r\n\r\n x_train = self.scale_data(data_train[:, 1:])\r\n y_train = data_train[:, 0]\r\n x_test = self.scale_data(data_test[:, 1:])\r\n y_test = data_test[:, 0]\r\n\r\n model = keras.Sequential([\r\n keras.layers.Dense(3, kernel_initializer='normal', input_dim=3),\r\n keras.layers.Dense(1024, kernel_initializer='normal'),\r\n keras.layers.PReLU(),\r\n keras.layers.Dropout(0.1),\r\n keras.layers.Dense(512, 
kernel_initializer='normal'),\r\n keras.layers.PReLU(),\r\n keras.layers.Dropout(0.1),\r\n keras.layers.Dense(256, kernel_initializer='normal'),\r\n keras.layers.PReLU(),\r\n keras.layers.Dropout(0.1),\r\n keras.layers.Dense(128, kernel_initializer='normal'),\r\n keras.layers.PReLU(),\r\n keras.layers.Dropout(0.1),\r\n keras.layers.Dense(64, kernel_initializer='normal',),\r\n keras.layers.PReLU(),\r\n keras.layers.Dropout(0.1),\r\n keras.layers.Dense(32, kernel_initializer='normal'),\r\n keras.layers.PReLU(),\r\n keras.layers.Dropout(0.1),\r\n keras.layers.Dense(1, kernel_initializer='normal')\r\n ])\r\n\r\n model.compile(optimizer='adam', \r\n loss=tf.losses.huber_loss)\r\n model.fit(x_train, y_train, epochs=10, batch_size=250)\r\n\r\n model.save(PredictorTrainer.TRAINED_MODEL_PATH)\r\n\r\n def load_model(self, model_name):\r\n return keras.models.load_model(model_name, custom_objects={'huber_loss': tf.losses.huber_loss})\r\n\r\n def evaluate_block(self, model_name, test_file):\r\n model = self.load_model(model_name)\r\n data_raw = pd.read_csv(test_file)\r\n min_fee = data_raw[['fee_per_byte']].min().values[0]\r\n median_fee = data_raw[['block_median_fee_per_byte']].values[0][0]\r\n data = data_raw.query('confirmation_speed == 0')\r\n data = self.get_learning_data(data)\r\n data_y = data[:, 0]\r\n data_x = self.scale_data(data[:, 1:])\r\n predicted = model.predict(data_x).flatten()\r\n\r\n hit = np.where(predicted > min_fee)[0].size\r\n out = np.where(predicted > median_fee)[0].size\r\n total_good = np.where((min_fee < predicted) & (predicted < median_fee))[0].size\r\n\r\n print('hit', hit)\r\n print('out', out)\r\n print('total_good', total_good)\r\n\r\n total_fee_loss = 0\r\n sizes = data_raw.query('confirmation_speed == 0')[['vsize']].values.flatten()\r\n for i in range(0, data_y.size):\r\n total_fee_loss += sizes[i] * (data_y[i] - predicted[i])\r\n print('total_fee_loss', total_fee_loss)\r\n return\r\n\r\n # evaluates the model predictions and write down values to file for further analisys\r\n def evaluate(self):\r\n # idea is to check how well we predict fee so that transaction were added to the first block after they appear in mempool\r\n model = self.load_model(PredictorTrainer.TRAINED_MODEL_PATH)\r\n data_raw = self.load_file()\r\n # looking for blocks which wasn't used during training so that get legitimate result\r\n # the first step is get training set the same way as we did this during training session\r\n data = self.filter_out_outliners(data_raw)\r\n data_train, data_test = self.split_data(data, data.shape[0])\r\n\r\n data_train_blocks = set(data_train['block_id'].values.flatten()) # block ids which were used during training\r\n all_blocks = set(data_raw['block_id'].values.flatten()) # all block ids in our data\r\n block_indexes_to_evaluate = list(all_blocks.difference(data_train_blocks)) # this difference are block ids which wasn't used by training process\r\n data = data_raw[(data_raw['block_id'].isin(block_indexes_to_evaluate))] # filter the data which wasn't used in training so we can use it to evaluate\r\n data = data.query('confirmation_speed == 0') # we looking only for results where transaction were added to the first next block after it added to mempool\r\n\r\n #collecting the statistics\r\n output = pd.DataFrame(columns=['block_id', 'min_fee', 'median_fee', 'predicted_mean_fee', 'predicted_median_fee'])\r\n for name, group in data.groupby('block_id'):\r\n min_fee = group['fee_per_byte'].min()\r\n median_fee = group['fee_per_byte'].median()\r\n learning_data = 
self.get_learning_data(group)\r\n x_test = self.scale_data(learning_data.values[:, 1:])\r\n y_predicted = model.predict(x_test).flatten()\r\n predicted_mean_fee = float(np.mean(y_predicted))\r\n predicted_median_fee = float(np.median(y_predicted))\r\n output = output.append({\r\n 'block_id': name,\r\n 'min_fee': min_fee,\r\n 'median_fee': median_fee,\r\n 'predicted_mean_fee': predicted_mean_fee,\r\n 'predicted_median_fee': predicted_median_fee\r\n }, ignore_index=True)\r\n\r\n output.to_csv(os.path.join(PredictorTrainer.MODEL_PATH, 'evaluation_output.csv'))\r\n\r\n def predict(self, predict, expected, model_name):\r\n predict_scaled = self.scale_data(predict)[:, 1:]\r\n sess, x, y, out = self.load_model(os.path.join(PredictorTrainer.MODEL_PATH, model_name))\r\n predictions = sess.run(out, feed_dict={x: predict_scaled})\r\n\r\n template = 'Prediction is \"{}\", expected \"{}\"\\n'\r\n output = []\r\n i = 0\r\n\r\n for pred, expec in zip(predictions[0, :], expected):\r\n inversed = self.scaler.inverse_transform(np.array([[pred, predict[i][1], predict[i][2], predict[i][3]]]))\r\n pred = inversed[0, 0]\r\n print(template.format(pred, expec))\r\n output.append(\r\n {'mempool_megabytes': predict[i][1], 'mempool_tx_count': predict[i][2],\r\n 'confirmation_speed': predict[i][3],\r\n 'prediction': pred})\r\n\r\n i += 1\r\n\r\n return output\r\n"
] |
[
[
"tensorflow.keras.models.load_model",
"sklearn.externals.joblib.dump",
"pandas.read_csv",
"tensorflow.keras.layers.PReLU",
"sklearn.preprocessing.RobustScaler",
"tensorflow.keras.layers.Dense",
"numpy.median",
"pandas.DataFrame",
"numpy.mean",
"numpy.floor",
"tensorflow.keras.layers.Dropout",
"numpy.array",
"sklearn.externals.joblib.load",
"numpy.where"
]
] |
kkosmo/orbitize
|
[
"5790100122f42224f9982e53d7338540a87c5fbc"
] |
[
"tests/test_read_input.py"
] |
[
"import pytest\nimport deprecation\nimport numpy as np\nimport os\nimport orbitize\nfrom orbitize.read_input import read_file, write_orbitize_input, read_formatted_file, read_orbitize_input\n\n\ndef _compare_table(input_table):\n \"\"\"\n Tests input table to expected values, which are:\n epoch object quant1 quant1_err quant2 quant2_err quant_type\n float64 int float64 float64 float64 float64 str5\n ------- ------ ------- ---------- ------- ---------- ----------\n 1234.0 1 0.01 0.005 0.5 0.05 radec\n 1235.0 1 1.0 0.005 89.0 0.1 seppa\n 1236.0 1 1.0 0.005 89.3 0.3 seppa\n 1237.0 0 10.0 0.1 nan nan rv\n \"\"\"\n rows_expected = 4\n epoch_expected = [1234, 1235, 1236, 1237]\n object_expected = [1,1,1,0]\n quant1_expected = [0.01, 1.0, 1.0, 10.0]\n quant1_err_expected = [0.005, 0.005, 0.005, 0.1]\n quant2_expected = [0.5, 89.0, 89.3, np.nan]\n quant2_err_expected = [0.05, 0.1, 0.3, np.nan]\n quant_type_expected = ['radec', 'seppa', 'seppa', 'rv']\n assert len(input_table) == rows_expected\n for meas,truth in zip(input_table['epoch'],epoch_expected):\n assert truth == pytest.approx(meas)\n for meas,truth in zip(input_table['object'],object_expected):\n assert truth == meas\n for meas,truth in zip(input_table['quant1'],quant1_expected):\n if np.isnan(truth):\n assert np.isnan(meas)\n else:\n assert truth == pytest.approx(meas)\n for meas,truth in zip(input_table['quant1_err'],quant1_err_expected):\n if np.isnan(truth):\n assert np.isnan(meas)\n else:\n assert truth == pytest.approx(meas)\n for meas,truth in zip(input_table['quant2'],quant2_expected):\n if np.isnan(truth):\n assert np.isnan(meas)\n else:\n assert truth == pytest.approx(meas)\n for meas,truth in zip(input_table['quant2_err'],quant2_err_expected):\n if np.isnan(truth):\n assert np.isnan(meas)\n else:\n assert truth == pytest.approx(meas)\n for meas,truth in zip(input_table['quant_type'],quant_type_expected):\n assert truth == meas\n\ndef test_read_file():\n \"\"\"\n Test the read_file function using the test_val.csv file and test_val_radec.csv\n \"\"\"\n # Check that main test input is read in with correct values\n input_file = os.path.join(orbitize.DATADIR, 'test_val.csv')\n _compare_table(read_file(input_file))\n # Check that an input value with all valid entries and only ra/dec columns can be read\n input_file_radec = os.path.join(orbitize.DATADIR, 'test_val_radec.csv')\n read_file(input_file_radec)\n\n@deprecation.fail_if_not_removed\ndef test_read_formatted_file():\n \"\"\"\n Tests the read_formatted_file function using the test_val.csv file and test_val_radec.csv\n\n This test exists with the fail_if_not_removed decorator as a reminder to remove in v2.0\n \"\"\"\n # Check that main test input is read in with correct values\n input_file = os.path.join(orbitize.DATADIR, 'test_val.csv')\n _compare_table(read_formatted_file(input_file))\n # Check that an input value with all valid entries and only ra/dec columns can be read\n input_file_radec = os.path.join(orbitize.DATADIR, 'test_val_radec.csv')\n read_file(input_file_radec)\n\ndef test_write_orbitize_input():\n \"\"\"\n Test the write_orbitize_input and the read_file functions\n \"\"\"\n input_file = os.path.join(orbitize.DATADIR, 'test_val.csv')\n test_table = read_file(input_file)\n output_file = os.path.join(orbitize.DATADIR, 'temp_test_orbitize_input.csv')\n # If temp output file already exists, delete it\n if os.path.isfile(output_file):\n os.remove(output_file)\n try: # Catch these tests so that we remove temporary file\n # Test that we were able to write the table\n 
write_orbitize_input(test_table,output_file)\n assert os.path.isfile(output_file)\n # Test that we can read the table and check if it's correct\n test_table_2 = read_file(output_file)\n _compare_table(test_table_2)\n finally:\n # Remove temporary file\n os.remove(output_file)\n\n@deprecation.fail_if_not_removed\ndef test_write_orbitize_input_2():\n \"\"\"\n Test the write_orbitize_input and the read_orbitize_input functions\n\n This test exists with the fail_if_not_removed decorator as a reminder to remove in v2.0\n \"\"\"\n input_file = os.path.join(orbitize.DATADIR, 'test_val.csv')\n test_table = read_file(input_file)\n output_file = os.path.join(orbitize.DATADIR, 'temp_test_orbitize_input.csv')\n # If temp output file already exists, delete it\n if os.path.isfile(output_file):\n os.remove(output_file)\n try: # Catch these tests so that we remove temporary file\n # Test that we were able to write the table\n write_orbitize_input(test_table,output_file)\n assert os.path.isfile(output_file)\n # Test that we can read the table and check if it's correct\n test_table_2 = read_orbitize_input(output_file)\n _compare_table(test_table_2)\n finally:\n # Remove temporary file\n os.remove(output_file)\n\nif __name__ == \"__main__\":\n test_read_file()\n test_read_formatted_file()\n test_write_orbitize_input()\n test_write_orbitize_input_2()\n"
] |
[
[
"numpy.isnan"
]
] |
afshimono/data_analyst_nanodegree
|
[
"8a047abe3770fbd2865c078ecaa121ce096189c2"
] |
[
"Intro to Machine Learning/outliers/outlier_cleaner.py"
] |
[
"#!/usr/bin/python\n\n\ndef outlierCleaner(predictions, ages, net_worths):\n \"\"\"\n Clean away the 10% of points that have the largest\n residual errors (difference between the prediction\n and the actual net worth).\n\n Return a list of tuples named cleaned_data where \n each tuple is of the form (age, net_worth, error).\n \"\"\"\n \n cleaned_data = []\n\n import numpy as np\n ### your code goes here \n\n error = abs((np.asarray(predictions) - np.asarray(net_worths)))\n \n #print(len(error))\n temp=np.zeros(shape=(90,3))\n \n for i in range(0,len(error),1):\n temp[i][0]=error[i]\n temp[i][1]=ages[i]\n temp[i][2]=net_worths[i]\n #print(len(temp))\n \n list_to_remove = [0 ,0 ,0 ,0 ,0 ,0 ,0 ,0 ,0]\n\n for k in range(0,9,1):\n maximun = 0\n max_i = 0\n for i in range(0,len(temp),1):\n #print('Temp[i]: '+ str(temp[i][0]))\n if (temp[i][0] >= maximun and i not in list_to_remove):\n maximun = temp[i][0]\n max_i = i\n list_to_remove.append(max_i)\n list_to_remove.pop(0) \n #print(list_to_remove)\n #for i in list_to_remove:\n #print(temp[i])\n temp = np.delete(temp,np.asarray(list_to_remove),0)\n cleaned_data=[]\n #print(len(temp))\n for i in range(0,len(temp),1):\n tup=(temp[i][1],temp[i][2],temp[i][0])\n #print(tup)\n cleaned_data.append(tup)\n #print(len(cleaned_data))\n return cleaned_data\n\n"
] |
[
[
"numpy.asarray",
"numpy.zeros"
]
] |
mscelnik/concurrency-demos
|
[
"aba31b5fba48b7e843aee016a8261d1494c0c65d"
] |
[
"randdata.py"
] |
[
"\"\"\" Make random dataframes.\n\"\"\"\n\nfrom string import ascii_uppercase\n\nLETTERS = list(ascii_uppercase)\nMAX_COLUMNS = len(LETTERS)\n\n\ndef make_df(row_count, columns):\n import numpy as np\n import pandas as pd\n values = np.random.rand(row_count, len(columns)) * 100.0\n return pd.DataFrame(values, columns=columns)\n\n\ndef size_df(row_count, columns, fpath):\n import os\n df = make_df(row_count, columns)\n if fpath.lower().endswith('.xlsx'):\n df.to_excel(fpath, index=False)\n else:\n df.to_csv(fpath, index=False)\n return os.stat(fpath).st_size / 1024\n\n\ndef rows_for_size(size, column_count=MAX_COLUMNS, fmt='csv'):\n \"\"\" Determine number of rows required to make a file of at least `size` KBs.\n\n Args:\n size (float): Desired file size in KBs.\n column_count (int): Number of columns to add to file.\n fmt (str): Either `csv` or `excel`. Default is CSV.\n\n Returns:\n Number of rows (int) required for file size.\n \"\"\"\n import os\n import os.path\n import tempfile\n\n if fmt.lower() in ('excel', 'xl', 'xls', 'xlsx'):\n fname = 'test.xlsx'\n is_excel = True\n else:\n fname = 'test.csv'\n is_excel = False\n\n columns = LETTERS[:column_count]\n\n # Guestimate initial number of rows from the dimensions.\n nrows = int(25 * size / column_count)\n\n with tempfile.TemporaryDirectory() as dpath:\n fpath = os.path.join(dpath, fname)\n\n # Set the upper bound.\n current_size = 0.0\n while current_size < size:\n nrows *= 2\n print(nrows)\n df = make_df(nrows, columns)\n if is_excel:\n df.to_excel(fpath, index=False)\n else:\n df.to_csv(fpath, index=False)\n current_size = os.stat(fpath).st_size / 1024\n\n # Binary search to find row count.\n lowrows = nrows / 2\n highrows = nrows\n while (highrows - lowrows) > 1:\n nrows = int((highrows + lowrows) / 2)\n\n df = make_df(nrows, columns)\n if is_excel:\n df.to_excel(fpath, index=False)\n else:\n df.to_csv(fpath, index=False)\n current_size = os.stat(fpath).st_size / 1024\n\n if current_size < size:\n lowrows, highrows = nrows, highrows\n else:\n lowrows, highrows = lowrows, nrows\n\n print(current_size, nrows, lowrows, highrows)\n\n return highrows\n\n\nif __name__ == '__main__':\n for sz in [100, 1024, 10 * 1024]:\n n = rows_for_size(sz)\n print(sz, n)\n"
] |
[
[
"pandas.DataFrame"
]
] |
weizi-li/flow
|
[
"958b64ece8af6db715e6fb3b6042035b05b93bc2"
] |
[
"flow/benchmarks/baselines/bottleneck1.py"
] |
[
"\"\"\"Evaluates the baseline performance of bottleneck1 without RL control.\n\nBaseline is no AVs.\n\"\"\"\n\nimport numpy as np\nfrom flow.core.experiment import Experiment\nfrom flow.core.params import InitialConfig\nfrom flow.core.params import InFlows\nfrom flow.core.params import SumoLaneChangeParams\nfrom flow.core.params import SumoCarFollowingParams\nfrom flow.core.params import VehicleParams\nfrom flow.core.params import TrafficLightParams\nfrom flow.controllers import ContinuousRouter\nfrom flow.benchmarks.bottleneck1 import flow_params\nfrom flow.benchmarks.bottleneck1 import SCALING\n\n\ndef bottleneck1_baseline(num_runs, render=True):\n \"\"\"Run script for the bottleneck1 baseline.\n\n Parameters\n ----------\n num_runs : int\n number of rollouts the performance of the environment is evaluated\n over\n render: str, optional\n specifies whether to use the gui during execution\n\n Returns\n -------\n flow.core.experiment.Experiment\n class needed to run simulations\n \"\"\"\n exp_tag = flow_params['exp_tag']\n sim_params = flow_params['sim']\n env_params = flow_params['env']\n net_params = flow_params['net']\n initial_config = flow_params.get('initial', InitialConfig())\n traffic_lights = flow_params.get('tls', TrafficLightParams())\n\n # we want no autonomous vehicles in the simulation\n vehicles = VehicleParams()\n vehicles.add(veh_id='human',\n car_following_params=SumoCarFollowingParams(\n speed_mode=9,\n ),\n routing_controller=(ContinuousRouter, {}),\n lane_change_params=SumoLaneChangeParams(\n lane_change_mode=1621,\n ),\n num_vehicles=1 * SCALING)\n\n # only include human vehicles in inflows\n flow_rate = 2300 * SCALING\n inflow = InFlows()\n inflow.add(veh_type='human', edge='1',\n vehs_per_hour=flow_rate,\n departLane='random', departSpeed=10)\n net_params.inflows = inflow\n\n # modify the rendering to match what is requested\n sim_params.render = render\n\n # set the evaluation flag to True\n env_params.evaluate = True\n\n # import the network class\n module = __import__('flow.networks', fromlist=[flow_params['network']])\n network_class = getattr(module, flow_params['network'])\n\n # create the network object\n network = network_class(\n name=exp_tag,\n vehicles=vehicles,\n net_params=net_params,\n initial_config=initial_config,\n traffic_lights=traffic_lights\n )\n\n # import the environment class\n module = __import__('flow.envs', fromlist=[flow_params['env_name']])\n env_class = getattr(module, flow_params['env_name'])\n\n # create the environment object\n env = env_class(env_params, sim_params, network)\n\n exp = Experiment(env)\n\n results = exp.run(num_runs, env_params.horizon)\n\n return np.mean(results['returns']), np.std(results['returns'])\n\n\nif __name__ == '__main__':\n runs = 2 # number of simulations to average over\n mean, std = bottleneck1_baseline(num_runs=runs, render=False)\n\n print('---------')\n print('The average outflow, std. deviation over 500 seconds '\n 'across {} runs is {}, {}'.format(runs, mean, std))\n"
] |
[
[
"numpy.std",
"numpy.mean"
]
] |
Averylamp/composer
|
[
"1afc56e9c207734aee75ff8c5b046fb55d928fb5"
] |
[
"tests/trainer/test_ddp.py"
] |
[
"# Copyright 2021 MosaicML. All Rights Reserved.\n\nimport collections.abc\nimport os\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Sequence\nfrom unittest import mock\n\nimport pytest\nimport torch\nimport torch.distributed\nimport yahp as hp\nfrom _pytest.monkeypatch import MonkeyPatch\n\nimport composer.core.types as types\nfrom composer import Callback\nfrom composer.callbacks import CallbackHparams\nfrom composer.core.logging import Logger\nfrom composer.core.state import State\nfrom composer.datasets import DataloaderHparams, DataloaderSpec, MemoryFormat, SyntheticDataset, SyntheticDatasetHparams\nfrom composer.trainer.ddp import DDPHparams, FileStoreHparams\nfrom composer.trainer.devices import CPUDeviceHparams, GPUDeviceHparams\nfrom composer.trainer.trainer_hparams import TrainerHparams, callback_registry, dataset_registry\nfrom tests.fixtures.models import SimpleBatchPairModelHparams\n\n\ndef get_file_path(tmpdir: str, *, idx: int, epoch: int, is_train: bool) -> str:\n train_str = \"train\" if is_train else \"val\"\n return os.path.join(tmpdir, f\"{train_str}-epoch-{epoch}-sample-{idx}\")\n\n\ndef get_batch_file_path(tmpdir: str, *, rank: int, epoch: int, is_train: bool) -> str:\n train_str = \"train\" if is_train else \"val\"\n return os.path.join(tmpdir, f\"{train_str}-rank-{rank}-epoch-{epoch}-batch0.pt\")\n\n\nclass TrackedDataset(SyntheticDataset):\n \"\"\"\n TrackedDataset atomically writes a file every time a record is accessed.\n It is thread-safe and subprocess-safe, and is useful to measure how many times a sample is accessed.\n Because of atomic file writes, it is slow and should not be used in any performance measurements.\n \"\"\"\n\n def __init__(self, *, total_dataset_size: int, data_shape: Sequence[int], memory_format: MemoryFormat, device: str,\n num_classes: int, is_train: bool, tmpdir: str):\n super().__init__(total_dataset_size=total_dataset_size,\n data_shape=data_shape,\n num_classes=num_classes,\n memory_format=memory_format,\n device=device)\n self.is_train = is_train\n self.tmpdir = tmpdir\n\n def __getitem__(self, idx: int):\n access = 0\n while True:\n try:\n with open(get_file_path(self.tmpdir, idx=idx, epoch=access, is_train=self.is_train), \"x\") as f:\n f.write(str(idx))\n return super().__getitem__(idx)\n except FileExistsError:\n access += 1\n\n\n@dataclass\nclass TrackedDatasetHparams(SyntheticDatasetHparams):\n is_train: Optional[bool] = hp.optional(\"is_train\", default=None)\n tmpdir: Optional[str] = hp.optional(\"tmpdir\", default=None)\n\n def initialize_object(self) -> DataloaderSpec:\n assert self.is_train is not None\n assert self.tmpdir is not None\n return DataloaderSpec(\n TrackedDataset(\n total_dataset_size=self.total_dataset_size,\n data_shape=self.data_shape,\n num_classes=self.num_classes,\n device=self.device,\n memory_format=self.memory_format,\n is_train=self.is_train,\n tmpdir=self.tmpdir,\n ),\n drop_last=self.drop_last,\n shuffle=self.shuffle,\n )\n\n\nclass CheckBatch0(Callback):\n\n def __init__(self, tmpdir: str):\n super().__init__()\n self.tmpdir = tmpdir\n\n def before_forward(self, state: State, logger: Logger):\n if state.batch_idx > 0:\n return\n rank: int = torch.distributed.get_rank()\n last_input, last_target = state.batch_pair\n torch.save( # type: ignore\n {\n \"last_input\": last_input,\n \"last_target\": last_target,\n }, get_batch_file_path(self.tmpdir, rank=rank, epoch=state.epoch, is_train=True))\n\n def eval_before_forward(self, state: State, logger: Logger):\n rank: int = 
torch.distributed.get_rank()\n filepath = get_batch_file_path(self.tmpdir, rank=rank, epoch=state.epoch, is_train=False)\n if os.path.exists(filepath):\n return\n assert not state.model.training\n last_input, last_target = state.batch_pair\n torch.save( # type: ignore\n {\n \"last_input\": last_input,\n \"last_target\": last_target,\n }, get_batch_file_path(self.tmpdir, rank=rank, epoch=state.epoch, is_train=False))\n\n\n@dataclass\nclass CheckBatch0Hparams(CallbackHparams):\n tmpdir: str = hp.required(\"tmpdir\")\n\n def initialize_object(self) -> Callback:\n return CheckBatch0(self.tmpdir)\n\n\n@pytest.fixture(autouse=True)\ndef patch_registries(monkeypatch: MonkeyPatch):\n monkeypatch.setitem(callback_registry, \"checkbatch0\", CheckBatch0Hparams)\n monkeypatch.setitem(dataset_registry, \"tracked\", TrackedDatasetHparams)\n\n\n@pytest.mark.timeout(90)\n@pytest.mark.parametrize(\"fork_rank_0\", [True, False], ids=[\"fork-rank-0\", \"no-fork-rank-0\"])\n@pytest.mark.parametrize(\"is_gpu,num_procs\", [\n pytest.param(False, 1, id=\"1-cpu\"),\n pytest.param(False, 2, id=\"2-cpu\"),\n pytest.param(True, 1, marks=[pytest.mark.n_gpus(1)], id=\"1-gpu\"),\n pytest.param(True, 2, marks=[pytest.mark.n_gpus(2)], id=\"2-gpu\"),\n])\ndef test_ddp(is_gpu: bool, num_procs: int, fork_rank_0: bool, *, ddp_tmpdir: str, is_main_pytest_process: bool,\n mosaic_trainer_hparams: TrainerHparams) -> None:\n \"\"\"\n test strategy for ddp:\n 1) Train a dummy model on two gps, for two epochs, using the tracked dataset.\n 2) The tracked dataset should record two -- and only two -- accesses for each sample -- one for each epoch\n If each sample is accessed more than this number of times, then the distributed sampler isn't working properly\n If each sample is accessed less than this number of times, then either the sample pool size isn't a multiple of\n the batch size (and samples are getting dropped), or not all processes are working\n 3) We use a callback to save the (x, y) for the first batch in each epoch on each process\n ({train, eval} * {epoch 1, epoch 2} * {ddp 1, ddp2})\n We assert that each of these tensors are different to ensure that 1) random seeding works properly,\n and 2) each ddp process is indeed getting different data.\n \"\"\"\n hparams = mosaic_trainer_hparams\n model_hparams = hparams.model\n assert isinstance(model_hparams, SimpleBatchPairModelHparams)\n model = model_hparams.initialize_object()\n shape = list(model.in_shape) # type: ignore\n mosaic_trainer_hparams.train_dataset = TrackedDatasetHparams(\n total_dataset_size=300,\n data_shape=shape,\n num_classes=model.num_classes,\n device=\"cpu\",\n is_train=True,\n memory_format=MemoryFormat.CONTIGUOUS_FORMAT,\n tmpdir=ddp_tmpdir,\n )\n hparams.val_dataset = TrackedDatasetHparams(\n total_dataset_size=300,\n data_shape=shape,\n num_classes=model.num_classes,\n device=\"cpu\",\n is_train=False,\n memory_format=MemoryFormat.CONTIGUOUS_FORMAT,\n tmpdir=ddp_tmpdir,\n )\n if is_gpu:\n device = GPUDeviceHparams(n_gpus=num_procs)\n else:\n device = CPUDeviceHparams(n_cpus=num_procs)\n hparams.device = device\n hparams.ddp = DDPHparams(\n store=FileStoreHparams(os.path.join(ddp_tmpdir, \"store\")),\n node_rank=0,\n num_nodes=1,\n fork_rank_0=fork_rank_0,\n )\n hparams.dataloader = DataloaderHparams(\n num_workers=0,\n prefetch_factor=2,\n persistent_workers=False,\n pin_memory=False,\n timeout=0,\n )\n hparams.total_batch_size = 50\n hparams.eval_batch_size = 50\n hparams.max_epochs = 2\n hparams.precision = types.Precision.FP32\n hparams.loggers = 
[]\n hparams.validate_every_n_batches = 0\n hparams.validate_every_n_epochs = 1\n hparams.callbacks.append(CheckBatch0Hparams(tmpdir=ddp_tmpdir))\n trainer = hparams.initialize_object()\n assert trainer.state.world_size == num_procs\n assert trainer.state.nproc_per_node == num_procs\n assert isinstance(trainer.train_dl_spec.dataset, collections.abc.Sized)\n num_train_samples = len(trainer.train_dl_spec.dataset)\n assert isinstance(trainer.eval_dl_spec.dataset, collections.abc.Sized)\n num_eval_samples = len(trainer.eval_dl_spec.dataset)\n trainer.fit()\n\n # we want to validate on the spawning process only\n if is_main_pytest_process:\n # now validate that each sample were accessed exactly hparams.max_epochs * batch size times\n num_epochs = hparams.max_epochs\n\n for i in range(num_train_samples):\n for epoch in range(num_epochs):\n assert os.path.exists(\n get_file_path(ddp_tmpdir, idx=i, epoch=epoch,\n is_train=True)), f\"train sample {i} was not accessed during epoch {epoch}\"\n assert not os.path.exists(get_file_path(ddp_tmpdir, idx=i, epoch=num_epochs,\n is_train=True)), f\"train sample {i} was accessed too many times\"\n\n for i in range(num_eval_samples):\n for epoch in range(num_epochs):\n assert os.path.exists(\n get_file_path(ddp_tmpdir, idx=i, epoch=epoch,\n is_train=False)), f\"val sample {i} was not accessed during epoch {epoch}\"\n # the eval dataloader is spun once more to initialize the rng, so expecting num_epochs + 1 to not exist\n assert not os.path.exists(get_file_path(ddp_tmpdir, idx=i, epoch=num_epochs + 1,\n is_train=False)), f\"val sample {i} was accessed too many times\"\n\n is_train_to_pickles: Dict[bool, List[Dict[str, types.Tensor]]] = {True: [], False: []}\n\n for epoch in range(num_epochs):\n for local_rank in range(trainer.device.nproc_per_node):\n for is_train in (True, False):\n data: Dict[str, types.Tensor] = torch.load( # type: ignore\n get_batch_file_path(ddp_tmpdir, rank=local_rank, epoch=epoch, is_train=is_train),\n map_location='cpu',\n )\n for pickle in is_train_to_pickles[is_train]:\n assert not torch.all(data['last_input'] == pickle['last_input'])\n assert not torch.all(data['last_target'] == pickle['last_target'])\n is_train_to_pickles[is_train].append(data)\n\n\ndef test_ddp_cuda_available_check(mosaic_trainer_hparams: TrainerHparams):\n with mock.patch.object(torch.cuda, 'device_count') as device_count, \\\n mock.patch.object(torch.cuda, 'is_available') as is_cuda_available:\n is_cuda_available.return_value = False\n device_count = 1\n\n mosaic_trainer_hparams.device = GPUDeviceHparams(n_gpus=1)\n assert (not torch.cuda.is_available())\n\n with pytest.raises(ValueError, match=\"CUDA not available but gpu backend requested.\"):\n mosaic_trainer_hparams.initialize_object()\n\n\ndef test_ddp_cuda_ngpus_check(mosaic_trainer_hparams: TrainerHparams):\n with mock.patch.object(torch.cuda, 'device_count') as device_count, \\\n mock.patch.object(torch.cuda, 'is_available') as is_cuda_available:\n is_cuda_available.return_value = True\n device_count.return_value = 2\n\n mosaic_trainer_hparams.device = GPUDeviceHparams(n_gpus=8)\n\n with pytest.raises(ValueError, match=\"Requested 8 GPUs, but only 2 available.\"):\n mosaic_trainer_hparams.initialize_object()\n\n\ndef test_ddp_nccl_check(mosaic_trainer_hparams: TrainerHparams):\n with mock.patch.object(torch.cuda, 'device_count') as device_count, \\\n mock.patch.object(torch.distributed, 'is_nccl_available') as nccl_available, \\\n mock.patch.object(torch.cuda, 'is_available') as is_cuda_available:\n\n 
device_count.return_value = 1\n is_cuda_available.return_value = True\n nccl_available.return_value = False\n\n mosaic_trainer_hparams.device = GPUDeviceHparams(n_gpus=1)\n\n with pytest.raises(ValueError, match=\"Requested NCCL backend not available in torch.distributed\"):\n mosaic_trainer_hparams.initialize_object()\n"
] |
[
[
"torch.all",
"torch.distributed.get_rank",
"torch.cuda.is_available"
]
] |
typhoonzero/elasticdl
|
[
"c4966a66d72b0b24f4174f2fe7ef308db21a8cac"
] |
[
"elasticdl/python/master/servicer.py"
] |
[
"import threading\n\nimport numpy as np\nimport tensorflow as tf\nfrom google.protobuf import empty_pb2\n\nfrom elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\nfrom elasticdl.python.common.file_utils import copy_if_not_exists\nfrom elasticdl.python.common.log_utils import default_logger as logger\nfrom elasticdl.python.common.model_utils import load_from_checkpoint_file\nfrom elasticdl.python.common.tensor import (\n Tensor,\n emplace_tensor_pb_from_ndarray,\n tensor_pb_to_ndarray,\n)\nfrom elasticdl.python.common.tensor_utils import merge_indexed_slices\nfrom elasticdl.python.master.checkpoint_service import CheckpointService\nfrom elasticdl.python.master.learning_rate_modulator import (\n add_lr_modulation_to_optimizer,\n)\nfrom elasticdl.python.master.optimizer_wrapper import OptimizerWrapper\n\n\nclass MasterServicer(elasticdl_pb2_grpc.MasterServicer):\n \"\"\"Master service implementation\"\"\"\n\n def __init__(\n self,\n grads_to_wait,\n minibatch_size,\n optimizer,\n task_d,\n *,\n init_var,\n checkpoint_filename_for_init,\n checkpoint_service,\n evaluation_service,\n embedding_service_endpoint=None,\n embedding_dims={},\n lr_staleness_modulation=False,\n use_async=False,\n ):\n # TODO: group params together into a single object.\n self._task_d = task_d\n self._lock = threading.Lock()\n self._gradient_sum = {}\n self._edl_embedding_gradients = {}\n self._gradient_sum_indexed = {}\n self._grad_to_wait = grads_to_wait\n self._grad_n = 0\n self._minibatch_size = minibatch_size\n self._use_async = use_async\n self._lr_staleness_modulation = lr_staleness_modulation\n\n # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable\n # instead of ndarray to avoid copying and conversion when calling\n # optimizer's apply_gradients() function.\n self._model = {}\n self._version = 0\n self._embedding_service_endpoint = embedding_service_endpoint\n self._init_model(checkpoint_filename_for_init, init_var)\n self._opt = self._init_optimizer(\n optimizer, embedding_service_endpoint, embedding_dims, use_async\n )\n\n self._checkpoint_service = checkpoint_service\n self._evaluation_service = evaluation_service\n if evaluation_service:\n evaluation_service.set_master_servicer(self)\n\n # TODO: Multiple tests are currently using the function `set_model_var` to\n # initialize self._model, where the initialization should be done via\n # servicer's constructor.\n def set_model_var(self, name, value):\n \"\"\"Add or set model variable. 
Value should be a float32 ndarray\"\"\"\n if value.dtype != np.float32:\n raise ValueError(\"Value should be a float32 numpy array\")\n self._model[name] = tf.Variable(\n value, name=MasterServicer.var_name_encode(name)\n )\n\n def _modulate_lr_if_needed(self, opt):\n if self._use_async and self._lr_staleness_modulation:\n self._lr_modulation = add_lr_modulation_to_optimizer(opt)\n else:\n self._lr_modulation = None\n\n def _init_model_from_var_list(self, var_list):\n for var in var_list:\n self.set_model_var(var.name, var.numpy())\n\n def _init_model_from_tensor_pb_list(self, tensor_pb_list):\n assert tensor_pb_list\n for pb in tensor_pb_list:\n self.set_model_var(pb.name, tensor_pb_to_ndarray(pb))\n\n def _init_model(self, checkpoint_filename_for_init, init_var):\n if checkpoint_filename_for_init:\n pb_model = load_from_checkpoint_file(checkpoint_filename_for_init)\n self._version = pb_model.version\n self._init_model_from_tensor_pb_list(pb_model.param)\n elif init_var:\n self._init_model_from_var_list(init_var)\n else:\n logger.info(\n \"Model is not intialized. It will be \"\n \"initialized by the first update from \"\n \"the worker.\"\n )\n\n def _init_optimizer(\n self, opt, embedding_service_endpoint, embedding_dims, use_async\n ):\n # `embedding_service_endpoint` is not None means ElasticDL embedding\n # layers are used\n self._modulate_lr_if_needed(opt)\n if embedding_service_endpoint:\n return OptimizerWrapper(\n opt, embedding_service_endpoint, embedding_dims, use_async\n )\n return opt\n\n @staticmethod\n def var_name_encode(name):\n return name.replace(\":\", \"-\")\n\n def GetTask(self, request, _):\n res = elasticdl_pb2.Task()\n res.model_version = self._version\n res.minibatch_size = self._minibatch_size\n if request.task_type == elasticdl_pb2.EVALUATION:\n task_id, task = self._task_d.get_eval_task(request.worker_id)\n else:\n task_id, task = self._task_d.get(request.worker_id)\n\n if task:\n res.task_id = task_id\n res.shard_name = task.shard_name\n res.start = task.start\n res.end = task.end\n res.type = task.type\n for k, v in task.extended_config.items():\n res.extended_config[k] = v\n\n # For evaluation task, it will use the fixed version model\n if task.type == elasticdl_pb2.EVALUATION:\n res.model_version = task.model_version\n elif (not self._task_d.finished()) or (\n self._task_d.invoke_deferred_callback()\n ):\n # If the todo and doing tasks are not empty,\n # Otherwise if the callback list is not empty,\n # we are trying to pop and invoke the callback.\n # Then the master tells the worker to wait\n # in case of new tasks later.\n res.type = elasticdl_pb2.WAIT\n\n return res\n\n def GetModel(self, request, _):\n if not self._use_async:\n self._validate_model_version(request.version)\n\n if (\n request.method == elasticdl_pb2.MINIMUM\n or request.version == self._version\n ):\n if self._use_async:\n res = self._get_model_no_lock()\n else:\n with self._lock:\n res = self._get_model_no_lock()\n return res\n\n # Read from checkpoint for the fixed version model\n pb_model = elasticdl_pb2.Model()\n try:\n pb_model = self._checkpoint_service.get_checkpoint_model(\n request.version\n )\n except Exception:\n logger.error(\n \"Failed to fetch checkpoint model for \"\n \"model version {}\".format(request.version)\n )\n return pb_model\n\n def _update_model_version(self):\n assert self._lock.locked()\n self._version += 1\n\n def _update_model(self, grads, indexed_grads, edl_embedding_gradients):\n grad_var = []\n\n # (grad, var) pairs excluding keras Embedding layer and\n # 
ElasticDL Embedding layer\n for k in grads:\n grad_var.append((grads[k], self._model[k]))\n\n # (grad, var) pair of Keras Embedding layer\n for k in indexed_grads:\n grad_var.append((indexed_grads[k], self._model[k]))\n\n # (grad, var) pair of ElasticDL Embedding layer\n if edl_embedding_gradients:\n for layer_name, grads in edl_embedding_gradients.items():\n grad_var.append((grads, layer_name))\n\n self._opt.apply_gradients(grad_var)\n\n # need the lock for model version update in async SGD\n if self._use_async:\n self._lock.acquire()\n self._update_model_version()\n self._update_evaluation()\n self._update_checkpoint()\n if self._use_async:\n self._lock.release()\n else:\n self._gradient_sum.clear()\n self._gradient_sum_indexed.clear()\n self._edl_embedding_gradients.clear()\n self._grad_n = 0\n\n def get_model_version(self):\n return self._version\n\n def _save_checkpoint(self, locking, is_eval_checkpoint):\n try:\n logger.info(\n \"Saving checkpoint for model version %d\" % self._version\n )\n if locking:\n self._lock.acquire()\n pb_model = self._get_model_no_lock()\n self._checkpoint_service.save(\n self._version, pb_model, is_eval_checkpoint\n )\n checkpoint_version = self._version\n if locking:\n self._lock.release()\n return checkpoint_version\n except Exception:\n logger.error(\n \"Failed to save checkpoint file for model version %d\"\n % self._version\n )\n\n def save_latest_checkpoint(self, output_path):\n if self._checkpoint_service is None:\n self._checkpoint_service = CheckpointService(\n checkpoint_dir=\"\",\n checkpoint_steps=1,\n keep_checkpoint_max=1,\n include_evaluation=False,\n )\n self._save_checkpoint(locking=False, is_eval_checkpoint=False)\n checkpoint_path = self._checkpoint_service.get_checkpoint_path(\n self._checkpoint_service.get_latest_checkpoint_version()\n )\n copy_if_not_exists(checkpoint_path, output_path, is_dir=False)\n\n def _update_evaluation(self):\n if self._evaluation_service:\n self._evaluation_service.add_evaluation_task_if_needed(\n master_locking=False\n )\n\n def _update_checkpoint(self):\n if (\n self._checkpoint_service\n and self._checkpoint_service.need_to_checkpoint(self._version)\n ):\n self._save_checkpoint(locking=False, is_eval_checkpoint=False)\n\n def _get_model_no_lock(self):\n pb_model = elasticdl_pb2.Model()\n pb_model.version = self._version\n for k, v in self._model.items():\n emplace_tensor_pb_from_ndarray(pb_model.param, v.numpy(), name=k)\n return pb_model\n\n def _validate_model_version(self, request_model_version):\n if request_model_version > self._version:\n err_msg = (\n \"Model version %d not available yet, \"\n \"current version: %d\" % (request_model_version, self._version)\n )\n logger.warning(err_msg)\n raise ValueError(err_msg)\n return request_model_version == self._version\n\n def ReportVariable(self, request, _):\n with self._lock:\n if not self._model:\n self._init_model_from_tensor_pb_list(request.variable)\n return empty_pb2.Empty()\n\n def ReportGradient(self, request, _):\n model_version_valid = self._use_async or self._validate_model_version(\n request.model_version\n )\n\n res = elasticdl_pb2.ReportGradientResponse()\n if not model_version_valid:\n logger.warning(\n \"Task result for outdated version %d dropped\",\n request.model_version,\n )\n res.accepted = False\n res.model_version = self._version\n return res\n\n non_embedding_gradients = {}\n indexed_grads = {}\n edl_embedding_gradients = {}\n # Do sanity check before accumulating gradients.\n for v in request.gradient:\n tensor = 
Tensor.from_tensor_pb(v)\n name = tensor.name\n if name not in self._model:\n if tensor.is_indexed_slices():\n # grads of ElasticDL Embedding layer\n # TODO: check arr.shape[1] = embedding_dim of this\n # EdlEmbedding layer\n edl_embedding_gradients[name] = tensor.to_tf_tensor()\n continue\n else:\n raise ValueError(\n \"Gradient key: %s is not part of model\", name\n )\n\n if tensor.is_indexed_slices():\n if (\n tensor.values.shape[1]\n != self._model[name].numpy().shape[1]\n ):\n raise ValueError(\n \"Gradient key: %s has incompatible \"\n \"indexed slice dimension %d, expected %d\"\n % (\n name,\n tensor.values.shape[1],\n self._model[name].numpy().shape[1],\n )\n )\n\n max_index = tf.math.reduce_max(tensor.indices).numpy()\n if max_index >= self._model[name].numpy().shape[0]:\n raise ValueError(\n \"Gradient key: %s has wrong indices %d, \"\n \"out of range %d\"\n % (\n name,\n max_index,\n self._model[name].numpy().shape[0] - 1,\n )\n )\n indexed_grads[name] = tensor.to_tf_tensor()\n else:\n if tensor.values.shape != self._model[name].numpy().shape:\n raise ValueError(\n \"Gradient key: %s has incompatible dimension\", name\n )\n non_embedding_gradients[name] = tensor.to_tf_tensor()\n\n if not self._use_async:\n self._lock.acquire()\n self._process_gradients(\n edl_embedding_gradients,\n indexed_grads,\n non_embedding_gradients,\n request.model_version,\n )\n if not self._use_async:\n self._lock.release()\n\n res.accepted = True\n res.model_version = self._version\n return res\n\n def _process_gradients(\n self, edl_embedding_gradients, indexed_grads, grads, request_version\n ):\n if not self._use_async:\n # grads of ElasticDL Embedding layer\n for k, v in edl_embedding_gradients.items():\n if k in self._edl_embedding_gradients:\n self._edl_embedding_gradients[k] = merge_indexed_slices(\n self._edl_embedding_gradients[k], v\n )\n else:\n self._edl_embedding_gradients[k] = v\n\n # grads of Keras Embedding layer\n for k, v in indexed_grads.items():\n if k not in self._gradient_sum_indexed:\n self._gradient_sum_indexed[k] = v\n else:\n grads_s = self._gradient_sum_indexed[k]\n self._gradient_sum_indexed[k] = merge_indexed_slices(\n grads_s, v\n )\n\n # other grads\n for k, v in grads.items():\n if not self._use_async and k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] + v\n else:\n self._gradient_sum[k] = v\n self._grad_n += 1\n\n need_to_update_model = self._use_async\n if not self._use_async and self._grad_n >= self._grad_to_wait:\n need_to_update_model = True\n # get gradient average for sync SGD\n for k in self._gradient_sum:\n self._gradient_sum[k] = (\n self._gradient_sum[k] / self._grad_to_wait\n )\n edl_embedding_gradients = self._edl_embedding_gradients\n indexed_grads = self._gradient_sum_indexed\n grads = self._gradient_sum\n if need_to_update_model:\n self._update_optimizer(request_version)\n self._update_model(grads, indexed_grads, edl_embedding_gradients)\n\n def _update_optimizer(self, request_version):\n if self._lr_modulation:\n # staleness-aware learning rate modulation\n staleness = max(1, self._version - request_version)\n self._lr_modulation.set_multiplier(1.0 / staleness)\n\n def ReportTaskResult(self, request, _):\n if request.err_message:\n logger.warning(\"Worker reported error: \" + request.err_message)\n self._task_d.report(request.task_id, False)\n else:\n self._task_d.report(request.task_id, True)\n return empty_pb2.Empty()\n\n def ReportEvaluationMetrics(self, request, _):\n report_metrics = 
self._evaluation_service.report_evaluation_metrics(\n request.model_version, request.model_outputs, request.labels\n )\n res = elasticdl_pb2.ReportEvaluationMetricsResponse()\n res.model_version = self._version\n res.accepted = report_metrics\n return res\n"
] |
[
[
"tensorflow.math.reduce_max"
]
] |
bgraedel/arcos-gui
|
[
"aaeeba3aae1bc9a23c635ebabf6309f878ad8a39"
] |
[
"src/arcos_gui/temp_data_storage.py"
] |
[
"import pandas as pd\nfrom arcos4py import ARCOS\nfrom napari.utils.colormaps import AVAILABLE_COLORMAPS\n\n\n# store and retrive a number of variables\nclass data_storage:\n def __init__(self):\n self.layer_names: list = []\n self.data_merged: pd.DataFrame = pd.DataFrame()\n self.arcos: ARCOS = None # type: ignore\n self.ts_data: pd.DataFrame = pd.DataFrame()\n self.colormaps = list(AVAILABLE_COLORMAPS)\n self.current_position = None\n self.positions = \"None\"\n self.min_max = (0, 1)\n self.lut = \"RdYlBu_r\"\n self.colormaps.append(\"RdYlBu_r\")\n self._callbacks = []\n\n @property\n def filename_for_sample_data(self):\n return self._value\n\n @filename_for_sample_data.setter\n def filename_for_sample_data(self, new_value):\n self._filename_for_sample_data = new_value\n self._notify_observers(new_value)\n\n def _notify_observers(self, new_value):\n for callback in self._callbacks:\n callback(new_value)\n\n def register_callback(self, callback):\n self._callbacks.append(callback)\n"
] |
[
[
"pandas.DataFrame"
]
] |
voidful/s3prl
|
[
"78cd91d717abf151e855a874070ef17679136e5f"
] |
[
"s3prl/upstream/hubert_code_centroid/expert.py"
] |
[
"from collections import defaultdict\nfrom typing import List\n\nimport torch.nn as nn\nfrom torch import Tensor\n# from transformers import Wav2Vec2FeatureExtractor, HubertModel\nimport joblib\nimport torch\n\nSAMPLE_RATE = 16000\n\n\nclass UpstreamExpert(nn.Module):\n def __init__(self, ckpt: str = None, model_config: str = None, layer: int = 22, **kwargs):\n super().__init__()\n\n self.model = torch.hub.load('s3prl/s3prl', 'hubert_large_ll60k')\n self.model.eval()\n if torch.cuda.is_available():\n self.model = self.model.cuda()\n self.km_layer = layer\n\n self.C = defaultdict(list)\n self.Cnorm = defaultdict(list)\n self.Ctran = defaultdict(list)\n\n for layer in range(25):\n km_model = joblib.load(f\"./upstream/hubert_code_centroid/km_feat_100/km_feat_100_layer_{layer}\")\n C_np = km_model.cluster_centers_.transpose()\n Cnorm_np = (C_np ** 2).sum(0, keepdims=True)\n self.C[layer] = torch.from_numpy(C_np)\n self.Ctran[layer] = torch.from_numpy(C_np.transpose())\n self.Cnorm[layer] = torch.from_numpy(Cnorm_np)\n if torch.cuda.is_available():\n self.C[layer] = self.C[layer].cuda()\n self.Cnorm[layer] = self.Cnorm[layer].cuda()\n self.Ctran[layer] = self.Ctran[layer].cuda()\n\n def forward(self, wavs: List[Tensor]):\n with torch.no_grad():\n device = wavs[0].device\n model_outputs = self.model(wavs)\n all_centroid_state = []\n for layer in range(25):\n x = model_outputs['hidden_state_' + str(self.km_layer)].squeeze()\n dist = torch.sqrt(\n x.pow(2).sum(-1, keepdim=True) - 2 * torch.matmul(x, self.C[layer]) + self.Cnorm[layer]\n )\n min_dist = torch.flatten(dist.argmin(dim=-1))\n centroid_state = torch.index_select(self.Ctran[layer], 0, min_dist)\n centroid_state = centroid_state.view(x.shape)\n if centroid_state.dim() == 2:\n centroid_state = centroid_state.unsqueeze(dim=0)\n \n\n centroid_state = centroid_state.to(device)\n\n all_centroid_state.append(centroid_state)\n return {\n \"last_hidden_state\": centroid_state,\n \"hidden_states\": tuple(all_centroid_state),\n }\n\n"
] |
[
[
"torch.from_numpy",
"torch.matmul",
"torch.no_grad",
"torch.cuda.is_available",
"torch.hub.load",
"torch.index_select"
]
] |
dollking/AL-test
|
[
"0e698156ed3ed48f736560e508554ea04b933b0b"
] |
[
"query/graph/vae.py"
] |
[
"import random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Residual(nn.Module):\n def __init__(self, in_channels, num_hiddens, num_residual_hiddens):\n super(Residual, self).__init__()\n self._block = nn.Sequential(\n nn.ReLU(True),\n nn.Conv2d(in_channels=in_channels,\n out_channels=num_residual_hiddens,\n kernel_size=3, stride=1, padding=1, bias=False),\n nn.ReLU(True),\n nn.Conv2d(in_channels=num_residual_hiddens,\n out_channels=num_hiddens,\n kernel_size=1, stride=1, bias=False)\n )\n\n def forward(self, x):\n return x + self._block(x)\n\n\nclass ResidualStack(nn.Module):\n def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):\n super(ResidualStack, self).__init__()\n self._num_residual_layers = num_residual_layers\n self._layers = nn.ModuleList([Residual(in_channels, num_hiddens, num_residual_hiddens)\n for _ in range(self._num_residual_layers)])\n\n def forward(self, x):\n for i in range(self._num_residual_layers):\n x = self._layers[i](x)\n return x\n\n\nclass Encoder(nn.Module):\n def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):\n super(Encoder, self).__init__()\n\n self._conv_1 = nn.Conv2d(in_channels=in_channels,\n out_channels=num_hiddens // 2,\n kernel_size=4,\n stride=2, padding=1)\n self._conv_2 = nn.Conv2d(in_channels=num_hiddens // 2,\n out_channels=num_hiddens,\n kernel_size=4,\n stride=2, padding=1)\n self._conv_3 = nn.Conv2d(in_channels=num_hiddens,\n out_channels=num_hiddens,\n kernel_size=3,\n stride=1, padding=1)\n self._residual_stack = ResidualStack(in_channels=num_hiddens,\n num_hiddens=num_hiddens,\n num_residual_layers=num_residual_layers,\n num_residual_hiddens=num_residual_hiddens)\n\n def forward(self, inputs):\n x = self._conv_1(inputs)\n x = F.relu(x)\n\n x = self._conv_2(x)\n x = F.relu(x)\n\n x = self._conv_3(x)\n\n return self._residual_stack(x)\n\n\nclass Decoder(nn.Module):\n def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens):\n super(Decoder, self).__init__()\n\n self._conv_1 = nn.Conv2d(in_channels=in_channels,\n out_channels=num_hiddens,\n kernel_size=3,\n stride=1, padding=1)\n\n self._residual_stack = ResidualStack(in_channels=num_hiddens,\n num_hiddens=num_hiddens,\n num_residual_layers=num_residual_layers,\n num_residual_hiddens=num_residual_hiddens)\n\n self._conv_trans_1 = nn.ConvTranspose2d(in_channels=num_hiddens,\n out_channels=num_hiddens // 2,\n kernel_size=4,\n stride=2, padding=1)\n\n self._conv_trans_2 = nn.ConvTranspose2d(in_channels=num_hiddens // 2,\n out_channels=num_hiddens // 4,\n kernel_size=4,\n stride=2, padding=1)\n\n self._conv_trans_3 = nn.ConvTranspose2d(in_channels=num_hiddens // 4,\n out_channels=3,\n kernel_size=4,\n stride=2, padding=1)\n\n def forward(self, inputs):\n x = self._conv_1(inputs)\n\n x = self._residual_stack(x)\n\n x = self._conv_trans_1(x)\n x = F.relu(x)\n\n x = self._conv_trans_2(x)\n x = F.relu(x)\n\n return self._conv_trans_3(x)\n\n\nclass VAE(nn.Module):\n def __init__(self, num_hiddens, num_residual_layers, num_residual_hiddens, embedding_dim):\n super(VAE, self).__init__()\n\n self.embedding_dim = embedding_dim\n\n self._encoder = Encoder(3, num_hiddens,\n num_residual_layers,\n num_residual_hiddens)\n self.conv1_1 = nn.Conv2d(in_channels=num_hiddens,\n out_channels=num_hiddens,\n kernel_size=3,\n stride=2, padding=1)\n self.conv1_2 = nn.Conv2d(in_channels=num_hiddens,\n out_channels=num_hiddens,\n kernel_size=3,\n stride=2, padding=1)\n\n self.conv2 = 
nn.Conv2d(in_channels=num_hiddens,\n out_channels=embedding_dim,\n kernel_size=1,\n stride=1)\n\n self._decoder = Decoder(embedding_dim + num_hiddens,\n num_hiddens,\n num_residual_layers,\n num_residual_hiddens)\n\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n\n def reparameterize(self, mu, logvar):\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps * std + mu\n\n def forward(self, x):\n encoder_out = self._encoder(x)\n mu = self.conv1_1(encoder_out)\n logvar = self.conv1_2(encoder_out)\n\n z = self.reparameterize(mu, logvar)\n\n _z = self.avg_pool(self.conv2(z))\n code = torch.sign(_z)\n\n _, _, w, h = z.size()\n quantized = code.repeat([1, 1, w, h])\n\n decoder_in = torch.cat([z, quantized], dim=1)\n\n x_recon = self._decoder(decoder_in)\n\n return x_recon, _z, mu, logvar\n"
] |
[
[
"torch.randn_like",
"torch.nn.ConvTranspose2d",
"torch.sign",
"torch.cat",
"torch.nn.Conv2d",
"torch.exp",
"torch.nn.functional.relu",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU"
]
] |
RyanArnasonML/stock-analysis
|
[
"a5c79d9c438f095dc370f2db4e4780356cdc5d01"
] |
[
"stock_analysis/stock_modeler.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSimple time series modeling for stocks.\r\n\r\nCreated on Sat Oct 31 15:16:24 2020\r\n\r\n@author: ryanar\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\nfrom statsmodels.tsa.seasonal import seasonal_decompose\r\n\r\nimport statsmodels.api as sm\r\nfrom statsmodels.tsa.arima.model import ARIMA\r\n\r\nimport itertools\r\nimport warnings\r\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\r\n\r\nfrom .utils import validate_df\r\n\r\nclass StockModeler:\r\n \"\"\" \r\n Static method for modeling stocks.\r\n \"\"\"\r\n \r\n def __init__(self):\r\n raise NotImplementedError(\"This class is to be used statically, don't instantiate it!\")\r\n \r\n @staticmethod\r\n @validate_df(columns={'close'},instance_method=False) \r\n def decompose(df, period=20, model=\"additive\"):\r\n \"\"\"\r\n Decompose the closing price of the stock into trend, seasonal, and remainder components.\r\n\r\n Parameters\r\n ----------\r\n df : Dateframe\r\n Contains the stock closing price and index is the Date.\r\n period : Unsigned Integer\r\n The number of periods in the frequency.\r\n model : String, optional\r\n How to compute the decomposition ('additive', 'multiplicative'). The default is \"additive\".\r\n\r\n Returns\r\n -------\r\n A statsmodels decomposition object.\r\n\r\n \"\"\"\r\n return seasonal_decompose(df.close, model=model, period=period)\r\n \r\n @staticmethod\r\n @validate_df(columns={'close'}, instance_method=False)\r\n def arima(df, *, ar=10, i=1, ma=5, fit=True):\r\n \"\"\"\r\n Create an ARIMA object for modeling time series.\r\n\r\n Parameters\r\n ----------\r\n df : Dataframe\r\n The dataframe should have the index as datetime and a column with stock closing prices.\r\n * : TYPE\r\n DESCRIPTION.\r\n ar : TYPE\r\n The autoregressive order (p).\r\n i : TYPE\r\n The differenced order (q).\r\n ma : TYPE\r\n The moving average order (d).\r\n fit : TYPE, optional\r\n Whether or not to return the fitted model. The default is True.\r\n\r\n Returns\r\n -------\r\n A statsmodels ARIMA object which you can use to fit and predict.\r\n\r\n \"\"\"\r\n arima_model = ARIMA(df.close.asfreq('B').fillna(method='ffill'), order=(ar, i, ma))\r\n return arima_model.fit() if fit else arima_model\r\n \r\n @staticmethod\r\n @validate_df(columns={'close'}, instance_method=False)\r\n def arima_predictions(df, arima_model_fitted, start, end, plot=True, **kwargs):\r\n \"\"\"\r\n Get ARIMA predictions as pandas Series or plot.\r\n\r\n Parameters\r\n ----------\r\n df : Dataframe\r\n The dataframe should have the index as datetime and a column with stock closing prices.\r\n arima_model_fitted : TYPE\r\n The fitted ARIMA model.\r\n start : TYPE\r\n The start date for the predictions.\r\n end : TYPE\r\n The end date for the predictions.\r\n plot : BOOLEAN, optional\r\n Selects the return type of the function. \r\n False is pandas Series containing the predictions. 
\r\n The default is True which will return a plot.\r\n **kwargs : Dictionary\r\n Additional keyword arguments to pass to the pandas plot() method.\r\n\r\n Returns\r\n -------\r\n A matplotlib Axes object or predictions as a Series depending on the value of the `plot` argument.\r\n\r\n \"\"\"\r\n predicted_changes = arima_model_fitted.predict(start=start, end=end)\r\n \r\n predictions = pd.Series(predicted_changes, name='close').cumsum( ) + df.last('1D').close.iat[0]\r\n \r\n if plot:\r\n ax = df.close.plot(**kwargs)\r\n predictions.plot(ax=ax, style='r:', label='arima predictions')\r\n ax.legend()\r\n \r\n return ax if plot else predictions\r\n \r\n \r\n @validate_df(columns={'close'}, instance_method=False)\r\n def arima_grid_search(df, s):\r\n p = d = q = range(2)\r\n \r\n param_combinations = list(itertools.product(p, d, q))\r\n \r\n lowest_aic, pdq, pdqs = None, None, None\r\n \r\n total_interations = 0\r\n \r\n for order in param_combinations:\r\n seasonal_order = (p, q, d, s)\r\n total_interations += 1\r\n try:\r\n model = SARIMAX(df,\r\n order=order,\r\n seasonal_order = seasonal_order,\r\n enforce_stationarity = False,\r\n enforce_invertibility=False,\r\n disp=False)\r\n model_result = model.fit(maxiter=200, disp=False)\r\n \r\n if not lowest_aic or model_result.aic < lowest_aic:\r\n lowest_aic = model_result.aic\r\n pdq, pdqs = order, seasonal_order\r\n \r\n except Exception as ex:\r\n continue\r\n \r\n return lowest_aic, pdq, pdqs\r\n \r\n @validate_df(columns={'close'}, instance_method=False)\r\n def sarimax(df, s = 12):\r\n\r\n lowest_aic, order, seasonal_order = StockModeler.arima_grid_search(df.close, s)\r\n\r\n model = SARIMAX(\r\n df.close,\r\n order = order,\r\n seasonal_order = seasonal_order,\r\n enforce_stationarity = False,\r\n enforce_invertibility = False,\r\n disp = False\r\n )\r\n\r\n model_results = model.fit(maxiter=200, disp = False)\r\n\r\n print('Lowest AIC: %.3f'%lowest_aic)\r\n print(model_results.summary())\r\n print(model_results.resid.describe())\r\n\r\n model_results.plot_diagnostics(figsize=(12,8))\r\n \r\n # n = len(df_settle.index)\r\n # prediction = model_results.get_prediction(\r\n # start=n-12*5, \r\n # end=n+5\r\n # )\r\n # prediction_ci = prediction.conf_int()\r\n \r\n \r\n # plt.figure(figsize=(12, 6))\r\n\r\n # ax = df_settle['2008':].plot(label='actual')\r\n # prediction_ci.plot(\r\n # ax=ax, style=['--', '--'],\r\n # label='predicted/forecasted')\r\n\r\n # ci_index = prediction_ci.index\r\n # lower_ci = prediction_ci.iloc[:, 0]\r\n # upper_ci = prediction_ci.iloc[:, 1]\r\n\r\n # ax.fill_between(ci_index, lower_ci, upper_ci,\r\n # color='r', alpha=.1)\r\n\r\n # ax.set_xlabel('Time (years)')\r\n # ax.set_ylabel('Prices')\r\n\r\n # plt.legend()\r\n # plt.show()\r\n \r\n @staticmethod\r\n @validate_df(columns={'close'}, instance_method=False)\r\n def regression(df):\r\n \"\"\"\r\n Create linear regression of time series data with a lag of 1.\r\n\r\n Parameters\r\n ----------\r\n df : Dataframe\r\n The dataframe should have the index as datetime and a column with stock closing prices.\r\n\r\n Returns\r\n -------\r\n X : datetime\r\n Dates\r\n Y : FLOAT\r\n The closing price of a stock or security.\r\n Object\r\n A fitted statsmodels linear regression.\r\n\r\n \"\"\"\r\n X = df.close.shift().dropna()\r\n Y = df.close[1:]\r\n return X, Y, sm.OLS(Y, X).fit()\r\n \r\n @staticmethod\r\n @validate_df(columns={'close'},instance_method=False)\r\n def regression_predictions(df, model, start, end, plot=True, **kwargs):\r\n \"\"\"\r\n Get linear regression 
predictions as pandas Series or plot.\r\n\r\n Parameters\r\n ----------\r\n df : Dataframe\r\n The dataframe should have the index as datetime and a column with stock closing prices.\r\n model : TYPE\r\n The fitted linear regression model.\r\n start : datetime\r\n The start date for the predictions.\r\n end : datetime\r\n The end date for the predictions.\r\n plot : Boolean, optional\r\n False is a pandas Series containing the predictions being return. The default is True, and is a plot being returned.\r\n **kwargs : Dictionary\r\n Additional keyword arguments to pass to the pandas plot() method.\r\n\r\n Returns\r\n -------\r\n A matplotlib Axes object or predictions as a Series depending on the value of the plot argument.\r\n\r\n \"\"\"\r\n predictions = pd.Series(index=pd.date_range(start,end),name='close')\r\n last = df.last('1D').close\r\n for i, date in enumerate(predictions.index):\r\n \r\n if i == 0:\r\n pred = model.predict(last)\r\n else:\r\n pred = model.predict(predictions.iloc[i-1])\r\n \r\n predictions.loc[date] = pred[0]\r\n \r\n if plot:\r\n ax = df.close.plot(**kwargs)\r\n predictions.plot(ax=ax, style='r', label='regression predictions')\r\n ax.legend()\r\n \r\n return ax if plot else predictions\r\n \r\n @staticmethod\r\n def plot_residuals(model_fitted):\r\n fig, axes = plt.subplots(1,2, figsize=(15, 5))\r\n residuals = pd.Series(model_fitted.resid, name='residuals')\r\n residuals.plot(style='bo', ax=axes[0], title='Residuals')\r\n axes[0].set_xlabel('Date')\r\n axes[0].set_ylabel('Residual')\r\n residuals.plot(kind='kde', ax=axes[1], title='Residuals KDE')\r\n axes[1].set_xlabel('Residual')\r\n return axes\r\n \r\n \r\n \r\n \r\n "
] |
[
[
"matplotlib.pyplot.subplots",
"pandas.Series",
"pandas.date_range"
]
] |
mgrubisic/coronavirus-2020
|
[
"0242b4f18416bcc055326d6ddcb300d8edd6baa9"
] |
[
"tests/test_metadata.py"
] |
[
"import datetime\nimport json\nimport math\nimport os\nimport time\nimport pytest\nimport numpy as np\nimport pandas as pd\n\n\nfrom oscovida import MetadataRegion\n\n\ndef test_MetadataRegion_basics():\n m = MetadataRegion(\"Germany\", \"w\")\n # assert os.path.exists(MetadataStorageLocation)\n\n m['html'] = \"html-pfad\"\n m['ipynb'] = \"ipynb-pfad\"\n\n m = MetadataRegion(\"UK\", \"w\")\n m['html'] = \"html-path\"\n\n m = MetadataRegion(\"Germany\")\n assert m['html'] == \"html-pfad\"\n assert m['ipynb'] == \"ipynb-pfad\"\n assert sorted(m.keys()) == sorted([\"html\", \"ipynb\"])\n\n assert m.as_dict() == {'html': 'html-pfad', 'ipynb': 'ipynb-pfad'}\n\n m = MetadataRegion(\"UK\")\n assert m['html'] == \"html-path\"\n with pytest.raises(KeyError):\n m['missing-key'] \n\n\ndef test_MetadataRegion_updated():\n m = MetadataRegion(\"Test\", \"w\")\n assert m.last_updated_hours_ago() == math.inf\n\n m.mark_as_updated()\n # should be faster than a second\n assert m.last_updated_hours_ago()*3600 < 1\n assert m.last_updated_hours_ago() > 0\n\n time.sleep(1)\n assert m.last_updated_hours_ago()*3600 > 0.5\n\n m2 = MetadataRegion(\"Test\")\n assert m.last_updated_hours_ago()*3600 > 0.5\n assert m.last_updated_hours_ago()*3600 < 2.0\n\n # calling last_updated adds this key\n assert list(m.keys()) == [\"__last_modified__\"]\n\n\ndef test_MetadataRegion_get_regions():\n MetadataRegion.clear_all()\n\n m = MetadataRegion(\"Germany\", \"w\")\n # assert os.path.exists(MetadataStorageLocation)\n\n m['html'] = \"html-pfad\"\n m['ipynb'] = \"ipynb-pfad\"\n\n m = MetadataRegion(\"UK\", \"w\")\n m['html'] = \"html-path\"\n\n assert sorted(MetadataRegion.get_all()) == [\"Germany\", \"UK\"]\n\n MetadataRegion.clear_all()\n\n assert sorted(MetadataRegion.get_all()) == []\n\n\n\ndef test_MetadataRegion_get_all_as_dataframe():\n MetadataRegion.clear_all()\n m = MetadataRegion(\"Germany\", \"w\")\n # assert os.path.exists(MetadataStorageLocation)\n\n m['html'] = \"html-pfad\"\n m['ipynb'] = \"ipynb-pfad\"\n\n m = MetadataRegion(\"UK\", \"w\")\n m['html'] = \"html-path\"\n m['ipynb'] = \"ipynb-path\"\n\n ref = pd.DataFrame({'html' : {'UK' : 'html-path', \"Germany\" : 'html-pfad'},\n 'ipynb' : {'UK' : 'ipynb-path', \"Germany\" : 'ipynb-pfad'}})\n actual = MetadataRegion.get_all_as_dataframe()\n\n # We want to run this line:\n # assert ref.equals(actual)\n # but need to sort the table to be sure rows are in the\n # same order:\n assert ref.sort_index().equals(actual.sort_index())\n"
] |
[
[
"pandas.DataFrame"
]
] |
IronSublimate/CenterNet-IS-old
|
[
"a3df08e17d47a63e40f020e6cf2a0c8ec347ac12"
] |
[
"src/lib/datasets/sample/multi_pose.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datasets.base import BaseDataset\nimport numpy as np\nimport torch\nimport json\nimport cv2\nimport os\nfrom utils.image import flip, color_aug\nfrom utils.image import get_affine_transform, affine_transform\nfrom utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian\nfrom utils.image import draw_dense_reg\nimport math\n\n\nclass MultiPoseDataset(BaseDataset):\n def _coco_box_to_bbox(self, box):\n bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],\n dtype=np.float32)\n return bbox\n\n def _get_border(self, border, size):\n i = 1\n while size - border // i <= border // i:\n i *= 2\n return border // i\n\n def __getitem__(self, index):\n img_id = self.images[index]\n file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']\n img_path = os.path.join(self.img_dir, file_name)\n ann_ids = self.coco.getAnnIds(imgIds=[img_id])\n anns = self.coco.loadAnns(ids=ann_ids)\n num_objs = min(len(anns), self.max_objs)\n\n img = cv2.imread(img_path)\n\n height, width = img.shape[0], img.shape[1]\n c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)\n s = max(img.shape[0], img.shape[1]) * 1.0\n rot = 0\n\n flipped = False\n if self.split == 'train':\n if not self.opt.not_rand_crop:\n s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))\n w_border = self._get_border(128, img.shape[1])\n h_border = self._get_border(128, img.shape[0])\n c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)\n c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)\n else:\n sf = self.opt.scale\n cf = self.opt.shift\n c[0] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)\n c[1] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)\n s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)\n if np.random.random() < self.opt.aug_rot:\n rf = self.opt.rotate\n rot = np.clip(np.random.randn() * rf, -rf * 2, rf * 2)\n\n if np.random.random() < self.opt.flip:\n flipped = True\n img = img[:, ::-1, :]\n c[0] = width - c[0] - 1\n\n trans_input = get_affine_transform(\n c, s, rot, [self.opt.input_res, self.opt.input_res])\n inp = cv2.warpAffine(img, trans_input,\n (self.opt.input_res, self.opt.input_res),\n flags=cv2.INTER_LINEAR)\n inp = (inp.astype(np.float32) / 255.)\n if self.split == 'train' and not self.opt.no_color_aug:\n color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)\n inp = (inp - self.mean) / self.std\n inp = inp.transpose(2, 0, 1)\n\n output_res = self.opt.output_res\n num_joints = self.num_joints\n trans_output_rot = get_affine_transform(c, s, rot, [output_res, output_res])\n trans_output = get_affine_transform(c, s, 0, [output_res, output_res])\n\n hm = np.zeros((self.num_classes, output_res, output_res), dtype=np.float32)\n hm_hp = np.zeros((num_joints, output_res, output_res), dtype=np.float32)\n dense_kps = np.zeros((num_joints, 2, output_res, output_res),\n dtype=np.float32)\n dense_kps_mask = np.zeros((num_joints, output_res, output_res),\n dtype=np.float32)\n wh = np.zeros((self.max_objs, 2), dtype=np.float32)\n kps = np.zeros((self.max_objs, num_joints * 2), dtype=np.float32)\n reg = np.zeros((self.max_objs, 2), dtype=np.float32)\n ind = np.zeros((self.max_objs), dtype=np.int64)\n reg_mask = np.zeros((self.max_objs), dtype=np.uint8)\n kps_mask = np.zeros((self.max_objs, self.num_joints * 2), dtype=np.uint8)\n hp_offset = np.zeros((self.max_objs * num_joints, 2), dtype=np.float32)\n hp_ind = 
np.zeros((self.max_objs * num_joints), dtype=np.int64)\n hp_mask = np.zeros((self.max_objs * num_joints), dtype=np.int64)\n\n draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \\\n draw_umich_gaussian\n\n gt_det = []\n for k in range(num_objs):\n ann = anns[k]\n bbox = self._coco_box_to_bbox(ann['bbox'])\n cls_id = int(ann['category_id']) - 1\n pts = np.array(ann['keypoints'], np.float32).reshape(num_joints, 3)\n if flipped:\n bbox[[0, 2]] = width - bbox[[2, 0]] - 1\n pts[:, 0] = width - pts[:, 0] - 1\n for e in self.flip_idx:\n pts[e[0]], pts[e[1]] = pts[e[1]].copy(), pts[e[0]].copy()\n bbox[:2] = affine_transform(bbox[:2], trans_output)\n bbox[2:] = affine_transform(bbox[2:], trans_output)\n bbox = np.clip(bbox, 0, output_res - 1)\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n if (h > 0 and w > 0) or (rot != 0):\n radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n radius = self.opt.hm_gauss if self.opt.mse_loss else max(0, int(radius))\n ct = np.array(\n [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n ct_int = ct.astype(np.int32)\n wh[k] = 1. * w, 1. * h\n ind[k] = ct_int[1] * output_res + ct_int[0]\n reg[k] = ct - ct_int\n reg_mask[k] = 1\n num_kpts = pts[:, 2].sum()\n if num_kpts == 0:\n hm[cls_id, ct_int[1], ct_int[0]] = 0.9999\n reg_mask[k] = 0\n\n hp_radius = gaussian_radius((math.ceil(h), math.ceil(w)))\n hp_radius = self.opt.hm_gauss \\\n if self.opt.mse_loss else max(0, int(hp_radius))\n for j in range(num_joints):\n if pts[j, 2] > 0:\n pts[j, :2] = affine_transform(pts[j, :2], trans_output_rot)\n if pts[j, 0] >= 0 and pts[j, 0] < output_res and \\\n pts[j, 1] >= 0 and pts[j, 1] < output_res:\n kps[k, j * 2: j * 2 + 2] = pts[j, :2] - ct_int\n kps_mask[k, j * 2: j * 2 + 2] = 1\n pt_int = pts[j, :2].astype(np.int32)\n hp_offset[k * num_joints + j] = pts[j, :2] - pt_int\n hp_ind[k * num_joints + j] = pt_int[1] * output_res + pt_int[0]\n hp_mask[k * num_joints + j] = 1\n if self.opt.dense_hp:\n # must be before draw center hm gaussian\n draw_dense_reg(dense_kps[j], hm[cls_id], ct_int,\n pts[j, :2] - ct_int, radius, is_offset=True)\n draw_gaussian(dense_kps_mask[j], ct_int, radius)\n draw_gaussian(hm_hp[j], pt_int, hp_radius)\n draw_gaussian(hm[cls_id], ct_int, radius)\n gt_det.append([ct[0] - w / 2, ct[1] - h / 2,\n ct[0] + w / 2, ct[1] + h / 2, 1] +\n pts[:, :2].reshape(num_joints * 2).tolist() + [cls_id])\n if rot != 0:\n hm = hm * 0 + 0.9999\n reg_mask *= 0\n kps_mask *= 0\n ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh,\n 'hps': kps, 'hps_mask': kps_mask}\n if self.opt.dense_hp:\n dense_kps = dense_kps.reshape(num_joints * 2, output_res, output_res)\n dense_kps_mask = dense_kps_mask.reshape(\n num_joints, 1, output_res, output_res)\n dense_kps_mask = np.concatenate([dense_kps_mask, dense_kps_mask], axis=1)\n dense_kps_mask = dense_kps_mask.reshape(\n num_joints * 2, output_res, output_res)\n ret.update({'dense_hps': dense_kps, 'dense_hps_mask': dense_kps_mask})\n del ret['hps'], ret['hps_mask']\n if self.opt.reg_offset:\n ret.update({'reg': reg})\n if self.opt.hm_hp:\n ret.update({'hm_hp': hm_hp})\n if self.opt.reg_hp_offset:\n ret.update({'hp_offset': hp_offset, 'hp_ind': hp_ind, 'hp_mask': hp_mask})\n if self.opt.debug > 0 or not self.split == 'train':\n gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \\\n np.zeros((1, 40), dtype=np.float32)\n meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id}\n ret['meta'] = meta\n return ret\n"
] |
[
[
"numpy.random.random",
"numpy.clip",
"numpy.arange",
"numpy.concatenate",
"numpy.random.randn",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |