repo_name
stringlengths
6
130
hexsha
list
file_path
list
code
list
apis
list
flofriday/youtube-data
[ "24ac3e32c19e74bcc336183f309237e3e69662ca" ]
[ "bot.py" ]
[ "import logging\nimport os\nimport io\nimport matplotlib\n\nimport matplotlib.pyplot as plt\nimport pytz\nimport telegram\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nfrom telegram.ext.dispatcher import run_async\n\nimport youtube_data as ytd\nimport user\nfrom user import User, UserState\n\n# Disabling matplotlib from opening a window on the server\nmatplotlib.use(\"Agg\")\nplt.ioff()\n\n\n# Enable logging\nlogging.basicConfig(\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef send_plot(\n user: User, context, plot: matplotlib.axes.Axes, caption: str = \"\"\n):\n \"\"\"Send a pandas plot to the specified chat\"\"\"\n fig = plot.get_figure()\n fig.tight_layout()\n image = io.BytesIO()\n fig.savefig(image, format=\"png\", dpi=300)\n plt.close(fig)\n image.seek(0)\n context.bot.send_photo(\n chat_id=user.telegram_id,\n photo=image,\n parse_mode=\"Markdown\",\n caption=caption,\n )\n pass\n\n\n@run_async\ndef start_command(update, context):\n \"\"\"Send a message when the command /start is issued.\"\"\"\n user = User.load(update.effective_user.id)\n\n message = (\n f\"Hi {update.effective_user.name} 😊\\n\"\n \"YouTube saves a lot of data about you, however I can help you to \"\n \"get some insight into this data. So for me to help you, you need to \"\n \"download your data and send me the files `watch-history.json` and \"\n \"`search-history.json`. Here is a [Guide]\"\n \"(https://github.com/flofriday/youtube-data/blob/master/Download_Guide.md)\"\n \" on how to download your personal data.\\n\\n\"\n \"Some of the graphs I can create are time-sensetive, so it is \"\n \"important that I know in which timezone you live in. At the moment I \"\n f\"think/asume you live in `{user.timezone}`, if this is wrong you \"\n \"can correct me with the /timezone command.\\n\\n\"\n \"This bot is free software, and is developed in the hope to \"\n \"be useful. 
Its code is publicly available on \"\n \"[GitHub](https://github.com/flofriday/youtube-data).\\n\\n\"\n \"*Disclaimer:* This is not an official YouTube application, nor am I \"\n \"[flofriday](https://github.com/flofriday), in any way \"\n \"associated with YouTube or Google.\"\n )\n\n update.message.reply_text(\n message, parse_mode=\"Markdown\", disable_web_page_preview=True\n )\n\n # Also show all the available commands\n help_command(update, context)\n\n\n@run_async\ndef privacy_command(update, context):\n \"\"\"Tell the user how this bot manages their data\"\"\"\n message = (\n \"*Privarcy* πŸ”’\\n\"\n \"Privacy clearly is important, and this bot takes this subject \"\n \"seriously. Thats why *this bot doesn't save your personal \"\n \"YouTube data*.\\n\"\n \"However, this bot does save some userdata, which are either \"\n \"collected to enable some feature, or to enable some kind of \"\n \"analytics. Having this said, I will promise to allways make it \"\n \"clear, what this bot collects. Therefore, I created the /info and \"\n \"/statitic commands. 
The info command shows you all the data this bot \"\n \"knows about you.\"\n )\n update.message.reply_text(message, parse_mode=\"Markdown\")\n\n\n@run_async\ndef help_command(update, context):\n \"\"\"Send a message when the command /help is issued.\"\"\"\n message = (\n \"*Things I can do* πŸ€“\\n\"\n \"/timezone - Set your timezone\\n\"\n \"/privacy - How this bot handles your data\\n\"\n \"/info - Informations the bot has about you\\n\"\n \"/statistic - Informations on the bots usage\\n\"\n \"/help - This help message\"\n )\n update.message.reply_text(message, parse_mode=\"Markdown\")\n\n\n@run_async\ndef info_command(update, context):\n \"\"\"Show the user what the bot thinks about them\"\"\"\n user = User.load(update.effective_user.id)\n message = (\n \"*User Info*\\n\"\n f\"Telegram ID: {user.telegram_id}\\n\"\n f\"State: {UserState(user.state).name}\\n\"\n f\"Timezone: {user.timezone}\\n\"\n f\"Number of reports: {user.analyzes}\"\n )\n update.message.reply_text(\n message, parse_mode=\"Markdown\", disable_web_page_preview=True\n )\n\n\n@run_async\ndef statistic_command(update, context):\n \"\"\"Tell the user how many users their are\"\"\"\n users, analyzes = User.statistics()\n\n message = (\n f\"*Statistics*\\nUsers: *{users}*\\nAnalyzes calculated: *{analyzes}*\"\n )\n update.message.reply_text(message, parse_mode=\"Markdown\")\n\n\n@run_async\ndef timezone_command(update, context):\n \"\"\"Set the timezone for the user\"\"\"\n user = User.load(update.effective_user.id)\n user.state = UserState.send_timezone\n user.update()\n\n message = (\n \"Send me the timezone you live in.\\n\"\n \"Unfortunatly, I am very strict about the format πŸ˜….\\n\"\n \"The format must be like `Europe/Vienna`.\\n\"\n \"Here is the [Wikipedia Link]\"\n \"(https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) \"\n \"to help you out.\"\n )\n update.message.reply_text(\n message, parse_mode=\"Markdown\", disable_web_page_preview=True\n )\n\n\n@run_async\ndef 
document_message(update, context):\n \"\"\"React to files the user sends the bot\"\"\"\n\n filename = update.message.document.file_name\n if filename == \"search-history.json\":\n context.bot.send_chat_action(\n chat_id=update.effective_user.id,\n action=telegram.ChatAction.TYPING,\n )\n analyze_search(update, context)\n return\n elif filename == \"watch-history.json\":\n context.bot.send_chat_action(\n chat_id=update.effective_user.id,\n action=telegram.ChatAction.TYPING,\n )\n analyze_watch(update, context)\n return\n\n message = (\n \"Sorry, the file must either be named `search-history.json` or \"\n \"`watch-history.json`. πŸ˜”\"\n )\n update.message.reply_text(\n message, parse_mode=\"Markdown\", disable_web_page_preview=True\n )\n\n\n@run_async\ndef text_message(update, context):\n \"\"\"Handle normal messages\"\"\"\n\n user = User.load(update.effective_user.id)\n if user.state == UserState.send_timezone:\n if update.message.text not in pytz.all_timezones:\n update.message.reply_text(\"Sorry, I don't know that timezone. 😰\")\n return\n user.timezone = update.message.text\n user.state = UserState.idle\n user.update()\n update.message.reply_text(\"Great, set your new timezone. πŸ˜„\")\n return\n\n # I don't know what else to do\n update.message.reply_text(\"Sorry, I don't know what you want. πŸ˜”\")\n\n\n@run_async\ndef unknown_message(update, context):\n update.message.reply_text(\"Sorry, I don't know what you want. 
πŸ˜”\")\n\n\ndef analyze_search(update, context):\n document = update.message.document\n f = None\n try:\n f = document.get_file(30)\n except telegram.TelegramError:\n update.message.reply_text(\n \"An error occoured while downloading your file.\"\n )\n return\n\n # Load the user and the data into a dataframe\n user = User.load(update.effective_user.id)\n json = f.download_as_bytearray().decode(\"utf-8\")\n df = None\n try:\n df = ytd.load_search_history(json, user.timezone)\n except Exception:\n update.message.reply_text(\n \"An error occoured while parsing your file. 😡\\n\"\n \"Maybe you uploaded a corrrupted file ?\"\n )\n return\n\n # Overall information about the searches\n info_message = (\n \"*Absolut numbers*\\n\"\n f\"Searches since {df['time'].min().strftime('%b %d %Y')}: \"\n f\"*{len(df)}*\\n\"\n f\"Average searches per day: \"\n f\"*{len(df)/((df['time'].max()-df['time'].min()).days):.2f}*\"\n )\n update.message.reply_text(info_message, parse_mode=\"Markdown\")\n\n # Plot the words used most often\n plt1 = ytd.searchword_plot(df, 24)\n send_plot(user, context, plt1)\n\n # Plot the search activity over time\n plt2 = ytd.search_timeline_plot(df)\n send_plot(user, context, plt2)\n\n # Update the counter for the user\n user.analyzes += 1\n user.update()\n update.message.reply_text(\"Done 😊\", parse_mode=\"Markdown\")\n\n\ndef analyze_watch(update, context):\n document = update.message.document\n f = None\n try:\n f = document.get_file(30)\n except telegram.TelegramError:\n update.message.reply_text(\n \"An error occoured while downloading your file.\"\n )\n return\n\n # Load the user and the data into a dataframe\n user = User.load(update.effective_user.id)\n json = f.download_as_bytearray().decode(\"utf-8\")\n df = None\n try:\n df = ytd.load_watch_history(json, user.timezone)\n except Exception:\n update.message.reply_text(\n \"An error occoured while parsing your file. 
😡\\n\"\n \"Maybe you uploaded a corrrupted file ?\"\n )\n return\n\n # Overall information about the searches\n info_message = (\n \"*Absolut numbers*\\n\"\n f\"Videos watched since {df['time'].min().strftime('%b %d %Y')}: \"\n f\"*{len(df)}*\\n\"\n f\"Average videos per day: \"\n f\"*{len(df)/((df['time'].max()-df['time'].min()).days):.2f}*\"\n )\n update.message.reply_text(info_message, parse_mode=\"Markdown\")\n\n # Plot the most watched creators\n plt = ytd.creator_plot(df, 24)\n send_plot(user, context, plt)\n\n # Plot the watch timeline\n plt = ytd.watch_timeline_plot(df)\n send_plot(user, context, plt)\n\n # Plot the hours the users watches\n plt = ytd.watch_hour_plot(df)\n send_plot(user, context, plt)\n\n # Update the counter for the user\n user.analyzes += 1\n user.update()\n update.message.reply_text(\"Done 😊\", parse_mode=\"Markdown\")\n\n\ndef main():\n \"\"\"Start the bot.\"\"\"\n\n # Initialize the database\n user.__init__(\"data/bot.db\")\n\n # Read the config from the environment\n token = os.environ[\"TELEGRAM_TOKEN\"]\n\n # Create the Updater and pass it your bot's token.\n # Make sure to set use_context=True to use the new context based callbacks\n # Post version 12 this will no longer be necessary\n updater = Updater(token, use_context=True)\n print(\"Bot running...\")\n\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n\n # on different commands - answer in Telegram\n dp.add_handler(CommandHandler(\"start\", start_command))\n dp.add_handler(CommandHandler(\"timezone\", timezone_command))\n dp.add_handler(CommandHandler(\"privacy\", privacy_command))\n dp.add_handler(CommandHandler(\"info\", info_command))\n dp.add_handler(CommandHandler(\"statistic\", statistic_command))\n dp.add_handler(CommandHandler(\"help\", help_command))\n\n # on noncommand i.e message - echo the message on Telegram\n dp.add_handler(MessageHandler(Filters.text, text_message))\n dp.add_handler(MessageHandler(Filters.document, document_message))\n 
dp.add_handler(MessageHandler(Filters.all, unknown_message))\n\n # Start the Bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.use" ] ]
DanielAndreasen/Thesis
[ "da18d41e48de5d34c8281ffd9e850dfd4fe37824" ]
[ "figures/phoenix/create_arcturus_synthetic.py" ]
[ "from __future__ import division\nimport os\nimport numpy as np\nfrom astropy.io import fits\nfrom plot_fits import nrefrac\nfrom scipy.interpolate import interp1d\n\n\ndef read_phoenix(fname):\n path = os.path.expanduser('~/.plotfits/')\n pathwave = os.path.join(path, 'WAVE_PHOENIX-ACES-AGSS-COND-2011.fits')\n w = fits.getdata(pathwave)\n f = fits.getdata(fname)\n nre = nrefrac(w) # Correction for vacuum to air (ground based)\n w = w/nre\n return w, f\n\n\nif __name__ == '__main__':\n w1, w2 = 10000, 25000\n dA = 0.01\n w, f = read_phoenix('lte04300-1.50-0.5.fits')\n\n idx = (w1 <= w) & (w <= w2)\n w, f = w[idx], f[idx]\n\n N = int((w[-1] - w[0]) / dA)\n\n flux_int_func = interp1d(w, f, kind='linear')\n ll_int = np.arange(N) * dA + w[0]\n flux_int = flux_int_func(ll_int)\n hdr = fits.getheader('lte04300-1.50-0.5.fits')\n hdr[\"NAXIS1\"] = N\n hdr[\"CDELT1\"] = dA\n hdr[\"CRVAL1\"] = w[0]\n\n fits.writeto('Arcturus_PHOENIX.fits', flux_int, hdr, clobber=True)\n" ]
[ [ "scipy.interpolate.interp1d", "numpy.arange" ] ]
arkiv2/pandas
[ "257ac884d61a74990d1cb4d72c48b1c9003298d5", "506520bd35331aa82db50686c07d96594cac0c10" ]
[ "pandas/core/algorithms.py", "pandas/tests/frame/test_misc_api.py" ]
[ "\"\"\"\nGeneric data algorithms. This module is experimental at the moment and not\nintended for public consumption\n\"\"\"\nfrom __future__ import division\nfrom warnings import warn\nimport numpy as np\n\nfrom pandas import compat, lib, tslib, _np_version_under1p8\nfrom pandas.types.cast import _maybe_promote\nfrom pandas.types.generic import (ABCSeries, ABCIndex, ABCPeriodIndex,\n ABCDatetimeIndex)\nfrom pandas.types.common import (is_integer_dtype,\n is_int64_dtype,\n is_categorical_dtype,\n is_extension_type,\n is_datetimetz,\n is_period_arraylike,\n is_datetime_or_timedelta_dtype,\n is_float_dtype,\n needs_i8_conversion,\n is_categorical,\n is_datetime64_dtype,\n is_timedelta64_dtype,\n is_scalar,\n _ensure_platform_int,\n _ensure_object,\n _ensure_float64,\n _ensure_int64,\n is_list_like)\nfrom pandas.types.missing import isnull\n\nimport pandas.core.common as com\nimport pandas.algos as algos\nimport pandas.hashtable as htable\nfrom pandas.compat import string_types\nfrom pandas.tslib import iNaT\n\n\n# --------------- #\n# top-level algos #\n# --------------- #\n\ndef match(to_match, values, na_sentinel=-1):\n \"\"\"\n Compute locations of to_match into values\n\n Parameters\n ----------\n to_match : array-like\n values to find positions of\n values : array-like\n Unique set of values\n na_sentinel : int, default -1\n Value to mark \"not found\"\n\n Examples\n --------\n\n Returns\n -------\n match : ndarray of integers\n \"\"\"\n values = com._asarray_tuplesafe(values)\n if issubclass(values.dtype.type, string_types):\n values = np.array(values, dtype='O')\n\n f = lambda htype, caster: _match_generic(to_match, values, htype, caster)\n result = _hashtable_algo(f, values.dtype, np.int64)\n\n if na_sentinel != -1:\n\n # replace but return a numpy array\n # use a Series because it handles dtype conversions properly\n from pandas.core.series import Series\n result = Series(result.ravel()).replace(-1, na_sentinel).values.\\\n reshape(result.shape)\n\n return 
result\n\n\ndef _match_generic(values, index, table_type, type_caster):\n values = type_caster(values)\n index = type_caster(index)\n table = table_type(min(len(index), 1000000))\n table.map_locations(index)\n return table.lookup(values)\n\n\ndef unique(values):\n \"\"\"\n Compute unique values (not necessarily sorted) efficiently from input array\n of values\n\n Parameters\n ----------\n values : array-like\n\n Returns\n -------\n uniques\n \"\"\"\n values = com._asarray_tuplesafe(values)\n\n f = lambda htype, caster: _unique_generic(values, htype, caster)\n return _hashtable_algo(f, values.dtype)\n\n\ndef _unique_generic(values, table_type, type_caster):\n values = type_caster(values)\n table = table_type(min(len(values), 1000000))\n uniques = table.unique(values)\n return type_caster(uniques)\n\n\ndef isin(comps, values):\n \"\"\"\n Compute the isin boolean array\n\n Parameters\n ----------\n comps: array-like\n values: array-like\n\n Returns\n -------\n boolean array same length as comps\n \"\"\"\n\n if not is_list_like(comps):\n raise TypeError(\"only list-like objects are allowed to be passed\"\n \" to isin(), you passed a \"\n \"[{0}]\".format(type(comps).__name__))\n comps = np.asarray(comps)\n if not is_list_like(values):\n raise TypeError(\"only list-like objects are allowed to be passed\"\n \" to isin(), you passed a \"\n \"[{0}]\".format(type(values).__name__))\n if not isinstance(values, np.ndarray):\n values = list(values)\n\n # GH11232\n # work-around for numpy < 1.8 and comparisions on py3\n # faster for larger cases to use np.in1d\n if (_np_version_under1p8 and compat.PY3) or len(comps) > 1000000:\n f = lambda x, y: np.in1d(x, np.asarray(list(y)))\n else:\n f = lambda x, y: lib.ismember_int64(x, set(y))\n\n # may need i8 conversion for proper membership testing\n if is_datetime64_dtype(comps):\n from pandas.tseries.tools import to_datetime\n values = to_datetime(values)._values.view('i8')\n comps = comps.view('i8')\n elif 
is_timedelta64_dtype(comps):\n from pandas.tseries.timedeltas import to_timedelta\n values = to_timedelta(values)._values.view('i8')\n comps = comps.view('i8')\n elif is_int64_dtype(comps):\n pass\n else:\n f = lambda x, y: lib.ismember(x, set(values))\n\n return f(comps, values)\n\n\ndef safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):\n \"\"\"\n Sort ``values`` and reorder corresponding ``labels``.\n ``values`` should be unique if ``labels`` is not None.\n Safe for use with mixed types (int, str), orders ints before strs.\n\n .. versionadded:: 0.19.0\n\n Parameters\n ----------\n values : list-like\n Sequence; must be unique if ``labels`` is not None.\n labels : list_like\n Indices to ``values``. All out of bound indices are treated as\n \"not found\" and will be masked with ``na_sentinel``.\n na_sentinel : int, default -1\n Value in ``labels`` to mark \"not found\".\n Ignored when ``labels`` is None.\n assume_unique : bool, default False\n When True, ``values`` are assumed to be unique, which can speed up\n the calculation. 
Ignored when ``labels`` is None.\n\n Returns\n -------\n ordered : ndarray\n Sorted ``values``\n new_labels : ndarray\n Reordered ``labels``; returned when ``labels`` is not None.\n\n Raises\n ------\n TypeError\n * If ``values`` is not list-like or if ``labels`` is neither None\n nor list-like\n * If ``values`` cannot be sorted\n ValueError\n * If ``labels`` is not None and ``values`` contain duplicates.\n \"\"\"\n if not is_list_like(values):\n raise TypeError(\"Only list-like objects are allowed to be passed to\"\n \"safe_sort as values\")\n values = np.array(values, copy=False)\n\n def sort_mixed(values):\n # order ints before strings, safe in py3\n str_pos = np.array([isinstance(x, string_types) for x in values],\n dtype=bool)\n nums = np.sort(values[~str_pos])\n strs = np.sort(values[str_pos])\n return _ensure_object(np.concatenate([nums, strs]))\n\n sorter = None\n if compat.PY3 and lib.infer_dtype(values) == 'mixed-integer':\n # unorderable in py3 if mixed str/int\n ordered = sort_mixed(values)\n else:\n try:\n sorter = values.argsort()\n ordered = values.take(sorter)\n except TypeError:\n # try this anyway\n ordered = sort_mixed(values)\n\n # labels:\n\n if labels is None:\n return ordered\n\n if not is_list_like(labels):\n raise TypeError(\"Only list-like objects or None are allowed to be\"\n \"passed to safe_sort as labels\")\n labels = _ensure_platform_int(np.asarray(labels))\n\n from pandas import Index\n if not assume_unique and not Index(values).is_unique:\n raise ValueError(\"values should be unique if labels is not None\")\n\n if sorter is None:\n # mixed types\n (hash_klass, _), values = _get_data_algo(values, _hashtables)\n t = hash_klass(len(values))\n t.map_locations(values)\n sorter = _ensure_platform_int(t.lookup(ordered))\n\n reverse_indexer = np.empty(len(sorter), dtype=np.int_)\n reverse_indexer.put(sorter, np.arange(len(sorter)))\n\n mask = (labels < -len(values)) | (labels >= len(values)) | \\\n (labels == na_sentinel)\n\n # (Out of 
bound indices will be masked with `na_sentinel` next, so we may\n # deal with them here without performance loss using `mode='wrap'`.)\n new_labels = reverse_indexer.take(labels, mode='wrap')\n np.putmask(new_labels, mask, na_sentinel)\n\n return ordered, new_labels\n\n\ndef factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):\n \"\"\"\n Encode input values as an enumerated type or categorical variable\n\n Parameters\n ----------\n values : ndarray (1-d)\n Sequence\n sort : boolean, default False\n Sort by values\n na_sentinel : int, default -1\n Value to mark \"not found\"\n size_hint : hint to the hashtable sizer\n\n Returns\n -------\n labels : the indexer to the original array\n uniques : ndarray (1-d) or Index\n the unique values. Index is returned when passed values is Index or\n Series\n\n note: an array of Periods will ignore sort as it returns an always sorted\n PeriodIndex\n \"\"\"\n from pandas import Index, Series, DatetimeIndex\n\n vals = np.asarray(values)\n\n # localize to UTC\n is_datetimetz_type = is_datetimetz(values)\n if is_datetimetz_type:\n values = DatetimeIndex(values)\n vals = values.asi8\n\n is_datetime = is_datetime64_dtype(vals)\n is_timedelta = is_timedelta64_dtype(vals)\n (hash_klass, vec_klass), vals = _get_data_algo(vals, _hashtables)\n\n table = hash_klass(size_hint or len(vals))\n uniques = vec_klass()\n labels = table.get_labels(vals, uniques, 0, na_sentinel, True)\n\n labels = _ensure_platform_int(labels)\n\n uniques = uniques.to_array()\n\n if sort and len(uniques) > 0:\n uniques, labels = safe_sort(uniques, labels, na_sentinel=na_sentinel,\n assume_unique=True)\n\n if is_datetimetz_type:\n # reset tz\n uniques = values._shallow_copy(uniques)\n elif is_datetime:\n uniques = uniques.astype('M8[ns]')\n elif is_timedelta:\n uniques = uniques.astype('m8[ns]')\n if isinstance(values, Index):\n uniques = values._shallow_copy(uniques, name=None)\n elif isinstance(values, Series):\n uniques = Index(uniques)\n 
return labels, uniques\n\n\ndef value_counts(values, sort=True, ascending=False, normalize=False,\n bins=None, dropna=True):\n \"\"\"\n Compute a histogram of the counts of non-null values.\n\n Parameters\n ----------\n values : ndarray (1-d)\n sort : boolean, default True\n Sort by values\n ascending : boolean, default False\n Sort in ascending order\n normalize: boolean, default False\n If True then compute a relative histogram\n bins : integer, optional\n Rather than count values, group them into half-open bins,\n convenience for pd.cut, only works with numeric data\n dropna : boolean, default True\n Don't include counts of NaN\n\n Returns\n -------\n value_counts : Series\n\n \"\"\"\n from pandas.core.series import Series\n name = getattr(values, 'name', None)\n\n if bins is not None:\n try:\n from pandas.tools.tile import cut\n values = Series(values).values\n cat, bins = cut(values, bins, retbins=True)\n except TypeError:\n raise TypeError(\"bins argument only works with numeric data.\")\n values = cat.codes\n\n if is_extension_type(values) and not is_datetimetz(values):\n # handle Categorical and sparse,\n # datetime tz can be handeled in ndarray path\n result = Series(values).values.value_counts(dropna=dropna)\n result.name = name\n counts = result.values\n else:\n # ndarray path. 
pass original to handle DatetimeTzBlock\n keys, counts = _value_counts_arraylike(values, dropna=dropna)\n\n from pandas import Index, Series\n if not isinstance(keys, Index):\n keys = Index(keys)\n result = Series(counts, index=keys, name=name)\n\n if bins is not None:\n # TODO: This next line should be more efficient\n result = result.reindex(np.arange(len(cat.categories)),\n fill_value=0)\n result.index = bins[:-1]\n\n if sort:\n result = result.sort_values(ascending=ascending)\n\n if normalize:\n result = result / float(counts.sum())\n\n return result\n\n\ndef _value_counts_arraylike(values, dropna=True):\n is_datetimetz_type = is_datetimetz(values)\n is_period = (isinstance(values, ABCPeriodIndex) or\n is_period_arraylike(values))\n\n orig = values\n\n from pandas.core.series import Series\n values = Series(values).values\n dtype = values.dtype\n\n if is_datetime_or_timedelta_dtype(dtype) or is_period:\n from pandas.tseries.index import DatetimeIndex\n from pandas.tseries.period import PeriodIndex\n\n if is_period:\n values = PeriodIndex(values)\n freq = values.freq\n\n values = values.view(np.int64)\n keys, counts = htable.value_count_int64(values, dropna)\n\n if dropna:\n msk = keys != iNaT\n keys, counts = keys[msk], counts[msk]\n\n # convert the keys back to the dtype we came in\n keys = keys.astype(dtype)\n\n # dtype handling\n if is_datetimetz_type:\n if isinstance(orig, ABCDatetimeIndex):\n tz = orig.tz\n else:\n tz = orig.dt.tz\n keys = DatetimeIndex._simple_new(keys, tz=tz)\n if is_period:\n keys = PeriodIndex._simple_new(keys, freq=freq)\n\n elif is_integer_dtype(dtype):\n values = _ensure_int64(values)\n keys, counts = htable.value_count_int64(values, dropna)\n elif is_float_dtype(dtype):\n values = _ensure_float64(values)\n keys, counts = htable.value_count_float64(values, dropna)\n else:\n values = _ensure_object(values)\n mask = isnull(values)\n keys, counts = htable.value_count_object(values, mask)\n if not dropna and mask.any():\n keys = 
np.insert(keys, 0, np.NaN)\n counts = np.insert(counts, 0, mask.sum())\n\n return keys, counts\n\n\ndef duplicated(values, keep='first'):\n \"\"\"\n Return boolean ndarray denoting duplicate values\n\n .. versionadded:: 0.19.0\n\n Parameters\n ----------\n keep : {'first', 'last', False}, default 'first'\n - ``first`` : Mark duplicates as ``True`` except for the first\n occurrence.\n - ``last`` : Mark duplicates as ``True`` except for the last\n occurrence.\n - False : Mark all duplicates as ``True``.\n\n Returns\n -------\n duplicated : ndarray\n \"\"\"\n\n dtype = values.dtype\n\n # no need to revert to original type\n if is_datetime_or_timedelta_dtype(dtype) or is_datetimetz(dtype):\n if isinstance(values, (ABCSeries, ABCIndex)):\n values = values.values.view(np.int64)\n else:\n values = values.view(np.int64)\n elif is_period_arraylike(values):\n from pandas.tseries.period import PeriodIndex\n values = PeriodIndex(values).asi8\n elif is_categorical_dtype(dtype):\n values = values.values.codes\n elif isinstance(values, (ABCSeries, ABCIndex)):\n values = values.values\n\n if is_integer_dtype(dtype):\n values = _ensure_int64(values)\n duplicated = htable.duplicated_int64(values, keep=keep)\n elif is_float_dtype(dtype):\n values = _ensure_float64(values)\n duplicated = htable.duplicated_float64(values, keep=keep)\n else:\n values = _ensure_object(values)\n duplicated = htable.duplicated_object(values, keep=keep)\n\n return duplicated\n\n\ndef mode(values):\n \"\"\"Returns the mode or mode(s) of the passed Series or ndarray (sorted)\"\"\"\n # must sort because hash order isn't necessarily defined.\n from pandas.core.series import Series\n\n if isinstance(values, Series):\n constructor = values._constructor\n values = values.values\n else:\n values = np.asanyarray(values)\n constructor = Series\n\n dtype = values.dtype\n if is_integer_dtype(values):\n values = _ensure_int64(values)\n result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)\n\n elif 
issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):\n dtype = values.dtype\n values = values.view(np.int64)\n result = constructor(sorted(htable.mode_int64(values)), dtype=dtype)\n\n elif is_categorical_dtype(values):\n result = constructor(values.mode())\n else:\n mask = isnull(values)\n values = _ensure_object(values)\n res = htable.mode_object(values, mask)\n try:\n res = sorted(res)\n except TypeError as e:\n warn(\"Unable to sort modes: %s\" % e)\n result = constructor(res, dtype=dtype)\n\n return result\n\n\ndef rank(values, axis=0, method='average', na_option='keep',\n ascending=True, pct=False):\n \"\"\"\n\n \"\"\"\n if values.ndim == 1:\n f, values = _get_data_algo(values, _rank1d_functions)\n ranks = f(values, ties_method=method, ascending=ascending,\n na_option=na_option, pct=pct)\n elif values.ndim == 2:\n f, values = _get_data_algo(values, _rank2d_functions)\n ranks = f(values, axis=axis, ties_method=method,\n ascending=ascending, na_option=na_option, pct=pct)\n\n return ranks\n\n_rank1d_functions = {\n 'float64': algos.rank_1d_float64,\n 'int64': algos.rank_1d_int64,\n 'generic': algos.rank_1d_generic\n}\n\n_rank2d_functions = {\n 'float64': algos.rank_2d_float64,\n 'int64': algos.rank_2d_int64,\n 'generic': algos.rank_2d_generic\n}\n\n\ndef quantile(x, q, interpolation_method='fraction'):\n \"\"\"\n Compute sample quantile or quantiles of the input array. For example, q=0.5\n computes the median.\n\n The `interpolation_method` parameter supports three values, namely\n `fraction` (default), `lower` and `higher`. Interpolation is done only,\n if the desired quantile lies between two data points `i` and `j`. 
For\n `fraction`, the result is an interpolated value between `i` and `j`;\n for `lower`, the result is `i`, for `higher` the result is `j`.\n\n Parameters\n ----------\n x : ndarray\n Values from which to extract score.\n q : scalar or array\n Percentile at which to extract score.\n interpolation_method : {'fraction', 'lower', 'higher'}, optional\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n - fraction: `i + (j - i)*fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n -lower: `i`.\n - higher: `j`.\n\n Returns\n -------\n score : float\n Score at percentile.\n\n Examples\n --------\n >>> from scipy import stats\n >>> a = np.arange(100)\n >>> stats.scoreatpercentile(a, 50)\n 49.5\n\n \"\"\"\n x = np.asarray(x)\n mask = isnull(x)\n\n x = x[~mask]\n\n values = np.sort(x)\n\n def _get_score(at):\n if len(values) == 0:\n return np.nan\n\n idx = at * (len(values) - 1)\n if idx % 1 == 0:\n score = values[int(idx)]\n else:\n if interpolation_method == 'fraction':\n score = _interpolate(values[int(idx)], values[int(idx) + 1],\n idx % 1)\n elif interpolation_method == 'lower':\n score = values[np.floor(idx)]\n elif interpolation_method == 'higher':\n score = values[np.ceil(idx)]\n else:\n raise ValueError(\"interpolation_method can only be 'fraction' \"\n \", 'lower' or 'higher'\")\n\n return score\n\n if is_scalar(q):\n return _get_score(q)\n else:\n q = np.asarray(q, np.float64)\n return algos.arrmap_float64(q, _get_score)\n\n\ndef _interpolate(a, b, fraction):\n \"\"\"Returns the point at the given fraction between a and b, where\n 'fraction' must be between 0 and 1.\n \"\"\"\n return a + (b - a) * fraction\n\n\ndef nsmallest(arr, n, keep='first'):\n \"\"\"\n Find the indices of the n smallest values of a numpy array.\n\n Note: Fails silently with NaN.\n \"\"\"\n if keep == 'last':\n arr = arr[::-1]\n\n narr = len(arr)\n n = min(n, 
narr)\n\n sdtype = str(arr.dtype)\n arr = arr.view(_dtype_map.get(sdtype, sdtype))\n\n kth_val = algos.kth_smallest(arr.copy(), n - 1)\n return _finalize_nsmallest(arr, kth_val, n, keep, narr)\n\n\ndef nlargest(arr, n, keep='first'):\n \"\"\"\n Find the indices of the n largest values of a numpy array.\n\n Note: Fails silently with NaN.\n \"\"\"\n sdtype = str(arr.dtype)\n arr = arr.view(_dtype_map.get(sdtype, sdtype))\n return nsmallest(-arr, n, keep=keep)\n\n\ndef select_n_slow(dropped, n, keep, method):\n reverse_it = (keep == 'last' or method == 'nlargest')\n ascending = method == 'nsmallest'\n slc = np.s_[::-1] if reverse_it else np.s_[:]\n return dropped[slc].sort_values(ascending=ascending).head(n)\n\n\n_select_methods = {'nsmallest': nsmallest, 'nlargest': nlargest}\n\n\ndef select_n(series, n, keep, method):\n \"\"\"Implement n largest/smallest.\n\n Parameters\n ----------\n n : int\n keep : {'first', 'last'}, default 'first'\n method : str, {'nlargest', 'nsmallest'}\n\n Returns\n -------\n nordered : Series\n \"\"\"\n dtype = series.dtype\n if not issubclass(dtype.type, (np.integer, np.floating, np.datetime64,\n np.timedelta64)):\n raise TypeError(\"Cannot use method %r with dtype %s\" % (method, dtype))\n\n if keep not in ('first', 'last'):\n raise ValueError('keep must be either \"first\", \"last\"')\n\n if n <= 0:\n return series[[]]\n\n dropped = series.dropna()\n\n if n >= len(series):\n return select_n_slow(dropped, n, keep, method)\n\n inds = _select_methods[method](dropped.values, n, keep)\n return dropped.iloc[inds]\n\n\ndef _finalize_nsmallest(arr, kth_val, n, keep, narr):\n ns, = np.nonzero(arr <= kth_val)\n inds = ns[arr[ns].argsort(kind='mergesort')][:n]\n if keep == 'last':\n # reverse indices\n return narr - 1 - inds\n else:\n return inds\n\n_dtype_map = {'datetime64[ns]': 'int64', 'timedelta64[ns]': 'int64'}\n\n\n# ------- #\n# helpers #\n# ------- #\n\ndef _hashtable_algo(f, dtype, return_dtype=None):\n \"\"\"\n f(HashTable, type_caster) 
-> result\n \"\"\"\n if is_float_dtype(dtype):\n return f(htable.Float64HashTable, _ensure_float64)\n elif is_integer_dtype(dtype):\n return f(htable.Int64HashTable, _ensure_int64)\n elif is_datetime64_dtype(dtype):\n return_dtype = return_dtype or 'M8[ns]'\n return f(htable.Int64HashTable, _ensure_int64).view(return_dtype)\n elif is_timedelta64_dtype(dtype):\n return_dtype = return_dtype or 'm8[ns]'\n return f(htable.Int64HashTable, _ensure_int64).view(return_dtype)\n else:\n return f(htable.PyObjectHashTable, _ensure_object)\n\n_hashtables = {\n 'float64': (htable.Float64HashTable, htable.Float64Vector),\n 'int64': (htable.Int64HashTable, htable.Int64Vector),\n 'generic': (htable.PyObjectHashTable, htable.ObjectVector)\n}\n\n\ndef _get_data_algo(values, func_map):\n if is_float_dtype(values):\n f = func_map['float64']\n values = _ensure_float64(values)\n\n elif needs_i8_conversion(values):\n f = func_map['int64']\n values = values.view('i8')\n\n elif is_integer_dtype(values):\n f = func_map['int64']\n values = _ensure_int64(values)\n else:\n f = func_map['generic']\n values = _ensure_object(values)\n return f, values\n\n\n# ---- #\n# take #\n# ---- #\n\n\ndef _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):\n def wrapper(arr, indexer, out, fill_value=np.nan):\n if arr_dtype is not None:\n arr = arr.view(arr_dtype)\n if out_dtype is not None:\n out = out.view(out_dtype)\n if fill_wrap is not None:\n fill_value = fill_wrap(fill_value)\n f(arr, indexer, out, fill_value=fill_value)\n\n return wrapper\n\n\ndef _convert_wrapper(f, conv_dtype):\n def wrapper(arr, indexer, out, fill_value=np.nan):\n arr = arr.astype(conv_dtype)\n f(arr, indexer, out, fill_value=fill_value)\n\n return wrapper\n\n\ndef _take_2d_multi_generic(arr, indexer, out, fill_value, mask_info):\n # this is not ideal, performance-wise, but it's better than raising\n # an exception (best to optimize in Cython to avoid getting here)\n row_idx, col_idx = indexer\n if mask_info is not 
None:\n (row_mask, col_mask), (row_needs, col_needs) = mask_info\n else:\n row_mask = row_idx == -1\n col_mask = col_idx == -1\n row_needs = row_mask.any()\n col_needs = col_mask.any()\n if fill_value is not None:\n if row_needs:\n out[row_mask, :] = fill_value\n if col_needs:\n out[:, col_mask] = fill_value\n for i in range(len(row_idx)):\n u_ = row_idx[i]\n for j in range(len(col_idx)):\n v = col_idx[j]\n out[i, j] = arr[u_, v]\n\n\ndef _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info):\n if mask_info is not None:\n mask, needs_masking = mask_info\n else:\n mask = indexer == -1\n needs_masking = mask.any()\n if arr.dtype != out.dtype:\n arr = arr.astype(out.dtype)\n if arr.shape[axis] > 0:\n arr.take(_ensure_platform_int(indexer), axis=axis, out=out)\n if needs_masking:\n outindexer = [slice(None)] * arr.ndim\n outindexer[axis] = mask\n out[tuple(outindexer)] = fill_value\n\n\n_take_1d_dict = {\n ('int8', 'int8'): algos.take_1d_int8_int8,\n ('int8', 'int32'): algos.take_1d_int8_int32,\n ('int8', 'int64'): algos.take_1d_int8_int64,\n ('int8', 'float64'): algos.take_1d_int8_float64,\n ('int16', 'int16'): algos.take_1d_int16_int16,\n ('int16', 'int32'): algos.take_1d_int16_int32,\n ('int16', 'int64'): algos.take_1d_int16_int64,\n ('int16', 'float64'): algos.take_1d_int16_float64,\n ('int32', 'int32'): algos.take_1d_int32_int32,\n ('int32', 'int64'): algos.take_1d_int32_int64,\n ('int32', 'float64'): algos.take_1d_int32_float64,\n ('int64', 'int64'): algos.take_1d_int64_int64,\n ('int64', 'float64'): algos.take_1d_int64_float64,\n ('float32', 'float32'): algos.take_1d_float32_float32,\n ('float32', 'float64'): algos.take_1d_float32_float64,\n ('float64', 'float64'): algos.take_1d_float64_float64,\n ('object', 'object'): algos.take_1d_object_object,\n ('bool', 'bool'): _view_wrapper(algos.take_1d_bool_bool, np.uint8,\n np.uint8),\n ('bool', 'object'): _view_wrapper(algos.take_1d_bool_object, np.uint8,\n None),\n ('datetime64[ns]', 'datetime64[ns]'): 
_view_wrapper(\n algos.take_1d_int64_int64, np.int64, np.int64, np.int64)\n}\n\n_take_2d_axis0_dict = {\n ('int8', 'int8'): algos.take_2d_axis0_int8_int8,\n ('int8', 'int32'): algos.take_2d_axis0_int8_int32,\n ('int8', 'int64'): algos.take_2d_axis0_int8_int64,\n ('int8', 'float64'): algos.take_2d_axis0_int8_float64,\n ('int16', 'int16'): algos.take_2d_axis0_int16_int16,\n ('int16', 'int32'): algos.take_2d_axis0_int16_int32,\n ('int16', 'int64'): algos.take_2d_axis0_int16_int64,\n ('int16', 'float64'): algos.take_2d_axis0_int16_float64,\n ('int32', 'int32'): algos.take_2d_axis0_int32_int32,\n ('int32', 'int64'): algos.take_2d_axis0_int32_int64,\n ('int32', 'float64'): algos.take_2d_axis0_int32_float64,\n ('int64', 'int64'): algos.take_2d_axis0_int64_int64,\n ('int64', 'float64'): algos.take_2d_axis0_int64_float64,\n ('float32', 'float32'): algos.take_2d_axis0_float32_float32,\n ('float32', 'float64'): algos.take_2d_axis0_float32_float64,\n ('float64', 'float64'): algos.take_2d_axis0_float64_float64,\n ('object', 'object'): algos.take_2d_axis0_object_object,\n ('bool', 'bool'): _view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8,\n np.uint8),\n ('bool', 'object'): _view_wrapper(algos.take_2d_axis0_bool_object,\n np.uint8, None),\n ('datetime64[ns]', 'datetime64[ns]'):\n _view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64,\n fill_wrap=np.int64)\n}\n\n_take_2d_axis1_dict = {\n ('int8', 'int8'): algos.take_2d_axis1_int8_int8,\n ('int8', 'int32'): algos.take_2d_axis1_int8_int32,\n ('int8', 'int64'): algos.take_2d_axis1_int8_int64,\n ('int8', 'float64'): algos.take_2d_axis1_int8_float64,\n ('int16', 'int16'): algos.take_2d_axis1_int16_int16,\n ('int16', 'int32'): algos.take_2d_axis1_int16_int32,\n ('int16', 'int64'): algos.take_2d_axis1_int16_int64,\n ('int16', 'float64'): algos.take_2d_axis1_int16_float64,\n ('int32', 'int32'): algos.take_2d_axis1_int32_int32,\n ('int32', 'int64'): algos.take_2d_axis1_int32_int64,\n ('int32', 'float64'): 
algos.take_2d_axis1_int32_float64,\n ('int64', 'int64'): algos.take_2d_axis1_int64_int64,\n ('int64', 'float64'): algos.take_2d_axis1_int64_float64,\n ('float32', 'float32'): algos.take_2d_axis1_float32_float32,\n ('float32', 'float64'): algos.take_2d_axis1_float32_float64,\n ('float64', 'float64'): algos.take_2d_axis1_float64_float64,\n ('object', 'object'): algos.take_2d_axis1_object_object,\n ('bool', 'bool'): _view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8,\n np.uint8),\n ('bool', 'object'): _view_wrapper(algos.take_2d_axis1_bool_object,\n np.uint8, None),\n ('datetime64[ns]', 'datetime64[ns]'):\n _view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64,\n fill_wrap=np.int64)\n}\n\n_take_2d_multi_dict = {\n ('int8', 'int8'): algos.take_2d_multi_int8_int8,\n ('int8', 'int32'): algos.take_2d_multi_int8_int32,\n ('int8', 'int64'): algos.take_2d_multi_int8_int64,\n ('int8', 'float64'): algos.take_2d_multi_int8_float64,\n ('int16', 'int16'): algos.take_2d_multi_int16_int16,\n ('int16', 'int32'): algos.take_2d_multi_int16_int32,\n ('int16', 'int64'): algos.take_2d_multi_int16_int64,\n ('int16', 'float64'): algos.take_2d_multi_int16_float64,\n ('int32', 'int32'): algos.take_2d_multi_int32_int32,\n ('int32', 'int64'): algos.take_2d_multi_int32_int64,\n ('int32', 'float64'): algos.take_2d_multi_int32_float64,\n ('int64', 'int64'): algos.take_2d_multi_int64_int64,\n ('int64', 'float64'): algos.take_2d_multi_int64_float64,\n ('float32', 'float32'): algos.take_2d_multi_float32_float32,\n ('float32', 'float64'): algos.take_2d_multi_float32_float64,\n ('float64', 'float64'): algos.take_2d_multi_float64_float64,\n ('object', 'object'): algos.take_2d_multi_object_object,\n ('bool', 'bool'): _view_wrapper(algos.take_2d_multi_bool_bool, np.uint8,\n np.uint8),\n ('bool', 'object'): _view_wrapper(algos.take_2d_multi_bool_object,\n np.uint8, None),\n ('datetime64[ns]', 'datetime64[ns]'):\n _view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64,\n 
fill_wrap=np.int64)\n}\n\n\ndef _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None):\n if ndim <= 2:\n tup = (arr_dtype.name, out_dtype.name)\n if ndim == 1:\n func = _take_1d_dict.get(tup, None)\n elif ndim == 2:\n if axis == 0:\n func = _take_2d_axis0_dict.get(tup, None)\n else:\n func = _take_2d_axis1_dict.get(tup, None)\n if func is not None:\n return func\n\n tup = (out_dtype.name, out_dtype.name)\n if ndim == 1:\n func = _take_1d_dict.get(tup, None)\n elif ndim == 2:\n if axis == 0:\n func = _take_2d_axis0_dict.get(tup, None)\n else:\n func = _take_2d_axis1_dict.get(tup, None)\n if func is not None:\n func = _convert_wrapper(func, out_dtype)\n return func\n\n def func(arr, indexer, out, fill_value=np.nan):\n indexer = _ensure_int64(indexer)\n _take_nd_generic(arr, indexer, out, axis=axis, fill_value=fill_value,\n mask_info=mask_info)\n\n return func\n\n\ndef take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,\n allow_fill=True):\n \"\"\"\n Specialized Cython take which sets NaN values in one pass\n\n Parameters\n ----------\n arr : ndarray\n Input array\n indexer : ndarray\n 1-D array of indices to take, subarrays corresponding to -1 value\n indicies are filed with fill_value\n axis : int, default 0\n Axis to take from\n out : ndarray or None, default None\n Optional output array, must be appropriate type to hold input and\n fill_value together, if indexer has any -1 value entries; call\n _maybe_promote to determine this type for any fill_value\n fill_value : any, default np.nan\n Fill value to replace -1 values with\n mask_info : tuple of (ndarray, boolean)\n If provided, value should correspond to:\n (indexer != -1, (indexer != -1).any())\n If not provided, it will be computed internally if necessary\n allow_fill : boolean, default True\n If False, indexer is assumed to contain no -1 values so no filling\n will be done. This short-circuits computation of a mask. 
Result is\n undefined if allow_fill == False and -1 is present in indexer.\n \"\"\"\n\n # dispatch to internal type takes\n if is_categorical(arr):\n return arr.take_nd(indexer, fill_value=fill_value,\n allow_fill=allow_fill)\n elif is_datetimetz(arr):\n return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)\n\n if indexer is None:\n indexer = np.arange(arr.shape[axis], dtype=np.int64)\n dtype, fill_value = arr.dtype, arr.dtype.type()\n else:\n indexer = _ensure_int64(indexer)\n if not allow_fill:\n dtype, fill_value = arr.dtype, arr.dtype.type()\n mask_info = None, False\n else:\n # check for promotion based on types only (do this first because\n # it's faster than computing a mask)\n dtype, fill_value = _maybe_promote(arr.dtype, fill_value)\n if dtype != arr.dtype and (out is None or out.dtype != dtype):\n # check if promotion is actually required based on indexer\n if mask_info is not None:\n mask, needs_masking = mask_info\n else:\n mask = indexer == -1\n needs_masking = mask.any()\n mask_info = mask, needs_masking\n if needs_masking:\n if out is not None and out.dtype != dtype:\n raise TypeError('Incompatible type for fill_value')\n else:\n # if not, then depromote, set fill_value to dummy\n # (it won't be used but we don't want the cython code\n # to crash when trying to cast it to dtype)\n dtype, fill_value = arr.dtype, arr.dtype.type()\n\n flip_order = False\n if arr.ndim == 2:\n if arr.flags.f_contiguous:\n flip_order = True\n\n if flip_order:\n arr = arr.T\n axis = arr.ndim - axis - 1\n if out is not None:\n out = out.T\n\n # at this point, it's guaranteed that dtype can hold both the arr values\n # and the fill_value\n if out is None:\n out_shape = list(arr.shape)\n out_shape[axis] = len(indexer)\n out_shape = tuple(out_shape)\n if arr.flags.f_contiguous and axis == arr.ndim - 1:\n # minor tweak that can make an order-of-magnitude difference\n # for dataframes initialized directly from 2-d ndarrays\n # (s.t. 
df.values is c-contiguous and df._data.blocks[0] is its\n # f-contiguous transpose)\n out = np.empty(out_shape, dtype=dtype, order='F')\n else:\n out = np.empty(out_shape, dtype=dtype)\n\n func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype, axis=axis,\n mask_info=mask_info)\n indexer = _ensure_int64(indexer)\n func(arr, indexer, out, fill_value)\n\n if flip_order:\n out = out.T\n return out\n\n\ntake_1d = take_nd\n\n\ndef take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None,\n allow_fill=True):\n \"\"\"\n Specialized Cython take which sets NaN values in one pass\n \"\"\"\n if indexer is None or (indexer[0] is None and indexer[1] is None):\n row_idx = np.arange(arr.shape[0], dtype=np.int64)\n col_idx = np.arange(arr.shape[1], dtype=np.int64)\n indexer = row_idx, col_idx\n dtype, fill_value = arr.dtype, arr.dtype.type()\n else:\n row_idx, col_idx = indexer\n if row_idx is None:\n row_idx = np.arange(arr.shape[0], dtype=np.int64)\n else:\n row_idx = _ensure_int64(row_idx)\n if col_idx is None:\n col_idx = np.arange(arr.shape[1], dtype=np.int64)\n else:\n col_idx = _ensure_int64(col_idx)\n indexer = row_idx, col_idx\n if not allow_fill:\n dtype, fill_value = arr.dtype, arr.dtype.type()\n mask_info = None, False\n else:\n # check for promotion based on types only (do this first because\n # it's faster than computing a mask)\n dtype, fill_value = _maybe_promote(arr.dtype, fill_value)\n if dtype != arr.dtype and (out is None or out.dtype != dtype):\n # check if promotion is actually required based on indexer\n if mask_info is not None:\n (row_mask, col_mask), (row_needs, col_needs) = mask_info\n else:\n row_mask = row_idx == -1\n col_mask = col_idx == -1\n row_needs = row_mask.any()\n col_needs = col_mask.any()\n mask_info = (row_mask, col_mask), (row_needs, col_needs)\n if row_needs or col_needs:\n if out is not None and out.dtype != dtype:\n raise TypeError('Incompatible type for fill_value')\n else:\n # if not, then depromote, set fill_value 
to dummy\n # (it won't be used but we don't want the cython code\n # to crash when trying to cast it to dtype)\n dtype, fill_value = arr.dtype, arr.dtype.type()\n\n # at this point, it's guaranteed that dtype can hold both the arr values\n # and the fill_value\n if out is None:\n out_shape = len(row_idx), len(col_idx)\n out = np.empty(out_shape, dtype=dtype)\n\n func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)\n if func is None and arr.dtype != out.dtype:\n func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)\n if func is not None:\n func = _convert_wrapper(func, out.dtype)\n if func is None:\n\n def func(arr, indexer, out, fill_value=np.nan):\n _take_2d_multi_generic(arr, indexer, out, fill_value=fill_value,\n mask_info=mask_info)\n\n func(arr, indexer, out=out, fill_value=fill_value)\n return out\n\n\n# ---- #\n# diff #\n# ---- #\n\n_diff_special = {\n 'float64': algos.diff_2d_float64,\n 'float32': algos.diff_2d_float32,\n 'int64': algos.diff_2d_int64,\n 'int32': algos.diff_2d_int32,\n 'int16': algos.diff_2d_int16,\n 'int8': algos.diff_2d_int8,\n}\n\n\ndef diff(arr, n, axis=0):\n \"\"\" difference of n between self,\n analagoust to s-s.shift(n) \"\"\"\n\n n = int(n)\n na = np.nan\n dtype = arr.dtype\n is_timedelta = False\n if needs_i8_conversion(arr):\n dtype = np.float64\n arr = arr.view('i8')\n na = tslib.iNaT\n is_timedelta = True\n elif issubclass(dtype.type, np.integer):\n dtype = np.float64\n elif issubclass(dtype.type, np.bool_):\n dtype = np.object_\n\n dtype = np.dtype(dtype)\n out_arr = np.empty(arr.shape, dtype=dtype)\n\n na_indexer = [slice(None)] * arr.ndim\n na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)\n out_arr[tuple(na_indexer)] = na\n\n if arr.ndim == 2 and arr.dtype.name in _diff_special:\n f = _diff_special[arr.dtype.name]\n f(arr, out_arr, n, axis)\n else:\n res_indexer = [slice(None)] * arr.ndim\n res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)\n res_indexer = 
tuple(res_indexer)\n\n lag_indexer = [slice(None)] * arr.ndim\n lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)\n lag_indexer = tuple(lag_indexer)\n\n # need to make sure that we account for na for datelike/timedelta\n # we don't actually want to subtract these i8 numbers\n if is_timedelta:\n res = arr[res_indexer]\n lag = arr[lag_indexer]\n\n mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)\n if mask.any():\n res = res.copy()\n res[mask] = 0\n lag = lag.copy()\n lag[mask] = 0\n\n result = res - lag\n result[mask] = na\n out_arr[res_indexer] = result\n else:\n out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]\n\n if is_timedelta:\n from pandas import TimedeltaIndex\n out_arr = TimedeltaIndex(out_arr.ravel().astype('int64')).asi8.reshape(\n out_arr.shape).astype('timedelta64[ns]')\n\n return out_arr\n", "# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n# pylint: disable-msg=W0612,E1101\nfrom copy import deepcopy\nimport sys\nimport nose\nfrom distutils.version import LooseVersion\n\nfrom pandas.compat import range, lrange\nfrom pandas import compat\n\nfrom numpy.random import randn\nimport numpy as np\n\nfrom pandas import DataFrame, Series\nimport pandas as pd\n\nfrom pandas.util.testing import (assert_almost_equal,\n assert_series_equal,\n assert_frame_equal,\n assertRaisesRegexp)\n\nimport pandas.util.testing as tm\n\nfrom pandas.tests.frame.common import TestData\n\n\nclass SharedWithSparse(object):\n\n _multiprocess_can_split_ = True\n\n def test_copy_index_name_checking(self):\n # don't want to be able to modify the index stored elsewhere after\n # making a copy\n for attr in ('index', 'columns'):\n ind = getattr(self.frame, attr)\n ind.name = None\n cp = self.frame.copy()\n getattr(cp, attr).name = 'foo'\n self.assertIsNone(getattr(self.frame, attr).name)\n\n def test_getitem_pop_assign_name(self):\n s = self.frame['A']\n self.assertEqual(s.name, 'A')\n\n s = self.frame.pop('A')\n self.assertEqual(s.name, 
'A')\n\n s = self.frame.ix[:, 'B']\n self.assertEqual(s.name, 'B')\n\n s2 = s.ix[:]\n self.assertEqual(s2.name, 'B')\n\n def test_get_value(self):\n for idx in self.frame.index:\n for col in self.frame.columns:\n result = self.frame.get_value(idx, col)\n expected = self.frame[col][idx]\n tm.assert_almost_equal(result, expected)\n\n def test_join_index(self):\n # left / right\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2)\n self.assert_index_equal(f.index, joined.index)\n self.assertEqual(len(joined.columns), 4)\n\n joined = f.join(f2, how='left')\n self.assert_index_equal(joined.index, f.index)\n self.assertEqual(len(joined.columns), 4)\n\n joined = f.join(f2, how='right')\n self.assert_index_equal(joined.index, f2.index)\n self.assertEqual(len(joined.columns), 4)\n\n # inner\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2, how='inner')\n self.assert_index_equal(joined.index, f.index.intersection(f2.index))\n self.assertEqual(len(joined.columns), 4)\n\n # outer\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2, how='outer')\n self.assertTrue(tm.equalContents(self.frame.index, joined.index))\n self.assertEqual(len(joined.columns), 4)\n\n assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')\n\n # corner case - overlapping columns\n for how in ('outer', 'left', 'inner'):\n with assertRaisesRegexp(ValueError, 'columns overlap but '\n 'no suffix'):\n self.frame.join(self.frame, how=how)\n\n def test_join_index_more(self):\n af = self.frame.ix[:, ['A', 'B']]\n bf = self.frame.ix[::2, ['C', 'D']]\n\n expected = af.copy()\n expected['C'] = self.frame['C'][::2]\n expected['D'] = self.frame['D'][::2]\n\n result = af.join(bf)\n assert_frame_equal(result, expected)\n\n result = af.join(bf, how='right')\n assert_frame_equal(result, 
expected[::2])\n\n result = bf.join(af, how='right')\n assert_frame_equal(result, expected.ix[:, result.columns])\n\n def test_join_index_series(self):\n df = self.frame.copy()\n s = df.pop(self.frame.columns[-1])\n joined = df.join(s)\n\n # TODO should this check_names ?\n assert_frame_equal(joined, self.frame, check_names=False)\n\n s.name = None\n assertRaisesRegexp(ValueError, 'must have a name', df.join, s)\n\n def test_join_overlap(self):\n df1 = self.frame.ix[:, ['A', 'B', 'C']]\n df2 = self.frame.ix[:, ['B', 'C', 'D']]\n\n joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')\n df1_suf = df1.ix[:, ['B', 'C']].add_suffix('_df1')\n df2_suf = df2.ix[:, ['B', 'C']].add_suffix('_df2')\n\n no_overlap = self.frame.ix[:, ['A', 'D']]\n expected = df1_suf.join(df2_suf).join(no_overlap)\n\n # column order not necessarily sorted\n assert_frame_equal(joined, expected.ix[:, joined.columns])\n\n def test_add_prefix_suffix(self):\n with_prefix = self.frame.add_prefix('foo#')\n expected = pd.Index(['foo#%s' % c for c in self.frame.columns])\n self.assert_index_equal(with_prefix.columns, expected)\n\n with_suffix = self.frame.add_suffix('#foo')\n expected = pd.Index(['%s#foo' % c for c in self.frame.columns])\n self.assert_index_equal(with_suffix.columns, expected)\n\n\nclass TestDataFrameMisc(tm.TestCase, SharedWithSparse, TestData):\n\n klass = DataFrame\n\n _multiprocess_can_split_ = True\n\n def test_get_axis(self):\n f = self.frame\n self.assertEqual(f._get_axis_number(0), 0)\n self.assertEqual(f._get_axis_number(1), 1)\n self.assertEqual(f._get_axis_number('index'), 0)\n self.assertEqual(f._get_axis_number('rows'), 0)\n self.assertEqual(f._get_axis_number('columns'), 1)\n\n self.assertEqual(f._get_axis_name(0), 'index')\n self.assertEqual(f._get_axis_name(1), 'columns')\n self.assertEqual(f._get_axis_name('index'), 'index')\n self.assertEqual(f._get_axis_name('rows'), 'index')\n self.assertEqual(f._get_axis_name('columns'), 'columns')\n\n self.assertIs(f._get_axis(0), 
f.index)\n self.assertIs(f._get_axis(1), f.columns)\n\n assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, 2)\n assertRaisesRegexp(ValueError, 'No axis.*foo', f._get_axis_name, 'foo')\n assertRaisesRegexp(ValueError, 'No axis.*None', f._get_axis_name, None)\n assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number,\n None)\n\n def test_keys(self):\n getkeys = self.frame.keys\n self.assertIs(getkeys(), self.frame.columns)\n\n def test_column_contains_typeerror(self):\n try:\n self.frame.columns in self.frame\n except TypeError:\n pass\n\n def test_not_hashable(self):\n df = pd.DataFrame([1])\n self.assertRaises(TypeError, hash, df)\n self.assertRaises(TypeError, hash, self.empty)\n\n def test_new_empty_index(self):\n df1 = DataFrame(randn(0, 3))\n df2 = DataFrame(randn(0, 3))\n df1.index.name = 'foo'\n self.assertIsNone(df2.index.name)\n\n def test_array_interface(self):\n result = np.sqrt(self.frame)\n tm.assertIsInstance(result, type(self.frame))\n self.assertIs(result.index, self.frame.index)\n self.assertIs(result.columns, self.frame.columns)\n\n assert_frame_equal(result, self.frame.apply(np.sqrt))\n\n def test_get_agg_axis(self):\n cols = self.frame._get_agg_axis(0)\n self.assertIs(cols, self.frame.columns)\n\n idx = self.frame._get_agg_axis(1)\n self.assertIs(idx, self.frame.index)\n\n self.assertRaises(ValueError, self.frame._get_agg_axis, 2)\n\n def test_nonzero(self):\n self.assertTrue(self.empty.empty)\n\n self.assertFalse(self.frame.empty)\n self.assertFalse(self.mixed_frame.empty)\n\n # corner case\n df = DataFrame({'A': [1., 2., 3.],\n 'B': ['a', 'b', 'c']},\n index=np.arange(3))\n del df['A']\n self.assertFalse(df.empty)\n\n def test_iteritems(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])\n for k, v in compat.iteritems(df):\n self.assertEqual(type(v), Series)\n\n def test_iter(self):\n self.assertTrue(tm.equalContents(list(self.frame), self.frame.columns))\n\n def test_iterrows(self):\n for i, (k, 
v) in enumerate(self.frame.iterrows()):\n exp = self.frame.xs(self.frame.index[i])\n assert_series_equal(v, exp)\n\n for i, (k, v) in enumerate(self.mixed_frame.iterrows()):\n exp = self.mixed_frame.xs(self.mixed_frame.index[i])\n assert_series_equal(v, exp)\n\n def test_itertuples(self):\n for i, tup in enumerate(self.frame.itertuples()):\n s = Series(tup[1:])\n s.name = tup[0]\n expected = self.frame.ix[i, :].reset_index(drop=True)\n assert_series_equal(s, expected)\n\n df = DataFrame({'floats': np.random.randn(5),\n 'ints': lrange(5)}, columns=['floats', 'ints'])\n\n for tup in df.itertuples(index=False):\n tm.assertIsInstance(tup[1], np.integer)\n\n df = DataFrame(data={\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n dfaa = df[['a', 'a']]\n self.assertEqual(list(dfaa.itertuples()), [\n (0, 1, 1), (1, 2, 2), (2, 3, 3)])\n\n self.assertEqual(repr(list(df.itertuples(name=None))),\n '[(0, 1, 4), (1, 2, 5), (2, 3, 6)]')\n\n tup = next(df.itertuples(name='TestName'))\n\n # no support for field renaming in Python 2.6, regular tuples are\n # returned\n if sys.version >= LooseVersion('2.7'):\n self.assertEqual(tup._fields, ('Index', 'a', 'b'))\n self.assertEqual((tup.Index, tup.a, tup.b), tup)\n self.assertEqual(type(tup).__name__, 'TestName')\n\n df.columns = ['def', 'return']\n tup2 = next(df.itertuples(name='TestName'))\n self.assertEqual(tup2, (0, 1, 4))\n\n if sys.version >= LooseVersion('2.7'):\n self.assertEqual(tup2._fields, ('Index', '_1', '_2'))\n\n df3 = DataFrame(dict(('f' + str(i), [i]) for i in range(1024)))\n # will raise SyntaxError if trying to create namedtuple\n tup3 = next(df3.itertuples())\n self.assertFalse(hasattr(tup3, '_fields'))\n self.assertIsInstance(tup3, tuple)\n\n def test_len(self):\n self.assertEqual(len(self.frame), len(self.frame.index))\n\n def test_as_matrix(self):\n frame = self.frame\n mat = frame.as_matrix()\n\n frameCols = frame.columns\n for i, row in enumerate(mat):\n for j, value in enumerate(row):\n col = frameCols[j]\n if 
np.isnan(value):\n self.assertTrue(np.isnan(frame[col][i]))\n else:\n self.assertEqual(value, frame[col][i])\n\n # mixed type\n mat = self.mixed_frame.as_matrix(['foo', 'A'])\n self.assertEqual(mat[0, 0], 'bar')\n\n df = DataFrame({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]})\n mat = df.as_matrix()\n self.assertEqual(mat[0, 0], 1j)\n\n # single block corner case\n mat = self.frame.as_matrix(['A', 'B'])\n expected = self.frame.reindex(columns=['A', 'B']).values\n assert_almost_equal(mat, expected)\n\n def test_values(self):\n self.frame.values[:, 0] = 5.\n self.assertTrue((self.frame.values[:, 0] == 5).all())\n\n def test_deepcopy(self):\n cp = deepcopy(self.frame)\n series = cp['A']\n series[:] = 10\n for idx, value in compat.iteritems(series):\n self.assertNotEqual(self.frame['A'][idx], value)\n\n # ---------------------------------------------------------------------\n # Transposing\n\n def test_transpose(self):\n frame = self.frame\n dft = frame.T\n for idx, series in compat.iteritems(dft):\n for col, value in compat.iteritems(series):\n if np.isnan(value):\n self.assertTrue(np.isnan(frame[col][idx]))\n else:\n self.assertEqual(value, frame[col][idx])\n\n # mixed type\n index, data = tm.getMixedTypeDict()\n mixed = DataFrame(data, index=index)\n\n mixed_T = mixed.T\n for col, s in compat.iteritems(mixed_T):\n self.assertEqual(s.dtype, np.object_)\n\n def test_transpose_get_view(self):\n dft = self.frame.T\n dft.values[:, 5:10] = 5\n\n self.assertTrue((self.frame.values[5:10] == 5).all())\n\n def test_swapaxes(self):\n df = DataFrame(np.random.randn(10, 5))\n assert_frame_equal(df.T, df.swapaxes(0, 1))\n assert_frame_equal(df.T, df.swapaxes(1, 0))\n assert_frame_equal(df, df.swapaxes(0, 0))\n self.assertRaises(ValueError, df.swapaxes, 2, 5)\n\n def test_axis_aliases(self):\n f = self.frame\n\n # reg name\n expected = f.sum(axis=0)\n result = f.sum(axis='index')\n assert_series_equal(result, expected)\n\n expected = f.sum(axis=1)\n result = f.sum(axis='columns')\n 
assert_series_equal(result, expected)\n\n def test_more_asMatrix(self):\n values = self.mixed_frame.as_matrix()\n self.assertEqual(values.shape[1], len(self.mixed_frame.columns))\n\n def test_repr_with_mi_nat(self):\n df = DataFrame({'X': [1, 2]},\n index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']])\n res = repr(df)\n exp = ' X\\nNaT a 1\\n2013-01-01 b 2'\n self.assertEqual(res, exp)\n\n def test_iterkv_deprecation(self):\n with tm.assert_produces_warning(FutureWarning):\n self.mixed_float.iterkv()\n\n def test_iterkv_names(self):\n for k, v in compat.iteritems(self.mixed_frame):\n self.assertEqual(v.name, k)\n\n def test_series_put_names(self):\n series = self.mixed_frame._series\n for k, v in compat.iteritems(series):\n self.assertEqual(v.name, k)\n\n def test_empty_nonzero(self):\n df = DataFrame([1, 2, 3])\n self.assertFalse(df.empty)\n df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()\n self.assertTrue(df.empty)\n self.assertTrue(df.T.empty)\n\n def test_inplace_return_self(self):\n # re #1893\n\n data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],\n 'b': [0, 0, 1, 1],\n 'c': [1, 2, 3, 4]})\n\n def _check_f(base, f):\n result = f(base)\n self.assertTrue(result is None)\n\n # -----DataFrame-----\n\n # set_index\n f = lambda x: x.set_index('a', inplace=True)\n _check_f(data.copy(), f)\n\n # reset_index\n f = lambda x: x.reset_index(inplace=True)\n _check_f(data.set_index('a'), f)\n\n # drop_duplicates\n f = lambda x: x.drop_duplicates(inplace=True)\n _check_f(data.copy(), f)\n\n # sort\n f = lambda x: x.sort_values('b', inplace=True)\n _check_f(data.copy(), f)\n\n # sort_index\n f = lambda x: x.sort_index(inplace=True)\n _check_f(data.copy(), f)\n\n # sortlevel\n f = lambda x: x.sortlevel(0, inplace=True)\n _check_f(data.set_index(['a', 'b']), f)\n\n # fillna\n f = lambda x: x.fillna(0, inplace=True)\n _check_f(data.copy(), f)\n\n # replace\n f = lambda x: x.replace(1, 0, inplace=True)\n _check_f(data.copy(), f)\n\n # rename\n f = lambda x: 
x.rename({1: 'foo'}, inplace=True)\n _check_f(data.copy(), f)\n\n # -----Series-----\n d = data.copy()['c']\n\n # reset_index\n f = lambda x: x.reset_index(inplace=True, drop=True)\n _check_f(data.set_index('a')['c'], f)\n\n # fillna\n f = lambda x: x.fillna(0, inplace=True)\n _check_f(d.copy(), f)\n\n # replace\n f = lambda x: x.replace(1, 0, inplace=True)\n _check_f(d.copy(), f)\n\n # rename\n f = lambda x: x.rename({1: 'foo'}, inplace=True)\n _check_f(d.copy(), f)\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n" ]
[ [ "pandas.types.common.is_datetime_or_timedelta_dtype", "pandas.hashtable.duplicated_float64", "pandas.types.common._ensure_platform_int", "pandas.types.common.is_period_arraylike", "pandas.hashtable.value_count_int64", "pandas.algos.arrmap_float64", "numpy.sort", "pandas.types.common.is_list_like", "pandas.types.missing.isnull", "pandas.hashtable.duplicated_object", "pandas.core.common._asarray_tuplesafe", "pandas.types.common.is_timedelta64_dtype", "pandas.hashtable.duplicated_int64", "pandas.hashtable.value_count_float64", "numpy.dtype", "numpy.concatenate", "pandas.types.common.is_categorical", "pandas.types.common.is_extension_type", "numpy.empty", "pandas.tools.tile.cut", "pandas.types.common._ensure_object", "numpy.putmask", "pandas.hashtable.mode_object", "pandas.types.common.is_categorical_dtype", "numpy.nonzero", "pandas.hashtable.mode_int64", "numpy.arange", "pandas.tseries.period.PeriodIndex._simple_new", "pandas.types.common.needs_i8_conversion", "numpy.array", "pandas.types.common.is_datetime64_dtype", "pandas.tseries.index.DatetimeIndex._simple_new", "pandas.lib.infer_dtype", "pandas.types.common.is_integer_dtype", "pandas.types.common.is_float_dtype", "pandas.types.cast._maybe_promote", "pandas.types.common._ensure_int64", "pandas.types.common.is_scalar", "numpy.insert", "numpy.floor", "pandas.Index", "pandas.tseries.index.DatetimeIndex", "pandas.types.common._ensure_float64", "numpy.asarray", "numpy.ceil", "pandas.tseries.timedeltas.to_timedelta", "pandas.types.common.is_int64_dtype", "pandas.types.common.is_datetimetz", "pandas.hashtable.value_count_object", "pandas.Series", "pandas.tseries.period.PeriodIndex", "pandas.tseries.tools.to_datetime", "numpy.asanyarray" ], [ "pandas.util.testing.assertIsInstance", "pandas.Timestamp", "pandas.compat.range", "pandas.compat.iteritems", "pandas.DataFrame", "numpy.arange", "pandas.util.testing.assert_produces_warning", "numpy.sqrt", "pandas.util.testing.equalContents", 
"pandas.util.testing.assert_almost_equal", "numpy.random.randn", "pandas.compat.lrange", "pandas.Index", "numpy.isnan", "pandas.util.testing.assert_frame_equal", "pandas.util.testing.getMixedTypeDict", "pandas.util.testing.assert_series_equal", "pandas.Series", "pandas.util.testing.assertRaisesRegexp" ] ]
SherylHYX/pytorch_geometric
[ "5f8e99d033a596426ef502c2ea4b9be89ec58f38" ]
[ "test/nn/conv/test_gatv2_conv.py" ]
[ "import torch\nfrom torch_sparse import SparseTensor\nfrom torch_geometric.nn import GATv2Conv\n\n\ndef test_gatv2_conv():\n x1 = torch.randn(4, 8)\n x2 = torch.randn(2, 8)\n edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])\n row, col = edge_index\n value = torch.randn(row.size(0))\n adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))\n\n conv = GATv2Conv(8, 32, heads=2)\n assert conv.__repr__() == 'GATv2Conv(8, 32, heads=2)'\n out = conv(x1, edge_index)\n assert out.size() == (4, 64)\n assert torch.allclose(conv(x1, edge_index, size=(4, 4)), out)\n assert torch.allclose(conv(x1, adj.t()), out, atol=1e-6)\n\n t = '(Tensor, Tensor, Size, NoneType) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert torch.allclose(jit(x1, edge_index), out)\n assert torch.allclose(jit(x1, edge_index, size=(4, 4)), out)\n\n t = '(Tensor, SparseTensor, Size, NoneType) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert torch.allclose(jit(x1, adj.t()), out, atol=1e-6)\n\n # Test `return_attention_weights`.\n result = conv(x1, edge_index, return_attention_weights=True)\n assert torch.allclose(result[0], out)\n assert result[1][0].size() == (2, 7)\n assert result[1][1].size() == (7, 2)\n assert result[1][1].min() >= 0 and result[1][1].max() <= 1\n assert conv._alpha is None\n\n result = conv(x1, adj.t(), return_attention_weights=True)\n assert torch.allclose(result[0], out, atol=1e-6)\n assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 7\n assert conv._alpha is None\n\n t = '(Tensor, Tensor, Size, bool) -> Tuple[Tensor, Tuple[Tensor, Tensor]]'\n jit = torch.jit.script(conv.jittable(t))\n result = jit(x1, edge_index, return_attention_weights=True)\n assert torch.allclose(result[0], out)\n assert result[1][0].size() == (2, 7)\n assert result[1][1].size() == (7, 2)\n assert result[1][1].min() >= 0 and result[1][1].max() <= 1\n assert conv._alpha is None\n\n t = '(Tensor, SparseTensor, Size, bool) -> Tuple[Tensor, SparseTensor]'\n 
jit = torch.jit.script(conv.jittable(t))\n result = jit(x1, adj.t(), return_attention_weights=True)\n assert torch.allclose(result[0], out, atol=1e-6)\n assert result[1].sizes() == [4, 4, 2] and result[1].nnz() == 7\n assert conv._alpha is None\n\n adj = adj.sparse_resize((4, 2))\n out1 = conv((x1, x2), edge_index)\n assert out1.size() == (2, 64)\n assert torch.allclose(conv((x1, x2), edge_index, (4, 2)), out1)\n assert torch.allclose(conv((x1, x2), adj.t()), out1, atol=1e-6)\n\n t = '(OptPairTensor, Tensor, Size, NoneType) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert torch.allclose(jit((x1, x2), edge_index), out1)\n assert torch.allclose(jit((x1, x2), edge_index, size=(4, 2)), out1)\n\n t = '(OptPairTensor, SparseTensor, Size, NoneType) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert torch.allclose(jit((x1, x2), adj.t()), out1, atol=1e-6)\n" ]
[ [ "torch.allclose", "torch.tensor", "torch.randn" ] ]
partylikeits1984/financial-forecast-telegram-bot
[ "521e50977efefa212df3fd993e6197548b6bed7f" ]
[ "prophet/prophetSP500.py" ]
[ "from yahoo_fin import stock_info as si\nimport pandas as pd\nfrom fbprophet import Prophet\nfrom fbprophet.plot import add_changepoints_to_plot\n\nend = date.today()\nd = datetime.timedelta(days=730)\nstart = end - d\n\nticker = \"ES=F\"\ns = si.get_data(ticker, start, end)\ns['Date'] = s.index\ns.rename({'close': 'Close'}, axis=1, inplace=True)\ns.head()\n\ndf = pd.DataFrame()\ndf['ds'] = (s['Date'])\ndf['y'] = s['Close']\ndf.head()\n\nm = Prophet()\nm.fit(df)\n\nfuture = m.make_future_dataframe(periods=12 * 8, freq='D')\nforecast = m.predict(future)\nforecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper', 'trend', 'trend_lower', 'trend_upper']].tail()\n\nfig1 = m.plot(forecast)\nfig1.savefig('/home/ubuntu/Desktop/TelegramBot/charts/SP500forcast.jpeg', dpi=400, bbox_inches='tight')\n\nfig2 = m.plot_components(forecast)\nfig2.savefig('/home/ubuntu/Desktop/TelegramBot/charts/SP500trend.jpeg', dpi=400, bbox_inches='tight')\n\nfig = m.plot(forecast)\na = add_changepoints_to_plot(fig.gca(), m, forecast)\nfig.savefig('/home/ubuntu/Desktop/TelegramBot/charts/SP500forcastwithlines.jpeg', dpi=400, bbox_inches='tight')\n" ]
[ [ "pandas.DataFrame" ] ]
HoangQuangVu/DSN-Video-Summariztion
[ "234a728a805166bab390b5262ee0f18e520dca5f" ]
[ "parse_json.py" ]
[ "import os\nimport argparse\nimport re\nimport os.path as osp\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\nfrom utils import read_json\nimport numpy as np\n\n\"\"\"\nParse json file (.json) to extract rewards for specific videos.\nHow to use:\n# image will be saved in path: blah_blah_blah\n$ python parse_json.py -p blah_blah_blah/rewards.json -i 0\n\"\"\"\n\n# Rewards in RL are typically have a high variance,\n# so it's better to smooth them out for better analysis\ndef movingaverage(values, window):\n weights = np.repeat(1.0, window)/window\n sma = np.convolve(values, weights, 'valid')\n return sma\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-p', '--path', type=str, required=True, help=\"path to rewards.json; output saved to the same dir\")\nparser.add_argument('-i', '--idx', type=int, default=0, help=\"choose which video to visualize, index starts from 0 (default: 0)\")\nargs = parser.parse_args()\nreward_writers = read_json(args.path)\nkeys = reward_writers.keys()\n\nassert args.idx < len(keys)\nkey = keys[args.idx]\n#key = keys[index]\nrewards = reward_writers[key]\n\nrewards = np.array(rewards)\nrewards = movingaverage(rewards, 8)\n\nplt.plot(rewards)\nplt.xlabel('epoch')\nplt.ylabel('reward')\nplt.title(\"{}\".format(key))\nplt.savefig(osp.join(osp.dirname(args.path), 'epoch_reward_' + str(args.idx) + '.png'))\nplt.close()\n" ]
[ [ "matplotlib.use", "numpy.array", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.close", "matplotlib.pyplot.ylabel", "numpy.repeat", "numpy.convolve" ] ]
IdkwhatImD0ing/AlgorithmPractice
[ "2d8d68a6d0168e873d61d1e3873e882bcdaf003f" ]
[ "Python/MachineLearning/ProfitPrediction/LinearRegression.py" ]
[ "import os\nimport numpy as np\nfrom matplotlib import pyplot\nfrom mpl_toolkits.mplot3d import Axes3D\n\nimport io\nimport pandas as pd\n\n\ndef plotData(x, y):\n \"\"\"\n Plots the data points x and y into a new figure. Plots the data \n points and gives the figure axes labels of population and profit.\n \n Parameters\n ----------\n x : array_like\n Data point values for x-axis.\n\n y : array_like\n Data point values for y-axis. Note x and y should have the same size.\n \n Instructions\n ------------\n Plot the training data into a figure using the \"figure\" and \"plot\"\n functions. Set the axes labels using the \"xlabel\" and \"ylabel\" functions.\n Assume the population and revenue data have been passed in as the x\n and y arguments of this function. \n \n Hint\n ----\n You can use the 'ro' option with plot to have the markers\n appear as red circles. Furthermore, you can make the markers larger by\n using plot(..., 'ro', ms=10), where `ms` refers to marker size. You \n can also set the marker edge color using the `mec` property.\n \"\"\"\n fig = pyplot.figure() # open a new figure\n pyplot.plot(x, y, 'ro', ms=10, mec='k')\n pyplot.ylabel('Profit in $10,000')\n pyplot.xlabel('Population of City in 10,000s')\n pyplot.show()\n\n\ndef computeCost(X, y, theta):\n \"\"\"\n Compute cost for linear regression. Computes the cost of using theta as the\n parameter for linear regression to fit the data points in X and y.\n \n Parameters\n ----------\n X : array_like\n The input dataset of shape (m x n+1), where m is the number of examples,\n and n is the number of features. We assume a vector of one's already \n appended to the features so we have n+1 columns.\n \n y : array_like\n The values of the function at each data point. This is a vector of\n shape (m, ).\n \n theta : array_like\n The parameters for the regression function. 
This is a vector of \n shape (n+1, ).\n \n Returns\n -------\n J : float\n The value of the regression cost function.\n \n Instructions\n ------------\n Compute the cost of a particular choice of theta. \n You should set J to the cost.\n \"\"\"\n\n m = y.size # number of training examples\n J = (1 / (2 * m)) * (np.sum((np.dot(X, theta) - y)**2))\n\n return J\n\n\ndef gradientDescent(X, y, theta, alpha, num_iters):\n \"\"\"\n Performs gradient descent to learn `theta`. Updates theta by taking `num_iters`\n gradient steps with learning rate `alpha`.\n \n Parameters\n ----------\n X : array_like\n The input dataset of shape (m x n+1).\n \n y : array_like\n Value at given features. A vector of shape (m, ).\n \n theta : array_like\n Initial values for the linear regression parameters. \n A vector of shape (n+1, ).\n \n alpha : float\n The learning rate.\n \n num_iters : int\n The number of iterations for gradient descent. \n \n Returns\n -------\n theta : array_like\n The learned linear regression parameters. 
A vector of shape (n+1, ).\n \n J_history : list\n A python list for the values of the cost function after each iteration.\n \n Instructions\n ------------\n Peform a single gradient step on the parameter vector theta.\n\n While debugging, it can be useful to print out the values of \n the cost function (computeCost) and gradient here.\n \"\"\"\n # Initialize some useful values\n m = y.shape[0] # number of training examples\n\n # make a copy of theta, to avoid changing the original array, since numpy arrays\n # are passed by reference to functions\n theta = theta.copy()\n\n J_history = [] # Use a python list to save cost in every iteration\n #print(X)\n #print(y)\n #print(X.shape[0])\n #print(X.shape[1])\n for i in range(num_iters):\n #print(np.dot(X, theta))\n #print(\"line break\")\n #print(X)\n #print(X[0])\n #print(X[1])\n #print(theta)\n #print(((np.dot(X, theta) - y)[:, None]))\n #print(((np.dot(X, theta) - y)[:, None] * X))\n #print(((np.dot(X, theta) - y)[:, None] * X)[0])\n #print(((np.dot(X, theta) - y)[:, None] * X)[1])\n #print(((np.dot(X, theta) - y)[:, None] * X)[2])\n #print(((np.dot(X, theta) - y)[:, None] * X)[3])\n print(np.sum((np.dot(X, theta) - y)[:, None] * X, axis=0))\n #print(theta)\n theta = theta - (alpha * (1 / m) * np.sum(\n ((np.dot(X, theta) - y)[:, None] * X), axis=0))\n\n # save the cost J in every iteration\n J_history.append(computeCost(X, y, theta))\n\n return theta, J_history\n\n\nuploaded = open(\"ex1data1.txt\", \"r\")\ndata = pd.read_csv(uploaded, header=None).to_numpy()\nX, y = data[:, 0], data[:, 1]\nm = y.size # number of training examples\n\nplotData(X, y)\n\n# Add a column of ones to X. 
The numpy function stack joins arrays along a given axis.\n# The first axis (axis=0) refers to rows (training examples)\n# and second axis (axis=1) refers to columns (features).\nX = np.stack([np.ones(m), X], axis=1)\n\nJ = computeCost(X, y, theta=np.array([0.0, 0.0]))\nprint('With theta = [0, 0] \\nCost computed = %.2f' % J)\nprint('Expected cost value (approximately) 32.07\\n')\n\n# further testing of the cost function\nJ = computeCost(X, y, theta=np.array([-1, 2]))\nprint('With theta = [-1, 2]\\nCost computed = %.2f' % J)\nprint('Expected cost value (approximately) 54.24')\n\n# initialize fitting parameters\ntheta = np.zeros(2)\n\n# some gradient descent settings\niterations = 1500\nalpha = 0.01\n\ntheta, J_history = gradientDescent(X, y, theta, alpha, iterations)\nprint('Theta found by gradient descent: {:.4f}, {:.4f}'.format(*theta))\nprint('Expected theta values (approximately): [-3.6303, 1.1664]')\n\n# plot the linear fit\npyplot.plot(X[:, 1], y, 'ro', ms=10, mec='k')\npyplot.ylabel('Profit in $10,000')\npyplot.xlabel('Population of City in 10,000s')\npyplot.plot(X[:, 1], np.dot(X, theta), '-')\npyplot.legend(['Training data', 'Linear regression'])\npyplot.show()\n\n# Predict values for population sizes of 35,000 and 70,000\npredict1 = np.dot([1, 3.5], theta)\nprint('For population = 35,000, we predict a profit of {:.2f}\\n'.format(\n predict1 * 10000))\n\npredict2 = np.dot([1, 7], theta)\nprint('For population = 70,000, we predict a profit of {:.2f}\\n'.format(\n predict2 * 10000))\n\n# grid over which we will calculate J\ntheta0_vals = np.linspace(-10, 10, 100)\ntheta1_vals = np.linspace(-1, 4, 100)\n\n# initialize J_vals to a matrix of 0's\nJ_vals = np.zeros((theta0_vals.shape[0], theta1_vals.shape[0]))\n\n# Fill out J_vals\nfor i, theta0 in enumerate(theta0_vals):\n for j, theta1 in enumerate(theta1_vals):\n J_vals[i, j] = computeCost(X, y, [theta0, theta1])\n\n# Because of the way meshgrids work in the surf command, we need to\n# transpose J_vals before 
calling surf, or else the axes will be flipped\nJ_vals = J_vals.T\n\n# surface plot\nfig = pyplot.figure(figsize=(12, 5))\nax = fig.add_subplot(121, projection='3d')\nax.plot_surface(theta0_vals, theta1_vals, J_vals, cmap='viridis')\npyplot.xlabel('theta0')\npyplot.ylabel('theta1')\npyplot.title('Surface')\n\n# contour plot\n# Plot J_vals as 15 contours spaced logarithmically between 0.01 and 100\nax = pyplot.subplot(122)\npyplot.contour(theta0_vals,\n theta1_vals,\n J_vals,\n linewidths=2,\n cmap='viridis',\n levels=np.logspace(-2, 3, 20))\npyplot.xlabel('theta0')\npyplot.ylabel('theta1')\npyplot.plot(theta[0], theta[1], 'ro', ms=10, lw=2)\npyplot.title('Contour, showing minimum')\npyplot.show()\npass" ]
[ [ "numpy.array", "numpy.dot", "numpy.zeros", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "pandas.read_csv", "matplotlib.pyplot.figure", "numpy.ones", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "numpy.linspace", "numpy.logspace", "matplotlib.pyplot.subplot" ] ]
aaavinash85/100-Days-of-ML-
[ "d055d718f7972e3a4469279b9112867a42cf652f" ]
[ "Tensorflow/fashionMni1.py" ]
[ "# TensorFlow and tf.keras\nimport tensorflow as tf\nfrom tensorflow import keras\n\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprint(tf.__version__)\n\nfashion_mnist = keras.datasets.fashion_mnist\n\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\nprint(train_images.shape)\nprint(len(train_labels))\nprint(train_labels)\nprint(tests_images.shape)\n\nplt.figure()\nplt.imshow(train_images[0])\nplt.colorbar()\nplt.grid(False)\nplt.show()\n\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\nplt.figure(figsize=(10,10))\nfor i in range(25):\n plt.subplot(5,5,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(train_images[i], cmap=plt.cm.binary)\n plt.xlabel(class_names[train_labels[i]])\nplt.show()\n\nmodel = keras.Sequential([\n keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(128, activation='relu'),\n keras.layers.Dense(10)\n])\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nmodel.fit(train_images, train_labels, epochs=10)\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n\nprint('\\nTest accuracy:', test_acc)\n\nprobability_model = tf.keras.Sequential([model, \n tf.keras.layers.Softmax()])\n\npredictions = probability_model.predict(test_images)\n\npredictions[0]\n\nnp.argmax(predictions[0])\n\ntest_labels[0]\n\ndef plot_image(i, predictions_array, true_label, img):\n predictions_array, true_label, img = predictions_array, true_label[i], img[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n\n plt.imshow(img, cmap=plt.cm.binary)\n\n predicted_label = np.argmax(predictions_array)\n if predicted_label == true_label:\n color = 'blue'\n else:\n color = 'red'\n\n plt.xlabel(\"{} {:2.0f}% 
({})\".format(class_names[predicted_label],\n 100*np.max(predictions_array),\n class_names[true_label]),\n color=color)\n\ndef plot_value_array(i, predictions_array, true_label):\n predictions_array, true_label = predictions_array, true_label[i]\n plt.grid(False)\n plt.xticks(range(10))\n plt.yticks([])\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n plt.ylim([0, 1])\n predicted_label = np.argmax(predictions_array)\n\n thisplot[predicted_label].set_color('red')\n thisplot[true_label].set_color('blue')\n\ni = 0\nplt.figure(figsize=(6,3))\nplt.subplot(1,2,1)\nplot_image(i, predictions[i], test_labels, test_images)\nplt.subplot(1,2,2)\nplot_value_array(i, predictions[i], test_labels)\nplt.show()\n\ni = 12\nplt.figure(figsize=(6,3))\nplt.subplot(1,2,1)\nplot_image(i, predictions[i], test_labels, test_images)\nplt.subplot(1,2,2)\nplot_value_array(i, predictions[i], test_labels)\nplt.show()\n\n# Plot the first X test images, their predicted labels, and the true labels.\n# Color correct predictions in blue and incorrect predictions in red.\nnum_rows = 5\nnum_cols = 3\nnum_images = num_rows*num_cols\nplt.figure(figsize=(2*2*num_cols, 2*num_rows))\nfor i in range(num_images):\n plt.subplot(num_rows, 2*num_cols, 2*i+1)\n plot_image(i, predictions[i], test_labels, test_images)\n plt.subplot(num_rows, 2*num_cols, 2*i+2)\n plot_value_array(i, predictions[i], test_labels)\nplt.tight_layout()\nplt.show()\n" ]
[ [ "numpy.max", "matplotlib.pyplot.subplot", "matplotlib.pyplot.colorbar", "tensorflow.keras.layers.Flatten", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "tensorflow.keras.layers.Dense", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "numpy.argmax", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.show", "tensorflow.keras.layers.Softmax", "matplotlib.pyplot.xticks", "matplotlib.pyplot.imshow" ] ]
julesy89/pyallocation
[ "af80a8e2367a006121dd0702b55efa7b954bb039" ]
[ "pyallocation/problem.py" ]
[ "import numpy as np\n\nfrom pyallocation.function_loader import FunctionLoader\nfrom pyallocation.util import calc_obj, calc_constr\nfrom pymoo.core.problem import ElementwiseProblem\n\n\nclass AllocationProblem(ElementwiseProblem):\n\n def __init__(self, R, T, alloc=None, anti_alloc=None, w=None, **kwargs):\n self.R = R\n self.T = T\n self.alloc = alloc\n self.anti_alloc = anti_alloc\n self.w = w\n self.func_calc_consumed = FunctionLoader.get_instance().load(\"calc_consumed\")\n self.ideal = None\n self.nadir = None\n\n p, n, m = T.shape\n n_var = n\n\n n_obj = p\n if w is not None:\n n_obj = 1\n assert len(w) == p\n\n xl = np.full(n_var, 0)\n xu = np.full(n_var, m - 1)\n super().__init__(n_var=n_var, n_obj=n_obj, n_constr=p * m, xl=xl, xu=xu, **kwargs)\n\n def _evaluate(self, x, out, *args, **kwargs):\n R, T, w = self.R, self.T, self.w\n C = self.func_calc_consumed(T, x)\n F = calc_obj(C, w)\n\n G = list(calc_constr(C, R))\n\n for pos, val in self.alloc:\n G.append(int(x[pos] != val))\n\n for pos, val in self.anti_alloc:\n G.append(int(x[pos] == val))\n\n G = np.array(G)\n\n out[\"F\"], out[\"G\"] = F.astype(np.float), G.astype(np.float)\n" ]
[ [ "numpy.full", "numpy.array" ] ]
SeaPea1/PyPortfolioOpt
[ "28ef2254153e7d0a3fb9e24a2c03e80dc8b1a0d1" ]
[ "tests/test_risk_models.py" ]
[ "import pandas as pd\nimport numpy as np\nimport pytest\nfrom pypfopt import risk_models, expected_returns\nfrom tests.utilities_for_tests import get_data\n\n\ndef test_sample_cov_dummy():\n data = pd.DataFrame(\n [\n [4.0, 2.0, 0.6],\n [4.2, 2.1, 0.59],\n [3.9, 2.0, 0.58],\n [4.3, 2.1, 0.62],\n [4.1, 2.2, 0.63],\n ]\n )\n test_answer = pd.DataFrame(\n [\n [0.006661687937656102, 0.00264970955585574, 0.0020849735375206195],\n [0.00264970955585574, 0.0023450491307634215, 0.00096770864287974],\n [0.0020849735375206195, 0.00096770864287974, 0.0016396416271856837],\n ]\n )\n S = risk_models.sample_cov(data) / 252\n pd.testing.assert_frame_equal(S, test_answer)\n\n\ndef test_is_positive_semidefinite():\n a = np.zeros((100, 100))\n assert risk_models._is_positive_semidefinite(a)\n\n\ndef test_sample_cov_real_data():\n df = get_data()\n S = risk_models.sample_cov(df)\n assert S.shape == (20, 20)\n assert S.index.equals(df.columns)\n assert S.index.equals(S.columns)\n assert S.notnull().all().all()\n assert risk_models._is_positive_semidefinite(S)\n\n\ndef test_sample_cov_type_warning():\n df = get_data()\n cov_from_df = risk_models.sample_cov(df)\n\n returns_as_array = np.array(df)\n with pytest.warns(RuntimeWarning) as w:\n cov_from_array = risk_models.sample_cov(returns_as_array)\n assert len(w) == 1\n assert str(w[0].message) == \"data is not in a dataframe\"\n\n np.testing.assert_array_almost_equal(\n cov_from_df.values, cov_from_array.values, decimal=6\n )\n\n\ndef test_sample_cov_npd():\n S = np.array([[0.03818144, 0.04182824], [0.04182824, 0.04149209]])\n assert not risk_models._is_positive_semidefinite(S)\n\n for method in {\"spectral\", \"diag\"}:\n with pytest.warns(UserWarning) as w:\n S2 = risk_models.fix_nonpositive_semidefinite(S, fix_method=method)\n assert risk_models._is_positive_semidefinite(S2)\n assert len(w) == 1\n assert (\n str(w[0].message)\n == \"The covariance matrix is non positive semidefinite. 
Amending eigenvalues.\"\n )\n # Test works on DataFrame too, same results, index and columns rebuilt.\n tickers = [\"A\", \"B\"]\n S_df = pd.DataFrame(data=S, index=tickers, columns=tickers)\n S2_df = risk_models.fix_nonpositive_semidefinite(S_df, fix_method=method)\n assert isinstance(S2_df, pd.DataFrame)\n np.testing.assert_equal(S2_df.to_numpy(), S2)\n assert S2_df.index.equals(S_df.index)\n assert S2_df.columns.equals(S_df.columns)\n\n with pytest.raises(NotImplementedError):\n risk_models.fix_nonpositive_semidefinite(S, fix_method=\"blah\")\n\n\ndef test_fix_npd_different_method():\n df = get_data()\n S = risk_models.sample_cov(df)\n assert risk_models._is_positive_semidefinite(S)\n S = risk_models.sample_cov(df, fix_method=\"diag\")\n assert risk_models._is_positive_semidefinite(S)\n\n\ndef test_sample_cov_frequency():\n df = get_data()\n S = risk_models.sample_cov(df)\n S2 = risk_models.sample_cov(df, frequency=2)\n pd.testing.assert_frame_equal(S / 126, S2)\n\n\ndef test_semicovariance():\n df = get_data()\n S = risk_models.semicovariance(df)\n assert S.shape == (20, 20)\n assert S.index.equals(df.columns)\n assert S.index.equals(S.columns)\n assert S.notnull().all().all()\n assert risk_models._is_positive_semidefinite(S)\n S2 = risk_models.semicovariance(df, frequency=2)\n pd.testing.assert_frame_equal(S / 126, S2)\n # Cover that it works on np.ndarray, with a warning\n with pytest.warns(RuntimeWarning):\n S2_np = risk_models.semicovariance(df.to_numpy(), frequency=2)\n np.testing.assert_equal(S2_np, S2.to_numpy())\n\n\ndef test_semicovariance_benchmark():\n df = get_data()\n # When the benchmark is very negative, the cov matrix should be zeroes\n S_negative_benchmark = risk_models.semicovariance(df, benchmark=-0.5)\n np.testing.assert_allclose(S_negative_benchmark, 0, atol=1e-4)\n\n # Increasing the benchmark should increase covariances on average\n S = risk_models.semicovariance(df, benchmark=0)\n S2 = risk_models.semicovariance(df, benchmark=1)\n assert 
S2.sum().sum() > S.sum().sum()\n\n\ndef test_exp_cov_matrix():\n df = get_data()\n S = risk_models.exp_cov(df)\n assert S.shape == (20, 20)\n assert S.index.equals(df.columns)\n assert S.index.equals(S.columns)\n assert S.notnull().all().all()\n assert risk_models._is_positive_semidefinite(S)\n S2 = risk_models.exp_cov(df, frequency=2)\n pd.testing.assert_frame_equal(S / 126, S2)\n # Cover that it works on np.ndarray, with a warning\n with pytest.warns(RuntimeWarning):\n S2_np = risk_models.exp_cov(df.to_numpy(), frequency=2)\n np.testing.assert_equal(S2_np, S2.to_numpy())\n # Too short a span causes a warning.\n with pytest.warns(UserWarning):\n risk_models.exp_cov(df, frequency=2, span=9)\n\n\ndef test_exp_cov_limits():\n df = get_data()\n sample_cov = risk_models.sample_cov(df)\n S = risk_models.exp_cov(df)\n assert not np.allclose(sample_cov, S)\n\n # As span gets larger, it should tend towards sample covariance\n S2 = risk_models.exp_cov(df, span=1e20)\n assert np.abs(S2 - sample_cov).max().max() < 1e-3\n\n\ndef test_min_cov_det():\n df = get_data()\n S = risk_models.min_cov_determinant(df, random_state=8)\n assert S.shape == (20, 20)\n assert S.index.equals(df.columns)\n assert S.index.equals(S.columns)\n assert S.notnull().all().all()\n # assert risk_models._is_positive_semidefinite(S)\n # Cover that it works on np.ndarray, with a warning\n with pytest.warns(RuntimeWarning):\n S2 = risk_models.min_cov_determinant(df.to_numpy(), random_state=8)\n assert isinstance(S2, pd.DataFrame)\n np.testing.assert_equal(S.to_numpy(), S2.to_numpy())\n\n\ndef test_cov_to_corr():\n df = get_data()\n rets = risk_models.returns_from_prices(df).dropna()\n test_corr = risk_models.cov_to_corr(rets.cov())\n pd.testing.assert_frame_equal(test_corr, rets.corr())\n\n with pytest.warns(RuntimeWarning) as w:\n test_corr_numpy = risk_models.cov_to_corr(rets.cov().values)\n assert len(w) == 1\n assert str(w[0].message) == \"cov_matrix is not a dataframe\"\n assert 
isinstance(test_corr_numpy, pd.DataFrame)\n np.testing.assert_array_almost_equal(test_corr_numpy, rets.corr().values)\n\n\ndef test_corr_to_cov():\n df = get_data()\n rets = risk_models.returns_from_prices(df).dropna()\n test_corr = risk_models.cov_to_corr(rets.cov())\n new_cov = risk_models.corr_to_cov(test_corr, rets.std())\n pd.testing.assert_frame_equal(new_cov, rets.cov())\n\n with pytest.warns(RuntimeWarning) as w:\n cov_numpy = risk_models.corr_to_cov(test_corr.to_numpy(), rets.std())\n assert len(w) == 1\n assert str(w[0].message) == \"corr_matrix is not a dataframe\"\n assert isinstance(cov_numpy, pd.DataFrame)\n np.testing.assert_equal(cov_numpy.to_numpy(), new_cov.to_numpy())\n\n\ndef test_covariance_shrinkage_init():\n df = get_data()\n cs = risk_models.CovarianceShrinkage(df)\n assert cs.S.shape == (20, 20)\n assert not (np.isnan(cs.S)).any()\n\n\ndef test_shrunk_covariance():\n df = get_data()\n cs = risk_models.CovarianceShrinkage(df)\n shrunk_cov = cs.shrunk_covariance(0.2)\n assert cs.delta == 0.2\n assert shrunk_cov.shape == (20, 20)\n assert list(shrunk_cov.index) == list(df.columns)\n assert list(shrunk_cov.columns) == list(df.columns)\n assert not shrunk_cov.isnull().any().any()\n assert risk_models._is_positive_semidefinite(shrunk_cov)\n with pytest.warns(RuntimeWarning) as w:\n cs_numpy = risk_models.CovarianceShrinkage(df.to_numpy())\n assert len(w) == 1\n assert str(w[0].message) == \"data is not in a dataframe\"\n shrunk_cov_numpy = cs_numpy.shrunk_covariance(0.2)\n assert isinstance(shrunk_cov_numpy, pd.DataFrame)\n np.testing.assert_equal(shrunk_cov_numpy.to_numpy(), shrunk_cov.to_numpy())\n\n\ndef test_shrunk_covariance_extreme_delta():\n df = get_data()\n cs = risk_models.CovarianceShrinkage(df)\n # if delta = 0, no shrinkage occurs\n shrunk_cov = cs.shrunk_covariance(0)\n np.testing.assert_array_almost_equal(shrunk_cov.values, risk_models.sample_cov(df))\n # if delta = 1, sample cov does not contribute to shrunk cov\n shrunk_cov = 
cs.shrunk_covariance(1)\n N = df.shape[1]\n F = np.identity(N) * np.trace(cs.S) / N\n np.testing.assert_array_almost_equal(shrunk_cov.values, F * 252)\n\n\ndef test_shrunk_covariance_frequency():\n df = get_data()\n cs = risk_models.CovarianceShrinkage(df, frequency=52)\n # if delta = 0, no shrinkage occurs\n shrunk_cov = cs.shrunk_covariance(0)\n\n S = risk_models.sample_cov(df, frequency=52)\n np.testing.assert_array_almost_equal(shrunk_cov.values, S)\n\n\ndef test_ledoit_wolf_default():\n df = get_data()\n cs = risk_models.CovarianceShrinkage(df)\n shrunk_cov = cs.ledoit_wolf()\n assert 0 < cs.delta < 1\n assert shrunk_cov.shape == (20, 20)\n assert list(shrunk_cov.index) == list(df.columns)\n assert list(shrunk_cov.columns) == list(df.columns)\n assert not shrunk_cov.isnull().any().any()\n assert risk_models._is_positive_semidefinite(shrunk_cov)\n\n\ndef test_ledoit_wolf_single_index():\n df = get_data()\n cs = risk_models.CovarianceShrinkage(df)\n shrunk_cov = cs.ledoit_wolf(shrinkage_target=\"single_factor\")\n assert 0 < cs.delta < 1\n assert shrunk_cov.shape == (20, 20)\n assert list(shrunk_cov.index) == list(df.columns)\n assert list(shrunk_cov.columns) == list(df.columns)\n assert not shrunk_cov.isnull().any().any()\n assert risk_models._is_positive_semidefinite(shrunk_cov)\n\n\ndef test_ledoit_wolf_constant_correlation():\n df = get_data()\n cs = risk_models.CovarianceShrinkage(df)\n shrunk_cov = cs.ledoit_wolf(shrinkage_target=\"constant_correlation\")\n assert 0 < cs.delta < 1\n assert shrunk_cov.shape == (20, 20)\n assert list(shrunk_cov.index) == list(df.columns)\n assert list(shrunk_cov.columns) == list(df.columns)\n assert not shrunk_cov.isnull().any().any()\n assert risk_models._is_positive_semidefinite(shrunk_cov)\n\n\ndef test_ledoit_wolf_raises_not_implemented():\n df = get_data()\n cs = risk_models.CovarianceShrinkage(df)\n with pytest.raises(NotImplementedError):\n cs.ledoit_wolf(shrinkage_target=\"I have not been implemented!\")\n\n\ndef 
test_oracle_approximating():\n df = get_data()\n cs = risk_models.CovarianceShrinkage(df)\n shrunk_cov = cs.oracle_approximating()\n assert 0 < cs.delta < 1\n assert shrunk_cov.shape == (20, 20)\n assert list(shrunk_cov.index) == list(df.columns)\n assert list(shrunk_cov.columns) == list(df.columns)\n assert not shrunk_cov.isnull().any().any()\n assert risk_models._is_positive_semidefinite(shrunk_cov)\n\n\ndef test_risk_matrix_and_returns_data():\n # Test the switcher method for simple calls\n df = get_data()\n\n for method in {\n \"sample_cov\",\n \"semicovariance\",\n \"exp_cov\",\n # FIXME: this fails \"min_cov_determinant\",\n \"ledoit_wolf\",\n \"ledoit_wolf_constant_variance\",\n \"ledoit_wolf_single_factor\",\n \"ledoit_wolf_constant_correlation\",\n \"oracle_approximating\",\n }:\n\n S = risk_models.risk_matrix(df, method=method)\n assert S.shape == (20, 20)\n assert S.notnull().all().all()\n assert risk_models._is_positive_semidefinite(S)\n\n S2 = risk_models.risk_matrix(\n expected_returns.returns_from_prices(df), returns_data=True, method=method\n )\n pd.testing.assert_frame_equal(S, S2)\n\n\ndef test_risk_matrix_additional_kwargs():\n df = get_data()\n S = risk_models.sample_cov(df)\n S2 = risk_models.risk_matrix(df, frequency=2)\n pd.testing.assert_frame_equal(S / 126, S2)\n\n S = risk_models.risk_matrix(\n df, method=\"semicovariance\", benchmark=0.0004, frequency=52\n )\n assert S.shape == (20, 20)\n assert S.notnull().all().all()\n assert risk_models._is_positive_semidefinite(S)\n\n S = risk_models.risk_matrix(\n expected_returns.returns_from_prices(df),\n returns_data=True,\n method=\"exp_cov\",\n span=60,\n fix_method=\"diag\",\n )\n assert S.shape == (20, 20)\n assert S.notnull().all().all()\n assert risk_models._is_positive_semidefinite(S)\n\n\ndef test_risk_matrix_not_implemented():\n df = get_data()\n with pytest.raises(NotImplementedError):\n risk_models.risk_matrix(df, method=\"fancy_new!\")\n" ]
[ [ "numpy.testing.assert_allclose", "pandas.testing.assert_frame_equal", "numpy.array", "numpy.trace", "numpy.isnan", "numpy.zeros", "pandas.DataFrame", "numpy.testing.assert_array_almost_equal", "numpy.allclose", "numpy.identity", "numpy.abs" ] ]
Strikeskids/pyquil
[ "369c944f22d00d987b0099012a9292243781906a" ]
[ "pyquil/tests/test_parameters.py" ]
[ "from math import pi\n\nimport numpy as np\n\nfrom pyquil.parameters import (Parameter, quil_sin, quil_cos, quil_sqrt, quil_exp, quil_cis,\n _contained_parameters, format_parameter, quil_cis, substitute, substitute_array)\n\n\ndef test_format_parameter():\n test_cases = [\n (1, '1'),\n (1.0, '1.0'),\n\n (1j, 'i'),\n (0 + 1j, 'i'),\n (-1j, '-i'),\n (1e-15 + 1j, 'i'),\n (1e-15 - 1j, '-i')\n ]\n\n for test_case in test_cases:\n assert format_parameter(test_case[0]) == test_case[1]\n\n\n# https://github.com/rigetticomputing/pyquil/issues/184\ndef test_pretty_print_pi():\n test_cases = [\n (0., '0'),\n (pi, 'pi'),\n (-pi, '-pi'),\n (2 * pi / 3., '2*pi/3'),\n (pi / 9, '0.3490658503988659'),\n (pi / 8, 'pi/8'),\n (-90 * pi / 2, '-45*pi'),\n ]\n\n for test_case in test_cases:\n assert format_parameter(test_case[0]) == test_case[1]\n\n\ndef test_expression_to_string():\n x = Parameter('x')\n assert str(x) == '%x'\n\n y = Parameter('y')\n assert str(y) == '%y'\n\n assert str(x + y) == '%x+%y'\n assert str(3 * x + y) == '3*%x+%y'\n assert str(3 * (x + y)) == '3*(%x+%y)'\n\n assert str(x + y + 2) == '%x+%y+2'\n assert str(x - y - 2) == '%x-%y-2'\n assert str(x - (y - 2)) == '%x-(%y-2)'\n\n assert str((x + y) - 2) == '%x+%y-2'\n assert str(x + (y - 2)) == '%x+%y-2'\n\n assert str(x ** y ** 2) == '%x^%y^2'\n assert str(x ** (y ** 2)) == '%x^%y^2'\n assert str((x ** y) ** 2) == '(%x^%y)^2'\n\n assert str(quil_sin(x)) == 'sin(%x)'\n assert str(3 * quil_sin(x + y)) == '3*sin(%x+%y)'\n\n\ndef test_contained_parameters():\n x = Parameter('x')\n assert _contained_parameters(x) == {x}\n\n y = Parameter('y')\n assert _contained_parameters(x + y) == {x, y}\n\n assert _contained_parameters(x ** y ** quil_sin(x * y * 4)) == {x, y}\n\n\ndef test_eval():\n x = Parameter('x')\n assert substitute(x, {x: 5}) == 5\n\n y = Parameter('y')\n assert substitute(x + y, {x: 5, y: 6}) == 11\n assert substitute(x + y, {x: 5}) == 5 + y\n assert substitute(quil_exp(x), {y: 5}) != np.exp(5)\n assert 
substitute(quil_exp(x), {x: 5}) == np.exp(5)\n\n assert np.isclose(substitute(quil_sin(x * x ** 2 / y), {x: 5.0, y: 10.0}), np.sin(12.5))\n assert np.isclose(substitute(quil_sqrt(x), {x: 5.0, y: 10.0}), np.sqrt(5.0))\n assert np.isclose(substitute(quil_cis(x), {x: 5.0, y: 10.0}), np.exp(1j * 5.0))\n assert np.isclose(substitute(x - y, {x: 5.0, y: 10.0}), -5.)\n\n assert substitute(quil_cis(x), {y: 5}) == quil_cis(x)\n assert np.allclose(substitute_array([quil_sin(x), quil_cos(x)], {x: 5}), [np.sin(5), np.cos(5)])\n" ]
[ [ "numpy.sin", "numpy.exp", "numpy.sqrt", "numpy.cos" ] ]
GoekeLab/m6anet
[ "be3148a6404bdd2a4e5e9544b3e618e836c6483c", "be3148a6404bdd2a4e5e9544b3e618e836c6483c" ]
[ "m6anet/utils/data_utils.py", "m6anet/model/model_blocks/blocks.py" ]
[ "import os\nimport pandas as pd\nimport numpy as np\nimport torch\nimport json\nimport joblib\nfrom ..scripts.compute_normalization_factors import annotate_kmer_information, create_kmer_mapping_df, create_norm_dict\nfrom torch.utils.data import DataLoader, Dataset\nfrom torch.utils.data._utils.collate import default_collate\nfrom itertools import product\n\n\nclass NanopolishDS(Dataset):\n\n def __init__(self, root_dir, min_reads, norm_path=None, site_info=None,\n num_neighboring_features=1, mode='Inference', site_mode=False,\n n_processes=1):\n allowed_mode = ('Train', 'Test', 'Val', 'Inference')\n \n if mode not in allowed_mode:\n raise ValueError(\"Invalid mode passed to dataset, must be one of {}\".format(allowed_mode))\n \n self.mode = mode\n self.site_info = site_info\n self.data_info = self.initialize_data_info(root_dir, min_reads)\n self.data_fpath = os.path.join(root_dir, \"data.json\")\n self.min_reads = min_reads\n self.site_mode = site_mode\n\n if norm_path is not None:\n self.norm_dict = joblib.load(norm_path)\n else:\n self.norm_dict = self.compute_norm_factors(n_processes)\n\n if num_neighboring_features > 5:\n raise ValueError(\"Invalid neighboring features number {}\".format(num_neighboring_features))\n\n self.num_neighboring_features = num_neighboring_features\n\n center_motifs = [['A', 'G', 'T'], ['G', 'A'], ['A'], ['C'], ['A', 'C', 'T']]\n flanking_motifs = [['G', 'A', 'C', 'T'] for i in range(self.num_neighboring_features)]\n all_kmers = list([\"\".join(x) for x in product(*(flanking_motifs + center_motifs + flanking_motifs))])\n \n self.all_kmers = np.unique(np.array(list(map(lambda x: [x[i:i+5] for i in range(len(x) -4)], \n all_kmers))).flatten())\n self.kmer_to_int = {self.all_kmers[i]: i for i in range(len(self.all_kmers))}\n self.int_to_kmer = {i: self.all_kmers[i] for i in range(len(self.all_kmers))}\n\n # Inferring total number of neighboring features extracted during dataprep step\n\n kmer, _ = self._load_data(0)\n 
self.total_neighboring_features = (len(kmer) - 5) // 2\n left_idx = [(self.total_neighboring_features - num_neighboring_features + j) * 3 + i \n for j in range(num_neighboring_features) for i in range(3)]\n center_idx = [self.total_neighboring_features * 3 + i for i in range(3)]\n right_idx = [(self.total_neighboring_features + j) * 3 + i for j in range(1, num_neighboring_features + 1) \n for i in range(3)]\n\n self.indices = np.concatenate([left_idx, center_idx, right_idx]).astype('int')\n\n if self.mode != 'Inference':\n self.labels = self.data_info[\"modification_status\"].values\n\n\n def initialize_data_info(self, fpath, min_reads):\n data_index = pd.read_csv(os.path.join(fpath ,\"data.index\")) \n if self.mode == 'Inference':\n read_count = pd.read_csv(os.path.join(fpath, \"data.readcount\"))\n else:\n if self.site_info is None:\n read_count = pd.read_csv(os.path.join(fpath, \"data.readcount.labelled\"))\n else:\n read_count = pd.read_csv(os.path.join(self.site_info, \"data.readcount.labelled\"))\n \n read_count = read_count[read_count[\"set_type\"] == self.mode].reset_index(drop=True)\n\n data_info = data_index.merge(read_count, on=[\"transcript_id\", \"transcript_position\"])\n return data_info[data_info[\"n_reads\"] >= min_reads].reset_index(drop=True)\n\n def __len__(self):\n return len(self.data_info)\n\n def _load_data(self, idx):\n with open(self.data_fpath, 'r') as f:\n tx_id, tx_pos, start_pos, end_pos = self.data_info.iloc[idx][[\"transcript_id\", \"transcript_position\",\n \"start\", \"end\"]]\n f.seek(start_pos, 0)\n json_str = f.read(end_pos - start_pos)\n pos_info = json.loads(json_str)[tx_id][str(tx_pos)]\n\n assert(len(pos_info.keys()) == 1)\n\n kmer, features = list(pos_info.items())[0]\n return kmer, np.array(features)\n\n def __getitem__(self, idx):\n kmer, features = self._load_data(idx)\n # Repeating kmer to the number of reads sampled\n kmer = self._retrieve_full_sequence(kmer, self.num_neighboring_features)\n kmer = [kmer[i:i+5] for i 
in range(2 * self.num_neighboring_features + 1)]\n\n features = features[np.random.choice(len(features), self.min_reads, replace=False), :]\n features = features[:, self.indices]\n \n if self.norm_dict is not None:\n mean, std = self.get_norm_factor(kmer)\n features = torch.Tensor((features - mean) / std)\n else:\n features = torch.Tensor((features))\n\n if not self.site_mode:\n kmer = np.repeat(np.array([self.kmer_to_int[kmer] for kmer in kmer])\\\n .reshape(-1, 2 * self.num_neighboring_features + 1), self.min_reads, axis=0)\n kmer = torch.Tensor(kmer)\n else:\n kmer = torch.LongTensor([self.kmer_to_int[kmer] for kmer in kmer])\n if self.mode == 'Inference':\n return features, kmer\n else:\n return features, kmer, self.data_info.iloc[idx][\"modification_status\"]\n\n def get_norm_factor(self, list_of_kmers):\n norm_mean, norm_std = [], []\n for kmer in list_of_kmers:\n mean, std = self.norm_dict[kmer]\n norm_mean.append(mean)\n norm_std.append(std)\n return np.concatenate(norm_mean), np.concatenate(norm_std)\n\n def compute_norm_factors(self, n_processes):\n if \"kmer\" not in self.data_info.columns:\n print(\"k-mer information is not present in column, annotating k-mer information in data info\")\n self.data_info = annotate_kmer_information(self.data_fpath, self.data_info, n_processes)\n kmer_mapping_df = create_kmer_mapping_df(self.data_info)\n norm_dict = create_norm_dict(kmer_mapping_df, self.data_fpath, n_processes)\n return norm_dict\n\n def _retrieve_full_sequence(self, kmer, n_neighboring_features=0):\n if n_neighboring_features < self.total_neighboring_features:\n return kmer[self.total_neighboring_features - n_neighboring_features:2 * self.total_neighboring_features + n_neighboring_features]\n else:\n return kmer\n\n def _retrieve_sequence(self, sequence, n_neighboring_features=0):\n return [sequence[i : i+5] for i in range(len(sequence) - 4)]\n\n\nclass ImbalanceUnderSampler(torch.utils.data.Sampler):\n\n def __init__(self, data_source):\n 
self.data_source = data_source\n self.class_counts = np.unique(self.data_source.labels, return_counts=True)[1]\n self.minority_class, self.majority_class = np.argmin(self.class_counts), np.argmax(self.class_counts)\n self.minority_class_idx = np.argwhere(self.data_source.labels == self.minority_class).flatten()\n self.majority_class_idx = np.argwhere(self.data_source.labels == self.majority_class).flatten()\n\n def __iter__(self):\n idx = np.append(self.minority_class_idx, np.random.choice(self.majority_class_idx,\n len(self.minority_class_idx), replace=False))\n np.random.shuffle(idx)\n return iter(idx)\n\n def __len__(self):\n return 2 * len(self.minority_class_idx)\n\n\nclass ImbalanceOverSampler(torch.utils.data.Sampler):\n\n def __init__(self, data_source):\n self.data_source = data_source\n self.class_counts = np.unique(self.data_source.labels, return_counts=True)[1]\n self.minority_class, self.majority_class = np.argmin(self.class_counts), np.argmax(self.class_counts)\n self.minority_class_idx = np.argwhere(self.data_source.labels == self.minority_class).flatten()\n self.majority_class_idx = np.argwhere(self.data_source.labels == self.majority_class).flatten()\n\n def __iter__(self):\n idx = np.append(self.majority_class_idx, np.random.choice(self.minority_class_idx,\n len(self.majority_class_idx), replace=True))\n np.random.shuffle(idx)\n return iter(idx)\n\n def __len__(self):\n return 2 * len(self.majority_class_idx)\n\n\ndef inference_collate(batch):\n return {key: batch for key, batch \n in zip (['X', 'kmer'], default_collate(batch))}\n\n\ndef train_collate(batch):\n return {key: batch for key, batch \n in zip (['X', 'kmer', 'y'], default_collate(batch))}\n", "import torch\nfrom torch import nn\n\n\ndef get_activation(activation):\n activation_func = None\n if activation == 'tanh':\n activation_func = nn.Tanh()\n elif activation == 'sigmoid':\n activation_func = nn.Sigmoid()\n elif activation == 'relu':\n activation_func = nn.ReLU()\n elif activation == 
'softmax':\n activation_func = nn.Softmax(dim=1)\n else:\n raise ValueError(\"Invalid activation\")\n \n return activation_func\n\n\nclass Block(nn.Module):\n\n def __init__(self):\n super(Block, self).__init__()\n \n def forward(self, x):\n return self.layers(x)\n\n\nclass PoolingFilter(nn.Module):\n \n def forward(self, x):\n return x\n\n def predict_read_level_prob(self, x):\n return self.forward(x)\n\n\nclass ConcatenateFeatures(Block):\n\n def __init__(self):\n super(ConcatenateFeatures, self).__init__()\n\n def forward(self, x):\n x = torch.cat([val for _, val in x.items()], axis=1)\n return x\n\n\nclass DeaggregateNanopolish(Block):\n\n def __init__(self, num_neighboring_features, n_features=3):\n super(DeaggregateNanopolish, self).__init__()\n self.num_neighboring_features = num_neighboring_features\n self.n_features = n_features * (2 * self.num_neighboring_features + 1)\n\n\n def forward(self, x):\n return {'X': x['X'].view(-1, self.n_features), 'kmer': x['kmer'].view(-1, 1)}\n\n\nclass Flatten(Block):\n\n def __init__(self, start_dim, end_dim):\n super(Flatten, self).__init__()\n self.layers = nn.Flatten(start_dim, end_dim)\n\n\nclass KmerMultipleEmbedding(Block):\n\n def __init__(self, input_channel, output_channel, num_neighboring_features=0):\n super(KmerMultipleEmbedding, self).__init__()\n self.input_channel, self.output_channel = input_channel, output_channel\n self.embedding_layer = nn.Embedding(input_channel, output_channel)\n self.n_features = 2 * num_neighboring_features + 1\n\n def forward(self, x):\n kmer = x['kmer']\n return {'X': x['X'], 'kmer' :self.embedding_layer(kmer.long()).reshape(-1, self.n_features * self.output_channel)}\n\n\nclass Linear(Block):\n\n def __init__(self, input_channel, output_channel, activation='relu', batch_norm=True, dropout=0.0):\n super(Linear, self).__init__()\n self.layers = self._make_layers(input_channel, output_channel, activation, batch_norm)\n \n def _make_layers(self, input_channel, output_channel, 
activation, batch_norm, dropout=0.0):\n layers = [nn.Linear(input_channel, output_channel)]\n if batch_norm:\n layers.append(nn.BatchNorm1d(num_features=output_channel))\n if activation is not None:\n layers.append(get_activation(activation))\n layers.append(nn.Dropout(p=dropout))\n return nn.Sequential(*layers)\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.argmin", "torch.utils.data._utils.collate.default_collate", "numpy.random.shuffle", "torch.LongTensor", "numpy.argmax", "numpy.argwhere", "torch.Tensor", "numpy.unique" ], [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.Softmax", "torch.nn.Sigmoid", "torch.nn.Sequential", "torch.nn.Tanh", "torch.nn.ReLU", "torch.nn.BatchNorm1d", "torch.nn.Embedding", "torch.nn.Flatten" ] ]
zkbfdzp/hhatefi
[ "53e400c9fa217d099ceba7ce767ca40a1975addd" ]
[ "tests/buy_and_hold.py" ]
[ "# imports\nfrom zipline.api import (symbol, set_benchmark, order_target_percent,\n schedule_function, time_rules)\nfrom zipline.finance import commission\n#import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef initialize(context):\n context.set_commission(commission.PerShare(cost=0.0, min_trade_cost=0))\n set_benchmark(symbol('SPY'))\n context.asset = symbol('AAPL')\n context.has_ordered = False\n\n schedule_function(place_order, None,\n time_rules.market_open())\n\ndef place_order(context, data):\n if not context.has_ordered:\n order_target_percent(context.asset, 1.0)\n context.has_ordered = True\n\ndef analyze(context, perf):\n fig = plt.figure(figsize=(12,8))\n perf.algorithm_period_return.plot(x='strategy return', legend=True)\n perf.benchmark_period_return.plot(legend=True)\n plt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
J-A-St/heat_exchanger_calculation
[ "a57def1b2ccf74af6629a4cbc49fe6beea3a7194" ]
[ "test_heat_exchanger_reverse.py" ]
[ "import numpy as np\nfrom scipy.special import lambertw\n\nfrom heat_exchanger import HeatExchanger\nfrom heat_exchanger_reverse import HeatExchangerReverse\n\ninlet_temperatures = [80.0, 20.0]\nfilm_heat_transfer_coefficients = [1, 1]\nheat_capacity_flows = [5, 4]\nheat_load = 100\nmixer_type_hot = 'none'\nmixer_type_cold = 'none'\nmixer_fraction_hot = 0\nmixer_fraction_cold = 0\n\n\ndef setup_heat_exchanger():\n \"\"\"Setup coefficients of the heat exchanger class\"\"\"\n return HeatExchanger(\n inlet_temperatures=inlet_temperatures,\n film_heat_transfer_coefficients=film_heat_transfer_coefficients,\n heat_capacity_flows=heat_capacity_flows,\n heat_load=heat_load,\n mixer_type_hot=mixer_type_hot,\n mixer_type_cold=mixer_type_cold,\n mixer_fraction_hot=mixer_fraction_hot,\n mixer_fraction_cold=mixer_fraction_cold)\n\n\ndef setup_model():\n \"\"\"Setup coefficients for testing the reversed heat exchanger class\"\"\"\n h = setup_heat_exchanger()\n return HeatExchangerReverse(\n inlet_temperatures=inlet_temperatures,\n film_heat_transfer_coefficients=film_heat_transfer_coefficients,\n heat_capacity_flows=heat_capacity_flows,\n heat_load=heat_load,\n existent_area=h.area), h\n\n\ndef test_overall_heat_transfer_coefficient():\n m, h = setup_model()\n assert m.overall_heat_transfer_coefficient == h.overall_heat_transfer_coefficient\n\n\ndef test_outlet_temperatures():\n m, h = setup_model()\n assert m.outlet_temperature_hot_stream == h.outlet_temperature_hot_stream\n assert m.outlet_temperature_cold_stream == h.outlet_temperature_cold_stream\n\n\ndef test_area_no_mixer():\n m, h = setup_model()\n assert m.area_no_mixer == h.area\n\n\ndef test_mixer_type():\n m, _ = setup_model()\n assert m.mixer_type == 'none'\n m.heat_load = 90\n assert m.mixer_type == 'bypass'\n m.heat_load = 110\n assert m.mixer_type == 'admixer'\n\n\ndef test_logarithmic_mean_temperature_difference():\n m, h = setup_model()\n assert m.logarithmic_mean_temperature_difference == 
h.logarithmic_temperature_difference\n m.heat_load = 90\n logarithmic_mean_temperature_difference = m.heat_load / (h.overall_heat_transfer_coefficient * h.area)\n assert logarithmic_mean_temperature_difference == m.logarithmic_mean_temperature_difference\n\n\ndef test_lambertw():\n # Data from Chen, J.J.J.,2019. Logarithmic mean: Chen's approximation or explicit solution?. Computers and Chemical Engineering. 120,1-3.\n inlet_temperatures = [125, 30]\n film_heat_transfer_coefficients = [1, 1]\n heat_capacity_flows = [100, 465.83850931677018633540372670807]\n heat_load = 7500\n existent_area = 500\n m = HeatExchangerReverse(\n inlet_temperatures=inlet_temperatures,\n film_heat_transfer_coefficients=film_heat_transfer_coefficients,\n heat_capacity_flows=heat_capacity_flows,\n heat_load=heat_load,\n existent_area=existent_area)\n m.existent_area = 2 * 175\n m.heat_exchanger_temperature_calculation(mixer_side='cold')\n assert abs(m.heat_exchanger_outlet_temperature_cold_stream - 46.27702305742176) < 10e-1\n dTa = m.heat_exchanger_outlet_temperature_hot_stream - m.heat_exchanger_inlet_temperature_cold_stream\n dTb = m.heat_exchanger_inlet_temperature_hot_stream - m.heat_exchanger_outlet_temperature_cold_stream\n LMTD = (dTa - dTb) / np.log(dTa / dTb)\n UA = heat_load / LMTD\n assert UA == 175\n" ]
[ [ "numpy.log" ] ]
wayneisaacuy/perform
[ "333198b538eded5c498b236cf9d598b948dbb1e3" ]
[ "perform/input_funcs.py" ]
[ "\"\"\"Functions for handling ingestion of input files\"\"\"\n\nimport os\nimport re\n\nimport numpy as np\n\nfrom perform.constants import REAL_TYPE\n\n\ndef catch_input(in_dict, in_key, default_val):\n \"\"\"Handle non-list dictionary entries from parameter input files.\n\n Casts input value as same type as default_val.\n Assign default values if user does not provide a given input parameter.\n Use catch_list() if attempting to retrieve lists or lists of lists.\n\n Args:\n in_dict: Dictionary in input parameters, within which in_key is a key.\n in_key: Key of parameter to retrieve from in_dict.\n default:\n Default value to assign at output if in_key does not exist in in_dict.\n The type of default implicitly defines the type which the parameter\n is cast to, if it exists in in_dict.\n Returns:\n Input parameter retrieved from in_dict, or default if not provided.\n \"\"\"\n\n # TODO: correct error handling if default type is not recognized\n # TODO: check against lowercase'd strings so that inputs are not case sensitive.\n # Do this for True/False too\n # TODO: instead of trusting user for None, could also use NaN/Inf to indicate int/float defaults\n # without passing a numerical default\n # Or could just pass the actual default type lol, that'd be easier\n\n try:\n # If None passed as default, trust user\n if default_val is None:\n out_val = in_dict[in_key]\n else:\n default_type = type(default_val)\n out_val = default_type(in_dict[in_key])\n except KeyError:\n out_val = default_val\n\n return out_val\n\n\ndef catch_list(in_dict, in_key, default, len_highest=1):\n \"\"\"Handle list and list of list dictionary entries from parameter input files.\n\n Casts list entries of input as same type as default_val.\n Assign default values if user does not provide a given input parameter.\n\n Args:\n in_dict: Dictionary in input parameters, within which in_key is a key.\n in_key: Key of parameter to retrieve from in_dict.\n default:\n Default list to assign at output if 
in_key does not exist in in_dict.\n The type of the list entries in default implicitly defines the type which the parameter\n is cast to, if it exists in in_dict.\n len_highest: Expected length of topmost list.\n\n Returns:\n Input parameter list retrieved from in_dict, or default if not provided.\n \"\"\"\n\n # TODO: needs to throw an error if input list of lists is longer than len_highest\n # TODO: could make a recursive function probably, just hard to define appropriate list lengths at each level\n\n list_of_lists_flag = type(default[0]) == list\n\n try:\n inList = in_dict[in_key]\n\n if len(inList) == 0:\n raise ValueError\n\n # List of lists\n if list_of_lists_flag:\n val_list = []\n for list_idx in range(len_highest):\n # If default value is None, trust user\n if default[0][0] is None:\n val_list.append(inList[list_idx])\n else:\n type_default = type(default[0][0])\n cast_in_list = [type_default(inVal) for inVal in inList[list_idx]]\n val_list.append(cast_in_list)\n\n # Normal list\n else:\n # If default value is None, trust user\n if default[0] is None:\n val_list = inList\n else:\n type_default = type(default[0])\n val_list = [type_default(inVal) for inVal in inList]\n\n except:\n if list_of_lists_flag:\n val_list = []\n for list_idx in range(len_highest):\n val_list.append(default[0])\n else:\n val_list = default\n\n return val_list\n\n\ndef parse_value(expr):\n \"\"\"Parse text into Python expression.\n\n Args:\n expr: String to be converted to Python expression (e.g. 
a list).\n\n Returns:\n Parsed Python expression.\n \"\"\"\n\n try:\n return eval(expr)\n except:\n return eval(re.sub(r\"\\s+\", \",\", expr))\n else:\n return expr\n\n\ndef parse_line(line):\n \"\"\"Parse line from text file line into dict key and value.\n\n Breaks a line into the text before and after an equals sign, if present.\n The text before the equals sign is treated as the parameter name,\n and the text after the equals sign is the input value of this parameter.\n\n Args:\n line: String of a single line from text file.\n\n Returns:\n Dictionary key and value for input parameter read from line, if line contains a valid parameter.\n Otherwise raises an exception (e.g. for empty lines, or lines without an equals sign)\n \"\"\"\n\n eq = line.find(\"=\")\n if eq == -1:\n raise Exception()\n key = line[:eq].strip()\n value = line[(eq + 1) : -1].strip()\n return key, parse_value(value)\n\n\ndef read_input_file(input_file):\n \"\"\"Parse input parameters from PERFORM text input file.\n\n Refer to the documentation for proper formatting of input files.\n\n Args:\n input_file: Path to input file to be read.\n\n Returns:\n Dictionary of parameters read from input_file.\n \"\"\"\n\n # TODO: better exception handling besides just a pass\n\n read_dict = {}\n with open(input_file) as f:\n contents = f.readlines()\n\n for line in contents:\n try:\n key, val = parse_line(line)\n read_dict[key] = val\n # convert lists to NumPy arrays\n if isinstance(val, list):\n read_dict[key] = np.asarray(val)\n except:\n pass\n\n return read_dict\n\n\ndef parse_bc(bc_name, in_dict):\n \"\"\"Parse boundary condition parameters from input parameter dictionary.\n\n Retrieves inlet and outlet boundary condition parameters. 
Refer to the documentation\n for proper formatting of these input parameters.\n\n Args:\n bc_name: \"inlet\" or \"outlet\", for inlet and outlet boundary condition, respectively.\n in_dict: Dictionary if input parameters read from the solver parameters input file.\n\n Returns:\n Boundary condition parameters. If a given parameter is not supplied in the solver parameters input file,\n then None is returned for that parameter.\n \"\"\"\n\n # TODO: can definitely be made more general\n\n if (\"press_\" + bc_name) in in_dict:\n press = in_dict[\"press_\" + bc_name]\n else:\n press = None\n if (\"vel_\" + bc_name) in in_dict:\n vel = in_dict[\"vel_\" + bc_name]\n else:\n vel = None\n if (\"temp_\" + bc_name) in in_dict:\n temp = in_dict[\"temp_\" + bc_name]\n else:\n temp = None\n if (\"mass_fracs_\" + bc_name) in in_dict:\n mass_fracs = in_dict[\"mass_fracs_\" + bc_name]\n else:\n mass_fracs = None\n if (\"rho_\" + bc_name) in in_dict:\n rho = in_dict[\"rho_\" + bc_name]\n else:\n rho = None\n if (\"pert_type_\" + bc_name) in in_dict:\n pert_type = in_dict[\"pert_type_\" + bc_name]\n else:\n pert_type = None\n if (\"pert_perc_\" + bc_name) in in_dict:\n pert_perc = in_dict[\"pert_perc_\" + bc_name]\n else:\n pert_perc = None\n if (\"pert_freq_\" + bc_name) in in_dict:\n pert_freq = in_dict[\"pert_freq_\" + bc_name]\n else:\n pert_freq = None\n\n return press, vel, temp, mass_fracs, rho, pert_type, pert_perc, pert_freq\n\n\ndef get_initial_conditions(sol_domain, solver):\n \"\"\"Extract initial condition primitive solution profile.\n\n This function sets the initial conditions for a simulation. 
This may come from a piecewise uniform profile\n file (init_params_file), a user-specified binary profile (init_file), or a restart files.\n\n Restart files take precedence over an init_file, and an init_file takes precedence over an init_params_file file.\n\n Args:\n sol_domain: SolutionDomain for which the initial condition is retrieved.\n solver: SystemSolver containing global simulation parameters.\n Returns:\n NumPy array of the initial condition primitive solution profile.\n \"\"\"\n\n # TODO: add option to interpolate solution onto given mesh, if different\n\n # Initialize from restart file\n if solver.init_from_restart:\n (solver.sol_time, sol_prim_init, solver.restart_iter) = read_restart_file(solver)\n\n # Otherwise init from scratch IC or custom IC file\n else:\n if solver.init_file is None:\n sol_prim_init = gen_piecewise_uniform_ic(sol_domain, solver)\n else:\n sol_prim_init = np.load(solver.init_file)\n assert sol_prim_init.shape[0] == sol_domain.gas_model.num_eqs, \"Incorrect init_file num_eqs: \" + str(\n sol_prim_init.shape[0]\n )\n\n # Attempt to get solver.sol_time, if given\n solver.sol_time = catch_input(solver.param_dict, \"sol_time_init\", 0.0)\n\n return sol_prim_init\n\n\ndef gen_piecewise_uniform_ic(sol_domain, solver):\n \"\"\"Get primitive solution profile initial condition from piecewise uniform parameters.\n\n Piecewise uniform profiles are characterized by a solution profile broken into constant chunks,\n like step functions. 
This can be useful for cases like a shock tube or initializing flame simulations.\n\n Args:\n sol_domain:\n solver: SystemSolver containing global simulation parameters.\n Returns:\n NumPy array of the initial condition primitive solution profile.\n \"\"\"\n\n # TODO: generalize to >2 uniform regions\n\n if os.path.isfile(solver.ic_params_file):\n ic_dict = read_input_file(solver.ic_params_file)\n else:\n raise ValueError(\"Could not find initial conditions file at \" + solver.ic_params_file)\n\n split_idx = np.absolute(sol_domain.mesh.x_cell - ic_dict[\"x_split\"]).argmin() + 1\n sol_prim = np.zeros((sol_domain.gas_model.num_eqs, sol_domain.mesh.num_cells), dtype=REAL_TYPE)\n\n # TODO: error (warning?) if x_split outside domain / doesn't split domain\n\n gas = sol_domain.gas_model\n\n # Left state\n sol_prim[0, :split_idx] = ic_dict[\"press_left\"]\n sol_prim[1, :split_idx] = ic_dict[\"vel_left\"]\n sol_prim[2, :split_idx] = ic_dict[\"temp_left\"]\n mass_fracs_left = ic_dict[\"mass_fracs_left\"]\n assert np.sum(mass_fracs_left) == 1.0, \"mass_fracs_left must sum to 1.0\"\n assert len(mass_fracs_left) == gas.num_species_full, (\n \"mass_fracs_left must have \" + str(gas.num_species_full) + \" entries\"\n )\n sol_prim[3:, :split_idx] = ic_dict[\"mass_fracs_left\"][gas.mass_frac_slice, None]\n\n # Right state\n sol_prim[0, split_idx:] = ic_dict[\"press_right\"]\n sol_prim[1, split_idx:] = ic_dict[\"vel_right\"]\n sol_prim[2, split_idx:] = ic_dict[\"temp_right\"]\n mass_fracs_right = ic_dict[\"mass_fracs_right\"]\n assert np.sum(mass_fracs_right) == 1.0, \"mass_fracs_right must sum to 1.0\"\n assert len(mass_fracs_right) == gas.num_species_full, (\n \"mass_fracs_right must have \" + str(gas.num_species_full) + \" entries\"\n )\n sol_prim[3:, split_idx:] = mass_fracs_right[gas.mass_frac_slice, None]\n\n return sol_prim\n\n\ndef read_restart_file(solver):\n \"\"\"Get primitive solution profile initial condition from a restart file.\n\n Also retrieves physical solution 
time to ensure boundary forcing function is correctly synced.\n\n Sets solver.restart_iter so that subsequent restart files follow this restart file's iteration number.\n\n Args:\n solver: SystemSolver containing global simulation parameters.\n\n Returns:\n Solution time (in seconds), NumPy array of the loaded primitive solution profile,\n and the current restart iteration number.\n \"\"\"\n\n # Read text file for restart file iteration number\n iter_file = os.path.join(solver.restart_output_dir, \"restart_iter.dat\")\n with open(iter_file, \"r\") as f:\n restart_iter = int(f.read())\n\n # Read solution\n restart_file = os.path.join(solver.restart_output_dir, \"restart_file_\" + str(restart_iter) + \".npz\")\n restart_in = np.load(restart_file)\n\n sol_time = restart_in[\"sol_time\"].item() # convert array() to scalar\n sol_prim = restart_in[\"sol_prim\"]\n\n restart_iter += 1 # so this restart file doesn't get overwritten\n\n return sol_time, sol_prim, restart_iter\n" ]
[ [ "numpy.asarray", "numpy.zeros", "numpy.sum", "numpy.load", "numpy.absolute" ] ]
MarcAntoineAlex/SCINet
[ "4ac582cd717ba1c0c6c6d31a9a824235d35563ed" ]
[ "experiments/exp_basic.py" ]
[ "import os\nimport torch\nimport numpy as np\n\nclass Exp_Basic(object):\n def __init__(self, args):\n self.args = args\n self.device = self._acquire_device()\n self.model = self._build_model().cuda()\n\n def _build_model(self):\n raise NotImplementedError\n\n def _acquire_device(self):\n if self.args.use_gpu:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(self.args.gpu) if not self.args.use_multi_gpu else self.args.devices\n device = torch.device('cuda:{}'.format(self.args.gpu))\n print('Use GPU: cuda:{}'.format(self.args.gpu))\n else:\n device = torch.device('cpu')\n print('Use CPU')\n return device\n\n def _get_data(self):\n pass\n\n def valid(self):\n pass\n\n def train(self):\n pass\n\n def test(self):\n pass\n " ]
[ [ "torch.device" ] ]
RubenPants/BlenderCNN
[ "89988ee6692e818cbfc295cc0b2b5f334eb58fb3" ]
[ "model.py" ]
[ "import json\nimport os\nfrom typing import Any, List, Tuple\nfrom warnings import warn\n\nimport numpy as np\nfrom keras.activations import sigmoid\nfrom keras.layers import Activation, BatchNormalization, Conv2DTranspose, Dense, ReLU, Reshape\nfrom keras.models import load_model as lm, Sequential\nfrom matplotlib.image import imread\nfrom tensorflow.python.keras.callbacks import EarlyStopping, ReduceLROnPlateau, TensorBoard\nfrom tqdm import tqdm\n\n\nclass TransposeCNN:\n def __init__(self, name: str = ''):\n if not name: name = 'TransposeCNN'\n self.name = name\n self.path = os.path.expanduser(f'~/models/BlenderCNN/')\n self.model = None\n self.load_model()\n \n def __str__(self):\n \"\"\"Use the model's representation as the representation of this object.\"\"\"\n summary = []\n self.model.summary(print_fn=lambda x: summary.append(x))\n return '\\n'.join(summary)\n \n def __repr__(self):\n return str(self)\n \n def __call__(self, vector: List[float]):\n \"\"\"Query the model.\"\"\"\n if type(vector) == list: vector = np.asarray(vector, dtype=float)\n if len(vector.shape) == 1: vector = vector.reshape((1,) + vector.shape)\n return self.model(\n vector\n )\n \n def train(\n self,\n n_epoch: int,\n features,\n values,\n batch_size: int = 32,\n ) -> None:\n \"\"\"Train the model.\"\"\"\n # Create useful callbacks\n cb_early_stopping = EarlyStopping(patience=5, restore_best_weights=True, verbose=2) # Stop when val_loss stops\n cb_tensorboard = TensorBoard(log_dir='.logs') # Measure losses during training\n cb_lr = ReduceLROnPlateau(verbose=2, patience=3) # Reduce learning rate when val_loss stops moving\n \n # Train the model\n self.model.fit(\n epochs=n_epoch,\n x=features,\n y=values,\n batch_size=batch_size,\n validation_split=0.1,\n callbacks=[cb_early_stopping, cb_tensorboard, cb_lr],\n )\n self.save_model()\n \n def save_model(self) -> None:\n \"\"\"Save the current state of the model.\"\"\"\n if not os.path.exists(self.path): os.makedirs(self.path)\n 
self.model.save(os.path.join(self.path, f'{self.name}.h5'))\n warn(\"Model saved!\")\n \n def load_model(self) -> None:\n \"\"\"Load in the CNN model.\"\"\"\n try:\n self.model = lm(os.path.join(self.path, f'{self.name}.h5'))\n warn(\"Model loaded successfully!\")\n except OSError:\n warn(\"No model found, creating new one...\")\n self.model = create_model(self.name)\n warn(\"Model initialised!\")\n \n # Give an overview of the model\n self.model.summary()\n\n\ndef create_model(name):\n \"\"\"Create the model.\"\"\"\n model = Sequential(name=name)\n \n # Input\n # model.add(Reshape((1, 1, 5), input_dim=5, name='reshape'))\n \n # Initial layer\n model.add(Dense(16 * 16 * 128, input_dim=5, name='dense_input'))\n model.add(Reshape((16, 16, 128), name='reshape'))\n model.add(BatchNormalization(\n momentum=0.1,\n epsilon=1e-5,\n name=f'batch_norm_init',\n ))\n model.add(ReLU(\n name=f'ReLU_init',\n ))\n \n # Intermediate layers\n for i, filters in zip(range(1, 10), [128, 128, 128]):\n model.add(Conv2DTranspose(\n filters=filters,\n kernel_size=(4, 4),\n strides=(2, 2),\n padding='same',\n use_bias=False, # Included in BatchNormalization\n name=f'conv2d_t_layer{i}',\n ))\n model.add(BatchNormalization(\n momentum=0.1,\n epsilon=1e-5,\n name=f'batch_norm_layer{i}',\n ))\n model.add(ReLU(\n name=f'ReLU_layer{i}',\n ))\n \n # Final layer\n model.add(Conv2DTranspose(\n filters=3,\n kernel_size=(4, 4),\n strides=(2, 2),\n padding='same',\n use_bias=True, # Since no BatchNormalization\n name='conv2d_t_last',\n ))\n model.add(Activation(\n sigmoid, # End with sigmoid layer to ensure values between 0..1\n name='sigmoid_last',\n ))\n \n # Compile the model\n model.compile(\n optimizer='adam', # Adam optimiser\n loss='mse', # Regression problem\n )\n return model\n\n\ndef parse_images(img_names: List[str]) -> Tuple[List[Any], List[Any]]:\n \"\"\"Parse the image features and values from the images.\"\"\"\n # Initialise placeholders\n features = np.zeros((len(img_names), 5), 
dtype=float) # Replicated state-vectors\n values = np.zeros((len(img_names), 256, 256, 3), dtype=float) # Image-depth is 3 (RGB)\n \n # Load in all vectors\n with open(os.path.expanduser(f'~/data/flying_dots/metadata.json'), 'r') as f:\n all_features = json.load(f)\n for i, img in enumerate(tqdm(img_names, desc='Parsing images...')):\n features[i, :] = all_features[img]\n values[i, :, :, :] = imread(os.path.expanduser(f'~/data/flying_dots/{img}.png'), format='RGB')[:, :, :3]\n return features, values\n" ]
[ [ "tensorflow.python.keras.callbacks.TensorBoard", "tensorflow.python.keras.callbacks.ReduceLROnPlateau", "numpy.asarray", "tensorflow.python.keras.callbacks.EarlyStopping" ] ]
tombroz/berkeley-cs294_homework
[ "5419b772c734093c750362d2e09b46ce59d79da6" ]
[ "hw3/dqn.py" ]
[ "import os\nimport sys\nimport time\n\nimport gym.spaces\nimport itertools\nimport numpy as np\nimport random\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\nfrom collections import namedtuple\nfrom dqn_utils import *\n\nOptimizerSpec = namedtuple(\"OptimizerSpec\", [\"constructor\", \"kwargs\", \"lr_schedule\"])\n\ndef learn(env,\n q_func,\n optimizer_spec,\n session,\n exploration=LinearSchedule(1000000, 0.1),\n stopping_criterion=None,\n replay_buffer_size=1000000,\n batch_size=32,\n gamma=0.99,\n learning_starts=50000,\n learning_freq=4,\n frame_history_len=4,\n target_update_freq=10000,\n grad_norm_clipping=10,\n out_dir=None,\n double_q=True):\n \"\"\"Run Deep Q-learning algorithm.\n\n You can specify your own convnet using q_func.\n\n All schedules are w.r.t. total number of steps taken in the environment.\n\n Parameters\n ----------\n env: gym.Env\n gym environment to train on.\n q_func: function\n Model to use for computing the q function. It should accept the\n following named arguments:\n img_in: tf.Tensor\n tensorflow tensor representing the input image\n num_actions: int\n number of actions\n scope: str\n scope in which all the model related variables\n should be created\n reuse: bool\n whether previously created variables should be reused.\n optimizer_spec: OptimizerSpec\n Specifying the constructor and kwargs, as well as learning rate schedule\n for the optimizer\n session: tf.Session\n tensorflow session to use.\n exploration: rl_algs.deepq.utils.schedules.Schedule\n schedule for probability of chosing random action.\n stopping_criterion: (env, t) -> bool\n should return true when it's ok for the RL algorithm to stop.\n takes in env and the number of steps executed so far.\n replay_buffer_size: int\n How many memories to store in the replay buffer.\n batch_size: int\n How many transitions to sample each time experience is replayed.\n gamma: float\n Discount Factor\n learning_starts: int\n After how many environment steps to 
start replaying experiences\n learning_freq: int\n How many steps of environment to take between every experience replay\n frame_history_len: int\n How many past frames to include as input to the model.\n target_update_freq: int\n How many experience replay rounds (not steps!) to perform between\n each update to the target Q network\n grad_norm_clipping: float or None\n If not None gradients' norms are clipped to this value.\n \"\"\"\n assert type(env.observation_space) == gym.spaces.Box\n assert type(env.action_space) == gym.spaces.Discrete\n\n ###############\n # BUILD MODEL #\n ###############\n if not out_dir: out_dir = os.path.join(os.getcwd(),'results',env.unwrapped.spec.id +'_' + time.strftime(\"%d-%m-%Y_%H-%M-%S\"))\n writer = tf.summary.FileWriter(out_dir)\n\n if len(env.observation_space.shape) == 1:\n # This means we are running on low-dimensional observations (e.g. RAM)\n input_shape = env.observation_space.shape\n else:\n img_h, img_w, img_c = env.observation_space.shape\n input_shape = (img_h, img_w, frame_history_len * img_c)\n num_actions = env.action_space.n\n\n # set up placeholders\n # placeholder for current observation (or state)\n obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))\n # placeholder for current action\n act_t_ph = tf.placeholder(tf.int32, [None])\n # placeholder for current reward\n rew_t_ph = tf.placeholder(tf.float32, [None])\n # placeholder for next observation (or state)\n obs_tp1_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))\n # placeholder for end of episode mask\n # this value is 1 if the next state corresponds to the end of an episode,\n # in which case there is no Q-value at the next state; at the end of an\n # episode, only the current state reward contributes to the target, not the\n # next state Q-value (i.e. 
target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)\n done_mask_ph = tf.placeholder(tf.float32, [None])\n\n # casting to float on GPU ensures lower data transfer times.\n obs_t_float = tf.cast(obs_t_ph, tf.float32) / 255.0\n obs_tp1_float = tf.cast(obs_tp1_ph, tf.float32) / 255.0\n\n # Here, you should fill in your own code to compute the Bellman error. This requires\n # evaluating the current and next Q-values and constructing the corresponding error.\n # TensorFlow will differentiate this error for you, you just need to pass it to the\n # optimizer. See assignment text for details.\n # Your code should produce one scalar-valued tensor: total_error\n # This will be passed to the optimizer in the provided code below.\n # Your code should also produce two collections of variables:\n # q_func_vars\n # target_q_func_vars\n # These should hold all of the variables of the Q-function network and target network,\n # respectively. A convenient way to get these is to make use of TF's \"scope\" feature.\n # For example, you can create your Q-function network with the scope \"q_func\" like this:\n # <something> = q_func(obs_t_float, num_actions, scope=\"q_func\", reuse=False)\n # And then you can obtain the variables like this:\n # q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')\n # Older versions of TensorFlow may require using \"VARIABLES\" instead of \"GLOBAL_VARIABLES\"\n ######\n def q_online(obs_float):\n return q_func(obs_float,num_actions,scope=\"online_q_func\",reuse=tf.AUTO_REUSE)\n\n # Q-function network and target network\n q_online_t = q_online(obs_t_float)\n q_online_tp1 = q_online(obs_tp1_float)\n q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope='online_q_func')\n q_target = q_func(obs_tp1_float,num_actions,scope=\"target_q_func\",reuse=False)\n target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope='target_q_func')\n # Bellman training error\n if double_q:\n q_max = 
gather_2d(q_target,tf.argmax(q_online_tp1,axis=1,output_type=tf.int32))\n else:\n q_max = tf.reduce_max(q_target,axis=1)\n target = rew_t_ph + gamma * q_max * (1.0 - done_mask_ph)\n q_t_act = gather_2d(q_online_t,act_t_ph)\n total_error = tf.reduce_mean(huber_loss(target - q_t_act))\n ######\n\n # construct optimization op (with gradient clipping)\n learning_rate = tf.placeholder(tf.float32, (), name=\"learning_rate\")\n optimizer = optimizer_spec.constructor(learning_rate=learning_rate, **optimizer_spec.kwargs)\n train_fn = minimize_and_clip(optimizer, total_error,\n var_list=q_func_vars, clip_val=grad_norm_clipping)\n\n # update_target_fn will be called periodically to copy Q network to target Q network\n update_target_fn = []\n for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),\n sorted(target_q_func_vars, key=lambda v: v.name)):\n update_target_fn.append(var_target.assign(var))\n update_target_fn = tf.group(*update_target_fn)\n\n # construct the replay buffer\n replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len)\n\n ###############\n # RUN ENV #\n ###############\n model_initialized = False\n num_param_updates = 0\n mean_episode_reward = -float('nan')\n best_mean_episode_reward = -float('inf')\n last_obs = env.reset()\n LOG_EVERY_N_STEPS = 10000\n DEBUG_LOG_EVERY_N_STEPS = 1000\n start = time.time()\n\n for t in itertools.count():\n ### 1. Check stopping criterion\n if stopping_criterion is not None and stopping_criterion(env, t):\n break\n\n ### 2. Step the env and store the transition\n # At this point, \"last_obs\" contains the latest observation that was\n # recorded from the simulator. Here, your code needs to store this\n # observation and its outcome (reward, next observation, etc.) 
into\n # the replay buffer while stepping the simulator forward one step.\n # At the end of this block of code, the simulator should have been\n # advanced one step, and the replay buffer should contain one more\n # transition.\n # Specifically, last_obs must point to the new latest observation.\n # Useful functions you'll need to call:\n # obs, reward, done, info = env.step(action)\n # this steps the environment forward one step\n # obs = env.reset()\n # this resets the environment if you reached an episode boundary.\n # Don't forget to call env.reset() to get a new observation if done\n # is true!!\n # Note that you cannot use \"last_obs\" directly as input\n # into your network, since it needs to be processed to include context\n # from previous frames. You should check out the replay buffer\n # implementation in dqn_utils.py to see what functionality the replay\n # buffer exposes. The replay buffer has a function called\n # encode_recent_observation that will take the latest observation\n # that you pushed into the buffer and compute the corresponding\n # input that should be given to a Q network by appending some\n # previous frames.\n # Don't forget to include epsilon greedy exploration!\n # And remember that the first time you enter this loop, the model\n # may not yet have been initialized (but of course, the first step\n # might as well be random, since you haven't trained your net...)\n\n idx = replay_buffer.store_frame(last_obs)\n last_obs = replay_buffer.encode_recent_observation()\n\n if random.uniform(0,1) < exploration.value(t) or not model_initialized:\n action = np.random.choice(num_actions)\n else:\n action = np.argmax(session.run(q_online_t,feed_dict={obs_t_ph: last_obs[None]})[0])\n\n last_obs,reward,done,info = env.step(action)\n replay_buffer.store_effect(idx,action,reward,done)\n if done:\n last_obs = env.reset()\n\n # at this point, the environment should have been advanced one step (and\n # reset if done was true), and last_obs should point 
to the new latest\n # observation\n\n ### 3. Perform experience replay and train the network.\n # note that this is only done if the replay buffer contains enough samples\n # for us to learn something useful -- until then, the model will not be\n # initialized and random actions should be taken\n if (t > learning_starts and\n t % learning_freq == 0 and\n replay_buffer.can_sample(batch_size)):\n # Here, you should perform training. Training consists of four steps:\n # 3.a: use the replay buffer to sample a batch of transitions (see the\n # replay buffer code for function definition, each batch that you sample\n # should consist of current observations, current actions, rewards,\n # next observations, and done indicator).\n # 3.b: initialize the model if it has not been initialized yet; to do\n # that, call\n # initialize_interdependent_variables(session, tf.global_variables(), {\n # obs_t_ph: obs_t_batch,\n # obs_tp1_ph: obs_tp1_batch,\n # })\n # where obs_t_batch and obs_tp1_batch are the batches of observations at\n # the current and next time step. The boolean variable model_initialized\n # indicates whether or not the model has been initialized.\n # Remember that you have to update the target network too (see 3.d)!\n # 3.c: train the model. To do this, you'll need to use the train_fn and\n # total_error ops that were created earlier: total_error is what you\n # created to compute the total Bellman error in a batch, and train_fn\n # will actually perform a gradient step and update the network parameters\n # to reduce total_error. 
When calling session.run on these you'll need to\n # populate the following placeholders:\n # obs_t_ph\n # act_t_ph\n # rew_t_ph\n # obs_tp1_ph\n # done_mask_ph\n # (this is needed for computing total_error)\n # learning_rate -- you can get this from optimizer_spec.lr_schedule.value(t)\n # (this is needed by the optimizer to choose the learning rate)\n # 3.d: periodically update the target network by calling\n # session.run(update_target_fn)\n # you should update every target_update_freq steps, and you may find the\n # variable num_param_updates useful for this (it was initialized to 0)\n\n obs_t_batch,act_t_batch,rew_t_batch,obs_tp1_batch,done_mask_batch = replay_buffer.sample(batch_size)\n if not model_initialized:\n initialize_interdependent_variables(session,tf.global_variables(),\n {obs_t_ph: obs_t_batch,obs_tp1_ph: obs_tp1_batch,})\n model_initialized = True\n session.run([total_error,train_fn],\n feed_dict={obs_t_ph: obs_t_batch,act_t_ph: act_t_batch, rew_t_ph: rew_t_batch,\n obs_tp1_ph: obs_tp1_batch,done_mask_ph: done_mask_batch,\n learning_rate: optimizer_spec.lr_schedule.value(t)})\n num_param_updates += 1\n if num_param_updates % target_update_freq == 0:\n session.run(update_target_fn)\n\n ### 4. 
Log progress\n episode_rewards = get_wrapper_by_name(env, \"Monitor\").get_episode_rewards()\n if len(episode_rewards) > 0:\n mean_episode_reward = np.mean(episode_rewards[-100:])\n if len(episode_rewards) > 100:\n best_mean_episode_reward = max(best_mean_episode_reward, mean_episode_reward)\n if t % DEBUG_LOG_EVERY_N_STEPS == 0:\n print('Timestep = {} | Elapsed time = {:.3f}sec'.format(t,time.time() - start))\n if t % LOG_EVERY_N_STEPS == 0 and model_initialized:\n print(\"Timestep %d\" % (t,))\n print(\"mean reward (100 episodes) %f\" % mean_episode_reward)\n print(\"best mean reward %f\" % best_mean_episode_reward)\n print(\"episodes %d\" % len(episode_rewards))\n print(\"exploration %f\" % exploration.value(t))\n print(\"learning_rate %f\" % optimizer_spec.lr_schedule.value(t))\n mean_rew_summ = tf.Summary(value=[tf.Summary.Value(tag='mean_rew',simple_value=mean_episode_reward)])\n best_mean_rew_summ = tf.Summary(value=[tf.Summary.Value(tag='best_mean_rew',simple_value=best_mean_episode_reward)])\n writer.add_summary(mean_rew_summ, global_step=t)\n writer.add_summary(best_mean_rew_summ, global_step=t)\n sys.stdout.flush()\n\ndef gather_2d(vectors,indices):\n return tf.gather_nd(vectors, tf.stack([tf.range(tf.shape(vectors)[0]), indices], axis=1))" ]
[ [ "tensorflow.shape", "numpy.random.choice", "tensorflow.group", "tensorflow.argmax", "tensorflow.global_variables", "numpy.mean", "tensorflow.reduce_max", "tensorflow.placeholder", "tensorflow.Summary.Value", "tensorflow.summary.FileWriter", "tensorflow.get_collection", "tensorflow.cast" ] ]
Tilps/keras
[ "71ec611117d1bcbe8f49bc432200aa041bffefcb" ]
[ "keras/engine/input_spec.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n# pylint: disable=g-classes-have-attributes\n\"\"\"Contains the InputSpec class.\"\"\"\n\nimport tensorflow.compat.v2 as tf\nfrom keras import backend\nfrom tensorflow.python.util.tf_export import keras_export\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@keras_export('keras.layers.InputSpec',\n v1=['keras.layers.InputSpec',\n 'keras.__internal__.legacy.layers.InputSpec'])\n@tf_export(v1=['layers.InputSpec'])\nclass InputSpec:\n \"\"\"Specifies the rank, dtype and shape of every input to a layer.\n\n Layers can expose (if appropriate) an `input_spec` attribute:\n an instance of `InputSpec`, or a nested structure of `InputSpec` instances\n (one per input tensor). These objects enable the layer to run input\n compatibility checks for input structure, input rank, input shape, and\n input dtype.\n\n A None entry in a shape is compatible with any dimension,\n a None shape is compatible with any shape.\n\n Args:\n dtype: Expected DataType of the input.\n shape: Shape tuple, expected shape of the input\n (may include None for unchecked axes). 
Includes the batch size.\n ndim: Integer, expected rank of the input.\n max_ndim: Integer, maximum rank of the input.\n min_ndim: Integer, minimum rank of the input.\n axes: Dictionary mapping integer axes to\n a specific dimension value.\n allow_last_axis_squeeze: If True, then allow inputs of rank N+1 as long\n as the last axis of the input is 1, as well as inputs of rank N-1\n as long as the last axis of the spec is 1.\n name: Expected key corresponding to this input when passing data as\n a dictionary.\n\n Example:\n\n ```python\n class MyLayer(Layer):\n def __init__(self):\n super(MyLayer, self).__init__()\n # The layer will accept inputs with shape (?, 28, 28) & (?, 28, 28, 1)\n # and raise an appropriate error message otherwise.\n self.input_spec = InputSpec(\n shape=(None, 28, 28, 1),\n allow_last_axis_squeeze=True)\n ```\n \"\"\"\n\n def __init__(self,\n dtype=None,\n shape=None,\n ndim=None,\n max_ndim=None,\n min_ndim=None,\n axes=None,\n allow_last_axis_squeeze=False,\n name=None):\n self.dtype = tf.as_dtype(dtype).name if dtype is not None else None\n shape = tf.TensorShape(shape)\n if shape.rank is None:\n shape = None\n else:\n shape = tuple(shape.as_list())\n if shape is not None:\n self.ndim = len(shape)\n self.shape = shape\n else:\n self.ndim = ndim\n self.shape = None\n self.max_ndim = max_ndim\n self.min_ndim = min_ndim\n self.name = name\n self.allow_last_axis_squeeze = allow_last_axis_squeeze\n try:\n axes = axes or {}\n self.axes = {int(k): axes[k] for k in axes}\n except (ValueError, TypeError):\n raise TypeError('The keys in axes must be integers.')\n\n if self.axes and (self.ndim is not None or self.max_ndim is not None):\n max_dim = (self.ndim if self.ndim else self.max_ndim) - 1\n max_axis = max(self.axes)\n if max_axis > max_dim:\n raise ValueError('Axis {} is greater than the maximum allowed value: {}'\n .format(max_axis, max_dim))\n\n def __repr__(self):\n spec = [('dtype=' + str(self.dtype)) if self.dtype else '',\n ('shape=' + 
str(self.shape)) if self.shape else '',\n ('ndim=' + str(self.ndim)) if self.ndim else '',\n ('max_ndim=' + str(self.max_ndim)) if self.max_ndim else '',\n ('min_ndim=' + str(self.min_ndim)) if self.min_ndim else '',\n ('axes=' + str(self.axes)) if self.axes else '']\n return 'InputSpec(%s)' % ', '.join(x for x in spec if x)\n\n def get_config(self):\n return {\n 'dtype': self.dtype,\n 'shape': self.shape,\n 'ndim': self.ndim,\n 'max_ndim': self.max_ndim,\n 'min_ndim': self.min_ndim,\n 'axes': self.axes}\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n\n\ndef to_tensor_shape(spec):\n \"\"\"Returns a tf.TensorShape object that matches the shape specifications.\n\n If the InputSpec's shape or ndim is defined, this method will return a fully\n or partially-known shape. Otherwise, the returned TensorShape is None.\n\n Args:\n spec: an InputSpec object.\n\n Returns:\n a tf.TensorShape object\n \"\"\"\n if spec.ndim is None and spec.shape is None:\n return tf.TensorShape(None)\n elif spec.shape is not None:\n return tf.TensorShape(spec.shape)\n else:\n shape = [None] * spec.ndim\n for a in spec.axes:\n shape[a] = spec.axes[a] # Assume that axes is defined\n return tf.TensorShape(shape)\n\n\ndef assert_input_compatibility(input_spec, inputs, layer_name):\n \"\"\"Checks compatibility between the layer and provided inputs.\n\n This checks that the tensor(s) `inputs` verify the input assumptions\n of a layer (if any). 
If not, a clear and actional exception gets raised.\n\n Args:\n input_spec: An InputSpec instance, list of InputSpec instances, a nested\n structure of InputSpec instances, or None.\n inputs: Input tensor, list of input tensors, or a nested structure of\n input tensors.\n layer_name: String, name of the layer (for error message formatting).\n\n Raises:\n ValueError: in case of mismatch between\n the provided inputs and the expectations of the layer.\n \"\"\"\n if not input_spec:\n return\n\n input_spec = tf.nest.flatten(input_spec)\n if isinstance(inputs, dict):\n # Flatten `inputs` by reference order if input spec names are provided\n names = [spec.name for spec in input_spec]\n if all(names):\n list_inputs = []\n for name in names:\n if name not in inputs:\n raise ValueError('Missing data for input \"%s\". '\n 'You passed a data dictionary with keys %s. '\n 'Expected the following keys: %s' %\n (name, list(inputs.keys()), names))\n list_inputs.append(inputs[name])\n inputs = list_inputs\n\n inputs = tf.nest.flatten(inputs)\n for x in inputs:\n # Having a shape/dtype is the only commonality of the various tensor-like\n # objects that may be passed. The most common kind of invalid type we are\n # guarding for is a Layer instance (Functional API), which does not\n # have a `shape` attribute.\n if not hasattr(x, 'shape'):\n raise TypeError('Inputs to a layer should be tensors. Got: %s' % (x,))\n\n if len(inputs) != len(input_spec):\n raise ValueError('Layer ' + layer_name + ' expects ' +\n str(len(input_spec)) + ' input(s), '\n 'but it received ' + str(len(inputs)) +\n ' input tensors. 
Inputs received: ' + str(inputs))\n for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):\n if spec is None:\n continue\n\n shape = tf.TensorShape(x.shape)\n if shape.rank is None:\n return\n # Check ndim.\n if spec.ndim is not None and not spec.allow_last_axis_squeeze:\n ndim = shape.rank\n if ndim != spec.ndim:\n raise ValueError('Input ' + str(input_index) + ' of layer ' +\n layer_name + ' is incompatible with the layer: '\n 'expected ndim=' + str(spec.ndim) + ', found ndim=' +\n str(ndim) + '. Full shape received: ' +\n str(tuple(shape)))\n if spec.max_ndim is not None:\n ndim = x.shape.rank\n if ndim is not None and ndim > spec.max_ndim:\n raise ValueError('Input ' + str(input_index) + ' of layer ' +\n layer_name + ' is incompatible with the layer: '\n 'expected max_ndim=' + str(spec.max_ndim) +\n ', found ndim=' + str(ndim))\n if spec.min_ndim is not None:\n ndim = x.shape.rank\n if ndim is not None and ndim < spec.min_ndim:\n raise ValueError('Input ' + str(input_index) + ' of layer ' +\n layer_name + ' is incompatible with the layer: '\n ': expected min_ndim=' + str(spec.min_ndim) +\n ', found ndim=' + str(ndim) +\n '. 
Full shape received: ' +\n str(tuple(shape)))\n # Check dtype.\n if spec.dtype is not None:\n if x.dtype.name != spec.dtype:\n raise ValueError('Input ' + str(input_index) + ' of layer ' +\n layer_name + ' is incompatible with the layer: '\n 'expected dtype=' + str(spec.dtype) +\n ', found dtype=' + str(x.dtype))\n\n # Check specific shape axes.\n shape_as_list = shape.as_list()\n if spec.axes:\n for axis, value in spec.axes.items():\n if hasattr(value, 'value'):\n value = value.value\n if value is not None and shape_as_list[int(axis)] not in {value, None}:\n raise ValueError(\n 'Input ' + str(input_index) + ' of layer ' + layer_name + ' is'\n ' incompatible with the layer: expected axis ' + str(axis) +\n ' of input shape to have value ' + str(value) +\n ' but received input with shape ' + display_shape(x.shape))\n # Check shape.\n if spec.shape is not None and shape.rank is not None:\n spec_shape = spec.shape\n if spec.allow_last_axis_squeeze:\n if shape_as_list and shape_as_list[-1] == 1:\n shape_as_list = shape_as_list[:-1]\n if spec_shape and spec_shape[-1] == 1:\n spec_shape = spec_shape[:-1]\n for spec_dim, dim in zip(spec_shape, shape_as_list):\n if spec_dim is not None and dim is not None:\n if spec_dim != dim:\n raise ValueError('Input ' + str(input_index) +\n ' is incompatible with layer ' + layer_name +\n ': expected shape=' + str(spec.shape) +\n ', found shape=' + display_shape(x.shape))\n\n\ndef display_shape(shape):\n return str(tuple(shape.as_list()))\n\n\ndef to_tensor_spec(input_spec, default_dtype=None):\n \"\"\"Converts a Keras InputSpec object to a TensorSpec.\"\"\"\n default_dtype = default_dtype or backend.floatx()\n if isinstance(input_spec, InputSpec):\n dtype = input_spec.dtype or default_dtype\n return tf.TensorSpec(to_tensor_shape(input_spec), dtype)\n return tf.TensorSpec(None, default_dtype)\n" ]
[ [ "tensorflow.python.util.tf_export.keras_export", "tensorflow.compat.v2.TensorShape", "tensorflow.compat.v2.TensorSpec", "tensorflow.compat.v2.as_dtype", "tensorflow.compat.v2.nest.flatten", "tensorflow.python.util.tf_export.tf_export" ] ]
Minipeps/betapose
[ "e29162c82c867d4a8177322d7d49a55c5fd90639" ]
[ "3_6Dpose_estimator/utils/utils.py" ]
[ "import math\nimport numpy as np\nimport cv2\nfrom tqdm import tqdm\nfrom scipy.linalg import expm, norm\nfrom matplotlib import pyplot as plt\nfrom IPython import embed #debugging\nimport sys\nimport os\nthis_dir = os.path.dirname(__file__)\ncurrent_path = os.path.join(this_dir)\nsys.path.append(current_path)\nimport renderer\nimport random\nimport copy\n\ndef pnp(points_3D, points_2D, cameraMatrix):\n try:\n distCoeffs = pnp.distCoeffs\n except:\n distCoeffs = np.zeros((8, 1), dtype='float32')\n\n assert points_3D.shape[0] == points_2D.shape[0], 'points 3D and points 2D must have same number of vertices'\n # embed()\n _, R_exp, t = cv2.solvePnP(points_3D,\n # points_2D,\n np.ascontiguousarray(points_2D[:,:2]).reshape((-1,1,2)),\n cameraMatrix,\n distCoeffs)\n # , None, None, False, cv2.SOLVEPNP_UPNP)\n \n # R_exp, t, _ = cv2.solvePnPRansac(points_3D,\n # points_2D,\n # cameraMatrix,\n # distCoeffs,\n # reprojectionError=12.0)\n # \n\n R, _ = cv2.Rodrigues(R_exp)\n # Rt = np.c_[R, t]\n return R, t\n\ndef handle_occlusion(real_kp_depth, real_kp_label, real_all_depth):\n '''Handle occlusion by removing pixels that are previously marked with 1\n in real_all_depth.\n '''\n # embed()\n for i in range(len(real_kp_depth)):\n for j in range(len(real_kp_depth[i])):\n # loop for every key point as center point\n if (real_kp_depth[i][j][0] > 0):\n if (real_all_depth[i][j][0] == 1): #occluded\n # print(\"Delete one kp due to occluding!\")\n real_kp_depth[i][j][:] = [0, 0, 0]\n real_kp_label[i][j][:] = real_kp_label[i][j][:] + 66\n # special mark for occluded kp\n return real_kp_depth, real_kp_label\n\ndef local_top(kp_raw_depth, all_depth, label_matrix, searching_radius = 5, threshold_ratio = 0.5):\n '''Remove vertices that are in the back of the model.\n Principal: prefer to save rather than kill.\n '''\n return kp_raw_depth, label_matrix\n # kp_depth = kp_raw_depth\n # min_kp_depth = 10000.000\n # max_kp_depth = np.max(kp_depth)\n # for i in range(len(kp_raw_depth)):\n # 
for j in range(len(kp_raw_depth[i])):\n # if kp_raw_depth[i][j]<min_kp_depth and kp_raw_depth[i][j] > 3:\n # min_kp_depth = kp_raw_depth[i][j]\n # threshold = threshold_ratio * (max_kp_depth - min_kp_depth)\n # # print (\"threshold_ratio is: \", threshold_ratio)\n # for i in range(len(kp_raw_depth)):\n # for j in range(len(kp_raw_depth[i])):\n # if kp_raw_depth[i][j] != 0:\n # # loop for every key point as center point\n # xmin = max(0, i - searching_radius)\n # xmax = min(len(kp_raw_depth), i + searching_radius)\n # ymin = max(0, j - searching_radius)\n # ymax = min(len(kp_raw_depth[i]), j + searching_radius)\n # # # For testing: print all ratios to choose the suitable one\n # # if all_depth[i][j] > 1:\n # # current_ratio = (kp_raw_depth[i][j] - all_depth[i][j])\\\n # # /(max_kp_depth - min_kp_depth)\n # # if current_ratio > 0 :\n # # print(\"Current ratio is:\", current_ratio)\n # for k in range(xmin, xmax):\n # if kp_depth[i][j] == 0:\n # break\n # for l in range(ymin, ymax):\n # if (kp_raw_depth[i][j] - all_depth[k][l]) > threshold:\n # '''\n # Means that the all_depth concludes a very near pixel,\n # so the corresponding pixel in kp_raw_depth must be in back.\n # '''\n # if all_depth[k][l] > 1:\n # current_ratio = (kp_raw_depth[i][j] - all_depth[k][l])\\\n # /(max_kp_depth - min_kp_depth)\n # # print(\"current_ratio\", current_ratio)\n # # print(\"Delete one kp in back!\")\n # kp_depth[i][j] = 0\n # label_matrix[i][j] = 0\n # break\n # # embed()\n # return kp_depth, label_matrix\n\ndef trans_vertices_by_pose(ori_vertices, pose):\n # make points homogeneous, copy them to maintain the originals\n ori_vertices = np.array(ori_vertices, )\n ext_model_points = np.ones((4,ori_vertices.shape[0]))\n ext_model_points[:3,:] = np.copy(ori_vertices.T)\n trans_vertices = np.dot(np.array(pose), ext_model_points)\n # Transfer points to original form\n trans_vertices = trans_vertices.T\n trans_vertices = np.copy(trans_vertices[:, :3])\n return trans_vertices\n\ndef 
print_kp_result_distance(c):\n for x, y in enumerate(c):\n print(x,abs(y[0])+abs(y[1]))\n\ndef jitter_bbox(bbox, jitter):\n '''Jitter given bbox, a way of data augmentation.\n bbox: [xmin, ymin, xmax, ymax]\n '''\n newbbox = copy.copy(bbox)\n oh = bbox[3] - bbox[1];\n ow = bbox[2] - bbox[0];\n dw = (ow*jitter);\n dh = (oh*jitter);\n pleft = int(random.uniform(-dw, dw));\n pright = int(random.uniform(-dw, dw));\n ptop = int(random.uniform(-dh, dh));\n pbot = int(random.uniform(-dh, dh));\n newbbox[0] = bbox[0] + pleft\n newbbox[1] = bbox[1] + ptop\n newbbox[2] = bbox[2] + pright\n newbbox[3] = bbox[3] + pbot\n return newbbox\n\ndef get_bbox_from_mask(mask, KP=False):\n '''Given a mask, this returns the bounding box annotations\n\n Args:\n mask(NumPy Array): Array with the mask\n Returns:\n tuple: Bounding box annotation (xmin, xmax, ymin, ymax)\n '''\n rows = np.any(mask, axis=1)\n cols = np.any(mask, axis=0)\n if len(np.where(rows)[0]) > 0:\n ymin, ymax = np.where(rows)[0][[0, -1]]\n xmin, xmax = np.where(cols)[0][[0, -1]]\n return xmin, xmax, ymin, ymax\n else:\n return -1, -1, -1, -1\n\ndef rotate(image, angle, center=None, scale=1.0, if_INTER_NEAREST = False):\n # grab the dimensions of the image\n (h, w) = image.shape[:2]\n\n # if the center is None, initialize it as the center of\n # the image\n if center is None:\n center = (w // 2, h // 2)\n\n # perform the rotation\n M = cv2.getRotationMatrix2D(center, angle, scale)\n \n if if_INTER_NEAREST:\n rotated = cv2.warpAffine(image, M, (w, h),flags=cv2.INTER_NEAREST)\n else:\n rotated = cv2.warpAffine(image, M, (w, h))\n # return the rotated image\n return rotated\n\ndef visualize_img(img, ifdepth = False, filename = \"test.png\"):\n \"\"\"Visualize input img.\n Args: nparray with shape H * W * 3\n \"\"\"\n output_img = np.zeros((480, 640, 3))\n if ifdepth:\n for i in range(len(img)):\n for j in range(len(img[i])):\n if img[i][j]!= 0:\n output_img[i][j][0] = 255\n output_img[i][j][1] = 255\n output_img[i][j][2] = 
255\n else:\n output_img = img\n # plt.annotate(r'00',\n # xy=(1, 3), xycoords='data',\n # xytext=(+10, +10), textcoords='offset points', fontsize=16,\n # arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3,rad=.2\"))\n cv2.imwrite(filename, img)\n plt.imshow(img)\n plt.show()\n # x,y is the kp coordinates, xytext is the location of annotation\n\n return img\n\ndef visualize_kp_in_img(img, keypoints):\n \"\"\"Visualize input img.\n Args: nparray with shape H * W * 3\n \"\"\"\n output_img = np.zeros((480, 640, 3))\n output_img = img\n # embed()\n plt.imshow(img)\n for idx, kp in enumerate(keypoints):\n x = kp[0]\n y = kp[1]\n show_idx = str(idx)\n # x,y is the kp coordinates, xytext is the location of annotation\n plt.annotate(show_idx,\n xy=(x, y), xycoords='data',\n xytext=(+10, +10), textcoords='offset points', fontsize=4,\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3,rad=.2\"))\n # cv2.imwrite(\"test_kp_in_img\", img)\n plt.show()\n\n return img\n\ndef generate_mask_img(depth, if_all_black = True):\n \"\"\"Generate depth masked img.\n Args:\n depth: H*W np array(480*640)\n \"\"\"\n img = np.zeros((480, 640, 3))\n for _h in range(0, 480):\n for _w in range(0, 640):\n if depth[_h][_w]!=0:\n if if_all_black:\n img[_h][_w][0] = 255\n img[_h][_w][1] = 255\n img[_h][_w][2] = 255\n else:\n img[_h][_w][0] = depth[_h][_w]\n img[_h][_w][1] = depth[_h][_w]\n img[_h][_w][2] = depth[_h][_w]\n return img\n\n\ndef print_all_nonzero(A):\n x, y = np.nonzero(A)\n print(\"Indexes are\", x, y)\n print(\"Values are\", A[x, y])\n\ndef draw_detections_2D(image, detections):\n \"\"\"Draws detections onto resized image with name and confidence\n\n Parameters\n ----------\n image: Numpy array, normalized to [0-1]\n detections: A list of detections for this image, coming from SSD.detect() in the form\n [l, t, r, b, name, confidence, .....]\n\n \"\"\"\n out = np.copy(image)\n for det in detections:\n lt = (int(det[0] * image.shape[1]), int(det[1] * image.shape[0]))\n rb 
= (int(det[2] * image.shape[1]), int(det[3] * image.shape[0]))\n text = '{}: {:.2f}'.format(det[4], det[5])\n cv2.rectangle(out, lt, rb, (0., 1., 0.), 2)\n cv2.putText(out, text, lt, 0, 0.8, (0., 1., 0.), 2)\n return out\n\n\ndef draw_detections_3D(image, detections, cam, model_map, thres):\n \"\"\"Draws 6D detections onto resized image\n\n Parameters\n ----------\n image: Numpy array, normalized to [0-1]\n detections: A list of detections for this image, coming from SSD.detect() in the form\n [ name, confidence,l, t, r, b, 6D_pose0, ..., 6D_poseN]\n cam: Intrinsics for rendering\n model_map: Mapping of model name to Model3D instance {'obj': model3D}\n\n \"\"\"\n if not detections:\n return np.copy(image)\n\n ren = Renderer((image.shape[1], image.shape[0]), cam)\n ren.clear()\n ren.set_cam(cam)\n out = np.copy(image)\n for det in detections:\n if det[1] < thres:\n break\n model = model_map['{:02d}'.format(int(det[0])+1)]\n for pose in det[6:]:\n #pose = [[0.32426249,0.94596714,0. ,0.02060331692],[0.45433376,-0.15573839,-0.87711253,0.0040045299],[-0.82971963,0.2844147,-0.48028493,0.72818325105],[0,0,0,1]]\n #print('ssss')\n #ren.draw_model(model, pose)\n ren.draw_boundingbox(model, pose)\n col, dep = ren.finish()\n # Copy the rendering over into the scene\n mask = np.dstack((dep, dep, dep)) > 0\n out[mask] = col[mask]\n return out\n\n\ndef compute_rotation_from_vertex(vertex):\n \"\"\"Compute rotation matrix from viewpoint vertex \"\"\"\n up = [0, 0, 1]\n if vertex[0] == 0 and vertex[1] == 0 and vertex[2] != 0:\n up = [-1, 0, 0]\n rot = np.zeros((3, 3))\n rot[:, 2] = -vertex / norm(vertex) # View direction towards origin\n rot[:, 0] = np.cross(rot[:, 2], up)\n rot[:, 0] /= norm(rot[:, 0])\n rot[:, 1] = np.cross(rot[:, 0], -rot[:, 2])\n return rot.T\n\n\ndef create_pose(camR, scale=0, angle_deg=0):\n \"\"\"Compute rotation matrix from viewpoint vertex and inplane rotation \"\"\"\n rot = camR.reshape(3,3)\n transform = np.eye(4)\n rodriguez = np.asarray([0, 0, 1]) * 
(angle_deg * math.pi / 180.0)\n angle_axis = expm(np.cross(np.eye(3), rodriguez))\n transform[0:3, 0:3] = np.matmul(angle_axis, rot)\n transform[0:3, 3] = [0, 0, scale]\n return transform\n\n\ndef precompute_projections(camR, inplanes, cam, model3D, bbox_list):\n \"\"\"Precomputes the projection information needed for 6D pose construction\n\n # Arguments\n camR: List of cam_R_m2c\n inplanes: List of inplane angles in degrees\n cam: Intrinsics to use for translation estimation\n model3D: Model3D instance\n\n # Returns\n data: a 3D list with precomputed entities with shape\n (views, inplanes, (4x4 pose matrix, 3) )\n 3: norm_centroid_x, norm_centroid_y, lr\n \"\"\"\n # w, h = 400.0, 400.0\n #ren = Renderer((w, h), cam)\n data = []\n if model3D.vertices is None:\n return data\n\n print(len(camR))\n for v in tqdm(range(len(camR))):\n data.append([])\n for i in inplanes:\n pose = create_pose(np.array(camR[v]), angle_deg=i)\n pose[:3, 3] = [0,0,0.5] # zr = 0.65\n\n # Render object and extract tight 2D bbox and projected 2D centroid\n #ren.clear()\n #ren.draw_model(model3D, pose)\n #box = np.argwhere(ren.finish()[1]) # Deduct bbox from depth rendering\n bbox = np.array(bbox_list[v])\n box = [bbox[0], bbox[1], bbox[0]+bbox[2], bbox[1]+bbox[3]]\n centroid = np.matmul(pose[:3, :3], model3D.centroid) + pose[:3, 3]\n \n centroid_x = cam[0, 2] + centroid[0] * cam[0, 0] / centroid[2]\n centroid_y = cam[1, 2] + centroid[1] * cam[1, 1] / centroid[2]\n \n\n # Compute 2D centroid position in normalized, box-local reference frame\n box_w, box_h = (box[2] - box[0]), (box[3] - box[1])\n norm_centroid_x = (centroid_x - box[0]) / float(box_w)\n norm_centroid_y = (centroid_y - box[1]) / float(box_h)\n # Compute normalized diagonal box length\n lr = np.sqrt((box_w) ** 2 + (box_h) ** 2)\n \n data[-1].append((pose, [norm_centroid_x, norm_centroid_y, lr]))\n return data\n\ndef create_src_pointcloud(mask, color, depth, cam, model_map, img_size=(720, 540), num_tpls=641, num_inplane=4, 
select_tpls=3, select_inplane=1):\n '''Convert partial img in bbox to pointcloud\n # Arguments\n mask: Mask using to choose suitable outlines, size: D*VI*H*W\n color, depth: bench img in RGB/D, size: H*W*3/H*W\n (in this function color is not used)\n cam: Intrinsics to use for backprojection\n\n # Returns\n points: D*vi*N*3 pointcloud, where D means number of dets, N depends on the \n size of bbox, 3 means XYZ coordinates \n '''\n width = img_size[0]\n height = img_size[1]\n points = []\n # print(\"Number of dets in one img is %d.\" %len(image_dets))\n for det in mask:\n det_points = []\n for vi in det:\n vi_points = []\n # Cover all the points in one img\n for _h in range(img_size[1]):\n for _w in range(img_size[0]):\n if vi[_h][_w] >0: # If not masked\n if depth[_h][_w] > 0.001: # Avoid storing (0.0, 0.0, 0.0)\n # Backproject depth img to pointcloud\n tmp_z = depth[_h][_w] # Derive depth from depth img\n tmp_x = tmp_z * (_w - cam[0, 2]) / cam[0, 0]\n tmp_y = tmp_z * (_h - cam[1, 2]) / cam[1, 1]\n tmp_xyz = [tmp_x, tmp_y, tmp_z] \n vi_points.append(tmp_xyz)\n det_points.append(vi_points)\n points.append(det_points)\n return points\n\ndef create_mask(des_points, cam, img_size = (720, 540)):\n ''' Create mask for creating src_points\n Arguments\n des_points: D * VI * N * 3 points, D is number of detections, VI is number of v,i\n pair, default VI = 25.\n cam: Intrinsics to use for projection\n Return\n mask: D*VI*H*W, mask == 1/0 means the pixel should be/not be lifted\n '''\n mask = []\n for det in des_points:\n mask_det = []\n for vi in det: \n # Initialize mask with all zero\n mask_vi = []\n for _h in range(img_size[1]):\n mask_vi.append([]) \n for _w in range(img_size[0]):\n mask_vi[_h].append(0)\n\n for point in vi:\n px = point[0]\n py = point[1]\n pz = point[2]\n # Project 3D point to pixel space\n x = px * cam[0, 0] / pz + cam[0, 2]\n y = py * cam[1, 1] / pz + cam[1, 2]\n # Activate corresponding mask\n if (int(y) > 0) and (int(y) < img_size[1]) and (int(x) > 
0) and (int(x) < img_size[0]): \n mask_vi[int(y)][int(x)] = 1\n mask_det.append(mask_vi)\n mask.append(mask_det)\n return mask\n\ndef output_pointcloud(points, output_filename, scale_back = 1000):\n \"\"\" Output pointcloud in ply format for visualization\n Arguments\n points: N*3 list\n output_filename: points will be saved in file named output_filename\n \"\"\"\n with open(output_filename, 'w') as f:\n f.write(\"ply\\n\")\n f.write(\"format ascii 1.0\\n\")\n f.write(\"element vertex {0}\\n\".format(len(points)))\n f.write(\"property float x\\n\")\n f.write(\"property float y\\n\")\n f.write(\"property float z\\n\") \n f.write(\"end_header\\n\")\n for point in points:\n f.write(\"{x} {y} {z} \\n\".format(\n x = point[0] * scale_back,\n y = point[1] * scale_back,\n z = point[2] * scale_back,\n ))\n print(\"Pointcloud has been saved in file %s\" %output_filename)\n\ndef log_string(LOG_FOUT, out_str): \n LOG_FOUT.write(out_str+'\\n')\n LOG_FOUT.flush()\n print(out_str)\n\ndef build_6D_poses(detections, model_map, cam, img_size=(720, 540), num_tpls=641, num_inplane=4, select_tpls=3, select_inplane=1):\n \"\"\"Processes the detections to build full 6D poses\n\n # Arguments\n detections: List of predictions for every image. Each prediction is:\n [label, confidence, xmin, ymin, xmax, ymax, \n view0_conf, ..., viewN_conf, inplane0_conf, ... , inplaneM_conf]\n model_map: Mapping of model name to Model3D instance {'obj': model3D}\n cam: Intrinsics to use for backprojection\n\n # Returns\n new_detections: List of list of 6D predictions for every picture.\n Each prediction has the form:\n [label, confidence, xmin, ymin, xmax, ymax, \n (pose00), ..., (poseNM)] where poseXX is a 4x4 matrix\n\n des_points: Model points changed by pose\n \"\"\"\n new_detections = []\n des_points = []\n # print(\"One 6D detections contains %d images. \"%len(detections))\n for image_dets in detections:\n new_image_dets = []\n image_des_points = []\n print(\"Number of 6D dets in one img is %d. 
\" %len(image_dets))\n for det in image_dets:\n # print(det[0])\n det = det.tolist()\n new_det = det[:6] # Copy over 2D bbox, label and confidence\n box_w, box_h = det[4] - det[2], det[5] - det[3]\n ls = np.sqrt((box_w*img_size[0]) ** 2 + (box_h*img_size[1]) ** 2)\n\n projected = model_map['{:02d}'.format(int(det[0])+1)].projections\n model_points = model_map['{:02d}'.format(int(det[0])+1)].vertices\n # Model_points: N*3 list\n\n # print(\"length of model_points is %d\"%len(model_points))\n # print(\"length of point in model_points is %d\"%len(model_points[4]))\n vicounter = 0\n\n # make points homogeneous, copy them to maintain the originals\n model_points = np.array(model_points)\n ext_model_points = np.ones((4,model_points.shape[0]))\n ext_model_points[:3,:] = np.copy(model_points.T)\n # print(ext_model_points.shape)\n\n vi_group_points = []\n for v in np.argsort(det[6:6+num_tpls])[-select_tpls:]: \n # rank by confidence and choose select_tpls many views...\n for i in np.argsort(det[6+num_tpls:6+num_tpls+num_inplane])[-select_inplane:]:\n if not projected: # No pre-projections available for this model, skip...\n new_det.append(np.eye(4))\n continue\n pose = projected[int(v)][i][0]\n norm_centroid_x, norm_centroid_y, lr = projected[int(v)][i][1]\n pose[2, 3] = 0.5 * lr / ls # Compute depth from projective ratio\n # print(\"Testing lr, ls and pose[2,3] info------------------------------------\")\n # print(lr, ls, pose[2, 3])\n vicounter = vicounter + 1\n # print(norm_centroid_x, norm_centroid_y, lr)\n # Compute the new 2D centroid in pixel space\n new_centroid_x = (det[2] + norm_centroid_x * box_w) * img_size[0]\n new_centroid_y = (det[3] + norm_centroid_y * box_h) * img_size[1]\n # print(new_centroid_x,new_centroid_y)\n # Backproject into 3D metric space\n pose[0, 3] = pose[2, 3] * (new_centroid_x - cam[0, 2]) / cam[0, 0]\n pose[1, 3] = pose[2, 3] * (new_centroid_y - cam[1, 2]) / cam[1, 1]\n new_det.append(pose)\n # Apply pose\n vi_points = np.dot(np.array(pose), 
ext_model_points)\n # Transfer points to original form\n vi_points = vi_points.T\n vi_points = np.copy(vi_points[:, :3])\n #print(\"vi points info: \", vi_points.shape)\n vi_points = vi_points.tolist()\n vi_group_points.append(vi_points) \n # print(\"end %d v,i pairs-------------------------------------\"%int(vicounter))\n new_image_dets.append(new_det)\n image_des_points.append(vi_group_points)\n new_detections.append(new_image_dets)\n des_points.append(image_des_points)\n\n return new_detections[0], des_points[0]\n\n\ndef verify_6D_poses(detections, model_map, cam, image):\n \"\"\"For one image, select for each detection the best pose from the 6D pool\n\n # Arguments\n detections: List of predictions for one image. Each prediction is:\n [xmin, ymin, xmax, ymax, label, confidence,\n (pose00), ..., (poseNM)] where poseXX is a 4x4 matrix\n model_map: Mapping of model name to Model3D instance {'obj': model3D}\n cam: Intrinsics to use for backprojection\n image: The scene color image\n\n # Returns\n filtered: List of predictions for one image.\n Each prediction has the form:\n [label, confidence, xmin, ymin, xmax, ymax, pose] where pose is a 4x4 matrix\n\n \"\"\"\n\n def compute_grads_and_mags(color):\n gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)\n grads = np.dstack((cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=5),\n cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=5)))\n mags = np.sqrt(np.sum(grads**2, axis=2)) + 0.001 # To avoid div/0\n grads /= np.dstack((mags, mags))\n mask = mags < 5\n mags[mask] = 0\n grads[np.dstack((mask, mask))] = 0\n return grads, mags\n\n scene_grads, scene_mags = compute_grads_and_mags(image)\n scene_grads = np.reshape(scene_grads, (-1, 2))\n #cv2.imshow('mags', scene_mags)\n\n ren = Renderer((image.shape[1], image.shape[0]), cam)\n ren.set_cam(cam)\n filtered = []\n for det in detections:\n model = model_map['{:02d}'.format(int(det[0])+1)]\n scores = []\n for pose in det[6:]:\n ren.clear()\n ren.draw_model(model, pose)\n ren_grads, ren_mags = 
compute_grads_and_mags(ren.finish()[0])\n ren_grads = np.reshape(ren_grads, (-1, 2))\n dot = np.sum(np.abs(ren_grads[:, 0]*scene_grads[:, 0] + ren_grads[:, 1]*scene_grads[:, 1]))\n sum = np.sum(ren_mags>0)\n scores.append(dot / (sum+1))\n new_det = det[:6]\n # print(new_det)\n new_det.append(det[6 + np.argmax(np.asarray(scores))]) # Put best pose first\n filtered.append(new_det)\n\n return filtered\n" ]
[ [ "numpy.copy", "numpy.where", "numpy.nonzero", "numpy.eye", "numpy.sqrt", "numpy.cross", "numpy.array", "numpy.matmul", "numpy.zeros", "numpy.reshape", "numpy.argsort", "numpy.dstack", "matplotlib.pyplot.show", "scipy.linalg.norm", "numpy.asarray", "numpy.ascontiguousarray", "numpy.sum", "numpy.ones", "numpy.any", "numpy.abs", "matplotlib.pyplot.imshow" ] ]
Muflhi01/Chatistics
[ "c091db38099f9edf9b39c2ed5fe99ace6a864d87" ]
[ "export.py" ]
[ "import argparse\nimport sys\nimport logging\nfrom utils import ArgParseDefault, add_load_data_args, load_data\nimport pandas as pd\nimport os\nfrom datetime import datetime\nimport pickle\n\nlog = logging.getLogger(__name__)\n\n\ndef main():\n \"\"\"Simple method to export message logs to either stdout or to a file\"\"\"\n\n def get_f_name(compressed):\n ts = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n f_path = os.path.join('exports', f'chatistics_export_{ts}.{args.format}')\n if compressed:\n f_path += '.zip'\n return f_path\n\n parser = ArgParseDefault(description='Export parsed chatlog data')\n parser = add_load_data_args(parser)\n parser.add_argument('-n', '--num-rows', dest='num_rows', type=int,\n default=50, help='Print first n rows (use negative for last rows) (only used if output format is stdout)')\n parser.add_argument('-c', '--cols', dest='cols', nargs='+',\n default=['timestamp', 'conversationWithName', 'senderName', 'outgoing', 'text', 'language', 'platform'],\n help='Only show specific columns (only used if output format is stdout)')\n parser.add_argument('-f', '--format', dest='format', default='stdout', choices=['stdout', 'json', 'csv', 'pkl'], help='Output format')\n parser.add_argument('--compress', action='store_true', help='Compress the output (only used for json and csv formats)')\n\n args = parser.parse_args()\n df = load_data(args)\n if args.format == 'stdout':\n # Print data to stdout\n df = df.iloc[:args.num_rows]\n df.loc[:, 'timestamp'] = pd.to_datetime(df.timestamp, unit='s')\n pd.set_option('display.max_colwidth', 100)\n with pd.option_context('display.max_rows', 1000, 'display.width', -1):\n print(df[args.cols].to_string(index=False))\n else:\n # Exporting data to a file\n f_name = get_f_name(args.compress)\n log.info(f'Exporting data to file {f_name}')\n compression = 'zip' if args.compress else None\n if args.format == 'json':\n df.to_json(f_name, orient='records', compression=compression)\n elif args.format == 'csv':\n 
df.to_csv(f_name, index=False, compression=compression)\n elif args.format == 'pkl':\n with open(f_name, 'wb', encoding=\"utf8\") as f:\n pickle.dump(df, f)\n else:\n raise Exception(f'Format {args.format} is not supported.')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.to_datetime", "pandas.option_context", "pandas.set_option" ] ]
HongzhouTang/Pros-GNN
[ "b374166bb4789464e9c3c65b45432ebac1acf28d" ]
[ "train/layers.py" ]
[ "import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom utils import *\nimport time\nimport torch.nn as nn\nclass Gate(torch.nn.Module):\n def __init__(self, n_in_feature, n_out_feature):\n super(Gate, self).__init__()\n self.W = nn.Linear(n_in_feature, n_out_feature)\n self.A = nn.Parameter(torch.zeros(size=(n_out_feature, n_out_feature)))\n self.gate = nn.Linear(n_out_feature*2, 1)\n\n def forward(self, x, adj):\n h = self.W(x) \n h_prime = F.leaky_relu(torch.einsum('aij,ajk->aik',(adj, h)))\n coeff = torch.sigmoid(self.gate(torch.cat([x,h_prime], -1))).repeat(1,1,x.size(-1))\n retval = coeff*x+(1-coeff)*h_prime\n return retval\n\n\n\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.einsum", "torch.zeros" ] ]
svenruf/trackintel
[ "8b3482f0a4f44cf5f5de366198a39ca54ac04e75" ]
[ "trackintel/preprocessing/positionfixes.py" ]
[ "from math import radians\n\nimport geopandas as gpd\nimport numpy as np\nimport pandas as pd\nfrom shapely.geometry import LineString, Point\nfrom sklearn.cluster import DBSCAN\n\nfrom trackintel.geogr.distances import haversine_dist\n\n\ndef generate_staypoints(positionfixes,\n method='sliding',\n dist_func=haversine_dist,\n dist_threshold=50,\n time_threshold=300,\n epsilon=100,\n num_samples=1):\n \"\"\"\n Generate staypoints from positionfixes.\n \n Parameters\n ----------\n positionfixes : GeoDataFrame (as trackintel positionfixes)\n The positionfixes have to follow the standard definition for positionfixes DataFrames.\n\n method : {'sliding' or 'dbscan'}\n Method to create staypoints. \n \n - 'sliding' : Applies a sliding window over the data.\n - 'dbscan' : Uses the DBSCAN algorithm to find clusters of staypoints.\n \n dist_func : {'haversine_dist'}\n The distance metric used by the applied method.\n \n dist_threshold : float, default 50\n The distance threshold for the 'sliding' method, i.e., how far someone has to travel to\n generate a new staypoint. Units depend on the dist_func parameter.\n\n time_threshold : float, default 300 (seconds)\n The time threshold for the 'sliding' method in seconds\n \n epsilon : float, default 100\n The epsilon for the 'dbscan' method. Units depend on the dist_func parameter.\n \n num_samples : int, default 1\n The num_samples for the 'dbscan' method. The minimal number of samples in a cluster. \n \n Returns\n -------\n ret_pfs: GeoDataFrame (as trackintel positionfixes)\n The original positionfixes with a new column ``[`staypoint_id`]``.\n \n ret_spts: GeoDataFrame (as trackintel staypoints)\n The generated staypoints.\n\n Examples\n --------\n >>> pfs.as_positionfixes.generate_staypoints('sliding', dist_threshold=100)\n\n References\n ----------\n Zheng, Y. (2015). Trajectory data mining: an overview. 
ACM Transactions on Intelligent Systems \n and Technology (TIST), 6(3), 29.\n\n Li, Q., Zheng, Y., Xie, X., Chen, Y., Liu, W., & Ma, W. Y. (2008, November). Mining user \n similarity based on location history. In Proceedings of the 16th ACM SIGSPATIAL international \n conference on Advances in geographic information systems (p. 34). ACM.\n \"\"\"\n # copy the original pfs for adding 'staypoint_id' column\n ret_pfs = positionfixes.copy()\n\n elevation_flag = 'elevation' in ret_pfs.columns # if there is elevation data\n\n name_geocol = ret_pfs.geometry.name\n ret_spts = pd.DataFrame(columns=['id', 'user_id', 'started_at', 'finished_at', 'geom'])\n\n # TODO: tests using a different distance function, e.g., L2 distance\n if method == 'sliding':\n # Algorithm from Li et al. (2008). For details, please refer to the paper.\n ret_spts = ret_pfs.groupby('user_id', as_index=False).apply(_generate_staypoints_sliding_user,\n name_geocol,\n elevation_flag,\n dist_threshold,\n time_threshold,\n dist_func).reset_index(drop=True)\n # index management\n ret_spts['id'] = np.arange(len(ret_spts))\n ret_spts.set_index('id', inplace=True)\n\n # Assign staypoint_id to ret_pfs if spts is detected \n if not ret_spts.empty:\n stps2pfs_map = ret_spts[['pfs_id']].to_dict()['pfs_id']\n\n ls = []\n for key, values in stps2pfs_map.items():\n for value in values:\n ls.append([value, key])\n temp = pd.DataFrame(ls, columns=['id', 'staypoint_id']).set_index('id')\n # pfs with no stps receives nan in 'staypoint_id'\n ret_pfs = ret_pfs.join(temp, how='left')\n ret_spts.drop(columns={'pfs_id'}, inplace=True)\n # if no staypoint is identified\n else:\n ret_pfs['staypoint_id'] = np.nan\n\n # TODO: create tests for dbscan method\n # TODO: currently only support haversine distance, provode support for other distances, \n # we could use the same structure as generate_location function\n elif method == 'dbscan':\n # TODO: Make sure time information is included in the clustering!\n # time information is in 
the column 'started at', however the user should be able to\n # adjust the distance metric e.g. chebychev\n\n # TODO: fix bug: generated staypoints has id starting from 0 for each user\n ret_pfs = ret_pfs.groupby(\"user_id\").apply(_generate_staypoints_dbscan_user,\n name_geocol,\n epsilon,\n num_samples)\n\n # TODO: staypoint 'elevation' field\n # TODO: using dissolve for staypoint generation\n # create staypoints as the center of the grouped positionfixes\n grouped_df = ret_pfs.groupby(['user_id', 'staypoint_id'])\n for combined_id, group in grouped_df:\n user_id, staypoint_id = combined_id\n\n if int(staypoint_id) != -1:\n staypoint = {}\n staypoint['user_id'] = user_id\n staypoint['id'] = staypoint_id\n\n # point geometry of staypoint\n staypoint[name_geocol] = Point(group[name_geocol].x.mean(),\n group[name_geocol].y.mean())\n\n ret_spts = ret_spts.append(staypoint, ignore_index=True)\n ret_spts.set_index('id', inplace=True)\n \n ret_pfs = gpd.GeoDataFrame(ret_pfs, geometry=name_geocol,crs=ret_pfs.crs)\n ret_spts = gpd.GeoDataFrame(ret_spts, geometry=name_geocol,crs=ret_pfs.crs)\n \n ## dtype consistency \n # stps id (generated by this function) should be int64\n ret_spts.index = ret_spts.index.astype('int64')\n # ret_pfs['staypoint_id'] should be Int64 (missing values)\n ret_pfs['staypoint_id'] = ret_pfs['staypoint_id'].astype('Int64')\n\n # user_id of spts should be the same as ret_pfs\n ret_spts['user_id'] = ret_spts['user_id'].astype(ret_pfs['user_id'].dtype)\n\n return ret_pfs, ret_spts\n\n\ndef generate_triplegs(positionfixes, staypoints=None, method='between_staypoints'):\n \"\"\"\n Generate triplegs from positionfixes.\n\n A tripleg is (for now) defined as anything that happens between two consecutive staypoints.\n\n **Attention**: This function requires either a column ``staypoint_id`` on the \n positionfixes or passing some staypoints that correspond to the positionfixes! 
\n This means you usually should call ``extract_staypoints()`` first.\n\n Parameters\n ----------\n positionfixes : GeoDataFrame (as trackintel positionfixes)\n The positionfixes have to follow the standard definition for positionfixes DataFrames.\n\n staypoints : GeoDataFrame (as trackintel staypoints), optional\n The staypoints (corresponding to the positionfixes). If this is not passed, the\n positionfixes need staypoint_id associated with them.\n\n method: {'between_staypoints'}\n Method to create triplegs. \n \n - 'between_staypoints': A tripleg is defined as all positionfixes \\\n between two staypoints. This method requires either a column ``staypoint_id`` on \\\n the positionfixes or passing staypoints as an input.\n\n Returns\n -------\n ret_pfs: GeoDataFrame (as trackintel positionfixes)\n The original positionfixes with a new column ``[`tripleg_id`]``.\n \n ret_tpls: GeoDataFrame (as trackintel triplegs)\n The generated triplegs.\n\n Notes\n -----\n Methods ``between_staypoints`` creates a tripleg from all positionfixes between two sequential\n staypoinst. The latest positionfix of a staypoint is at the same time the first positionfix of corresponding\n tripleg. This means that the a staypoint and the following tripleg share 1 trackpoint.\n To use the method 'between_staypoints' you need to provide staypoints, positionfixes with a column 'staypoint_id'\n or both. 
It is recommended to provide both as it increases the performance.\n\n Examples\n --------\n >>> pfs.as_positionfixes.generate_triplegs(staypoints)\n \"\"\"\n # copy the original pfs for adding 'staypoint_id' column\n ret_pfs = positionfixes.copy()\n\n ret_tpls = pd.DataFrame(columns=['id', 'user_id', 'started_at', 'finished_at', 'geom'])\n if method == 'between_staypoints':\n\n # get case:\n # Case 1: Staypoints are provided and are connected to positionfixes which have a column 'staypoint_id'\n # Case 2: Staypoints are provided but positionfixes do not have a column 'staypoint_id'\n # case 3: Staypoints are not provided but positionfixes have a column 'staypoint_id'\n\n\n if staypoints is not None and \"staypoint_id\" in ret_pfs:\n case = 1\n elif staypoints is not None:\n case = 2\n elif \"staypoint_id\" in ret_pfs:\n case = 3\n else:\n raise Exception('unknown case')\n\n # generated_triplegs is a list to which we will append tripleg records. Every tripleg record is a dictionary\n # with the following keys: ['id', 'user_id', 'started_at', 'finished_at', 'geom', 'pfs_ids']\n generated_triplegs = []\n for user_id_this in ret_pfs['user_id'].unique():\n\n positionfixes_user_this = ret_pfs.loc[ret_pfs['user_id'] == user_id_this].sort_values(\n 'tracked_at') # this is no copy\n if positionfixes_user_this.empty:\n continue\n\n # Case 1: Staypoints exist and are connected to positionfixes by user id\n if case == 1:\n generated_triplegs.extend(_triplegs_between_staypoints_case1(positionfixes_user_this, staypoints,\n user_id_this))\n\n # Case 2: Staypoints exist but there is no user_id given\n elif case == 2:\n generated_triplegs.extend(_triplegs_between_staypoints_case2(positionfixes_user_this, staypoints,\n user_id_this))\n\n # case 3: Only positionfixes with staypoint id for tripleg generation\n elif case == 3:\n generated_triplegs.extend(_triplegs_between_staypoints_case3(positionfixes_user_this, user_id_this))\n\n # create tripleg dataframe\n columns_triplegs = 
['user_id', 'started_at', 'finished_at', 'geom', 'pfs_ids']\n if len(generated_triplegs) == 0:\n ret_tpls = gpd.GeoDataFrame(columns=columns_triplegs,\n geometry='geom', crs=ret_pfs.crs)\n else:\n ret_tpls = gpd.GeoDataFrame(generated_triplegs, geometry='geom', crs=ret_pfs.crs)\n # sanity check for tripleg generation\n assert len(columns_triplegs) == len(ret_tpls.columns), \"Unexpected or missing column in tripleg generation\"\n for col in columns_triplegs:\n assert col in ret_tpls.columns, \"Unexpected columns in tripleg generation.\"\n \n # index management \n ret_tpls['id'] = np.arange(len(ret_tpls))\n ret_tpls.set_index('id', inplace=True)\n\n # assign tripleg_id to positionfixes\n if not ret_tpls.empty:\n tripleg_ids = ret_tpls['pfs_ids'].explode()\n # swap index and values\n tripleg_ids = pd.Series(tripleg_ids.index, index=tripleg_ids.values, name='tripleg_id')\n ret_pfs = ret_pfs.join(tripleg_ids, how='left')\n else:\n ret_pfs['tripleg_id'] = np.nan\n ret_tpls = ret_tpls.drop('pfs_ids', axis=1)\n\n ## dtype consistency\n # tpls id (generated by this function) should be int\n ret_tpls.index = ret_tpls.index.astype('int64')\n # ret_pfs['tripleg_id'] should be Int64 (missing values)\n ret_pfs['tripleg_id'] = ret_pfs['tripleg_id'].astype('Int64')\n # user_id of tpls should be the same as ret_pfs\n ret_tpls['user_id'] = ret_tpls['user_id'].astype(ret_pfs['user_id'].dtype)\n\n return ret_pfs, ret_tpls\n\n else:\n raise NameError('Chosen method is not defined')\n\n\ndef _generate_staypoints_sliding_user(df,\n name_geocol,\n elevation_flag,\n dist_threshold=50,\n time_threshold=300,\n dist_func=haversine_dist):\n ret_spts = pd.DataFrame(columns=['user_id', 'started_at', 'finished_at', 'geom'])\n df.sort_values('tracked_at', inplace=True)\n\n # pfs id should be in index, create separate idx for storing the matching\n pfs = df.to_dict('records')\n idx = df.index.to_list()\n\n num_pfs = len(pfs)\n\n i = 0\n j = 0 # is zero because it gets incremented in the beginning\n 
while i < num_pfs:\n if j == num_pfs:\n # We're at the end, this can happen if in the last \"bin\", \n # the dist_threshold is never crossed anymore.\n break\n else:\n j = i + 1\n\n while j < num_pfs:\n # TODO: Can we make distance function independent of projection?\n dist = dist_func(pfs[i][name_geocol].x, pfs[i][name_geocol].y,\n pfs[j][name_geocol].x, pfs[j][name_geocol].y)\n\n if dist > dist_threshold:\n delta_t = pfs[j]['tracked_at'] - pfs[i]['tracked_at']\n if delta_t.total_seconds() > time_threshold:\n staypoint = {}\n staypoint['user_id'] = pfs[i]['user_id']\n staypoint[name_geocol] = Point(np.mean([pfs[k][name_geocol].x for k in range(i, j)]),\n np.mean([pfs[k][name_geocol].y for k in range(i, j)]))\n if elevation_flag:\n staypoint['elevation'] = np.mean([pfs[k]['elevation'] for k in range(i, j)])\n staypoint['started_at'] = pfs[i]['tracked_at']\n staypoint['finished_at'] = pfs[j - 1]['tracked_at']\n\n # store matching, index should be the id of pfs\n staypoint['pfs_id'] = [idx[k] for k in range(i, j)]\n\n # add staypoint\n ret_spts = ret_spts.append(staypoint, ignore_index=True)\n\n # TODO Discussion: Is this last point really a staypoint? 
As we don't know if the\n # person \"moves on\" afterwards...\n if j == num_pfs - 1:\n staypoint = {}\n staypoint['user_id'] = pfs[j]['user_id']\n staypoint[name_geocol] = Point(pfs[j][name_geocol].x, pfs[j][name_geocol].y)\n if elevation_flag:\n staypoint['elevation'] = pfs[j]['elevation']\n staypoint['started_at'] = pfs[j]['tracked_at']\n staypoint['finished_at'] = pfs[j]['tracked_at']\n # store matching, index should be the id of pfs\n staypoint['pfs_id'] = [idx[j]]\n\n ret_spts = ret_spts.append(staypoint, ignore_index=True)\n i = j\n break\n j = j + 1\n\n return ret_spts\n\n\ndef _generate_staypoints_dbscan_user(pfs,\n name_geocol,\n epsilon=100,\n num_samples=1):\n db = DBSCAN(eps=epsilon / 6371000, min_samples=num_samples, algorithm='ball_tree', metric='haversine')\n\n # TODO: enable transformations to temporary (metric) system\n transform_crs = None\n if transform_crs is not None:\n pass\n\n # get staypoint matching\n p = np.array([[radians(g.y), radians(g.x)] for g in pfs[name_geocol]])\n labels = db.fit_predict(p)\n\n # add positionfixes - staypoint matching to original positionfixes\n pfs['staypoint_id'] = labels\n\n return pfs\n\n\ndef _triplegs_between_staypoints_case1(positionfixes, staypoints, user_id_this):\n \"\"\"\n This function uses the staypoints and the column 'staypoint_id' in the positionfixes, to identify all\n positionfixes that lie in between two staypoints.\n\n Parameters\n ----------\n positionfixes: trackintel positionfixes\n staypoints: trackintel staypoints\n user_id_this:\n\n Returns\n --------\n list\n a list of dictionaries with individual triplegs\n \"\"\"\n\n generated_triplegs_list = []\n spts = staypoints.loc[staypoints['user_id'] == user_id_this].sort_values('started_at')\n if spts.empty:\n return []\n\n spts = spts.reset_index().to_dict('records')\n\n for spt1, spt2 in zip(list(spts), list(spts)[1:]):\n # - Go through all pairs of consecutive staypoints.\n # - identify end of first and start of second staypoint.\n # - assign 
all positionfixes in between (including bounds) to a tripleg\n\n # get the last posfix of the first staypoint\n index_first_posfix_tl = positionfixes[positionfixes.staypoint_id == spt1['id']].index[-1]\n position_first_posfix_tl = positionfixes.index.get_loc(index_first_posfix_tl)\n\n # get first posfix of the second staypoint\n index_last_posfix_tl = positionfixes[positionfixes.staypoint_id == spt2['id']].index[0]\n position_last_posfix_tl = positionfixes.index.get_loc(index_last_posfix_tl)\n\n # create tripleg from all positionfixes in between the two staypoints\n pfs_tripleg = positionfixes.iloc[position_first_posfix_tl:position_last_posfix_tl + 1]\n generated_triplegs_list.append(__get_tripleg_record_from_psfs(pfs_tripleg, user_id_this, min_nb_of_points=3))\n\n # add first tripleg to the beginning of generated_tripleg_list\n index_first_posfix_first_stp = positionfixes[positionfixes.staypoint_id == spts[0]['id']].index[0]\n position_first_posfix_first_stp = positionfixes.index.get_loc(index_first_posfix_first_stp)\n\n pfs_tripleg = positionfixes.iloc[0:position_first_posfix_first_stp + 1]\n generated_triplegs_list = [__get_tripleg_record_from_psfs(pfs_tripleg, user_id_this, min_nb_of_points=2)] + \\\n generated_triplegs_list\n\n # add last tripleg to the end of generated_triplegs\n index_last_posfix_last_stp = positionfixes[positionfixes.staypoint_id == spts[-1]['id']].index[-1]\n position_last_posfix_last_stp = positionfixes.index.get_loc(index_last_posfix_last_stp)\n\n pfs_tripleg = positionfixes.iloc[position_last_posfix_last_stp:]\n generated_triplegs_list.append(__get_tripleg_record_from_psfs(pfs_tripleg, user_id_this, min_nb_of_points=2))\n\n # filter None values\n return list(filter(None, generated_triplegs_list))\n\n\ndef _triplegs_between_staypoints_case2(positionfixes, staypoints, user_id_this):\n \"\"\"\n This function uses the timestamps of staypoints to identify all positionfixes that lie in between two staypoints.\n\n Parameters\n ----------\n 
positionfixes: trackintel positionfixes\n staypoints: trackintel staypoints\n user_id_this:\n\n Returns\n --------\n list\n a list of dictionaries with individual triplegs\n\n \"\"\"\n generated_triplegs_list = []\n spts = staypoints.loc[staypoints['user_id'] == user_id_this].sort_values('started_at')\n if spts.empty:\n return []\n\n spts = spts.reset_index().to_dict('records')\n positionfixes = positionfixes.sort_values('tracked_at')\n for stp1, stp2 in zip(list(spts), list(spts)[1:]):\n # - Get all positionfixes that lie between these two staypoints by comparing timestamps.\n # - generate tripleg\n\n # Not so efficient, always matching on the time (as things are sorted anyways).\n pfs_tripleg = positionfixes[(stp1['finished_at'] <= positionfixes['tracked_at']) &\n (positionfixes['tracked_at'] <= stp2['started_at'])]\n generated_triplegs_list.append(__get_tripleg_record_from_psfs(pfs_tripleg, user_id_this, min_nb_of_points=3))\n\n # add first tripleg\n pfs_first_tripleg = positionfixes[positionfixes['tracked_at'] <= spts[0]['started_at']]\n generated_triplegs_list = [__get_tripleg_record_from_psfs(pfs_first_tripleg, user_id_this, min_nb_of_points=2\n )] + generated_triplegs_list\n\n # add last tripleg\n pfs_first_tripleg = positionfixes[positionfixes['tracked_at'] >= spts[-1]['finished_at']]\n generated_triplegs_list.append(__get_tripleg_record_from_psfs(pfs_first_tripleg, user_id_this, min_nb_of_points=2))\n\n # filter None values\n return list(filter(None, generated_triplegs_list))\n\n\ndef __get_tripleg_record_from_psfs(pfs_tripleg, user_id_this, min_nb_of_points):\n \"\"\"\n Create a tripleg from a collection of positionfixes\n\n Parameters\n ----------\n pfs_tripleg: geodataframe\n All positionfixes that are part of the tripleg\n user_id_this\n min_nb_of_points: int\n Minimum number of positionfixes required for a valid tripleg.\n 3 positionfixes are required for a tripleg in between two staypoints to have at least 1 positionfix that is\n no part of a 
staypoint.\n 2 positionfixes are required for a tripleg in the beginning or the end of the dataset (or a gap) as the\n first/last positionfix does then not belong to a staypoint.\n\n Returns\n -------\n dict or None\n \"\"\"\n coords = list(pfs_tripleg.geometry.apply(lambda r: (r.x, r.y)))\n\n if len(coords) < min_nb_of_points: # at least 1 posfix that is not part of a staypoint\n return None\n else:\n tripleg_entry = {\n 'user_id': user_id_this,\n 'started_at': pfs_tripleg['tracked_at'].iloc[0],\n 'finished_at': pfs_tripleg['tracked_at'].iloc[-1],\n 'geom': LineString(coords),\n 'pfs_ids': list(pfs_tripleg.index)\n }\n return tripleg_entry\n\n\ndef _triplegs_between_staypoints_case3(positionfixes, user_id_this):\n \"\"\"\n This function uses column 'staypoint_id' to identify all positionfixes that lie in between two staypoints.\n\n Parameters\n ----------\n positionfixes: trackintel positionfixes\n user_id_this:\n\n Returns\n --------\n list\n a list of dictionaries with individual triplegs\n \"\"\"\n\n name_geocol = positionfixes.geometry.name\n generated_triplegs = []\n # initialize first tripleg\n curr_tripleg = {\n 'user_id': user_id_this,\n 'started_at': positionfixes['tracked_at'].iloc[0],\n 'finished_at': None,\n 'geom': [],\n 'pfs_ids': []\n }\n\n first_iteration = True\n for idx, pf in positionfixes.iterrows():\n\n if first_iteration:\n first_iteration = False\n\n if pd.isna(pf['staypoint_id']):\n status = 'in_tripleg'\n elif not pd.isna(pf['staypoint_id']):\n status = 'in_staypoint'\n else:\n # - loop through all positionfixes\n # - identify the current situation and define the variable 'status'\n # - store or skip the current positionfix based on the state of 'status'\n\n # During the loop the status of a positionfix can be {'in_tripleg', 'in_staypoint', 'tripleg_starts',\n # 'tripleg_ends'}\n\n if not pd.isna(prev_pf['staypoint_id']):\n if pd.isna(pf['staypoint_id']):\n status = 'tripleg_starts'\n else:\n status = 'in_staypoint'\n if 
prev_pf['staypoint_id'] != pf['staypoint_id']:\n status = 'tripleg_starts'\n\n elif pd.isna(prev_pf['staypoint_id']):\n if not pd.isna(pf['staypoint_id']):\n status = 'tripleg_ends'\n elif pd.isna(pf['staypoint_id']):\n status = 'in_tripleg'\n else:\n raise Exception(\"case not defined\")\n else:\n raise Exception(\"case not defined\")\n\n # take action depending on status\n if status == 'tripleg_starts':\n # initialize tripleg with last staypoint\n curr_tripleg = {\n 'user_id': user_id_this,\n 'started_at': prev_pf['tracked_at'],\n 'finished_at': None,\n 'geom': [(prev_pf[name_geocol].x, prev_pf[name_geocol].y), ],\n 'pfs_ids': [prev_idx, ]\n }\n status = 'in_tripleg'\n\n if status == 'in_tripleg':\n curr_tripleg['geom'].append((pf[name_geocol].x, pf[name_geocol].y))\n curr_tripleg['pfs_ids'].append(idx)\n\n elif status == 'tripleg_ends':\n curr_tripleg['finished_at'] = pf['tracked_at']\n curr_tripleg['geom'].append((pf[name_geocol].x, pf[name_geocol].y))\n curr_tripleg['pfs_ids'].append(idx)\n curr_tripleg['geom'] = LineString([(x, y) for x, y in curr_tripleg['geom']])\n generated_triplegs.append(curr_tripleg)\n\n del curr_tripleg\n elif status == 'in_staypoint':\n pass\n\n prev_idx = idx\n prev_pf = pf\n\n # add a potential tripleg after the last staypoint\n if status == 'in_tripleg' and len(curr_tripleg) > 1:\n curr_tripleg['finished_at'] = pf['tracked_at']\n\n # NB: geom and id where already added during the loop\n curr_tripleg['geom'] = LineString([(x, y) for x, y in curr_tripleg['geom']])\n generated_triplegs.append(curr_tripleg)\n\n return generated_triplegs\n" ]
[ [ "pandas.DataFrame", "sklearn.cluster.DBSCAN", "pandas.isna", "pandas.Series" ] ]
isabella232/snippet-ranger
[ "06247e0492d59b859fe48dea0428171b252770ef" ]
[ "snippet_ranger/models/snippet.py" ]
[ "from ast2vec import Source\nimport numpy as np\n\n\nclass Snippet(Source):\n \"\"\"\n This model can store code snippets. In general, code snippet is any part of source code file.\n For example, function declaration is a code snippet. So, this class is the same as source model\n but have start and end line positions of snippet location in file.\n You can use :class:`Source2Function` transformer to create function snippets from source model.\n \"\"\"\n\n NAME = \"snippet\"\n\n def construct(self, repository, filenames, uasts, sources,\n positions_start=None, positions_end=None, positions=None):\n super(Snippet, self).construct(repository=repository, filenames=filenames,\n sources=sources, uasts=uasts)\n if ((positions_start is None) ^ (positions_end is None)) or \\\n ((positions_start is None) ^ (positions is not None)):\n raise ValueError(\"You should specify both positions_start and positions_end or \"\n \"only a positions\")\n if positions is None:\n if len(positions_start) != len(positions_end):\n raise ValueError(\"Length of positions_start ({}) and positions_end ({}) \"\n \"are not equal\".format(len(positions_start), len(positions_end)))\n self._positions = np.array(list(zip(positions_start, positions_end)))\n else:\n self._positions = positions\n return self\n\n @property\n def names(self) -> list:\n \"\"\"\n Creates the list of names for snippets in the model.\n Usually names are needed for topic modeling to represent different snippets as different\n documents. 
See `SnippetModel2BOW` transformer.\n \"\"\"\n return [\"{}/{}_{}_{}\".format(self._repository, name, st, end).\n replace(\":\", \"\").replace(\" \", \"_\")\n for name, (st, end) in zip(self._filenames, self._positions)]\n\n @property\n def positions(self):\n \"\"\"\n Return start and end line positions of snippets.\n \"\"\"\n return self._positions\n\n @property\n def positions_start(self):\n \"\"\"\n Return start line position of snippets.\n \"\"\"\n return self._positions[:, 0].T\n\n @property\n def positions_end(self):\n \"\"\"\n Return end line position of snippets.\n \"\"\"\n return self._positions[:, 1].T\n\n def __iter__(self):\n \"\"\"\n Iterator over the items.\n \"\"\"\n return zip(self._filenames, self._uasts, self._sources, self._positions)\n\n def __getitem__(self, item):\n \"\"\"\n Returns file name, uast, source code and positions for the given snippet index.\n\n :param item: Snippet index.\n :return: file name, source code, uast, positions, where positions[0] is start and \\\n positions[1] is end.\n \"\"\"\n return super(Snippet, self).__getitem__(item) + (self._positions[item], )\n\n def _load_tree_kwargs(self, tree):\n tree_kwargs = super(Snippet, self)._load_tree_kwargs(tree)\n tree_kwargs[\"positions\"] = np.array(tree[\"positions\"])\n return tree_kwargs\n\n def _to_dict_to_save(self):\n save_dict = super(Snippet, self)._to_dict_to_save()\n save_dict[\"positions\"] = self._positions\n return save_dict\n" ]
[ [ "numpy.array" ] ]
mmessalti/GmdhPy
[ "1dbd59de80c7d0b075c938864f7f76afe26ba12f" ]
[ "examples/boston_houses.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom sklearn.datasets import load_boston\nfrom sklearn import metrics\nfrom gmdhpy.gmdh import Regressor\nfrom gmdhpy.plot_model import PlotModel\n\n\nif __name__ == '__main__':\n\n boston = load_boston()\n\n n_samples = boston.data.shape[0]\n\n train_data_is_the_first_half = False\n n = n_samples // 2\n if train_data_is_the_first_half:\n train_x = boston.data[:n]\n train_y = boston.target[:n]\n test_x = boston.data[n:]\n test_y = boston.target[n:]\n else:\n train_x = boston.data[n:]\n train_y = boston.target[n:]\n test_x = boston.data[:n]\n test_y = boston.target[:n]\n\n params = {\n 'admix_features': True, # default value\n 'criterion_type': 'validate', # default value\n 'seq_type' : 'mode1' , # default value\n 'max_layer_count': 100, # default value is sys.maxsize\n 'criterion_minimum_width': 5, # default value\n 'stop_train_epsilon_condition' : 0.0001, # default value is 0.001\n 'manual_best_neurons_selection' : False, # default value\n 'ref_functions': 'linear_cov', # default value\n 'normalize': True, # default value\n 'layer_err_criterion': 'top', # default value\n 'n_jobs': 1, # default value\n 'feature_names': boston.feature_names,\n 'l2_bis':(1e-5,1e-4,1e-3,0.01,0.1,1.0,10.0)\n }\n\n \n model = Regressor(**params)\n '''\n model = Regressor(ref_functions=('linear_cov',),\n criterion_type='validate',\n feature_names=boston.feature_names,\n criterion_minimum_width=5,\n stop_train_epsilon_condition=0.001,\n layer_err_criterion='top',\n l2=0.5,\n n_jobs='max')\n '''\n model.fit(train_x, train_y)\n\n # Now predict the value of the second half:\n y_pred = model.predict(test_x)\n mse = metrics.mean_squared_error(test_y, y_pred)\n mae = metrics.mean_absolute_error(test_y, y_pred)\n r2 = metrics.r2_score(test_y, y_pred)\n\n print(\"mse error on test set: {mse:0.2f}\".format(mse=mse))\n print(\"mae error on test set: {mae:0.2f}\".format(mae=mae))\n print(\"RΒ² score on test set: 
{r2:0.4f}\".format(r2=r2))\n\n print(model.get_selected_features_indices())\n print(model.get_unselected_features_indices())\n\n print(\"Selected features: {}\".format(model.get_selected_features()))\n print(\"Unselected features: {}\".format(model.get_unselected_features()))\n print()\n print()\n print()\n print(model.describe())\n\n PlotModel(model, filename='boston_house_model', plot_neuron_name=True, view=True).plot()\n" ]
[ [ "sklearn.metrics.r2_score", "sklearn.metrics.mean_absolute_error", "sklearn.metrics.mean_squared_error", "sklearn.datasets.load_boston" ] ]
tgautam03/CS6190-ProbabilisticML
[ "f000f571d1068ab640a360b490a40f0f15d8502b" ]
[ "assignments/a5/Q2.py" ]
[ "import numpy as np\nfrom scipy.special import expit\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nfrom scipy.stats import multivariate_normal\n\n##################################################################################################################################\n############################################################# PART A #############################################################\n##################################################################################################################################\nmu = np.zeros(2)\ncovariance = np.array([[3, 2.9], [2.9, 3]])\ntrue_dist = multivariate_normal(mean=mu, cov=covariance).rvs(500)\nplt.scatter(true_dist[:, 0], true_dist[:, 1])\nplt.xlabel(\"z0\")\nplt.ylabel(\"z1\")\nplt.title(\"Drawing samples\")\nplt.show()\n\n##################################################################################################################################\n############################################################# PART B #############################################################\n##################################################################################################################################\nn = 2\nn_iter = 100\n# Initialise x\nz = np.array([-4, -4], dtype=np.float64)\naccepted = np.array(z)[np.newaxis,:]\nfor i in range(n_iter):\n for j in range(n):\n k = (j+1)%2\n mu_j = mu[j] + covariance[j][k]*(z[k] - mu[k])/covariance[k][k]\n var_j = covariance[j][j] - covariance[j][k]*covariance[k][j]/covariance[k][k]\n z[j] = norm(loc = mu_j, scale=np.sqrt(var_j)).rvs(1)\n accepted = np.vstack((accepted, z))\n \n\nplt.plot(accepted[:, 0], accepted[:, 1], 'ro')\nplt.xlabel(\"z0\")\nplt.ylabel(\"z1\")\nplt.title(\"Gibbs Sampling\")\nplt.show()\n\ntrue_dist = multivariate_normal(mean=mu, cov=covariance).rvs(500)\nplt.scatter(true_dist[:, 0], true_dist[:, 1], label=\"Normal Sampling\")\nplt.plot(accepted[:, 0], accepted[:, 1], 'ro', label=\"Gibbs 
Sampling\")\nplt.xlabel(\"z0\")\nplt.ylabel(\"z1\")\nplt.title(\"Gibbs Sampling vs Normal Sampling\")\nplt.legend()\nplt.show()\n\n##################################################################################################################################\n############################################################# PART C #############################################################\n##################################################################################################################################\ndef dU_dz(mu, cov, z):\n z = np.array(z-mu)\n grad = np.dot(np.linalg.inv(cov),z)\n return grad\n\ndef leapfrog(z, r, s, mu, cov, eps, L):\n\n for i in range(L):\n r -= (eps/2)*dU_dz(mu, cov, np.copy(z))\n z += eps*np.dot(np.linalg.inv(s), r)\n r -= (eps/2)*dU_dz(mu, cov, np.copy(z))\n return (z, r)\n\ndef accept_prob(pos_dist, current_state, next_state, mu, cov, s):\n current_state_p = pos_dist(current_state, mu, cov, s)\n next_state_p = pos_dist(next_state, mu, cov, s)\n return(np.min([1, next_state_p/current_state_p]))\n\ndef total_energy(state, mu, cov, s):\n z = state[0]\n r = np.array(state[1])\n z = np.array(z-mu)\n u = 0.5*(np.dot(np.dot(z.transpose(),np.linalg.inv(cov)), z))\n k = 0.5*(np.dot(np.dot(r.transpose(),np.linalg.inv(s)), r))\n return(np.exp(-u-k))\n\ndef hybrid_monte_carlo(mu, cov, burn_in, n_iter, eps, L, z):\n s = np.eye(2)\n r = multivariate_normal(mean=np.zeros(2), cov=s)\n mu = mu[:,np.newaxis]\n z_p = z[:,np.newaxis]\n rejected = np.array(z_p)\n accepted = np.array(z_p)\n for i in range(1, burn_in + 1):\n r_p = r.rvs(1)[:, np.newaxis] \n z_n, r_n = leapfrog(np.copy(z_p), np.copy(r_p), s, mu, cov, eps, L)\n r_n *= (-1)\n prob = accept_prob(total_energy, [z_p, r_p], [z_n, r_n], mu, cov, s)\n u = np.random.uniform(0, 1, 1)\n if (u <= prob):\n z_p = z_n\n\n for i in range(1, n_iter + 1):\n accept = False\n r_p = r.rvs(1)[:, np.newaxis] \n z_n, r_n = leapfrog(np.copy(z_p), np.copy(r_p), s, mu, cov, eps, L)\n r_n *= (-1)\n prob 
= accept_prob(total_energy, [z_p, r_p, s], [z_n, r_n, s], mu, cov, s)\n u = np.random.uniform(0, 1, 1)\n if (u <= prob):\n accept = True\n if (i % m == 0):\n if (accept):\n accepted = np.hstack((accepted, z_n))\n else:\n accepted = np.hstack((accepted, z_p))\n rejected = np.hstack((rejected, z_n))\n if (accept):\n z_p = z_n\n return accepted.transpose() #, rejected.transpose()\n\neps = 0.1; L = 20; m = 1\nburn_in = 100000\naccepted_monte = hybrid_monte_carlo(mu, covariance, burn_in, 100, eps, L, z)\n\nplt.plot(accepted_monte[:, 0], accepted_monte[:, 1], 'ro')\nplt.xlabel(\"z0\")\nplt.ylabel(\"z1\")\nplt.title(\"Monte Carlo Sampling\")\nplt.show()\n\ntrue_dist = multivariate_normal(mean=mu, cov=covariance).rvs(500)\nplt.scatter(true_dist[:, 0], true_dist[:, 1], label=\"Normal Sampling\")\nplt.plot(accepted_monte[:, 0], accepted_monte[:, 1], 'ro', label=\"Monte Carlo Sampling\")\nplt.plot(accepted[:, 0], accepted[:, 1], 'go', label=\"Gibbs Sampling\")\nplt.xlabel(\"z0\")\nplt.ylabel(\"z1\")\nplt.title(\"Monte Carlo Sampling vs Normal Sampling\")\nplt.legend()\nplt.show()" ]
[ [ "numpy.copy", "numpy.min", "numpy.exp", "numpy.eye", "numpy.sqrt", "numpy.linalg.inv", "numpy.vstack", "numpy.array", "numpy.zeros", "matplotlib.pyplot.title", "numpy.hstack", "matplotlib.pyplot.show", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "scipy.stats.multivariate_normal", "numpy.random.uniform", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter" ] ]
nim65s/multicontact-api
[ "036b902deb2472bb22496a567e93a25a236a3e1e" ]
[ "unittest/python/scenario.py" ]
[ "# Copyright (c) 2019, CNRS\n# Authors: Pierre Fernbach <pfernbac@laas.fr>\nimport unittest\nfrom math import cos, sin, sqrt\nfrom random import uniform\n\nimport numpy as np\nfrom ndcurves import SE3Curve, bezier, piecewise, piecewise_SE3, polynomial\nfrom numpy import array, array_equal, isclose, random\n\nimport pinocchio as pin\nfrom multicontact_api import ContactModel, ContactPatch, ContactPhase, ContactSequence, ContactType\nfrom pinocchio import SE3, Quaternion\nimport pickle\n\npin.switchToNumpyArray()\n\n\ndef randomQuaternion():\n u1 = uniform(0., 1.)\n u2 = uniform(0., 2. * np.pi)\n u3 = uniform(0., 2. * np.pi)\n a = sqrt(1 - u1)\n b = sqrt(u1)\n q = Quaternion(a * sin(u2), a * cos(u2), b * sin(u3), b * cos(u3))\n q.normalize()\n return q\n\n\ndef createRandomPiecewisePolynomial(dim, t_min=0, t_max=2):\n \"\"\"\n Build random piecewise polynomial with 2 polynomial of degree 3\n between 01 and 12\n \"\"\"\n t_mid = (t_min + t_max) / 2.\n coefs0 = np.random.rand(dim, 4) # degree 3\n pol0 = polynomial(coefs0, t_min, t_mid)\n pc = piecewise(pol0)\n coefs1 = np.random.rand(dim, 4) # degree 3\n pc.append(polynomial(coefs1, t_mid, t_max))\n return pc\n\n\ndef createRandomSE3Traj(t_min=0, t_max=2):\n p0 = SE3()\n p0.setRandom()\n p1 = SE3()\n p1.setRandom()\n curve = SE3Curve(p0, p1, t_min, t_max)\n return curve\n\n\ndef addRandomPointsValues(cp):\n c_init = np.random.rand(3)\n dc_init = np.random.rand(3)\n ddc_init = np.random.rand(3)\n L_init = np.random.rand(3)\n dL_init = np.random.rand(3)\n q_init = np.random.rand(35)\n c_final = np.random.rand(3)\n dc_final = np.random.rand(3)\n ddc_final = np.random.rand(3)\n L_final = np.random.rand(3)\n dL_final = np.random.rand(3)\n q_final = np.random.rand(35)\n cp.c_init = c_init\n cp.dc_init = dc_init\n cp.ddc_init = ddc_init\n cp.L_init = L_init\n cp.dL_init = dL_init\n cp.q_init = q_init\n cp.c_final = c_final\n cp.dc_final = dc_final\n cp.ddc_final = ddc_final\n cp.L_final = L_final\n cp.dL_final = dL_final\n 
cp.q_final = q_final\n\n\ndef addRandomCurvesValues(cp):\n q = createRandomPiecewisePolynomial(31)\n dq = createRandomPiecewisePolynomial(30)\n ddq = createRandomPiecewisePolynomial(30)\n tau = createRandomPiecewisePolynomial(30)\n dc = createRandomPiecewisePolynomial(3)\n ddc = createRandomPiecewisePolynomial(3)\n L = createRandomPiecewisePolynomial(3)\n dL = createRandomPiecewisePolynomial(3)\n wrench = createRandomPiecewisePolynomial(6)\n zmp = createRandomPiecewisePolynomial(3)\n root = createRandomSE3Traj()\n coefs = np.random.rand(3, 7) # degree 3\n c1 = polynomial(coefs, 0, 2)\n # assign trajectories :\n cp.q_t = q\n cp.dq_t = dq\n cp.ddq_t = ddq\n cp.tau_t = tau\n cp.c_t = c1\n cp.dc_t = dc\n cp.ddc_t = ddc\n cp.L_t = L\n cp.dL_t = dL\n cp.wrench_t = wrench\n cp.zmp_t = zmp\n cp.root_t = root\n\n\ndef addRandomContacts(cp):\n p = SE3()\n p.setRandom()\n patchRF = ContactPatch(p, 0.5)\n cp.addContact(\"right-leg\", patchRF)\n p = SE3()\n p.setRandom()\n patchLF = ContactPatch(p, 0.5)\n cp.addContact(\"left-leg\", patchLF)\n\n\ndef addRandomForcesTrajs(cp):\n fR = createRandomPiecewisePolynomial(12)\n fL = createRandomPiecewisePolynomial(12)\n # fL2 = createRandomPiecewisePolynomial(12)\n cp.addContactForceTrajectory(\"right-leg\", fR)\n cp.addContactForceTrajectory(\"left-leg\", fL)\n fR = createRandomPiecewisePolynomial(1)\n fL = createRandomPiecewisePolynomial(1)\n cp.addContactNormalForceTrajectory(\"right-leg\", fR)\n cp.addContactNormalForceTrajectory(\"left-leg\", fL)\n\n\ndef addRandomEffectorTrajectories(cp):\n fR = createRandomSE3Traj()\n fL = createRandomSE3Traj()\n cp.addEffectorTrajectory(\"right-hand\", fR)\n cp.addEffectorTrajectory(\"left-hand\", fL)\n\n\ndef buildRandomContactPhase(min=-1, max=-1):\n if min >= 0 and max >= 0:\n cp = ContactPhase(min, max)\n else:\n cp = ContactPhase()\n addRandomPointsValues(cp)\n addRandomCurvesValues(cp)\n addRandomContacts(cp)\n addRandomForcesTrajs(cp)\n addRandomEffectorTrajectories(cp)\n return 
cp\n\n\nclass ContactModelTest(unittest.TestCase):\n def test_contact_model(self):\n mu = 0.3\n # default constructor\n mp = ContactModel()\n self.assertEqual(mp.mu, -1.)\n self.assertEqual(mp.contact_type, ContactType.CONTACT_UNDEFINED)\n self.assertEqual(mp.num_contact_points, 1)\n self.assertEqual(len(mp.contact_points_positions.shape), 1)\n self.assertEqual(mp.contact_points_positions.shape[0], 3)\n self.assertTrue(not mp.contact_points_positions.any())\n\n # constructor with friction\n mp_mu = ContactModel(mu)\n self.assertEqual(mp_mu.mu, mu)\n self.assertEqual(mp_mu.contact_type, ContactType.CONTACT_UNDEFINED)\n self.assertEqual(mp.num_contact_points, 1)\n self.assertEqual(len(mp.contact_points_positions.shape), 1)\n self.assertEqual(mp.contact_points_positions.shape[0], 3)\n self.assertTrue(not mp.contact_points_positions.any())\n\n # constructor with both values\n mp1 = ContactModel(mu, ContactType.CONTACT_PLANAR)\n # test getter bindings\n self.assertEqual(mp1.mu, mu)\n self.assertEqual(mp1.contact_type, ContactType.CONTACT_PLANAR)\n self.assertEqual(mp.num_contact_points, 1)\n self.assertEqual(len(mp.contact_points_positions.shape), 1)\n self.assertEqual(mp.contact_points_positions.shape[0], 3)\n self.assertTrue(not mp.contact_points_positions.any())\n\n # copy constructor :\n mp2 = ContactModel(mp1)\n self.assertEqual(mp2.mu, mu)\n self.assertEqual(mp2.contact_type, ContactType.CONTACT_PLANAR)\n self.assertEqual(mp.num_contact_points, 1)\n self.assertEqual(len(mp.contact_points_positions.shape), 1)\n self.assertEqual(mp.contact_points_positions.shape[0], 3)\n self.assertTrue(not mp.contact_points_positions.any())\n\n # test operator ==\n self.assertTrue(mp1 == mp2)\n mp1.mu = 0.5\n self.assertTrue(mp1 != mp2)\n\n def test_contact_model_contact_points(self):\n mp1 = ContactModel(0.5, ContactType.CONTACT_PLANAR)\n mp1.num_contact_points = 4\n self.assertEqual(mp1.num_contact_points, 4)\n self.assertEqual(mp1.contact_points_positions.shape[0], 3)\n 
self.assertEqual(mp1.contact_points_positions.shape[1], 4)\n self.assertTrue(not mp1.contact_points_positions.any())\n\n pos = np.random.rand(3, 5)\n mp1.contact_points_positions = pos\n self.assertEqual(mp1.num_contact_points, 5)\n self.assertEqual(mp1.contact_points_positions.shape[0], 3)\n self.assertEqual(mp1.contact_points_positions.shape[1], 5)\n self.assertTrue(isclose(mp1.contact_points_positions, pos).all())\n\n generators = mp1.generatorMatrix()\n self.assertEqual(generators.shape[0], 6)\n self.assertEqual(generators.shape[1], 5 * 3)\n\n mp1.num_contact_points = 2\n self.assertEqual(mp1.num_contact_points, 2)\n self.assertEqual(mp1.contact_points_positions.shape[0], 3)\n self.assertEqual(mp1.contact_points_positions.shape[1], 2)\n self.assertTrue(not mp1.contact_points_positions.any())\n\n def test_contact_model_serialization_default(self):\n mp1 = ContactModel()\n mp1.saveAsText(\"mp_test.txt\")\n mp_txt = ContactModel()\n mp_txt.loadFromText(\"mp_test.txt\")\n self.assertEqual(mp1, mp_txt)\n mp1.saveAsBinary(\"mp_test\")\n mp_bin = ContactModel()\n mp_bin.loadFromBinary(\"mp_test\")\n self.assertEqual(mp1, mp_bin)\n mp1.saveAsXML(\"mp_test.xml\", 'ContactModel')\n mp_xml = ContactModel()\n mp_xml.loadFromXML(\"mp_test.xml\", 'ContactPatch')\n self.assertEqual(mp1, mp_xml)\n mp_pickled = pickle.dumps(mp1)\n mp_from_pickle = pickle.loads(mp_pickled)\n self.assertEqual(mp1, mp_from_pickle)\n\n def test_contact_model_serialization_full(self):\n mu = 0.3\n # constructor with both values\n mp1 = ContactModel(mu, ContactType.CONTACT_PLANAR)\n mp1.saveAsText(\"mp_test.txt\")\n mp_txt = ContactModel()\n mp_txt.loadFromText(\"mp_test.txt\")\n self.assertEqual(mp1, mp_txt)\n mp1.saveAsBinary(\"mp_test\")\n mp_bin = ContactModel()\n mp_bin.loadFromBinary(\"mp_test\")\n self.assertEqual(mp1, mp_bin)\n mp1.saveAsXML(\"mp_test.xml\", 'ContactModel')\n mp_xml = ContactModel()\n mp_xml.loadFromXML(\"mp_test.xml\", 'ContactPatch')\n self.assertEqual(mp1, mp_xml)\n 
mp_pickled = pickle.dumps(mp1)\n mp_from_pickle = pickle.loads(mp_pickled)\n self.assertEqual(mp1, mp_from_pickle)\n\n\nclass ContactPatchTest(unittest.TestCase):\n def test_default_constructor(self):\n cp = ContactPatch()\n self.assertEqual(cp.friction, -1.0)\n self.assertTrue(cp.placement == SE3.Identity())\n\n def test_setter_getter(self):\n cp = ContactPatch()\n p = SE3()\n p.setRandom()\n cp.placement = p\n self.assertTrue(cp.placement == p)\n cp.friction = 0.7\n self.assertTrue(cp.friction == 0.7)\n self.assertTrue(cp.placement == p)\n\n def test_constructor_with_arguments(self):\n p = SE3()\n p.setRandom()\n cp = ContactPatch(p)\n self.assertTrue(cp.friction == -1.0)\n self.assertTrue(cp.placement == p)\n # check that the value have been copied and it's not the same pointer anymore :\n p.setRandom()\n self.assertTrue(cp.placement != p)\n\n p = SE3()\n p.setRandom()\n cp = ContactPatch(p, 0.9)\n self.assertTrue(cp.friction == 0.9)\n self.assertTrue(cp.placement == p)\n\n def test_constructor_with_contact_model(self):\n cm = ContactModel(0.5, ContactType.CONTACT_PLANAR)\n cm.num_contact_points = 4\n p = SE3()\n p.setRandom()\n cp = ContactPatch(p, cm)\n self.assertTrue(cp.friction == 0.5)\n self.assertTrue(cp.placement == p)\n self.assertTrue(cp.contact_model.num_contact_points == 4)\n\n # check that the value have been copied and it's not the same pointer anymore :\n cm.num_contact_points = 6\n self.assertTrue(cp.contact_model.num_contact_points == 4)\n\n def test_operator_equal(self):\n cp1 = ContactPatch()\n cp2 = ContactPatch()\n self.assertTrue(cp1 == cp2)\n self.assertFalse(cp1 != cp2)\n cp1.friction = 0.5\n self.assertTrue(cp1 != cp2)\n self.assertFalse(cp1 == cp2)\n cp2.friction = 0.5\n self.assertTrue(cp1 == cp2)\n\n p = SE3()\n p.setRandom()\n cp1 = ContactPatch(p, 0.9)\n cp2 = ContactPatch(p, 0.9)\n self.assertTrue(cp1 == cp2)\n cp1.placement.setRandom()\n self.assertTrue(cp1 != cp2)\n\n def test_copy_constructor(self):\n p = SE3()\n p.setRandom()\n 
cp1 = ContactPatch(p, 0.9)\n cp2 = ContactPatch(cp1)\n self.assertTrue(cp1 == cp2)\n cp1.placement.setRandom()\n self.assertTrue(cp1 != cp2)\n\n def test_serialization_no_friction(self):\n p = SE3()\n p.setRandom()\n cp1 = ContactPatch(p)\n cp1.saveAsText(\"cp_test.txt\")\n cp_txt = ContactPatch()\n cp_txt.loadFromText(\"cp_test.txt\")\n self.assertEqual(cp1, cp_txt)\n cp1.saveAsBinary(\"cp_test\")\n cp_bin = ContactPatch()\n cp_bin.loadFromBinary(\"cp_test\")\n self.assertEqual(cp1, cp_bin)\n cp1.saveAsXML(\"cp_test.xml\", 'ContactPatch')\n cp_xml = ContactPatch()\n cp_xml.loadFromXML(\"cp_test.xml\", 'ContactPatch')\n self.assertEqual(cp1, cp_xml)\n cp_pickled = pickle.dumps(cp1)\n cp_from_pickle = pickle.loads(cp_pickled)\n self.assertEqual(cp1, cp_from_pickle)\n\n def test_serialization_full(self):\n p = SE3()\n p.setRandom()\n cp1 = ContactPatch(p, 0.9)\n cp1.saveAsText(\"cp_test.txt\")\n cp_txt = ContactPatch()\n cp_txt.loadFromText(\"cp_test.txt\")\n self.assertEqual(cp1, cp_txt)\n cp1.saveAsBinary(\"cp_test\")\n cp_bin = ContactPatch()\n cp_bin.loadFromBinary(\"cp_test\")\n self.assertEqual(cp1, cp_bin)\n cp1.saveAsXML(\"cp_test.xml\", 'ContactPatch')\n cp_xml = ContactPatch()\n cp_xml.loadFromXML(\"cp_test.xml\", 'ContactPatch')\n self.assertEqual(cp1, cp_xml)\n cp_pickled = pickle.dumps(cp1)\n cp_from_pickle = pickle.loads(cp_pickled)\n self.assertEqual(cp1, cp_from_pickle)\n\n def test_contact_patch_model_accessor(self):\n p = SE3()\n p.setRandom()\n cp1 = ContactPatch(p, 0.9)\n cm = cp1.contact_model\n self.assertEqual(cm.mu, 0.9)\n cm.mu = 0.5\n self.assertEqual(cp1.friction, 0.5)\n\n cp1.contact_model.contact_type = ContactType.CONTACT_PLANAR\n self.assertEqual(cp1.contact_model.contact_type, ContactType.CONTACT_PLANAR)\n\n cp1.friction = 2\n self.assertEqual(cp1.contact_model.mu, 2)\n self.assertEqual(cm.mu, 2)\n\n pos = np.random.rand(3, 4)\n cp1.contact_model.contact_points_positions = pos\n self.assertEqual(cp1.contact_model.num_contact_points, 
4)\n self.assertEqual(cp1.contact_model.contact_points_positions.shape[0], 3)\n self.assertEqual(cp1.contact_model.contact_points_positions.shape[1], 4)\n self.assertTrue(isclose(cp1.contact_model.contact_points_positions, pos).all())\n\n\nclass ContactPhaseTest(unittest.TestCase):\n def test_default_constructor(self):\n cp = ContactPhase()\n self.assertEqual(cp.timeInitial, -1)\n self.assertEqual(cp.timeFinal, -1)\n self.assertEqual(cp.duration, 0)\n self.assertEqual(cp.numContacts(), 0)\n self.assertEqual(len(cp.effectorsInContact()), 0)\n\n def test_constructor_with_arguments(self):\n cp = ContactPhase(1, 5)\n self.assertEqual(cp.timeInitial, 1)\n self.assertEqual(cp.timeFinal, 5)\n self.assertEqual(cp.duration, 4)\n self.assertEqual(cp.numContacts(), 0)\n self.assertEqual(len(cp.effectorsInContact()), 0)\n with self.assertRaises(ValueError):\n cp = ContactPhase(1, 0.5)\n\n def test_timings_setter(self):\n cp = ContactPhase()\n cp.timeInitial = 1.5\n cp.timeFinal = 3.\n self.assertEqual(cp.timeInitial, 1.5)\n self.assertEqual(cp.timeFinal, 3.)\n self.assertEqual(cp.duration, 1.5)\n cp.duration = 2.\n self.assertEqual(cp.timeInitial, 1.5)\n self.assertEqual(cp.timeFinal, 3.5)\n self.assertEqual(cp.duration, 2.)\n with self.assertRaises(ValueError):\n cp.timeFinal = 1.\n with self.assertRaises(ValueError):\n cp.duration = -0.5\n\n def test_contact_methods(self):\n cp = ContactPhase(1.5, 3)\n p = SE3()\n p.setRandom()\n patchRF = ContactPatch(p, 0.5)\n new = cp.addContact(\"right-leg\", patchRF)\n self.assertTrue(new)\n self.assertTrue(cp.isEffectorInContact(\"right-leg\"))\n self.assertTrue(\"right-leg\" in cp.effectorsInContact())\n self.assertEqual(patchRF, cp.contactPatch(\"right-leg\"))\n self.assertEqual(cp.numContacts(), 1)\n\n # add another contact :\n p = SE3()\n p.setRandom()\n patchLF = ContactPatch(p, 0.5)\n new = cp.addContact(\"left-leg\", patchLF)\n self.assertTrue(new)\n self.assertTrue(cp.isEffectorInContact(\"right-leg\"))\n 
self.assertTrue(\"right-leg\" in cp.effectorsInContact())\n self.assertEqual(patchRF, cp.contactPatch(\"right-leg\"))\n self.assertTrue(cp.isEffectorInContact(\"left-leg\"))\n self.assertTrue(\"left-leg\" in cp.effectorsInContact())\n self.assertEqual(patchLF, cp.contactPatch(\"left-leg\"))\n self.assertEqual(cp.numContacts(), 2)\n # check that the patch can be overwritten:\n p = SE3()\n p.setRandom()\n patchRF2 = ContactPatch(p, 0.5)\n new = cp.addContact(\"right-leg\", patchRF2)\n self.assertFalse(new)\n\n # check deletion of contacts :\n exist = cp.removeContact(\"right-leg\")\n self.assertTrue(exist)\n self.assertTrue(cp.isEffectorInContact(\"left-leg\"))\n self.assertTrue(\"left-leg\" in cp.effectorsInContact())\n self.assertEqual(patchLF, cp.contactPatch(\"left-leg\"))\n self.assertEqual(cp.numContacts(), 1)\n self.assertFalse(cp.isEffectorInContact(\"right-leg\"))\n self.assertFalse(\"right-leg\" in cp.effectorsInContact())\n exist = cp.removeContact(\"right-leg\")\n self.assertFalse(exist)\n\n exist = cp.removeContact(\"left-leg\")\n self.assertTrue(exist)\n self.assertFalse(cp.isEffectorInContact(\"left-leg\"))\n self.assertFalse(\"left-leg\" in cp.effectorsInContact())\n self.assertFalse(cp.isEffectorInContact(\"right-leg\"))\n self.assertFalse(\"right-leg\" in cp.effectorsInContact())\n self.assertEqual(cp.numContacts(), 0)\n\n def test_contact_patch_access(self):\n cp = ContactPhase(1.5, 3)\n p = SE3()\n p.setRandom()\n patchRF = ContactPatch(p, 0.5)\n cp.addContact(\"right-leg\", patchRF)\n # check that the contactPatch have been copied and it's not a pointer :\n patchRF.placement.setRandom()\n self.assertNotEqual(patchRF, cp.contactPatch(\"right-leg\"))\n patchRF = ContactPatch(cp.contactPatch(\"right-leg\"))\n self.assertEqual(patchRF, cp.contactPatch(\"right-leg\"))\n patchRF.placement.translation += np.array([0, 0.1, 0])\n self.assertNotEqual(patchRF, cp.contactPatch(\"right-leg\"))\n patchRF = ContactPatch(cp.contactPatch(\"right-leg\"))\n # check 
that the getter of contactPatch is a non const reference:\n cp.contactPatch('right-leg').placement.setRandom()\n self.assertNotEqual(patchRF, cp.contactPatch(\"right-leg\"))\n patchRF = ContactPatch(cp.contactPatch(\"right-leg\"))\n cp.contactPatch('right-leg').friction = 0.7\n self.assertNotEqual(patchRF, cp.contactPatch(\"right-leg\"))\n patchRF = ContactPatch(cp.contactPatch(\"right-leg\"))\n cp.contactPatch(\"right-leg\").placement.translation += np.array([0, 0.1, 0])\n self.assertNotEqual(patchRF, cp.contactPatch(\"right-leg\"))\n\n patchRF = cp.contactPatch(\"right-leg\")\n self.assertEqual(patchRF, cp.contactPatch(\"right-leg\"))\n patchRF.placement.translation += np.array([0, 0.1, 0])\n self.assertEqual(patchRF, cp.contactPatch(\"right-leg\"))\n # check errors :\n with self.assertRaises(ValueError):\n cp.contactPatch(\"left-leg\")\n\n def test_contact_patch_dict(self):\n cp = ContactPhase(1.5, 3)\n p = SE3()\n p.setRandom()\n patchRF = ContactPatch(p, 0.5)\n cp.addContact(\"right-leg\", patchRF)\n dict = cp.contactPatches()\n self.assertTrue(\"right-leg\" in dict.keys())\n self.assertEqual(dict[\"right-leg\"], patchRF)\n self.assertEqual(len(dict.keys()), 1)\n\n # add another contact :\n p = SE3()\n p.setRandom()\n patchLF = ContactPatch(p, 0.5)\n cp.addContact(\"left-leg\", patchLF)\n # check that it's not a pointer :\n self.assertEqual(len(dict.keys()), 1)\n self.assertFalse(\"left-leg\" in dict.keys())\n # check that the contact have been added\n dict = cp.contactPatches()\n self.assertTrue(\"right-leg\" in dict.keys())\n self.assertTrue(\"left-leg\" in dict.keys())\n self.assertEqual(dict[\"right-leg\"], patchRF)\n self.assertEqual(dict[\"left-leg\"], patchLF)\n self.assertEqual(len(dict.keys()), 2)\n\n # check that changing the dict doesn't change the contact phase:\n p = SE3()\n p.setRandom()\n patch2 = ContactPatch(p, 0.5)\n dict.update({\"test\": patch2})\n self.assertFalse(\"test\" in cp.contactPatches().keys())\n # check that the map is const\n 
        cp.contactPatches().update({"test": patch2})  # should not have any effect
        self.assertFalse("test" in cp.contactPatches().keys())

        # check deletion :
        cp.removeContact("right-leg")
        dict = cp.contactPatches()
        self.assertFalse("right-leg" in dict.keys())
        self.assertTrue("left-leg" in dict.keys())
        self.assertEqual(dict["left-leg"], patchLF)
        self.assertEqual(len(dict.keys()), 1)

    def test_effector_trajectory(self):
        """Check add/get of SE3 end-effector trajectories and their reference semantics."""
        cp = ContactPhase(1.5, 3)
        p = SE3()
        p.setRandom()
        patchRF = ContactPatch(p, 0.5)
        cp.addContact("right-leg", patchRF)
        # create a SE3 trajectory :
        init_pose = SE3.Identity()
        end_pose = SE3.Identity()
        init_pose.translation = array([0.2, -0.7, 0.6])
        end_pose.translation = array([3.6, -2.2, -0.9])
        init_pose.rotation = Quaternion.Identity().normalized().matrix()
        end_pose.rotation = Quaternion(sqrt(2.) / 2., sqrt(2.) / 2., 0, 0).normalized().matrix()
        effL = SE3Curve(init_pose, end_pose, 0.5, 2.5)
        # add the trajectory to the contact phase :
        new = cp.addEffectorTrajectory("left-leg", effL)
        self.assertTrue(new)
        self.assertTrue(cp.effectorHaveAtrajectory("left-leg"))
        self.assertTrue("left-leg" in cp.effectorsWithTrajectory())
        self.assertEqual(cp.effectorTrajectory("left-leg"), effL)
        self.assertEqual(cp.effectorTrajectory("left-leg").min(), 0.5)
        self.assertEqual(cp.effectorTrajectory("left-leg").max(), 2.5)
        self.assertTrue(cp.effectorTrajectory("left-leg").evaluateAsSE3(0.5).isApprox(init_pose))
        self.assertTrue(cp.effectorTrajectory("left-leg").evaluateAsSE3(2.5).isApprox(end_pose))

        # check with piecewise SE3
        effH = piecewise_SE3(effL)
        end_pose2 = SE3.Identity()
        end_pose2.translation = array([-4.9, 0.8, 0.9])
        end_pose2.rotation = Quaternion(sqrt(2.) / 2., 0., sqrt(2.) / 2., 0).normalized().matrix()
        effH.append(end_pose2, 4.)
        new = cp.addEffectorTrajectory("hand", effH)
        self.assertTrue(new)
        self.assertTrue(cp.effectorHaveAtrajectory("left-leg"))
        self.assertTrue("left-leg" in cp.effectorsWithTrajectory())
        self.assertTrue(cp.effectorHaveAtrajectory("hand"))
        self.assertTrue("hand" in cp.effectorsWithTrajectory())
        self.assertEqual(cp.effectorTrajectory("left-leg"), effL)
        self.assertEqual(cp.effectorTrajectory("hand"), effH)
        self.assertEqual(cp.effectorTrajectory("hand").min(), 0.5)
        self.assertEqual(cp.effectorTrajectory("hand").max(), 4.)
        self.assertTrue(cp.effectorTrajectory("hand").evaluateAsSE3(0.5).isApprox(init_pose))
        self.assertTrue(cp.effectorTrajectory("hand").evaluateAsSE3(4).isApprox(end_pose2))

        # check that the getter returns a pointer to a non const object :
        end_pose3 = SE3.Identity()
        end_pose3.setRandom()
        cp.effectorTrajectory("hand").append(end_pose3, 6.5)
        self.assertEqual(cp.effectorTrajectory("hand").max(), 6.5)
        self.assertTrue(cp.effectorTrajectory("hand").evaluateAsSE3(6.5).isApprox(end_pose3))

        # a variable bound to the getter aliases the stored trajectory
        effH = cp.effectorTrajectory("hand")
        end_pose4 = SE3.Identity()
        end_pose4.setRandom()
        effH.append(end_pose4, 10.)
        self.assertEqual(cp.effectorTrajectory("hand").max(), 10.)
        self.assertTrue(cp.effectorTrajectory("hand").evaluateAsSE3(10.).isApprox(end_pose4))

        # check errors : effectors already in contact cannot get a trajectory
        with self.assertRaises(ValueError):
            cp.addEffectorTrajectory("right-leg", effL)

        # check that we cannot add other kind of trajectories than SE3 :
        waypoints = array([[1., 2., 3.], [4., 5., 6.]]).transpose()
        a = bezier(waypoints, 0., 1.)
        with self.assertRaises(BaseException):
            cp.addEffectorTrajectory("other-leg", a)

    def test_effector_trajectory_dict(self):
        """Check that ContactPhase.effectorTrajectories() returns a const snapshot map."""
        cp = ContactPhase(1.5, 3)
        p = SE3()
        p.setRandom()
        patchRF = ContactPatch(p, 0.5)
        cp.addContact("right-leg", patchRF)
        # create a SE3 trajectory :
        init_pose = SE3.Identity()
        end_pose = SE3.Identity()
        init_pose.translation = array([0.2, -0.7, 0.6])
        end_pose.translation = array([3.6, -2.2, -0.9])
        init_pose.rotation = Quaternion.Identity().normalized().matrix()
        end_pose.rotation = Quaternion(sqrt(2.) / 2., sqrt(2.) / 2., 0, 0).normalized().matrix()
        effL = SE3Curve(init_pose, end_pose, 0.5, 2.5)
        # add the trajectory to the contact phase :
        cp.addEffectorTrajectory("left-leg", effL)
        dict = cp.effectorTrajectories()
        self.assertEqual(len(dict.keys()), 1)
        self.assertTrue("left-leg" in dict.keys())
        self.assertEqual(dict["left-leg"], effL)
        self.assertEqual(dict["left-leg"].min(), 0.5)
        self.assertEqual(dict["left-leg"].max(), 2.5)
        self.assertTrue(dict["left-leg"].evaluateAsSE3(0.5).isApprox(init_pose))
        self.assertTrue(dict["left-leg"].evaluateAsSE3(2.5).isApprox(end_pose))

        # check that changing the dict doesn't change the contact phase:
        effH = piecewise_SE3(effL)
        end_pose2 = SE3.Identity()
        end_pose2.translation = array([-4.9, 0.8, 0.9])
        end_pose2.rotation = Quaternion(sqrt(2.) / 2., 0., sqrt(2.) / 2., 0).normalized().matrix()
        effH.append(end_pose2, 4.)
        dict.update({"hand": effH})
        self.assertFalse("hand" in cp.effectorTrajectories().keys())
        # check that the map is const
        cp.effectorTrajectories().update({"hand": effH})  # should not have any effect
        self.assertFalse("hand" in cp.effectorTrajectories().keys())

    def test_contact_force_trajectory(self):
        """Check add/get of 12D contact-force trajectories and their reference semantics."""
        # create phase and add two contacts
        cp = ContactPhase(1.5, 3)
        p = SE3()
        p.setRandom()
        cp.addContact("right-leg", ContactPatch(p, 0.5))
        p = SE3()
        p.setRandom()
        cp.addContact("left-leg", ContactPatch(p, 0.5))
        # create a polynomial 12D trajectory
        fR = createRandomPiecewisePolynomial(12)
        fL = createRandomPiecewisePolynomial(12)
        new = cp.addContactForceTrajectory("right-leg", fR)
        self.assertTrue(new)
        self.assertEqual(cp.contactForce("right-leg"), fR)
        self.assertEqual(cp.contactForce("right-leg").min(), 0)
        self.assertEqual(cp.contactForce("right-leg").max(), 2.)
        self.assertTrue(array_equal(cp.contactForce("right-leg")(0.5), fR(0.5)))
        self.assertTrue(array_equal(cp.contactForce("right-leg")(1.5), fR(1.5)))

        new = cp.addContactForceTrajectory("left-leg", fL)
        self.assertTrue(new)
        self.assertEqual(cp.contactForce("left-leg"), fL)
        self.assertEqual(cp.contactForce("left-leg").min(), 0)
        self.assertEqual(cp.contactForce("left-leg").max(), 2.)
        self.assertTrue(array_equal(cp.contactForce("left-leg")(0.5), fL(0.5)))
        self.assertTrue(array_equal(cp.contactForce("left-leg")(1.5), fL(1.5)))

        # re-adding for the same effector overwrites and returns False
        new = cp.addContactForceTrajectory("left-leg", fL)
        self.assertFalse(new)

        # check that the getter returns a pointer to a non const object :
        cp.contactForce("left-leg").append(np.random.rand(12, 1), 3.5)
        self.assertEqual(cp.contactForce("left-leg").max(), 3.5)

        pc = cp.contactForce("left-leg")
        pc.append(np.random.rand(12, 1), 6.)
        self.assertEqual(cp.contactForce("left-leg").max(), 6.)
        self.assertTrue(array_equal(cp.contactForce("left-leg")(6.), pc(6.)))

        # check errors : effector without contact cannot get a force trajectory
        with self.assertRaises(ValueError):
            cp.addContactForceTrajectory("hand", fL)

    def test_contact_force_trajectory_dict(self):
        """Check that ContactPhase.contactForces() returns a const snapshot map."""
        # create phase and add two contacts
        cp = ContactPhase(1.5, 3)
        p = SE3()
        p.setRandom()
        cp.addContact("right-leg", ContactPatch(p, 0.5))
        p = SE3()
        p.setRandom()
        cp.addContact("left-leg", ContactPatch(p, 0.5))
        # create a polynomial 12D trajectory
        fR = createRandomPiecewisePolynomial(12)
        fL = createRandomPiecewisePolynomial(12)
        cp.addContactForceTrajectory("right-leg", fR)
        dict = cp.contactForces()
        self.assertEqual(len(dict.keys()), 1)
        self.assertTrue("right-leg" in dict.keys())
        self.assertEqual(dict["right-leg"], fR)
        self.assertEqual(dict["right-leg"].min(), 0)
        self.assertEqual(dict["right-leg"].max(), 2.)
        self.assertTrue(array_equal(dict["right-leg"](0.5), fR(0.5)))
        self.assertTrue(array_equal(dict["right-leg"](1.5), fR(1.5)))

        # the previously fetched dict is a snapshot: it must not see the new entry
        cp.addContactForceTrajectory("left-leg", fL)
        self.assertEqual(len(dict.keys()), 1)
        self.assertTrue("right-leg" in dict.keys())
        self.assertFalse("left-leg" in dict.keys())
        dict = cp.contactForces()
        self.assertEqual(len(dict.keys()), 2)
        self.assertTrue("right-leg" in dict.keys())
        self.assertTrue("left-leg" in dict.keys())

        # check that changing the dict doesn't change the contact phase
        f2 = createRandomPiecewisePolynomial(12)
        dict.update({"hand": f2})
        self.assertFalse("hand" in cp.contactForces().keys())
        # check that the map is const
        cp.contactForces().update({"hand": f2})  # should not have any effect
        self.assertFalse("hand" in cp.contactForces().keys())

    def test_contact_normal_force_trajectory(self):
        """Check add/get of 1D normal-force trajectories and their reference semantics."""
        # create phase and add two contacts
        cp = ContactPhase(1.5, 3)
        p = SE3()
        p.setRandom()
        cp.addContact("right-leg", ContactPatch(p, 0.5))
        p = SE3()
        p.setRandom()
        cp.addContact("left-leg", ContactPatch(p, 0.5))
        # create a polynomial 1D trajectory (normal force is scalar-valued)
        fR = createRandomPiecewisePolynomial(1)
        fL = createRandomPiecewisePolynomial(1)
        new = cp.addContactNormalForceTrajectory("right-leg", fR)
        self.assertTrue(new)
        self.assertEqual(cp.contactNormalForce("right-leg"), fR)
        self.assertEqual(cp.contactNormalForce("right-leg").min(), 0)
        self.assertEqual(cp.contactNormalForce("right-leg").max(), 2.)
        self.assertTrue(array_equal(cp.contactNormalForce("right-leg")(0.5), fR(0.5)))
        self.assertTrue(array_equal(cp.contactNormalForce("right-leg")(1.5), fR(1.5)))

        new = cp.addContactNormalForceTrajectory("left-leg", fL)
        self.assertTrue(new)
        self.assertEqual(cp.contactNormalForce("left-leg"), fL)
        self.assertEqual(cp.contactNormalForce("left-leg").min(), 0)
        self.assertEqual(cp.contactNormalForce("left-leg").max(), 2.)
        self.assertTrue(array_equal(cp.contactNormalForce("left-leg")(0.5), fL(0.5)))
        self.assertTrue(array_equal(cp.contactNormalForce("left-leg")(1.5), fL(1.5)))

        # re-adding for the same effector overwrites and returns False
        new = cp.addContactNormalForceTrajectory("left-leg", fL)
        self.assertFalse(new)

        # check that the getter returns a pointer to a non const object :
        cp.contactNormalForce("left-leg").append(np.random.rand(1, 1), 3.5)
        self.assertEqual(cp.contactNormalForce("left-leg").max(), 3.5)

        pc = cp.contactNormalForce("left-leg")
        pc.append(np.random.rand(1, 1), 6.)
        self.assertEqual(cp.contactNormalForce("left-leg").max(), 6.)
        self.assertTrue(array_equal(cp.contactNormalForce("left-leg")(6.), pc(6.)))

        # check errors : no contact for this effector
        with self.assertRaises(ValueError):
            cp.addContactNormalForceTrajectory("hand", fL)

        # a normal-force trajectory must be 1D; a 3D curve is rejected
        fL = createRandomPiecewisePolynomial(3)
        with self.assertRaises(ValueError):
            cp.addContactNormalForceTrajectory("left-leg", fL)

    def test_contact_normal_force_trajectory_dict(self):
        """Check that ContactPhase.contactNormalForces() returns a const snapshot map."""
        # create phase and add two contacts
        cp = ContactPhase(1.5, 3)
        p = SE3()
        p.setRandom()
        cp.addContact("right-leg", ContactPatch(p, 0.5))
        p = SE3()
        p.setRandom()
        cp.addContact("left-leg", ContactPatch(p, 0.5))
        # create a polynomial 1D trajectory (normal force is scalar-valued)
        fR = createRandomPiecewisePolynomial(1)
        fL = createRandomPiecewisePolynomial(1)
        cp.addContactNormalForceTrajectory("right-leg", fR)
        dict = cp.contactNormalForces()
        self.assertEqual(len(dict.keys()), 1)
        self.assertTrue("right-leg" in dict.keys())
        self.assertEqual(dict["right-leg"], fR)
        self.assertEqual(dict["right-leg"].min(), 0)
        self.assertEqual(dict["right-leg"].max(), 2.)
        self.assertTrue(array_equal(dict["right-leg"](0.5), fR(0.5)))
        self.assertTrue(array_equal(dict["right-leg"](1.5), fR(1.5)))

        # the previously fetched dict is a snapshot: it must not see the new entry
        cp.addContactNormalForceTrajectory("left-leg", fL)
        self.assertEqual(len(dict.keys()), 1)
        self.assertTrue("right-leg" in dict.keys())
        self.assertFalse("left-leg" in dict.keys())
        dict = cp.contactNormalForces()
        self.assertEqual(len(dict.keys()), 2)
        self.assertTrue("right-leg" in dict.keys())
        self.assertTrue("left-leg" in dict.keys())

        # check that changing the dict doesn't change the contact phase
        f2 = createRandomPiecewisePolynomial(1)
        dict.update({"hand": f2})
        self.assertFalse("hand" in cp.contactNormalForces().keys())
        # check that the map is const
        cp.contactNormalForces().update({"hand": f2})  # should not have any effect
        self.assertFalse("hand" in cp.contactNormalForces().keys())

    def test_members_points(self):
        """Check default values, setters/getters and copy semantics of the point members."""
        cp = ContactPhase()
        # check default values :
        self.assertTrue(array_equal(np.zeros(3), cp.c_init))
        self.assertTrue(array_equal(np.zeros(3), cp.dc_init))
        self.assertTrue(array_equal(np.zeros(3), cp.ddc_init))
        self.assertTrue(array_equal(np.zeros(3), cp.L_init))
        self.assertTrue(array_equal(np.zeros(3), cp.dL_init))
        self.assertTrue(array_equal(np.zeros(3), cp.c_final))
        self.assertTrue(array_equal(np.zeros(3), cp.dc_final))
        self.assertTrue(array_equal(np.zeros(3), cp.ddc_final))
        self.assertTrue(array_equal(np.zeros(3), cp.L_final))
        self.assertTrue(array_equal(np.zeros(3), cp.dL_final))
        # set random values :
        c_init = np.random.rand(3)
        dc_init = np.random.rand(3)
        ddc_init = np.random.rand(3)
        L_init = np.random.rand(3)
        dL_init = np.random.rand(3)
        q_init = np.random.rand(35)
        c_final = np.random.rand(3)
        dc_final = np.random.rand(3)
        ddc_final = np.random.rand(3)
        L_final = np.random.rand(3)
        dL_final = np.random.rand(3)
        q_final = np.random.rand(35)
        cp.c_init = c_init
        cp.dc_init = dc_init
        cp.ddc_init = ddc_init
        cp.L_init = L_init
        cp.dL_init = dL_init
        cp.q_init = q_init
        cp.c_final = c_final
        cp.dc_final = dc_final
        cp.ddc_final = ddc_final
        cp.L_final = L_final
        cp.dL_final = dL_final
        cp.q_final = q_final
        self.assertTrue(array_equal(cp.c_init, c_init))
        self.assertTrue(array_equal(cp.dc_init, dc_init))
        self.assertTrue(array_equal(cp.ddc_init, ddc_init))
        self.assertTrue(array_equal(cp.L_init, L_init))
        self.assertTrue(array_equal(cp.dL_init, dL_init))
        self.assertTrue(array_equal(cp.q_init, q_init))
        self.assertTrue(array_equal(cp.c_final, c_final))
        self.assertTrue(array_equal(cp.dc_final, dc_final))
        self.assertTrue(array_equal(cp.ddc_final, ddc_final))
        self.assertTrue(array_equal(cp.L_final, L_final))
        self.assertTrue(array_equal(cp.dL_final, dL_final))
        self.assertTrue(array_equal(cp.q_final, q_final))
        # check that it's not a pointer :
        ci = cp.c_init
        ci = np.random.rand(3)
        self.assertFalse(array_equal(cp.c_init, ci))
        # it's a copy (limitation from eigenpy ...) :
        dc_init = cp.dc_init.copy()
        cp.dc_init += np.array([0.1, 0., -2.])  # this works as += calls the setter
        self.assertFalse(array_equal(cp.dc_init, dc_init))
        dc_init = cp.dc_init.copy()
        cp.dc_init[2] = 0.  # this line has no effect (item assignment mutates a returned copy)
        self.assertTrue(array_equal(cp.dc_init, dc_init))

        # check error due to incorrect dimensions :
        print("# Expected warning messages about dimension / column vector : ")
        with self.assertRaises(BaseException):
            cp.c_init = np.random.rand(4)
        with self.assertRaises(BaseException):
            cp.dc_init = np.random.rand(2)
        with self.assertRaises(BaseException):
            cp.ddc_init = np.random.rand(10)
        with self.assertRaises(BaseException):
            cp.L_init = np.random.rand(1)
        with self.assertRaises(BaseException):
            cp.dL_init = np.random.rand(4)
        with self.assertRaises(BaseException):
            cp.c_final = np.random.rand(5)
        with self.assertRaises(BaseException):
            cp.dc_final = np.random.rand(3, 2)
        with self.assertRaises(BaseException):
            cp.ddc_final = np.random.rand(3, 3)
        with self.assertRaises(BaseException):
            cp.L_final = np.random.rand(1, 2)
        with self.assertRaises(BaseException):
            cp.dL_final = np.random.rand(1, 3)
        print("# End of Expected warning messages.")

    def test_member_curves(self):
        """Check default (None) values, setters/getters and aliasing of the curve members."""
        cp = ContactPhase()
        # check default values :
        self.assertIsNone(cp.q_t)
        self.assertIsNone(cp.dq_t)
        self.assertIsNone(cp.ddq_t)
        self.assertIsNone(cp.tau_t)
        self.assertIsNone(cp.c_t)
        self.assertIsNone(cp.dc_t)
        self.assertIsNone(cp.ddc_t)
        self.assertIsNone(cp.L_t)
        self.assertIsNone(cp.dL_t)
        self.assertIsNone(cp.wrench_t)
        self.assertIsNone(cp.zmp_t)
        self.assertIsNone(cp.root_t)
        # build random trajectories :
        q = createRandomPiecewisePolynomial(31)
        dq = createRandomPiecewisePolynomial(30)
        ddq = createRandomPiecewisePolynomial(30)
        tau = createRandomPiecewisePolynomial(30)
        c = createRandomPiecewisePolynomial(3)
        dc = createRandomPiecewisePolynomial(3)
        ddc = createRandomPiecewisePolynomial(3)
        L = createRandomPiecewisePolynomial(3)
        dL = createRandomPiecewisePolynomial(3)
        wrench = createRandomPiecewisePolynomial(6)
        zmp = createRandomPiecewisePolynomial(3)
        root = createRandomSE3Traj()
        # assign trajectories :
        cp.q_t = q
        cp.dq_t = dq
        cp.ddq_t = ddq
        cp.tau_t = tau
        cp.c_t = c
        cp.dc_t = dc
        cp.ddc_t = ddc
        cp.L_t = L
        cp.dL_t = dL
        cp.wrench_t = wrench
        cp.zmp_t = zmp
        cp.root_t = root
        # check getter :
        self.assertEqual(cp.q_t, q)
        self.assertEqual(cp.dq_t, dq)
        self.assertEqual(cp.ddq_t, ddq)
        self.assertEqual(cp.tau_t, tau)
        self.assertEqual(cp.c_t, c)
        self.assertEqual(cp.dc_t, dc)
        self.assertEqual(cp.ddc_t, ddc)
        self.assertEqual(cp.L_t, L)
        self.assertEqual(cp.dL_t, dL)
        self.assertEqual(cp.wrench_t, wrench)
        self.assertEqual(cp.zmp_t, zmp)
        self.assertEqual(cp.root_t, root)
        for t in np.linspace(0., 2., 10):
            self.assertTrue(array_equal(cp.q_t(t), q(t)))
            self.assertTrue(array_equal(cp.dq_t(t), dq(t)))
            self.assertTrue(array_equal(cp.ddq_t(t), ddq(t)))
            self.assertTrue(array_equal(cp.tau_t(t), tau(t)))
            self.assertTrue(array_equal(cp.c_t(t), c(t)))
            self.assertTrue(array_equal(cp.dc_t(t), dc(t)))
            self.assertTrue(array_equal(cp.ddc_t(t), ddc(t)))
            self.assertTrue(array_equal(cp.L_t(t), L(t)))
            self.assertTrue(array_equal(cp.dL_t(t), dL(t)))
            self.assertTrue(array_equal(cp.wrench_t(t), wrench(t)))
            self.assertTrue(array_equal(cp.zmp_t(t), zmp(t)))
            self.assertTrue(array_equal(cp.root_t(t), root(t)))
            self.assertEqual(cp.root_t.evaluateAsSE3(t), root.evaluateAsSE3(t))

        # check that deleting python variables doesn't delete members after assignement:
        del q
        self.assertIsNotNone(cp.q_t)
        self.assertEqual(cp.q_t.min(), 0)
        self.assertEqual(cp.q_t.max(), 2)
        self.assertIsNotNone(cp.q_t(1.))
        c = None
        self.assertIsNotNone(cp.c_t)
        self.assertEqual(cp.c_t.min(), 0)
        self.assertEqual(cp.c_t.max(), 2)
        self.assertIsNotNone(cp.c_t(1.))
        # check that curve has not been copied and that it's the same pointer
        dc.append(np.random.rand(3, 1), 3.5)
        self.assertEqual(cp.dc_t.min(), 0)
        self.assertEqual(cp.dc_t.max(), 3.5)
        self.assertEqual(cp.dc_t, dc)
        # check that the return of the getter is not const :
        cp.dq_t.append(np.random.rand(30, 1), 4.)
        self.assertEqual(cp.dq_t.min(), 0)
        self.assertEqual(cp.dq_t.max(), 4.)
        self.assertEqual(cp.dq_t, dq)

    def test_operator_equal(self):
        """Check ContactPhase == / != over timings, point members and curve members."""
        cp1 = ContactPhase()
        cp2 = ContactPhase()
        # check timings
        self.assertTrue(cp1 == cp2)
        cp1.timeInitial = 1.
        self.assertTrue(cp1 != cp2)
        cp2.timeInitial = 1.
        self.assertTrue(cp1 == cp2)
        cp1.timeFinal = 3.5
        self.assertTrue(cp1 != cp2)
        cp2.duration = 2.5
        self.assertTrue(cp1 == cp2)
        # check public members :
        # points :
        c_init = np.random.rand(3)
        dc_init = np.random.rand(3)
        ddc_init = np.random.rand(3)
        L_init = np.random.rand(3)
        dL_init = np.random.rand(3)
        q_init = np.random.rand(35)
        c_final = np.random.rand(3)
        dc_final = np.random.rand(3)
        ddc_final = np.random.rand(3)
        L_final = np.random.rand(3)
        dL_final = np.random.rand(3)
        q_final = np.random.rand(35)
        cp1.c_init = c_init
        self.assertTrue(cp1 != cp2)
        cp2.c_init = c_init
        self.assertTrue(cp1 == cp2)
        cp1.dc_init = dc_init
        self.assertTrue(cp1 != cp2)
        cp2.dc_init = dc_init
        self.assertTrue(cp1 == cp2)
        cp1.ddc_init = ddc_init
        self.assertTrue(cp1 != cp2)
        cp2.ddc_init = ddc_init
        self.assertTrue(cp1 == cp2)
        cp1.L_init = L_init
        self.assertTrue(cp1 != cp2)
        cp2.L_init = L_init
        self.assertTrue(cp1 == cp2)
        cp1.dL_init = dL_init
        self.assertTrue(cp1 != cp2)
        cp2.dL_init = dL_init
        self.assertTrue(cp1 == cp2)
        cp1.q_init = q_init
        self.assertTrue(cp1 != cp2)
        cp2.q_init = q_init
        self.assertTrue(cp1 == cp2)
        # equality must hold by value, not identity: assign copies
        cp1.c_final = c_final
        self.assertTrue(cp1 != cp2)
        cp2.c_final = c_final.copy()
        self.assertTrue(cp1 == cp2)
        cp1.dc_final = dc_final
        self.assertTrue(cp1 != cp2)
        cp2.dc_final = dc_final.copy()
        self.assertTrue(cp1 == cp2)
        cp1.ddc_final = ddc_final
        self.assertTrue(cp1 != cp2)
        cp2.ddc_final = ddc_final.copy()
        self.assertTrue(cp1 == cp2)
        cp1.L_final = L_final
        self.assertTrue(cp1 != cp2)
        L_final2 = np.array(L_final)
        cp2.L_final = L_final2
        self.assertTrue(cp1 == cp2)
        cp1.dL_final = dL_final
        self.assertTrue(cp1 != cp2)
        dL_final2 = np.array(dL_final)
        cp2.dL_final = dL_final2
        self.assertTrue(cp1 == cp2)
        cp1.q_final = q_final
        self.assertTrue(cp1 != cp2)
        cp2.q_final = q_final
        self.assertTrue(cp1 == cp2)
        # curves :
        q = createRandomPiecewisePolynomial(31)
        dq = createRandomPiecewisePolynomial(30)
        ddq = createRandomPiecewisePolynomial(30)
        tau = createRandomPiecewisePolynomial(30)
        dc = createRandomPiecewisePolynomial(3)
        ddc = createRandomPiecewisePolynomial(3)
        L = createRandomPiecewisePolynomial(3)
        dL = createRandomPiecewisePolynomial(3)
        wrench = createRandomPiecewisePolynomial(6)
        zmp = createRandomPiecewisePolynomial(3)
        root = createRandomSE3Traj()
        coefs = np.random.rand(3, 7)  # 3D polynomial with 7 coefficients
        c1 = polynomial(coefs, 0, 2)
        c2 = polynomial(coefs, 0, 2)
        # assign trajectories :
        cp1.q_t = q
        self.assertTrue(cp1 != cp2)
        cp2.q_t = q
        self.assertTrue(cp1 == cp2)
        cp1.dq_t = dq
        self.assertTrue(cp1 != cp2)
        cp2.dq_t = dq
        self.assertTrue(cp1 == cp2)
        cp1.ddq_t = ddq
        self.assertTrue(cp1 != cp2)
        cp2.ddq_t = ddq
        self.assertTrue(cp1 == cp2)
        cp1.tau_t = tau
        self.assertTrue(cp1 != cp2)
        cp2.tau_t = tau
        self.assertTrue(cp1 == cp2)
        # two distinct curve objects with identical coefficients compare equal
        cp1.c_t = c1
        self.assertTrue(cp1 != cp2)
        cp2.c_t = c2
        self.assertTrue(cp1 == cp2)
        cp1.dc_t = dc
        self.assertTrue(cp1 != cp2)
        dc2 = dc
        cp2.dc_t = dc2
        self.assertTrue(cp1 == cp2)
        cp1.ddc_t = ddc
        self.assertTrue(cp1 != cp2)
        cp2.ddc_t = ddc
        self.assertTrue(cp1 == cp2)
        cp1.L_t = L
        self.assertTrue(cp1 != cp2)
        cp2.L_t = L
        self.assertTrue(cp1 == cp2)
        cp1.dL_t = dL
        self.assertTrue(cp1 != cp2)
        cp2.dL_t = dL
        self.assertTrue(cp1 == cp2)
        cp1.wrench_t = wrench
        self.assertTrue(cp1 != cp2)
        cp2.wrench_t = wrench
        self.assertTrue(cp1 == cp2)
        cp1.zmp_t = zmp
        self.assertTrue(cp1 != cp2)
        cp2.zmp_t = zmp
        self.assertTrue(cp1 == cp2)
        cp1.root_t = root
        self.assertTrue(cp1 != cp2)
        cp2.root_t = root
        self.assertTrue(cp1 == cp2)

        # test contacts
        p = SE3()
        p.setRandom()
        patchRF = ContactPatch(p, 0.5)
        cp1.addContact("right-leg", patchRF)
        self.assertTrue(cp1 != cp2)
        cp2.addContact("right-leg2", patchRF)
        self.assertTrue(cp1 != cp2)
        cp2.addContact("right-leg", patchRF)
        self.assertTrue(cp1 != cp2)
        cp2.removeContact("right-leg2")
        self.assertTrue(cp1 == cp2)
        p = SE3()
        p.setRandom()
        patchLF = ContactPatch(p, 0.5)
        patchLF2 = ContactPatch(p)  # same placement, default friction -> not equal
        cp1.addContact("left-leg", patchLF)
        self.assertFalse(cp1 == cp2)
        cp2.addContact("left-leg", patchLF2)
        self.assertFalse(cp1 == cp2)
        cp2.removeContact("left-leg")
        cp2.addContact("left-leg", patchLF.copy())
        self.assertFalse(cp1 != cp2)

        # test force trajectories :
        fR = createRandomPiecewisePolynomial(12)
        fL = createRandomPiecewisePolynomial(12)
        fL2 = createRandomPiecewisePolynomial(12)
        cp1.addContactForceTrajectory("right-leg", fR)
        self.assertTrue(cp1 != cp2)
        cp2.addContactForceTrajectory("right-leg", fR)
        self.assertTrue(cp1 == cp2)
        cp1.addContactForceTrajectory("left-leg", fL)
        self.assertTrue(cp1 != cp2)
        cp2.addContactForceTrajectory("left-leg", fL2)
        self.assertTrue(cp1 != cp2)
        cp2.addContactForceTrajectory("left-leg", fL)
        self.assertTrue(cp1 == cp2)
        fR = createRandomPiecewisePolynomial(1)
        fL = createRandomPiecewisePolynomial(1)
        fL2 = createRandomPiecewisePolynomial(1)
        cp1.addContactNormalForceTrajectory("right-leg", fR)
        self.assertTrue(cp1 != cp2)
        cp2.addContactNormalForceTrajectory("right-leg", fR)
        self.assertTrue(cp1 == cp2)
        cp1.addContactNormalForceTrajectory("left-leg", fL)
        self.assertTrue(cp1 != cp2)
        cp2.addContactNormalForceTrajectory("left-leg", fL2)
        self.assertTrue(cp1 != cp2)
        cp2.addContactNormalForceTrajectory("left-leg", fL)
        self.assertTrue(cp1 == cp2)
        # test effector trajectories :
        fR = createRandomSE3Traj()
        fL = createRandomSE3Traj()
        fL2 = createRandomSE3Traj()
        cp1.addEffectorTrajectory("right-hand", fR)
        self.assertTrue(cp1 != cp2)
        cp2.addEffectorTrajectory("right-hand", fR)
        self.assertTrue(cp1 == cp2)
        cp1.addEffectorTrajectory("left-hand", fL)
        self.assertTrue(cp1 != cp2)
        cp2.addEffectorTrajectory("left-hand", fL2)
        self.assertTrue(cp1 != cp2)
        cp2.addEffectorTrajectory("left-hand", fL)
        self.assertTrue(cp1 == cp2)

    def test_copy_constructor(self):
        """Copy constructor and copy() must produce phases equal to the original."""
        cp1 = buildRandomContactPhase(0., 2.)
        cp2 = ContactPhase(cp1)
        cp3 = cp1.copy()
        self.assertEqual(cp1, cp2)
        self.assertEqual(cp1, cp3)

    def test_contact_phase_serialization_no_timing(self):
        """Round-trip a phase without timings through text/binary/XML/pickle."""
        cp1 = ContactPhase()
        addRandomPointsValues(cp1)
        cp1.saveAsText("cp_test.txt")
        cp_txt = ContactPhase()
        cp_txt.loadFromText("cp_test.txt")
        self.assertEqual(cp1, cp_txt)
        cp1.saveAsBinary("cp_test")
        cp_bin = ContactPhase()
        cp_bin.loadFromBinary("cp_test")
        self.assertEqual(cp1, cp_bin)
        cp1.saveAsXML("cp_test.xml", 'ContactPhase')
        cp_xml = ContactPhase()
        cp_xml.loadFromXML("cp_test.xml", 'ContactPhase')
        self.assertEqual(cp1, cp_xml)
        cp_pickled = pickle.dumps(cp1)
        cp_from_pickle = pickle.loads(cp_pickled)
        self.assertEqual(cp1, cp_from_pickle)

    def test_contact_phase_serialization_full(self):
        """Round-trip a fully populated phase through text/binary/XML/pickle."""
        cp1 = buildRandomContactPhase(0., 2.)
        cp1.saveAsText("cp_test_full.txt")
        cp_txt = ContactPhase()
        cp_txt.loadFromText("cp_test_full.txt")
        self.assertEqual(cp1, cp_txt)
        cp1.saveAsBinary("cp_test_full")
        cp_bin = ContactPhase()
        cp_bin.loadFromBinary("cp_test_full")
        self.assertEqual(cp1, cp_bin)
        cp1.saveAsXML("cp_test_full.xml", 'ContactPhase')
        cp_xml = ContactPhase()
        cp_xml.loadFromXML("cp_test_full.xml", 'ContactPhase')
        self.assertEqual(cp1, cp_xml)
        # TODO : check serialization from another file
        cp_pickled = pickle.dumps(cp1)
        cp_from_pickle = pickle.loads(cp_pickled)
        self.assertEqual(cp1, cp_from_pickle)

    def test_contact_phase_contacts_variation(self):
        """Check getContactsBroken / getContactsCreated / getContactsRepositioned / getContactsVariations."""
        # # contacts repositioned :
        cp1 = buildRandomContactPhase()
        cp2 = buildRandomContactPhase()
        repo = cp1.getContactsRepositioned(cp2)
        self.assertTrue(len(repo) == 2)
        self.assertTrue(repo[0] == "right-leg")
        self.assertTrue(repo[1] == "left-leg")
        repo1 = cp2.getContactsRepositioned(cp1)
        self.assertTrue(len(repo1) == 2)
        self.assertTrue(repo1[0] == "right-leg")
        self.assertTrue(repo1[1] == "left-leg")
        vars = cp1.getContactsVariations(cp2)
        self.assertTrue(len(vars) == 2)

        # # contacts broken :

        RH_placement = SE3.Identity()
        RH_placement.setRandom()
        RH_patch = ContactPatch(RH_placement)
        cp3 = ContactPhase()
        cp3.addContact("right-leg", RH_patch)
        p = SE3.Identity()
        p.setRandom()
        cp3.addContact("left-leg", ContactPatch(p))
        cp4 = ContactPhase()
        cp4.addContact("right-leg", RH_patch)
        broken = cp3.getContactsBroken(cp4)
        self.assertTrue(len(broken) == 1)
        self.assertTrue(broken[0] == "left-leg")
        broken1 = cp4.getContactsBroken(cp3)
        self.assertTrue(len(broken1) == 0)

        created = cp4.getContactsCreated(cp3)
        self.assertTrue(len(created) == 1)
        self.assertTrue(created[0] == "left-leg")
        created1 = cp3.getContactsCreated(cp4)
        self.assertTrue(len(created1) == 0)

        vars = cp3.getContactsVariations(cp4)
        self.assertTrue(len(vars) == 1)
        self.assertTrue(vars[0] == "left-leg")
        vars = cp4.getContactsVariations(cp3)
        self.assertTrue(len(vars) == 1)
        self.assertTrue(vars[0] == "left-leg")

    def test_com_trajectory_helper(self):
        """Check set{COM,AM,Joints}trajectoryFromPoints interpolate through the given points."""
        N = 7
        points = array(random.rand(3, N))
        points_derivative = array(random.rand(3, N))
        points_second_derivative = array(random.rand(3, N))
        time_points = array(random.rand(1, N)).T
        time_points.sort(0)
        cp = ContactPhase()
        cp.setCOMtrajectoryFromPoints(points, points_derivative, points_second_derivative, time_points)
        self.assertEqual(cp.c_t.min(), time_points[0])
        self.assertEqual(cp.c_t.max(), time_points[-1])
        self.assertEqual(cp.dc_t.dim(), 3)
        for i in range(N):
            self.assertTrue(isclose(cp.c_t(time_points[i, 0]), points[:, i]).all())
            self.assertTrue(isclose(cp.dc_t(time_points[i, 0]), points_derivative[:, i]).all())
            self.assertTrue(isclose(cp.ddc_t(time_points[i, 0]), points_second_derivative[:, i]).all())

        cp.setAMtrajectoryFromPoints(points, points_derivative, time_points)
        for i in range(N):
            self.assertTrue(isclose(cp.L_t(time_points[i, 0]), points[:, i]).all())
            self.assertTrue(isclose(cp.dL_t(time_points[i, 0]), points_derivative[:, i]).all())

        cp.setJointsTrajectoryFromPoints(points, points_derivative, points_second_derivative, time_points)
        for i in range(N):
            self.assertTrue(isclose(cp.q_t(time_points[i, 0]), points[:, i]).all())
            self.assertTrue(isclose(cp.dq_t(time_points[i, 0]), points_derivative[:, i]).all())
            self.assertTrue(isclose(cp.ddq_t(time_points[i, 0]), points_second_derivative[:, i]).all())


class ContactSequenceTest(unittest.TestCase):
    """Tests for the ContactSequence bindings (container of ContactPhase)."""

    def test_append(self):
        """append() must return the index of the added phase and grow the sequence."""
        cs = ContactSequence(0)
        self.assertTrue(cs.size() == 0)
        cp0 = buildRandomContactPhase(0, 2)
        cp1 = buildRandomContactPhase(2, 4.)
        id = cs.append(cp0)
        self.assertTrue(cs.size() == 1)
        self.assertTrue(id == 0)
        self.assertTrue(cs.contactPhases[0] == cp0)
        id = cs.append(cp1)
        self.assertTrue(cs.size() == 2)
        self.assertTrue(id == 1)
        self.assertTrue(cs.contactPhases[0] == cp0)
        self.assertTrue(cs.contactPhases[1] == cp1)

    """ # test copied from c++, but the same behaviour cannot be obtained in python
    def test_accessor_phase_vector(self):
        cs = ContactSequence(0)
        cp0 = buildRandomContactPhase(0,2)
        cp1 = buildRandomContactPhase(2,4.)
        cs.append(cp0)
        cs.append(cp1)
        phases = cs.contactPhases()
        self.assertTrue(type(phases) is list)
        self.assertTrue(len(phases) == 2)
        self.assertTrue(phases[0] == cp0)
        self.assertTrue(phases[1] == cp1)

        # check that the accessor to contactPhases() create a copy :
        cp2 = buildRandomContactPhase(0,2)
        phases += [cp2]
        self.assertTrue(len(phases) == 3)
        self.assertTrue(cs.size() == 2 ) # original contact sequence should not be modified
        phases[1].duration = 3.
        self.assertTrue(cs.contactPhases[1] == cp1) # original contact sequence should not be modified
    """

    def test_accessor_phase_reference(self):
        """cs.contactPhases[i] must behave as a mutable reference to the stored phase."""
        cs = ContactSequence(0)
        cp0 = buildRandomContactPhase(0, 2)
        cp1 = buildRandomContactPhase(2, 4.)
        cp2 = buildRandomContactPhase(2, 4.)
        cs.append(cp0)
        cs.append(cp1)
        cs.contactPhases[1].timeFinal = 10.
        self.assertTrue(cs.contactPhases[1] != cp1)
        self.assertTrue(cs.contactPhases[1].timeFinal == 10.)

        cs.contactPhases[0] = cp2
        self.assertTrue(cs.contactPhases[0] == cp2)

        # try with a variable :
        cp_ref = cs.contactPhases[0]
        c_init = np.random.rand(3)
        cp_ref.c_init = c_init
        cp_ref.duration = 10
        self.assertTrue(cs.contactPhases[0].duration == 10)
        self.assertTrue(array_equal(cs.contactPhases[0].c_init, c_init))

    def test_constructor_with_size(self):
        """ContactSequence(n) pre-fills with n default phases that are mutable by reference."""
        cp_default = ContactPhase()
        cs = ContactSequence(3)
        self.assertTrue(cs.size() == 3)
        for i in range(3):
            self.assertTrue(cs.contactPhases[i] == cp_default)

        # try to modify the uninitialized contact phase inside the sequence from the reference
        cp_0 = cs.contactPhases[0]
        c_init = np.random.rand(3)
        cp_0.c_init = c_init
        cp_0.duration = 10
        self.assertTrue(cs.contactPhases[0] != cp_default)
        self.assertTrue(cs.contactPhases[0].duration == 10)
        self.assertTrue(array_equal(cs.contactPhases[0].c_init, c_init))

        cp1 = buildRandomContactPhase(2, 4.)
        cs.contactPhases[1] = cp1
        self.assertTrue(cs.contactPhases[1] == cp1)

    def test_resize(self):
        """resize() must truncate or pad with default phases, keeping existing ones intact."""
        cp_default = ContactPhase()
        cs = ContactSequence(3)
        cp_0 = cs.contactPhases[0]
        c_init = np.random.rand(3)
        cp_0.c_init = c_init
        cp_0.duration = 10
        cp1 = buildRandomContactPhase(2, 4.)
        cs.contactPhases[1] = cp1
        # with smaller value than current :
        cs.resize(1)
        self.assertTrue(cs.size() == 1)
self.assertTrue(cs.contactPhases[0].duration == 10)\n self.assertTrue(array_equal(cs.contactPhases[0].c_init, c_init))\n\n # check with greater size than current :\n cs.resize(4)\n self.assertTrue(cs.size() == 4)\n self.assertTrue(cs.contactPhases[0].duration == 10)\n self.assertTrue(array_equal(cs.contactPhases[0].c_init, c_init))\n for i in range(1, 4):\n self.assertTrue(cs.contactPhases[i] == cp_default)\n\n def test_operator_equal(self):\n cs3 = ContactSequence()\n cs4 = ContactSequence()\n\n self.assertTrue(cs3 == cs4)\n cp3_0 = buildRandomContactPhase(0., 2.)\n cs3.append(cp3_0)\n self.assertTrue(cs3 != cs4)\n self.assertFalse(cs3 == cs4)\n cs4.append(cp3_0)\n self.assertTrue(cs3 == cs4)\n cp3_1 = buildRandomContactPhase(0., 2.)\n cs3.append(cp3_1)\n self.assertTrue(cs3 != cs4)\n cs4.append(cp3_1)\n self.assertTrue(cs3 == cs4)\n cs4.contactPhases[1].duration = 10\n self.assertTrue(cs4.contactPhases[1] != cp3_1)\n self.assertTrue(cs3 != cs4)\n cs5 = ContactSequence(2)\n cs5.contactPhases[0] = cp3_0\n self.assertTrue(cs3 != cs5)\n cs5.contactPhases[1] = cp3_1\n self.assertTrue(cs3 == cs5)\n\n def test_copy_constructor(self):\n cs = ContactSequence()\n for i in range(10):\n cp = buildRandomContactPhase(0., 2.)\n cs.append(cp)\n self.assertTrue(cs.size() == 10)\n\n cs1 = ContactSequence(cs)\n self.assertTrue(cs == cs1)\n for i in range(10):\n self.assertTrue(cs.contactPhases[i] == cs1.contactPhases[i])\n\n # check that it's a copy and not the same object :\n cs.contactPhases[0].duration = 15.\n self.assertFalse(cs == cs1)\n self.assertFalse(cs.contactPhases[0] == cs1.contactPhases[0])\n\n def test_serialization(self):\n cs = ContactSequence()\n for i in range(10):\n cp = buildRandomContactPhase(0., 2.)\n cs.append(cp)\n\n cs.saveAsText(\"cs_test_full.txt\")\n cs_txt = ContactSequence()\n cs_txt.loadFromText(\"cs_test_full.txt\")\n self.assertEqual(cs, cs_txt)\n cs.saveAsBinary(\"cs_test_full\")\n cs_bin = ContactSequence()\n 
cs_bin.loadFromBinary(\"cs_test_full\")\n self.assertEqual(cs, cs_bin)\n cs.saveAsXML(\"cs_test_full.xml\", 'ContactSequence')\n cs_xml = ContactSequence()\n cs_xml.loadFromXML(\"cs_test_full.xml\", 'ContactPatch')\n self.assertEqual(cs, cs_xml)\n\n def test_contact_sequence_helpers(self):\n cs1 = ContactSequence()\n self.assertTrue(cs1.size() == 0)\n cp0 = buildRandomContactPhase(0, 2)\n cp1 = buildRandomContactPhase(2, 4.)\n cs1.append(cp0)\n cs1.append(cp1)\n # # test break contact :\n self.assertTrue(cs1.size() == 2)\n cs1.breakContact(\"left-leg\")\n self.assertTrue(cs1.size() == 3)\n self.assertFalse(cs1.contactPhases[2].isEffectorInContact(\"left-leg\"))\n self.assertTrue(\n cs1.contactPhases[1].timeFinal == 4.) # time final of previous phase should not have been modified\n # check that the final value of the previous phase have been copied in the initial value of the new one\n self.assertTrue(array_equal(cs1.contactPhases[1].c_final, cs1.contactPhases[2].c_init))\n self.assertTrue(array_equal(cs1.contactPhases[1].dc_final, cs1.contactPhases[2].dc_init))\n self.assertTrue(array_equal(cs1.contactPhases[1].ddc_final, cs1.contactPhases[2].ddc_init))\n self.assertTrue(array_equal(cs1.contactPhases[1].L_final, cs1.contactPhases[2].L_init))\n self.assertTrue(array_equal(cs1.contactPhases[1].dL_final, cs1.contactPhases[2].dL_init))\n self.assertTrue(array_equal(cs1.contactPhases[1].q_final, cs1.contactPhases[2].q_init))\n self.assertTrue(cs1.contactPhases[1].timeFinal == cs1.contactPhases[2].timeInitial)\n # check that the other contactPatch have been copied :\n self.assertTrue(\n cs1.contactPhases[1].contactPatch(\"right-leg\") == cs1.contactPhases[2].contactPatch(\"right-leg\"))\n\n # # test create contact :\n placement_random = SE3.Identity()\n placement_random.setRandom()\n target = ContactPatch(placement_random)\n cs1.createContact(\"left-leg\", target, 2.5)\n self.assertTrue(cs1.size() == 4)\n self.assertTrue(\n cs1.contactPhases[2].timeFinal == 6.5) # time 
final of previous phase should have been modified\n self.assertTrue(cs1.contactPhases[3].contactPatch(\"left-leg\") == target)\n # check that the final value of the previous phase have been copied in the initial value of the new one\n self.assertTrue(array_equal(cs1.contactPhases[2].c_final, cs1.contactPhases[3].c_init))\n self.assertTrue(array_equal(cs1.contactPhases[2].dc_final, cs1.contactPhases[3].dc_init))\n self.assertTrue(array_equal(cs1.contactPhases[2].ddc_final, cs1.contactPhases[3].ddc_init))\n self.assertTrue(array_equal(cs1.contactPhases[2].L_final, cs1.contactPhases[3].L_init))\n self.assertTrue(array_equal(cs1.contactPhases[2].dL_final, cs1.contactPhases[3].dL_init))\n self.assertTrue(array_equal(cs1.contactPhases[2].q_final, cs1.contactPhases[3].q_init))\n self.assertTrue(cs1.contactPhases[2].timeFinal == cs1.contactPhases[3].timeInitial)\n # check that the other contactPatch have been copied :\n self.assertTrue(\n cs1.contactPhases[2].contactPatch(\"right-leg\") == cs1.contactPhases[3].contactPatch(\"right-leg\"))\n\n # # test break with duration :\n cs1.breakContact(\"left-leg\", 1.)\n self.assertTrue(cs1.size() == 5)\n self.assertFalse(cs1.contactPhases[4].isEffectorInContact(\"left-leg\"))\n self.assertTrue(\n cs1.contactPhases[3].timeFinal == 7.5) # time final of previous phase should have been modified\n\n # # test create contact with no duration:\n cs1.contactPhases[4].duration = 1.\n self.assertTrue(\n cs1.contactPhases[4].timeFinal == 8.5) # time final of previous phase should have been modified\n placement_random.setRandom()\n target = ContactPatch(placement_random)\n cs1.createContact(\"left-leg\", target)\n self.assertTrue(cs1.size() == 6)\n self.assertTrue(\n cs1.contactPhases[4].timeFinal == 8.5) # time final of previous phase should have been modified\n self.assertTrue(\n cs1.contactPhases[5].timeInitial == 8.5) # time final of previous phase should have been modified\n\n # # test move effector to placement :\n target_placement = 
SE3.Identity()\n target_placement.setRandom()\n addRandomPointsValues(cs1.contactPhases[5])\n cs1.contactPhases[5].contactPatch(\"right-leg\").friction = 2.\n cs1.moveEffectorToPlacement(\"right-leg\", target_placement, 1., 1.5)\n self.assertTrue(cs1.size() == 8)\n self.assertFalse(cs1.contactPhases[6].isEffectorInContact(\"right-leg\"))\n self.assertTrue(cs1.contactPhases[7].isEffectorInContact(\"right-leg\"))\n self.assertTrue(cs1.contactPhases[7].contactPatch(\"right-leg\").placement == target_placement)\n # check that previous patch have not been modified :\n self.assertTrue(cs1.contactPhases[5].contactPatch(\"right-leg\").placement != target_placement)\n self.assertTrue(cs1.contactPhases[7].contactPatch(\"right-leg\").friction == 2.)\n self.assertTrue(cs1.contactPhases[5].timeFinal == 9.5)\n self.assertTrue(cs1.contactPhases[6].timeInitial == 9.5)\n self.assertTrue(cs1.contactPhases[6].timeFinal == 11.)\n self.assertTrue(cs1.contactPhases[7].timeInitial == 11.)\n # check that the final value of the previous phase have been copied in the initial value of the new one\n self.assertTrue(array_equal(cs1.contactPhases[5].c_final, cs1.contactPhases[6].c_init))\n self.assertTrue(array_equal(cs1.contactPhases[5].dc_final, cs1.contactPhases[6].dc_init))\n self.assertTrue(array_equal(cs1.contactPhases[5].ddc_final, cs1.contactPhases[6].ddc_init))\n self.assertTrue(array_equal(cs1.contactPhases[5].L_final, cs1.contactPhases[6].L_init))\n self.assertTrue(array_equal(cs1.contactPhases[5].dL_final, cs1.contactPhases[6].dL_init))\n self.assertTrue(array_equal(cs1.contactPhases[5].q_final, cs1.contactPhases[6].q_init))\n # with MoveEffector, the middle phase should have the same initial and final point :\n self.assertTrue(array_equal(cs1.contactPhases[6].c_final, cs1.contactPhases[6].c_init))\n self.assertTrue(array_equal(cs1.contactPhases[6].dc_final, cs1.contactPhases[6].dc_init))\n self.assertTrue(array_equal(cs1.contactPhases[6].ddc_final, cs1.contactPhases[6].ddc_init))\n 
self.assertTrue(array_equal(cs1.contactPhases[6].L_final, cs1.contactPhases[6].L_init))\n self.assertTrue(array_equal(cs1.contactPhases[6].dL_final, cs1.contactPhases[6].dL_init))\n self.assertTrue(array_equal(cs1.contactPhases[6].q_final, cs1.contactPhases[6].q_init))\n # check that the final value of the previous phase have been copied in the initial value of the new one\n self.assertTrue(array_equal(cs1.contactPhases[6].c_final, cs1.contactPhases[7].c_init))\n self.assertTrue(array_equal(cs1.contactPhases[6].dc_final, cs1.contactPhases[7].dc_init))\n self.assertTrue(array_equal(cs1.contactPhases[6].ddc_final, cs1.contactPhases[7].ddc_init))\n self.assertTrue(array_equal(cs1.contactPhases[6].L_final, cs1.contactPhases[7].L_init))\n self.assertTrue(array_equal(cs1.contactPhases[6].dL_final, cs1.contactPhases[7].dL_init))\n self.assertTrue(array_equal(cs1.contactPhases[6].q_final, cs1.contactPhases[7].q_init))\n # check that the other contactPatch have been copied :\n self.assertTrue(cs1.contactPhases[5].contactPatch(\"left-leg\") == cs1.contactPhases[6].contactPatch(\"left-leg\"))\n self.assertTrue(cs1.contactPhases[6].contactPatch(\"left-leg\") == cs1.contactPhases[7].contactPatch(\"left-leg\"))\n\n # # test move effector of:\n target_transform = SE3.Identity()\n target_transform.setRandom()\n cs1.contactPhases[7].contactPatch(\"left-leg\").friction = 10.\n cs1.moveEffectorOf(\"left-leg\", target_transform, 1., 1.5)\n self.assertTrue(cs1.size() == 10)\n self.assertFalse(cs1.contactPhases[8].isEffectorInContact(\"left-leg\"))\n self.assertTrue(cs1.contactPhases[9].isEffectorInContact(\"left-leg\"))\n target_placement = target_transform.act(cs1.contactPhases[7].contactPatch(\"left-leg\").placement)\n self.assertTrue(cs1.contactPhases[9].contactPatch(\"left-leg\").placement == target_placement)\n self.assertTrue(cs1.contactPhases[9].contactPatch(\"left-leg\").friction == 10.)\n # check that the other contactPatch have been copied :\n self.assertTrue(\n 
cs1.contactPhases[7].contactPatch(\"right-leg\") == cs1.contactPhases[8].contactPatch(\"right-leg\"))\n self.assertTrue(\n cs1.contactPhases[8].contactPatch(\"right-leg\") == cs1.contactPhases[9].contactPatch(\"right-leg\"))\n\n def test_contact_sequence_helpers_errors(self):\n cs1 = ContactSequence()\n self.assertTrue(cs1.size() == 0)\n cp0 = buildRandomContactPhase(0, 2)\n cp1 = buildRandomContactPhase(2, 4.)\n cp1.removeContact(\"left-leg\")\n cs1.append(cp0)\n cs1.append(cp1)\n self.assertTrue(cs1.size() == 2)\n with self.assertRaises(ValueError):\n cs1.breakContact(\"left-leg\") # contact do not exist\n self.assertTrue(cs1.size() == 2)\n cp2 = buildRandomContactPhase()\n cs1.append(cp2)\n self.assertTrue(cs1.size() == 3)\n with self.assertRaises(ValueError):\n cs1.breakContact(\"left-leg\", 1.5) # time interval not defined for last phase\n self.assertTrue(cs1.size() == 3)\n\n # # check that create contact correctly throw error when needed :\n placement = SE3.Identity()\n placement.setRandom()\n\n with self.assertRaises(ValueError):\n cs1.createContact(\"left-leg\", ContactPatch(placement)) # contact already exist\n self.assertTrue(cs1.size() == 3)\n cs1.breakContact(\"left-leg\")\n self.assertTrue(cs1.size() == 4)\n with self.assertRaises(ValueError):\n cs1.createContact(\"left-leg\", ContactPatch(placement), 2.) 
# time interval not defined\n self.assertTrue(cs1.size() == 4)\n\n def test_is_consistent(self):\n cs1 = ContactSequence(0)\n cp0 = buildRandomContactPhase(0, 2)\n cp1 = buildRandomContactPhase(2, 4.)\n cs1.append(cp0)\n cs1.append(cp1)\n consistent = cs1.haveTimings()\n self.assertTrue(consistent)\n\n cs2 = ContactSequence(0)\n cp2 = buildRandomContactPhase(0, 2)\n cp3 = buildRandomContactPhase(1.5, 4.)\n cs2.append(cp2)\n cs2.append(cp3)\n consistent = cs2.haveTimings()\n self.assertFalse(consistent)\n\n cs3 = ContactSequence(0)\n cp4 = buildRandomContactPhase(0, 2)\n cp5 = buildRandomContactPhase()\n cs3.append(cp4)\n cs3.append(cp5)\n consistent = cs3.haveTimings()\n self.assertFalse(consistent)\n\n cs4 = ContactSequence(0)\n cp6 = buildRandomContactPhase()\n cp7 = buildRandomContactPhase(1, 3)\n cs4.append(cp6)\n cs4.append(cp7)\n consistent = cs4.haveTimings()\n self.assertFalse(consistent)\n\n def test_contact_sequence_have_contact_model(self):\n cs1 = ContactSequence(0)\n cp0 = buildRandomContactPhase(0, 2)\n cp1 = buildRandomContactPhase(2, 4.)\n cs1.append(cp0)\n cs1.append(cp1)\n self.assertFalse(cs1.haveContactModelDefined())\n\n mp1 = ContactModel(0.5, ContactType.CONTACT_PLANAR)\n pos = np.random.rand(3, 5)\n mp1.contact_points_positions = pos\n mp2 = ContactModel(1., ContactType.CONTACT_POINT)\n pos = np.random.rand(3, 5)\n mp1.contact_points_positions = pos\n\n cs1.contactPhases[0].contactPatch(\"right-leg\").contact_model = mp1\n cs1.contactPhases[0].contactPatch(\"left-leg\").contact_model = mp2\n cs1.contactPhases[1].contactPatch(\"right-leg\").contact_model = mp1\n cs1.contactPhases[1].contactPatch(\"left-leg\").contact_model = mp2\n self.assertTrue(cs1.haveContactModelDefined())\n\n cp2 = buildRandomContactPhase(6., 8.)\n cs1.append(cp2)\n self.assertFalse(cs1.haveContactModelDefined())\n mp3 = ContactModel(0.2)\n cs1.contactPhases[2].contactPatch(\"right-leg\").contact_model = mp3\n cs1.contactPhases[2].contactPatch(\"left-leg\").contact_model 
= mp2\n self.assertFalse(cs1.haveContactModelDefined())\n\n mp3.contact_type = ContactType.CONTACT_PLANAR # do not change the contact model already in the seqence\n self.assertFalse(cs1.haveContactModelDefined())\n\n cs1.contactPhases[2].contactPatch(\"right-leg\").contact_model.contact_type = ContactType.CONTACT_PLANAR\n self.assertTrue(cs1.haveContactModelDefined())\n\n def test_contact_sequence_concatenate_config_traj(self):\n cs1 = ContactSequence(0)\n cp0 = buildRandomContactPhase(0, 2)\n cp1 = buildRandomContactPhase(2, 4.)\n p0 = np.random.rand(35)\n p1 = np.random.rand(35)\n p2 = np.random.rand(35)\n t0 = 0.\n t1 = 2.\n t2 = 4.\n c1 = polynomial(p0, p1, t0, t1)\n c2 = polynomial(p1, p2, t1, t2)\n cp0.q_t = c1\n cp1.q_t = c2\n self.assertTrue(cp0.q_t.min() == 0.)\n self.assertTrue(cp0.q_t.max() == 2.)\n self.assertTrue(cp1.q_t.min() == 2.)\n self.assertTrue(cp1.q_t.max() == 4.)\n cs1.append(cp0)\n cs1.append(cp1)\n q_t = cs1.concatenateQtrajectories()\n self.assertTrue(q_t.min() == 0.)\n self.assertTrue(q_t.max() == 4.)\n self.assertTrue(array_equal(q_t(0), cp0.q_t(0)))\n self.assertTrue(array_equal(q_t(0.5), cp0.q_t(0.5)))\n self.assertTrue(array_equal(q_t(2.), cp0.q_t(2.)))\n self.assertTrue(array_equal(q_t(3), cp1.q_t(3)))\n self.assertTrue(array_equal(q_t(4.), cp1.q_t(4.)))\n\n def test_contact_sequence_concatenate_effector_traj(self):\n cs1 = ContactSequence(0)\n cp0 = ContactPhase(0, 2)\n cp1 = ContactPhase(2, 4.)\n cp2 = ContactPhase(4, 8.)\n p0 = SE3()\n p0.setRandom()\n p1 = SE3()\n p1.setRandom()\n p2 = SE3()\n p2.setRandom()\n\n traj_0 = SE3Curve(p0, p1, 0., 2.)\n traj_2 = SE3Curve(p1, p2, 4., 8.)\n cp0.addEffectorTrajectory(\"right_leg\", traj_0)\n cp2.addEffectorTrajectory(\"right_leg\", traj_2)\n cs1.append(cp0)\n cs1.append(cp1)\n cs1.append(cp2)\n\n traj = cs1.concatenateEffectorTrajectories(\"right_leg\")\n self.assertTrue(traj.min() == 0.)\n self.assertTrue(traj.max() == 8.)\n self.assertTrue(np.isclose(traj(0.), traj_0(0.)).all())\n 
self.assertTrue(np.isclose(traj(1.5), traj_0(1.5)).all())\n self.assertTrue(np.isclose(traj(2.), traj_0(2.)).all())\n self.assertTrue(np.isclose(traj(4.), traj_2(4.)).all())\n self.assertTrue(np.isclose(traj(6.), traj_2(6.)).all())\n self.assertTrue(np.isclose(traj(8.), traj_2(8.)).all())\n self.assertTrue(np.isclose(traj(2.5), traj_0(2.)).all())\n self.assertTrue(np.isclose(traj(3.8), traj_0(2.)).all())\n\n def test_contact_sequence_concatenate_force_traj(self):\n cs1 = ContactSequence(0)\n cp0 = ContactPhase(0, 2)\n cp1 = ContactPhase(2, 4.)\n cp2 = ContactPhase(4, 8.)\n\n cp0.addContact(\"right_leg\", ContactPatch())\n cp2.addContact(\"right_leg\", ContactPatch())\n f_0 = createRandomPiecewisePolynomial(12, 0, 2)\n f_2 = createRandomPiecewisePolynomial(12, 4, 8)\n cp0.addContactForceTrajectory(\"right_leg\", f_0)\n cp2.addContactForceTrajectory(\"right_leg\", f_2)\n\n cs1.append(cp0)\n cs1.append(cp1)\n cs1.append(cp2)\n\n forces = cs1.concatenateContactForceTrajectories(\"right_leg\")\n self.assertTrue(forces.min() == 0.)\n self.assertTrue(forces.max() == 8.)\n self.assertTrue(array_equal(forces(0.), f_0(0.)))\n self.assertTrue(array_equal(forces(1.5), f_0(1.5)))\n self.assertTrue(array_equal(forces(1.999), f_0(1.999)))\n self.assertTrue(array_equal(forces(4.), f_2(4.)))\n self.assertTrue(array_equal(forces(6.), f_2(6.)))\n self.assertTrue(array_equal(forces(8.), f_2(8.)))\n self.assertTrue(array_equal(forces(2.), np.zeros(12)))\n self.assertTrue(array_equal(forces(2.5), np.zeros(12)))\n self.assertTrue(array_equal(forces(3.8), np.zeros(12)))\n\n def test_contact_sequence_concatenate_normal_force_traj(self):\n cs1 = ContactSequence(0)\n cp0 = ContactPhase(0, 2)\n cp1 = ContactPhase(2, 4.)\n cp2 = ContactPhase(4, 8.)\n\n cp1.addContact(\"right_leg\", ContactPatch())\n f_1 = createRandomPiecewisePolynomial(1, 2., 4.)\n cp1.addContactNormalForceTrajectory(\"right_leg\", f_1)\n\n cs1.append(cp0)\n cs1.append(cp1)\n cs1.append(cp2)\n\n forces = 
cs1.concatenateNormalForceTrajectories(\"right_leg\")\n self.assertTrue(forces.min() == 0.)\n self.assertTrue(forces.max() == 8.)\n self.assertTrue(array_equal(forces(2.), f_1(2.)))\n self.assertTrue(array_equal(forces(2.5), f_1(2.5)))\n self.assertTrue(array_equal(forces(3.999), f_1(3.999)))\n self.assertTrue(array_equal(forces(0.), np.zeros(1)))\n self.assertTrue(array_equal(forces(1.5), np.zeros(1)))\n self.assertTrue(array_equal(forces(4.), np.zeros(1)))\n self.assertTrue(array_equal(forces(7.5), np.zeros(1)))\n\n def test_contact_sequence_phase_at_time(self):\n cs1 = ContactSequence(0)\n cp0 = ContactPhase(0, 2)\n cp1 = ContactPhase(2, 4.)\n cp2 = ContactPhase(4, 8.)\n\n cs1.append(cp0)\n cs1.append(cp1)\n cs1.append(cp2)\n\n self.assertEqual(cs1.phaseIdAtTime(0.), 0)\n self.assertEqual(cs1.phaseIdAtTime(1.), 0)\n self.assertEqual(cs1.phaseIdAtTime(1.9), 0)\n self.assertEqual(cs1.phaseIdAtTime(2.), 1)\n self.assertEqual(cs1.phaseIdAtTime(3.5), 1)\n self.assertEqual(cs1.phaseIdAtTime(4.), 2)\n self.assertEqual(cs1.phaseIdAtTime(5.), 2)\n self.assertEqual(cs1.phaseIdAtTime(8.), 2)\n self.assertEqual(cs1.phaseIdAtTime(-0.5), -1)\n self.assertEqual(cs1.phaseIdAtTime(10.), -1)\n\n self.assertEqual(cs1.phaseAtTime(0.), cp0)\n self.assertEqual(cs1.phaseAtTime(1.), cp0)\n self.assertEqual(cs1.phaseAtTime(1.9), cp0)\n self.assertEqual(cs1.phaseAtTime(2.), cp1)\n self.assertEqual(cs1.phaseAtTime(3.5), cp1)\n self.assertEqual(cs1.phaseAtTime(4.), cp2)\n self.assertEqual(cs1.phaseAtTime(5.), cp2)\n self.assertEqual(cs1.phaseAtTime(8.), cp2)\n with self.assertRaises(ValueError):\n cs1.phaseAtTime(-0.5)\n with self.assertRaises(ValueError):\n cs1.phaseAtTime(10.)\n\n def test_pickle_contact_sequence(self):\n cs = ContactSequence()\n for i in range(10):\n cp = buildRandomContactPhase(0., 2.)\n cs.append(cp)\n cs_pickled = pickle.dumps(cs)\n cs_from_pickle = pickle.loads(cs_pickled)\n self.assertEqual(cs_from_pickle, cs)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.isclose", "numpy.random.rand", "numpy.array_equal", "numpy.zeros", "numpy.linspace" ] ]
Zac-HD/MyGrad
[ "fbc375d28842e1af1ebaf62ca6da479609a6baf6", "fbc375d28842e1af1ebaf62ca6da479609a6baf6", "fbc375d28842e1af1ebaf62ca6da479609a6baf6" ]
[ "tests/nnet/activations/test_leaky_relu.py", "src/mygrad/nnet/activations/selu.py", "tests/nnet/layers/test_batchnorm.py" ]
[ "import hypothesis.strategies as st\nimport numpy as np\nimport pytest\n\nfrom mygrad import Tensor, asarray\nfrom mygrad.nnet.activations import leaky_relu\nfrom tests.wrappers.uber import backprop_test_factory, fwdprop_test_factory\n\n\n@pytest.mark.parametrize(\"slope\", (None, 1j))\ndef test_input_validation(slope):\n with pytest.raises(TypeError):\n leaky_relu(2, slope=slope)\n\n\ndef _np_leaky_relu(x, slope):\n return np.maximum(x, 0) + asarray(slope) * np.minimum(x, 0)\n\n\n_reasonable_floats = st.floats(-100, 100)\n\n\ndef _finite_params(arrs, slope):\n return np.all(np.isfinite(asarray(slope) * asarray(arrs)))\n\n\n@fwdprop_test_factory(\n mygrad_func=leaky_relu,\n true_func=_np_leaky_relu,\n num_arrays=1,\n kwargs={\n \"slope\": lambda x: _reasonable_floats\n | _reasonable_floats.map(np.array)\n | _reasonable_floats.map(Tensor)\n },\n assumptions=_finite_params,\n)\ndef test_leaky_relu_fwd():\n pass\n\n\ndef _away_from_zero(*arrs, **kwargs):\n x = arrs[0]\n return np.all(np.abs(x.data) > 1e-8)\n\n\n@backprop_test_factory(\n mygrad_func=leaky_relu,\n true_func=_np_leaky_relu,\n num_arrays=1,\n assumptions=lambda arrs, slope: _away_from_zero(arrs)\n and _finite_params(arrs, slope),\n kwargs={\n \"slope\": lambda x: _reasonable_floats\n | _reasonable_floats.map(np.array)\n | _reasonable_floats.map(Tensor)\n },\n)\ndef test_leaky_relu_bkwd():\n pass\n", "import numpy as np\n\nfrom mygrad import Tensor\nfrom mygrad.operation_base import Operation\n\n__all__ = [\"selu\"]\n\n\n_ALPHA = 1.6732632423543772848170429916717\n_SCALE = 1.0507009873554804934193349852946\n\n\nclass SELU(Operation):\n \"\"\" Returns the scaled exponential linear activation (SELU) elementwise along x. 
The SELU is\n given by Ξ»Ι‘(exp(x) - 1) for x < 0 and Ξ»x for x β‰₯ 0.\n\n Notes\n -----\n The SELU activation was proposed in the paper\n Self-Normalizing Neural Networks\n GΓΌnter Klambauer, Thomas Unterthiner, Andreas Mayr, Sepp Hochreiter\n at https://arxiv.org/abs/1706.02515\n \"\"\"\n\n def __call__(self, x):\n \"\"\"\n Parameters\n ----------\n x : mygrad.Tensor\n Input data.\n\n Returns\n -------\n numpy.ndarray\n The SELU function applied to `x` elementwise.\n \"\"\"\n self.variables = (x,)\n\n x = x.data\n self.exp = _ALPHA * (np.exp(x) - 1)\n return _SCALE * np.where(x < 0, self.exp, x)\n\n def backward_var(self, grad, index, **kwargs):\n x = self.variables[index]\n return grad * _SCALE * np.where(x.data < 0, self.exp + _ALPHA, 1)\n\n\ndef selu(x, constant=False):\n \"\"\" Returns the scaled exponential linear activation (SELU) elementwise along x.\n\n The SELU is given by Ξ»Ι‘(exp(x) - 1) for x < 0 and Ξ»x for x β‰₯ 0.\n\n Parameters\n ----------\n x : mygrad.Tensor\n Input data.\n\n constant : bool, optional(default=False)\n If ``True``, the returned tensor is a constant (it\n does not back-propagate a gradient)\n\n Returns\n -------\n mygrad.Tensor\n The SELU function applied to `x` elementwise.\n\n Notes\n -----\n The SELU activation was proposed in the paper\n Self-Normalizing Neural Networks\n GΓΌnter Klambauer, Thomas Unterthiner, Andreas Mayr, Sepp Hochreiter\n at https://arxiv.org/abs/1706.02515\n\n Examples\n --------\n >>> import mygrad as mg\n >>> from mygrad.nnet.activations import selu\n >>> x = mg.arange(-5, 6)\n >>> x\n Tensor([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])\n >>> y = elu(x, alpha=0.1); y\n Tensor([-1.74625336, -1.72589863, -1.67056873, -1.52016647, -1.11133074,\n 0. 
, 1.05070099, 2.10140197, 3.15210296, 4.20280395,\n 5.25350494])\n \"\"\"\n return Tensor._op(SELU, x, constant=constant)\n", "import hypothesis.extra.numpy as hnp\nimport hypothesis.strategies as st\nimport numpy as np\nfrom hypothesis import given, settings\nfrom numpy.testing import assert_allclose, assert_array_equal\n\nimport mygrad as mg\nfrom mygrad import Tensor\nfrom mygrad.nnet.layers.batchnorm import batchnorm\nfrom tests.wrappers.uber import backprop_test_factory, fwdprop_test_factory\n\n\ndef _mean(y, keepdims=False, axis=None, ddof=0):\n \"\"\"For use in var\"\"\"\n if isinstance(axis, int):\n axis = (axis,)\n N = y.size if axis is None else np.prod([y.shape[i] for i in axis])\n return y.sum(keepdims=keepdims, axis=axis) / (N - ddof)\n\n\ndef _var(x, keepdims=False, axis=None, ddof=0):\n \"\"\"Defines variance without using abs. Permits use of\n complex-step numerical derivative.\"\"\"\n return _mean(\n (x - x.mean(axis=axis, keepdims=True)) ** 2,\n keepdims=keepdims,\n axis=axis,\n ddof=ddof,\n )\n\n\ndef simple_batchnorm(x, gamma=None, beta=None, eps=None):\n axes = [i for i in range(x.ndim)]\n axes.pop(1) # every axis except 1\n axes = tuple(axes)\n keepdims_shape = tuple(1 if n != 1 else d for n, d in enumerate(x.shape))\n\n mean = mg.mean(x, axis=axes, keepdims=True)\n var = _var(x, axis=axes, keepdims=True)\n norm = (x - mean) / mg.sqrt(var + eps)\n\n if gamma is not None:\n gamma = gamma.reshape(keepdims_shape)\n norm *= gamma\n\n if beta is not None:\n beta = beta.reshape(keepdims_shape)\n norm += beta\n return norm\n\n\n@given(\n x=hnp.arrays(\n shape=hnp.array_shapes(min_dims=2, max_dims=4),\n dtype=float,\n elements=st.floats(-100, 100),\n ),\n data=st.data(),\n)\ndef test_batchnorm(x, data):\n # optionally draw affine parameters\n gamma = data.draw(\n st.none()\n | hnp.arrays(shape=x.shape[1:2], dtype=float, elements=st.floats(-10, 10)),\n label=\"gamma\",\n )\n beta = data.draw(\n st.none()\n | hnp.arrays(shape=x.shape[1:2], dtype=float, 
elements=st.floats(-10, 10)),\n label=\"beta\",\n )\n x_orig = np.copy(x)\n\n gamma_orig = np.copy(gamma) if gamma is not None else None\n beta_orig = np.copy(beta) if beta is not None else None\n\n t1 = Tensor(x)\n t2 = Tensor(x)\n\n g1 = Tensor(gamma) if gamma is not None else None\n g2 = Tensor(gamma) if gamma is not None else None\n\n b1 = Tensor(beta) if beta is not None else None\n b2 = Tensor(beta) if beta is not None else None\n\n y1 = simple_batchnorm(t1, gamma=g1, beta=b1, eps=1e-10)\n y2 = batchnorm(t2, gamma=g2, beta=b2, eps=1e-10)\n\n assert_allclose(actual=y2.data, desired=y1.data, atol=1e-4, rtol=1e-4)\n grad = data.draw(\n hnp.arrays(shape=y2.shape, dtype=t2.dtype, elements=st.floats(-10, 10)),\n label=\"grad\",\n )\n grad_orig = np.copy(grad)\n\n y1.backward(grad)\n y2.backward(grad)\n\n assert_allclose(actual=t2.grad, desired=t1.grad, atol=1e-4, rtol=1e-4)\n\n if beta is not None:\n assert_allclose(actual=b2.grad, desired=b1.grad, atol=1e-4, rtol=1e-4)\n else:\n assert b2 is None\n\n if gamma is not None:\n assert_allclose(actual=g2.grad, desired=g1.grad, atol=1e-4, rtol=1e-4)\n else:\n assert g2 is None\n\n for n, (o, c) in enumerate(\n zip((x, gamma, beta, grad), (x_orig, gamma_orig, beta_orig, grad_orig))\n ):\n if o is None or c is None:\n assert o is c, \"('{x}', '{gamma}', '{beta}', '{grad}')[{n}]\".format(\n x=x, gamma=gamma, beta=beta, grad=grad, n=n\n )\n else:\n assert_array_equal(\n o,\n c,\n err_msg=\"('{x}', '{gamma}', '{beta}', '{grad}')[{n}]\".format(\n x=x, gamma=gamma, beta=beta, grad=grad, n=n\n ),\n )\n\n if gamma is not None and beta is not None:\n assert not np.shares_memory(g2.grad, b2.grad)\n assert not np.shares_memory(grad, t2.grad)\n\n y2.null_gradients()\n assert t2.grad is None\n\n if gamma is not None:\n assert g2.grad is None\n\n if beta is not None:\n assert b2.grad is None\n\n\ndef simple_batchnorm_numpy(x, gamma=None, beta=None, eps=0):\n return mg.asarray(simple_batchnorm(x, eps=eps, gamma=gamma, 
beta=beta))\n\n\n@settings(deadline=None)\n@fwdprop_test_factory(\n mygrad_func=batchnorm,\n true_func=simple_batchnorm_numpy,\n num_arrays=1,\n index_to_arr_shapes={0: hnp.array_shapes(min_dims=2, max_dims=4)},\n kwargs=lambda x: st.fixed_dictionaries(dict(eps=st.floats(1e-20, 1e0))),\n atol=1e-5,\n)\ndef test_batchnorm_fwd():\n pass\n\n\n@settings(deadline=None)\n@backprop_test_factory(\n mygrad_func=batchnorm,\n true_func=simple_batchnorm_numpy,\n num_arrays=1,\n index_to_arr_shapes={0: hnp.array_shapes(min_dims=2, max_dims=4)},\n kwargs=lambda x: st.fixed_dictionaries(dict(eps=st.floats(1e-20, 1e0))),\n vary_each_element=True,\n)\ndef test_batchnorm_bkwd():\n pass\n" ]
[ [ "numpy.minimum", "numpy.abs", "numpy.maximum" ], [ "numpy.where", "numpy.exp" ], [ "numpy.shares_memory", "numpy.testing.assert_allclose", "numpy.copy", "numpy.prod" ] ]
tommylees112/crop_yield_prediction
[ "43299b6d6a1f22e4431e23bf92f9cff87c6f5073", "43299b6d6a1f22e4431e23bf92f9cff87c6f5073" ]
[ "test_scripts/tommy_test.py", "1_download_data/pull_MODIS_landcover_entire_county_clip.py" ]
[ "# tommy_test.py\nimport numpy as np\nimport os\n\ndef set_base_dir():\n \"\"\" SET path directory to the repo base \"\"\"\n os.chdir(os.path.abspath(\n os.path.join(\n os.path.dirname( __file__ ),'..'\n )\n )\n )\n print((\"Working from dir:\", os.getcwd()))\n\n return\n\nresult = np.load('6 result_analysis/paper_result.npy')\ncorr = np.load('6 result_analysis/corr.npy')\n\n# 24 'results' of different metric:algorithm permutations\ncompare_result_final = np.load('6 result_analysis/Compare_result_final.npz')\ncompare_result_ridge = np.load('6 result_analysis/Compare_result_ridge.npz')\ncompare_result = np.load('6 result_analysis/Compare_result.npz')\n\n", "import ee\r\nimport time\r\nimport sys\r\nimport numpy as np\r\nimport pandas as pd\r\nimport itertools\r\nimport os\r\nimport urllib.request, urllib.parse, urllib.error\r\n\r\nee.Initialize()\r\n\r\ndef export_oneimage(img,folder,name,scale,crs):\r\n task = ee.batch.Export.image(img, name, {\r\n 'driveFolder':folder,\r\n 'driveFileNamePrefix':name,\r\n 'scale':scale,\r\n 'crs':crs\r\n })\r\n task.start()\r\n while task.status()['state'] == 'RUNNING':\r\n print('Running...')\r\n # Perhaps task.cancel() at some point.\r\n time.sleep(10)\r\n print('Done.', task.status())\r\n\r\n\r\n\r\n\r\nlocations = pd.read_csv('locations_final.csv')\r\n\r\n\r\n# Transforms an Image Collection with 1 band per Image into a single Image with items as bands\r\n# Author: Jamie Vleeshouwer\r\n\r\ndef appendBand(current, previous):\r\n # Rename the band\r\n previous=ee.Image(previous)\r\n current = current.select([0])\r\n # Append it to the result (Note: only return current item on first element/iteration)\r\n accum = ee.Algorithms.If(ee.Algorithms.IsEqual(previous,None), current, previous.addBands(ee.Image(current)))\r\n # Return the accumulation\r\n return accum\r\n\r\ncounty_region = ee.FeatureCollection('ft:1S4EB6319wWW2sWQDPhDvmSBIVrD3iEmCLYB7nMM')\r\n\r\nimgcoll = ee.ImageCollection('MODIS/051/MCD12Q1') \\\r\n 
.filterBounds(ee.Geometry.Rectangle(-106.5, 50,-64, 23))\\\r\n .filterDate('2002-12-31','2016-8-4')\r\nimg=imgcoll.iterate(appendBand)\r\nimg=ee.Image(img)\r\n\r\n# img_0=ee.Image(ee.Number(0))\r\n# img_5000=ee.Image(ee.Number(5000))\r\n#\r\n# img=img.min(img_5000)\r\n# img=img.max(img_0)\r\n\r\n# img=ee.Image(ee.Number(100))\r\n# img=ee.ImageCollection('LC8_L1T').mosaic()\r\n\r\nfor loc1, loc2, lat, lon in locations.values:\r\n fname = '{}_{}'.format(int(loc1), int(loc2))\r\n\r\n # offset = 0.11\r\n scale = 500\r\n crs='EPSG:4326'\r\n\r\n # filter for a county\r\n region = county_region.filterMetadata('StateFips', 'equals', int(loc1))\r\n region = ee.FeatureCollection(region).filterMetadata('CntyFips', 'equals', int(loc2))\r\n region = ee.Feature(region.first())\r\n\r\n # region = str([\r\n # [lat - offset, lon + offset],\r\n # [lat + offset, lon + offset],\r\n # [lat + offset, lon - offset],\r\n # [lat - offset, lon - offset]])\r\n while True:\r\n try:\r\n export_oneimage(img.clip(region), 'data_mask', fname, scale, crs)\r\n except:\r\n print('retry')\r\n time.sleep(10)\r\n continue\r\n break\r\n # while True:\r\n # try:\r\n # export_oneimage(img,'Data_test',fname,region,scale,crs)\r\n # except:\r\n # print 'retry'\r\n # time.sleep(10)\r\n # continue\r\n # break" ]
[ [ "numpy.load" ], [ "pandas.read_csv" ] ]
ingeniamc/pyqtgraph
[ "be98ca4ffcb983050f250d827a62652d9cd7b824" ]
[ "pyqtgraph/graphicsItems/PlotDataItem.py" ]
[ "import numpy as np\nfrom .. import metaarray as metaarray\nfrom ..Qt import QtCore\nfrom .GraphicsObject import GraphicsObject\nfrom .PlotCurveItem import PlotCurveItem\nfrom .ScatterPlotItem import ScatterPlotItem\nfrom .. import functions as fn\nfrom .. import debug as debug\nfrom .. import getConfigOption\n\n\nclass PlotDataItem(GraphicsObject):\n \"\"\"\n **Bases:** :class:`GraphicsObject <pyqtgraph.GraphicsObject>`\n \n GraphicsItem for displaying plot curves, scatter plots, or both. \n While it is possible to use :class:`PlotCurveItem <pyqtgraph.PlotCurveItem>` or\n :class:`ScatterPlotItem <pyqtgraph.ScatterPlotItem>` individually, this class\n provides a unified interface to both. Instances of :class:`PlotDataItem` are \n usually created by plot() methods such as :func:`pyqtgraph.plot` and\n :func:`PlotItem.plot() <pyqtgraph.PlotItem.plot>`.\n \n ============================== ==============================================\n **Signals:**\n sigPlotChanged(self) Emitted when the data in this item is updated. 
\n sigClicked(self) Emitted when the item is clicked.\n sigPointsClicked(self, points) Emitted when a plot point is clicked\n Sends the list of points under the mouse.\n ============================== ==============================================\n \"\"\"\n \n sigPlotChanged = QtCore.Signal(object)\n sigClicked = QtCore.Signal(object)\n sigPointsClicked = QtCore.Signal(object, object)\n \n def __init__(self, *args, **kargs):\n \"\"\"\n There are many different ways to create a PlotDataItem:\n \n **Data initialization arguments:** (x,y data only)\n \n =================================== ======================================\n PlotDataItem(xValues, yValues) x and y values may be any sequence (including ndarray) of real numbers\n PlotDataItem(yValues) y values only -- x will be automatically set to range(len(y))\n PlotDataItem(x=xValues, y=yValues) x and y given by keyword arguments\n PlotDataItem(ndarray(Nx2)) numpy array with shape (N, 2) where x=data[:,0] and y=data[:,1]\n =================================== ======================================\n \n **Data initialization arguments:** (x,y data AND may include spot style)\n \n =========================== =========================================\n PlotDataItem(recarray) numpy array with dtype=[('x', float), ('y', float), ...]\n PlotDataItem(list-of-dicts) [{'x': x, 'y': y, ...}, ...] \n PlotDataItem(dict-of-lists) {'x': [...], 'y': [...], ...} \n PlotDataItem(MetaArray) 1D array of Y values with X sepecified as axis values \n OR 2D array with a column 'y' and extra columns as needed.\n =========================== =========================================\n \n **Line style keyword arguments:**\n\n ========== ==============================================================================\n connect Specifies how / whether vertexes should be connected. See\n :func:`arrayToQPath() <pyqtgraph.arrayToQPath>`\n pen Pen to use for drawing line between points.\n Default is solid grey, 1px width. 
Use None to disable line drawing.\n May be any single argument accepted by :func:`mkPen() <pyqtgraph.mkPen>`\n shadowPen Pen for secondary line to draw behind the primary line. disabled by default.\n May be any single argument accepted by :func:`mkPen() <pyqtgraph.mkPen>`\n fillLevel Fill the area between the curve and fillLevel\n fillBrush Fill to use when fillLevel is specified. \n May be any single argument accepted by :func:`mkBrush() <pyqtgraph.mkBrush>`\n stepMode If True, two orthogonal lines are drawn for each sample\n as steps. This is commonly used when drawing histograms.\n Note that in this case, `len(x) == len(y) + 1`\n (added in version 0.9.9)\n ========== ==============================================================================\n \n **Point style keyword arguments:** (see :func:`ScatterPlotItem.setData() <pyqtgraph.ScatterPlotItem.setData>` for more information)\n \n ============ =====================================================\n symbol Symbol to use for drawing points OR list of symbols, \n one per point. Default is no symbol.\n Options are o, s, t, d, +, or any QPainterPath\n symbolPen Outline pen for drawing points OR list of pens, one \n per point. May be any single argument accepted by \n :func:`mkPen() <pyqtgraph.mkPen>`\n symbolBrush Brush for filling points OR list of brushes, one per \n point. May be any single argument accepted by \n :func:`mkBrush() <pyqtgraph.mkBrush>`\n symbolSize Diameter of symbols OR list of diameters.\n pxMode (bool) If True, then symbolSize is specified in \n pixels. 
If False, then symbolSize is \n specified in data coordinates.\n ============ =====================================================\n \n **Optimization keyword arguments:**\n \n ================ =====================================================================\n antialias (bool) By default, antialiasing is disabled to improve performance.\n Note that in some cases (in particluar, when pxMode=True), points \n will be rendered antialiased even if this is set to False.\n decimate deprecated.\n downsample (int) Reduce the number of samples displayed by this value\n downsampleMethod 'subsample': Downsample by taking the first of N samples. \n This method is fastest and least accurate.\n 'mean': Downsample by taking the mean of N samples.\n 'peak': Downsample by drawing a saw wave that follows the min \n and max of the original data. This method produces the best \n visual representation of the data but is slower.\n autoDownsample (bool) If True, resample the data before plotting to avoid plotting\n multiple line segments per pixel. This can improve performance when\n viewing very high-density data, but increases the initial overhead \n and memory usage.\n clipToView (bool) If True, only plot data that is visible within the X range of\n the containing ViewBox. This can improve performance when plotting\n very large data sets where only a fraction of the data is visible\n at any time.\n identical *deprecated*\n ================ =====================================================================\n \n **Meta-info keyword arguments:**\n \n ========== ================================================\n name name of dataset. 
This would appear in a legend\n ========== ================================================\n \"\"\"\n GraphicsObject.__init__(self)\n self.setFlag(self.ItemHasNoContents)\n self.xData = None\n self.yData = None\n self.xDisp = None\n self.yDisp = None\n #self.dataMask = None\n #self.curves = []\n #self.scatters = []\n self.curve = PlotCurveItem()\n self.scatter = ScatterPlotItem()\n self.curve.setParentItem(self)\n self.scatter.setParentItem(self)\n \n self.curve.sigClicked.connect(self.curveClicked)\n self.scatter.sigClicked.connect(self.scatterClicked)\n \n \n #self.clear()\n self.opts = {\n 'connect': 'all',\n \n 'fftMode': False,\n 'logMode': [False, False],\n 'alphaHint': 1.0,\n 'alphaMode': False,\n \n 'pen': (200,200,200),\n 'shadowPen': None,\n 'fillLevel': None,\n 'fillBrush': None,\n 'stepMode': None, \n \n 'symbol': None,\n 'symbolSize': 10,\n 'symbolPen': (200,200,200),\n 'symbolBrush': (50, 50, 150),\n 'pxMode': True,\n \n 'antialias': getConfigOption('antialias'),\n 'pointMode': None,\n \n 'downsample': 1,\n 'autoDownsample': False,\n 'downsampleMethod': 'peak',\n 'autoDownsampleFactor': 5., # draw ~5 samples per pixel\n 'clipToView': False,\n \n 'data': None,\n }\n self.setData(*args, **kargs)\n \n def implements(self, interface=None):\n ints = ['plotData']\n if interface is None:\n return ints\n return interface in ints\n \n def name(self):\n return self.opts.get('name', None)\n \n def boundingRect(self):\n return QtCore.QRectF() ## let child items handle this\n\n def setAlpha(self, alpha, auto):\n if self.opts['alphaHint'] == alpha and self.opts['alphaMode'] == auto:\n return\n self.opts['alphaHint'] = alpha\n self.opts['alphaMode'] = auto\n self.setOpacity(alpha)\n #self.update()\n \n def setFftMode(self, mode):\n if self.opts['fftMode'] == mode:\n return\n self.opts['fftMode'] = mode\n self.xDisp = self.yDisp = None\n self.xClean = self.yClean = None\n self.updateItems()\n self.informViewBoundsChanged()\n \n def setLogMode(self, xMode, yMode):\n 
if self.opts['logMode'] == [xMode, yMode]:\n return\n self.opts['logMode'] = [xMode, yMode]\n self.xDisp = self.yDisp = None\n self.xClean = self.yClean = None\n self.updateItems()\n self.informViewBoundsChanged()\n \n def setPointMode(self, mode):\n if self.opts['pointMode'] == mode:\n return\n self.opts['pointMode'] = mode\n self.update()\n \n def setPen(self, *args, **kargs):\n \"\"\"\n | Sets the pen used to draw lines between points.\n | *pen* can be a QPen or any argument accepted by :func:`pyqtgraph.mkPen() <pyqtgraph.mkPen>`\n \"\"\"\n pen = fn.mkPen(*args, **kargs)\n self.opts['pen'] = pen\n #self.curve.setPen(pen)\n #for c in self.curves:\n #c.setPen(pen)\n #self.update()\n self.updateItems()\n \n def setShadowPen(self, *args, **kargs):\n \"\"\"\n | Sets the shadow pen used to draw lines between points (this is for enhancing contrast or \n emphacizing data). \n | This line is drawn behind the primary pen (see :func:`setPen() <pyqtgraph.PlotDataItem.setPen>`)\n and should generally be assigned greater width than the primary pen.\n | *pen* can be a QPen or any argument accepted by :func:`pyqtgraph.mkPen() <pyqtgraph.mkPen>`\n \"\"\"\n pen = fn.mkPen(*args, **kargs)\n self.opts['shadowPen'] = pen\n #for c in self.curves:\n #c.setPen(pen)\n #self.update()\n self.updateItems()\n \n def setFillBrush(self, *args, **kargs):\n brush = fn.mkBrush(*args, **kargs)\n if self.opts['fillBrush'] == brush:\n return\n self.opts['fillBrush'] = brush\n self.updateItems()\n \n def setBrush(self, *args, **kargs):\n return self.setFillBrush(*args, **kargs)\n \n def setFillLevel(self, level):\n if self.opts['fillLevel'] == level:\n return\n self.opts['fillLevel'] = level\n self.updateItems()\n\n def setSymbol(self, symbol):\n if self.opts['symbol'] == symbol:\n return\n self.opts['symbol'] = symbol\n #self.scatter.setSymbol(symbol)\n self.updateItems()\n \n def setSymbolPen(self, *args, **kargs):\n pen = fn.mkPen(*args, **kargs)\n if self.opts['symbolPen'] == pen:\n return\n 
self.opts['symbolPen'] = pen\n #self.scatter.setSymbolPen(pen)\n self.updateItems()\n \n \n \n def setSymbolBrush(self, *args, **kargs):\n brush = fn.mkBrush(*args, **kargs)\n if self.opts['symbolBrush'] == brush:\n return\n self.opts['symbolBrush'] = brush\n #self.scatter.setSymbolBrush(brush)\n self.updateItems()\n \n \n def setSymbolSize(self, size):\n if self.opts['symbolSize'] == size:\n return\n self.opts['symbolSize'] = size\n #self.scatter.setSymbolSize(symbolSize)\n self.updateItems()\n\n def setDownsampling(self, ds=None, auto=None, method=None):\n \"\"\"\n Set the downsampling mode of this item. Downsampling reduces the number\n of samples drawn to increase performance. \n \n ============== =================================================================\n **Arguments:**\n ds (int) Reduce visible plot samples by this factor. To disable,\n set ds=1.\n auto (bool) If True, automatically pick *ds* based on visible range\n mode 'subsample': Downsample by taking the first of N samples.\n This method is fastest and least accurate.\n 'mean': Downsample by taking the mean of N samples.\n 'peak': Downsample by drawing a saw wave that follows the min\n and max of the original data. 
This method produces the best\n visual representation of the data but is slower.\n ============== =================================================================\n \"\"\"\n changed = False\n if ds is not None:\n if self.opts['downsample'] != ds:\n changed = True\n self.opts['downsample'] = ds\n \n if auto is not None and self.opts['autoDownsample'] != auto:\n self.opts['autoDownsample'] = auto\n changed = True\n \n if method is not None:\n if self.opts['downsampleMethod'] != method:\n changed = True\n self.opts['downsampleMethod'] = method\n \n if changed:\n self.xDisp = self.yDisp = None\n self.updateItems()\n \n def setClipToView(self, clip):\n if self.opts['clipToView'] == clip:\n return\n self.opts['clipToView'] = clip\n self.xDisp = self.yDisp = None\n self.updateItems()\n \n \n def setData(self, *args, **kargs):\n \"\"\"\n Clear any data displayed by this item and display new data.\n See :func:`__init__() <pyqtgraph.PlotDataItem.__init__>` for details; it accepts the same arguments.\n \"\"\"\n # self.clear()\n profiler = debug.Profiler()\n valid_arguments = True\n y = None\n x = None\n if len(args) == 1:\n data = args[0]\n dt = dataType(data)\n if dt == 'empty':\n pass\n elif dt == 'listOfValues':\n y = np.array(data)\n elif dt == 'Nx2array':\n x = data[:, 0]\n y = data[:, 1]\n elif dt == 'recarray' or dt == 'dictOfLists':\n if 'x' in data:\n x = np.array(data['x'])\n if 'y' in data:\n y = np.array(data['y'])\n elif dt == 'listOfDicts':\n if 'x' in data[0]:\n x = np.array([d.get('x', None) for d in data])\n if 'y' in data[0]:\n y = np.array([d.get('y', None) for d in data])\n for k in ['data', 'symbolSize', 'symbolPen', 'symbolBrush',\n 'symbolShape']:\n if k in data:\n kargs[k] = [d.get(k, None) for d in data]\n elif dt == 'MetaArray':\n y = data.view(np.ndarray)\n x = data.xvals(0).view(np.ndarray)\n else:\n valid_arguments = False\n print('Invalid data type %s' % type(data))\n\n elif len(args) == 2:\n seq = ('listOfValues', 'MetaArray', 'empty')\n dtyp = 
dataType(args[0]), dataType(args[1])\n if dtyp[0] not in seq or dtyp[1] not in seq:\n valid_arguments = False\n print(\n 'When passing two unnamed arguments, both must be a list or array of values. (got %s, %s)' % (\n str(type(args[0])), str(type(args[1]))))\n if valid_arguments:\n if not isinstance(args[0], np.ndarray):\n # x = np.array(args[0])\n if dtyp[0] == 'MetaArray':\n x = args[0].asarray()\n else:\n x = np.array(args[0])\n else:\n x = args[0].view(np.ndarray)\n if not isinstance(args[1], np.ndarray):\n # y = np.array(args[1])\n if dtyp[1] == 'MetaArray':\n y = args[1].asarray()\n else:\n y = np.array(args[1])\n else:\n y = args[1].view(np.ndarray)\n if valid_arguments:\n if 'x' in kargs:\n x = kargs['x']\n if 'y' in kargs:\n y = kargs['y']\n\n profiler('interpret data')\n ## pull in all style arguments.\n ## Use self.opts to fill in anything not present in kargs.\n\n if 'name' in kargs:\n self.opts['name'] = kargs['name']\n if 'connect' in kargs:\n self.opts['connect'] = kargs['connect']\n\n ## if symbol pen/brush are given with no symbol, then assume symbol is 'o'\n\n if 'symbol' not in kargs and (\n 'symbolPen' in kargs or 'symbolBrush' in kargs or 'symbolSize' in kargs):\n kargs['symbol'] = 'o'\n\n if 'brush' in kargs:\n kargs['fillBrush'] = kargs['brush']\n\n for k in list(self.opts.keys()):\n if k in kargs:\n self.opts[k] = kargs[k]\n\n # curveArgs = {}\n # for k in ['pen', 'shadowPen', 'fillLevel', 'brush']:\n # if k in kargs:\n # self.opts[k] = kargs[k]\n # curveArgs[k] = self.opts[k]\n\n # scatterArgs = {}\n # for k,v in [('symbolPen','pen'), ('symbolBrush','brush'), ('symbol','symbol')]:\n # if k in kargs:\n # self.opts[k] = kargs[k]\n # scatterArgs[v] = self.opts[k]\n\n if y is None:\n self.updateItems()\n profiler('update items')\n return\n if y is not None and x is None:\n x = np.arange(len(y))\n\n if isinstance(x, list):\n x = np.array(x)\n if isinstance(y, list):\n y = np.array(y)\n\n self.xData = x.view(\n np.ndarray) ## one last check to 
make sure there are no MetaArrays getting by\n self.yData = y.view(np.ndarray)\n self.xClean = self.yClean = None\n self.xDisp = None\n self.yDisp = None\n profiler('set data')\n\n self.updateItems()\n profiler('update items')\n\n self.informViewBoundsChanged()\n # view = self.getViewBox()\n # if view is not None:\n # view.itemBoundsChanged(self) ## inform view so it can update its range if it wants\n\n self.sigPlotChanged.emit(self)\n profiler('emit')\n\n def updateItems(self):\n \n curveArgs = {}\n for k,v in [('pen','pen'), ('shadowPen','shadowPen'), ('fillLevel','fillLevel'), ('fillBrush', 'brush'), ('antialias', 'antialias'), ('connect', 'connect'), ('stepMode', 'stepMode')]:\n curveArgs[v] = self.opts[k]\n \n scatterArgs = {}\n for k,v in [('symbolPen','pen'), ('symbolBrush','brush'), ('symbol','symbol'), ('symbolSize', 'size'), ('data', 'data'), ('pxMode', 'pxMode'), ('antialias', 'antialias')]:\n if k in self.opts:\n scatterArgs[v] = self.opts[k]\n \n x,y = self.getData()\n #scatterArgs['mask'] = self.dataMask\n \n if curveArgs['pen'] is not None or (curveArgs['brush'] is not None and curveArgs['fillLevel'] is not None):\n self.curve.setData(x=x, y=y, **curveArgs)\n self.curve.show()\n else:\n self.curve.hide()\n \n if scatterArgs['symbol'] is not None:\n \n if self.opts.get('stepMode', False) is True:\n x = 0.5 * (x[:-1] + x[1:]) \n self.scatter.setData(x=x, y=y, **scatterArgs)\n self.scatter.show()\n else:\n self.scatter.hide()\n\n\n def getData(self):\n if self.xData is None:\n return (None, None)\n \n if self.xDisp is None:\n x = self.xData\n y = self.yData\n \n if self.opts['fftMode']:\n x,y = self._fourierTransform(x, y)\n # Ignore the first bin for fft data if we have a logx scale\n if self.opts['logMode'][0]:\n x=x[1:]\n y=y[1:]\n \n with np.errstate(divide='ignore'):\n if self.opts['logMode'][0]:\n x = np.log10(x)\n if self.opts['logMode'][1]:\n y = np.log10(y)\n \n ds = self.opts['downsample']\n if not isinstance(ds, int):\n ds = 1\n \n if 
self.opts['autoDownsample']:\n # this option presumes that x-values have uniform spacing\n range = self.viewRect()\n if range is not None and len(x) > 1:\n dx = float(x[-1]-x[0]) / (len(x)-1)\n if dx != 0.0:\n x0 = (range.left()-x[0]) / dx\n x1 = (range.right()-x[0]) / dx\n width = self.getViewBox().width()\n if width != 0.0:\n ds = int(max(1, int((x1-x0) / (width*self.opts['autoDownsampleFactor']))))\n ## downsampling is expensive; delay until after clipping.\n \n if self.opts['clipToView']:\n view = self.getViewBox()\n if view is None or not view.autoRangeEnabled()[0]:\n # this option presumes that x-values are in increasing order\n range = self.viewRect()\n if range is not None and len(x) > 1:\n # clip to visible region extended by downsampling value, assuming\n # uniform spacing of x-values, has O(1) performance\n dx = float(x[-1]-x[0]) / (len(x)-1)\n x0 = np.clip(int((range.left()-x[0])/dx) - 1*ds, 0, len(x)-1)\n x1 = np.clip(int((range.right()-x[0])/dx) + 2*ds, 0, len(x)-1)\n \n # if data has been clipped too strongly (in case of non-uniform \n # spacing of x-values), refine the clipping region as required\n # worst case performance: O(log(n))\n # best case performance: O(1)\n if x[x0] > range.left():\n x0 = np.searchsorted(x, range.left()) - 1*ds\n x0 = np.clip(x0, a_min=0, a_max=len(x))\n if x[x1] < range.right():\n x1 = np.searchsorted(x, range.right()) + 2*ds\n x1 = np.clip(x1, a_min=0, a_max=len(x))\n \n x = x[x0:x1]\n y = y[x0:x1]\n \n if ds > 1:\n if self.opts['downsampleMethod'] == 'subsample':\n x = x[::ds]\n y = y[::ds]\n elif self.opts['downsampleMethod'] == 'mean':\n n = len(x) // ds\n x = x[:n*ds:ds]\n y = y[:n*ds].reshape(n,ds).mean(axis=1)\n elif self.opts['downsampleMethod'] == 'peak':\n n = len(x) // ds\n x1 = np.empty((n,2))\n x1[:] = x[:n*ds:ds,np.newaxis]\n x = x1.reshape(n*2)\n y1 = np.empty((n,2))\n y2 = y[:n*ds].reshape((n, ds))\n y1[:,0] = y2.max(axis=1)\n y1[:,1] = y2.min(axis=1)\n y = y1.reshape(n*2)\n \n \n self.xDisp = x\n 
self.yDisp = y\n return self.xDisp, self.yDisp\n\n def dataBounds(self, ax, frac=1.0, orthoRange=None):\n \"\"\"\n Returns the range occupied by the data (along a specific axis) in this item.\n This method is called by ViewBox when auto-scaling.\n\n =============== =============================================================\n **Arguments:**\n ax (0 or 1) the axis for which to return this item's data range\n frac (float 0.0-1.0) Specifies what fraction of the total data \n range to return. By default, the entire range is returned.\n This allows the ViewBox to ignore large spikes in the data\n when auto-scaling.\n orthoRange ([min,max] or None) Specifies that only the data within the\n given range (orthogonal to *ax*) should me measured when \n returning the data range. (For example, a ViewBox might ask\n what is the y-range of all data with x-values between min\n and max)\n =============== =============================================================\n \"\"\"\n \n range = [None, None]\n if self.curve.isVisible():\n range = self.curve.dataBounds(ax, frac, orthoRange)\n elif self.scatter.isVisible():\n r2 = self.scatter.dataBounds(ax, frac, orthoRange)\n range = [\n r2[0] if range[0] is None else (range[0] if r2[0] is None else min(r2[0], range[0])),\n r2[1] if range[1] is None else (range[1] if r2[1] is None else min(r2[1], range[1]))\n ]\n return range\n \n def pixelPadding(self):\n \"\"\"\n Return the size in pixels that this item may draw beyond the values returned by dataBounds().\n This method is called by ViewBox when auto-scaling.\n \"\"\"\n pad = 0\n if self.curve.isVisible():\n pad = max(pad, self.curve.pixelPadding())\n elif self.scatter.isVisible():\n pad = max(pad, self.scatter.pixelPadding())\n return pad\n \n\n def clear(self):\n #for i in self.curves+self.scatters:\n #if i.scene() is not None:\n #i.scene().removeItem(i)\n #self.curves = []\n #self.scatters = []\n self.xData = None\n self.yData = None\n #self.xClean = None\n #self.yClean = None\n 
self.xDisp = None\n self.yDisp = None\n self.curve.clear()\n self.scatter.clear()\n\n def appendData(self, *args, **kargs):\n pass\n \n def curveClicked(self):\n self.sigClicked.emit(self)\n \n def scatterClicked(self, plt, points):\n self.sigClicked.emit(self)\n self.sigPointsClicked.emit(self, points)\n \n def viewRangeChanged(self):\n # view range has changed; re-plot if needed\n if self.opts['clipToView'] or self.opts['autoDownsample']:\n self.xDisp = self.yDisp = None\n self.updateItems()\n \n def _fourierTransform(self, x, y):\n ## Perform fourier transform. If x values are not sampled uniformly,\n ## then use np.interp to resample before taking fft.\n dx = np.diff(x)\n uniform = not np.any(np.abs(dx-dx[0]) > (abs(dx[0]) / 1000.))\n if not uniform:\n x2 = np.linspace(x[0], x[-1], len(x))\n y = np.interp(x2, x, y)\n x = x2\n n = y.size\n f = np.fft.rfft(y) / n\n d = float(x[-1]-x[0]) / (len(x)-1)\n x = np.fft.rfftfreq(n, d)\n y = np.abs(f)\n return x, y\n \ndef dataType(obj):\n if hasattr(obj, '__len__') and len(obj) == 0:\n return 'empty'\n if isinstance(obj, dict):\n return 'dictOfLists'\n elif isSequence(obj):\n first = obj[0]\n \n if (hasattr(obj, 'implements') and obj.implements('MetaArray')):\n return 'MetaArray'\n elif isinstance(obj, np.ndarray):\n if obj.ndim == 1:\n if obj.dtype.names is None:\n return 'listOfValues'\n else:\n return 'recarray'\n elif obj.ndim == 2 and obj.dtype.names is None and obj.shape[1] == 2:\n return 'Nx2array'\n else:\n raise Exception('array shape must be (N,) or (N,2); got %s instead' % str(obj.shape))\n elif isinstance(first, dict):\n return 'listOfDicts'\n else:\n return 'listOfValues'\n \n \ndef isSequence(obj):\n return hasattr(obj, '__iter__') or isinstance(obj, np.ndarray) or (hasattr(obj, 'implements') and obj.implements('MetaArray'))\n \n \n \n#class TableData:\n #\"\"\"\n #Class for presenting multiple forms of tabular data through a consistent interface.\n #May contain:\n #- numpy record array\n #- list-of-dicts 
(all dicts are _not_ required to have the same keys)\n #- dict-of-lists\n #- dict (single record)\n #Note: if all the values in this record are lists, it will be interpreted as multiple records\n \n #Data can be accessed and modified by column, by row, or by value\n #data[columnName]\n #data[rowId]\n #data[columnName, rowId] = value\n #data[columnName] = [value, value, ...]\n #data[rowId] = {columnName: value, ...}\n #\"\"\"\n \n #def __init__(self, data):\n #self.data = data\n #if isinstance(data, np.ndarray):\n #self.mode = 'array'\n #elif isinstance(data, list):\n #self.mode = 'list'\n #elif isinstance(data, dict):\n #types = set(map(type, data.values()))\n ### dict may be a dict-of-lists or a single record\n #types -= set([list, np.ndarray]) ## if dict contains any non-sequence values, it is probably a single record.\n #if len(types) != 0:\n #self.data = [self.data]\n #self.mode = 'list'\n #else:\n #self.mode = 'dict'\n #elif isinstance(data, TableData):\n #self.data = data.data\n #self.mode = data.mode\n #else:\n #raise TypeError(type(data))\n \n #for fn in ['__getitem__', '__setitem__']:\n #setattr(self, fn, getattr(self, '_TableData'+fn+self.mode))\n \n #def originalData(self):\n #return self.data\n \n #def toArray(self):\n #if self.mode == 'array':\n #return self.data\n #if len(self) < 1:\n ##return np.array([]) ## need to return empty array *with correct columns*, but this is very difficult, so just return None\n #return None\n #rec1 = self[0]\n #dtype = functions.suggestRecordDType(rec1)\n ##print rec1, dtype\n #arr = np.empty(len(self), dtype=dtype)\n #arr[0] = tuple(rec1.values())\n #for i in xrange(1, len(self)):\n #arr[i] = tuple(self[i].values())\n #return arr\n \n #def __getitem__array(self, arg):\n #if isinstance(arg, tuple):\n #return self.data[arg[0]][arg[1]]\n #else:\n #return self.data[arg]\n \n #def __getitem__list(self, arg):\n #if isinstance(arg, basestring):\n #return [d.get(arg, None) for d in self.data]\n #elif isinstance(arg, int):\n 
#return self.data[arg]\n #elif isinstance(arg, tuple):\n #arg = self._orderArgs(arg)\n #return self.data[arg[0]][arg[1]]\n #else:\n #raise TypeError(type(arg))\n \n #def __getitem__dict(self, arg):\n #if isinstance(arg, basestring):\n #return self.data[arg]\n #elif isinstance(arg, int):\n #return dict([(k, v[arg]) for k, v in self.data.items()])\n #elif isinstance(arg, tuple):\n #arg = self._orderArgs(arg)\n #return self.data[arg[1]][arg[0]]\n #else:\n #raise TypeError(type(arg))\n\n #def __setitem__array(self, arg, val):\n #if isinstance(arg, tuple):\n #self.data[arg[0]][arg[1]] = val\n #else:\n #self.data[arg] = val\n\n #def __setitem__list(self, arg, val):\n #if isinstance(arg, basestring):\n #if len(val) != len(self.data):\n #raise Exception(\"Values (%d) and data set (%d) are not the same length.\" % (len(val), len(self.data)))\n #for i, rec in enumerate(self.data):\n #rec[arg] = val[i]\n #elif isinstance(arg, int):\n #self.data[arg] = val\n #elif isinstance(arg, tuple):\n #arg = self._orderArgs(arg)\n #self.data[arg[0]][arg[1]] = val\n #else:\n #raise TypeError(type(arg))\n \n #def __setitem__dict(self, arg, val):\n #if isinstance(arg, basestring):\n #if len(val) != len(self.data[arg]):\n #raise Exception(\"Values (%d) and data set (%d) are not the same length.\" % (len(val), len(self.data[arg])))\n #self.data[arg] = val\n #elif isinstance(arg, int):\n #for k in self.data:\n #self.data[k][arg] = val[k]\n #elif isinstance(arg, tuple):\n #arg = self._orderArgs(arg)\n #self.data[arg[1]][arg[0]] = val\n #else:\n #raise TypeError(type(arg))\n\n #def _orderArgs(self, args):\n ### return args in (int, str) order\n #if isinstance(args[0], basestring):\n #return (args[1], args[0])\n #else:\n #return args\n \n #def __iter__(self):\n #for i in xrange(len(self)):\n #yield self[i]\n\n #def __len__(self):\n #if self.mode == 'array' or self.mode == 'list':\n #return len(self.data)\n #else:\n #return max(map(len, self.data.values()))\n\n #def columnNames(self):\n 
#\"\"\"returns column names in no particular order\"\"\"\n #if self.mode == 'array':\n #return self.data.dtype.names\n #elif self.mode == 'list':\n #names = set()\n #for row in self.data:\n #names.update(row.keys())\n #return list(names)\n #elif self.mode == 'dict':\n #return self.data.keys()\n \n #def keys(self):\n #return self.columnNames()\n" ]
[ [ "numpy.fft.rfft", "numpy.array", "numpy.empty", "numpy.errstate", "numpy.fft.rfftfreq", "numpy.interp", "numpy.diff", "numpy.abs", "numpy.log10" ] ]
Robert-xiaoqiang/Model-Capability-Assessment
[ "3cb8673ea66bfeded9d6421e15b288b485ccc53b", "3cb8673ea66bfeded9d6421e15b288b485ccc53b" ]
[ "cbbc/qapackage/RACEProcessor.py", "cbbc/qapackage/TestHelper.py" ]
[ "import json\nimport os\nimport logging\nlogger = logging.getLogger(__name__)\nfrom functools import partial\nfrom multiprocessing import Pool, cpu_count\nfrom enum import Enum\nimport glob\n\nimport torch\nfrom torch import nn\nfrom torch.nn import init\nfrom torch.nn import functional as F\nfrom torch.utils.data import TensorDataset\n\nimport numpy as np\nfrom tqdm import tqdm\n\nclass RACEExample(object):\n def __init__(self,\n qas_id,\n context_text,\n question_text,\n answer_text_0,\n answer_text_1,\n answer_text_2,\n answer_text_3,\n label = None):\n self.qas_id = qas_id\n self.context_text = context_text\n self.question_text = question_text\n self.answer_texts = [\n answer_text_0,\n answer_text_1,\n answer_text_2,\n answer_text_3,\n ]\n self.label = label\n\nclass RACEFeatures(object):\n def __init__(self,\n qas_id,\n example_index,\n unique_id,\n choices_features,\n label\n\n ):\n self.qas_id = qas_id\n self.example_index = example_index\n self.unique_id = unique_id\n self.choices_features = [\n {\n 'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'token_type_ids': token_type_ids\n }\n for _, input_ids, attention_mask, token_type_ids in choices_features\n ]\n self.label = label\n\n\nclass RACEProcessor:\n def __init__(self):\n pass\n\n def get_train_examples(self, train_filename):\n return self._create_examples(train_filename)\n\n def get_dev_examples(self, dev_filename):\n return self._create_examples(dev_filename)\n\n def _create_examples(self, root_dirname):\n examples = [ ]\n for question_level in [ 'middle', 'high' ]:\n dirname = os.path.join(root_dirname, question_level)\n filenames = glob.glob(dirname + \"/*txt\")\n for filename in tqdm(filenames, desc = 'processing {} directory'.format(question_level)):\n main_filename = os.path.splitext(os.path.basename(filename))[0]\n with open(filename) as f:\n data_raw = json.load(f)\n article = data_raw['article']\n n_questions = len(data_raw['questions'])\n for i in range(n_questions):\n 
question = data_raw['questions'][i]\n # answer label (0123 -> ABCD)\n label = ord(data_raw['answers'][i]) - ord('A')\n options = data_raw['options'][i]\n # if possible(answerable)\n # train with the first answer\n # dev/val/test with all answers\n examples.append(\n RACEExample(\n qas_id = os.path.basename(root_dirname) + '-' + question_level + '-' + main_filename + '-q' + str(i),\n context_text = article,\n question_text = question,\n\n answer_text_0 = options[0],\n answer_text_1 = options[1],\n answer_text_2 = options[2],\n answer_text_3 = options[3],\n label = label))\n \n return examples \n\n\nclass RACEConverter:\n def __init__(self):\n pass\n def convert_examples_to_features(\n self, \n examples, \n tokenizer, \n max_seq_length, \n doc_stride,\n max_query_length,\n is_training, \n padding_strategy=\"max_length\",\n return_dataset=False,\n threads=1,\n tqdm_enabled=True\n ):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n unique_id = 1000000000\n example_index = 0\n features = []\n for example_index, example in enumerate(tqdm(examples)):\n context_tokens = tokenizer.tokenize(example.context_text)\n question_tokens = tokenizer.tokenize(example.question_text)\n\n choices_features = []\n for answer_index, answer in enumerate(example.answer_texts):\n qapair_tokens = question_tokens + tokenizer.tokenize(answer)\n self._truncate_seq_pair(context_tokens, qapair_tokens, max_seq_length - 3)\n\n tokens = [\"[CLS]\"] + context_tokens + [\"[SEP]\"] + qapair_tokens + [\"[SEP]\"]\n token_type_ids = [0] * (len(context_tokens) + 2) + [1] * (len(qapair_tokens) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n attention_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n attention_mask += padding\n token_type_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(attention_mask) == max_seq_length\n assert len(token_type_ids) == 
max_seq_length\n\n choices_features.append((tokens, input_ids, attention_mask, token_type_ids))\n\n label = example.label\n\n features.append(\n RACEFeatures(\n qas_id = example.qas_id,\n example_index = example_index,\n unique_id = unique_id,\n choices_features = choices_features,\n label = label\n )\n )\n # identify feature\n unique_id += 1\n # identify example, here we regard an example as a feature simply\n example_index += 1\n\n if return_dataset == \"pt\":\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([ [ choice_features['input_ids'] for choice_features in f.choices_features ]\n for f in features ], dtype=torch.long)\n all_attention_mask = torch.tensor([ [ choice_features['attention_mask'] for choice_features in f.choices_features ]\n for f in features ], dtype=torch.long)\n all_token_type_ids = torch.tensor([ [ choice_features['token_type_ids'] for choice_features in f.choices_features ]\n for f in features ], dtype=torch.long)\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n\n if not is_training:\n all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)\n dataset = TensorDataset(\n all_input_ids,\n all_attention_mask,\n all_token_type_ids,\n all_feature_index\n )\n else:\n dataset = TensorDataset(\n all_input_ids,\n all_attention_mask,\n all_token_type_ids,\n all_labels\n )\n return features, dataset\n else:\n return features\n\n def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n", "import collections\nimport json\nimport math\nimport re\nimport string\nimport logging\nimport unicodedata\n\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\nclass MCResult:\n def __init__(self, unique_id, choices_logits):\n self.choices_logits = choices_logits\n self.unique_id = unique_id\n\ndef compute_accuracy_logits(\n all_examples,\n all_features,\n all_results,\n output_prediction_file\n):\n if output_prediction_file:\n logger.info(f\"Writing predictions to: {output_prediction_file}\")\n \n unique_id_to_result = { }\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n \n example_index_to_feature = { }\n\n all_predictions = { }\n for example_index, example in enumerate(all_examples):\n \n feature_index = example_index # here, simple implementation\n\n feature = all_features[feature_index]\n label, unique_id = int(feature.label), int(feature.unique_id)\n result = unique_id_to_result[unique_id]\n\n choices_probs = compute_softmax(result.choices_logits)\n pred_option_id = np.argmax(np.asarray(choices_probs))\n\n all_predictions[example.qas_id] = {\n 'pred_option': chr(ord('A') + pred_option_id),\n 'pred_prob': choices_probs[pred_option_id],\n 'gold_answers': [ chr(ord('A') + label ) ],\n 'accuracy': int(pred_option_id == label)\n }\n\n if output_prediction_file:\n with open(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n return all_predictions\n\nclass QAResult:\n \"\"\"\n Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.\n Args:\n 
unique_id: The unique identifier corresponding to that example.\n start_logits: The logits corresponding to the start of the answer\n end_logits: The logits corresponding to the end of the answer\n \"\"\"\n\n def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):\n self.start_logits = start_logits\n self.end_logits = end_logits\n self.unique_id = unique_id\n\n if start_top_index:\n self.start_top_index = start_top_index\n self.end_top_index = end_top_index\n self.cls_logits = cls_logits\n\ndef compute_predictions_logits(\n all_examples,\n all_features,\n all_results,\n n_best_size,\n max_answer_length,\n do_lower_case,\n output_prediction_file,\n output_nbest_file,\n verbose_logging,\n version,\n null_score_diff_threshold,\n tokenizer\n):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n if output_prediction_file:\n logger.info(f\"Writing predictions to: {output_prediction_file}\")\n if output_nbest_file:\n logger.info(f\"Writing nbest to: {output_nbest_file}\")\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\", [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\", \"start_prob\", \"end_prob\"]\n )\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the 
paragraph slice with min null score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n feature_start_probs = compute_softmax(result.start_logits)\n feature_end_probs = compute_softmax(result.end_logits)\n # if we could have irrelevant answers, get the min score of irrelevant\n if version == 2:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n null_start_prob = feature_start_probs[0]\n null_end_prob = feature_end_probs[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. 
We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index],\n start_prob=feature_start_probs[start_index],\n end_prob=feature_end_probs[end_index]\n )\n )\n # both start_logits and end_logits are inclined to output null charactar ([0] in vacab.list),\n # then the prediction text will be empty string.\n if version == 2:\n prelim_predictions.append(\n PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit,\n start_prob=null_start_prob,\n end_prob=null_end_prob \n )\n )\n # build for nbest (logits/probs with descending order)\n prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)\n\n NbestPrediction = collections.namedtuple(\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\", \"start_prob\", \"end_prob\"]\n )\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n #\n # this token is from PretrainedTokenizer (WordPiece tokenized, normalized, especially uncased).\n # It will degrads performance when being evaluated by official scripts !!!!\n #\n tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]\n orig_doc_start = 
feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n #\n # however, this token is from whitespace_tokenize.\n # It is more natural to represent answers, but contains dirty subtokens !!!!\n #\n orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]\n\n tok_text = tokenizer.convert_tokens_to_string(tok_tokens)\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)\n if final_text in seen_predictions:\n continue\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit, start_prob=pred.start_prob, end_prob=pred.end_prob))\n\n # if we didn't include the empty option in the n-best, include it\n if version == 2:\n if \"\" not in seen_predictions:\n nbest.append(NbestPrediction(text=\"\", start_logit=null_start_logit, end_logit=null_end_logit, start_prob=null_start_prob, end_prob=null_end_prob))\n\n # In very rare edge cases we could only have single null prediction.\n # So we just create a nonce prediction in this case to avoid failure.\n if len(nbest) == 1:\n nbest.insert(0, NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0, start_prob=0.0, end_prob=0.0))\n\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0, start_prob=0.0, end_prob=0.0))\n\n assert len(nbest) >= 1, \"No valid predictions\"\n\n total_scores = []\n best_non_null_entry = None\n # nbest has been in a descending order,\n # so best_non_null_entry is just the first entry with its text non-empty string\n # (maybe from sorted prelim_predictions or extra null appending to avoid assertation)\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n output[\"start_prob\"] = entry.start_prob\n output[\"end_prob\"] = entry.end_prob\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1, \"No valid predictions\"\n\n if not version == 2:\n # on SQuAD1.1, we regard the best answer as our final answer\n all_predictions[example.qas_id] = {\n \"text\": nbest_json[0][\"text\"],\n \"start_logit\": nbest_json[0][\"start_logit\"],\n \"end_logit\": nbest_json[0][\"end_logit\"],\n \"start_prob\": nbest_json[0][\"start_prob\"],\n \"end_prob\": nbest_json[0][\"end_prob\"],\n }\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - (best_non_null_entry.start_logit + best_non_null_entry.end_logit)\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > null_score_diff_threshold:\n all_predictions[example.qas_id] = {\n 'text': \"\",\n 'start_logit': null_start_logit,\n 'end_logit': null_end_logit,\n 'start_prob': null_start_prob,\n 'end_prob': null_end_prob, \n }\n else:\n all_predictions[example.qas_id] = 
{\n 'text': best_non_null_entry.text,\n 'start_logit': best_non_null_entry.start_logit,\n 'end_logit': best_non_null_entry.end_logit,\n 'start_prob': best_non_null_entry.start_prob,\n 'end_prob': best_non_null_entry.end_prob,\n }\n all_nbest_json[example.qas_id] = nbest_json\n\n overall_em_f1_score = inplace_evaluate_em_f1(all_examples, all_predictions)\n\n # id -> best answer {text, start_logit, end_logit}\n if output_prediction_file:\n with open(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n # id -> nbest list [ {text, probability, start_logit, end_logit}, ... ]\n if output_nbest_file:\n with open(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n return overall_em_f1_score\n\ndef _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes\n\ndef compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs\n\ndef get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. 
So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heuristic between\n # `pred_text` and `orig_text` to get a character-to-character alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if verbose_logging:\n logger.info(f\"Unable to find text: '{pred_text}' in '{orig_text}'\")\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if verbose_logging:\n logger.info(f\"Length not equal after stripping spaces: '{orig_ns_text}' vs '{tok_ns_text}'\")\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in tok_ns_to_s_map.items():\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if verbose_logging:\n logger.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if verbose_logging:\n logger.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position : (orig_end_position + 1)]\n return output_text\n\ndef inplace_evaluate_em_f1(examples, preds):\n \"\"\"\n Computes the exact and f1 scores from the examples and the model predictions\n \"\"\"\n exact_scores = {}\n f1_scores = {}\n\n total_em_score = total_f1_score = 0.0\n n_examples = 
len(examples)\n\n for example in examples:\n qas_id = example.qas_id\n gold_answers = [ answer[\"text\"] for answer in example.answers if normalize_answer(answer[\"text\"]) ]\n\n if not gold_answers:\n # For unanswerable questions, only correct answer is empty string\n gold_answers = [\"\"]\n\n if qas_id not in preds:\n print(f\"Missing prediction for {qas_id}\")\n continue\n\n pred = preds[qas_id]['text']\n em_score = max(compute_exact(a, pred) for a in gold_answers)\n f1_score = max(compute_f1(a, pred) for a in gold_answers)\n\n preds[qas_id].update({\n 'gold_answers': gold_answers,\n 'em_score': em_score,\n 'f1_score': f1_score\n })\n total_em_score += em_score\n total_f1_score += f1_score\n\n total_em_score /= n_examples\n total_f1_score /= n_examples\n\n return total_em_score, total_f1_score\n\ndef compute_exact(a_gold, a_pred):\n return int(normalize_answer(a_gold) == normalize_answer(a_pred))\n\ndef compute_f1(a_gold, a_pred):\n gold_toks = get_tokens(a_gold)\n pred_toks = get_tokens(a_pred)\n common = collections.Counter(gold_toks) & collections.Counter(pred_toks)\n num_same = sum(common.values())\n if len(gold_toks) == 0 or len(pred_toks) == 0:\n # If either is no-answer, then F1 is 1 if they agree, 0 otherwise\n return int(gold_toks == pred_toks)\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(pred_toks)\n recall = 1.0 * num_same / len(gold_toks)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1\n\ndef normalize_answer(s):\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n\n def remove_articles(text):\n regex = re.compile(r\"\\b(a|an|the)\\b\", re.UNICODE)\n return re.sub(regex, \" \", text)\n\n def white_space_fix(text):\n return \" \".join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return \"\".join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\ndef 
get_tokens(s):\n if not s:\n return []\n return normalize_answer(s).split()\n\nclass BasicTokenizer:\n \"\"\"\n Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).\n\n Args:\n do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to lowercase the input when tokenizing.\n never_split (:obj:`Iterable`, `optional`):\n Collection of tokens which will never be split during tokenization. Only has an effect when\n :obj:`do_basic_tokenize=True`\n tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to tokenize Chinese characters.\n\n This should likely be deactivated for Japanese (see this `issue\n <https://github.com/huggingface/transformers/issues/328>`__).\n strip_accents: (:obj:`bool`, `optional`):\n Whether or not to strip all accents. If this option is not specified, then it will be determined by the\n value for :obj:`lowercase` (as in the original BERT).\n \"\"\"\n\n def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None):\n if never_split is None:\n never_split = []\n self.do_lower_case = do_lower_case\n self.never_split = set(never_split)\n self.tokenize_chinese_chars = tokenize_chinese_chars\n self.strip_accents = strip_accents\n\n def tokenize(self, text, never_split=None):\n \"\"\"\n Basic Tokenization of a piece of text. Split on \"white spaces\" only, for sub-word tokenization, see\n WordPieceTokenizer.\n\n Args:\n **never_split**: (`optional`) list of str\n Kept for backward compatibility purposes. 
Now implemented directly at the base class level (see\n :func:`PreTrainedTokenizer.tokenize`) List of token not to split.\n \"\"\"\n # union() returns a new set by concatenating the two sets.\n never_split = self.never_split.union(set(never_split)) if never_split else self.never_split\n text = self._clean_text(text)\n\n # This was added on November 1st, 2018 for the multilingual and Chinese\n # models. This is also applied to the English models now, but it doesn't\n # matter since the English models were not trained on any Chinese data\n # and generally don't have any Chinese data in them (there are Chinese\n # characters in the vocabulary because Wikipedia does have some Chinese\n # words in the English Wikipedia.).\n if self.tokenize_chinese_chars:\n text = self._tokenize_chinese_chars(text)\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if token not in never_split:\n if self.do_lower_case:\n token = token.lower()\n if self.strip_accents is not False:\n token = self._run_strip_accents(token)\n elif self.strip_accents:\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token, never_split))\n\n output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n return output_tokens\n\n def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)\n\n def _run_split_on_punc(self, text, never_split=None):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n if never_split is not None and text in never_split:\n return [text]\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = 
False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in output]\n\n def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(\" \")\n output.append(char)\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\n def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n # This defines a \"chinese character\" as anything in the CJK Unicode block:\n # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n #\n # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n # despite its name. The modern Korean Hangul alphabet is a different block,\n # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n # space-separated words, so they are not treated specially and handled\n # like the all of the other languages.\n if (\n (cp >= 0x4E00 and cp <= 0x9FFF)\n or (cp >= 0x3400 and cp <= 0x4DBF) #\n or (cp >= 0x20000 and cp <= 0x2A6DF) #\n or (cp >= 0x2A700 and cp <= 0x2B73F) #\n or (cp >= 0x2B740 and cp <= 0x2B81F) #\n or (cp >= 0x2B820 and cp <= 0x2CEAF) #\n or (cp >= 0xF900 and cp <= 0xFAFF)\n or (cp >= 0x2F800 and cp <= 0x2FA1F) #\n ): #\n return True\n\n return False\n\n def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)\n\ndef _is_whitespace(char):\n \"\"\"Checks whether `char` is a whitespace character.\"\"\"\n # \\t, \\n, and \\r are technically control characters but we treat them\n # as whitespace since they are generally considered as such.\n if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == 
\"\\r\":\n return True\n cat = unicodedata.category(char)\n if cat == \"Zs\":\n return True\n return False\n\n\ndef _is_control(char):\n \"\"\"Checks whether `char` is a control character.\"\"\"\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False\n\n\ndef _is_punctuation(char):\n \"\"\"Checks whether `char` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False\n\ndef whitespace_tokenize(text):\n \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n text = text.strip()\n if not text:\n return []\n tokens = text.split()\n return tokens" ]
[ [ "torch.tensor", "torch.utils.data.TensorDataset" ], [ "numpy.asarray" ] ]
Madrugaur/wiki-gender-and-stem
[ "d9ee8600651b91b6225bcb0fa824f8830711fba1" ]
[ "code/python/scripts/keyword_identifier.py" ]
[ "import math\n\nfrom scipy.stats.distributions import chi2\nimport os\nimport json\n\nfrom google_ngram_api import NGramRequest\n\n\ndef find_all_keys(data):\n keys = set()\n for data_dict in data:\n for key in data_dict[\"counts\"].keys():\n if data_dict[\"counts\"][key] > 10:\n keys.add(key)\n return keys\n\n\ndef find_relative_keywords(a_dict, b_dict):\n keywords = set()\n for key in a_dict:\n if key in b_dict:\n a_freq = math.log(a_dict[key])\n b_freq = math.log(b_dict[key])\n lr = likelihood_ratio(a_freq, b_freq)\n p = chi2.sf(lr, 1)\n if p < 0.001:\n keywords.add(key.lower())\n return keywords\n\n\ndef find_freq_by_gender(data):\n f_keys = find_all_keys(filter(lambda item: item[\"gender\"] == \"female\", data))\n m_keys = find_all_keys(filter(lambda item: item[\"gender\"] == \"male\", data))\n m_dict = dict()\n f_dict = dict()\n for d_dict in data:\n for key in d_dict[\"freq\"].keys():\n if key in f_keys:\n f_dict[key] = d_dict[\"freq\"][key]\n if key in m_keys:\n m_dict[key] = d_dict[\"freq\"][key]\n f_keywords = find_relative_keywords(f_dict, m_dict)\n m_keywords = find_relative_keywords(m_dict, f_dict)\n print(\"-------f_keywords\\n\", \"\\n\".join(f_keywords))\n print(\"-------m_keywords\\n\", \"\\n\".join(m_keywords))\n\n\ndef find_aggregate_freq(data):\n agg_freq = dict()\n count_dict = dict()\n keys = find_all_keys(data)\n for key in keys:\n for data_dict in data:\n l_key = key.lower()\n if key in data_dict[\"freq\"].keys():\n if l_key in agg_freq.keys():\n agg_freq[l_key] += data_dict[\"freq\"][key]\n count_dict[l_key] += 1\n else:\n agg_freq[l_key] = data_dict[\"freq\"][key]\n count_dict[l_key] = 1\n for key in agg_freq.keys():\n agg_freq[key] /= count_dict[key]\n\n return agg_freq\n\n\ndef likelihood_ratio(llmin, llmax):\n return 2 * (llmax - llmin)\n\n\ndef get_data():\n files = os.listdir(\"data/people/\")\n data = []\n for f in files:\n with open(\"data/people/\" + f, \"r\", encoding=\"utf8\") as file:\n data.append(json.load(file))\n return 
data\n\n\ndef find_aggregate_keywords():\n data = get_data()\n with open(\"data/aggregate_frequency.json\", \"r\", encoding=\"utf8\") as file:\n agg_freq = json.load(file)\n key_words = set()\n for person_data in data:\n for word in person_data[\"freq\"].keys():\n if word.lower() in agg_freq.keys():\n local_freq = math.log(person_data[\"freq\"][word])\n act_freq = math.log(agg_freq[word.lower()])\n lr = likelihood_ratio(local_freq, act_freq)\n p = chi2.sf(lr, 1)\n if p < 0.001:\n key_words.add(word)\n with open(\"data/aggregate_keywords.txt\", \"w\", encoding=\"utf8\") as keyword_file:\n for key in key_words:\n keyword_file.write(key + \"\\n\")\n\n\ndef find_global_keywords(data):\n key_words = set()\n with open(\"data/aggregate_frequency.json\", \"r\", encoding=\"utf8\") as file:\n agg_dict = json.load(file)\n keys = list(agg_dict.keys())\n global_freqs = list()\n step = 900\n for chunk in range(step, len(keys), step):\n search_str = \",\".join(keys[chunk - step: chunk])\n retrieved = NGramRequest(search_str, start_year=2018).getJSON()\n global_freqs.append(retrieved)\n with open(\"data/global_freqs.json\", \"w\", encoding=\"utf8\") as g_freqs_file:\n g_freqs_file.write(json.dumps(global_freqs, indent=4))\n for ngram in global_freqs:\n g_word = ngram['ngram']\n g_freq = ngram['timeseries'][-1]\n if g_freq != 0:\n g_ll = math.log(g_freq)\n local_ll = math.log(agg_dict[g_word])\n lr = likelihood_ratio(local_ll, g_ll)\n p = chi2.sf(lr, 1)\n if p < 0.001:\n print(g_word)\n key_words.add(g_word)\n print(\"\\n\".join(key_words))\n\n\nif __name__ == '__main__':\n find_global_keywords(get_data())\n # find_freq_by_gender(get_data())\n # find_aggregate_keywords()\n" ]
[ [ "scipy.stats.distributions.chi2.sf" ] ]
aicroe/mlscratch
[ "59100ea2a83fc8cd8ae617b686f6981d62073528" ]
[ "src/mlscratch/measurer/pairwise_measurer.py" ]
[ "\"\"\"PairwiseMeasurer's module.\"\"\"\nimport numpy as np\n\nfrom mlscratch.tensor import Tensor\nfrom .measurer import Measurer\n\nclass PairwiseMeasurer(Measurer[float]):\n \"\"\"Compares the actual result against the expected element by element.\"\"\"\n\n def measure(self, result: Tensor, expected: Tensor):\n return np.average(result == expected)\n" ]
[ [ "numpy.average" ] ]
ColinAE/Computer-Vision-Classifier
[ "188200e13fdff8c805afd913d92a052034588a6d" ]
[ "evaluation.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 6 12:58:03 2016\n\n@author: haiming\n\"\"\"\n\nimport sys\nimport numpy as np\nimport copy\nthreashold = 0.5\nframe_c_r = 0\nframe_c_w = 0\n# gt.txt the text file that contain ground truth\n# dt.txt the text file that contain the region of interest created by our algorithm\ndef print_Usage_and_Exit():\n # threashold from 0.1 to 0.5\n print('Usage: evaluation.py (gt.txt) (dt.txt) (threadshold)')\n sys.exit(-1)\n\ndef load_file(gt_file, dt_file):\n def bs(bytes):\n '''Convert bytes to string and remove double quotes'''\n return str(bytes,'utf-8').replace('\"','')\n classification = {'car':0, 'person':1, 'motorcycle':2,'unknown':10}\n converters = {9: lambda x: classification[bs(x)]}\n datagt = np.loadtxt(gt_file, delimiter=' ', converters=converters, dtype='int')\n datadt = np.loadtxt(dt_file, delimiter=' ', converters=converters, dtype='int')\n #sort the data by frame number \n datagt_sort = datagt[datagt[:, 5].argsort()]\n datadt_sort = datadt[datadt[:, 5].argsort()]\n # delete all the objects that out of frame\n datagt_com = datagt_sort[datagt_sort[:, 6]==0]\n datadt_com = datadt #datadt[datadt[:, 6]==0]\n return (datagt_com, datadt_com)\n\ndef frame_class(dt, gt):\n global frame_c_r\n global frame_c_w\n if dt[9] == gt[9]:\n frame_c_r += 1\n else:\n frame_c_w += 1\n \n#simple algorithm to find the TP TN and FP\ndef FindOverLap(gts, dts):\n dt_list = [] \n if gts.shape[0] != 0 and dts.shape[0] != 0:\n for gt in gts:\n GTArea = (gt[3] - gt[1])*(gt[4]-gt[2])\n for dt in dts:\n DTArea = (dt[3] - dt[1])*(dt[4]-dt[2])\n cross_xmin = np.maximum(gt[1], dt[1])\n cross_ymin = np.maximum(gt[2], dt[2])\n cross_xmax = np.minimum(gt[3], dt[3])\n cross_ymax = np.minimum(gt[4], dt[4])\n if cross_xmin >= cross_xmax or cross_ymin >= cross_ymax:\n cross_area = 0\n else:\n cross_area = (cross_xmax - cross_xmin) * (cross_ymax - cross_ymin) \n overlap_percentage = 2 * cross_area / (GTArea + DTArea)\n if overlap_percentage >= 
threashold:\n dt_list.append([dt[0], gt[0]])\n frame_class(dt, gt)\n # print(\"GT id %d, DT id %d, cross_area %f\" % (gt[0], dt[0], overlap_percentage))\n return [(dts.shape[0], gts.shape[0]), dt_list]\n\ndef frame_based_detection(gt_data, dt_data):\n first_frame = dt_data[0, 5] - 1\n last_frame = dt_data[-1, 5] - 1\n # print(first_frame, last_frame)\n full_list = []\n for frame in range(first_frame, last_frame + 1):\n # print(frame)\n gts = gt_data[gt_data[:, 5] == frame]\n dts = dt_data[dt_data[:, 5] == frame + 1]\n full_list.append([frame, FindOverLap(gts, dts)])\n pair_dict = {}\n #Frame based calculation\n TP_sum = 0\n FP_sum = 0\n FN_sum = 0\n for struct in full_list:\n frame, dt_list = struct\n (n_dt, n_gt), pairs = dt_list\n dt_v = np.zeros(1000, dtype = int)\n gt_v = np.zeros(1000, dtype = int)\n if len(pairs) != 0: \n for pair in pairs:\n dt_id, gt_id = pair\n dt_v[dt_id] = 1\n gt_v[gt_id] = 1\n index = (dt_id, gt_id)\n if index not in pair_dict:\n pair_dict[index] = [frame]\n else:\n pair_dict[index].append(frame)\n TP = np.sum(gt_v)\n FP = n_dt - np.sum(dt_v)\n FN = n_gt - TP\n TP_sum += TP\n FP_sum += FP \n FN_sum += FN \n # print(frame, TP, FP, FN)\n print(\"Frame level:TP=%d FP=%d FN=%d\" % (TP_sum, FP_sum, FN_sum))\n sensitivity = TP_sum/(TP_sum + FN_sum)\n PPV = TP_sum/(TP_sum + FP_sum)\n print(\"Frame Level: S=%.3f PPV=%.3f\" %(sensitivity, PPV))\n print(\"Frame Level: R=%d W=%d %.3f\" %(frame_c_r, frame_c_w, frame_c_r/(frame_c_r+frame_c_w)))\n return pair_dict\n\ndef gt_analysis(gt_frame, dt_frame, pair_dict):\n gt_r = copy.copy(gt_frame)\n FP_frame = 0\n TP_frame = 0\n FN_frame = 0 \n FN_P = 0\n for key,value in pair_dict.items():\n if len(value) >= 20:\n dt_index, gt_index = key\n gt_r[gt_index] = list(set(gt_r[gt_index]) - set(value))\n FP_frame += len(list(set(dt_frame[dt_index]) - set(value)))\n i = 0\n for key,value in gt_r.items():\n # print(key, len(value) / len(gt_frame[key]))\n TP_frame += len(gt_frame[key]) - len(value)\n FN_frame += 
len(value)\n p1 = len(value) / len(gt_frame[key])\n FN_P += p1\n i += 1\n s = TP_frame / (TP_frame + FN_frame)\n ppv = TP_frame / (TP_frame + FP_frame)\n print(\"object level, ground truth\")\n print(TP_frame, FP_frame, FN_frame, s, ppv)\n print(FN_P/i, 1-(FN_P/i))\n \ndef dt_analysis(gt_frame, dt_frame, pair_dict):\n dt_r = copy.copy(dt_frame)\n FP_frame = 0\n TP_frame = 0\n FN_frame = 0\n FP_P = 0\n for key,value in pair_dict.items():\n if len(value) >= 20:\n dt_index, gt_index = key\n dt_r[dt_index] = list(set(dt_r[dt_index]) - set(value))\n FN_frame += len(list(set(gt_frame[gt_index]) - set(value)))\n i = 0 \n for key,value in dt_r.items():\n # print(key, len(value))\n TP_frame += len(dt_frame[key]) - len(value)\n FP_frame += len(value)\n p1 = len(value) / len(dt_frame[key])\n FP_P += p1\n i += 1\n s = TP_frame / (TP_frame + FN_frame)\n ppv = TP_frame / (TP_frame + FP_frame)\n print(\"object level, system measurement\")\n print(TP_frame, FP_frame, FN_frame, s, ppv)\n print(FP_P/i, 1-(FP_P/i))\n \ndef match_class(pair_dict, datagt_com, datadt_com):\n right_c = 0\n wrong_c = 0\n for key,value in pair_dict.items():\n if len(value) >= 20:\n dt_index, gt_index = key\n for frame in value:\n gt_c = datagt_com[datagt_com[:, 5] == frame]\n gt_c = gt_c[gt_c[:,0] == gt_index][0, 9]\n dt_c = datadt_com[datadt_com[:, 5] == frame+1]\n dt_c = dt_c[dt_c[:,0] == dt_index][0, 9]\n if gt_c == dt_c:\n right_c += 1\n else:\n wrong_c += 1\n percentage = right_c/(right_c + wrong_c)\n print(\"object level, classification\")\n print(right_c, wrong_c, percentage)\n\ndef object_based_detection(pair_dict, gt_data, dt_data):\n gt_frame = {}\n dt_frame = {}\n #Objects based calculation\n first_frame = dt_data[0, 5] - 1\n last_frame = dt_data[-1, 5] - 1\n # print(first_frame, last_frame)\n for frame in range(first_frame, last_frame + 1):\n #print(frame)\n gts = gt_data[gt_data[:, 5] == frame]\n dts = dt_data[dt_data[:, 5] == frame + 1]\n if gts.shape[0] != 0:\n for gt in gts:\n if gt[0] not 
in gt_frame:\n gt_frame[gt[0]] = [frame]\n else:\n gt_frame[gt[0]].append(frame)\n if dts.shape[0] != 0:\n for dt in dts:\n if dt[0] not in dt_frame:\n dt_frame[dt[0]] = [frame]\n else:\n dt_frame[dt[0]].append(frame)\n gt_analysis(gt_frame, dt_frame, pair_dict)\n dt_analysis(gt_frame, dt_frame, pair_dict)\n match_class(pair_dict, gt_data, dt_data)\n \ndef main(arg=None):\n if len(sys.argv) != 4:\n print_Usage_and_Exit()\n gt_file = sys.argv[1]\n dt_file = sys.argv[2]\n global threashold\n threashold = float(sys.argv[3])\n gt_data, dt_data = load_file(gt_file, dt_file)\n p_d = frame_based_detection(gt_data, dt_data)\n object_based_detection(p_d, gt_data, dt_data)\n return 0\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.zeros", "numpy.minimum", "numpy.sum", "numpy.loadtxt", "numpy.maximum" ] ]
mfomicheva/OpenNMT-tf
[ "a367676a16f9e77f76bc58e138e78614eb4add1e" ]
[ "opennmt/inputters/inputter.py" ]
[ "\"\"\"Define generic inputters.\"\"\"\n\nimport abc\nimport six\n\nimport tensorflow as tf\n\nfrom opennmt.layers.reducer import ConcatReducer, JoinReducer\nfrom opennmt.utils import compat\nfrom opennmt.utils.data import inference_pipeline, training_pipeline\nfrom opennmt.utils.misc import extract_prefixed_keys, extract_suffixed_keys, item_or_tuple\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass Inputter(tf.keras.layers.Layer):\n \"\"\"Base class for inputters.\"\"\"\n\n def __init__(self, dtype=tf.float32):\n super(Inputter, self).__init__(dtype=dtype)\n self.volatile = set()\n self.process_hooks = []\n self.is_target = False\n\n @property\n def num_outputs(self):\n \"\"\"How many parallel outputs does this inputter produce.\"\"\"\n return 1\n\n def initialize(self, metadata, asset_dir=None, asset_prefix=\"\"):\n \"\"\"Initializes the inputter.\n\n Args:\n metadata: A dictionary containing additional metadata set\n by the user.\n asset_dir: The directory where assets can be written. If ``None``, no\n assets are returned.\n asset_prefix: The prefix to attach to assets filename.\n\n Returns:\n A dictionary containing additional assets used by the inputter.\n \"\"\"\n _ = metadata\n if asset_dir is not None:\n return self.export_assets(asset_dir, asset_prefix=asset_prefix)\n return {}\n\n def export_assets(self, asset_dir, asset_prefix=\"\"):\n \"\"\"Exports assets used by this tokenizer.\n\n Args:\n asset_dir: The directory where assets can be written.\n asset_prefix: The prefix to attach to assets filename.\n\n Returns:\n A dictionary containing additional assets used by the inputter.\n \"\"\"\n _ = asset_dir\n _ = asset_prefix\n return {}\n\n @abc.abstractmethod\n def make_dataset(self, data_file, training=None):\n \"\"\"Creates the base dataset required by this inputter.\n\n Args:\n data_file: The data file.\n training: Run in training mode.\n\n Returns:\n A ``tf.data.Dataset``.\n \"\"\"\n raise NotImplementedError()\n\n def make_inference_dataset(self,\n 
features_file,\n batch_size,\n bucket_width=None,\n num_threads=1,\n prefetch_buffer_size=None):\n \"\"\"Builds a dataset to be used for inference.\n\n For evaluation and training datasets, see\n :class:`opennmt.inputters.inputter.ExampleInputter`.\n\n Args:\n features_file: The test file.\n batch_size: The batch size to use.\n bucket_width: The width of the length buckets to select batch candidates\n from (for efficiency). Set ``None`` to not constrain batch formation.\n num_threads: The number of elements processed in parallel.\n prefetch_buffer_size: The number of batches to prefetch asynchronously. If\n ``None``, use an automatically tuned value on TensorFlow 1.8+ and 1 on\n older versions.\n\n Returns:\n A ``tf.data.Dataset``.\n \"\"\"\n map_func = lambda *arg: self.make_features(item_or_tuple(arg), training=False)\n dataset = self.make_dataset(features_file, training=False)\n dataset = inference_pipeline(\n dataset,\n batch_size,\n process_fn=map_func,\n num_threads=num_threads,\n prefetch_buffer_size=prefetch_buffer_size,\n bucket_width=bucket_width,\n length_fn=self.get_length)\n return dataset\n\n @abc.abstractmethod\n def get_dataset_size(self, data_file):\n \"\"\"Returns the size of the dataset.\n\n Args:\n data_file: The data file.\n\n Returns:\n The total size.\n \"\"\"\n raise NotImplementedError()\n\n def get_serving_input_receiver(self):\n \"\"\"Returns a serving input receiver for this inputter.\n\n Returns:\n A ``tf.estimator.export.ServingInputReceiver``.\n \"\"\"\n if self.is_target:\n raise ValueError(\"Target inputters do not define a serving input\")\n receiver_tensors = self.get_receiver_tensors()\n if receiver_tensors is None:\n raise NotImplementedError(\"This inputter does not define receiver tensors.\")\n features = self.make_features(features=receiver_tensors.copy())\n return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)\n\n def get_receiver_tensors(self):\n \"\"\"Returns the input placeholders for 
serving.\"\"\"\n return None\n\n def get_length(self, features):\n \"\"\"Returns the length of the input features, if defined.\"\"\"\n return features.get(\"length\")\n\n @abc.abstractmethod\n def make_features(self, element=None, features=None, training=None):\n \"\"\"Creates features from data.\n\n Args:\n element: An element from the dataset.\n features: An optional dictionary of features to augment.\n training: Run in training mode.\n\n Returns:\n A dictionary of ``tf.Tensor``.\n \"\"\"\n raise NotImplementedError()\n\n def call(self, features, training=None): # pylint: disable=arguments-differ\n \"\"\"Forwards call to ``make_inputs().``\"\"\"\n return self.make_inputs(features, training=training)\n\n def make_inputs(self, features, training=None):\n \"\"\"Creates the model input from the features.\n\n Args:\n features: A dictionary of ``tf.Tensor``.\n training: Run in training mode.\n\n Returns:\n The model input.\n \"\"\"\n _ = training\n return features\n\n def visualize(self, log_dir):\n \"\"\"Visualizes the transformation, usually embeddings.\n\n Args:\n log_dir: The active log directory.\n \"\"\"\n _ = log_dir\n return\n\n\n # TODO: remove the following methods at some point.\n\n def set_data_field(self, data, key, value, volatile=False):\n \"\"\"Sets a data field.\n\n Args:\n data: The data dictionary.\n key: The value key.\n value: The value to assign.\n volatile: If ``True``, the key/value pair will be removed once the\n processing done.\n\n Returns:\n The updated data dictionary.\n \"\"\"\n data[key] = value\n if volatile:\n self.volatile.add(key)\n return data\n\n def remove_data_field(self, data, key):\n \"\"\"Removes a data field.\n\n Args:\n data: The data dictionary.\n key: The value key.\n\n Returns:\n The updated data dictionary.\n \"\"\"\n del data[key]\n return data\n\n def add_process_hooks(self, hooks):\n \"\"\"Adds processing hooks.\n\n Processing hooks are additional and model specific data processing\n functions applied after calling 
this inputter\n :meth:`opennmt.inputters.inputter.Inputter.process` function.\n\n Args:\n hooks: A list of callables with the signature\n ``(inputter, data) -> data``.\n \"\"\"\n self.process_hooks.extend(hooks)\n\n def process(self, data, training=None):\n \"\"\"Prepares raw data.\n\n Args:\n data: The raw data.\n training: Run in training mode.\n\n Returns:\n A dictionary of ``tf.Tensor``.\n\n See Also:\n :meth:`opennmt.inputters.inputter.Inputter.transform_data`\n \"\"\"\n data = self.make_features(data, training=training)\n for hook in self.process_hooks:\n data = hook(self, data)\n for key in self.volatile:\n data = self.remove_data_field(data, key)\n self.volatile.clear()\n return data\n\n def transform_data(self, data, mode=tf.estimator.ModeKeys.TRAIN, log_dir=None):\n \"\"\"Transforms the processed data to an input.\n\n This is usually a simple forward of a :obj:`data` field to\n :meth:`opennmt.inputters.inputter.Inputter.transform`.\n\n See also `process`.\n\n Args:\n data: A dictionary of data fields.\n mode: A ``tf.estimator.ModeKeys`` mode.\n log_dir: The log directory. 
If set, visualization will be setup.\n\n Returns:\n The transformed input.\n \"\"\"\n inputs = self.make_inputs(data, training=mode == tf.estimator.ModeKeys.TRAIN)\n if log_dir:\n self.visualize(log_dir)\n return inputs\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass MultiInputter(Inputter):\n \"\"\"An inputter that gathers multiple inputters, possibly nested.\"\"\"\n\n def __init__(self, inputters, reducer=None):\n if not isinstance(inputters, list) or not inputters:\n raise ValueError(\"inputters must be a non empty list\")\n dtype = inputters[0].dtype\n for inputter in inputters:\n if inputter.dtype != dtype:\n raise TypeError(\"All inputters must have the same dtype\")\n super(MultiInputter, self).__init__(dtype=dtype)\n self.inputters = inputters\n self.reducer = reducer\n\n @property\n def num_outputs(self):\n if self.reducer is None or isinstance(self.reducer, JoinReducer):\n return len(self.inputters)\n return 1\n\n def get_leaf_inputters(self):\n \"\"\"Returns a list of all leaf Inputter instances.\"\"\"\n inputters = []\n for inputter in self.inputters:\n if isinstance(inputter, MultiInputter):\n inputters.extend(inputter.get_leaf_inputters())\n else:\n inputters.append(inputter)\n return inputters\n\n def __getattribute__(self, name):\n if name == \"built\":\n return all(inputter.built for inputter in self.inputters)\n else:\n return super(MultiInputter, self).__getattribute__(name)\n\n def initialize(self, metadata, asset_dir=None, asset_prefix=\"\"):\n for i, inputter in enumerate(self.inputters):\n inputter.initialize(metadata, asset_prefix=\"%s%d_\" % (asset_prefix, i + 1))\n return super(MultiInputter, self).initialize(\n metadata, asset_dir=asset_dir, asset_prefix=asset_prefix)\n\n def export_assets(self, asset_dir, asset_prefix=\"\"):\n assets = {}\n for i, inputter in enumerate(self.inputters):\n assets.update(inputter.export_assets(\n asset_dir, asset_prefix=\"%s%d_\" % (asset_prefix, i + 1)))\n return assets\n\n @abc.abstractmethod\n def 
make_dataset(self, data_file, training=None):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_dataset_size(self, data_file):\n raise NotImplementedError()\n\n def visualize(self, log_dir):\n for inputter in self.inputters:\n inputter.visualize(log_dir)\n\n\nclass ParallelInputter(MultiInputter):\n \"\"\"An multi inputter that process parallel data.\"\"\"\n\n def __init__(self,\n inputters,\n reducer=None,\n share_parameters=False,\n combine_features=True):\n \"\"\"Initializes a parallel inputter.\n\n Args:\n inputters: A list of :class:`opennmt.inputters.inputter.Inputter`.\n reducer: A :class:`opennmt.layers.reducer.Reducer` to merge all inputs. If\n set, parallel inputs are assumed to have the same length.\n share_parameters: Share the inputters parameters.\n combine_features: Combine each inputter features in a single dict or\n return them separately. This is typically ``True`` for multi source\n inputs but ``False`` for features/labels parallel data.\n\n Raises:\n ValueError: if :obj:`share_parameters` is set but the child inputters are\n not of the same type.\n \"\"\"\n super(ParallelInputter, self).__init__(inputters, reducer=reducer)\n self.combine_features = combine_features\n self.share_parameters = share_parameters\n if self.share_parameters:\n leaves = self.get_leaf_inputters()\n for inputter in leaves[1:]:\n if type(inputter) is not type(leaves[0]):\n raise ValueError(\"Each inputter must be of the same type for parameter sharing\")\n\n def make_dataset(self, data_file, training=None):\n if not isinstance(data_file, list) or len(data_file) != len(self.inputters):\n raise ValueError(\"The number of data files must be the same as the number of inputters\")\n datasets = [\n inputter.make_dataset(data, training=training)\n for inputter, data in zip(self.inputters, data_file)]\n return tf.data.Dataset.zip(tuple(datasets))\n\n def get_dataset_size(self, data_file):\n if not isinstance(data_file, list) or len(data_file) != 
len(self.inputters):\n raise ValueError(\"The number of data files must be the same as the number of inputters\")\n dataset_sizes = [\n inputter.get_dataset_size(data)\n for inputter, data in zip(self.inputters, data_file)]\n dataset_size = dataset_sizes[0]\n for size in dataset_sizes:\n if size != dataset_size:\n raise RuntimeError(\"The parallel data files do not have the same size\")\n return dataset_size\n\n def get_receiver_tensors(self):\n receiver_tensors = {}\n for i, inputter in enumerate(self.inputters):\n tensors = inputter.get_receiver_tensors()\n for key, value in six.iteritems(tensors):\n receiver_tensors[\"{}_{}\".format(key, i)] = value\n return receiver_tensors\n\n def get_length(self, features):\n lengths = []\n for i, inputter in enumerate(self.inputters):\n if self.combine_features:\n sub_features = extract_prefixed_keys(features, \"inputter_{}_\".format(i))\n else:\n sub_features = features[i]\n lengths.append(inputter.get_length(sub_features))\n if self.reducer is None:\n return lengths\n else:\n return lengths[0]\n\n def make_features(self, element=None, features=None, training=None):\n if self.combine_features:\n if features is None:\n features = {}\n for i, inputter in enumerate(self.inputters):\n prefix = \"inputter_%d_\" % i\n sub_features = extract_prefixed_keys(features, prefix)\n if not sub_features:\n # Also try to read the format produced by get_receiver_tensors.\n sub_features = extract_suffixed_keys(features, \"_%d\" % i)\n sub_features = inputter.make_features(\n element=element[i] if element is not None else None,\n features=sub_features,\n training=training)\n for key, value in six.iteritems(sub_features):\n features[\"%s%s\" % (prefix, key)] = value\n return features\n else:\n if features is None:\n features = [{} for _ in self.inputters]\n for i, inputter in enumerate(self.inputters):\n features[i] = inputter.make_features(\n element=element[i] if element is not None else None,\n features=features[i],\n training=training)\n 
return tuple(features)\n\n def _get_names(self):\n for i, _ in enumerate(self.inputters):\n yield \"inputter_%d\" % i\n\n def _get_shared_name(self):\n return \"\"\n\n def _get_scopes(self):\n for _, name in zip(self.inputters, self._get_names()):\n if self.share_parameters:\n name = self._get_shared_name()\n yield name\n\n def build(self, input_shape=None):\n if self.share_parameters:\n # When sharing parameters, build the first leaf inputter and then set\n # all attributes with parameters to the other inputters.\n leaves = self.get_leaf_inputters()\n first, others = leaves[0], leaves[1:]\n with compat.tf_compat(v1=\"variable_scope\")(self._get_shared_name()):\n first.build(input_shape)\n for name, attr in six.iteritems(first.__dict__):\n if (isinstance(attr, tf.Variable)\n or (isinstance(attr, tf.keras.layers.Layer) and attr.variables)):\n for inputter in others:\n setattr(inputter, name, attr)\n inputter.built = True\n else:\n for inputter, scope in zip(self.inputters, self._get_names()):\n with compat.tf_compat(v1=\"variable_scope\")(scope):\n inputter.build(input_shape)\n super(ParallelInputter, self).build(input_shape)\n\n def make_inputs(self, features, training=None):\n if not self.built:\n self.build()\n transformed = []\n for i, (inputter, scope) in enumerate(zip(self.inputters, self._get_scopes())):\n with compat.tf_compat(v1=\"variable_scope\")(scope):\n if self.combine_features:\n sub_features = extract_prefixed_keys(features, \"inputter_{}_\".format(i))\n else:\n sub_features = features[i]\n transformed.append(inputter.make_inputs(sub_features, training=training))\n if self.reducer is not None:\n transformed = self.reducer(transformed)\n return transformed\n\n\nclass MixedInputter(MultiInputter):\n \"\"\"An multi inputter that applies several transformation on the same data.\"\"\"\n\n def __init__(self,\n inputters,\n reducer=ConcatReducer(),\n dropout=0.0):\n \"\"\"Initializes a mixed inputter.\n\n Args:\n inputters: A list of 
:class:`opennmt.inputters.inputter.Inputter`.\n reducer: A :class:`opennmt.layers.reducer.Reducer` to merge all inputs.\n dropout: The probability to drop units in the merged inputs.\n \"\"\"\n super(MixedInputter, self).__init__(inputters, reducer=reducer)\n self.dropout = dropout\n\n def make_dataset(self, data_file, training=None):\n datasets = [\n inputter.make_dataset(data_file, training=training)\n for inputter in self.inputters]\n return datasets[0]\n\n def get_dataset_size(self, data_file):\n return self.inputters[0].get_dataset_size(data_file)\n\n def get_receiver_tensors(self):\n receiver_tensors = {}\n for inputter in self.inputters:\n receiver_tensors.update(inputter.get_receiver_tensors())\n return receiver_tensors\n\n def get_length(self, features):\n return self.inputters[0].get_length(features)\n\n def make_features(self, element=None, features=None, training=None):\n if features is None:\n features = {}\n for inputter in self.inputters:\n features = inputter.make_features(\n element=element, features=features, training=training)\n return features\n\n def make_inputs(self, features, training=None):\n transformed = []\n for i, inputter in enumerate(self.inputters):\n with compat.tf_compat(v1=\"variable_scope\")(\"inputter_{}\".format(i)):\n transformed.append(inputter.make_inputs(features, training=training))\n outputs = self.reducer(transformed)\n outputs = tf.layers.dropout(outputs, rate=self.dropout, training=training)\n return outputs\n\n\nclass ExampleInputter(ParallelInputter):\n \"\"\"An inputter that returns training examples (parallel features and labels).\"\"\"\n\n def __init__(self, features_inputter, labels_inputter, share_parameters=False):\n \"\"\"Initializes this inputter.\n\n Args:\n features_inputter: An inputter producing the features (source).\n labels_inputter: An inputter producing the labels (target).\n share_parameters: Share the inputters parameters.\n \"\"\"\n self.features_inputter = features_inputter\n self.labels_inputter 
= labels_inputter\n self.labels_inputter.is_target = True\n super(ExampleInputter, self).__init__(\n [self.features_inputter, self.labels_inputter],\n share_parameters=share_parameters,\n combine_features=False)\n\n def initialize(self, metadata, asset_dir=None, asset_prefix=\"\"):\n self.features_inputter.initialize(metadata, asset_prefix=\"source_\")\n self.labels_inputter.initialize(metadata, asset_prefix=\"target_\")\n if asset_dir is not None:\n return self.export_assets(asset_dir, asset_prefix=asset_prefix)\n return {}\n\n def export_assets(self, asset_dir, asset_prefix=\"\"):\n assets = {}\n assets.update(self.features_inputter.export_assets(\n asset_dir, asset_prefix=\"source_\"))\n assets.update(self.labels_inputter.export_assets(\n asset_dir, asset_prefix=\"target_\"))\n return assets\n\n def make_inference_dataset(self,\n features_file,\n batch_size,\n bucket_width=None,\n num_threads=1,\n prefetch_buffer_size=None):\n return self.features_inputter.make_inference_dataset(\n features_file,\n batch_size,\n bucket_width=bucket_width,\n num_threads=num_threads,\n prefetch_buffer_size=prefetch_buffer_size)\n\n def make_evaluation_dataset(self,\n features_file,\n labels_file,\n batch_size,\n num_threads=1,\n prefetch_buffer_size=None):\n \"\"\"Builds a dataset to be used for evaluation.\n\n Args:\n features_file: The evaluation source file.\n labels_file: The evaluation target file.\n batch_size: The batch size to use.\n num_threads: The number of elements processed in parallel.\n prefetch_buffer_size: The number of batches to prefetch asynchronously. 
If\n ``None``, use an automatically tuned value on TensorFlow 1.8+ and 1 on\n older versions.\n\n Returns:\n A ``tf.data.Dataset``.\n \"\"\"\n map_func = lambda *arg: self.make_features(arg, training=False)\n dataset = self.make_dataset([features_file, labels_file], training=False)\n dataset = inference_pipeline(\n dataset,\n batch_size,\n process_fn=map_func,\n num_threads=num_threads,\n prefetch_buffer_size=prefetch_buffer_size)\n return dataset\n\n def make_training_dataset(self,\n features_file,\n labels_file,\n batch_size,\n batch_type=\"examples\",\n batch_multiplier=1,\n batch_size_multiple=1,\n shuffle_buffer_size=None,\n bucket_width=None,\n maximum_features_length=None,\n maximum_labels_length=None,\n single_pass=False,\n num_shards=1,\n shard_index=0,\n num_threads=4,\n prefetch_buffer_size=None):\n \"\"\"Builds a dataset to be used for training. It supports the full training\n pipeline, including:\n\n * sharding\n * shuffling\n * filtering\n * bucketing\n * prefetching\n\n Args:\n features_file: The evaluation source file.\n labels_file: The evaluation target file.\n batch_size: The batch size to use.\n batch_type: The training batching stragety to use: can be \"examples\" or\n \"tokens\".\n batch_multiplier: The batch size multiplier to prepare splitting accross\n replicated graph parts.\n batch_size_multiple: When :obj:`batch_type` is \"tokens\", ensure that the\n result batch size is a multiple of this value.\n shuffle_buffer_size: The number of elements from which to sample.\n bucket_width: The width of the length buckets to select batch candidates\n from (for efficiency). Set ``None`` to not constrain batch formation.\n maximum_features_length: The maximum length or list of maximum lengths of\n the features sequence(s). 
``None`` to not constrain the length.\n maximum_labels_length: The maximum length of the labels sequence.\n ``None`` to not constrain the length.\n single_pass: If ``True``, makes a single pass over the training data.\n num_shards: The number of data shards (usually the number of workers in a\n distributed setting).\n shard_index: The shard index this data pipeline should read from.\n num_threads: The number of elements processed in parallel.\n prefetch_buffer_size: The number of batches to prefetch asynchronously. If\n ``None``, use an automatically tuned value on TensorFlow 1.8+ and 1 on\n older versions.\n\n Returns:\n A ``tf.data.Dataset``.\n \"\"\"\n dataset_size = self.features_inputter.get_dataset_size(features_file)\n map_func = lambda *arg: self.make_features(arg, training=True)\n dataset = self.make_dataset([features_file, labels_file], training=True)\n dataset = training_pipeline(\n dataset,\n batch_size,\n batch_type=batch_type,\n batch_multiplier=batch_multiplier,\n bucket_width=bucket_width,\n single_pass=single_pass,\n process_fn=map_func,\n num_threads=num_threads,\n shuffle_buffer_size=shuffle_buffer_size,\n prefetch_buffer_size=prefetch_buffer_size,\n dataset_size=dataset_size,\n maximum_features_length=maximum_features_length,\n maximum_labels_length=maximum_labels_length,\n features_length_fn=self.features_inputter.get_length,\n labels_length_fn=self.labels_inputter.get_length,\n batch_size_multiple=batch_size_multiple,\n num_shards=num_shards,\n shard_index=shard_index)\n return dataset\n" ]
[ [ "tensorflow.layers.dropout", "tensorflow.estimator.export.ServingInputReceiver" ] ]
gun8474/face-recognition-by-OAGAN
[ "54c67a29a22e25b14a24fb8aa3badba5444653ac", "54c67a29a22e25b14a24fb8aa3badba5444653ac" ]
[ "implementations/sgan/sgan_main.py", "implementations/sgan/sgan_main_cuda.py" ]
[ "# μƒˆλ‘œμš΄ 참고링크: https://github.com/eriklindernoren/PyTorch-GAN/tree/master/implementations/context_encoder\n# μš°λ¦¬λž‘ 같은 3채널에 ν•˜κ³ μžν•˜λŠ” 바도 λΉ„μŠ·ν•¨. shape μ°Έκ³ ν•˜κΈ° μ’‹μ„λ“―ν•˜μ—¬ 첨뢀함\n\n# λ‚˜(μ†Œν˜„)λŠ” 사진 11개 -> batch_size=11, num_classes=11μ΄λ‹ˆ μ£Όμ˜λ°”λžŒ!\n\nimport argparse\nimport os\nimport numpy as np\nfrom dataloader import OAGandataset\nimport math\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nimport torchvision.models as models\n#from auxiliary_training import *\nfrom loss import sganloss\n\nos.makedirs(\"images\", exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--n_epochs\", type=int, default=200, help=\"number of epochs of training\")\nparser.add_argument(\"--batch_size\", type=int, default=11, help=\"size of the batches\")\nparser.add_argument(\"--lr\", type=float, default=0.0001, help=\"adam: learning rate\")\nparser.add_argument(\"--b1\", type=float, default=0.5, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--b2\", type=float, default=0.999, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\nparser.add_argument(\"--latent_dim\", type=int, default=100, help=\"dimensionality of the latent space\")\nparser.add_argument(\"--num_classes\", type=int, default=11, help=\"number of classes for dataset\")\nparser.add_argument(\"--img_size\", type=int, default=128, help=\"size of each image dimension\")\nparser.add_argument(\"--channels\", type=int, default=3, help=\"number of image channels\")\nparser.add_argument(\"--sample_interval\", type=int, default=400, help=\"interval between image sampling\")\nopt = 
parser.parse_args()\n# print(opt)\n\ncuda = True if torch.cuda.is_available() else False\n\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\nclass IdentityPadding(nn.Module):\n def __init__(self, in_channels, out_channels, stride):\n super(IdentityPadding, self).__init__()\n\n self.pooling = nn.MaxPool2d(1, stride=stride)\n self.add_channels = out_channels - in_channels\n\n def forward(self, x):\n out = F.pad(x, (0, 0, 0, 0, 0, self.add_channels))\n out = self.pooling(out)\n return out\n\n# μ½”λ“œ 좜처 : https://dnddnjs.github.io/cifar10/2018/10/09/resnet/\n# https://github.com/eriklindernoren/PyTorch-GAN/blob/a163b82beff3d01688d8315a3fd39080400e7c01/implementations/srgan/models.py#L18\n# μ—¬κΈΈλ³΄λ‹ˆ residual block ν• λ•Œ in, out channel이 동일함.\nclass ResidualBlock(nn.Module):\n def __init__(self, in_channels, out_channels, stride=1, down_sample=False):\n super(ResidualBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,\n stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU(inplace=True)\n\n self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(out_channels)\n self.stride = stride\n\n if down_sample:\n self.down_sample = IdentityPadding(in_channels, out_channels, stride)\n else:\n self.down_sample = None\n\n def forward(self, x):\n shortcut = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.down_sample is not None:\n shortcut = self.down_sample(x)\n\n out += shortcut\n out = self.relu(out)\n return out\n\n# 참고링크: 
https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/cogan/cogan.py\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n # TODO : 밑에 3쀄이 μ˜λ―Έν•˜λŠ” 것 μ°Ύμ•„ μˆ˜μ • or μ‚­μ œν•˜κΈ°\n # self.label_emb = nn.Embedding(opt.num_classes, opt.latent_dim)\n\n # self.init_size = opt.img_size // 4 # Initial size before upsampling\n # self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))\n\n self.FaceOcclusion_1=nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3),\n nn.InstanceNorm2d(64),\n nn.ReLU(),\n # -----\n nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(128),\n nn.ReLU(),\n nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(256),\n nn.ReLU(),\n # -----\n ResidualBlock(256, 256),\n ResidualBlock(256, 256),\n ResidualBlock(256, 256),\n # -----\n nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(128),\n nn.ReLU(),\n nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(64),\n nn.ReLU()\n # -----\n )\n self.FaceOcclusion_2=nn.Sequential(\n nn.Conv2d(64, 1, kernel_size=7, stride=1, padding=3),\n nn.Sigmoid()\n )\n\n self.FaceCompletion=nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(512),\n nn.ReLU(),\n nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(512),\n nn.ReLU(),\n nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(512),\n nn.ReLU(),\n # -----\n nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(256),\n nn.ReLU(),\n nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(128),\n nn.ReLU(),\n nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(64),\n nn.ReLU(),\n # -----\n nn.Conv2d(64, 3, kernel_size=7, stride=1, padding=3),\n nn.Tanh()\n )\n\n def 
forward(self, x):\n # occlusion aware module\n out_predicted=self.FaceOcclusion_1(x)\n out_predictedM=self.FaceOcclusion_2(out_predicted)\n # TODO: μ•„λž«μ€„ 1-xκ°€ μ•„λ‹ˆλΌ 1-out_predictedM같은데 μ–΄λ–»κ²Œ μƒκ°ν•˜μ‡Ό?일단 λ°”κΎΈκ² μŒ! -> λ‹΅λ³€: 마자용!!!!\n out_InvertedM = torch.ones(1, 1, 128, 128) - out_predictedM\n out_oa=torch.matmul(out_predicted, out_predictedM)\n\n # face completion module\n out_synth=self.FaceCompletion(out_oa)\n out_fc=torch.matmul(out_InvertedM, out_synth)\n out_filter=torch.matmul(x, out_predictedM)\n out_final=out_filter + out_fc\n\n return out_predictedM, out_InvertedM, out_synth, out_final\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n self.discriminator_block = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(512, 1024, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(1024, 2048, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU()\n )\n # The height and width of downsampled image\n # ds_size = opt.img_size // 2 ** 4\n # Output layers\n # https://github.com/znxlwm/pytorch-pix2pix/blob/3059f2af53324e77089bbcfc31279f01a38c40b8/network.py#L104- patch gan discriminator code\n # κΈ°μ‘΄ sganμ½”λ“œλŠ” linearμ˜€μ§€λ§Œ μš°λ¦¬λŠ” 논문에 따라 convλ₯Ό μ·¨ν•˜λ©΄μ„œ shape이 λ‹¬λΌμ§€κ²Œ λœλ“―.\n self.adv_layer = nn.Sequential(nn.Conv2d(2048, 1, kernel_size=3, stride=1, padding=1),\n nn.Sigmoid())\n self.attr_layer = nn.Sequential(nn.Conv2d(2048, opt.num_classes, kernel_size=2, stride=1, padding=0),\n nn.Softmax()) # attribute classificationλŒ€μ‹  μ–Όκ΅΄ 인식 μˆ˜ν–‰\n def forward(self, x):\n out = self.discriminator_block(x) # torch.Size([11, 2048, 2, 2])\n # out = out.view(out.shape[0], -1) # 
torch.Size([11, 8192])\n validity = self.adv_layer(out) # torch.Size([11, 1, 2, 2])\n label = self.attr_layer(out) # torch.Size([11, 11, 1, 1])\n # label = label.view(label.shape[0], -1) # torch.Size([11, 11]) # μ™œ label view ν–ˆλŠ”μ§€ μ„€λͺ…λ°”λžŒ!\n\n return validity, label\n\n#TODO: loss에 ν• λ‹Ήλ˜λŠ” weight parameter (μ‘°μ • ν•„μš” -> 14μ£Όμ°¨ λ°œν‘œ μ°Έκ³ )\nclass weight():\n\n def __init__(self):\n self.lam1 = 0.2\n self.lam2 = 0.2\n self.lam3 = 0.2\n self.lam4 = 0.2\n self.lam5 = 0.1\n self.lam6 = 0.1\n self.alpha = 0.7\n self.beta = 0.3\n\nw = weight()\n\n# Loss functions - TODO: μ§€ν˜œ,승건 μˆ˜μ • λΆ€λΆ„\n# loss ν•©μΉ˜λŠ”κ±° κ·Έλƒ₯ sum of scala vector*loss 둜 ν•˜λ©΄λ λ“―?\n# 참고링크: https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/cogan/cogan.py 210쀄\nadversarial_loss = torch.nn.BCELoss()\nattribute_loss = nn.MSELoss() # discriminator에 μ‚¬μš©λ˜λŠ” attribute loss\n\n# Initialize generator and discriminator\ngenerator = Generator()\ndiscriminator = Discriminator()\n\nif cuda:\n generator.cuda()\n discriminator.cuda()\n adversarial_loss.cuda()\n attribute_loss.cuda()\n\n# Initialize weights\ngenerator.apply(weights_init_normal)\ndiscriminator.apply(weights_init_normal)\n\n# data loader\n'''\ndata loadν•  λΆ€λΆ„ Index\n10000μž₯μ—μ„œ 7000μž₯은 train에 μ‚¬μš©,\nκ·Έ 7000μž₯을 1000μž₯μ”© λ‚˜λˆ μ„œ alternative train에 μ‚¬μš©\n\n총 7번의 alternative train paired:unpaired λΉ„μœ¨μ€ 각각\n9:1, 8:2, 7:3, 6:4, 5:5, 4:6,3:7\n\nμ•Œμ•„μ„œ κ³„μ‚°ν•΄μ„œ indexλ°”κΎΈμ‹œκΈΈ~\n\n'''\nidx1 = 0\nidx2 = 899\nidx3 = 900\nidx4 = 999\n\n#TODO: model saveν›„ 뢈러였기 (일단은 saveλ§Œμ΄λΌλ„)\n#처음 μ•ˆ 사싀 : 숫자 Parameterκ°€ 문자 parameter보닀 먼저와야함..\npaired_dataset = OAGandataset(idx1, idx2, paired=True, folder_numbering=False)\nunpaired_dataset = OAGandataset(idx3, idx4, unpaired=True, folder_numbering=False)\n\ntrain_dataloader_p = DataLoader(paired_dataset,\n shuffle=True,\n num_workers=0,\n batch_size=opt.batch_size)\n\ntrain_dataloader_up = DataLoader(unpaired_dataset,\n 
shuffle=True,\n num_workers=0,\n batch_size=opt.batch_size)\nprint (\"data loaded\")\n\n# Optimizers\noptimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\n\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\n\n\n# ----------\n# Training\n# TODO: alternating training 보고 λ””μžμΈν•˜κΈ°\n# ----------\n\n#λ‚˜λ„ TODO넣고싢은데 어케함??????? -> 주석에 μ˜μ–΄λ‘œ νˆ¬λ‘μ“°λ©΄ λ°”λ‘œ μ μš©λ©λ‹ˆλ‹€~\n#paired image training (unpaired도 λ”°λ‘œ λ§Œλ“€κ³ , loss도 상황에 따라 적용)\nprint (\"paired train\")\nfor epoch in range(opt.n_epochs):\n for i, (imgs,imgs_gt,labels) in enumerate(train_dataloader_p):\n batch_size = opt.batch_size\n\n # Adversarial ground truths\n # valid = Variable(FloatTensor(batch_size, 1).fill_(1.0), requires_grad=False)\n valid = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(1.0), requires_grad=False)\n # fake = Variable(FloatTensor(batch_size, 1).fill_(0.0), requires_grad=False)\n fake = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(0.0), requires_grad=False)\n # fake_attr_gt = Variable(LongTensor(batch_size).fill_(opt.num_classes), requires_grad=False)\n fake_attr_gt = Variable(FloatTensor(batch_size).fill_(opt.num_classes), requires_grad=False)\n\n # Configure input\n real_imgs = Variable(imgs.type(FloatTensor))\n # TODO: labelsλŠ” float ν˜•νƒœμΌ 수 μ—†μŒ. 무쑰건 long type이어야함. λ‹€λ₯Έ κ³³μ—μ„œ λ¬Έμ œκ°€ μžˆλŠ”κ±°μž„. 
-> μ—¬λŸ¬ 자료 μ°Ύμ•„λ΄€μ§€λ§Œ λ‹€λ“€ μ΄μœ λŠ” λͺ¨λ₯΄μ§€λ§Œ label을 float둜 ν˜•λ³€ν™˜ν•˜λΌκ³  함\n # labels = Variable(labels.type(LongTensor))\n labels = Variable(labels.type(FloatTensor))\n\n # line 280, line 286 -> FloatTensorκ°€ κΈ°λŒ€λœλ‹€κ³  ν•΄μ„œ LongTensor -> FloatTensor 둜 λ°”κΏ”λ΄„ => μ—λŸ¬ μ•ˆλ‚¨\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise and labels as generator input\n # z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, opt.latent_dim)))) -> μš°λ¦¬λŠ” μ‚¬μš©X\n\n # Generate a batch of images\n out_predictedM, out_InvertedM, out_synth, out_final = generator(real_imgs) # discriminator와 loss 계산에 μ“°μ΄λŠ” μ• λ“€\n\n loss = sganloss([out_final,\n out_predictedM,\n out_InvertedM,\n out_synth],\n imgs_gt)\n # # Loss measures generator's ability to fool the discriminator\n\n\n validity, _ = discriminator(out_final) # ?????????? : ν•΄κ²°ν–ˆμœΌλ‹ˆκΉŒ λ¬ΌμŒν‘œ μΉ˜μš°μ‹œκΈΈ!\n # print('validity', validity.shape) # validity torch.Size([10, 1, 2, 2])\n # print('val', valid.shape) # val torch.Size([10, 1])\n\n g_loss = 0\n g_loss += w.lam1*loss.perceptual_loss()\n g_loss += w.lam2*loss.style_loss()\n g_loss += w.lam3*loss.pixel_loss(w.alpha, w.beta) \n g_loss += w.lam4*loss.smooth_loss()\n g_loss += w.lam5*loss.l2_norm()\n g_loss += w.lam6*adversarial_loss(validity,valid)\n \n print (\"loss:\",g_loss)\n \n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n # d_alpha, d_betaλŠ” discriminator에 μ‚¬μš©λ˜λŠ” 2κ°€μ§€ lossν•¨μˆ˜μ— λŒ€ν•œ κ°€μ€‘μΉ˜κ°’μœΌλ‘œ μš°λ¦¬κ°€ κ²°μ •ν•΄μ•Ό ν•˜λŠ”λ“―\n d_alpha = 0.5\n d_beta = 1 - d_alpha\n\n # Loss for real images\n real_pred, real_attr = discriminator(real_imgs)\n # d_real_loss = (adversarial_loss(real_pred, valid) + attribute_loss(real_attr, labels)) / 2\n d_real_loss = d_alpha * adversarial_loss(real_pred, valid) + d_beta * attribute_loss(real_attr, 
labels)\n # print('r',real_pred.shape)\n # print('valid', valid.shape)\n\n # Loss for fake images\n fake_pred, fake_attr = discriminator(out_final.detach())\n # d_fake_loss = (adversarial_loss(fake_pred, fake) + attribute_loss(fake_attr, fake_attr_gt)) / 2\n d_fake_loss = d_alpha * adversarial_loss(fake_pred, fake) + d_beta * attribute_loss(fake_attr, fake_attr_gt)\n\n # Total discriminator loss\n d_loss = (d_real_loss + d_fake_loss) / 2\n # print(d_loss.type) # μ›λž˜(sgan)λž‘ typeλ˜‘κ°™μŒ(<built-in method type of Tensor object at ...>). λ‘˜λ‹€ floatν˜•νƒœ.\n\n # Calculate discriminator accuracy\n pred = np.concatenate([real_attr.data.cpu().numpy(), fake_attr.data.cpu().numpy()], axis=0)\n gt = np.concatenate([labels.data.cpu().numpy(), fake_attr_gt.data.cpu().numpy()], axis=0)\n d_acc = np.mean(np.argmax(pred, axis=1) == gt)\n\n d_loss.backward()\n optimizer_D.step()\n\n print(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %d%%] [G loss: %f]\"\n % (epoch, opt.n_epochs, i, len(train_dataloader_p), d_loss.item(), 100 * d_acc, g_loss.item())\n )\n\n batches_done = epoch * len(train_dataloader_p) + i\n if batches_done % opt.sample_interval == 0:\n save_image(out_final.data[:25], \"finals/%d.png\" % batches_done, nrow=5, normalize=True)\n save_image(out_synth.data[:25], \"synth/%d.png\" % batches_done, nrow=5, normalize=True)\n save_image(out_predictedM.data[:25], \"masks/%d.png\" % batches_done, nrow=5, normalize=True)\n torch.save(generator, \"generator_paired%d.pt\" % batches_done)\n torch.save(discriminator, \"discriminator_paired%d.pt\" % batches_done)\n\nprint(\"unpaired train\")\nfor epoch in range(opt.n_epochs):\n for i, (imgs, labels) in enumerate(train_dataloader_up):\n\n batch_size = opt.batch_size\n\n # Adversarial ground truths\n # valid = Variable(FloatTensor(batch_size, 1).fill_(1.0), requires_grad=False)\n valid = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(1.0), requires_grad=False)\n # fake = Variable(FloatTensor(batch_size, 1).fill_(0.0), 
requires_grad=False)\n fake = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(0.0), requires_grad=False)\n # fake_attr_gt = Variable(LongTensor(batch_size).fill_(opt.num_classes), requires_grad=False)\n fake_attr_gt = Variable(FloatTensor(batch_size).fill_(opt.num_classes), requires_grad=False)\n\n # Configure input\n real_imgs = Variable(imgs.type(FloatTensor))\n labels = Variable(labels.type(FloatTensor))\n\n # line 280, line 286 -> FloatTensorκ°€ κΈ°λŒ€λœλ‹€κ³  ν•΄μ„œ LongTensor -> FloatTensor 둜 λ°”κΏ”λ΄„ => μ—λŸ¬ μ•ˆλ‚¨\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise and labels as generator input\n # z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, opt.latent_dim)))) -> μš°λ¦¬λŠ” μ‚¬μš©X\n\n # Generate a batch of images\n out_predictedM, out_InvertedM, out_synth, out_final = generator(real_imgs) # discriminator와 loss 계산에 μ“°μ΄λŠ” μ• λ“€\n loss = sganloss([out_final,\n out_predictedM,\n out_InvertedM,\n out_synth],\n )\n # # Loss measures generator's ability to fool the discriminator\n\n validity, _ = discriminator(out_final) # ?????????? 
: ν•΄κ²°ν–ˆμœΌλ‹ˆκΉŒ λ¬ΌμŒν‘œ μΉ˜μš°μ‹œκΈΈ!\n # print('validity', validity.shape) # validity torch.Size([10, 1, 2, 2])\n # print('val', valid.shape) # val torch.Size([10, 1])\n\n g_loss = 0\n g_loss += w.lam4 * loss.smooth_loss()\n g_loss += w.lam5 * loss.l2_norm()\n g_loss += w.lam6 * adversarial_loss(validity, valid)\n\n print(\"loss:\", g_loss)\n\n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n # d_alpha, d_betaλŠ” discriminator에 μ‚¬μš©λ˜λŠ” 2κ°€μ§€ lossν•¨μˆ˜μ— λŒ€ν•œ κ°€μ€‘μΉ˜κ°’μœΌλ‘œ μš°λ¦¬κ°€ κ²°μ •ν•΄μ•Ό ν•˜λŠ”λ“―\n d_alpha = 0.5\n d_beta = 1 - d_alpha\n\n # Loss for real images\n real_pred, real_attr = discriminator(real_imgs)\n # d_real_loss = (adversarial_loss(real_pred, valid) + attribute_loss(real_attr, labels)) / 2\n d_real_loss = d_alpha * adversarial_loss(real_pred, valid) + d_beta * attribute_loss(real_attr, labels)\n # print('r',real_pred.shape)\n # print('valid', valid.shape)\n\n # Loss for fake images\n fake_pred, fake_attr = discriminator(out_final.detach())\n # d_fake_loss = (adversarial_loss(fake_pred, fake) + attribute_loss(fake_attr, fake_attr_gt)) / 2\n d_fake_loss = d_alpha * adversarial_loss(fake_pred, fake) + d_beta * attribute_loss(fake_attr, fake_attr_gt)\n\n # Total discriminator loss\n d_loss = (d_real_loss + d_fake_loss) / 2\n # print(d_loss.type) # μ›λž˜(sgan)λž‘ typeλ˜‘κ°™μŒ(<built-in method type of Tensor object at ...>). 
λ‘˜λ‹€ floatν˜•νƒœ.\n\n # Calculate discriminator accuracy\n pred = np.concatenate([real_attr.data.cpu().numpy(), fake_attr.data.cpu().numpy()], axis=0)\n gt = np.concatenate([labels.data.cpu().numpy(), fake_attr_gt.data.cpu().numpy()], axis=0)\n d_acc = np.mean(np.argmax(pred, axis=1) == gt)\n\n d_loss.backward()\n optimizer_D.step()\n\n print(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %d%%] [G loss: %f]\"\n % (epoch, opt.n_epochs, i, len(train_dataloader_p), d_loss.item(), 100 * d_acc, g_loss.item())\n )\n\n batches_done = epoch * len(train_dataloader_p) + i\n if batches_done % opt.sample_interval == 0:\n save_image(out_final.data[:25], \"finals/%d.png\" % batches_done, nrow=5, normalize=True)\n save_image(out_synth.data[:25], \"synth/%d.png\" % batches_done, nrow=5, normalize=True)\n save_image(out_predictedM.data[:25], \"masks/%d.png\" % batches_done, nrow=5, normalize=True)\n torch.save(generator, \"generator_unpaired%d.pt\" % batches_done)\n torch.save(discriminator, \"discriminator_unpaired%d.pt\" % batches_done)\n", "import argparse\nimport os\nimport numpy as np\nfrom dataloader import OAGandataset\nfrom torchvision.utils import save_image\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nimport torchvision.models as models\n#from auxiliary_training import *\nfrom loss import sganloss\n\nos.makedirs(\"finals\", exist_ok=True)\nos.makedirs(\"synth\", exist_ok=True)\nos.makedirs(\"masks\", exist_ok=True)\nos.makedirs(\"model\", exist_ok=True)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--n_epochs\", type=int, default=200, help=\"number of epochs of training\")\nparser.add_argument(\"--batch_size\", type=int, default=10, help=\"size of the batches\")\nparser.add_argument(\"--lr\", type=float, default=0.001, help=\"adam: learning rate\")\nparser.add_argument(\"--b1\", type=float, default=0.5, 
help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--b2\", type=float, default=0.999, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\nparser.add_argument(\"--latent_dim\", type=int, default=100, help=\"dimensionality of the latent space\")\nparser.add_argument(\"--num_classes\", type=int, default=7000, help=\"number of classes for paired-dataset\")\nparser.add_argument(\"--img_size\", type=int, default=128, help=\"size of each image dimension\")\nparser.add_argument(\"--channels\", type=int, default=3, help=\"number of image channels\")\nparser.add_argument(\"--sample_interval\", type=int, default=400, help=\"interval between image sampling\")\nopt = parser.parse_args()\n\ncuda = True if torch.cuda.is_available() else False\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\nclass IdentityPadding(nn.Module):\n def __init__(self, in_channels, out_channels, stride):\n super(IdentityPadding, self).__init__()\n\n self.pooling = nn.MaxPool2d(1, stride=stride)\n self.add_channels = out_channels - in_channels\n\n def forward(self, x):\n out = F.pad(x, (0, 0, 0, 0, 0, self.add_channels))\n out = self.pooling(out)\n return out\n\n# μ½”λ“œ 좜처 : https://dnddnjs.github.io/cifar10/2018/10/09/resnet/\n# https://github.com/eriklindernoren/PyTorch-GAN/blob/a163b82beff3d01688d8315a3fd39080400e7c01/implementations/srgan/models.py#L18\n# μ—¬κΈΈλ³΄λ‹ˆ residual block ν• λ•Œ in, out channel이 동일함.\nclass ResidualBlock(nn.Module):\n def __init__(self, in_channels, out_channels, stride=1, down_sample=False):\n super(ResidualBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, 
out_channels, kernel_size=3,\n stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU(inplace=True)\n\n self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(out_channels)\n self.stride = stride\n\n if down_sample:\n self.down_sample = IdentityPadding(in_channels, out_channels, stride)\n else:\n self.down_sample = None\n\n def forward(self, x):\n shortcut = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.down_sample is not None:\n shortcut = self.down_sample(x)\n\n out += shortcut\n out = self.relu(out)\n return out\n\n# 참고링크: https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/cogan/cogan.py\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n # TODO : 밑에 3쀄이 μ˜λ―Έν•˜λŠ” 것 μ°Ύμ•„ μˆ˜μ • or μ‚­μ œν•˜κΈ°\n # self.label_emb = nn.Embedding(opt.num_classes, opt.latent_dim)\n\n # self.init_size = opt.img_size // 4 # Initial size before upsampling\n # self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))\n\n self.FaceOcclusion_1=nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3),\n nn.InstanceNorm2d(64),\n nn.ReLU(),\n # -----\n nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(128),\n nn.ReLU(),\n nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(256),\n nn.ReLU(),\n # -----\n ResidualBlock(256, 256),\n ResidualBlock(256, 256),\n ResidualBlock(256, 256),\n # -----\n nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(128),\n nn.ReLU(),\n nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(64),\n nn.ReLU()\n # -----\n )\n self.FaceOcclusion_2=nn.Sequential(\n nn.Conv2d(64, 1, kernel_size=7, stride=1, padding=3),\n nn.Sigmoid()\n )\n\n 
self.FaceCompletion=nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(512),\n nn.ReLU(),\n nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(512),\n nn.ReLU(),\n nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(512),\n nn.ReLU(),\n # -----\n nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(256),\n nn.ReLU(),\n nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(128),\n nn.ReLU(),\n nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(64),\n nn.ReLU(),\n # -----\n nn.Conv2d(64, 3, kernel_size=7, stride=1, padding=3),\n nn.Tanh()\n )\n\n def forward(self, x):\n # occlusion aware module\n out_predicted=self.FaceOcclusion_1(x)\n out_predictedM=self.FaceOcclusion_2(out_predicted)\n out_InvertedM = torch.ones(1, 1, 128, 128).cuda() - out_predictedM\n out_oa=torch.matmul(out_predicted, out_predictedM)\n\n # face completion module\n out_synth=self.FaceCompletion(out_oa)\n out_fc=torch.matmul(out_InvertedM, out_synth)\n out_filter=torch.matmul(x, out_predictedM)\n out_final=out_filter + out_fc\n\n return out_predictedM, out_InvertedM, out_synth, out_final\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n self.discriminator_block = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(512, 1024, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(),\n nn.Conv2d(1024, 2048, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU()\n )\n # The height and width of downsampled image\n # ds_size = opt.img_size // 2 ** 4\n # Output layers\n # 
https://github.com/znxlwm/pytorch-pix2pix/blob/3059f2af53324e77089bbcfc31279f01a38c40b8/network.py#L104- patch gan discriminator code\n # κΈ°μ‘΄ sganμ½”λ“œλŠ” linearμ˜€μ§€λ§Œ μš°λ¦¬λŠ” 논문에 따라 convλ₯Ό μ·¨ν•˜λ©΄μ„œ shape이 λ‹¬λΌμ§€κ²Œ λœλ“―.\n self.adv_layer = nn.Sequential(nn.Conv2d(2048, 1, kernel_size=3, stride=1, padding=1),\n nn.Sigmoid())\n self.attr_layer = nn.Sequential(nn.Conv2d(2048, opt.num_classes, kernel_size=2, stride=1, padding=0),\n nn.Softmax()) # attribute classificationλŒ€μ‹  μ–Όκ΅΄ 인식 μˆ˜ν–‰\n #TODO: paired - unpaired의 class 수(=이미지 수)κ°€ λ‹€λ₯Έλ° attr_layerλŠ” 전체 class 수둜 λ“€μ–΄κ°€μžˆμŒ. μ–΄λ–»κ²Œ ν•˜λ©΄ 쒋을지 λ‹€λ₯Έ λ…Όλ¬Έ or μ½”λ“œ 찾아보기\n \n def forward(self, x):\n out = self.discriminator_block(x) # torch.Size([11, 2048, 2, 2])\n # out = out.view(out.shape[0], -1) # torch.Size([11, 8192])\n validity = self.adv_layer(out) # torch.Size([11, 1, 2, 2])\n label = self.attr_layer(out) # torch.Size([11, 11, 1, 1])\n\n return validity, label\n\nclass weight():\n\n def __init__(self):\n self.lam1 = 0.05 # perceptual_loss\n self.lam2 = 120 # style_loss\n self.lam3 = 1 # pixel_loss\n self.lam4 = 0.001 # smooth_loss\n self.lam5 = -1 # L2 norm\n self.lam6 = 1 # adversarial_loss\n self.alpha = 0.5\n self.beta = 0.5\n\nw = weight()\n\n# 참고링크: https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/cogan/cogan.py 210쀄\nadversarial_loss = torch.nn.BCELoss()\nattribute_loss = nn.MSELoss() # discriminator에 μ‚¬μš©λ˜λŠ” attribute loss\n\n# Initialize generator and discriminator\ngenerator = Generator()\ndiscriminator = Discriminator()\n\nif cuda:\n generator.cuda()\n discriminator.cuda()\n adversarial_loss.cuda()\n attribute_loss.cuda()\n\n# Initialize weights\ngenerator.apply(weights_init_normal)\ndiscriminator.apply(weights_init_normal)\n\n# data loader\n'''\ndata loadν•  λΆ€λΆ„ Index\n10000μž₯μ—μ„œ 7000μž₯은 train에 μ‚¬μš©,\nκ·Έ 7000μž₯을 1000μž₯μ”© λ‚˜λˆ μ„œ alternative train에 μ‚¬μš©\n총 7번의 alternative train paired:unpaired λΉ„μœ¨μ€ 각각\n9:1, 
8:2, 7:3, 6:4, 5:5, 4:6,3:7\nμ•Œμ•„μ„œ κ³„μ‚°ν•΄μ„œ indexλ°”κΎΈμ‹œκΈΈ~\n'''\nidx1 = 0\nidx2 = 140 * 9 - 1\nidx3 = 140 * 9\nidx4 = 140 * 10 - 1\n\n#TODO: λͺ¨λΈ saveν–ˆμœΌλ‹ˆ 이제 trainig된 λͺ¨λΈμ„ loadν•΄μ„œ 이어 ν•™μŠ΅ν•˜λŠ”κ±° λ§Œλ“€κΈ°\n#처음 μ•ˆ 사싀 : 숫자 Parameterκ°€ 문자 parameter보닀 먼저와야함..\npaired_dataset = OAGandataset(idx1, idx2, paired=True, folder_numbering=False)\nunpaired_dataset = OAGandataset(idx3, idx4, paired=False, folder_numbering=False)\n\ntrain_dataloader_p = DataLoader(paired_dataset,\n shuffle=True,\n num_workers=0,\n batch_size=opt.batch_size)\n\ntrain_dataloader_up = DataLoader(unpaired_dataset,\n shuffle=True,\n num_workers=0,\n batch_size=opt.batch_size)\nprint (\"data loaded\")\n\n# Optimizers\noptimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\n\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\n\n\n# ----------\n# Training\n# ----------\n\n#paired image training (unpaired도 λ”°λ‘œ λ§Œλ“€κ³ , loss도 상황에 따라 적용)\nprint (\"paired train\")\nfor epoch in range(opt.n_epochs):\n for i, (imgs,imgs_gt,labels) in enumerate(train_dataloader_p):\n #TODO: batch_size ν•˜λ‚˜λ‘œ ν†΅μΌν•˜κΈ°(opt, dataloader, img shape)\n batch_size = opt.batch_size\n\n # Adversarial ground truths\n valid = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(1.0), requires_grad=False)\n fake = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(0.0), requires_grad=False)\n fake_attr_gt = Variable(FloatTensor(batch_size).fill_(opt.num_classes), requires_grad=False)\n\n # Configure input\n real_imgs = Variable(imgs.type(FloatTensor))\n labels = Variable(labels.type(FloatTensor))\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise and labels as generator input\n # z = Variable(FloatTensor(np.random.normal(0, 1, 
(batch_size, opt.latent_dim)))) -> μš°λ¦¬λŠ” μ‚¬μš©X\n\n # Generate a batch of images\n out_predictedM, out_InvertedM, out_synth, out_final = generator(real_imgs) # discriminator와 loss 계산에 μ“°μ΄λŠ” μ• λ“€\n loss = sganloss([out_final,\n out_predictedM,\n out_InvertedM,\n out_synth],imgs_gt.cuda())\n \n # # Loss measures generator's ability to fool the discriminator\n validity, _ = discriminator(out_final)\n g_loss = 0\n g_loss += w.lam1*loss.perceptual_loss()\n g_loss += w.lam2*loss.style_loss()\n g_loss += w.lam3*loss.pixel_loss(w.alpha, w.beta)\n g_loss += w.lam4*loss.smooth_loss()\n g_loss += w.lam5*loss.l2_norm()\n g_loss += w.lam6*adversarial_loss(validity,valid)\n\n print (\"loss:\",loss.perceptual_loss(), loss.style_loss(), loss.pixel_loss(w.alpha, w.beta), loss.smooth_loss(), loss.l2_norm(), adversarial_loss(validity,valid))\n \n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n # d_alpha, d_betaλŠ” discriminator에 μ‚¬μš©λ˜λŠ” 2κ°€μ§€ lossν•¨μˆ˜μ— λŒ€ν•œ κ°€μ€‘μΉ˜κ°’μœΌλ‘œ μš°λ¦¬κ°€ κ²°μ •ν•΄μ•Ό ν•˜λŠ”λ“―\n d_alpha = 0.5\n d_beta = 1 - d_alpha\n\n # Loss for real images\n real_pred, real_attr = discriminator(real_imgs)\n # d_real_loss = (adversarial_loss(real_pred, valid) + attribute_loss(real_attr, labels)) / 2\n d_real_loss = d_alpha * adversarial_loss(real_pred, valid) + d_beta * attribute_loss(real_attr, labels)\n # print('r',real_pred.shape)\n # print('valid', valid.shape)\n\n # Loss for fake images\n fake_pred, fake_attr = discriminator(out_final.detach())\n # d_fake_loss = (adversarial_loss(fake_pred, fake) + attribute_loss(fake_attr, fake_attr_gt)) / 2\n d_fake_loss = d_alpha * adversarial_loss(fake_pred, fake) + d_beta * attribute_loss(fake_attr, fake_attr_gt)\n\n # Total discriminator loss\n d_loss = (d_real_loss + d_fake_loss) / 2\n # print(d_loss.type) # μ›λž˜(sgan)λž‘ typeλ˜‘κ°™μŒ(<built-in method type of Tensor object at ...>). 
λ‘˜λ‹€ floatν˜•νƒœ.\n\n # Calculate discriminator accuracy\n pred = np.concatenate([real_attr.data.cpu().numpy(), fake_attr.data.cpu().numpy()], axis=0)\n gt = np.concatenate([labels.data.cpu().numpy(), fake_attr_gt.data.cpu().numpy()], axis=0)\n d_acc = np.mean(np.argmax(pred, axis=1) == gt)\n\n d_loss.backward()\n optimizer_D.step()\n\n print(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %d%%] [G loss: %f]\"\n % (epoch, opt.n_epochs, i, len(train_dataloader_p), d_loss.item(), 100 * d_acc, g_loss.item())\n )\n\n batches_done = epoch * len(train_dataloader_p) + i\n if batches_done % opt.sample_interval == 0:\n save_image(out_final.data[:10], \"finals/%d.png\" % batches_done, nrow=5, normalize=True)\n save_image(out_synth.data[:10], \"synth/%d.png\" % batches_done, nrow=5, normalize=True)\n save_image(out_predictedM.data[:10], \"masks/%d.png\" % batches_done, nrow=5, normalize=True)\n torch.save(generator, \"model/generator_paired%d.pt\" % batches_done)\n torch.save(discriminator, \"model/discriminator_paired%d.pt\" % batches_done)\n\nprint(\"unpaired train\")\nfor epoch in range(opt.n_epochs):\n for i, (imgs, labels) in enumerate(train_dataloader_up):\n\n batch_size = opt.batch_size\n\n # Adversarial ground truths\n valid = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(1.0), requires_grad=False)\n fake = Variable(FloatTensor(batch_size, 1, 2, 2).fill_(0.0), requires_grad=False)\n fake_attr_gt = Variable(FloatTensor(batch_size).fill_(opt.num_classes), requires_grad=False)\n\n # Configure input\n real_imgs = Variable(imgs.type(FloatTensor))\n labels = Variable(labels.type(FloatTensor))\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Generate a batch of images\n out_predictedM, out_InvertedM, out_synth, out_final = generator(real_imgs) # discriminator와 loss 계산에 μ“°μ΄λŠ” μ• λ“€\n loss = sganloss([out_final, \n out_predictedM,\n out_InvertedM,\n out_synth]\n )\n #TODO: μ—¬κΈ°μ„œ μ—λŸ¬λ‚¨.TypeError: conv2d(): 
argument 'input' (position 1) must be Tensor, not NoneType\n # generatorμ—μ„œ λ¬Έμ œμΈλ“―\n \n # # Loss measures generator's ability to fool the discriminator\n validity, _ = discriminator(out_final)\n g_loss = 0\n g_loss += w.lam4 * loss.smooth_loss()\n g_loss += w.lam5 * loss.l2_norm()\n g_loss += w.lam6 * adversarial_loss(validity, valid)\n\n print(\"loss:\", g_loss)\n\n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n # d_alpha, d_betaλŠ” discriminator에 μ‚¬μš©λ˜λŠ” 2κ°€μ§€ lossν•¨μˆ˜μ— λŒ€ν•œ κ°€μ€‘μΉ˜κ°’μœΌλ‘œ μš°λ¦¬κ°€ κ²°μ •ν•΄μ•Ό ν•˜λŠ”λ“―\n d_alpha = 0.5\n d_beta = 1 - d_alpha\n\n # Loss for real images\n real_pred, real_attr = discriminator(real_imgs)\n # d_real_loss = (adversarial_loss(real_pred, valid) + attribute_loss(real_attr, labels)) / 2\n d_real_loss = d_alpha * adversarial_loss(real_pred, valid) + d_beta * attribute_loss(real_attr, labels)\n # print('r',real_pred.shape)\n # print('valid', valid.shape)\n\n # Loss for fake images\n fake_pred, fake_attr = discriminator(out_final.detach())\n # d_fake_loss = (adversarial_loss(fake_pred, fake) + attribute_loss(fake_attr, fake_attr_gt)) / 2\n d_fake_loss = d_alpha * adversarial_loss(fake_pred, fake) + d_beta * attribute_loss(fake_attr, fake_attr_gt)\n\n # Total discriminator loss\n d_loss = (d_real_loss + d_fake_loss) / 2\n # print(d_loss.type) # μ›λž˜(sgan)λž‘ typeλ˜‘κ°™μŒ(<built-in method type of Tensor object at ...>). 
λ‘˜λ‹€ floatν˜•νƒœ.\n\n # Calculate discriminator accuracy\n pred = np.concatenate([real_attr.data.cpu().numpy(), fake_attr.data.cpu().numpy()], axis=0)\n gt = np.concatenate([labels.data.cpu().numpy(), fake_attr_gt.data.cpu().numpy()], axis=0)\n d_acc = np.mean(np.argmax(pred, axis=1) == gt)\n\n d_loss.backward()\n optimizer_D.step()\n\n print(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %d%%] [G loss: %f]\"\n % (epoch, opt.n_epochs, i, len(train_dataloader_p), d_loss.item(), 100 * d_acc, g_loss.item())\n )\n\n batches_done = epoch * len(train_dataloader_p) + i\n if batches_done % opt.sample_interval == 0:\n save_image(out_final.data[:10], \"finals/up_%d.png\" % batches_done, nrow=5, normalize=True)\n save_image(out_synth.data[:10], \"synth/up_%d.png\" % batches_done, nrow=5, normalize=True)\n save_image(out_predictedM.data[:10], \"masks/up_%d.png\" % batches_done, nrow=5, normalize=True)\n torch.save(generator, \"model/generator_unpaired%d.pt\" % batches_done)\n torch.save(discriminator, \"model/discriminator_unpaired%d.pt\" % batches_done)\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.LeakyReLU", "torch.ones", "torch.cuda.is_available", "torch.nn.functional.pad", "torch.nn.Softmax", "torch.nn.MaxPool2d", "torch.nn.init.constant_", "torch.nn.ConvTranspose2d", "torch.nn.init.normal_", "numpy.argmax", "torch.utils.data.DataLoader", "torch.nn.BCELoss", "torch.nn.Tanh", "torch.save", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.InstanceNorm2d", "torch.matmul", "torch.nn.MSELoss", "torch.nn.Sigmoid" ], [ "torch.nn.BatchNorm2d", "torch.nn.LeakyReLU", "torch.ones", "torch.cuda.is_available", "torch.nn.functional.pad", "torch.nn.Softmax", "torch.nn.MaxPool2d", "torch.nn.init.constant_", "torch.nn.ConvTranspose2d", "torch.nn.init.normal_", "numpy.argmax", "torch.utils.data.DataLoader", "torch.nn.BCELoss", "torch.nn.Tanh", "torch.save", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.InstanceNorm2d", "torch.matmul", "torch.nn.MSELoss", "torch.nn.Sigmoid" ] ]
zhou3968322/dl-code-read
[ "aca204a986dabe2755becff0f42de1082299d791", "aca204a986dabe2755becff0f42de1082299d791", "aca204a986dabe2755becff0f42de1082299d791", "aca204a986dabe2755becff0f42de1082299d791" ]
[ "pytorch/test/jit/test_save_load.py", "pytorch/test/test_jit_py3.py", "pytorch/torch/testing/_internal/common_device_type.py", "pytorch/test/test_distributions.py" ]
[ "import os\nimport io\nimport sys\nimport random\nimport torch\nfrom itertools import product as product\nfrom torch import Tensor\nfrom typing import NamedTuple\n\n# Make the helper files in test/ importable\npytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nsys.path.append(pytorch_test_dir)\nfrom torch.testing._internal.jit_utils import (JitTestCase,\n clear_class_registry)\n\nif __name__ == \"__main__\":\n raise RuntimeError(\n \"This test file is not meant to be run directly, use:\\n\\n\"\n \"\\tpython test/test_jit.py TESTNAME\\n\\n\"\n \"instead.\"\n )\n\nclass TestSaveLoad(JitTestCase):\n def test_versioned_symbols(self):\n \"\"\"\n Tests Torchscript symbol versioning. See note [Versioned Symbols].\n This test uses an undocumented, test-only function\n torch._test_serialization_subcmul.\n\n This function is implemented as (a - alpha * b) with a default value\n of 1 for alpha. In file format version 2, however, it was implemented\n as (b - alpha * a) with a default value of 2 for alpha.\n This test verifies a module seralized with file format version 2\n exhibits the old behavior, and that the same module newly serialized\n exhibits the current behavior.\n \"\"\"\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n\n def forward(self, a, b, alpha: float):\n no_alpha = torch._test_serialization_subcmul(a, b)\n with_alpha = torch._test_serialization_subcmul(a, b, alpha)\n return no_alpha, with_alpha\n\n def historic_subcmul(a, b, alpha=2):\n return b - alpha * a\n\n def current_subcmul(a, b, alpha=1):\n return a - alpha * b\n\n # Loads and verifies the historic behavior of the module\n # that was serialized with version 2\n module_v2 = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/_test_serialization_subcmul_v2.pt\")\n a = torch.randn((5,))\n b = torch.randn((5,))\n alpha = random.random()\n args = (a, b, alpha)\n no_alpha_v2, with_alpha_v2 = module_v2(*args)\n 
self.assertEqual(no_alpha_v2, historic_subcmul(a, b))\n self.assertEqual(with_alpha_v2, historic_subcmul(*args))\n\n # Scripts, saves, loads and verifies the current behavior of the module\n scripted_module = torch.jit.script(MyModule())\n buffer = io.BytesIO()\n torch.jit.save(scripted_module, buffer)\n buffer.seek(0)\n module_current = torch.jit.load(buffer)\n no_alpha_current, with_alpha_current = module_current(*args)\n self.assertEqual(no_alpha_current, current_subcmul(a, b))\n self.assertEqual(with_alpha_current, current_subcmul(*args))\n\n # Helper that returns the module after saving and loading\n def _save_load_module(self, m):\n scripted_module = torch.jit.script(m())\n buffer = io.BytesIO()\n torch.jit.save(scripted_module, buffer)\n buffer.seek(0)\n return torch.jit.load(buffer)\n\n # Helper which returns the result of a function or the exception the\n # function threw.\n def _try_fn(self, fn, *args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except Exception as e:\n return e\n\n def _verify_no(self, kind, m):\n node_count = sum(kind in str(n) for n in m.graph.nodes())\n self.assertEqual(node_count, 0)\n\n def _verify_count(self, kind, m, count):\n node_count = sum(kind in str(n) for n in m.graph.nodes())\n self.assertEqual(node_count, count)\n\n \"\"\"\n Tests that verify Torchscript remaps aten::div(_) from versions 0-3\n to call either aten::true_divide(_), if an input is a float type,\n or aten::floor_divide(_) otherwise.\n\n NOTE: currently compares against current div behavior, too, since\n div behavior has not yet been updated.\n \"\"\"\n\n def test_versioned_div_tensor(self):\n def historic_div(self, other):\n if self.is_floating_point() or other.is_floating_point():\n return self.true_divide(other)\n return self.floor_divide(other)\n\n # Tensor x Tensor\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n\n def forward(self, a, b):\n result_0 = a / b\n result_1 = torch.div(a, b)\n result_2 = 
a.div(b)\n\n return result_0, result_1, result_2\n\n # Loads historic module\n try:\n v3_module = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/test_versioned_div_tensor_v3.pt\")\n except Exception as e:\n self.skipTest(\"Failed to load fixture!\")\n\n self._verify_no(\"aten::div\", v3_module)\n self._verify_count(\"aten::true_divide\", v3_module, 3)\n self._verify_count(\"aten::floor_divide\", v3_module, 3)\n\n current_module = self._save_load_module(MyModule)\n self._verify_count(\"aten::div\", current_module, 3)\n\n vals = (2., 3., 2, 3)\n for val_a, val_b in product(vals, vals):\n a = torch.tensor((val_a,))\n b = torch.tensor((val_b,))\n\n def _helper(m, fn):\n m_results = self._try_fn(m, a, b)\n fn_result = self._try_fn(fn, a, b)\n\n if isinstance(m_results, Exception):\n self.assertTrue(isinstance(fn_result, Exception))\n else:\n for result in m_results:\n self.assertEqual(result, fn_result)\n\n _helper(v3_module, historic_div)\n _helper(current_module, torch.div)\n\n def test_versioned_div_tensor_inplace(self):\n def historic_div_(self, other):\n if self.is_floating_point() or other.is_floating_point():\n return self.true_divide_(other)\n return self.floor_divide_(other)\n\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n\n def forward(self, a, b):\n a /= b\n return a\n\n try:\n v3_module = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/test_versioned_div_tensor_inplace_v3.pt\")\n except Exception as e:\n self.skipTest(\"Failed to load fixture!\")\n\n self._verify_no(\"aten::div\", v3_module)\n self._verify_count(\"aten::true_divide\", v3_module, 1)\n self._verify_count(\"aten::floor_divide\", v3_module, 1)\n\n current_module = self._save_load_module(MyModule)\n self._verify_count(\"aten::div\", current_module, 1)\n\n vals = (2., 3., 2, 3)\n for val_a, val_b in product(vals, vals):\n a = torch.tensor((val_a,))\n b = torch.tensor((val_b,))\n\n def _helper(m, fn):\n fn_result = self._try_fn(fn, a.clone(), b)\n 
m_result = self._try_fn(m, a, b)\n\n if isinstance(m_result, Exception):\n self.assertTrue(fn_result, Exception)\n else:\n self.assertEqual(m_result, fn_result)\n self.assertEqual(m_result, a)\n\n _helper(v3_module, historic_div_)\n\n # Recreates a since it was modified in place\n a = torch.tensor((val_a,))\n _helper(current_module, torch.Tensor.div_)\n\n def test_versioned_div_tensor_out(self):\n def historic_div_out(self, other, out):\n if self.is_floating_point() or other.is_floating_point() or out.is_floating_point():\n return torch.true_divide(self, other, out=out)\n return torch.floor_divide(self, other, out=out)\n\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n\n def forward(self, a, b, out):\n return a.div(b, out=out)\n\n try:\n v3_module = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/test_versioned_div_tensor_out_v3.pt\")\n except Exception as e:\n self.skipTest(\"Failed to load fixture!\")\n\n self._verify_no(\"aten::div\", v3_module)\n self._verify_count(\"aten::true_divide\", v3_module, 1)\n self._verify_count(\"aten::floor_divide\", v3_module, 1)\n\n current_module = self._save_load_module(MyModule)\n self._verify_count(\"aten::div\", current_module, 1)\n\n vals = (2., 3., 2, 3)\n for val_a, val_b in product(vals, vals):\n a = torch.tensor((val_a,))\n b = torch.tensor((val_b,))\n\n for out in (torch.empty((1,)), torch.empty((1,), dtype=torch.long)):\n def _helper(m, fn):\n fn_result = None\n if fn is torch.div:\n fn_result = self._try_fn(fn, a, b, out=out.clone())\n else:\n fn_result = self._try_fn(fn, a, b, out.clone())\n m_result = self._try_fn(m, a, b, out)\n\n if isinstance(m_result, Exception):\n self.assertTrue(fn_result, Exception)\n else:\n self.assertEqual(m_result, fn_result)\n self.assertEqual(m_result, out)\n\n _helper(v3_module, historic_div_out)\n _helper(current_module, torch.div)\n\n def test_versioned_div_scalar(self):\n def historic_div_scalar_float(self, other: float):\n return 
torch.true_divide(self, other)\n\n def historic_div_scalar_int(self, other: int):\n if self.is_floating_point():\n return torch.true_divide(self, other)\n return torch.floor_divide(self, other)\n\n class MyModuleFloat(torch.nn.Module):\n def __init__(self):\n super(MyModuleFloat, self).__init__()\n\n def forward(self, a, b: float):\n return a / b\n\n class MyModuleInt(torch.nn.Module):\n def __init__(self):\n super(MyModuleInt, self).__init__()\n\n def forward(self, a, b: int):\n return a / b\n\n try:\n v3_module_float = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/test_versioned_div_scalar_float_v3.pt\")\n v3_module_int = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/test_versioned_div_scalar_int_v3.pt\")\n except Exception as e:\n self.skipTest(\"Failed to load fixture!\")\n\n for m in (v3_module_float, v3_module_int):\n self._verify_no(\"aten::div\", m)\n self._verify_count(\"aten::true_divide\", m, 1)\n self._verify_count(\"aten::floor_divide\", m, 1)\n\n current_module_float = self._save_load_module(MyModuleFloat)\n current_module_int = self._save_load_module(MyModuleInt)\n\n for m in (current_module_float, current_module_int):\n self._verify_count(\"aten::div\", m, 1)\n\n vals = (2., 3., 2, 3)\n for val_a, val_b in product(vals, vals):\n a = torch.tensor((val_a,))\n b = val_b\n\n def _helper(m, fn):\n m_result = self._try_fn(m, a, b)\n fn_result = self._try_fn(fn, a, b)\n\n if isinstance(m_result, Exception):\n self.assertTrue(fn_result, Exception)\n else:\n self.assertEqual(m_result, fn_result)\n\n if isinstance(b, float):\n _helper(v3_module_float, historic_div_scalar_float)\n _helper(current_module_float, torch.div)\n else:\n _helper(v3_module_int, historic_div_scalar_int)\n _helper(current_module_int, torch.div)\n\n def test_versioned_div_scalar_reciprocal(self):\n def historic_div_scalar_float_reciprocal(self, other: float):\n return other / self\n\n def historic_div_scalar_int_reciprocal(self, other: int):\n if self.is_floating_point():\n 
return other / self\n return other // self\n\n class MyModuleFloat(torch.nn.Module):\n def __init__(self):\n super(MyModuleFloat, self).__init__()\n\n def forward(self, a, b: float):\n return b / a\n\n class MyModuleInt(torch.nn.Module):\n def __init__(self):\n super(MyModuleInt, self).__init__()\n\n def forward(self, a, b: int):\n return b / a\n\n try:\n v3_module_float = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/test_versioned_div_scalar_reciprocal_float_v3.pt\")\n v3_module_int = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/test_versioned_div_scalar_reciprocal_int_v3.pt\")\n except Exception as e:\n self.skipTest(\"Failed to load fixture!\")\n\n # NOTE: number / tensor is rewritten to torch.reciprocal(a) * b\n # so true_divide and floor_divide do not appear in their graphs\n for m in (v3_module_float, v3_module_int):\n self._verify_no(\"aten::div\", m)\n self._verify_no(\"aten::true_divide\", m)\n self._verify_no(\"aten::floor_divide\", m)\n self._verify_count(\"aten::reciprocal\", m, 1)\n\n current_module_float = self._save_load_module(MyModuleFloat)\n current_module_int = self._save_load_module(MyModuleInt)\n\n vals = (2., 3., 2, 3)\n for val_a, val_b in product(vals, vals):\n a = torch.tensor((val_a,))\n b = val_b\n\n def _helper(m, fn):\n m_result = self._try_fn(m, a, b)\n fn_result = None\n # Reverses argument order for torch.div\n if fn is torch.div:\n fn_result = self._try_fn(torch.div, b, a)\n else:\n fn_result = self._try_fn(fn, a, b)\n\n if not a.is_floating_point():\n # NOTE: Torchscript rewrites the module forward into\n # torch.reciprocal(a) * b, but torch.reciprocal is\n # implemented for integer dtypes.\n self.assertTrue(m_result, Exception)\n self.assertTrue('\"reciprocal_cpu\" not implemented for' in str(m_result))\n elif isinstance(m_result, Exception):\n self.assertTrue(fn_result, Exception)\n else:\n self.assertEqual(m_result, fn_result)\n\n if isinstance(b, float):\n _helper(v3_module_float, 
historic_div_scalar_float_reciprocal)\n _helper(current_module_float, torch.div)\n else:\n _helper(v3_module_int, historic_div_scalar_int_reciprocal)\n _helper(current_module_int, torch.div)\n\n def test_versioned_div_scalar_inplace(self):\n def historic_div_scalar_float_inplace(self, other: float):\n return self.true_divide_(other)\n\n def historic_div_scalar_int_inplace(self, other: int):\n if self.is_floating_point():\n return self.true_divide_(other)\n\n return self.floor_divide_(other)\n\n class MyModuleFloat(torch.nn.Module):\n def __init__(self):\n super(MyModuleFloat, self).__init__()\n\n def forward(self, a, b: float):\n a /= b\n return a\n\n class MyModuleInt(torch.nn.Module):\n def __init__(self):\n super(MyModuleInt, self).__init__()\n\n def forward(self, a, b: int):\n a /= b\n return a\n\n try:\n v3_module_float = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/test_versioned_div_scalar_inplace_float_v3.pt\")\n v3_module_int = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/test_versioned_div_scalar_inplace_int_v3.pt\")\n except Exception as e:\n self.skipTest(\"Failed to load fixture!\")\n\n for m in (v3_module_float, v3_module_int):\n self._verify_no(\"aten::div\", m)\n self._verify_count(\"aten::true_divide\", m, 1)\n self._verify_count(\"aten::floor_divide\", m, 1)\n\n current_module_float = self._save_load_module(MyModuleFloat)\n current_module_int = self._save_load_module(MyModuleInt)\n\n for m in (current_module_float, current_module_int):\n self._verify_count(\"aten::div\", m, 1)\n\n for m in (current_module_float, current_module_int):\n self._verify_count(\"aten::div\", m, 1)\n\n vals = (2., 3., 2, 3)\n for val_a, val_b in product(vals, vals):\n a = torch.tensor((val_a,))\n b = val_b\n\n def _helper(m, fn):\n m_result = self._try_fn(m, a, b)\n fn_result = self._try_fn(fn, a, b)\n\n if isinstance(m_result, Exception):\n self.assertTrue(fn_result, Exception)\n else:\n self.assertEqual(m_result, fn_result)\n\n if isinstance(b, float):\n 
_helper(v3_module_float, historic_div_scalar_float_inplace)\n _helper(current_module_float, torch.Tensor.div_)\n else:\n _helper(v3_module_int, historic_div_scalar_int_inplace)\n _helper(current_module_int, torch.Tensor.div_)\n\n # NOTE: Scalar division was already true division in op version 3,\n # so this test verifies the behavior is unchanged.\n def test_versioned_div_scalar_scalar(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n\n def forward(self, a: float, b: int, c: float, d: int):\n result_0 = a / b\n result_1 = a / c\n result_2 = b / c\n result_3 = b / d\n return (result_0, result_1, result_2, result_3)\n\n try:\n v3_module = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/test_versioned_div_scalar_scalar_v3.pt\")\n except Exception as e:\n self.skipTest(\"Failed to load fixture!\")\n\n self._verify_count(\"aten::div\", v3_module, 4)\n\n current_module = self._save_load_module(MyModule)\n self._verify_count(\"aten::div\", current_module, 4)\n\n def _helper(m, fn):\n vals = (5., 3, 2., 7)\n m_result = m(*vals)\n fn_result = fn(*vals)\n for mr, hr in zip(m_result, fn_result):\n self.assertEqual(mr, hr)\n\n _helper(v3_module, current_module)\n\n # NOTE: the JIT was incapable of handling boolean fill values when\n # PyTorch produced file format versions 0-4\n def test_versioned_full_integer_value(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n\n def forward(self, int_fill: int):\n size = torch.Size(2, 2)\n a = torch.full(size, int_fill)\n b = torch.full(size, 1)\n return (a, b)\n\n try:\n v4_module = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/test_versioned_full_integer_value_v4.pt\")\n except Exception as e:\n self.skipTest(\"Failed to load fixture!\")\n\n self._verify_count(\"aten::full\", v4_module, 2)\n\n current_module = self._save_load_module(MyModule)\n self._verify_count(\"aten::full\", current_module, 2)\n\n # Verifies historic integer 
type inference is float\n # NOTE: only verifies floating point, not exact dtype, due to\n # https://github.com/pytorch/pytorch/issues/40470\n results = v4_module(2)\n for result in results:\n self.assertTrue(result.is_floating_point())\n\n # Verifies values are correct\n a, b = results\n self.assertTrue((a == 2.).all())\n self.assertTrue((b == 1.).all())\n\n # Tests that torch.full behavior which is the same from prior versions\n # to version 5 is preserved.\n # NOTE: while torch.full in eager PyTorch accepts a requires_grad argument,\n # it does not in Torchscript (see https://github.com/pytorch/pytorch/issues/40363)\n def test_versioned_full_preserved(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n\n def forward(self, float_fill: float):\n size = (2, 2)\n a = torch.full(size, 1.)\n b = torch.full(size, float_fill)\n c = torch.full(size, float_fill, dtype=torch.long)\n\n out = torch.empty(size, dtype=torch.long)\n d = torch.full(size, float_fill, out=out)\n\n e = torch.full(size, float_fill, dtype=torch.float16, pin_memory=None,\n layout=torch.strided, device='cpu')\n return (a, b, c, d, e)\n\n try:\n v4_module = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/test_versioned_full_preserved_v4.pt\")\n except Exception as e:\n self.skipTest(\"Failed to load fixture!\")\n\n self._verify_count(\"aten::full\", v4_module, 5)\n\n current_module = self._save_load_module(MyModule)\n self._verify_count(\"aten::full\", current_module, 5)\n\n self.assertEqual(v4_module(2.), current_module(2.))\n\n def test_versioned_symbols_reserialization(self):\n \"\"\"\n Tests that loading and saving serialized Torchscript with a versioned\n symbol won't persist the original function and will inline the\n versioned builtin.\n \"\"\"\n module_v2 = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/_test_serialization_subcmul_v2.pt\")\n buffer = io.BytesIO()\n torch.jit.save(module_v2, buffer)\n buffer.seek(0)\n module_reserialized = 
torch.jit.load(buffer)\n\n subcmul_nodes = sum(\"subcmul\" in n.kind() for\n n in module_reserialized.graph.nodes())\n self.assertEqual(subcmul_nodes, 0)\n\n def test_different_modules(self):\n \"\"\"\n Exercise the situation where we have the same qualified name\n in two different CompilationUnits on save/load.\n \"\"\"\n class Foo(torch.nn.Module):\n def __init__(self):\n super(Foo, self).__init__()\n self.foo = torch.nn.Linear(2, 2)\n self.bar = torch.nn.Linear(2, 2)\n\n def forward(self, x):\n x = self.foo(x)\n x = self.bar(x)\n return x\n\n first_script_module = torch.jit.script(Foo())\n first_saved_module = io.BytesIO()\n torch.jit.save(first_script_module, first_saved_module)\n first_saved_module.seek(0)\n\n clear_class_registry()\n\n class Foo(torch.nn.Module):\n def __init__(self):\n super(Foo, self).__init__()\n self.foo = torch.nn.Linear(2, 2)\n\n def forward(self, x):\n x = self.foo(x)\n return x\n\n second_script_module = torch.jit.script(Foo())\n second_saved_module = io.BytesIO()\n torch.jit.save(torch.jit.script(Foo()), second_saved_module)\n second_saved_module.seek(0)\n\n clear_class_registry()\n\n self.assertEqual(\n first_script_module._c.qualified_name, second_script_module._c.qualified_name\n )\n\n class ContainsBoth(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.add_module(\"second\", torch.jit.load(second_saved_module))\n self.add_module(\"first\", torch.jit.load(first_saved_module))\n\n def forward(self, x):\n x = self.first(x)\n x = self.second(x)\n return x\n\n sm = torch.jit.script(ContainsBoth())\n contains_both = io.BytesIO()\n torch.jit.save(sm, contains_both)\n contains_both.seek(0)\n sm = torch.jit.load(contains_both)\n\n def test_different_functions(self):\n \"\"\"\n Exercise the situation where we have the same qualified name\n in two different CompilationUnits on save/load.\n \"\"\"\n def lol(x):\n return x\n\n class Foo(torch.nn.Module):\n def forward(self, x):\n return lol(x)\n\n first_script_module = 
torch.jit.script(Foo())\n first_saved_module = io.BytesIO()\n torch.jit.save(first_script_module, first_saved_module)\n first_saved_module.seek(0)\n\n clear_class_registry()\n\n def lol(x): # noqa: F811\n return \"hello\"\n\n class Foo(torch.nn.Module):\n def forward(self, x):\n return lol(x)\n\n second_script_module = torch.jit.script(Foo())\n second_saved_module = io.BytesIO()\n torch.jit.save(torch.jit.script(Foo()), second_saved_module)\n second_saved_module.seek(0)\n\n clear_class_registry()\n\n self.assertEqual(\n first_script_module._c.qualified_name, second_script_module._c.qualified_name\n )\n\n class ContainsBoth(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.add_module(\"second\", torch.jit.load(second_saved_module))\n self.add_module(\"first\", torch.jit.load(first_saved_module))\n\n def forward(self, x):\n x = self.first(x)\n x = self.second(x)\n return x\n\n sm = torch.jit.script(ContainsBoth())\n contains_both = io.BytesIO()\n torch.jit.save(sm, contains_both)\n contains_both.seek(0)\n sm = torch.jit.load(contains_both)\n\n def test_different_interfaces(self):\n \"\"\"\n Exercise the situation where we have the same qualified name\n in two different CompilationUnits on save/load.\n \"\"\"\n @torch.jit.interface\n class MyInterface(object):\n def bar(self, x):\n # type: (Tensor) -> Tensor\n pass\n\n @torch.jit.script\n class ImplementInterface(object):\n def __init__(self):\n pass\n\n def bar(self, x):\n return x\n\n class Foo(torch.nn.Module):\n __annotations__ = {\"interface\": MyInterface}\n\n def __init__(self):\n super().__init__()\n self.interface = ImplementInterface()\n\n def forward(self, x):\n return self.interface.bar(x)\n\n first_script_module = torch.jit.script(Foo())\n first_saved_module = io.BytesIO()\n torch.jit.save(first_script_module, first_saved_module)\n first_saved_module.seek(0)\n\n clear_class_registry()\n\n @torch.jit.interface\n class MyInterface(object):\n def not_bar(self, x):\n # type: (Tensor) -> 
Tensor\n pass\n\n @torch.jit.script # noqa: F811\n class ImplementInterface(object): # noqa: F811\n def __init__(self):\n pass\n\n def not_bar(self, x):\n return x\n\n class Foo(torch.nn.Module):\n __annotations__ = {\"interface\": MyInterface}\n\n def __init__(self):\n super().__init__()\n self.interface = ImplementInterface()\n\n def forward(self, x):\n return self.interface.not_bar(x)\n\n second_script_module = torch.jit.script(Foo())\n second_saved_module = io.BytesIO()\n torch.jit.save(torch.jit.script(Foo()), second_saved_module)\n second_saved_module.seek(0)\n\n clear_class_registry()\n\n self.assertEqual(\n first_script_module._c.qualified_name, second_script_module._c.qualified_name\n )\n\n class ContainsBoth(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.add_module(\"second\", torch.jit.load(second_saved_module))\n self.add_module(\"first\", torch.jit.load(first_saved_module))\n\n def forward(self, x):\n x = self.first(x)\n x = self.second(x)\n return x\n\n sm = torch.jit.script(ContainsBoth())\n contains_both = io.BytesIO()\n torch.jit.save(sm, contains_both)\n contains_both.seek(0)\n sm = torch.jit.load(contains_both)\n\n def test_many_collisions(self):\n class MyCoolNamedTuple(NamedTuple):\n a: int\n\n @torch.jit.interface\n class MyInterface(object):\n def bar(self, x):\n # type: (Tensor) -> Tensor\n pass\n\n @torch.jit.script\n class ImplementInterface(object):\n def __init__(self):\n pass\n\n def bar(self, x):\n return x\n\n def lol(x):\n return x\n\n class Foo(torch.nn.Module):\n interface: MyInterface\n\n def __init__(self):\n super().__init__()\n self.foo = torch.nn.Linear(2, 2)\n self.bar = torch.nn.Linear(2, 2)\n self.interface = ImplementInterface()\n\n def forward(self, x):\n x = self.foo(x)\n x = self.bar(x)\n x = lol(x)\n x = self.interface.bar(x)\n\n return x, MyCoolNamedTuple(a=5)\n\n\n first_script_module = torch.jit.script(Foo())\n first_saved_module = io.BytesIO()\n torch.jit.save(first_script_module, 
first_saved_module)\n first_saved_module.seek(0)\n\n clear_class_registry()\n\n @torch.jit.interface\n class MyInterface(object):\n def not_bar(self, x):\n # type: (Tensor) -> Tensor\n pass\n\n @torch.jit.script # noqa F811\n class ImplementInterface(object): # noqa F811\n def __init__(self):\n pass\n\n def not_bar(self, x):\n return x\n\n def lol(x): # noqa F811\n return \"asdofij\"\n\n class MyCoolNamedTuple(NamedTuple): # noqa F811\n a: str\n\n class Foo(torch.nn.Module):\n interface: MyInterface\n\n def __init__(self):\n super().__init__()\n self.foo = torch.nn.Linear(2, 2)\n self.interface = ImplementInterface()\n\n def forward(self, x):\n x = self.foo(x)\n self.interface.not_bar(x)\n x = lol(x)\n return x, MyCoolNamedTuple(a=\"hello\")\n\n second_script_module = torch.jit.script(Foo())\n second_saved_module = io.BytesIO()\n torch.jit.save(second_script_module, second_saved_module)\n second_saved_module.seek(0)\n\n clear_class_registry()\n\n self.assertEqual(\n first_script_module._c.qualified_name, second_script_module._c.qualified_name\n )\n\n class ContainsBoth(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.add_module(\"second\", torch.jit.load(second_saved_module))\n self.add_module(\"first\", torch.jit.load(first_saved_module))\n\n def forward(self, x):\n x, named_tuple_1 = self.first(x)\n x, named_tuple_2 = self.second(x)\n return len(x + named_tuple_2.a) + named_tuple_1.a\n\n sm = torch.jit.script(ContainsBoth())\n contains_both = io.BytesIO()\n torch.jit.save(sm, contains_both)\n contains_both.seek(0)\n sm = torch.jit.load(contains_both)\n", "from collections import namedtuple\nfrom torch.testing._internal.common_utils import run_tests\nfrom torch.testing._internal.jit_utils import JitTestCase\nfrom torch.testing import FileCheck\nfrom torch import jit\nfrom typing import NamedTuple, List, Optional, Dict, Tuple, Any\nfrom jit.test_module_interface import TestModuleInterface # noqa: F401\nimport unittest\nimport sys\nimport 
torch\nimport torch.testing._internal.jit_utils\nimport torch.nn as nn\nimport types\n\nclass TestScriptPy3(JitTestCase):\n def test_joined_str(self):\n def func(x):\n hello, test = \"Hello\", \"test\"\n print(f\"{hello + ' ' + test}, I'm a {test}\") # noqa E999\n print(f\"format blank\") # noqa F541\n hi = 'hi'\n print(f\"stuff before {hi}\")\n print(f\"{hi} stuff after\")\n return x + 1\n\n x = torch.arange(4., requires_grad=True)\n # TODO: Add support for f-strings in string parser frontend\n # self.checkScript(func, [x], optimize=True, capture_output=True)\n\n with self.capture_stdout() as captured:\n out = func(x)\n\n scripted = torch.jit.script(func)\n with self.capture_stdout() as captured_script:\n out_script = func(x)\n\n self.assertEqual(out, out_script)\n self.assertEqual(captured, captured_script)\n\n @unittest.skipIf(sys.version_info[:2] < (3, 7), \"`dataclasses` module not present on < 3.7\")\n def test_dataclass_error(self):\n from dataclasses import dataclass\n\n @dataclass\n class NormalizationInfo(object):\n mean: float = 0.0\n\n def compute(self, total_rows):\n return self.mean\n\n def fn():\n return NormalizationInfo(1, 2, 3, 4, 5)\n\n with self.assertRaisesRegex(OSError, \"NormalizationInfo\"):\n torch.jit.script(fn)\n\n def test_optional_dict_construct(self):\n class M(torch.nn.Module):\n def use(self, buffer: Dict[str, Optional[torch.Tensor]]):\n return buffer[\"prev_key\"]\n\n def forward(self, x):\n prev_key = torch.rand(2, 3)\n next_key = torch.rand(2, 3)\n saved_state: Dict[str, Optional[torch.Tensor]] = {\n \"prev_key\": prev_key,\n \"next_key\": next_key,\n }\n\n return self.use(saved_state)\n\n self.checkModule(M(), (torch.rand(2, 2),))\n\n def test_kwarg_support(self):\n with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, \"variable number of arguments\"):\n class M(torch.nn.Module):\n def forward(self, *, n_tokens: int, device_name: str = 2):\n pass\n torch.jit.script(M())\n\n class M(torch.nn.Module):\n def 
forward(self, *, n_tokens: int, device_name: str):\n return n_tokens, device_name\n\n sm = torch.jit.script(M())\n\n with self.assertRaisesRegex(RuntimeError, \"missing value for argument 'n_tokens'\"):\n sm()\n\n input = (3, 'hello')\n self.assertEqual(sm(*input), input)\n\n def test_named_tuple(self):\n class FeatureVector(NamedTuple):\n float_features: float\n sequence_features: List[float]\n time_since_first: float\n\n @torch.jit.script\n def foo(x) -> float:\n fv = FeatureVector(3.0, [3.0], 3.0) # noqa\n rv = fv.float_features\n for val in fv.sequence_features:\n rv += val\n rv *= fv.time_since_first\n return rv\n\n self.assertEqual(foo(torch.rand(3, 4)), 18.0)\n\n def test_named_tuple_constant(self):\n class Tup(NamedTuple):\n a: int\n b: int\n\n @torch.jit.script\n def foo():\n return Tup(1, 2)\n\n self.assertEqual(foo(), Tup(1, 2))\n\n def test_dict_preserves_order(self):\n def dict_ordering():\n a : Dict[int, int] = {}\n for i in range(1000):\n a[i] = i + 1\n return a\n\n self.checkScript(dict_ordering, ())\n di = torch.jit.script(dict_ordering)()\n res = list(di.items())\n for i in range(1000):\n key, value = res[i]\n self.assertTrue(key == i and value == i + 1)\n\n def test_list_unification_hint(self):\n with self.assertRaisesRegex(RuntimeError, \"Expected a List type hint\"):\n @torch.jit.script\n def x():\n b : int = [2, 3]\n return b\n\n def test_return_named_tuple(self):\n class FeatureVector(NamedTuple):\n float_features: float\n sequence_features: List[float]\n time_since_first: float\n\n @torch.jit.script\n def foo(x):\n fv = FeatureVector(3.0, [3.0], 3.0)\n return fv\n\n out = foo(torch.rand(3, 4))\n out = foo(torch.rand(3, 4))\n self.assertEqual(out.float_features, 3.0)\n self.assertEqual(out.sequence_features, [3.0])\n self.assertEqual(out.time_since_first, 3.0)\n\n def test_named_tuple_as_attr(self):\n class Config(NamedTuple):\n size: int\n\n class MyMod(nn.Module):\n configs: Dict[int, Config]\n\n def __init__(self, configs):\n 
super().__init__()\n self.configs = configs\n\n def forward(self, x):\n for _id, config in self.configs.items():\n x += config.size\n return x\n\n s = torch.jit.script(MyMod({0: Config(size=16)}))\n\n def test_types_as_values(self):\n def fn(m: torch.Tensor) -> torch.device:\n return m.device\n\n self.checkScript(fn, [torch.randn(2, 2)])\n\n GG = namedtuple('GG', ['f', 'g'])\n\n class Foo(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @torch.jit.ignore\n def foo(self, x, z):\n # type: (Tensor, Tensor) -> Tuple[GG, GG]\n return GG(x, z), GG(x, z)\n\n def forward(self, x, z):\n return self.foo(x, z)\n\n foo = torch.jit.script(Foo())\n y = foo(torch.randn(2, 2), torch.randn(2, 2))\n\n class Foo(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @torch.jit.ignore\n def foo(self, x, z) -> Tuple[GG, GG]:\n return GG(x, z)\n\n def forward(self, x, z):\n return self.foo(x, z)\n\n foo = torch.jit.script(Foo())\n y = foo(torch.randn(2, 2), torch.randn(2, 2))\n\n\n def test_named_tuple_resolution(self):\n class TheType(NamedTuple):\n t: int\n\n class MyModule(types.ModuleType):\n def __init__(self):\n super(MyModule, self).__init__('MyModule')\n\n def __getattr__(self, attr):\n return TheType\n\n some_module = MyModule()\n\n def fn() -> some_module.Type:\n return some_module.Type(1)\n\n self.checkScript(fn, [])\n\n def test_ignore_with_types(self):\n @torch.jit.ignore\n def fn(x: Dict[str, Optional[torch.Tensor]]):\n return x + 10\n\n class M(torch.nn.Module):\n def __init__(self):\n super(M, self).__init__()\n\n def forward(self, in_batch: Dict[str, Optional[torch.Tensor]]) -> torch.Tensor:\n self.dropout_modality(in_batch)\n fn(in_batch)\n return torch.tensor(1)\n\n @torch.jit.ignore\n def dropout_modality(self, in_batch: Dict[str, Optional[torch.Tensor]]) -> Dict[str, Optional[torch.Tensor]]:\n return in_batch\n\n sm = torch.jit.script(M())\n FileCheck().check(\"dropout_modality\").check(\"in_batch\").run(str(sm.graph))\n\n def 
test_python_callable(self):\n class MyPythonClass(object):\n @torch.jit.ignore\n def __call__(self, *args) -> str:\n return str(type(args[0]))\n\n the_class = MyPythonClass()\n\n @torch.jit.script\n def fn(x):\n return the_class(x)\n\n # This doesn't involve the string frontend, so don't use checkScript\n x = torch.ones(2)\n self.assertEqual(fn(x), the_class(x))\n\n def test_bad_types(self):\n @torch.jit.ignore\n def fn(my_arg):\n return my_arg + 10\n\n with self.assertRaisesRegex(RuntimeError, \"argument 'my_arg'\"):\n @torch.jit.script\n def other_fn(x):\n return fn('2')\n\n def test_named_tuple_slice_unpack(self):\n class MyCoolNamedTuple(NamedTuple):\n a : int\n b : float\n c : List[int]\n\n @torch.jit.script\n def foo(a : int, b : float, c : List[int]):\n tup = MyCoolNamedTuple(a, b, c) # noqa\n my_a, my_b, my_c = tup\n return tup[:1], my_a, my_c\n\n self.assertEqual(foo(3, 3.5, [6]), ((3,), 3, [6]))\n\n def test_named_tuple_lower(self):\n class MyCoolNamedTuple(NamedTuple):\n a : int\n b : float\n c : List[int]\n\n @torch.jit.script\n def foo(a : int):\n tup = MyCoolNamedTuple(a, 3.14, [9]) # noqa\n return tup\n\n FileCheck().check('TupleConstruct').run(foo.graph)\n torch._C._jit_pass_lower_all_tuples(foo.graph)\n FileCheck().check_not('TupleConstruct').run(foo.graph)\n\n def test_named_tuple_type_annotation(self):\n global MyCoolNamedTuple # see [local resolution in python]\n\n class MyCoolNamedTuple(NamedTuple):\n a : int\n b : float\n c : List[int]\n\n @torch.jit.script\n def foo(x : MyCoolNamedTuple) -> MyCoolNamedTuple:\n return x\n\n mnt = MyCoolNamedTuple(42, 420.0, [666])\n self.assertEqual(foo(mnt), mnt)\n\n def test_named_tuple_wrong_types(self):\n class MyCoolNamedTuple(NamedTuple):\n a : int\n b : float\n c : List[int]\n\n with self.assertRaisesRegex(RuntimeError, \"Expected a value of type 'int' for argument 'a'\"\n \" but instead found type 'str'\"):\n @torch.jit.script\n def foo():\n tup = MyCoolNamedTuple('foo', 'bar', 'baz') # noqa\n return 
tup\n\n def test_named_tuple_kwarg_construct(self):\n class MyCoolNamedTuple(NamedTuple):\n a : int\n b : float\n c : List[int]\n\n @torch.jit.script\n def foo():\n tup = MyCoolNamedTuple(c=[1, 2, 3], b=3.5, a=9) # noqa\n return tup\n\n tup = foo()\n self.assertEqual(tup.a, 9)\n self.assertEqual(tup.b, 3.5)\n self.assertEqual(tup.c, [1, 2, 3])\n\n def test_named_tuple_default_error(self):\n class MyCoolNamedTuple(NamedTuple):\n a : int\n b : float\n c : List[int] = [3, 4, 5]\n\n with self.assertRaisesRegex(RuntimeError, 'Default values are currently not supported'):\n @torch.jit.script\n def foo():\n tup = MyCoolNamedTuple(c=[1, 2, 3], b=3.5, a=9) # noqa\n return tup\n\n @unittest.skipIf(True, \"broken while these tests were not in CI\")\n def test_named_tuple_serialization(self):\n class MyCoolNamedTuple(NamedTuple):\n a : int\n b : float\n c : List[int]\n\n class MyMod(torch.jit.ScriptModule):\n @torch.jit.script_method\n def forward(self):\n return MyCoolNamedTuple(3, 3.5, [3, 4, 5])\n\n mm = MyMod()\n mm.save('foo.zip')\n torch.testing._internal.jit_utils.clear_class_registry()\n loaded = torch.jit.load('foo.zip')\n\n out = mm()\n out_loaded = loaded()\n\n for name in ['a', 'b', 'c']:\n self.assertEqual(getattr(out_loaded, name), getattr(out, name))\n\n def test_type_annotate_py3(self):\n def fn():\n a : List[int] = []\n b : torch.Tensor = torch.ones(2, 2)\n c : Optional[torch.Tensor] = None\n d : Optional[torch.Tensor] = torch.ones(3, 4)\n for _ in range(10):\n a.append(4)\n c = torch.ones(2, 2)\n d = None\n return a, b, c, d\n\n self.checkScript(fn, ())\n\n def wrong_type():\n wrong : List[int] = [0.5]\n return wrong\n\n with self.assertRaisesRegex(RuntimeError, \"Lists must contain only a single type\"):\n torch.jit.script(wrong_type)\n\n def test_subexpression_List_Future(self):\n\n @torch.jit.script\n def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:\n return x[0]\n\n FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)\n\n 
def test_subexpression_Future_annotate(self):\n @torch.jit.script\n def fn() -> torch.jit.Future[int]:\n x: List[torch.jit.Future[int]] = []\n return x[0]\n\n FileCheck().check(\"Future[int][]\").run(fn.graph)\n\n def test_future_isinstance(self):\n @torch.jit.script\n def fn(x: Any) -> torch.jit.Future[int]:\n assert isinstance(x, jit.Future[int])\n return x\n\n FileCheck().check(\"Future[int]\").run(fn.graph)\n\n def test_subexpression_Tuple_int_int_Future(self):\n\n @torch.jit.script\n def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:\n return x[0], x[2]\n\n FileCheck().check('(int, int, Future[int])').check('(int, Future[int])').run(fn.graph)\n\n def test_subexpression_Dict_int_Future(self):\n\n @torch.jit.script\n def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:\n return x[y]\n\n FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)\n\n def test_subexpression_Optional(self):\n\n @torch.jit.script\n def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:\n if x is not None:\n return x[0]\n else:\n return None\n\n FileCheck().check('Dict(int, Future(int))?').run(fn.graph)\n\n def test_unimported_type_resolution(self):\n # verify fallback from the python resolver to the c++ resolver\n\n @ torch.jit.script\n def fn(x):\n # type: (number) -> number\n return x + 1\n\n FileCheck().check('Scalar').run(fn.graph)\n\n def test_parser_bug(self):\n def parser_bug(o: Optional[torch.Tensor]):\n pass\n\n def test_mismatched_annotation(self):\n with self.assertRaisesRegex(RuntimeError, 'annotated with type'):\n @torch.jit.script\n def foo():\n x : str = 4\n return x\n\n def test_reannotate(self):\n with self.assertRaisesRegex(RuntimeError, 'declare and annotate'):\n @torch.jit.script\n def foo():\n x = 5\n if True:\n x : Optional[int] = 7\n\n def test_module_inplace_construct(self):\n class M(nn.Module):\n def __init__(self, start: int):\n 
super().__init__()\n self.linear = nn.Linear(3, 3)\n self.attribute = start\n self.parameter = nn.Parameter(torch.tensor(3, dtype=torch.float))\n\n def method(self) -> int:\n return self.attribute\n\n @torch.jit.unused\n def unused_method(self):\n return self.attribute + self.attribute\n\n def forward(self, x):\n return self.linear(self.linear(x))\n\n\n class N(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = nn.Linear(4, 4)\n\n @torch.jit.ignore\n def ignored_method(self, x):\n return x\n\n def forward(self, x):\n return self.linear(x)\n\n m = torch.jit.script(M(3))\n n = torch.jit.script(N())\n\n n._reconstruct(m._c)\n\n inp = torch.rand((3))\n\n # Check that both modules produce the same output.\n with torch.no_grad():\n m_out = m(inp)\n n_out = n(inp)\n self.assertEqual(m_out, n_out)\n\n # Check that ignored method is still intact.\n self.assertEqual(inp, n.ignored_method(inp))\n\n def test_export_opnames_interface(self):\n global OneTwoModule\n\n @torch.jit.interface\n class OneTwoModule(nn.Module):\n def one(self, x, y):\n # type: (Tensor, Tensor) -> Tensor\n pass\n\n def two(self, x):\n # type: (Tensor) -> Tensor\n pass\n\n def forward(self, x):\n # type: (Tensor) -> Tensor\n pass\n\n class FooMod(nn.Module):\n def one(self, x, y):\n # type: (Tensor, Tensor) -> Tensor\n return x + y\n\n def two(self, x):\n # type: (Tensor) -> Tensor\n return 2 * x\n\n def forward(self, x):\n # type: (Tensor) -> Tensor\n return self.one(self.two(x), x)\n\n class BarMod(nn.Module):\n def one(self, x, y):\n # type: (Tensor, Tensor) -> Tensor\n return x * y\n\n def two(self, x):\n # type: (Tensor) -> Tensor\n return 2 / x\n\n def forward(self, x):\n # type: (Tensor) -> Tensor\n return self.two(self.one(x, x))\n\n class M(nn.Module):\n sub : OneTwoModule\n\n def __init__(self):\n super(M, self).__init__()\n self.sub = BarMod()\n\n def forward(self, x):\n # type: (Tensor) -> Tensor\n return self.sub.forward(x)\n\n def use_module_interface(mod_list: 
List[OneTwoModule], x: torch.Tensor):\n return mod_list[0].forward(x) + mod_list[1].forward(x)\n\n scripted_M_mod = torch.jit.script(M())\n # Temporarily test empty output because lite interpreter does not support interface call\n # Replace it with the issubset call when interface call is supported.\n self.assertTrue(len(torch.jit.export_opnames(scripted_M_mod)) == 0)\n # self.assertTrue(set(['aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal']).issubset(\n # set(torch.jit.export_opnames(scripted_M_mod))))\n\n scripted_M_mod.sub = torch.jit.script(FooMod())\n self.assertTrue(len(torch.jit.export_opnames(scripted_M_mod)) == 0)\n # self.assertTrue(set(['aten::add.Tensor', 'aten::mul.Scalar']).issubset(\n # set(torch.jit.export_opnames(scripted_M_mod))))\n\n\nif __name__ == '__main__':\n run_tests()\n", "import copy\nimport gc\nimport inspect\nimport runpy\nimport threading\nfrom functools import wraps\nimport unittest\nimport os\nimport torch\nfrom torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM, TEST_MKL, \\\n skipCUDANonDefaultStreamIf, TEST_WITH_ASAN, TEST_WITH_UBSAN, TEST_WITH_TSAN\n\ntry:\n import psutil\n HAS_PSUTIL = True\nexcept ImportError:\n HAS_PSUTIL = False\n\n# Note: Generic Device-Type Testing\n#\n# [WRITING TESTS]\n#\n# Write your test class as usual except:\n# (1) Each test method should have one of four signatures:\n#\n# (1a) testX(self, device)\n#\n# (1b) @deviceCountAtLeast(<minimum number of devices to run test with>)\n# testX(self, devices)\n#\n# (1c) @dtypes(<list of dtypes> or <list of tuples of dtypes>)\n# testX(self, device, dtype)\n#\n# (1d) @deviceCountAtLeast(<minimum number of devices to run test with>)\n# @dtypes(<list of dtypes> or <list of tuples of dtypes>)\n# testX(self, devices, dtype)\n#\n#\n# Note that the decorators are required for signatures (1b), (1c) and\n# (1d).\n#\n# When a test like (1a) is called it will be given a device string,\n# like 'cpu' or 'cuda:0.'\n#\n# Tests like (1b) are called with 
a list of device strings, like\n# ['cuda:0', 'cuda:1']. The first device string will be the\n# primary device. These tests will be skipped if the device type\n# has fewer available devices than the argument to @deviceCountAtLeast.\n#\n# Tests like (1c) are called with a device string and a torch.dtype (or\n# a tuple of torch.dtypes) from the list of dtypes (or list of tuples\n# of torch.dtypes) specified in the @dtypes decorator. Device-specific\n# dtype overrides can be specified using @dtypesIfCPU and @dtypesIfCUDA.\n#\n# Tests like (1d) take a devices argument like (1b) and a dtype\n# argument from (1c).\n#\n# (2) Prefer using test decorators defined in this file to others.\n# For example, using the @skipIfNoLapack decorator instead of the\n# @skipCPUIfNoLapack will cause the test to not run on CUDA if\n# LAPACK is not available, which is wrong. If you need to use a decorator\n# you may want to ask about porting it to this framework.\n#\n# See the TestTorchDeviceType class in test_torch.py for an example.\n#\n# [RUNNING TESTS]\n#\n# After defining your test class call instantiate_device_type_tests on it\n# and pass in globals() for the second argument. This will instantiate\n# discoverable device-specific test classes from your generic class. It will\n# also hide the tests in your generic class so they're not run.\n#\n# If you device-generic test class is TestClass then new classes with names\n# TestClass<DEVICE_TYPE> will be created for each available device type.\n# TestClassCPU and TestClassCUDA, for example. Tests in these classes also\n# have the device type and dtype, if provided, appended to their original\n# name. testX, for instance, becomes testX_<device_type> or\n# testX_<device_type>_<dtype>.\n#\n# More concretely, TestTorchDeviceType becomes TestTorchDeviceTypeCPU,\n# TestTorchDeviceTypeCUDA, ... test_diagonal in TestTorchDeviceType becomes\n# test_diagonal_cpu, test_diagonal_cuda, ... 
test_erfinv, which accepts a dtype,\n# becomes test_erfinv_cpu_float, test_erfinv_cpu_double, test_erfinv_cuda_half,\n# ...\n#\n# In short, if you write a test signature like\n# def textX(self, device)\n# You are effectively writing\n# def testX_cpu(self, device='cpu')\n# def textX_cuda(self, device='cuda')\n# def testX_xla(self, device='xla')\n# ...\n#\n# These tests can be run directly like normal tests:\n# \"python test_torch.py TestTorchDeviceTypeCPU.test_diagonal_cpu\"\n#\n# All the tests for a particular device type can be run using the class, and\n# other collections of tests can be run using pytest filtering, like\n#\n# \"pytest test_torch.py -k 'test_diag'\"\n#\n# which will run test_diag on every available device.\n#\n# To specify particular device types the 'and' keyword can be used:\n#\n# \"pytest test_torch.py -k 'test_erfinv and cpu'\"\n#\n# will run test_erfinv on all cpu dtypes.\n#\n# [ADDING A DEVICE TYPE]\n#\n# To add a device type:\n#\n# (1) Create a new \"TestBase\" extending DeviceTypeTestBase.\n# See CPUTestBase and CUDATestBase below.\n# (2) Define the \"device_type\" attribute of the base to be the\n# appropriate string.\n# (3) Add logic to this file that appends your base class to\n# device_type_test_bases when your device type is available.\n# (4) (Optional) Write setUpClass/tearDownClass class methods that\n# instantiate dependencies (see MAGMA in CUDATestBase).\n# (5) (Optional) Override the \"instantiate_test\" method for total\n# control over how your class creates tests.\n#\n# setUpClass is called AFTER tests have been created and BEFORE and ONLY IF\n# they are run. This makes it useful for initializing devices and dependencies.\n#\n# Note [Overriding methods in generic tests]\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Device generic tests look a lot like normal test classes, but they differ\n# from ordinary classes in some important ways. 
In particular, overriding\n# methods in generic tests doesn't work quite the way you expect.\n#\n# class TestFooDeviceType(TestCase):\n# # Intention is to override\n# def assertEqual(self, x, y):\n# # This DOESN'T WORK!\n# super(TestFooDeviceType, self).assertEqual(x, y)\n#\n# If you try to run this code, you'll get an error saying that TestFooDeviceType\n# is not in scope. This is because after instantiating our classes, we delete\n# it from the parent scope. Instead, you need to hardcode a direct invocation\n# of the desired subclass call, e.g.,\n#\n# class TestFooDeviceType(TestCase):\n# # Intention is to override\n# def assertEqual(self, x, y):\n# TestCase.assertEqual(x, y)\n#\n# However, a less error-prone way of customizing the behavior of TestCase\n# is to either (1) add your functionality to TestCase and make it toggled\n# by a class attribute, or (2) create your own subclass of TestCase, and\n# then inherit from it for your generic test.\n#\n\n# List of device type test bases that can be used to instantiate tests.\n# See below for how this list is populated. If you're adding a device type\n# you should check if it's available and (if it is) add it to this list.\ndevice_type_test_bases = []\n\n\nclass DeviceTypeTestBase(TestCase):\n device_type = 'generic_device_type'\n\n # Precision is a thread-local setting since it may be overridden per test\n _tls = threading.local()\n _tls.precision = TestCase._precision\n\n @property\n def precision(self):\n return self._tls.precision\n\n @precision.setter\n def precision(self, prec):\n self._tls.precision = prec\n\n # Returns a string representing the device that single device tests should use.\n # Note: single device tests use this device exclusively.\n @classmethod\n def get_primary_device(cls):\n return cls.device_type\n\n # Returns a list of strings representing all available devices of this\n # device type. 
The primary device must be the first string in the list\n # and the list must contain no duplicates.\n # Note: UNSTABLE API. Will be replaced once PyTorch has a device generic\n # mechanism of acquiring all available devices.\n @classmethod\n def get_all_devices(cls):\n return [cls.get_primary_device()]\n\n # Returns the dtypes the test has requested.\n # Prefers device-specific dtype specifications over generic ones.\n @classmethod\n def _get_dtypes(cls, test):\n if not hasattr(test, 'dtypes'):\n return None\n return test.dtypes.get(cls.device_type, test.dtypes.get('all', None))\n\n def _get_precision_override(self, test, dtype):\n if not hasattr(test, 'precision_overrides'):\n return self.precision\n return test.precision_overrides.get(dtype, self.precision)\n\n # Creates device-specific tests.\n @classmethod\n def instantiate_test(cls, name, test):\n test_name = name + \"_\" + cls.device_type\n\n dtypes = cls._get_dtypes(test)\n if dtypes is None: # Test has no dtype variants\n assert not hasattr(cls, test_name), \"Redefinition of test {0}\".format(test_name)\n\n @wraps(test)\n def instantiated_test(self, test=test):\n device_arg = cls.get_primary_device() if not hasattr(test, 'num_required_devices') else cls.get_all_devices()\n return test(self, device_arg)\n\n setattr(cls, test_name, instantiated_test)\n else: # Test has dtype variants\n for dtype in dtypes:\n # Constructs dtype suffix\n if isinstance(dtype, (list, tuple)):\n dtype_str = \"\"\n for d in dtype:\n dtype_str += \"_\" + str(d).split('.')[1]\n else:\n dtype_str = \"_\" + str(dtype).split('.')[1]\n\n dtype_test_name = test_name + dtype_str\n assert not hasattr(cls, dtype_test_name), \"Redefinition of test {0}\".format(dtype_test_name)\n\n @wraps(test)\n def instantiated_test(self, test=test, dtype=dtype):\n device_arg = cls.get_primary_device() if not hasattr(test, 'num_required_devices') else cls.get_all_devices()\n # Sets precision and runs test\n # Note: precision is reset after the test is run\n 
guard_precision = self.precision
                try :
                    self.precision = self._get_precision_override(test, dtype)
                    result = test(self, device_arg, dtype)
                finally:
                    # Always restore the class-level precision, even if the test raised.
                    self.precision = guard_precision

                return result

            setattr(cls, dtype_test_name, instantiated_test)


# Test base for the 'cpu' device type. No setup is required beyond the
# generic DeviceTypeTestBase machinery.
class CPUTestBase(DeviceTypeTestBase):
    device_type = 'cpu'


# Test base for the 'cuda' device type. setUpClass probes the CUDA
# runtime (MAGMA, cuDNN, current device) and caches the results as class
# attributes so individual tests and skip decorators can consult them.
class CUDATestBase(DeviceTypeTestBase):
    device_type = 'cuda'
    # Enable CUDA-specific leak checking and non-default-stream behavior
    # provided by the common TestCase infrastructure.
    _do_cuda_memory_leak_check = True
    _do_cuda_non_default_stream = True

    def has_cudnn(self):
        # no_cudnn is populated in setUpClass below.
        return not self.no_cudnn

    @classmethod
    def get_primary_device(cls):
        # primary_device is populated in setUpClass below.
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # Returns the primary device first, followed by every other visible
        # CUDA device, e.g. ['cuda:0', 'cuda:1', ...], with no duplicates.
        primary_device_idx = int(cls.get_primary_device().split(':')[1])
        num_devices = torch.cuda.device_count()

        prim_device = cls.get_primary_device()
        cuda_str = 'cuda:{0}'
        non_primary_devices = [cuda_str.format(idx) for idx in range(num_devices) if idx != primary_device_idx]
        return [prim_device] + non_primary_devices

    @classmethod
    def setUpClass(cls):
        # has_magma shows up after cuda is initialized
        t = torch.ones(1).cuda()
        cls.no_magma = not torch.cuda.has_magma

        # Determines if cuDNN is available and its version
        cls.no_cudnn = not torch.backends.cudnn.is_acceptable(t)
        cls.cudnn_version = None if cls.no_cudnn else torch.backends.cudnn.version()

        # Acquires the current device as the primary (test) device
        cls.primary_device = 'cuda:{0}'.format(torch.cuda.current_device())


# Adds available device-type-specific test base classes
device_type_test_bases.append(CPUTestBase)
if torch.cuda.is_available():
    device_type_test_bases.append(CUDATestBase)


# Note [How to extend DeviceTypeTestBase to add new test device]
# The following logic optionally allows downstream projects like pytorch/xla to
# add more test devices.
# Instructions:
# - Add a python file (e.g. 
pytorch/xla/test/pytorch_test_base.py) in downstream project.\n# - Inside the file, one should inherit from `DeviceTypeTestBase` class and define\n# a new DeviceTypeTest class (e.g. `XLATestBase`) with proper implementation of\n# `instantiate_test` method.\n# - DO NOT import common_device_type inside the file.\n# `runpy.run_path` with `globals()` already properly setup the context so that\n# `DeviceTypeTestBase` is already available.\n# - Set a top-level variable `TEST_CLASS` equal to your new class.\n# E.g. TEST_CLASS = XLATensorBase\n# - To run tests with new device type, set `TORCH_TEST_DEVICE` env variable to path\n# to this file. Multiple paths can be separated by `:`.\n# See pytorch/xla/test/pytorch_test_base.py for a more detailed example.\n_TORCH_TEST_DEVICES = os.environ.get('TORCH_TEST_DEVICES', None)\nif _TORCH_TEST_DEVICES:\n for path in _TORCH_TEST_DEVICES.split(':'):\n mod = runpy.run_path(path, init_globals=globals())\n device_type_test_bases.append(mod['TEST_CLASS'])\n\n\nPYTORCH_CUDA_MEMCHECK = os.getenv('PYTORCH_CUDA_MEMCHECK', '0') == '1'\n\n\n# Adds 'instantiated' device-specific test cases to the given scope.\n# The tests in these test cases are derived from the generic tests in\n# generic_test_class.\n# See note \"Generic Device Type Testing.\"\ndef instantiate_device_type_tests(generic_test_class, scope, except_for=None, only_for=None):\n # Removes the generic test class from its enclosing scope so its tests\n # are not discoverable.\n del scope[generic_test_class.__name__]\n\n # Creates an 'empty' version of the generic_test_class\n # Note: we don't inherit from the generic_test_class directly because\n # that would add its tests to our test classes and they would be\n # discovered (despite not being runnable). 
Inherited methods also\n # can't be removed later, and we can't rely on load_tests because\n # pytest doesn't support it (as of this writing).\n empty_name = generic_test_class.__name__ + \"_base\"\n empty_class = type(empty_name, generic_test_class.__bases__, {})\n\n # Acquires members names\n # See Note [Overriding methods in generic tests]\n generic_members = set(generic_test_class.__dict__.keys()) - set(empty_class.__dict__.keys())\n generic_tests = [x for x in generic_members if x.startswith('test')]\n\n # Creates device-specific test cases\n for base in device_type_test_bases:\n # Skips bases listed in except_for\n if except_for is not None and only_for is not None:\n assert base.device_type not in except_for or base.device_type not in only_for,\\\n \"same device cannot appear in except_for and only_for\"\n if except_for is not None and base.device_type in except_for:\n continue\n if only_for is not None and base.device_type not in only_for:\n continue\n\n class_name = generic_test_class.__name__ + base.device_type.upper()\n device_type_test_class = type(class_name, (base, empty_class), {})\n\n for name in generic_members:\n if name in generic_tests: # Instantiates test member\n # Requires tests be a function for Python2 compat\n # (In Python2 tests are type checked methods wrapping functions)\n test = getattr(generic_test_class, name)\n if hasattr(test, '__func__'):\n test = test.__func__\n assert inspect.isfunction(test), \"Couldn't extract function from '{0}'\".format(name)\n\n # Instantiates the device-specific tests\n device_type_test_class.instantiate_test(name, copy.deepcopy(test))\n else: # Ports non-test member\n assert name not in device_type_test_class.__dict__, \"Redefinition of directly defined member {0}\".format(name)\n\n # Unwraps to functions (when available) for Python2 compat\n nontest = getattr(generic_test_class, name)\n if hasattr(nontest, '__func__'):\n nontest = nontest.__func__\n\n setattr(device_type_test_class, name, nontest)\n\n # 
Mimics defining the instantiated class in the caller's file\n # by setting its module to the given class's and adding\n # the module to the given scope.\n # This lets the instantiated class be discovered by unittest.\n device_type_test_class.__module__ = generic_test_class.__module__\n scope[class_name] = device_type_test_class\n\n\n# Decorator that skips a test if the given condition is true.\n# Notes:\n# (1) Skip conditions stack.\n# (2) Skip conditions can be bools or strings. If a string the\n# test base must have defined the corresponding attribute to be False\n# for the test to run. If you want to use a string argument you should\n# probably define a new decorator instead (see below).\n# (3) Prefer the existing decorators to defining the 'device_type' kwarg.\nclass skipIf(object):\n\n def __init__(self, dep, reason, device_type=None):\n self.dep = dep\n self.reason = reason\n self.device_type = device_type\n\n def __call__(self, fn):\n\n @wraps(fn)\n def dep_fn(slf, device, *args, **kwargs):\n if self.device_type is None or self.device_type == slf.device_type:\n if (isinstance(self.dep, str) and getattr(slf, self.dep, True)) or (isinstance(self.dep, bool) and self.dep):\n raise unittest.SkipTest(self.reason)\n\n return fn(slf, device, *args, **kwargs)\n return dep_fn\n\n\n# Skips a test on CPU if the condition is true.\nclass skipCPUIf(skipIf):\n\n def __init__(self, dep, reason):\n super().__init__(dep, reason, device_type='cpu')\n\n\n# Skips a test on CUDA if the condition is true.\nclass skipCUDAIf(skipIf):\n\n def __init__(self, dep, reason):\n super().__init__(dep, reason, device_type='cuda')\n\n\n# Only runs on cuda, and only run when there is enough GPU RAM\ndef largeCUDATensorTest(size):\n if isinstance(size, str):\n assert size.endswith(\"GB\") or size.endswith(\"gb\"), \"only bytes or GB supported\"\n size = 1024 ** 3 * int(size[:-2])\n valid = torch.cuda.is_available() and torch.cuda.get_device_properties(0).total_memory >= size\n return 
unittest.skipIf(not valid, "No CUDA or Has CUDA but GPU RAM is not large enough")


def _has_sufficient_memory(device, size):
    # Returns whether `device` has at least `size` bytes of memory available.
    # For 'cuda*' devices this checks total GPU RAM; for 'cpu' it checks
    # available system RAM via psutil. Raises unittest.SkipTest for devices
    # it cannot check (xla, unknown types, or cpu without psutil).
    if device.startswith('cuda'):
        return (torch.cuda.is_available() and
                torch.cuda.get_device_properties(0).total_memory >= size)
    if device == 'xla':
        raise unittest.SkipTest('TODO: Memory availability checks for XLA?')

    if device != 'cpu':
        raise unittest.SkipTest('Unknown device type')

    # CPU
    if not HAS_PSUTIL:
        raise unittest.SkipTest('Need psutil to determine if memory is sufficient')

    # The sanitizers have significant memory overheads
    if TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN:
        effective_size = size * 10
    else:
        effective_size = size

    # If memory looks insufficient, collect garbage once and re-check before
    # giving a final answer.
    if psutil.virtual_memory().available < effective_size:
        gc.collect()
    return psutil.virtual_memory().available >= effective_size


def largeTensorTest(size):
    """Skip test if the device has insufficient memory to run the test

    size may be a number of bytes, a string of the form "N GB", or a callable
    """
    if isinstance(size, str):
        assert size.endswith("GB") or size.endswith("gb"), "only bytes or GB supported"
        size = 1024 ** 3 * int(size[:-2])

    def inner(fn):
        @wraps(fn)
        def dep_fn(self, *args, **kwargs):
            # A callable `size` is resolved lazily with the test's own
            # arguments, so the requirement can depend on dtype/shape.
            size_bytes = size(self, *args, **kwargs) if callable(size) else size
            if not _has_sufficient_memory(self.device_type, size_bytes):
                raise unittest.SkipTest('Insufficient {} memory'.format(self.device_type))

            return fn(self, *args, **kwargs)
        return dep_fn
    return inner


# Marks a test as expected to fail on the given device type (or every
# device type when device_type is None): the wrapper passes if the test
# raises and fails the test if it unexpectedly succeeds.
class expectedFailure(object):

    def __init__(self, device_type):
        self.device_type = device_type

    def __call__(self, fn):

        @wraps(fn)
        def efail_fn(slf, device, *args, **kwargs):
            if self.device_type is None or self.device_type == slf.device_type:
                try:
                    fn(slf, device, *args, **kwargs)
                except Exception:
                    return
                else:
                    slf.fail('expected test to fail, but it passed')

            # Non-matching device types run the test normally.
            return fn(slf, device, *args, **kwargs)
        return 
efail_fn\n\n\nclass onlyOn(object):\n\n def __init__(self, device_type):\n self.device_type = device_type\n\n def __call__(self, fn):\n\n @wraps(fn)\n def only_fn(slf, device, *args, **kwargs):\n if self.device_type != slf.device_type:\n reason = \"Only runs on {0}\".format(self.device_type)\n raise unittest.SkipTest(reason)\n\n return fn(slf, device, *args, **kwargs)\n\n return only_fn\n\n\n# Decorator that provides all available devices of the device type to the test\n# as a list of strings instead of providing a single device string.\n# Skips the test if the number of available devices of the variant's device\n# type is less than the 'num_required_devices' arg.\nclass deviceCountAtLeast(object):\n\n def __init__(self, num_required_devices):\n self.num_required_devices = num_required_devices\n\n def __call__(self, fn):\n assert not hasattr(fn, 'num_required_devices'), \"deviceCountAtLeast redefinition for {0}\".format(fn.__name__)\n fn.num_required_devices = self.num_required_devices\n\n @wraps(fn)\n def multi_fn(slf, devices, *args, **kwargs):\n if len(devices) < self.num_required_devices:\n reason = \"fewer than {0} devices detected\".format(self.num_required_devices)\n raise unittest.SkipTest(reason)\n\n return fn(slf, devices, *args, **kwargs)\n\n return multi_fn\n\n# Only runs the test on the CPU and CUDA (the native device types)\ndef onlyOnCPUAndCUDA(fn):\n @wraps(fn)\n def only_fn(self, device, *args, **kwargs):\n if self.device_type != 'cpu' and self.device_type != 'cuda':\n reason = \"Doesn't run on {0}\".format(self.device_type)\n raise unittest.SkipTest(reason)\n\n return fn(self, device, *args, **kwargs)\n\n return only_fn\n\n# Specifies per-dtype precision overrides.\n# Ex.\n#\n# @precisionOverride(torch.half : 1e-2, torch.float : 1e-4)\n# @dtypes(torch.half, torch.float, torch.double)\n# def test_X(self, device, dtype):\n# ...\n#\n# When the test is instantiated its class's precision will be set to the\n# corresponding override, if it exists.\n# 
self.precision can be accessed directly, and it also controls the behavior of
# functions like self.assertEqual().
#
# Note that self.precision is a scalar value, so if you require multiple
# precisions (or are working with multiple dtypes) they should be specified
# explicitly and computed using self.precision (e.g.
# self.precision *2, max(1, self.precision)).
class precisionOverride(object):

    def __init__(self, d):
        # Validate eagerly so a bad decorator argument fails at definition
        # time rather than when the test is instantiated.
        assert isinstance(d, dict), "precisionOverride not given a dtype : precision dict!"
        for dtype, prec in d.items():
            assert isinstance(dtype, torch.dtype), "precisionOverride given unknown dtype {0}".format(dtype)

        self.d = d

    def __call__(self, fn):
        # Attach the overrides to the test function; they are consumed by
        # the device test base when the test variant runs.
        fn.precision_overrides = self.d
        return fn


# Decorator that instantiates a variant of the test for each given dtype.
# Notes:
#   (1) Tests that accept the dtype argument MUST use this decorator.
#   (2) Can be overridden for the CPU or CUDA, respectively, using dtypesIfCPU
#       or dtypesIfCUDA.
#   (3) Can accept an iterable of dtypes or an iterable of tuples
#       of dtypes.
# Examples:
# @dtypes(torch.float32, torch.float64)
# @dtypes((torch.long, torch.float32), (torch.int, torch.float64))
class dtypes(object):

    # Note: *args, **kwargs for Python2 compat.
    # Python 3 allows (self, *args, device_type='all').
    def __init__(self, *args, **kwargs):
        if len(args) > 0 and isinstance(args[0], (list, tuple)):
            for arg in args:
                assert isinstance(arg, (list, tuple)), \
                    "When one dtype variant is a tuple or list, " \
                    "all dtype variants must be. 
\" \\\n \"Received non-list non-tuple dtype {0}\".format(str(arg))\n assert all(isinstance(dtype, torch.dtype) for dtype in arg), \"Unknown dtype in {0}\".format(str(arg))\n else:\n assert all(isinstance(arg, torch.dtype) for arg in args), \"Unknown dtype in {0}\".format(str(args))\n\n self.args = args\n self.device_type = kwargs.get('device_type', 'all')\n\n def __call__(self, fn):\n d = getattr(fn, 'dtypes', {})\n assert self.device_type not in d, \"dtypes redefinition for {0}\".format(self.device_type)\n d[self.device_type] = self.args\n fn.dtypes = d\n return fn\n\n\n# Overrides specified dtypes on the CPU.\nclass dtypesIfCPU(dtypes):\n\n def __init__(self, *args):\n super().__init__(*args, device_type='cpu')\n\n\n# Overrides specified dtypes on CUDA.\nclass dtypesIfCUDA(dtypes):\n\n def __init__(self, *args):\n super().__init__(*args, device_type='cuda')\n\n\ndef onlyCPU(fn):\n return onlyOn('cpu')(fn)\n\n\ndef onlyCUDA(fn):\n return onlyOn('cuda')(fn)\n\n\ndef expectedFailureCUDA(fn):\n return expectedFailure('cuda')(fn)\n\nclass expectedAlertNondeterministic:\n def __init__(self, caller_name, device_type=None, fn_has_device_arg=True):\n self.device_type = device_type\n self.error_message = caller_name + ' does not have a deterministic implementation, but you set'\n self.fn_has_device_arg = fn_has_device_arg\n\n def __call__(self, fn):\n @wraps(fn)\n def efail_fn(slf, device, *args, **kwargs):\n if self.device_type is None or self.device_type == slf.device_type:\n deterministic_restore = torch.is_deterministic()\n torch.set_deterministic(True)\n try:\n if self.fn_has_device_arg:\n fn(slf, device, *args, **kwargs)\n else:\n fn(slf, *args, **kwargs)\n except RuntimeError as e:\n torch.set_deterministic(deterministic_restore)\n if self.error_message not in str(e):\n slf.fail(\n 'expected non-deterministic error message to start with \"'\n + self.error_message\n + '\" but got this instead: \"' + str(e) + '\"')\n return\n else:\n 
torch.set_deterministic(deterministic_restore)\n slf.fail('expected a non-deterministic error, but it was not raised')\n\n if self.fn_has_device_arg:\n return fn(slf, device, *args, **kwargs)\n else:\n return fn(slf, *args, **kwargs)\n\n @wraps(fn)\n def efail_fn_no_device(slf, *args, **kwargs):\n return efail_fn(slf, None, *args, **kwargs)\n\n if self.fn_has_device_arg:\n return efail_fn\n else:\n return efail_fn_no_device\n\n# Skips a test on CPU if LAPACK is not available.\ndef skipCPUIfNoLapack(fn):\n return skipCPUIf(not torch._C.has_lapack, \"PyTorch compiled without Lapack\")(fn)\n\n\n# Skips a test on CPU if MKL is not available.\ndef skipCPUIfNoMkl(fn):\n return skipCPUIf(not TEST_MKL, \"PyTorch is built without MKL support\")(fn)\n\n\n# Skips a test on CUDA if MAGMA is not available.\ndef skipCUDAIfNoMagma(fn):\n return skipCUDAIf('no_magma', \"no MAGMA library detected\")(skipCUDANonDefaultStreamIf(True)(fn))\n\n\n# Skips a test on CUDA when using ROCm.\ndef skipCUDAIfRocm(fn):\n return skipCUDAIf(TEST_WITH_ROCM, \"test doesn't currently work on the ROCm stack\")(fn)\n\n# Skips a test on CUDA when not using ROCm.\ndef skipCUDAIfNotRocm(fn):\n return skipCUDAIf(not TEST_WITH_ROCM, \"test doesn't currently work on the CUDA stack\")(fn)\n\n\n# Skips a test on CUDA if cuDNN is unavailable or its version is lower than requested.\ndef skipCUDAIfCudnnVersionLessThan(version=0):\n\n def dec_fn(fn):\n @wraps(fn)\n def wrap_fn(self, device, *args, **kwargs):\n if self.device_type == 'cuda':\n if self.no_cudnn:\n reason = \"cuDNN not available\"\n raise unittest.SkipTest(reason)\n if self.cudnn_version is None or self.cudnn_version < version:\n reason = \"cuDNN version {0} is available but {1} required\".format(self.cudnn_version, version)\n raise unittest.SkipTest(reason)\n\n return fn(self, device, *args, **kwargs)\n\n return wrap_fn\n return dec_fn\n\n\ndef skipCUDAIfNoCudnn(fn):\n return skipCUDAIfCudnnVersionLessThan(0)(fn)\n", "\"\"\"\nNote [Randomized 
statistical tests]\n-----------------------------------\n\nThis note describes how to maintain tests in this file as random sources\nchange. This file contains two types of randomized tests:\n\n1. The easier type of randomized test are tests that should always pass but are\n initialized with random data. If these fail something is wrong, but it's\n fine to use a fixed seed by inheriting from common.TestCase.\n\n2. The trickier tests are statistical tests. These tests explicitly call\n set_rng_seed(n) and are marked \"see Note [Randomized statistical tests]\".\n These statistical tests have a known positive failure rate\n (we set failure_rate=1e-3 by default). We need to balance strength of these\n tests with annoyance of false alarms. One way that works is to specifically\n set seeds in each of the randomized tests. When a random generator\n occasionally changes (as in #4312 vectorizing the Box-Muller sampler), some\n of these statistical tests may (rarely) fail. If one fails in this case,\n it's fine to increment the seed of the failing test (but you shouldn't need\n to increment it more than once; otherwise something is probably actually\n wrong).\n\"\"\"\n\nimport math\nimport numbers\nimport unittest\nfrom collections import namedtuple\nfrom itertools import product\nfrom random import shuffle\n\nimport torch\n\n# TODO: remove this global setting\n# Distributions tests use double as the default dtype\ntorch.set_default_dtype(torch.double)\n\nfrom torch._six import inf\nfrom torch.testing._internal.common_utils import TestCase, run_tests, set_rng_seed, TEST_WITH_UBSAN, load_tests\nfrom torch.testing._internal.common_cuda import TEST_CUDA\nfrom torch.autograd import grad, gradcheck\nfrom torch.distributions import (Bernoulli, Beta, Binomial, Categorical,\n Cauchy, Chi2, ContinuousBernoulli, Dirichlet,\n Distribution, Exponential, ExponentialFamily,\n FisherSnedecor, Gamma, Geometric, Gumbel,\n HalfCauchy, HalfNormal,\n Independent, Laplace, LogisticNormal,\n 
LogNormal, LowRankMultivariateNormal,\n MixtureSameFamily, Multinomial, MultivariateNormal,\n NegativeBinomial, Normal, OneHotCategorical, Pareto,\n Poisson, RelaxedBernoulli, RelaxedOneHotCategorical,\n StudentT, TransformedDistribution, Uniform,\n VonMises, Weibull, constraints, kl_divergence)\nfrom torch.distributions.constraint_registry import biject_to, transform_to\nfrom torch.distributions.constraints import Constraint, is_dependent\nfrom torch.distributions.dirichlet import _Dirichlet_backward\nfrom torch.distributions.kl import _kl_expfamily_expfamily\nfrom torch.distributions.transforms import (AbsTransform, AffineTransform,\n CatTransform, ComposeTransform, ExpTransform,\n LowerCholeskyTransform,\n PowerTransform, SigmoidTransform,\n TanhTransform, SoftmaxTransform,\n StickBreakingTransform,\n identity_transform, StackTransform)\nfrom torch.distributions.utils import probs_to_logits, lazy_property\nfrom torch.nn.functional import softmax\n\n# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for\n# sharding on sandcastle. 
This line silences flake warnings
load_tests = load_tests

# Gate all SciPy/NumPy reference checks on whether those packages import.
TEST_NUMPY = True
try:
    import numpy as np
    import scipy.stats
    import scipy.special
except ImportError:
    TEST_NUMPY = False


def pairwise(Dist, *params):
    """
    Creates a pair of distributions `Dist` initialized to test each element of
    param with each other.
    """
    # params1 tiles each parameter row-wise; params2 is its transpose, so the
    # two returned distributions pair every element with every other element.
    params1 = [torch.tensor([p] * len(p)) for p in params]
    params2 = [p.transpose(0, 1) for p in params1]
    return Dist(*params1), Dist(*params2)


def is_all_nan(tensor):
    """
    Checks if all entries of a tensor is nan.
    """
    # NaN is the only value that compares unequal to itself.
    return (tensor != tensor).all()


# Register all distributions for generic tests.
# Each Example pairs a Distribution subclass with a list of valid constructor
# kwarg dicts; the generic tests below iterate over EXAMPLES for coverage.
Example = namedtuple('Example', ['Dist', 'params'])
EXAMPLES = [
    Example(Bernoulli, [
        {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)},
        {'probs': torch.tensor([0.3], requires_grad=True)},
        {'probs': 0.3},
        {'logits': torch.tensor([0.], requires_grad=True)},
    ]),
    Example(Geometric, [
        {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)},
        {'probs': torch.tensor([0.3], requires_grad=True)},
        {'probs': 0.3},
    ]),
    Example(Beta, [
        {
            'concentration1': torch.randn(2, 3).exp().requires_grad_(),
            'concentration0': torch.randn(2, 3).exp().requires_grad_(),
        },
        {
            'concentration1': torch.randn(4).exp().requires_grad_(),
            'concentration0': torch.randn(4).exp().requires_grad_(),
        },
    ]),
    Example(Categorical, [
        {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)},
        {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)},
        {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)},
    ]),
    Example(Binomial, [
        {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10},
        {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10},
        {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10])},
{'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': torch.tensor([10, 8])},\n {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True),\n 'total_count': torch.tensor([[10., 8.], [5., 3.]])},\n {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True),\n 'total_count': torch.tensor(0.)},\n ]),\n Example(NegativeBinomial, [\n {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10},\n {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': 10},\n {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10])},\n {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True), 'total_count': torch.tensor([10, 8])},\n {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True),\n 'total_count': torch.tensor([[10., 8.], [5., 3.]])},\n {'probs': torch.tensor([[0.9, 0.0], [0.0, 0.9]], requires_grad=True),\n 'total_count': torch.tensor(0.)},\n ]),\n Example(Multinomial, [\n {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True), 'total_count': 10},\n {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True), 'total_count': 10},\n ]),\n Example(Cauchy, [\n {'loc': 0.0, 'scale': 1.0},\n {'loc': torch.tensor([0.0]), 'scale': 1.0},\n {'loc': torch.tensor([[0.0], [0.0]]),\n 'scale': torch.tensor([[1.0], [1.0]])}\n ]),\n Example(Chi2, [\n {'df': torch.randn(2, 3).exp().requires_grad_()},\n {'df': torch.randn(1).exp().requires_grad_()},\n ]),\n Example(StudentT, [\n {'df': torch.randn(2, 3).exp().requires_grad_()},\n {'df': torch.randn(1).exp().requires_grad_()},\n ]),\n Example(Dirichlet, [\n {'concentration': torch.randn(2, 3).exp().requires_grad_()},\n {'concentration': torch.randn(4).exp().requires_grad_()},\n ]),\n Example(Exponential, [\n {'rate': torch.randn(5, 5).abs().requires_grad_()},\n {'rate': torch.randn(1).abs().requires_grad_()},\n ]),\n 
Example(FisherSnedecor, [\n {\n 'df1': torch.randn(5, 5).abs().requires_grad_(),\n 'df2': torch.randn(5, 5).abs().requires_grad_(),\n },\n {\n 'df1': torch.randn(1).abs().requires_grad_(),\n 'df2': torch.randn(1).abs().requires_grad_(),\n },\n {\n 'df1': torch.tensor([1.0]),\n 'df2': 1.0,\n }\n ]),\n Example(Gamma, [\n {\n 'concentration': torch.randn(2, 3).exp().requires_grad_(),\n 'rate': torch.randn(2, 3).exp().requires_grad_(),\n },\n {\n 'concentration': torch.randn(1).exp().requires_grad_(),\n 'rate': torch.randn(1).exp().requires_grad_(),\n },\n ]),\n Example(Gumbel, [\n {\n 'loc': torch.randn(5, 5, requires_grad=True),\n 'scale': torch.randn(5, 5).abs().requires_grad_(),\n },\n {\n 'loc': torch.randn(1, requires_grad=True),\n 'scale': torch.randn(1).abs().requires_grad_(),\n },\n ]),\n Example(HalfCauchy, [\n {'scale': 1.0},\n {'scale': torch.tensor([[1.0], [1.0]])}\n ]),\n Example(HalfNormal, [\n {'scale': torch.randn(5, 5).abs().requires_grad_()},\n {'scale': torch.randn(1).abs().requires_grad_()},\n {'scale': torch.tensor([1e-5, 1e-5], requires_grad=True)}\n ]),\n Example(Independent, [\n {\n 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True),\n torch.randn(2, 3).abs().requires_grad_()),\n 'reinterpreted_batch_ndims': 0,\n },\n {\n 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True),\n torch.randn(2, 3).abs().requires_grad_()),\n 'reinterpreted_batch_ndims': 1,\n },\n {\n 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True),\n torch.randn(2, 3).abs().requires_grad_()),\n 'reinterpreted_batch_ndims': 2,\n },\n {\n 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True),\n torch.randn(2, 3, 5).abs().requires_grad_()),\n 'reinterpreted_batch_ndims': 2,\n },\n {\n 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True),\n torch.randn(2, 3, 5).abs().requires_grad_()),\n 'reinterpreted_batch_ndims': 3,\n },\n ]),\n Example(Laplace, [\n {\n 'loc': torch.randn(5, 5, requires_grad=True),\n 
'scale': torch.randn(5, 5).abs().requires_grad_(),\n },\n {\n 'loc': torch.randn(1, requires_grad=True),\n 'scale': torch.randn(1).abs().requires_grad_(),\n },\n {\n 'loc': torch.tensor([1.0, 0.0], requires_grad=True),\n 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True),\n },\n ]),\n Example(LogNormal, [\n {\n 'loc': torch.randn(5, 5, requires_grad=True),\n 'scale': torch.randn(5, 5).abs().requires_grad_(),\n },\n {\n 'loc': torch.randn(1, requires_grad=True),\n 'scale': torch.randn(1).abs().requires_grad_(),\n },\n {\n 'loc': torch.tensor([1.0, 0.0], requires_grad=True),\n 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True),\n },\n ]),\n Example(LogisticNormal, [\n {\n 'loc': torch.randn(5, 5).requires_grad_(),\n 'scale': torch.randn(5, 5).abs().requires_grad_(),\n },\n {\n 'loc': torch.randn(1).requires_grad_(),\n 'scale': torch.randn(1).abs().requires_grad_(),\n },\n {\n 'loc': torch.tensor([1.0, 0.0], requires_grad=True),\n 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True),\n },\n ]),\n Example(LowRankMultivariateNormal, [\n {\n 'loc': torch.randn(5, 2, requires_grad=True),\n 'cov_factor': torch.randn(5, 2, 1, requires_grad=True),\n 'cov_diag': torch.tensor([2.0, 0.25], requires_grad=True),\n },\n {\n 'loc': torch.randn(4, 3, requires_grad=True),\n 'cov_factor': torch.randn(3, 2, requires_grad=True),\n 'cov_diag': torch.tensor([5.0, 1.5, 3.], requires_grad=True),\n }\n ]),\n Example(MultivariateNormal, [\n {\n 'loc': torch.randn(5, 2, requires_grad=True),\n 'covariance_matrix': torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True),\n },\n {\n 'loc': torch.randn(2, 3, requires_grad=True),\n 'precision_matrix': torch.tensor([[2.0, 0.1, 0.0],\n [0.1, 0.25, 0.0],\n [0.0, 0.0, 0.3]], requires_grad=True),\n },\n {\n 'loc': torch.randn(5, 3, 2, requires_grad=True),\n 'scale_tril': torch.tensor([[[2.0, 0.0], [-0.5, 0.25]],\n [[2.0, 0.0], [0.3, 0.25]],\n [[5.0, 0.0], [-0.5, 1.5]]], requires_grad=True),\n },\n {\n 'loc': torch.tensor([1.0, -1.0]),\n 
'covariance_matrix': torch.tensor([[5.0, -0.5], [-0.5, 1.5]]),\n },\n ]),\n Example(Normal, [\n {\n 'loc': torch.randn(5, 5, requires_grad=True),\n 'scale': torch.randn(5, 5).abs().requires_grad_(),\n },\n {\n 'loc': torch.randn(1, requires_grad=True),\n 'scale': torch.randn(1).abs().requires_grad_(),\n },\n {\n 'loc': torch.tensor([1.0, 0.0], requires_grad=True),\n 'scale': torch.tensor([1e-5, 1e-5], requires_grad=True),\n },\n ]),\n Example(OneHotCategorical, [\n {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)},\n {'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]], requires_grad=True)},\n {'logits': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)},\n ]),\n Example(Pareto, [\n {\n 'scale': 1.0,\n 'alpha': 1.0\n },\n {\n 'scale': torch.randn(5, 5).abs().requires_grad_(),\n 'alpha': torch.randn(5, 5).abs().requires_grad_()\n },\n {\n 'scale': torch.tensor([1.0]),\n 'alpha': 1.0\n }\n ]),\n Example(Poisson, [\n {\n 'rate': torch.randn(5, 5).abs().requires_grad_(),\n },\n {\n 'rate': torch.randn(3).abs().requires_grad_(),\n },\n {\n 'rate': 0.2,\n }\n ]),\n Example(RelaxedBernoulli, [\n {\n 'temperature': torch.tensor([0.5], requires_grad=True),\n 'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True),\n },\n {\n 'temperature': torch.tensor([2.0]),\n 'probs': torch.tensor([0.3]),\n },\n {\n 'temperature': torch.tensor([7.2]),\n 'logits': torch.tensor([-2.0, 2.0, 1.0, 5.0])\n }\n ]),\n Example(RelaxedOneHotCategorical, [\n {\n 'temperature': torch.tensor([0.5], requires_grad=True),\n 'probs': torch.tensor([[0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True)\n },\n {\n 'temperature': torch.tensor([2.0]),\n 'probs': torch.tensor([[1.0, 0.0], [0.0, 1.0]])\n },\n {\n 'temperature': torch.tensor([7.2]),\n 'logits': torch.tensor([[-2.0, 2.0], [1.0, 5.0]])\n }\n ]),\n Example(TransformedDistribution, [\n {\n 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True),\n torch.randn(2, 3).abs().requires_grad_()),\n 
'transforms': [],\n },\n {\n 'base_distribution': Normal(torch.randn(2, 3, requires_grad=True),\n torch.randn(2, 3).abs().requires_grad_()),\n 'transforms': ExpTransform(),\n },\n {\n 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True),\n torch.randn(2, 3, 5).abs().requires_grad_()),\n 'transforms': [AffineTransform(torch.randn(3, 5), torch.randn(3, 5)),\n ExpTransform()],\n },\n {\n 'base_distribution': Normal(torch.randn(2, 3, 5, requires_grad=True),\n torch.randn(2, 3, 5).abs().requires_grad_()),\n 'transforms': AffineTransform(1, 2),\n },\n ]),\n Example(Uniform, [\n {\n 'low': torch.zeros(5, 5, requires_grad=True),\n 'high': torch.ones(5, 5, requires_grad=True),\n },\n {\n 'low': torch.zeros(1, requires_grad=True),\n 'high': torch.ones(1, requires_grad=True),\n },\n {\n 'low': torch.tensor([1.0, 1.0], requires_grad=True),\n 'high': torch.tensor([2.0, 3.0], requires_grad=True),\n },\n ]),\n Example(Weibull, [\n {\n 'scale': torch.randn(5, 5).abs().requires_grad_(),\n 'concentration': torch.randn(1).abs().requires_grad_()\n }\n ]),\n Example(MixtureSameFamily, [\n {\n 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)),\n 'component_distribution': Normal(torch.randn(5, requires_grad=True),\n torch.rand(5, requires_grad=True)),\n },\n {\n 'mixture_distribution': Categorical(torch.rand(5, requires_grad=True)),\n 'component_distribution': MultivariateNormal(\n loc=torch.randn(5, 2, requires_grad=True),\n covariance_matrix=torch.tensor([[2.0, 0.3], [0.3, 0.25]], requires_grad=True)),\n },\n ]),\n Example(VonMises, [\n {\n 'loc': torch.tensor(1.0, requires_grad=True),\n 'concentration': torch.tensor(10.0, requires_grad=True)\n },\n {\n 'loc': torch.tensor([0.0, math.pi / 2], requires_grad=True),\n 'concentration': torch.tensor([1.0, 10.0], requires_grad=True)\n },\n ]),\n Example(ContinuousBernoulli, [\n {'probs': torch.tensor([0.7, 0.2, 0.4], requires_grad=True)},\n {'probs': torch.tensor([0.3], requires_grad=True)},\n {'probs': 
0.3},\n {'logits': torch.tensor([0.], requires_grad=True)},\n ])\n]\n\nBAD_EXAMPLES = [\n Example(Bernoulli, [\n {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)},\n {'probs': torch.tensor([-0.5], requires_grad=True)},\n {'probs': 1.00001},\n ]),\n Example(Beta, [\n {\n 'concentration1': torch.tensor([0.0], requires_grad=True),\n 'concentration0': torch.tensor([0.0], requires_grad=True),\n },\n {\n 'concentration1': torch.tensor([-1.0], requires_grad=True),\n 'concentration0': torch.tensor([-2.0], requires_grad=True),\n },\n ]),\n Example(Geometric, [\n {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)},\n {'probs': torch.tensor([-0.3], requires_grad=True)},\n {'probs': 1.00000001},\n ]),\n Example(Categorical, [\n {'probs': torch.tensor([[-0.1, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True)},\n {'probs': torch.tensor([[-1.0, 10.0], [0.0, -1.0]], requires_grad=True)},\n ]),\n Example(Binomial, [\n {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True),\n 'total_count': 10},\n {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True),\n 'total_count': 10},\n ]),\n Example(NegativeBinomial, [\n {'probs': torch.tensor([[-0.0000001, 0.2, 0.3], [0.5, 0.3, 0.2]], requires_grad=True),\n 'total_count': 10},\n {'probs': torch.tensor([[1.0, 0.0], [0.0, 2.0]], requires_grad=True),\n 'total_count': 10},\n ]),\n Example(Cauchy, [\n {'loc': 0.0, 'scale': -1.0},\n {'loc': torch.tensor([0.0]), 'scale': 0.0},\n {'loc': torch.tensor([[0.0], [-2.0]]),\n 'scale': torch.tensor([[-0.000001], [1.0]])}\n ]),\n Example(Chi2, [\n {'df': torch.tensor([0.], requires_grad=True)},\n {'df': torch.tensor([-2.], requires_grad=True)},\n ]),\n Example(StudentT, [\n {'df': torch.tensor([0.], requires_grad=True)},\n {'df': torch.tensor([-2.], requires_grad=True)},\n ]),\n Example(Dirichlet, [\n {'concentration': torch.tensor([0.], requires_grad=True)},\n {'concentration': torch.tensor([-2.], requires_grad=True)}\n ]),\n 
Example(Exponential, [\n {'rate': torch.tensor([0., 0.], requires_grad=True)},\n {'rate': torch.tensor([-2.], requires_grad=True)}\n ]),\n Example(FisherSnedecor, [\n {\n 'df1': torch.tensor([0., 0.], requires_grad=True),\n 'df2': torch.tensor([-1., -100.], requires_grad=True),\n },\n {\n 'df1': torch.tensor([1., 1.], requires_grad=True),\n 'df2': torch.tensor([0., 0.], requires_grad=True),\n }\n ]),\n Example(Gamma, [\n {\n 'concentration': torch.tensor([0., 0.], requires_grad=True),\n 'rate': torch.tensor([-1., -100.], requires_grad=True),\n },\n {\n 'concentration': torch.tensor([1., 1.], requires_grad=True),\n 'rate': torch.tensor([0., 0.], requires_grad=True),\n }\n ]),\n Example(Gumbel, [\n {\n 'loc': torch.tensor([1., 1.], requires_grad=True),\n 'scale': torch.tensor([0., 1.], requires_grad=True),\n },\n {\n 'loc': torch.tensor([1., 1.], requires_grad=True),\n 'scale': torch.tensor([1., -1.], requires_grad=True),\n },\n ]),\n Example(HalfCauchy, [\n {'scale': -1.0},\n {'scale': 0.0},\n {'scale': torch.tensor([[-0.000001], [1.0]])}\n ]),\n Example(HalfNormal, [\n {'scale': torch.tensor([0., 1.], requires_grad=True)},\n {'scale': torch.tensor([1., -1.], requires_grad=True)},\n ]),\n Example(Laplace, [\n {\n 'loc': torch.tensor([1., 1.], requires_grad=True),\n 'scale': torch.tensor([0., 1.], requires_grad=True),\n },\n {\n 'loc': torch.tensor([1., 1.], requires_grad=True),\n 'scale': torch.tensor([1., -1.], requires_grad=True),\n },\n ]),\n Example(LogNormal, [\n {\n 'loc': torch.tensor([1., 1.], requires_grad=True),\n 'scale': torch.tensor([0., 1.], requires_grad=True),\n },\n {\n 'loc': torch.tensor([1., 1.], requires_grad=True),\n 'scale': torch.tensor([1., -1.], requires_grad=True),\n },\n ]),\n Example(MultivariateNormal, [\n {\n 'loc': torch.tensor([1., 1.], requires_grad=True),\n 'covariance_matrix': torch.tensor([[1.0, 0.0], [0.0, -2.0]], requires_grad=True),\n },\n ]),\n Example(Normal, [\n {\n 'loc': torch.tensor([1., 1.], requires_grad=True),\n 
'scale': torch.tensor([0., 1.], requires_grad=True),\n },\n {\n 'loc': torch.tensor([1., 1.], requires_grad=True),\n 'scale': torch.tensor([1., -1.], requires_grad=True),\n },\n {\n 'loc': torch.tensor([1.0, 0.0], requires_grad=True),\n 'scale': torch.tensor([1e-5, -1e-5], requires_grad=True),\n },\n ]),\n Example(OneHotCategorical, [\n {'probs': torch.tensor([[0.1, 0.2, 0.3], [0.1, -10.0, 0.2]], requires_grad=True)},\n {'probs': torch.tensor([[0.0, 0.0], [0.0, 0.0]], requires_grad=True)},\n ]),\n Example(Pareto, [\n {\n 'scale': 0.0,\n 'alpha': 0.0\n },\n {\n 'scale': torch.tensor([0.0, 0.0], requires_grad=True),\n 'alpha': torch.tensor([-1e-5, 0.0], requires_grad=True)\n },\n {\n 'scale': torch.tensor([1.0]),\n 'alpha': -1.0\n }\n ]),\n Example(Poisson, [\n {\n 'rate': torch.tensor([0.0], requires_grad=True),\n },\n {\n 'rate': -1.0,\n }\n ]),\n Example(RelaxedBernoulli, [\n {\n 'temperature': torch.tensor([1.5], requires_grad=True),\n 'probs': torch.tensor([1.7, 0.2, 0.4], requires_grad=True),\n },\n {\n 'temperature': torch.tensor([2.0]),\n 'probs': torch.tensor([-1.0]),\n }\n ]),\n Example(RelaxedOneHotCategorical, [\n {\n 'temperature': torch.tensor([0.5], requires_grad=True),\n 'probs': torch.tensor([[-0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], requires_grad=True)\n },\n {\n 'temperature': torch.tensor([2.0]),\n 'probs': torch.tensor([[-1.0, 0.0], [-1.0, 1.1]])\n }\n ]),\n Example(TransformedDistribution, [\n {\n 'base_distribution': Normal(0, 1),\n 'transforms': lambda x: x,\n },\n {\n 'base_distribution': Normal(0, 1),\n 'transforms': [lambda x: x],\n },\n ]),\n Example(Uniform, [\n {\n 'low': torch.tensor([2.0], requires_grad=True),\n 'high': torch.tensor([2.0], requires_grad=True),\n },\n {\n 'low': torch.tensor([0.0], requires_grad=True),\n 'high': torch.tensor([0.0], requires_grad=True),\n },\n {\n 'low': torch.tensor([1.0], requires_grad=True),\n 'high': torch.tensor([0.0], requires_grad=True),\n }\n ]),\n Example(Weibull, [\n {\n 'scale': torch.tensor([0.0], 
requires_grad=True),
            'concentration': torch.tensor([0.0], requires_grad=True)
        },
        {
            'scale': torch.tensor([1.0], requires_grad=True),
            'concentration': torch.tensor([-1.0], requires_grad=True)
        }
    ]),
    Example(ContinuousBernoulli, [
        {'probs': torch.tensor([1.1, 0.2, 0.4], requires_grad=True)},
        {'probs': torch.tensor([-0.5], requires_grad=True)},
        {'probs': 1.00001},
    ])
]


class TestDistributions(TestCase):
    # Enable the CUDA memory-leak and non-default-stream checks provided by
    # common_utils.TestCase for every test in this class.
    _do_cuda_memory_leak_check = True
    _do_cuda_non_default_stream = True

    def _gradcheck_log_prob(self, dist_ctor, ctor_params):
        # performs gradient checks on log_prob
        distribution = dist_ctor(*ctor_params)
        s = distribution.sample()
        # Only floating-point samples can carry gradients; detach first so the
        # sample is a leaf for gradcheck.
        if s.is_floating_point():
            s = s.detach().requires_grad_()

        expected_shape = distribution.batch_shape + distribution.event_shape
        self.assertEqual(s.size(), expected_shape)

        def apply_fn(s, *params):
            return dist_ctor(*params).log_prob(s)

        gradcheck(apply_fn, (s,) + tuple(ctor_params), raise_exception=True)

    def _check_log_prob(self, dist, asset_fn):
        # checks that the log_prob matches a reference function
        # asset_fn(i, value, log_prob) is called once per flattened sample.
        s = dist.sample()
        log_probs = dist.log_prob(s)
        log_probs_data_flat = log_probs.view(-1)
        s_data_flat = s.view(len(log_probs_data_flat), -1)
        for i, (val, log_prob) in enumerate(zip(s_data_flat, log_probs_data_flat)):
            asset_fn(i, val.squeeze(), log_prob)

    def _check_sampler_sampler(self, torch_dist, ref_dist, message, multivariate=False,
                               circular=False, num_samples=10000, failure_rate=1e-3):
        # Checks that the .sample() method matches a reference function.
        torch_samples = torch_dist.sample((num_samples,)).squeeze()
        torch_samples = torch_samples.cpu().numpy()
        ref_samples = ref_dist.rvs(num_samples).astype(np.float64)
        if multivariate:
            # Project onto a random axis.
            axis = np.random.normal(size=torch_samples.shape[-1])
            axis /= np.linalg.norm(axis)
            torch_samples = np.dot(torch_samples, axis)
            ref_samples = np.dot(ref_samples, axis)
        samples = [(x, +1) for
x in torch_samples] + [(x, -1) for x in ref_samples]
        if circular:
            # Map circular quantities (e.g. angles) onto a comparable scale.
            samples = [(np.cos(x), v) for (x, v) in samples]
        shuffle(samples)  # necessary to prevent stable sort from making uneven bins for discrete
        samples.sort(key=lambda x: x[0])
        samples = np.array(samples)[:, 1]

        # Aggregate into bins filled with roughly zero-mean unit-variance RVs.
        num_bins = 10
        samples_per_bin = len(samples) // num_bins
        bins = samples.reshape((num_bins, samples_per_bin)).mean(axis=1)
        stddev = samples_per_bin ** -0.5
        threshold = stddev * scipy.special.erfinv(1 - 2 * failure_rate / num_bins)
        message = '{}.sample() is biased:\n{}'.format(message, bins)
        for bias in bins:
            self.assertLess(-threshold, bias, message)
            self.assertLess(bias, threshold, message)

    @unittest.skipIf(not TEST_NUMPY, "NumPy not found")
    def _check_sampler_discrete(self, torch_dist, ref_dist, message,
                                num_samples=10000, failure_rate=1e-3):
        """Runs a Chi2-test for the support, but ignores tail instead of combining"""
        torch_samples = torch_dist.sample((num_samples,)).squeeze()
        torch_samples = torch_samples.cpu().numpy()
        unique, counts = np.unique(torch_samples, return_counts=True)
        pmf = ref_dist.pmf(unique)
        # Keep only bins with enough observed and expected mass for the
        # chi-square approximation to be valid.
        msk = (counts > 5) & ((pmf * num_samples) > 5)
        self.assertGreater(pmf[msk].sum(), 0.9, "Distribution is too sparse for test; try increasing num_samples")
        chisq, p = scipy.stats.chisquare(counts[msk], pmf[msk] * num_samples)
        self.assertGreater(p, failure_rate, message)

    def _check_enumerate_support(self, dist, examples):
        # Verifies enumerate_support() against hand-written expected supports,
        # both without (expand=False) and with (expand=True) batch expansion.
        for params, expected in examples:
            params = {k: torch.tensor(v) for k, v in params.items()}
            expected = torch.tensor(expected)
            d = dist(**params)
            actual = d.enumerate_support(expand=False)
            # TODO(#38095): Replace assertEqualIgnoreType.
See issue #38095
            self.assertEqualIgnoreType(actual, expected)
            actual = d.enumerate_support(expand=True)
            expected_with_expand = expected.expand((-1,) + d.batch_shape + d.event_shape)
            # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
            self.assertEqualIgnoreType(actual, expected_with_expand)

    def test_repr(self):
        # repr() of every example distribution must start with its class name.
        for Dist, params in EXAMPLES:
            for param in params:
                dist = Dist(**param)
                self.assertTrue(repr(dist).startswith(dist.__class__.__name__))

    def test_sample_detached(self):
        # .sample() must never be part of the autograd graph, even when the
        # distribution parameters require grad.
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                variable_params = [p for p in param.values() if getattr(p, 'requires_grad', False)]
                if not variable_params:
                    continue
                dist = Dist(**param)
                sample = dist.sample()
                self.assertFalse(sample.requires_grad,
                                 msg='{} example {}/{}, .sample() is not detached'.format(
                                     Dist.__name__, i + 1, len(params)))

    def test_rsample_requires_grad(self):
        # .rsample() must propagate gradients from differentiable parameters
        # for every distribution that advertises has_rsample.
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                if not any(getattr(p, 'requires_grad', False) for p in param.values()):
                    continue
                dist = Dist(**param)
                if not dist.has_rsample:
                    continue
                sample = dist.rsample()
                self.assertTrue(sample.requires_grad,
                                msg='{} example {}/{}, .rsample() does not require grad'.format(
                                    Dist.__name__, i + 1, len(params)))

    def test_enumerate_support_type(self):
        # sample() and enumerate_support() should return the same type.
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(**param)
                try:
                    self.assertTrue(type(dist.sample()) is type(dist.enumerate_support()),
                                    msg=('{} example {}/{}, return type mismatch between ' +
                                         'sample and enumerate_support.').format(Dist.__name__, i + 1, len(params)))
                except NotImplementedError:
                    # Distributions with non-enumerable support are exempt.
                    pass

    def test_lazy_property_grad(self):
        x = torch.randn(1, requires_grad=True)

        class Dummy(object):
            @lazy_property
            def y(self):
                return x + 1

        def test():
            x.grad = None
            Dummy().y.backward()
            self.assertEqual(x.grad, torch.ones(1))

        test()
        with torch.no_grad():
test()

        # A lazy_property first touched under torch.no_grad() must still be
        # differentiable when accessed again in grad mode.
        mean = torch.randn(2)
        cov = torch.eye(2, requires_grad=True)
        distn = MultivariateNormal(mean, cov)
        with torch.no_grad():
            distn.scale_tril
        distn.scale_tril.sum().backward()
        self.assertIsNotNone(cov.grad)

    def test_has_examples(self):
        # Every concrete Distribution subclass visible in this module must
        # have an entry in EXAMPLES so the generic tests cover it.
        distributions_with_examples = {e.Dist for e in EXAMPLES}
        for Dist in globals().values():
            if isinstance(Dist, type) and issubclass(Dist, Distribution) \
                    and Dist is not Distribution and Dist is not ExponentialFamily:
                self.assertIn(Dist, distributions_with_examples,
                              "Please add {} to the EXAMPLES list in test_distributions.py".format(Dist.__name__))

    def test_distribution_expand(self):
        # expand() must preserve class, log_prob, and (where implemented)
        # mean/variance, while broadcasting the batch shape.
        shapes = [torch.Size(), torch.Size((2,)), torch.Size((2, 1))]
        for Dist, params in EXAMPLES:
            for param in params:
                for shape in shapes:
                    d = Dist(**param)
                    expanded_shape = shape + d.batch_shape
                    original_shape = d.batch_shape + d.event_shape
                    expected_shape = shape + original_shape
                    expanded = d.expand(batch_shape=list(expanded_shape))
                    sample = expanded.sample()
                    actual_shape = expanded.sample().shape
                    self.assertEqual(expanded.__class__, d.__class__)
                    self.assertEqual(d.sample().shape, original_shape)
                    self.assertEqual(expanded.log_prob(sample), d.log_prob(sample))
                    self.assertEqual(actual_shape, expected_shape)
                    self.assertEqual(expanded.batch_shape, expanded_shape)
                    try:
                        self.assertEqual(expanded.mean,
                                         d.mean.expand(expanded_shape + d.event_shape))
                        self.assertEqual(expanded.variance,
                                         d.variance.expand(expanded_shape + d.event_shape))
                    except NotImplementedError:
                        # Not every distribution implements mean/variance.
                        pass

    def test_distribution_subclass_expand(self):
        # expand() must also work on ad-hoc subclasses of each distribution.
        expand_by = torch.Size((2,))
        for Dist, params in EXAMPLES:

            class SubClass(Dist):
                pass

            for param in params:
                d = SubClass(**param)
                expanded_shape = expand_by + d.batch_shape
                original_shape = d.batch_shape + d.event_shape
                expected_shape = expand_by + original_shape
                expanded = d.expand(batch_shape=expanded_shape)
                sample = expanded.sample()
actual_shape = expanded.sample().shape
                self.assertEqual(expanded.__class__, d.__class__)
                self.assertEqual(d.sample().shape, original_shape)
                self.assertEqual(expanded.log_prob(sample), d.log_prob(sample))
                self.assertEqual(actual_shape, expected_shape)

    def test_bernoulli(self):
        # Covers tensor, 0-dim tensor, and plain-float parametrizations.
        p = torch.tensor([0.7, 0.2, 0.4], requires_grad=True)
        r = torch.tensor(0.3, requires_grad=True)
        s = 0.3
        self.assertEqual(Bernoulli(p).sample((8,)).size(), (8, 3))
        self.assertFalse(Bernoulli(p).sample().requires_grad)
        self.assertEqual(Bernoulli(r).sample((8,)).size(), (8,))
        self.assertEqual(Bernoulli(r).sample().size(), ())
        self.assertEqual(Bernoulli(r).sample((3, 2)).size(), (3, 2,))
        self.assertEqual(Bernoulli(s).sample().size(), ())
        self._gradcheck_log_prob(Bernoulli, (p,))

        def ref_log_prob(idx, val, log_prob):
            prob = p[idx]
            self.assertEqual(log_prob, math.log(prob if val else 1 - prob))

        self._check_log_prob(Bernoulli(p), ref_log_prob)
        # logit = log(p) - log1p(-p); the logits parametrization must agree.
        self._check_log_prob(Bernoulli(logits=p.log() - (-p).log1p()), ref_log_prob)
        self.assertRaises(NotImplementedError, Bernoulli(r).rsample)

        # check entropy computation
        self.assertEqual(Bernoulli(p).entropy(), torch.tensor([0.6108, 0.5004, 0.6730]), atol=1e-4, rtol=0)
        self.assertEqual(Bernoulli(torch.tensor([0.0])).entropy(), torch.tensor([0.0]))
        self.assertEqual(Bernoulli(s).entropy(), torch.tensor(0.6108), atol=1e-4, rtol=0)

    def test_bernoulli_enumerate_support(self):
        # Support is always {0, 1}, broadcast over the batch shape.
        examples = [
            ({"probs": [0.1]}, [[0], [1]]),
            ({"probs": [0.1, 0.9]}, [[0], [1]]),
            ({"probs": [[0.1, 0.2], [0.3, 0.4]]}, [[[0]], [[1]]]),
        ]
        self._check_enumerate_support(Bernoulli, examples)

    def test_bernoulli_3d(self):
        # sample_shape is prepended to the 3-d batch shape.
        p = torch.full((2, 3, 5), 0.5).requires_grad_()
        self.assertEqual(Bernoulli(p).sample().size(), (2, 3, 5))
        self.assertEqual(Bernoulli(p).sample(sample_shape=(2, 5)).size(),
                         (2, 5, 2, 3, 5))
        self.assertEqual(Bernoulli(p).sample((2,)).size(), (2, 2, 3, 5))

    def test_geometric(self):
        p =
torch.tensor([0.7, 0.2, 0.4], requires_grad=True)
        r = torch.tensor(0.3, requires_grad=True)
        s = 0.3
        self.assertEqual(Geometric(p).sample((8,)).size(), (8, 3))
        # Degenerate p=1: success on the first trial, so support collapses
        # to {0} and log_prob(1) is -inf.
        self.assertEqual(Geometric(1).sample(), 0)
        self.assertEqual(Geometric(1).log_prob(torch.tensor(1.)), -inf)
        self.assertEqual(Geometric(1).log_prob(torch.tensor(0.)), 0)
        self.assertFalse(Geometric(p).sample().requires_grad)
        self.assertEqual(Geometric(r).sample((8,)).size(), (8,))
        self.assertEqual(Geometric(r).sample().size(), ())
        self.assertEqual(Geometric(r).sample((3, 2)).size(), (3, 2))
        self.assertEqual(Geometric(s).sample().size(), ())
        self._gradcheck_log_prob(Geometric, (p,))
        # p=0 is rejected (the distribution would never terminate).
        self.assertRaises(ValueError, lambda: Geometric(0))
        self.assertRaises(NotImplementedError, Geometric(r).rsample)

    @unittest.skipIf(not TEST_NUMPY, "NumPy not found")
    def test_geometric_log_prob_and_entropy(self):
        p = torch.tensor([0.7, 0.2, 0.4], requires_grad=True)
        s = 0.3

        def ref_log_prob(idx, val, log_prob):
            prob = p[idx].detach()
            # scipy's geom counts trials from 1; loc=-1 shifts it to count
            # failures before the first success, matching torch.
            self.assertEqual(log_prob, scipy.stats.geom(prob, loc=-1).logpmf(val))

        self._check_log_prob(Geometric(p), ref_log_prob)
        self._check_log_prob(Geometric(logits=p.log() - (-p).log1p()), ref_log_prob)

        # check entropy computation
        self.assertEqual(Geometric(p).entropy(), scipy.stats.geom(p.detach().numpy(), loc=-1).entropy(), atol=1e-3, rtol=0)
        self.assertEqual(float(Geometric(s).entropy()), scipy.stats.geom(s, loc=-1).entropy().item(), atol=1e-3, rtol=0)

    @unittest.skipIf(not TEST_NUMPY, "NumPy not found")
    def test_geometric_sample(self):
        set_rng_seed(0)  # see Note [Randomized statistical tests]
        for prob in [0.01, 0.18, 0.8]:
            self._check_sampler_discrete(Geometric(prob),
                                         scipy.stats.geom(p=prob, loc=-1),
                                         'Geometric(prob={})'.format(prob))

    def test_binomial(self):
        p = torch.arange(0.05, 1, 0.1).requires_grad_()
        for total_count in [1, 2, 10]:
            self._gradcheck_log_prob(lambda p: Binomial(total_count, p), [p])
self._gradcheck_log_prob(lambda p: Binomial(total_count, None, p.log()), [p])
        self.assertRaises(NotImplementedError, Binomial(10, p).rsample)
        self.assertRaises(NotImplementedError, Binomial(10, p).entropy)

    @unittest.skipIf(not TEST_NUMPY, "NumPy not found")
    def test_binomial_sample(self):
        set_rng_seed(0)  # see Note [Randomized statistical tests]
        for prob in [0.01, 0.1, 0.5, 0.8, 0.9]:
            for count in [2, 10, 100, 500]:
                self._check_sampler_discrete(Binomial(total_count=count, probs=prob),
                                             scipy.stats.binom(count, prob),
                                             'Binomial(total_count={}, probs={})'.format(count, prob))

    @unittest.skipIf(not TEST_NUMPY, "NumPy not found")
    def test_binomial_log_prob(self):
        # Compare log_prob against scipy for both probs and logits forms.
        probs = torch.arange(0.05, 1, 0.1)
        for total_count in [1, 2, 10]:

            def ref_log_prob(idx, x, log_prob):
                p = probs.view(-1)[idx].item()
                expected = scipy.stats.binom(total_count, p).logpmf(x)
                self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)

            self._check_log_prob(Binomial(total_count, probs), ref_log_prob)
            logits = probs_to_logits(probs, is_binary=True)
            self._check_log_prob(Binomial(total_count, logits=logits), ref_log_prob)

    def test_binomial_stable(self):
        # Extreme logits must not overflow to inf/nan in log_prob.
        logits = torch.tensor([-100., 100.], dtype=torch.float)
        total_count = 1.
        x = torch.tensor([0., 0.], dtype=torch.float)
        log_prob = Binomial(total_count, logits=logits).log_prob(x)
        self.assertTrue(torch.isfinite(log_prob).all())

        # make sure that the grad at logits=0, value=0 is 0.5
        x = torch.tensor(0., requires_grad=True)
        y = Binomial(total_count, logits=x).log_prob(torch.tensor(0.))
        self.assertEqual(grad(y, x)[0], torch.tensor(-0.5))

    @unittest.skipIf(not TEST_NUMPY, "NumPy not found")
    def test_binomial_log_prob_vectorized_count(self):
        # total_count may itself be a tensor and broadcast against probs.
        probs = torch.tensor([0.2, 0.7, 0.9])
        for total_count, sample in [(torch.tensor([10]), torch.tensor([7., 3., 9.])),
                                    (torch.tensor([1, 2, 10]), torch.tensor([0., 1., 9.]))]:
            log_prob = Binomial(total_count, probs).log_prob(sample)
expected = scipy.stats.binom(total_count.cpu().numpy(), probs.cpu().numpy()).logpmf(sample)\n self.assertEqual(log_prob, expected, atol=1e-4, rtol=0)\n\n def test_binomial_enumerate_support(self):\n examples = [\n ({\"probs\": [0.1], \"total_count\": 2}, [[0], [1], [2]]),\n ({\"probs\": [0.1, 0.9], \"total_count\": 2}, [[0], [1], [2]]),\n ({\"probs\": [[0.1, 0.2], [0.3, 0.4]], \"total_count\": 3}, [[[0]], [[1]], [[2]], [[3]]]),\n ]\n self._check_enumerate_support(Binomial, examples)\n\n def test_binomial_extreme_vals(self):\n total_count = 100\n bin0 = Binomial(total_count, 0)\n self.assertEqual(bin0.sample(), 0)\n self.assertEqual(bin0.log_prob(torch.tensor([0.]))[0], 0, atol=1e-3, rtol=0)\n self.assertEqual(float(bin0.log_prob(torch.tensor([1.])).exp()), 0)\n bin1 = Binomial(total_count, 1)\n self.assertEqual(bin1.sample(), total_count)\n self.assertEqual(bin1.log_prob(torch.tensor([float(total_count)]))[0], 0, atol=1e-3, rtol=0)\n self.assertEqual(float(bin1.log_prob(torch.tensor([float(total_count - 1)])).exp()), 0)\n zero_counts = torch.zeros(torch.Size((2, 2)))\n bin2 = Binomial(zero_counts, 1)\n self.assertEqual(bin2.sample(), zero_counts)\n self.assertEqual(bin2.log_prob(zero_counts), zero_counts)\n\n def test_binomial_vectorized_count(self):\n set_rng_seed(1) # see Note [Randomized statistical tests]\n total_count = torch.tensor([[4, 7], [3, 8]])\n bin0 = Binomial(total_count, torch.tensor(1.))\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(bin0.sample(), total_count)\n bin1 = Binomial(total_count, torch.tensor(0.5))\n samples = bin1.sample(torch.Size((100000,)))\n self.assertTrue((samples <= total_count.type_as(samples)).all())\n self.assertEqual(samples.mean(dim=0), bin1.mean, atol=0.02, rtol=0)\n self.assertEqual(samples.var(dim=0), bin1.variance, atol=0.02, rtol=0)\n\n def test_negative_binomial(self):\n p = torch.arange(0.05, 1, 0.1).requires_grad_()\n for total_count in [1, 2, 10]:\n self._gradcheck_log_prob(lambda p: NegativeBinomial(total_count, p), [p])\n self._gradcheck_log_prob(lambda p: NegativeBinomial(total_count, None, p.log()), [p])\n self.assertRaises(NotImplementedError, NegativeBinomial(10, p).rsample)\n self.assertRaises(NotImplementedError, NegativeBinomial(10, p).entropy)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_negative_binomial_log_prob(self):\n probs = torch.arange(0.05, 1, 0.1)\n for total_count in [1, 2, 10]:\n\n def ref_log_prob(idx, x, log_prob):\n p = probs.view(-1)[idx].item()\n expected = scipy.stats.nbinom(total_count, 1 - p).logpmf(x)\n self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)\n\n self._check_log_prob(NegativeBinomial(total_count, probs), ref_log_prob)\n logits = probs_to_logits(probs, is_binary=True)\n self._check_log_prob(NegativeBinomial(total_count, logits=logits), ref_log_prob)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_negative_binomial_log_prob_vectorized_count(self):\n probs = torch.tensor([0.2, 0.7, 0.9])\n for total_count, sample in [(torch.tensor([10]), torch.tensor([7., 3., 9.])),\n (torch.tensor([1, 2, 10]), torch.tensor([0., 1., 9.]))]:\n log_prob = NegativeBinomial(total_count, probs).log_prob(sample)\n expected = scipy.stats.nbinom(total_count.cpu().numpy(), 1 - probs.cpu().numpy()).logpmf(sample)\n self.assertEqual(log_prob, expected, atol=1e-4, rtol=0)\n\n def test_multinomial_1d(self):\n total_count = 10\n p = torch.tensor([0.1, 0.2, 
0.3], requires_grad=True)\n self.assertEqual(Multinomial(total_count, p).sample().size(), (3,))\n self.assertEqual(Multinomial(total_count, p).sample((2, 2)).size(), (2, 2, 3))\n self.assertEqual(Multinomial(total_count, p).sample((1,)).size(), (1, 3))\n self._gradcheck_log_prob(lambda p: Multinomial(total_count, p), [p])\n self._gradcheck_log_prob(lambda p: Multinomial(total_count, None, p.log()), [p])\n self.assertRaises(NotImplementedError, Multinomial(10, p).rsample)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_multinomial_1d_log_prob(self):\n total_count = 10\n p = torch.tensor([0.1, 0.2, 0.3], requires_grad=True)\n dist = Multinomial(total_count, probs=p)\n x = dist.sample()\n log_prob = dist.log_prob(x)\n expected = torch.tensor(scipy.stats.multinomial.logpmf(x.numpy(), n=total_count, p=dist.probs.detach().numpy()))\n self.assertEqual(log_prob, expected)\n\n dist = Multinomial(total_count, logits=p.log())\n x = dist.sample()\n log_prob = dist.log_prob(x)\n expected = torch.tensor(scipy.stats.multinomial.logpmf(x.numpy(), n=total_count, p=dist.probs.detach().numpy()))\n self.assertEqual(log_prob, expected)\n\n def test_multinomial_2d(self):\n total_count = 10\n probabilities = [[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]]\n probabilities_1 = [[1.0, 0.0], [0.0, 1.0]]\n p = torch.tensor(probabilities, requires_grad=True)\n s = torch.tensor(probabilities_1, requires_grad=True)\n self.assertEqual(Multinomial(total_count, p).sample().size(), (2, 3))\n self.assertEqual(Multinomial(total_count, p).sample(sample_shape=(3, 4)).size(), (3, 4, 2, 3))\n self.assertEqual(Multinomial(total_count, p).sample((6,)).size(), (6, 2, 3))\n set_rng_seed(0)\n self._gradcheck_log_prob(lambda p: Multinomial(total_count, p), [p])\n self._gradcheck_log_prob(lambda p: Multinomial(total_count, None, p.log()), [p])\n\n # sample check for extreme value of probs\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(Multinomial(total_count, s).sample(),\n torch.tensor([[total_count, 0], [0, total_count]]))\n\n # check entropy computation\n self.assertRaises(NotImplementedError, Multinomial(10, p).entropy)\n\n def test_categorical_1d(self):\n p = torch.tensor([0.1, 0.2, 0.3], requires_grad=True)\n self.assertTrue(is_all_nan(Categorical(p).mean))\n self.assertTrue(is_all_nan(Categorical(p).variance))\n self.assertEqual(Categorical(p).sample().size(), ())\n self.assertFalse(Categorical(p).sample().requires_grad)\n self.assertEqual(Categorical(p).sample((2, 2)).size(), (2, 2))\n self.assertEqual(Categorical(p).sample((1,)).size(), (1,))\n self._gradcheck_log_prob(Categorical, (p,))\n self.assertRaises(NotImplementedError, Categorical(p).rsample)\n\n def test_categorical_2d(self):\n probabilities = [[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]]\n probabilities_1 = [[1.0, 0.0], [0.0, 1.0]]\n p = torch.tensor(probabilities, requires_grad=True)\n s = torch.tensor(probabilities_1, requires_grad=True)\n self.assertEqual(Categorical(p).mean.size(), (2,))\n self.assertEqual(Categorical(p).variance.size(), (2,))\n self.assertTrue(is_all_nan(Categorical(p).mean))\n self.assertTrue(is_all_nan(Categorical(p).variance))\n self.assertEqual(Categorical(p).sample().size(), (2,))\n self.assertEqual(Categorical(p).sample(sample_shape=(3, 4)).size(), (3, 4, 2))\n self.assertEqual(Categorical(p).sample((6,)).size(), (6, 2))\n self._gradcheck_log_prob(Categorical, (p,))\n\n # sample check for extreme value of probs\n set_rng_seed(0)\n self.assertEqual(Categorical(s).sample(sample_shape=(2,)),\n torch.tensor([[0, 1], [0, 1]]))\n\n def ref_log_prob(idx, val, log_prob):\n sample_prob = p[idx][val] / p[idx].sum()\n self.assertEqual(log_prob, math.log(sample_prob))\n\n self._check_log_prob(Categorical(p), ref_log_prob)\n self._check_log_prob(Categorical(logits=p.log()), ref_log_prob)\n\n # check entropy computation\n self.assertEqual(Categorical(p).entropy(), 
torch.tensor([1.0114, 1.0297]), atol=1e-4, rtol=0)\n self.assertEqual(Categorical(s).entropy(), torch.tensor([0.0, 0.0]))\n # issue gh-40553\n logits = p.log()\n logits[1, 1] = logits[0, 2] = float('-inf')\n e = Categorical(logits=logits).entropy()\n self.assertEqual(e, torch.tensor([0.6365, 0.5983]), atol=1e-4, rtol=0)\n\n def test_categorical_enumerate_support(self):\n examples = [\n ({\"probs\": [0.1, 0.2, 0.7]}, [0, 1, 2]),\n ({\"probs\": [[0.1, 0.9], [0.3, 0.7]]}, [[0], [1]]),\n ]\n self._check_enumerate_support(Categorical, examples)\n\n def test_one_hot_categorical_1d(self):\n p = torch.tensor([0.1, 0.2, 0.3], requires_grad=True)\n self.assertEqual(OneHotCategorical(p).sample().size(), (3,))\n self.assertFalse(OneHotCategorical(p).sample().requires_grad)\n self.assertEqual(OneHotCategorical(p).sample((2, 2)).size(), (2, 2, 3))\n self.assertEqual(OneHotCategorical(p).sample((1,)).size(), (1, 3))\n self._gradcheck_log_prob(OneHotCategorical, (p,))\n self.assertRaises(NotImplementedError, OneHotCategorical(p).rsample)\n\n def test_one_hot_categorical_2d(self):\n probabilities = [[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]]\n probabilities_1 = [[1.0, 0.0], [0.0, 1.0]]\n p = torch.tensor(probabilities, requires_grad=True)\n s = torch.tensor(probabilities_1, requires_grad=True)\n self.assertEqual(OneHotCategorical(p).sample().size(), (2, 3))\n self.assertEqual(OneHotCategorical(p).sample(sample_shape=(3, 4)).size(), (3, 4, 2, 3))\n self.assertEqual(OneHotCategorical(p).sample((6,)).size(), (6, 2, 3))\n self._gradcheck_log_prob(OneHotCategorical, (p,))\n\n dist = OneHotCategorical(p)\n x = dist.sample()\n self.assertEqual(dist.log_prob(x), Categorical(p).log_prob(x.max(-1)[1]))\n\n def test_one_hot_categorical_enumerate_support(self):\n examples = [\n ({\"probs\": [0.1, 0.2, 0.7]}, [[1, 0, 0], [0, 1, 0], [0, 0, 1]]),\n ({\"probs\": [[0.1, 0.9], [0.3, 0.7]]}, [[[1, 0]], [[0, 1]]]),\n ]\n self._check_enumerate_support(OneHotCategorical, examples)\n\n def 
test_poisson_shape(self):\n rate = torch.randn(2, 3).abs().requires_grad_()\n rate_1d = torch.randn(1).abs().requires_grad_()\n self.assertEqual(Poisson(rate).sample().size(), (2, 3))\n self.assertEqual(Poisson(rate).sample((7,)).size(), (7, 2, 3))\n self.assertEqual(Poisson(rate_1d).sample().size(), (1,))\n self.assertEqual(Poisson(rate_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(Poisson(2.0).sample((2,)).size(), (2,))\n\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_poisson_log_prob(self):\n rate = torch.randn(2, 3).abs().requires_grad_()\n rate_1d = torch.randn(1).abs().requires_grad_()\n\n def ref_log_prob(idx, x, log_prob):\n l = rate.view(-1)[idx].detach()\n expected = scipy.stats.poisson.logpmf(x, l)\n self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)\n\n set_rng_seed(0)\n self._check_log_prob(Poisson(rate), ref_log_prob)\n self._gradcheck_log_prob(Poisson, (rate,))\n self._gradcheck_log_prob(Poisson, (rate_1d,))\n\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_poisson_sample(self):\n set_rng_seed(1) # see Note [Randomized statistical tests]\n for rate in [0.1, 1.0, 5.0]:\n self._check_sampler_discrete(Poisson(rate),\n scipy.stats.poisson(rate),\n 'Poisson(lambda={})'.format(rate),\n failure_rate=1e-3)\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA not found\")\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_poisson_gpu_sample(self):\n set_rng_seed(1)\n for rate in [0.12, 0.9, 4.0]:\n self._check_sampler_discrete(Poisson(torch.tensor([rate]).cuda()),\n scipy.stats.poisson(rate),\n 'Poisson(lambda={}, cuda)'.format(rate),\n failure_rate=1e-3)\n\n def test_relaxed_bernoulli(self):\n p = torch.tensor([0.7, 0.2, 0.4], requires_grad=True)\n r = torch.tensor(0.3, requires_grad=True)\n s = 0.3\n temp = torch.tensor(0.67, requires_grad=True)\n self.assertEqual(RelaxedBernoulli(temp, p).sample((8,)).size(), (8, 3))\n self.assertFalse(RelaxedBernoulli(temp, p).sample().requires_grad)\n 
self.assertEqual(RelaxedBernoulli(temp, r).sample((8,)).size(), (8,))\n self.assertEqual(RelaxedBernoulli(temp, r).sample().size(), ())\n self.assertEqual(RelaxedBernoulli(temp, r).sample((3, 2)).size(), (3, 2,))\n self.assertEqual(RelaxedBernoulli(temp, s).sample().size(), ())\n self._gradcheck_log_prob(RelaxedBernoulli, (temp, p))\n self._gradcheck_log_prob(RelaxedBernoulli, (temp, r))\n\n # test that rsample doesn't fail\n s = RelaxedBernoulli(temp, p).rsample()\n s.backward(torch.ones_like(s))\n\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_rounded_relaxed_bernoulli(self):\n set_rng_seed(0) # see Note [Randomized statistical tests]\n\n class Rounded(object):\n def __init__(self, dist):\n self.dist = dist\n\n def sample(self, *args, **kwargs):\n return torch.round(self.dist.sample(*args, **kwargs))\n\n for probs, temp in product([0.1, 0.2, 0.8], [0.1, 1.0, 10.0]):\n self._check_sampler_discrete(Rounded(RelaxedBernoulli(temp, probs)),\n scipy.stats.bernoulli(probs),\n 'Rounded(RelaxedBernoulli(temp={}, probs={}))'.format(temp, probs),\n failure_rate=1e-3)\n\n for probs in [0.001, 0.2, 0.999]:\n equal_probs = torch.tensor(0.5)\n dist = RelaxedBernoulli(1e10, probs)\n s = dist.rsample()\n self.assertEqual(equal_probs, s)\n\n def test_relaxed_one_hot_categorical_1d(self):\n p = torch.tensor([0.1, 0.2, 0.3], requires_grad=True)\n temp = torch.tensor(0.67, requires_grad=True)\n self.assertEqual(RelaxedOneHotCategorical(probs=p, temperature=temp).sample().size(), (3,))\n self.assertFalse(RelaxedOneHotCategorical(probs=p, temperature=temp).sample().requires_grad)\n self.assertEqual(RelaxedOneHotCategorical(probs=p, temperature=temp).sample((2, 2)).size(), (2, 2, 3))\n self.assertEqual(RelaxedOneHotCategorical(probs=p, temperature=temp).sample((1,)).size(), (1, 3))\n self._gradcheck_log_prob(RelaxedOneHotCategorical, (temp, p))\n\n def test_relaxed_one_hot_categorical_2d(self):\n probabilities = [[0.1, 0.2, 0.3], [0.5, 0.3, 0.2]]\n probabilities_1 = 
[[1.0, 0.0], [0.0, 1.0]]\n temp = torch.tensor([3.0], requires_grad=True)\n # The lower the temperature, the more unstable the log_prob gradcheck is\n # w.r.t. the sample. Values below 0.25 empirically fail the default tol.\n temp_2 = torch.tensor([0.25], requires_grad=True)\n p = torch.tensor(probabilities, requires_grad=True)\n s = torch.tensor(probabilities_1, requires_grad=True)\n self.assertEqual(RelaxedOneHotCategorical(temp, p).sample().size(), (2, 3))\n self.assertEqual(RelaxedOneHotCategorical(temp, p).sample(sample_shape=(3, 4)).size(), (3, 4, 2, 3))\n self.assertEqual(RelaxedOneHotCategorical(temp, p).sample((6,)).size(), (6, 2, 3))\n self._gradcheck_log_prob(RelaxedOneHotCategorical, (temp, p))\n self._gradcheck_log_prob(RelaxedOneHotCategorical, (temp_2, p))\n\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_argmax_relaxed_categorical(self):\n set_rng_seed(0) # see Note [Randomized statistical tests]\n\n class ArgMax(object):\n def __init__(self, dist):\n self.dist = dist\n\n def sample(self, *args, **kwargs):\n s = self.dist.sample(*args, **kwargs)\n _, idx = torch.max(s, -1)\n return idx\n\n class ScipyCategorical(object):\n def __init__(self, dist):\n self.dist = dist\n\n def pmf(self, samples):\n new_samples = np.zeros(samples.shape + self.dist.p.shape)\n new_samples[np.arange(samples.shape[0]), samples] = 1\n return self.dist.pmf(new_samples)\n\n for probs, temp in product([torch.tensor([0.1, 0.9]), torch.tensor([0.2, 0.2, 0.6])], [0.1, 1.0, 10.0]):\n self._check_sampler_discrete(ArgMax(RelaxedOneHotCategorical(temp, probs)),\n ScipyCategorical(scipy.stats.multinomial(1, probs)),\n 'Rounded(RelaxedOneHotCategorical(temp={}, probs={}))'.format(temp, probs),\n failure_rate=1e-3)\n\n for probs in [torch.tensor([0.1, 0.9]), torch.tensor([0.2, 0.2, 0.6])]:\n equal_probs = torch.ones(probs.size()) / probs.size()[0]\n dist = RelaxedOneHotCategorical(1e10, probs)\n s = dist.rsample()\n self.assertEqual(equal_probs, s)\n\n def 
test_uniform(self):\n low = torch.zeros(5, 5, requires_grad=True)\n high = (torch.ones(5, 5) * 3).requires_grad_()\n low_1d = torch.zeros(1, requires_grad=True)\n high_1d = (torch.ones(1) * 3).requires_grad_()\n self.assertEqual(Uniform(low, high).sample().size(), (5, 5))\n self.assertEqual(Uniform(low, high).sample((7,)).size(), (7, 5, 5))\n self.assertEqual(Uniform(low_1d, high_1d).sample().size(), (1,))\n self.assertEqual(Uniform(low_1d, high_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(Uniform(0.0, 1.0).sample((1,)).size(), (1,))\n\n # Check log_prob computation when value outside range\n uniform = Uniform(low_1d, high_1d)\n above_high = torch.tensor([4.0])\n below_low = torch.tensor([-1.0])\n self.assertEqual(uniform.log_prob(above_high).item(), -inf)\n self.assertEqual(uniform.log_prob(below_low).item(), -inf)\n\n # check cdf computation when value outside range\n self.assertEqual(uniform.cdf(below_low).item(), 0)\n self.assertEqual(uniform.cdf(above_high).item(), 1)\n\n set_rng_seed(1)\n self._gradcheck_log_prob(Uniform, (low, high))\n self._gradcheck_log_prob(Uniform, (low, 1.0))\n self._gradcheck_log_prob(Uniform, (0.0, high))\n\n state = torch.get_rng_state()\n rand = low.new(low.size()).uniform_()\n torch.set_rng_state(state)\n u = Uniform(low, high).rsample()\n u.backward(torch.ones_like(u))\n self.assertEqual(low.grad, 1 - rand)\n self.assertEqual(high.grad, rand)\n low.grad.zero_()\n high.grad.zero_()\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_vonmises_sample(self):\n for loc in [0.0, math.pi / 2.0]:\n for concentration in [0.03, 0.3, 1.0, 10.0, 100.0]:\n self._check_sampler_sampler(VonMises(loc, concentration),\n scipy.stats.vonmises(loc=loc, kappa=concentration),\n \"VonMises(loc={}, concentration={})\".format(loc, concentration),\n num_samples=int(1e5), circular=True)\n\n def test_vonmises_logprob(self):\n concentrations = [0.01, 0.03, 0.1, 0.3, 1.0, 3.0, 10.0, 30.0, 100.0]\n for concentration in concentrations:\n 
grid = torch.arange(0., 2 * math.pi, 1e-4)\n prob = VonMises(0.0, concentration).log_prob(grid).exp()\n norm = prob.mean().item() * 2 * math.pi\n self.assertLess(abs(norm - 1), 1e-3)\n\n def test_cauchy(self):\n loc = torch.zeros(5, 5, requires_grad=True)\n scale = torch.ones(5, 5, requires_grad=True)\n loc_1d = torch.zeros(1, requires_grad=True)\n scale_1d = torch.ones(1, requires_grad=True)\n self.assertTrue(is_all_nan(Cauchy(loc_1d, scale_1d).mean))\n self.assertEqual(Cauchy(loc_1d, scale_1d).variance, inf)\n self.assertEqual(Cauchy(loc, scale).sample().size(), (5, 5))\n self.assertEqual(Cauchy(loc, scale).sample((7,)).size(), (7, 5, 5))\n self.assertEqual(Cauchy(loc_1d, scale_1d).sample().size(), (1,))\n self.assertEqual(Cauchy(loc_1d, scale_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(Cauchy(0.0, 1.0).sample((1,)).size(), (1,))\n\n set_rng_seed(1)\n self._gradcheck_log_prob(Cauchy, (loc, scale))\n self._gradcheck_log_prob(Cauchy, (loc, 1.0))\n self._gradcheck_log_prob(Cauchy, (0.0, scale))\n\n state = torch.get_rng_state()\n eps = loc.new(loc.size()).cauchy_()\n torch.set_rng_state(state)\n c = Cauchy(loc, scale).rsample()\n c.backward(torch.ones_like(c))\n self.assertEqual(loc.grad, torch.ones_like(scale))\n self.assertEqual(scale.grad, eps)\n loc.grad.zero_()\n scale.grad.zero_()\n\n def test_halfcauchy(self):\n scale = torch.ones(5, 5, requires_grad=True)\n scale_1d = torch.ones(1, requires_grad=True)\n self.assertTrue(is_all_nan(HalfCauchy(scale_1d).mean))\n self.assertEqual(HalfCauchy(scale_1d).variance, inf)\n self.assertEqual(HalfCauchy(scale).sample().size(), (5, 5))\n self.assertEqual(HalfCauchy(scale).sample((7,)).size(), (7, 5, 5))\n self.assertEqual(HalfCauchy(scale_1d).sample().size(), (1,))\n self.assertEqual(HalfCauchy(scale_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(HalfCauchy(1.0).sample((1,)).size(), (1,))\n\n set_rng_seed(1)\n self._gradcheck_log_prob(HalfCauchy, (scale,))\n self._gradcheck_log_prob(HalfCauchy, (1.0,))\n\n 
state = torch.get_rng_state()\n eps = scale.new(scale.size()).cauchy_().abs_()\n torch.set_rng_state(state)\n c = HalfCauchy(scale).rsample()\n c.backward(torch.ones_like(c))\n self.assertEqual(scale.grad, eps)\n scale.grad.zero_()\n\n def test_halfnormal(self):\n std = torch.randn(5, 5).abs().requires_grad_()\n std_1d = torch.randn(1, requires_grad=True)\n std_delta = torch.tensor([1e-5, 1e-5])\n self.assertEqual(HalfNormal(std).sample().size(), (5, 5))\n self.assertEqual(HalfNormal(std).sample((7,)).size(), (7, 5, 5))\n self.assertEqual(HalfNormal(std_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(HalfNormal(std_1d).sample().size(), (1,))\n self.assertEqual(HalfNormal(.6).sample((1,)).size(), (1,))\n self.assertEqual(HalfNormal(50.0).sample((1,)).size(), (1,))\n\n # sample check for extreme value of std\n set_rng_seed(1)\n self.assertEqual(HalfNormal(std_delta).sample(sample_shape=(1, 2)),\n torch.tensor([[[0.0, 0.0], [0.0, 0.0]]]),\n atol=1e-4, rtol=0)\n\n self._gradcheck_log_prob(HalfNormal, (std,))\n self._gradcheck_log_prob(HalfNormal, (1.0,))\n\n # check .log_prob() can broadcast.\n dist = HalfNormal(torch.ones(2, 1, 4))\n log_prob = dist.log_prob(torch.ones(3, 1))\n self.assertEqual(log_prob.shape, (2, 3, 4))\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_halfnormal_logprob(self):\n std = torch.randn(5, 1).abs().requires_grad_()\n\n def ref_log_prob(idx, x, log_prob):\n s = std.view(-1)[idx].detach()\n expected = scipy.stats.halfnorm(scale=s).logpdf(x)\n self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)\n\n self._check_log_prob(HalfNormal(std), ref_log_prob)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_halfnormal_sample(self):\n set_rng_seed(0) # see Note [Randomized statistical tests]\n for std in [0.1, 1.0, 10.0]:\n self._check_sampler_sampler(HalfNormal(std),\n scipy.stats.halfnorm(scale=std),\n 'HalfNormal(scale={})'.format(std))\n\n def test_lognormal(self):\n mean = torch.randn(5, 5, 
requires_grad=True)\n std = torch.randn(5, 5).abs().requires_grad_()\n mean_1d = torch.randn(1, requires_grad=True)\n std_1d = torch.randn(1).abs().requires_grad_()\n mean_delta = torch.tensor([1.0, 0.0])\n std_delta = torch.tensor([1e-5, 1e-5])\n self.assertEqual(LogNormal(mean, std).sample().size(), (5, 5))\n self.assertEqual(LogNormal(mean, std).sample((7,)).size(), (7, 5, 5))\n self.assertEqual(LogNormal(mean_1d, std_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(LogNormal(mean_1d, std_1d).sample().size(), (1,))\n self.assertEqual(LogNormal(0.2, .6).sample((1,)).size(), (1,))\n self.assertEqual(LogNormal(-0.7, 50.0).sample((1,)).size(), (1,))\n\n # sample check for extreme value of mean, std\n set_rng_seed(1)\n self.assertEqual(LogNormal(mean_delta, std_delta).sample(sample_shape=(1, 2)),\n torch.tensor([[[math.exp(1), 1.0], [math.exp(1), 1.0]]]),\n atol=1e-4, rtol=0)\n\n self._gradcheck_log_prob(LogNormal, (mean, std))\n self._gradcheck_log_prob(LogNormal, (mean, 1.0))\n self._gradcheck_log_prob(LogNormal, (0.0, std))\n\n # check .log_prob() can broadcast.\n dist = LogNormal(torch.zeros(4), torch.ones(2, 1, 1))\n log_prob = dist.log_prob(torch.ones(3, 1))\n self.assertEqual(log_prob.shape, (2, 3, 4))\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_lognormal_logprob(self):\n mean = torch.randn(5, 1, requires_grad=True)\n std = torch.randn(5, 1).abs().requires_grad_()\n\n def ref_log_prob(idx, x, log_prob):\n m = mean.view(-1)[idx].detach()\n s = std.view(-1)[idx].detach()\n expected = scipy.stats.lognorm(s=s, scale=math.exp(m)).logpdf(x)\n self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)\n\n self._check_log_prob(LogNormal(mean, std), ref_log_prob)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_lognormal_sample(self):\n set_rng_seed(0) # see Note [Randomized statistical tests]\n for mean, std in product([-1.0, 0.0, 1.0], [0.1, 1.0, 10.0]):\n self._check_sampler_sampler(LogNormal(mean, std),\n 
scipy.stats.lognorm(scale=math.exp(mean), s=std),\n 'LogNormal(loc={}, scale={})'.format(mean, std))\n\n def test_logisticnormal(self):\n mean = torch.randn(5, 5).requires_grad_()\n std = torch.randn(5, 5).abs().requires_grad_()\n mean_1d = torch.randn(1).requires_grad_()\n std_1d = torch.randn(1).requires_grad_()\n mean_delta = torch.tensor([1.0, 0.0])\n std_delta = torch.tensor([1e-5, 1e-5])\n self.assertEqual(LogisticNormal(mean, std).sample().size(), (5, 6))\n self.assertEqual(LogisticNormal(mean, std).sample((7,)).size(), (7, 5, 6))\n self.assertEqual(LogisticNormal(mean_1d, std_1d).sample((1,)).size(), (1, 2))\n self.assertEqual(LogisticNormal(mean_1d, std_1d).sample().size(), (2,))\n self.assertEqual(LogisticNormal(0.2, .6).sample((1,)).size(), (2,))\n self.assertEqual(LogisticNormal(-0.7, 50.0).sample((1,)).size(), (2,))\n\n # sample check for extreme value of mean, std\n set_rng_seed(1)\n self.assertEqual(LogisticNormal(mean_delta, std_delta).sample(),\n torch.tensor([math.exp(1) / (1. + 1. + math.exp(1)),\n 1. / (1. + 1. + math.exp(1)),\n 1. / (1. + 1. + math.exp(1))]),\n atol=1e-4, rtol=0)\n\n self._gradcheck_log_prob(LogisticNormal, (mean, std))\n self._gradcheck_log_prob(LogisticNormal, (mean, 1.0))\n self._gradcheck_log_prob(LogisticNormal, (0.0, std))\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_logisticnormal_logprob(self):\n mean = torch.randn(5, 7).requires_grad_()\n std = torch.randn(5, 7).abs().requires_grad_()\n\n # Smoke test for now\n # TODO: Once _check_log_prob works with multidimensional distributions,\n # add proper testing of the log probabilities.\n dist = LogisticNormal(mean, std)\n assert dist.log_prob(dist.sample()).detach().cpu().numpy().shape == (5,)\n\n def _get_logistic_normal_ref_sampler(self, base_dist):\n\n def _sampler(num_samples):\n x = base_dist.rvs(num_samples)\n offset = np.log((x.shape[-1] + 1) - np.ones_like(x).cumsum(-1))\n z = 1. / (1. 
+ np.exp(offset - x))\n z_cumprod = np.cumprod(1 - z, axis=-1)\n y1 = np.pad(z, ((0, 0), (0, 1)), mode='constant', constant_values=1.)\n y2 = np.pad(z_cumprod, ((0, 0), (1, 0)), mode='constant', constant_values=1.)\n return y1 * y2\n\n return _sampler\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_logisticnormal_sample(self):\n set_rng_seed(0) # see Note [Randomized statistical tests]\n means = map(np.asarray, [(-1.0, -1.0), (0.0, 0.0), (1.0, 1.0)])\n covs = map(np.diag, [(0.1, 0.1), (1.0, 1.0), (10.0, 10.0)])\n for mean, cov in product(means, covs):\n base_dist = scipy.stats.multivariate_normal(mean=mean, cov=cov)\n ref_dist = scipy.stats.multivariate_normal(mean=mean, cov=cov)\n ref_dist.rvs = self._get_logistic_normal_ref_sampler(base_dist)\n mean_th = torch.tensor(mean)\n std_th = torch.tensor(np.sqrt(np.diag(cov)))\n self._check_sampler_sampler(\n LogisticNormal(mean_th, std_th), ref_dist,\n 'LogisticNormal(loc={}, scale={})'.format(mean_th, std_th),\n multivariate=True)\n\n def test_mixture_same_family_shape(self):\n normal_case_1d = MixtureSameFamily(\n Categorical(torch.rand(5)),\n Normal(torch.randn(5), torch.rand(5)))\n normal_case_1d_batch = MixtureSameFamily(\n Categorical(torch.rand(3, 5)),\n Normal(torch.randn(3, 5), torch.rand(3, 5)))\n normal_case_1d_multi_batch = MixtureSameFamily(\n Categorical(torch.rand(4, 3, 5)),\n Normal(torch.randn(4, 3, 5), torch.rand(4, 3, 5)))\n normal_case_2d = MixtureSameFamily(\n Categorical(torch.rand(5)),\n Independent(Normal(torch.randn(5, 2), torch.rand(5, 2)), 1))\n normal_case_2d_batch = MixtureSameFamily(\n Categorical(torch.rand(3, 5)),\n Independent(Normal(torch.randn(3, 5, 2), torch.rand(3, 5, 2)), 1))\n normal_case_2d_multi_batch = MixtureSameFamily(\n Categorical(torch.rand(4, 3, 5)),\n Independent(Normal(torch.randn(4, 3, 5, 2), torch.rand(4, 3, 5, 2)), 1))\n\n self.assertEqual(normal_case_1d.sample().size(), ())\n self.assertEqual(normal_case_1d.sample((2,)).size(), (2,))\n 
self.assertEqual(normal_case_1d.sample((2, 7)).size(), (2, 7))\n self.assertEqual(normal_case_1d_batch.sample().size(), (3,))\n self.assertEqual(normal_case_1d_batch.sample((2,)).size(), (2, 3))\n self.assertEqual(normal_case_1d_batch.sample((2, 7)).size(), (2, 7, 3))\n self.assertEqual(normal_case_1d_multi_batch.sample().size(), (4, 3))\n self.assertEqual(normal_case_1d_multi_batch.sample((2,)).size(), (2, 4, 3))\n self.assertEqual(normal_case_1d_multi_batch.sample((2, 7)).size(), (2, 7, 4, 3))\n\n self.assertEqual(normal_case_2d.sample().size(), (2,))\n self.assertEqual(normal_case_2d.sample((2,)).size(), (2, 2))\n self.assertEqual(normal_case_2d.sample((2, 7)).size(), (2, 7, 2))\n self.assertEqual(normal_case_2d_batch.sample().size(), (3, 2))\n self.assertEqual(normal_case_2d_batch.sample((2,)).size(), (2, 3, 2))\n self.assertEqual(normal_case_2d_batch.sample((2, 7)).size(), (2, 7, 3, 2))\n self.assertEqual(normal_case_2d_multi_batch.sample().size(), (4, 3, 2))\n self.assertEqual(normal_case_2d_multi_batch.sample((2,)).size(), (2, 4, 3, 2))\n self.assertEqual(normal_case_2d_multi_batch.sample((2, 7)).size(), (2, 7, 4, 3, 2))\n\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_mixture_same_family_log_prob(self):\n probs = torch.rand(5, 5).softmax(dim=-1)\n loc = torch.randn(5, 5)\n scale = torch.rand(5, 5)\n\n def ref_log_prob(idx, x, log_prob):\n p = probs[idx].numpy()\n m = loc[idx].numpy()\n s = scale[idx].numpy()\n mix = scipy.stats.multinomial(1, p)\n comp = scipy.stats.norm(m, s)\n expected = scipy.special.logsumexp(comp.logpdf(x) + np.log(mix.p))\n self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)\n\n self._check_log_prob(\n MixtureSameFamily(Categorical(probs=probs),\n Normal(loc, scale)), ref_log_prob)\n\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_mixture_same_family_sample(self):\n probs = torch.rand(5).softmax(dim=-1)\n loc = torch.randn(5)\n scale = torch.rand(5)\n\n class ScipyMixtureNormal(object):\n def 
__init__(self, probs, mu, std):\n self.probs = probs\n self.mu = mu\n self.std = std\n\n def rvs(self, n_sample):\n comp_samples = [scipy.stats.norm(m, s).rvs(n_sample) for m, s\n in zip(self.mu, self.std)]\n mix_samples = scipy.stats.multinomial(1, self.probs).rvs(n_sample)\n samples = []\n for i in range(n_sample):\n samples.append(comp_samples[mix_samples[i].argmax()][i])\n return np.asarray(samples)\n\n self._check_sampler_sampler(\n MixtureSameFamily(Categorical(probs=probs), Normal(loc, scale)),\n ScipyMixtureNormal(probs.numpy(), loc.numpy(), scale.numpy()),\n '''MixtureSameFamily(Categorical(probs={}),\n Normal(loc={}, scale={}))'''.format(probs, loc, scale))\n\n def test_normal(self):\n loc = torch.randn(5, 5, requires_grad=True)\n scale = torch.randn(5, 5).abs().requires_grad_()\n loc_1d = torch.randn(1, requires_grad=True)\n scale_1d = torch.randn(1).abs().requires_grad_()\n loc_delta = torch.tensor([1.0, 0.0])\n scale_delta = torch.tensor([1e-5, 1e-5])\n self.assertEqual(Normal(loc, scale).sample().size(), (5, 5))\n self.assertEqual(Normal(loc, scale).sample((7,)).size(), (7, 5, 5))\n self.assertEqual(Normal(loc_1d, scale_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(Normal(loc_1d, scale_1d).sample().size(), (1,))\n self.assertEqual(Normal(0.2, .6).sample((1,)).size(), (1,))\n self.assertEqual(Normal(-0.7, 50.0).sample((1,)).size(), (1,))\n\n # sample check for extreme value of mean, std\n set_rng_seed(1)\n self.assertEqual(Normal(loc_delta, scale_delta).sample(sample_shape=(1, 2)),\n torch.tensor([[[1.0, 0.0], [1.0, 0.0]]]),\n atol=1e-4, rtol=0)\n\n self._gradcheck_log_prob(Normal, (loc, scale))\n self._gradcheck_log_prob(Normal, (loc, 1.0))\n self._gradcheck_log_prob(Normal, (0.0, scale))\n\n state = torch.get_rng_state()\n eps = torch.normal(torch.zeros_like(loc), torch.ones_like(scale))\n torch.set_rng_state(state)\n z = Normal(loc, scale).rsample()\n z.backward(torch.ones_like(z))\n self.assertEqual(loc.grad, torch.ones_like(loc))\n 
self.assertEqual(scale.grad, eps)\n loc.grad.zero_()\n scale.grad.zero_()\n self.assertEqual(z.size(), (5, 5))\n\n def ref_log_prob(idx, x, log_prob):\n m = loc.view(-1)[idx]\n s = scale.view(-1)[idx]\n expected = (math.exp(-(x - m) ** 2 / (2 * s ** 2)) /\n math.sqrt(2 * math.pi * s ** 2))\n self.assertEqual(log_prob, math.log(expected), atol=1e-3, rtol=0)\n\n self._check_log_prob(Normal(loc, scale), ref_log_prob)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_normal_sample(self):\n set_rng_seed(0) # see Note [Randomized statistical tests]\n for loc, scale in product([-1.0, 0.0, 1.0], [0.1, 1.0, 10.0]):\n self._check_sampler_sampler(Normal(loc, scale),\n scipy.stats.norm(loc=loc, scale=scale),\n 'Normal(mean={}, std={})'.format(loc, scale))\n\n def test_lowrank_multivariate_normal_shape(self):\n mean = torch.randn(5, 3, requires_grad=True)\n mean_no_batch = torch.randn(3, requires_grad=True)\n mean_multi_batch = torch.randn(6, 5, 3, requires_grad=True)\n\n # construct PSD covariance\n cov_factor = torch.randn(3, 1, requires_grad=True)\n cov_diag = torch.randn(3).abs().requires_grad_()\n\n # construct batch of PSD covariances\n cov_factor_batched = torch.randn(6, 5, 3, 2, requires_grad=True)\n cov_diag_batched = torch.randn(6, 5, 3).abs().requires_grad_()\n\n # ensure that sample, batch, event shapes all handled correctly\n self.assertEqual(LowRankMultivariateNormal(mean, cov_factor, cov_diag)\n .sample().size(), (5, 3))\n self.assertEqual(LowRankMultivariateNormal(mean_no_batch, cov_factor, cov_diag)\n .sample().size(), (3,))\n self.assertEqual(LowRankMultivariateNormal(mean_multi_batch, cov_factor, cov_diag)\n .sample().size(), (6, 5, 3))\n self.assertEqual(LowRankMultivariateNormal(mean, cov_factor, cov_diag)\n .sample((2,)).size(), (2, 5, 3))\n self.assertEqual(LowRankMultivariateNormal(mean_no_batch, cov_factor, cov_diag)\n .sample((2,)).size(), (2, 3))\n self.assertEqual(LowRankMultivariateNormal(mean_multi_batch, cov_factor, cov_diag)\n 
.sample((2,)).size(), (2, 6, 5, 3))\n self.assertEqual(LowRankMultivariateNormal(mean, cov_factor, cov_diag)\n .sample((2, 7)).size(), (2, 7, 5, 3))\n self.assertEqual(LowRankMultivariateNormal(mean_no_batch, cov_factor, cov_diag)\n .sample((2, 7)).size(), (2, 7, 3))\n self.assertEqual(LowRankMultivariateNormal(mean_multi_batch, cov_factor, cov_diag)\n .sample((2, 7)).size(), (2, 7, 6, 5, 3))\n self.assertEqual(LowRankMultivariateNormal(mean, cov_factor_batched, cov_diag_batched)\n .sample((2, 7)).size(), (2, 7, 6, 5, 3))\n self.assertEqual(LowRankMultivariateNormal(mean_no_batch, cov_factor_batched, cov_diag_batched)\n .sample((2, 7)).size(), (2, 7, 6, 5, 3))\n self.assertEqual(LowRankMultivariateNormal(mean_multi_batch, cov_factor_batched, cov_diag_batched)\n .sample((2, 7)).size(), (2, 7, 6, 5, 3))\n\n # check gradients\n self._gradcheck_log_prob(LowRankMultivariateNormal,\n (mean, cov_factor, cov_diag))\n self._gradcheck_log_prob(LowRankMultivariateNormal,\n (mean_multi_batch, cov_factor, cov_diag))\n self._gradcheck_log_prob(LowRankMultivariateNormal,\n (mean_multi_batch, cov_factor_batched, cov_diag_batched))\n\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_lowrank_multivariate_normal_log_prob(self):\n mean = torch.randn(3, requires_grad=True)\n cov_factor = torch.randn(3, 1, requires_grad=True)\n cov_diag = torch.randn(3).abs().requires_grad_()\n cov = cov_factor.matmul(cov_factor.t()) + cov_diag.diag()\n\n # check that logprob values match scipy logpdf,\n # and that covariance and scale_tril parameters are equivalent\n dist1 = LowRankMultivariateNormal(mean, cov_factor, cov_diag)\n ref_dist = scipy.stats.multivariate_normal(mean.detach().numpy(), cov.detach().numpy())\n\n x = dist1.sample((10,))\n expected = ref_dist.logpdf(x.numpy())\n\n self.assertEqual(0.0, np.mean((dist1.log_prob(x).detach().numpy() - expected)**2), atol=1e-3, rtol=0)\n\n # Double-check that batched versions behave the same as unbatched\n mean = torch.randn(5, 3, 
requires_grad=True)\n cov_factor = torch.randn(5, 3, 2, requires_grad=True)\n cov_diag = torch.randn(5, 3).abs().requires_grad_()\n\n dist_batched = LowRankMultivariateNormal(mean, cov_factor, cov_diag)\n dist_unbatched = [LowRankMultivariateNormal(mean[i], cov_factor[i], cov_diag[i])\n for i in range(mean.size(0))]\n\n x = dist_batched.sample((10,))\n batched_prob = dist_batched.log_prob(x)\n unbatched_prob = torch.stack([dist_unbatched[i].log_prob(x[:, i]) for i in range(5)]).t()\n\n self.assertEqual(batched_prob.shape, unbatched_prob.shape)\n self.assertEqual(0.0, (batched_prob - unbatched_prob).abs().max(), atol=1e-3, rtol=0)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_lowrank_multivariate_normal_sample(self):\n set_rng_seed(0) # see Note [Randomized statistical tests]\n mean = torch.randn(5, requires_grad=True)\n cov_factor = torch.randn(5, 1, requires_grad=True)\n cov_diag = torch.randn(5).abs().requires_grad_()\n cov = cov_factor.matmul(cov_factor.t()) + cov_diag.diag()\n\n self._check_sampler_sampler(LowRankMultivariateNormal(mean, cov_factor, cov_diag),\n scipy.stats.multivariate_normal(mean.detach().numpy(), cov.detach().numpy()),\n 'LowRankMultivariateNormal(loc={}, cov_factor={}, cov_diag={})'\n .format(mean, cov_factor, cov_diag), multivariate=True)\n\n def test_lowrank_multivariate_normal_properties(self):\n loc = torch.randn(5)\n cov_factor = torch.randn(5, 2)\n cov_diag = torch.randn(5).abs()\n cov = cov_factor.matmul(cov_factor.t()) + cov_diag.diag()\n m1 = LowRankMultivariateNormal(loc, cov_factor, cov_diag)\n m2 = MultivariateNormal(loc=loc, covariance_matrix=cov)\n self.assertEqual(m1.mean, m2.mean)\n self.assertEqual(m1.variance, m2.variance)\n self.assertEqual(m1.covariance_matrix, m2.covariance_matrix)\n self.assertEqual(m1.scale_tril, m2.scale_tril)\n self.assertEqual(m1.precision_matrix, m2.precision_matrix)\n self.assertEqual(m1.entropy(), m2.entropy())\n\n def test_lowrank_multivariate_normal_moments(self):\n 
set_rng_seed(0) # see Note [Randomized statistical tests]\n mean = torch.randn(5)\n cov_factor = torch.randn(5, 2)\n cov_diag = torch.randn(5).abs()\n d = LowRankMultivariateNormal(mean, cov_factor, cov_diag)\n samples = d.rsample((100000,))\n empirical_mean = samples.mean(0)\n self.assertEqual(d.mean, empirical_mean, atol=0.01, rtol=0)\n empirical_var = samples.var(0)\n self.assertEqual(d.variance, empirical_var, atol=0.02, rtol=0)\n\n def test_multivariate_normal_shape(self):\n mean = torch.randn(5, 3, requires_grad=True)\n mean_no_batch = torch.randn(3, requires_grad=True)\n mean_multi_batch = torch.randn(6, 5, 3, requires_grad=True)\n\n # construct PSD covariance\n tmp = torch.randn(3, 10)\n cov = (torch.matmul(tmp, tmp.t()) / tmp.size(-1)).requires_grad_()\n prec = cov.inverse().requires_grad_()\n scale_tril = torch.cholesky(cov, upper=False).requires_grad_()\n\n # construct batch of PSD covariances\n tmp = torch.randn(6, 5, 3, 10)\n cov_batched = (tmp.unsqueeze(-2) * tmp.unsqueeze(-3)).mean(-1).requires_grad_()\n prec_batched = cov_batched.inverse()\n scale_tril_batched = cov_batched.cholesky(upper=False)\n\n # ensure that sample, batch, event shapes all handled correctly\n self.assertEqual(MultivariateNormal(mean, cov).sample().size(), (5, 3))\n self.assertEqual(MultivariateNormal(mean_no_batch, cov).sample().size(), (3,))\n self.assertEqual(MultivariateNormal(mean_multi_batch, cov).sample().size(), (6, 5, 3))\n self.assertEqual(MultivariateNormal(mean, cov).sample((2,)).size(), (2, 5, 3))\n self.assertEqual(MultivariateNormal(mean_no_batch, cov).sample((2,)).size(), (2, 3))\n self.assertEqual(MultivariateNormal(mean_multi_batch, cov).sample((2,)).size(), (2, 6, 5, 3))\n self.assertEqual(MultivariateNormal(mean, cov).sample((2, 7)).size(), (2, 7, 5, 3))\n self.assertEqual(MultivariateNormal(mean_no_batch, cov).sample((2, 7)).size(), (2, 7, 3))\n self.assertEqual(MultivariateNormal(mean_multi_batch, cov).sample((2, 7)).size(), (2, 7, 6, 5, 3))\n 
self.assertEqual(MultivariateNormal(mean, cov_batched).sample((2, 7)).size(), (2, 7, 6, 5, 3))\n self.assertEqual(MultivariateNormal(mean_no_batch, cov_batched).sample((2, 7)).size(), (2, 7, 6, 5, 3))\n self.assertEqual(MultivariateNormal(mean_multi_batch, cov_batched).sample((2, 7)).size(), (2, 7, 6, 5, 3))\n self.assertEqual(MultivariateNormal(mean, precision_matrix=prec).sample((2, 7)).size(), (2, 7, 5, 3))\n self.assertEqual(MultivariateNormal(mean, precision_matrix=prec_batched).sample((2, 7)).size(), (2, 7, 6, 5, 3))\n self.assertEqual(MultivariateNormal(mean, scale_tril=scale_tril).sample((2, 7)).size(), (2, 7, 5, 3))\n self.assertEqual(MultivariateNormal(mean, scale_tril=scale_tril_batched).sample((2, 7)).size(), (2, 7, 6, 5, 3))\n\n # check gradients\n # We write a custom gradcheck function to maintain the symmetry\n # of the perturbed covariances and their inverses (precision)\n def multivariate_normal_log_prob_gradcheck(mean, covariance=None, precision=None, scale_tril=None):\n mvn_samples = MultivariateNormal(mean, covariance, precision, scale_tril).sample().requires_grad_()\n\n def gradcheck_func(samples, mu, sigma, prec, scale_tril):\n if sigma is not None:\n sigma = 0.5 * (sigma + sigma.transpose(-1, -2)) # Ensure symmetry of covariance\n if prec is not None:\n prec = 0.5 * (prec + prec.transpose(-1, -2)) # Ensure symmetry of precision\n return MultivariateNormal(mu, sigma, prec, scale_tril).log_prob(samples)\n gradcheck(gradcheck_func, (mvn_samples, mean, covariance, precision, scale_tril), raise_exception=True)\n\n multivariate_normal_log_prob_gradcheck(mean, cov)\n multivariate_normal_log_prob_gradcheck(mean_multi_batch, cov)\n multivariate_normal_log_prob_gradcheck(mean_multi_batch, cov_batched)\n multivariate_normal_log_prob_gradcheck(mean, None, prec)\n multivariate_normal_log_prob_gradcheck(mean_no_batch, None, prec_batched)\n multivariate_normal_log_prob_gradcheck(mean, None, None, scale_tril)\n 
multivariate_normal_log_prob_gradcheck(mean_no_batch, None, None, scale_tril_batched)\n\n def test_multivariate_normal_stable_with_precision_matrix(self):\n x = torch.randn(10)\n P = torch.exp(-(x - x.unsqueeze(-1)) ** 2) # RBF kernel\n MultivariateNormal(x.new_zeros(10), precision_matrix=P)\n\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_multivariate_normal_log_prob(self):\n mean = torch.randn(3, requires_grad=True)\n tmp = torch.randn(3, 10)\n cov = (torch.matmul(tmp, tmp.t()) / tmp.size(-1)).requires_grad_()\n prec = cov.inverse().requires_grad_()\n scale_tril = torch.cholesky(cov, upper=False).requires_grad_()\n\n # check that logprob values match scipy logpdf,\n # and that covariance and scale_tril parameters are equivalent\n dist1 = MultivariateNormal(mean, cov)\n dist2 = MultivariateNormal(mean, precision_matrix=prec)\n dist3 = MultivariateNormal(mean, scale_tril=scale_tril)\n ref_dist = scipy.stats.multivariate_normal(mean.detach().numpy(), cov.detach().numpy())\n\n x = dist1.sample((10,))\n expected = ref_dist.logpdf(x.numpy())\n\n self.assertEqual(0.0, np.mean((dist1.log_prob(x).detach().numpy() - expected)**2), atol=1e-3, rtol=0)\n self.assertEqual(0.0, np.mean((dist2.log_prob(x).detach().numpy() - expected)**2), atol=1e-3, rtol=0)\n self.assertEqual(0.0, np.mean((dist3.log_prob(x).detach().numpy() - expected)**2), atol=1e-3, rtol=0)\n\n # Double-check that batched versions behave the same as unbatched\n mean = torch.randn(5, 3, requires_grad=True)\n tmp = torch.randn(5, 3, 10)\n cov = (tmp.unsqueeze(-2) * tmp.unsqueeze(-3)).mean(-1).requires_grad_()\n\n dist_batched = MultivariateNormal(mean, cov)\n dist_unbatched = [MultivariateNormal(mean[i], cov[i]) for i in range(mean.size(0))]\n\n x = dist_batched.sample((10,))\n batched_prob = dist_batched.log_prob(x)\n unbatched_prob = torch.stack([dist_unbatched[i].log_prob(x[:, i]) for i in range(5)]).t()\n\n self.assertEqual(batched_prob.shape, unbatched_prob.shape)\n self.assertEqual(0.0, 
(batched_prob - unbatched_prob).abs().max(), atol=1e-3, rtol=0)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_multivariate_normal_sample(self):\n set_rng_seed(0) # see Note [Randomized statistical tests]\n mean = torch.randn(3, requires_grad=True)\n tmp = torch.randn(3, 10)\n cov = (torch.matmul(tmp, tmp.t()) / tmp.size(-1)).requires_grad_()\n prec = cov.inverse().requires_grad_()\n scale_tril = torch.cholesky(cov, upper=False).requires_grad_()\n\n self._check_sampler_sampler(MultivariateNormal(mean, cov),\n scipy.stats.multivariate_normal(mean.detach().numpy(), cov.detach().numpy()),\n 'MultivariateNormal(loc={}, cov={})'.format(mean, cov),\n multivariate=True)\n self._check_sampler_sampler(MultivariateNormal(mean, precision_matrix=prec),\n scipy.stats.multivariate_normal(mean.detach().numpy(), cov.detach().numpy()),\n 'MultivariateNormal(loc={}, atol={})'.format(mean, prec),\n multivariate=True)\n self._check_sampler_sampler(MultivariateNormal(mean, scale_tril=scale_tril),\n scipy.stats.multivariate_normal(mean.detach().numpy(), cov.detach().numpy()),\n 'MultivariateNormal(loc={}, scale_tril={})'.format(mean, scale_tril),\n multivariate=True)\n\n def test_multivariate_normal_properties(self):\n loc = torch.randn(5)\n scale_tril = transform_to(constraints.lower_cholesky)(torch.randn(5, 5))\n m = MultivariateNormal(loc=loc, scale_tril=scale_tril)\n self.assertEqual(m.covariance_matrix, m.scale_tril.mm(m.scale_tril.t()))\n self.assertEqual(m.covariance_matrix.mm(m.precision_matrix), torch.eye(m.event_shape[0]))\n self.assertEqual(m.scale_tril, torch.cholesky(m.covariance_matrix, upper=False))\n\n def test_multivariate_normal_moments(self):\n set_rng_seed(0) # see Note [Randomized statistical tests]\n mean = torch.randn(5)\n scale_tril = transform_to(constraints.lower_cholesky)(torch.randn(5, 5))\n d = MultivariateNormal(mean, scale_tril=scale_tril)\n samples = d.rsample((100000,))\n empirical_mean = samples.mean(0)\n self.assertEqual(d.mean, 
empirical_mean, atol=0.01, rtol=0)\n empirical_var = samples.var(0)\n self.assertEqual(d.variance, empirical_var, atol=0.05, rtol=0)\n\n def test_exponential(self):\n rate = torch.randn(5, 5).abs().requires_grad_()\n rate_1d = torch.randn(1).abs().requires_grad_()\n self.assertEqual(Exponential(rate).sample().size(), (5, 5))\n self.assertEqual(Exponential(rate).sample((7,)).size(), (7, 5, 5))\n self.assertEqual(Exponential(rate_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(Exponential(rate_1d).sample().size(), (1,))\n self.assertEqual(Exponential(0.2).sample((1,)).size(), (1,))\n self.assertEqual(Exponential(50.0).sample((1,)).size(), (1,))\n\n self._gradcheck_log_prob(Exponential, (rate,))\n state = torch.get_rng_state()\n eps = rate.new(rate.size()).exponential_()\n torch.set_rng_state(state)\n z = Exponential(rate).rsample()\n z.backward(torch.ones_like(z))\n self.assertEqual(rate.grad, -eps / rate**2)\n rate.grad.zero_()\n self.assertEqual(z.size(), (5, 5))\n\n def ref_log_prob(idx, x, log_prob):\n m = rate.view(-1)[idx]\n expected = math.log(m) - m * x\n self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)\n\n self._check_log_prob(Exponential(rate), ref_log_prob)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_exponential_sample(self):\n set_rng_seed(1) # see Note [Randomized statistical tests]\n for rate in [1e-5, 1.0, 10.]:\n self._check_sampler_sampler(Exponential(rate),\n scipy.stats.expon(scale=1. 
/ rate),\n 'Exponential(rate={})'.format(rate))\n\n def test_laplace(self):\n loc = torch.randn(5, 5, requires_grad=True)\n scale = torch.randn(5, 5).abs().requires_grad_()\n loc_1d = torch.randn(1, requires_grad=True)\n scale_1d = torch.randn(1, requires_grad=True)\n loc_delta = torch.tensor([1.0, 0.0])\n scale_delta = torch.tensor([1e-5, 1e-5])\n self.assertEqual(Laplace(loc, scale).sample().size(), (5, 5))\n self.assertEqual(Laplace(loc, scale).sample((7,)).size(), (7, 5, 5))\n self.assertEqual(Laplace(loc_1d, scale_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(Laplace(loc_1d, scale_1d).sample().size(), (1,))\n self.assertEqual(Laplace(0.2, .6).sample((1,)).size(), (1,))\n self.assertEqual(Laplace(-0.7, 50.0).sample((1,)).size(), (1,))\n\n # sample check for extreme value of mean, std\n set_rng_seed(0)\n self.assertEqual(Laplace(loc_delta, scale_delta).sample(sample_shape=(1, 2)),\n torch.tensor([[[1.0, 0.0], [1.0, 0.0]]]),\n atol=1e-4, rtol=0)\n\n self._gradcheck_log_prob(Laplace, (loc, scale))\n self._gradcheck_log_prob(Laplace, (loc, 1.0))\n self._gradcheck_log_prob(Laplace, (0.0, scale))\n\n state = torch.get_rng_state()\n eps = torch.ones_like(loc).uniform_(-.5, .5)\n torch.set_rng_state(state)\n z = Laplace(loc, scale).rsample()\n z.backward(torch.ones_like(z))\n self.assertEqual(loc.grad, torch.ones_like(loc))\n self.assertEqual(scale.grad, -eps.sign() * torch.log1p(-2 * eps.abs()))\n loc.grad.zero_()\n scale.grad.zero_()\n self.assertEqual(z.size(), (5, 5))\n\n def ref_log_prob(idx, x, log_prob):\n m = loc.view(-1)[idx]\n s = scale.view(-1)[idx]\n expected = (-math.log(2 * s) - abs(x - m) / s)\n self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)\n\n self._check_log_prob(Laplace(loc, scale), ref_log_prob)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_laplace_sample(self):\n set_rng_seed(1) # see Note [Randomized statistical tests]\n for loc, scale in product([-1.0, 0.0, 1.0], [0.1, 1.0, 10.0]):\n 
self._check_sampler_sampler(Laplace(loc, scale),\n scipy.stats.laplace(loc=loc, scale=scale),\n 'Laplace(loc={}, scale={})'.format(loc, scale))\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_gamma_shape(self):\n alpha = torch.randn(2, 3).exp().requires_grad_()\n beta = torch.randn(2, 3).exp().requires_grad_()\n alpha_1d = torch.randn(1).exp().requires_grad_()\n beta_1d = torch.randn(1).exp().requires_grad_()\n self.assertEqual(Gamma(alpha, beta).sample().size(), (2, 3))\n self.assertEqual(Gamma(alpha, beta).sample((5,)).size(), (5, 2, 3))\n self.assertEqual(Gamma(alpha_1d, beta_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(Gamma(alpha_1d, beta_1d).sample().size(), (1,))\n self.assertEqual(Gamma(0.5, 0.5).sample().size(), ())\n self.assertEqual(Gamma(0.5, 0.5).sample((1,)).size(), (1,))\n\n def ref_log_prob(idx, x, log_prob):\n a = alpha.view(-1)[idx].detach()\n b = beta.view(-1)[idx].detach()\n expected = scipy.stats.gamma.logpdf(x, a, scale=1 / b)\n self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)\n\n self._check_log_prob(Gamma(alpha, beta), ref_log_prob)\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA not found\")\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_gamma_gpu_shape(self):\n alpha = torch.randn(2, 3).cuda().exp().requires_grad_()\n beta = torch.randn(2, 3).cuda().exp().requires_grad_()\n alpha_1d = torch.randn(1).cuda().exp().requires_grad_()\n beta_1d = torch.randn(1).cuda().exp().requires_grad_()\n self.assertEqual(Gamma(alpha, beta).sample().size(), (2, 3))\n self.assertEqual(Gamma(alpha, beta).sample((5,)).size(), (5, 2, 3))\n self.assertEqual(Gamma(alpha_1d, beta_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(Gamma(alpha_1d, beta_1d).sample().size(), (1,))\n self.assertEqual(Gamma(0.5, 0.5).sample().size(), ())\n self.assertEqual(Gamma(0.5, 0.5).sample((1,)).size(), (1,))\n\n def ref_log_prob(idx, x, log_prob):\n a = alpha.view(-1)[idx].detach().cpu()\n b = beta.view(-1)[idx].detach().cpu()\n 
expected = scipy.stats.gamma.logpdf(x.cpu(), a, scale=1 / b)\n self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)\n\n self._check_log_prob(Gamma(alpha, beta), ref_log_prob)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_gamma_sample(self):\n set_rng_seed(0) # see Note [Randomized statistical tests]\n for alpha, beta in product([0.1, 1.0, 5.0], [0.1, 1.0, 10.0]):\n self._check_sampler_sampler(Gamma(alpha, beta),\n scipy.stats.gamma(alpha, scale=1.0 / beta),\n 'Gamma(concentration={}, rate={})'.format(alpha, beta))\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA not found\")\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_gamma_gpu_sample(self):\n set_rng_seed(0)\n for alpha, beta in product([0.1, 1.0, 5.0], [0.1, 1.0, 10.0]):\n a, b = torch.tensor([alpha]).cuda(), torch.tensor([beta]).cuda()\n self._check_sampler_sampler(Gamma(a, b),\n scipy.stats.gamma(alpha, scale=1.0 / beta),\n 'Gamma(alpha={}, beta={})'.format(alpha, beta),\n failure_rate=1e-4)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_pareto(self):\n scale = torch.randn(2, 3).abs().requires_grad_()\n alpha = torch.randn(2, 3).abs().requires_grad_()\n scale_1d = torch.randn(1).abs().requires_grad_()\n alpha_1d = torch.randn(1).abs().requires_grad_()\n self.assertEqual(Pareto(scale_1d, 0.5).mean, inf)\n self.assertEqual(Pareto(scale_1d, 0.5).variance, inf)\n self.assertEqual(Pareto(scale, alpha).sample().size(), (2, 3))\n self.assertEqual(Pareto(scale, alpha).sample((5,)).size(), (5, 2, 3))\n self.assertEqual(Pareto(scale_1d, alpha_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(Pareto(scale_1d, alpha_1d).sample().size(), (1,))\n self.assertEqual(Pareto(1.0, 1.0).sample().size(), ())\n self.assertEqual(Pareto(1.0, 1.0).sample((1,)).size(), (1,))\n\n def ref_log_prob(idx, x, log_prob):\n s = scale.view(-1)[idx].detach()\n a = alpha.view(-1)[idx].detach()\n expected = scipy.stats.pareto.logpdf(x, a, scale=s)\n self.assertEqual(log_prob, 
expected, atol=1e-3, rtol=0)\n\n self._check_log_prob(Pareto(scale, alpha), ref_log_prob)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_pareto_sample(self):\n set_rng_seed(1) # see Note [Randomized statistical tests]\n for scale, alpha in product([0.1, 1.0, 5.0], [0.1, 1.0, 10.0]):\n self._check_sampler_sampler(Pareto(scale, alpha),\n scipy.stats.pareto(alpha, scale=scale),\n 'Pareto(scale={}, alpha={})'.format(scale, alpha))\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_gumbel(self):\n loc = torch.randn(2, 3, requires_grad=True)\n scale = torch.randn(2, 3).abs().requires_grad_()\n loc_1d = torch.randn(1, requires_grad=True)\n scale_1d = torch.randn(1).abs().requires_grad_()\n self.assertEqual(Gumbel(loc, scale).sample().size(), (2, 3))\n self.assertEqual(Gumbel(loc, scale).sample((5,)).size(), (5, 2, 3))\n self.assertEqual(Gumbel(loc_1d, scale_1d).sample().size(), (1,))\n self.assertEqual(Gumbel(loc_1d, scale_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(Gumbel(1.0, 1.0).sample().size(), ())\n self.assertEqual(Gumbel(1.0, 1.0).sample((1,)).size(), (1,))\n\n def ref_log_prob(idx, x, log_prob):\n l = loc.view(-1)[idx].detach()\n s = scale.view(-1)[idx].detach()\n expected = scipy.stats.gumbel_r.logpdf(x, loc=l, scale=s)\n self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)\n\n self._check_log_prob(Gumbel(loc, scale), ref_log_prob)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_gumbel_sample(self):\n set_rng_seed(1) # see note [Randomized statistical tests]\n for loc, scale in product([-5.0, -1.0, -0.1, 0.1, 1.0, 5.0], [0.1, 1.0, 10.0]):\n self._check_sampler_sampler(Gumbel(loc, scale),\n scipy.stats.gumbel_r(loc=loc, scale=scale),\n 'Gumbel(loc={}, scale={})'.format(loc, scale))\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_fishersnedecor(self):\n df1 = torch.randn(2, 3).abs().requires_grad_()\n df2 = torch.randn(2, 3).abs().requires_grad_()\n df1_1d = 
torch.randn(1).abs()\n df2_1d = torch.randn(1).abs()\n self.assertTrue(is_all_nan(FisherSnedecor(1, 2).mean))\n self.assertTrue(is_all_nan(FisherSnedecor(1, 4).variance))\n self.assertEqual(FisherSnedecor(df1, df2).sample().size(), (2, 3))\n self.assertEqual(FisherSnedecor(df1, df2).sample((5,)).size(), (5, 2, 3))\n self.assertEqual(FisherSnedecor(df1_1d, df2_1d).sample().size(), (1,))\n self.assertEqual(FisherSnedecor(df1_1d, df2_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(FisherSnedecor(1.0, 1.0).sample().size(), ())\n self.assertEqual(FisherSnedecor(1.0, 1.0).sample((1,)).size(), (1,))\n\n def ref_log_prob(idx, x, log_prob):\n f1 = df1.view(-1)[idx].detach()\n f2 = df2.view(-1)[idx].detach()\n expected = scipy.stats.f.logpdf(x, f1, f2)\n self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)\n\n self._check_log_prob(FisherSnedecor(df1, df2), ref_log_prob)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_fishersnedecor_sample(self):\n set_rng_seed(1) # see note [Randomized statistical tests]\n for df1, df2 in product([0.1, 0.5, 1.0, 5.0, 10.0], [0.1, 0.5, 1.0, 5.0, 10.0]):\n self._check_sampler_sampler(FisherSnedecor(df1, df2),\n scipy.stats.f(df1, df2),\n 'FisherSnedecor(loc={}, scale={})'.format(df1, df2))\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_chi2_shape(self):\n df = torch.randn(2, 3).exp().requires_grad_()\n df_1d = torch.randn(1).exp().requires_grad_()\n self.assertEqual(Chi2(df).sample().size(), (2, 3))\n self.assertEqual(Chi2(df).sample((5,)).size(), (5, 2, 3))\n self.assertEqual(Chi2(df_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(Chi2(df_1d).sample().size(), (1,))\n self.assertEqual(Chi2(torch.tensor(0.5, requires_grad=True)).sample().size(), ())\n self.assertEqual(Chi2(0.5).sample().size(), ())\n self.assertEqual(Chi2(0.5).sample((1,)).size(), (1,))\n\n def ref_log_prob(idx, x, log_prob):\n d = df.view(-1)[idx].detach()\n expected = scipy.stats.chi2.logpdf(x, d)\n 
self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)\n\n self._check_log_prob(Chi2(df), ref_log_prob)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_chi2_sample(self):\n set_rng_seed(0) # see Note [Randomized statistical tests]\n for df in [0.1, 1.0, 5.0]:\n self._check_sampler_sampler(Chi2(df),\n scipy.stats.chi2(df),\n 'Chi2(df={})'.format(df))\n\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_studentT(self):\n df = torch.randn(2, 3).exp().requires_grad_()\n df_1d = torch.randn(1).exp().requires_grad_()\n self.assertTrue(is_all_nan(StudentT(1).mean))\n self.assertTrue(is_all_nan(StudentT(1).variance))\n self.assertEqual(StudentT(2).variance, inf)\n self.assertEqual(StudentT(df).sample().size(), (2, 3))\n self.assertEqual(StudentT(df).sample((5,)).size(), (5, 2, 3))\n self.assertEqual(StudentT(df_1d).sample((1,)).size(), (1, 1))\n self.assertEqual(StudentT(df_1d).sample().size(), (1,))\n self.assertEqual(StudentT(torch.tensor(0.5, requires_grad=True)).sample().size(), ())\n self.assertEqual(StudentT(0.5).sample().size(), ())\n self.assertEqual(StudentT(0.5).sample((1,)).size(), (1,))\n\n def ref_log_prob(idx, x, log_prob):\n d = df.view(-1)[idx].detach()\n expected = scipy.stats.t.logpdf(x, d)\n self.assertEqual(log_prob, expected, atol=1e-3, rtol=0)\n\n self._check_log_prob(StudentT(df), ref_log_prob)\n\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_studentT_sample(self):\n set_rng_seed(11) # see Note [Randomized statistical tests]\n for df, loc, scale in product([0.1, 1.0, 5.0, 10.0], [-1.0, 0.0, 1.0], [0.1, 1.0, 10.0]):\n self._check_sampler_sampler(StudentT(df=df, loc=loc, scale=scale),\n scipy.stats.t(df=df, loc=loc, scale=scale),\n 'StudentT(df={}, loc={}, scale={})'.format(df, loc, scale))\n\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_studentT_log_prob(self):\n set_rng_seed(0) # see Note [Randomized statistical tests]\n num_samples = 10\n for df, loc, scale in 
product([0.1, 1.0, 5.0, 10.0], [-1.0, 0.0, 1.0], [0.1, 1.0, 10.0]):\n dist = StudentT(df=df, loc=loc, scale=scale)\n x = dist.sample((num_samples,))\n actual_log_prob = dist.log_prob(x)\n for i in range(num_samples):\n expected_log_prob = scipy.stats.t.logpdf(x[i], df=df, loc=loc, scale=scale)\n self.assertEqual(float(actual_log_prob[i]), float(expected_log_prob), atol=1e-3, rtol=0)\n\n def test_dirichlet_shape(self):\n alpha = torch.randn(2, 3).exp().requires_grad_()\n alpha_1d = torch.randn(4).exp().requires_grad_()\n self.assertEqual(Dirichlet(alpha).sample().size(), (2, 3))\n self.assertEqual(Dirichlet(alpha).sample((5,)).size(), (5, 2, 3))\n self.assertEqual(Dirichlet(alpha_1d).sample().size(), (4,))\n self.assertEqual(Dirichlet(alpha_1d).sample((1,)).size(), (1, 4))\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_dirichlet_log_prob(self):\n num_samples = 10\n alpha = torch.exp(torch.randn(5))\n dist = Dirichlet(alpha)\n x = dist.sample((num_samples,))\n actual_log_prob = dist.log_prob(x)\n for i in range(num_samples):\n expected_log_prob = scipy.stats.dirichlet.logpdf(x[i].numpy(), alpha.numpy())\n self.assertEqual(actual_log_prob[i], expected_log_prob, atol=1e-3, rtol=0)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_dirichlet_sample(self):\n set_rng_seed(0) # see Note [Randomized statistical tests]\n alpha = torch.exp(torch.randn(3))\n self._check_sampler_sampler(Dirichlet(alpha),\n scipy.stats.dirichlet(alpha.numpy()),\n 'Dirichlet(alpha={})'.format(list(alpha)),\n multivariate=True)\n\n def test_beta_shape(self):\n con1 = torch.randn(2, 3).exp().requires_grad_()\n con0 = torch.randn(2, 3).exp().requires_grad_()\n con1_1d = torch.randn(4).exp().requires_grad_()\n con0_1d = torch.randn(4).exp().requires_grad_()\n self.assertEqual(Beta(con1, con0).sample().size(), (2, 3))\n self.assertEqual(Beta(con1, con0).sample((5,)).size(), (5, 2, 3))\n self.assertEqual(Beta(con1_1d, con0_1d).sample().size(), (4,))\n 
self.assertEqual(Beta(con1_1d, con0_1d).sample((1,)).size(), (1, 4))\n self.assertEqual(Beta(0.1, 0.3).sample().size(), ())\n self.assertEqual(Beta(0.1, 0.3).sample((5,)).size(), (5,))\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_beta_log_prob(self):\n for _ in range(100):\n con1 = np.exp(np.random.normal())\n con0 = np.exp(np.random.normal())\n dist = Beta(con1, con0)\n x = dist.sample()\n actual_log_prob = dist.log_prob(x).sum()\n expected_log_prob = scipy.stats.beta.logpdf(x, con1, con0)\n self.assertEqual(float(actual_log_prob), float(expected_log_prob), atol=1e-3, rtol=0)\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_beta_sample(self):\n set_rng_seed(1) # see Note [Randomized statistical tests]\n for con1, con0 in product([0.1, 1.0, 10.0], [0.1, 1.0, 10.0]):\n self._check_sampler_sampler(Beta(con1, con0),\n scipy.stats.beta(con1, con0),\n 'Beta(alpha={}, beta={})'.format(con1, con0))\n # Check that small alphas do not cause NANs.\n for Tensor in [torch.FloatTensor, torch.DoubleTensor]:\n x = Beta(Tensor([1e-6]), Tensor([1e-6])).sample()[0]\n self.assertTrue(np.isfinite(x) and x > 0, 'Invalid Beta.sample(): {}'.format(x))\n\n def test_beta_underflow(self):\n # For low values of (alpha, beta), the gamma samples can underflow\n # with float32 and result in a spurious mode at 0.5. 
To prevent this,\n # torch._sample_dirichlet works with double precision for intermediate\n # calculations.\n set_rng_seed(1)\n num_samples = 50000\n for dtype in [torch.float, torch.double]:\n conc = torch.tensor(1e-2, dtype=dtype)\n beta_samples = Beta(conc, conc).sample([num_samples])\n self.assertEqual((beta_samples == 0).sum(), 0)\n self.assertEqual((beta_samples == 1).sum(), 0)\n # assert support is concentrated around 0 and 1\n frac_zeros = float((beta_samples < 0.1).sum()) / num_samples\n frac_ones = float((beta_samples > 0.9).sum()) / num_samples\n self.assertEqual(frac_zeros, 0.5, atol=0.05, rtol=0)\n self.assertEqual(frac_ones, 0.5, atol=0.05, rtol=0)\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA not found\")\n def test_beta_underflow_gpu(self):\n set_rng_seed(1)\n num_samples = 50000\n conc = torch.tensor(1e-2, dtype=torch.float64).cuda()\n beta_samples = Beta(conc, conc).sample([num_samples])\n self.assertEqual((beta_samples == 0).sum(), 0)\n self.assertEqual((beta_samples == 1).sum(), 0)\n # assert support is concentrated around 0 and 1\n frac_zeros = float((beta_samples < 0.1).sum()) / num_samples\n frac_ones = float((beta_samples > 0.9).sum()) / num_samples\n # TODO: increase precision once imbalance on GPU is fixed.\n self.assertEqual(frac_zeros, 0.5, atol=0.12, rtol=0)\n self.assertEqual(frac_ones, 0.5, atol=0.12, rtol=0)\n\n def test_continuous_bernoulli(self):\n p = torch.tensor([0.7, 0.2, 0.4], requires_grad=True)\n r = torch.tensor(0.3, requires_grad=True)\n s = 0.3\n self.assertEqual(ContinuousBernoulli(p).sample((8,)).size(), (8, 3))\n self.assertFalse(ContinuousBernoulli(p).sample().requires_grad)\n self.assertEqual(ContinuousBernoulli(r).sample((8,)).size(), (8,))\n self.assertEqual(ContinuousBernoulli(r).sample().size(), ())\n self.assertEqual(ContinuousBernoulli(r).sample((3, 2)).size(), (3, 2,))\n self.assertEqual(ContinuousBernoulli(s).sample().size(), ())\n self._gradcheck_log_prob(ContinuousBernoulli, (p,))\n\n def ref_log_prob(idx, 
val, log_prob):\n prob = p[idx]\n if prob > 0.499 and prob < 0.501: # using default value of lim here\n log_norm_const = math.log(2.) + 4. / 3. * math.pow(prob - 0.5, 2) + 104. / 45. * math.pow(prob - 0.5, 4)\n else:\n log_norm_const = math.log(2. * math.atanh(1. - 2. * prob) / (1. - 2.0 * prob))\n res = val * math.log(prob) + (1. - val) * math.log1p(-prob) + log_norm_const\n self.assertEqual(log_prob, res)\n\n self._check_log_prob(ContinuousBernoulli(p), ref_log_prob)\n self._check_log_prob(ContinuousBernoulli(logits=p.log() - (-p).log1p()), ref_log_prob)\n\n # check entropy computation\n self.assertEqual(ContinuousBernoulli(p).entropy(), torch.tensor([-0.02938, -0.07641, -0.00682]), atol=1e-4, rtol=0)\n # entropy below corresponds to the clamped value of prob when using float 64\n # the value for float32 should be -1.76898\n self.assertEqual(ContinuousBernoulli(torch.tensor([0.0])).entropy(), torch.tensor([-2.58473]), atol=1e-5, rtol=0)\n self.assertEqual(ContinuousBernoulli(s).entropy(), torch.tensor(-0.02938), atol=1e-4, rtol=0)\n\n def test_continuous_bernoulli_3d(self):\n p = torch.full((2, 3, 5), 0.5).requires_grad_()\n self.assertEqual(ContinuousBernoulli(p).sample().size(), (2, 3, 5))\n self.assertEqual(ContinuousBernoulli(p).sample(sample_shape=(2, 5)).size(),\n (2, 5, 2, 3, 5))\n self.assertEqual(ContinuousBernoulli(p).sample((2,)).size(), (2, 2, 3, 5))\n\n def test_independent_shape(self):\n for Dist, params in EXAMPLES:\n for param in params:\n base_dist = Dist(**param)\n x = base_dist.sample()\n base_log_prob_shape = base_dist.log_prob(x).shape\n for reinterpreted_batch_ndims in range(len(base_dist.batch_shape) + 1):\n indep_dist = Independent(base_dist, reinterpreted_batch_ndims)\n indep_log_prob_shape = base_log_prob_shape[:len(base_log_prob_shape) - reinterpreted_batch_ndims]\n self.assertEqual(indep_dist.log_prob(x).shape, indep_log_prob_shape)\n self.assertEqual(indep_dist.sample().shape, base_dist.sample().shape)\n 
self.assertEqual(indep_dist.has_rsample, base_dist.has_rsample)\n if indep_dist.has_rsample:\n self.assertEqual(indep_dist.sample().shape, base_dist.sample().shape)\n try:\n self.assertEqual(indep_dist.enumerate_support().shape, base_dist.enumerate_support().shape)\n self.assertEqual(indep_dist.mean.shape, base_dist.mean.shape)\n except NotImplementedError:\n pass\n try:\n self.assertEqual(indep_dist.variance.shape, base_dist.variance.shape)\n except NotImplementedError:\n pass\n try:\n self.assertEqual(indep_dist.entropy().shape, indep_log_prob_shape)\n except NotImplementedError:\n pass\n\n def test_independent_expand(self):\n for Dist, params in EXAMPLES:\n for param in params:\n base_dist = Dist(**param)\n for reinterpreted_batch_ndims in range(len(base_dist.batch_shape) + 1):\n for s in [torch.Size(), torch.Size((2,)), torch.Size((2, 3))]:\n indep_dist = Independent(base_dist, reinterpreted_batch_ndims)\n expanded_shape = s + indep_dist.batch_shape\n expanded = indep_dist.expand(expanded_shape)\n expanded_sample = expanded.sample()\n expected_shape = expanded_shape + indep_dist.event_shape\n self.assertEqual(expanded_sample.shape, expected_shape)\n self.assertEqual(expanded.log_prob(expanded_sample),\n indep_dist.log_prob(expanded_sample))\n self.assertEqual(expanded.event_shape, indep_dist.event_shape)\n self.assertEqual(expanded.batch_shape, expanded_shape)\n\n def test_cdf_icdf_inverse(self):\n # Tests the invertibility property on the distributions\n for Dist, params in EXAMPLES:\n for i, param in enumerate(params):\n dist = Dist(**param)\n samples = dist.sample(sample_shape=(20,))\n try:\n cdf = dist.cdf(samples)\n actual = dist.icdf(cdf)\n except NotImplementedError:\n continue\n rel_error = torch.abs(actual - samples) / (1e-10 + torch.abs(samples))\n self.assertLess(rel_error.max(), 1e-4, msg='\\n'.join([\n '{} example {}/{}, icdf(cdf(x)) != x'.format(Dist.__name__, i + 1, len(params)),\n 'x = {}'.format(samples),\n 'cdf(x) = {}'.format(cdf),\n 
'icdf(cdf(x)) = {}'.format(actual),\n ]))\n\n def test_cdf_log_prob(self):\n # Tests if the differentiation of the CDF gives the PDF at a given value\n for Dist, params in EXAMPLES:\n for i, param in enumerate(params):\n dist = Dist(**param)\n samples = dist.sample()\n if samples.dtype.is_floating_point:\n samples.requires_grad_()\n try:\n cdfs = dist.cdf(samples)\n pdfs = dist.log_prob(samples).exp()\n except NotImplementedError:\n continue\n cdfs_derivative = grad(cdfs.sum(), [samples])[0] # this should not be wrapped in torch.abs()\n self.assertEqual(cdfs_derivative, pdfs, msg='\\n'.join([\n '{} example {}/{}, d(cdf)/dx != pdf(x)'.format(Dist.__name__, i + 1, len(params)),\n 'x = {}'.format(samples),\n 'cdf = {}'.format(cdfs),\n 'pdf = {}'.format(pdfs),\n 'grad(cdf) = {}'.format(cdfs_derivative),\n ]))\n\n def test_valid_parameter_broadcasting(self):\n # Test correct broadcasting of parameter sizes for distributions that have multiple\n # parameters.\n # example type (distribution instance, expected sample shape)\n valid_examples = [\n (Normal(loc=torch.tensor([0., 0.]), scale=1),\n (2,)),\n (Normal(loc=0, scale=torch.tensor([1., 1.])),\n (2,)),\n (Normal(loc=torch.tensor([0., 0.]), scale=torch.tensor([1.])),\n (2,)),\n (Normal(loc=torch.tensor([0., 0.]), scale=torch.tensor([[1.], [1.]])),\n (2, 2)),\n (Normal(loc=torch.tensor([0., 0.]), scale=torch.tensor([[1.]])),\n (1, 2)),\n (Normal(loc=torch.tensor([0.]), scale=torch.tensor([[1.]])),\n (1, 1)),\n (FisherSnedecor(df1=torch.tensor([1., 1.]), df2=1),\n (2,)),\n (FisherSnedecor(df1=1, df2=torch.tensor([1., 1.])),\n (2,)),\n (FisherSnedecor(df1=torch.tensor([1., 1.]), df2=torch.tensor([1.])),\n (2,)),\n (FisherSnedecor(df1=torch.tensor([1., 1.]), df2=torch.tensor([[1.], [1.]])),\n (2, 2)),\n (FisherSnedecor(df1=torch.tensor([1., 1.]), df2=torch.tensor([[1.]])),\n (1, 2)),\n (FisherSnedecor(df1=torch.tensor([1.]), df2=torch.tensor([[1.]])),\n (1, 1)),\n (Gamma(concentration=torch.tensor([1., 1.]), rate=1),\n 
(2,)),\n (Gamma(concentration=1, rate=torch.tensor([1., 1.])),\n (2,)),\n (Gamma(concentration=torch.tensor([1., 1.]), rate=torch.tensor([[1.], [1.], [1.]])),\n (3, 2)),\n (Gamma(concentration=torch.tensor([1., 1.]), rate=torch.tensor([[1.], [1.]])),\n (2, 2)),\n (Gamma(concentration=torch.tensor([1., 1.]), rate=torch.tensor([[1.]])),\n (1, 2)),\n (Gamma(concentration=torch.tensor([1.]), rate=torch.tensor([[1.]])),\n (1, 1)),\n (Gumbel(loc=torch.tensor([0., 0.]), scale=1),\n (2,)),\n (Gumbel(loc=0, scale=torch.tensor([1., 1.])),\n (2,)),\n (Gumbel(loc=torch.tensor([0., 0.]), scale=torch.tensor([1.])),\n (2,)),\n (Gumbel(loc=torch.tensor([0., 0.]), scale=torch.tensor([[1.], [1.]])),\n (2, 2)),\n (Gumbel(loc=torch.tensor([0., 0.]), scale=torch.tensor([[1.]])),\n (1, 2)),\n (Gumbel(loc=torch.tensor([0.]), scale=torch.tensor([[1.]])),\n (1, 1)),\n (Laplace(loc=torch.tensor([0., 0.]), scale=1),\n (2,)),\n (Laplace(loc=0, scale=torch.tensor([1., 1.])),\n (2,)),\n (Laplace(loc=torch.tensor([0., 0.]), scale=torch.tensor([1.])),\n (2,)),\n (Laplace(loc=torch.tensor([0., 0.]), scale=torch.tensor([[1.], [1.]])),\n (2, 2)),\n (Laplace(loc=torch.tensor([0., 0.]), scale=torch.tensor([[1.]])),\n (1, 2)),\n (Laplace(loc=torch.tensor([0.]), scale=torch.tensor([[1.]])),\n (1, 1)),\n (Pareto(scale=torch.tensor([1., 1.]), alpha=1),\n (2,)),\n (Pareto(scale=1, alpha=torch.tensor([1., 1.])),\n (2,)),\n (Pareto(scale=torch.tensor([1., 1.]), alpha=torch.tensor([1.])),\n (2,)),\n (Pareto(scale=torch.tensor([1., 1.]), alpha=torch.tensor([[1.], [1.]])),\n (2, 2)),\n (Pareto(scale=torch.tensor([1., 1.]), alpha=torch.tensor([[1.]])),\n (1, 2)),\n (Pareto(scale=torch.tensor([1.]), alpha=torch.tensor([[1.]])),\n (1, 1)),\n (StudentT(df=torch.tensor([1., 1.]), loc=1),\n (2,)),\n (StudentT(df=1, scale=torch.tensor([1., 1.])),\n (2,)),\n (StudentT(df=torch.tensor([1., 1.]), loc=torch.tensor([1.])),\n (2,)),\n (StudentT(df=torch.tensor([1., 1.]), scale=torch.tensor([[1.], [1.]])),\n (2, 2)),\n 
(StudentT(df=torch.tensor([1., 1.]), loc=torch.tensor([[1.]])),\n (1, 2)),\n (StudentT(df=torch.tensor([1.]), scale=torch.tensor([[1.]])),\n (1, 1)),\n (StudentT(df=1., loc=torch.zeros(5, 1), scale=torch.ones(3)),\n (5, 3)),\n ]\n\n for dist, expected_size in valid_examples:\n actual_size = dist.sample().size()\n self.assertEqual(actual_size, expected_size,\n msg='{} actual size: {} != expected size: {}'.format(dist, actual_size, expected_size))\n\n sample_shape = torch.Size((2,))\n expected_size = sample_shape + expected_size\n actual_size = dist.sample(sample_shape).size()\n self.assertEqual(actual_size, expected_size,\n msg='{} actual size: {} != expected size: {}'.format(dist, actual_size, expected_size))\n\n def test_invalid_parameter_broadcasting(self):\n # invalid broadcasting cases; should throw error\n # example type (distribution class, distribution params)\n invalid_examples = [\n (Normal, {\n 'loc': torch.tensor([[0, 0]]),\n 'scale': torch.tensor([1, 1, 1, 1])\n }),\n (Normal, {\n 'loc': torch.tensor([[[0, 0, 0], [0, 0, 0]]]),\n 'scale': torch.tensor([1, 1])\n }),\n (FisherSnedecor, {\n 'df1': torch.tensor([1, 1]),\n 'df2': torch.tensor([1, 1, 1]),\n }),\n (Gumbel, {\n 'loc': torch.tensor([[0, 0]]),\n 'scale': torch.tensor([1, 1, 1, 1])\n }),\n (Gumbel, {\n 'loc': torch.tensor([[[0, 0, 0], [0, 0, 0]]]),\n 'scale': torch.tensor([1, 1])\n }),\n (Gamma, {\n 'concentration': torch.tensor([0, 0]),\n 'rate': torch.tensor([1, 1, 1])\n }),\n (Laplace, {\n 'loc': torch.tensor([0, 0]),\n 'scale': torch.tensor([1, 1, 1])\n }),\n (Pareto, {\n 'scale': torch.tensor([1, 1]),\n 'alpha': torch.tensor([1, 1, 1])\n }),\n (StudentT, {\n 'df': torch.tensor([1, 1]),\n 'scale': torch.tensor([1, 1, 1])\n }),\n (StudentT, {\n 'df': torch.tensor([1, 1]),\n 'loc': torch.tensor([1, 1, 1])\n })\n ]\n\n for dist, kwargs in invalid_examples:\n self.assertRaises(RuntimeError, dist, **kwargs)\n\n\n# These tests are only needed for a few distributions that implement custom\n# 
reparameterized gradients. Most .rsample() implementations simply rely on\n# the reparameterization trick and do not need to be tested for accuracy.\nclass TestRsample(TestCase):\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_gamma(self):\n num_samples = 100\n for alpha in [1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]:\n alphas = torch.tensor([alpha] * num_samples, dtype=torch.float, requires_grad=True)\n betas = alphas.new_ones(num_samples)\n x = Gamma(alphas, betas).rsample()\n x.sum().backward()\n x, ind = x.sort()\n x = x.detach().numpy()\n actual_grad = alphas.grad[ind].numpy()\n # Compare with expected gradient dx/dalpha along constant cdf(x,alpha).\n cdf = scipy.stats.gamma.cdf\n pdf = scipy.stats.gamma.pdf\n eps = 0.01 * alpha / (1.0 + alpha ** 0.5)\n cdf_alpha = (cdf(x, alpha + eps) - cdf(x, alpha - eps)) / (2 * eps)\n cdf_x = pdf(x, alpha)\n expected_grad = -cdf_alpha / cdf_x\n rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)\n self.assertLess(np.max(rel_error), 0.0005, '\\n'.join([\n 'Bad gradient dx/alpha for x ~ Gamma({}, 1)'.format(alpha),\n 'x {}'.format(x),\n 'expected {}'.format(expected_grad),\n 'actual {}'.format(actual_grad),\n 'rel error {}'.format(rel_error),\n 'max error {}'.format(rel_error.max()),\n 'at alpha={}, x={}'.format(alpha, x[rel_error.argmax()]),\n ]))\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_chi2(self):\n num_samples = 100\n for df in [1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]:\n dfs = torch.tensor([df] * num_samples, dtype=torch.float, requires_grad=True)\n x = Chi2(dfs).rsample()\n x.sum().backward()\n x, ind = x.sort()\n x = x.detach().numpy()\n actual_grad = dfs.grad[ind].numpy()\n # Compare with expected gradient dx/ddf along constant cdf(x,df).\n cdf = scipy.stats.chi2.cdf\n pdf = scipy.stats.chi2.pdf\n eps = 0.01 * df / (1.0 + df ** 0.5)\n cdf_df = (cdf(x, df + eps) - cdf(x, df - eps)) / (2 * eps)\n cdf_x = pdf(x, df)\n expected_grad = -cdf_df / cdf_x\n rel_error 
= np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)\n self.assertLess(np.max(rel_error), 0.001, '\\n'.join([\n 'Bad gradient dx/ddf for x ~ Chi2({})'.format(df),\n 'x {}'.format(x),\n 'expected {}'.format(expected_grad),\n 'actual {}'.format(actual_grad),\n 'rel error {}'.format(rel_error),\n 'max error {}'.format(rel_error.max()),\n ]))\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_dirichlet_on_diagonal(self):\n num_samples = 20\n grid = [1e-1, 1e0, 1e1]\n for a0, a1, a2 in product(grid, grid, grid):\n alphas = torch.tensor([[a0, a1, a2]] * num_samples, dtype=torch.float, requires_grad=True)\n x = Dirichlet(alphas).rsample()[:, 0]\n x.sum().backward()\n x, ind = x.sort()\n x = x.detach().numpy()\n actual_grad = alphas.grad[ind].numpy()[:, 0]\n # Compare with expected gradient dx/dalpha0 along constant cdf(x,alpha).\n # This reduces to a distribution Beta(alpha[0], alpha[1] + alpha[2]).\n cdf = scipy.stats.beta.cdf\n pdf = scipy.stats.beta.pdf\n alpha, beta = a0, a1 + a2\n eps = 0.01 * alpha / (1.0 + np.sqrt(alpha))\n cdf_alpha = (cdf(x, alpha + eps, beta) - cdf(x, alpha - eps, beta)) / (2 * eps)\n cdf_x = pdf(x, alpha, beta)\n expected_grad = -cdf_alpha / cdf_x\n rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)\n self.assertLess(np.max(rel_error), 0.001, '\\n'.join([\n 'Bad gradient dx[0]/dalpha[0] for Dirichlet([{}, {}, {}])'.format(a0, a1, a2),\n 'x {}'.format(x),\n 'expected {}'.format(expected_grad),\n 'actual {}'.format(actual_grad),\n 'rel error {}'.format(rel_error),\n 'max error {}'.format(rel_error.max()),\n 'at x={}'.format(x[rel_error.argmax()]),\n ]))\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_beta_wrt_alpha(self):\n num_samples = 20\n grid = [1e-2, 1e-1, 1e0, 1e1, 1e2]\n for con1, con0 in product(grid, grid):\n con1s = torch.tensor([con1] * num_samples, dtype=torch.float, requires_grad=True)\n con0s = con1s.new_tensor([con0] * num_samples)\n x = Beta(con1s, 
con0s).rsample()\n x.sum().backward()\n x, ind = x.sort()\n x = x.detach().numpy()\n actual_grad = con1s.grad[ind].numpy()\n # Compare with expected gradient dx/dcon1 along constant cdf(x,con1,con0).\n cdf = scipy.stats.beta.cdf\n pdf = scipy.stats.beta.pdf\n eps = 0.01 * con1 / (1.0 + np.sqrt(con1))\n cdf_alpha = (cdf(x, con1 + eps, con0) - cdf(x, con1 - eps, con0)) / (2 * eps)\n cdf_x = pdf(x, con1, con0)\n expected_grad = -cdf_alpha / cdf_x\n rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)\n self.assertLess(np.max(rel_error), 0.005, '\\n'.join([\n 'Bad gradient dx/dcon1 for x ~ Beta({}, {})'.format(con1, con0),\n 'x {}'.format(x),\n 'expected {}'.format(expected_grad),\n 'actual {}'.format(actual_grad),\n 'rel error {}'.format(rel_error),\n 'max error {}'.format(rel_error.max()),\n 'at x = {}'.format(x[rel_error.argmax()]),\n ]))\n\n @unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\n def test_beta_wrt_beta(self):\n num_samples = 20\n grid = [1e-2, 1e-1, 1e0, 1e1, 1e2]\n for con1, con0 in product(grid, grid):\n con0s = torch.tensor([con0] * num_samples, dtype=torch.float, requires_grad=True)\n con1s = con0s.new_tensor([con1] * num_samples)\n x = Beta(con1s, con0s).rsample()\n x.sum().backward()\n x, ind = x.sort()\n x = x.detach().numpy()\n actual_grad = con0s.grad[ind].numpy()\n # Compare with expected gradient dx/dcon0 along constant cdf(x,con1,con0).\n cdf = scipy.stats.beta.cdf\n pdf = scipy.stats.beta.pdf\n eps = 0.01 * con0 / (1.0 + np.sqrt(con0))\n cdf_beta = (cdf(x, con1, con0 + eps) - cdf(x, con1, con0 - eps)) / (2 * eps)\n cdf_x = pdf(x, con1, con0)\n expected_grad = -cdf_beta / cdf_x\n rel_error = np.abs(actual_grad - expected_grad) / (expected_grad + 1e-30)\n self.assertLess(np.max(rel_error), 0.005, '\\n'.join([\n 'Bad gradient dx/dcon0 for x ~ Beta({}, {})'.format(con1, con0),\n 'x {}'.format(x),\n 'expected {}'.format(expected_grad),\n 'actual {}'.format(actual_grad),\n 'rel error {}'.format(rel_error),\n 'max error 
{}'.format(rel_error.max()),\n 'at x = {!r}'.format(x[rel_error.argmax()]),\n ]))\n\n def test_dirichlet_multivariate(self):\n alpha_crit = 0.25 * (5.0 ** 0.5 - 1.0)\n num_samples = 100000\n for shift in [-0.1, -0.05, -0.01, 0.0, 0.01, 0.05, 0.10]:\n alpha = alpha_crit + shift\n alpha = torch.tensor([alpha], dtype=torch.float, requires_grad=True)\n alpha_vec = torch.cat([alpha, alpha, alpha.new([1])])\n z = Dirichlet(alpha_vec.expand(num_samples, 3)).rsample()\n mean_z3 = 1.0 / (2.0 * alpha + 1.0)\n loss = torch.pow(z[:, 2] - mean_z3, 2.0).mean()\n actual_grad = grad(loss, [alpha])[0]\n # Compute expected gradient by hand.\n num = 1.0 - 2.0 * alpha - 4.0 * alpha**2\n den = (1.0 + alpha)**2 * (1.0 + 2.0 * alpha)**3\n expected_grad = num / den\n self.assertEqual(actual_grad, expected_grad, atol=0.002, rtol=0, msg='\\n'.join([\n \"alpha = alpha_c + %.2g\" % shift,\n \"expected_grad: %.5g\" % expected_grad,\n \"actual_grad: %.5g\" % actual_grad,\n \"error = %.2g\" % torch.abs(expected_grad - actual_grad).max(),\n ]))\n\n def test_dirichlet_tangent_field(self):\n num_samples = 20\n alpha_grid = [0.5, 1.0, 2.0]\n\n # v = dx/dalpha[0] is the reparameterized gradient aka tangent field.\n def compute_v(x, alpha):\n return torch.stack([\n _Dirichlet_backward(x, alpha, torch.eye(3, 3)[i].expand_as(x))[:, 0]\n for i in range(3)\n ], dim=-1)\n\n for a1, a2, a3 in product(alpha_grid, alpha_grid, alpha_grid):\n alpha = torch.tensor([a1, a2, a3], requires_grad=True).expand(num_samples, 3)\n x = Dirichlet(alpha).rsample()\n dlogp_da = grad([Dirichlet(alpha).log_prob(x.detach()).sum()],\n [alpha], retain_graph=True)[0][:, 0]\n dlogp_dx = grad([Dirichlet(alpha.detach()).log_prob(x).sum()],\n [x], retain_graph=True)[0]\n v = torch.stack([grad([x[:, i].sum()], [alpha], retain_graph=True)[0][:, 0]\n for i in range(3)], dim=-1)\n # Compute ramaining properties by finite difference.\n self.assertEqual(compute_v(x, alpha), v, msg='Bug in compute_v() helper')\n # dx is an arbitrary 
orthonormal basis tangent to the simplex.\n dx = torch.tensor([[2., -1., -1.], [0., 1., -1.]])\n dx /= dx.norm(2, -1, True)\n eps = 1e-2 * x.min(-1, True)[0] # avoid boundary\n dv0 = (compute_v(x + eps * dx[0], alpha) - compute_v(x - eps * dx[0], alpha)) / (2 * eps)\n dv1 = (compute_v(x + eps * dx[1], alpha) - compute_v(x - eps * dx[1], alpha)) / (2 * eps)\n div_v = (dv0 * dx[0] + dv1 * dx[1]).sum(-1)\n # This is a modification of the standard continuity equation, using the product rule to allow\n # expression in terms of log_prob rather than the less numerically stable log_prob.exp().\n error = dlogp_da + (dlogp_dx * v).sum(-1) + div_v\n self.assertLess(torch.abs(error).max(), 0.005, '\\n'.join([\n 'Dirichlet([{}, {}, {}]) gradient violates continuity equation:'.format(a1, a2, a3),\n 'error = {}'.format(error),\n ]))\n\n\nclass TestDistributionShapes(TestCase):\n def setUp(self):\n super(TestDistributionShapes, self).setUp()\n self.scalar_sample = 1\n self.tensor_sample_1 = torch.ones(3, 2)\n self.tensor_sample_2 = torch.ones(3, 2, 3)\n Distribution.set_default_validate_args(True)\n\n def tearDown(self):\n super(TestDistributionShapes, self).tearDown()\n Distribution.set_default_validate_args(False)\n\n def test_entropy_shape(self):\n for Dist, params in EXAMPLES:\n for i, param in enumerate(params):\n dist = Dist(validate_args=False, **param)\n try:\n actual_shape = dist.entropy().size()\n expected_shape = dist.batch_shape if dist.batch_shape else torch.Size()\n message = '{} example {}/{}, shape mismatch. 
expected {}, actual {}'.format(\n Dist.__name__, i + 1, len(params), expected_shape, actual_shape)\n self.assertEqual(actual_shape, expected_shape, msg=message)\n except NotImplementedError:\n continue\n\n def test_bernoulli_shape_scalar_params(self):\n bernoulli = Bernoulli(0.3)\n self.assertEqual(bernoulli._batch_shape, torch.Size())\n self.assertEqual(bernoulli._event_shape, torch.Size())\n self.assertEqual(bernoulli.sample().size(), torch.Size())\n self.assertEqual(bernoulli.sample((3, 2)).size(), torch.Size((3, 2)))\n self.assertRaises(ValueError, bernoulli.log_prob, self.scalar_sample)\n self.assertEqual(bernoulli.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertEqual(bernoulli.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))\n\n def test_bernoulli_shape_tensor_params(self):\n bernoulli = Bernoulli(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]]))\n self.assertEqual(bernoulli._batch_shape, torch.Size((3, 2)))\n self.assertEqual(bernoulli._event_shape, torch.Size(()))\n self.assertEqual(bernoulli.sample().size(), torch.Size((3, 2)))\n self.assertEqual(bernoulli.sample((3, 2)).size(), torch.Size((3, 2, 3, 2)))\n self.assertEqual(bernoulli.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertRaises(ValueError, bernoulli.log_prob, self.tensor_sample_2)\n self.assertEqual(bernoulli.log_prob(torch.ones(3, 1, 1)).size(), torch.Size((3, 3, 2)))\n\n def test_geometric_shape_scalar_params(self):\n geometric = Geometric(0.3)\n self.assertEqual(geometric._batch_shape, torch.Size())\n self.assertEqual(geometric._event_shape, torch.Size())\n self.assertEqual(geometric.sample().size(), torch.Size())\n self.assertEqual(geometric.sample((3, 2)).size(), torch.Size((3, 2)))\n self.assertRaises(ValueError, geometric.log_prob, self.scalar_sample)\n self.assertEqual(geometric.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertEqual(geometric.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 
3)))\n\n def test_geometric_shape_tensor_params(self):\n geometric = Geometric(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]]))\n self.assertEqual(geometric._batch_shape, torch.Size((3, 2)))\n self.assertEqual(geometric._event_shape, torch.Size(()))\n self.assertEqual(geometric.sample().size(), torch.Size((3, 2)))\n self.assertEqual(geometric.sample((3, 2)).size(), torch.Size((3, 2, 3, 2)))\n self.assertEqual(geometric.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertRaises(ValueError, geometric.log_prob, self.tensor_sample_2)\n self.assertEqual(geometric.log_prob(torch.ones(3, 1, 1)).size(), torch.Size((3, 3, 2)))\n\n def test_beta_shape_scalar_params(self):\n dist = Beta(0.1, 0.1)\n self.assertEqual(dist._batch_shape, torch.Size())\n self.assertEqual(dist._event_shape, torch.Size())\n self.assertEqual(dist.sample().size(), torch.Size())\n self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2)))\n self.assertRaises(ValueError, dist.log_prob, self.scalar_sample)\n self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertEqual(dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))\n\n def test_beta_shape_tensor_params(self):\n dist = Beta(torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]),\n torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]))\n self.assertEqual(dist._batch_shape, torch.Size((3, 2)))\n self.assertEqual(dist._event_shape, torch.Size(()))\n self.assertEqual(dist.sample().size(), torch.Size((3, 2)))\n self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3, 2)))\n self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2)\n self.assertEqual(dist.log_prob(torch.ones(3, 1, 1)).size(), torch.Size((3, 3, 2)))\n\n def test_binomial_shape(self):\n dist = Binomial(10, torch.tensor([0.6, 0.3]))\n self.assertEqual(dist._batch_shape, torch.Size((2,)))\n 
self.assertEqual(dist._event_shape, torch.Size(()))\n self.assertEqual(dist.sample().size(), torch.Size((2,)))\n self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 2)))\n self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2)\n\n def test_binomial_shape_vectorized_n(self):\n dist = Binomial(torch.tensor([[10, 3, 1], [4, 8, 4]]), torch.tensor([0.6, 0.3, 0.1]))\n self.assertEqual(dist._batch_shape, torch.Size((2, 3)))\n self.assertEqual(dist._event_shape, torch.Size(()))\n self.assertEqual(dist.sample().size(), torch.Size((2, 3)))\n self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 2, 3)))\n self.assertEqual(dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))\n self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_1)\n\n def test_multinomial_shape(self):\n dist = Multinomial(10, torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]]))\n self.assertEqual(dist._batch_shape, torch.Size((3,)))\n self.assertEqual(dist._event_shape, torch.Size((2,)))\n self.assertEqual(dist.sample().size(), torch.Size((3, 2)))\n self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3, 2)))\n self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3,)))\n self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2)\n self.assertEqual(dist.log_prob(torch.ones(3, 1, 2)).size(), torch.Size((3, 3)))\n\n def test_categorical_shape(self):\n # unbatched\n dist = Categorical(torch.tensor([0.6, 0.3, 0.1]))\n self.assertEqual(dist._batch_shape, torch.Size(()))\n self.assertEqual(dist._event_shape, torch.Size(()))\n self.assertEqual(dist.sample().size(), torch.Size())\n self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2,)))\n self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertEqual(dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))\n 
self.assertEqual(dist.log_prob(torch.ones(3, 1)).size(), torch.Size((3, 1)))\n # batched\n dist = Categorical(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]]))\n self.assertEqual(dist._batch_shape, torch.Size((3,)))\n self.assertEqual(dist._event_shape, torch.Size(()))\n self.assertEqual(dist.sample().size(), torch.Size((3,)))\n self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3,)))\n self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_1)\n self.assertEqual(dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))\n self.assertEqual(dist.log_prob(torch.ones(3, 1)).size(), torch.Size((3, 3)))\n\n def test_one_hot_categorical_shape(self):\n # unbatched\n dist = OneHotCategorical(torch.tensor([0.6, 0.3, 0.1]))\n self.assertEqual(dist._batch_shape, torch.Size(()))\n self.assertEqual(dist._event_shape, torch.Size((3,)))\n self.assertEqual(dist.sample().size(), torch.Size((3,)))\n self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3)))\n self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_1)\n simplex_sample = self.tensor_sample_2 / self.tensor_sample_2.sum(-1, keepdim=True)\n self.assertEqual(dist.log_prob(simplex_sample).size(), torch.Size((3, 2,)))\n self.assertEqual(dist.log_prob(dist.enumerate_support()).size(), torch.Size((3,)))\n simplex_sample = torch.ones(3, 3) / 3\n self.assertEqual(dist.log_prob(simplex_sample).size(), torch.Size((3,)))\n # batched\n dist = OneHotCategorical(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]]))\n self.assertEqual(dist._batch_shape, torch.Size((3,)))\n self.assertEqual(dist._event_shape, torch.Size((2,)))\n self.assertEqual(dist.sample().size(), torch.Size((3, 2)))\n self.assertEqual(dist.sample((3, 2)).size(), torch.Size((3, 2, 3, 2)))\n simplex_sample = self.tensor_sample_1 / self.tensor_sample_1.sum(-1, keepdim=True)\n self.assertEqual(dist.log_prob(simplex_sample).size(), torch.Size((3,)))\n self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2)\n 
self.assertEqual(dist.log_prob(dist.enumerate_support()).size(), torch.Size((2, 3)))\n simplex_sample = torch.ones(3, 1, 2) / 2\n self.assertEqual(dist.log_prob(simplex_sample).size(), torch.Size((3, 3)))\n\n def test_cauchy_shape_scalar_params(self):\n cauchy = Cauchy(0, 1)\n self.assertEqual(cauchy._batch_shape, torch.Size())\n self.assertEqual(cauchy._event_shape, torch.Size())\n self.assertEqual(cauchy.sample().size(), torch.Size())\n self.assertEqual(cauchy.sample(torch.Size((3, 2))).size(), torch.Size((3, 2)))\n self.assertRaises(ValueError, cauchy.log_prob, self.scalar_sample)\n self.assertEqual(cauchy.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertEqual(cauchy.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))\n\n def test_cauchy_shape_tensor_params(self):\n cauchy = Cauchy(torch.tensor([0., 0.]), torch.tensor([1., 1.]))\n self.assertEqual(cauchy._batch_shape, torch.Size((2,)))\n self.assertEqual(cauchy._event_shape, torch.Size(()))\n self.assertEqual(cauchy.sample().size(), torch.Size((2,)))\n self.assertEqual(cauchy.sample(torch.Size((3, 2))).size(), torch.Size((3, 2, 2)))\n self.assertEqual(cauchy.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertRaises(ValueError, cauchy.log_prob, self.tensor_sample_2)\n self.assertEqual(cauchy.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))\n\n def test_halfcauchy_shape_scalar_params(self):\n halfcauchy = HalfCauchy(1)\n self.assertEqual(halfcauchy._batch_shape, torch.Size())\n self.assertEqual(halfcauchy._event_shape, torch.Size())\n self.assertEqual(halfcauchy.sample().size(), torch.Size())\n self.assertEqual(halfcauchy.sample(torch.Size((3, 2))).size(),\n torch.Size((3, 2)))\n self.assertEqual(halfcauchy.log_prob(self.scalar_sample).size(),\n torch.Size())\n self.assertEqual(halfcauchy.log_prob(self.tensor_sample_1).size(),\n torch.Size((3, 2)))\n self.assertEqual(halfcauchy.log_prob(self.tensor_sample_2).size(),\n torch.Size((3, 2, 3)))\n\n def 
test_halfcauchy_shape_tensor_params(self):\n halfcauchy = HalfCauchy(torch.tensor([1., 1.]))\n self.assertEqual(halfcauchy._batch_shape, torch.Size((2,)))\n self.assertEqual(halfcauchy._event_shape, torch.Size(()))\n self.assertEqual(halfcauchy.sample().size(), torch.Size((2,)))\n self.assertEqual(halfcauchy.sample(torch.Size((3, 2))).size(),\n torch.Size((3, 2, 2)))\n self.assertEqual(halfcauchy.log_prob(self.tensor_sample_1).size(),\n torch.Size((3, 2)))\n self.assertRaises(ValueError, halfcauchy.log_prob, self.tensor_sample_2)\n self.assertEqual(halfcauchy.log_prob(torch.ones(2, 1)).size(),\n torch.Size((2, 2)))\n\n def test_dirichlet_shape(self):\n dist = Dirichlet(torch.tensor([[0.6, 0.3], [1.6, 1.3], [2.6, 2.3]]))\n self.assertEqual(dist._batch_shape, torch.Size((3,)))\n self.assertEqual(dist._event_shape, torch.Size((2,)))\n self.assertEqual(dist.sample().size(), torch.Size((3, 2)))\n self.assertEqual(dist.sample((5, 4)).size(), torch.Size((5, 4, 3, 2)))\n simplex_sample = self.tensor_sample_1 / self.tensor_sample_1.sum(-1, keepdim=True)\n self.assertEqual(dist.log_prob(simplex_sample).size(), torch.Size((3,)))\n self.assertRaises(ValueError, dist.log_prob, self.tensor_sample_2)\n simplex_sample = torch.ones(3, 1, 2)\n simplex_sample = simplex_sample / simplex_sample.sum(-1).unsqueeze(-1)\n self.assertEqual(dist.log_prob(simplex_sample).size(), torch.Size((3, 3)))\n\n def test_mixture_same_family_shape(self):\n dist = MixtureSameFamily(Categorical(torch.rand(5)),\n Normal(torch.randn(5), torch.rand(5)))\n self.assertEqual(dist._batch_shape, torch.Size())\n self.assertEqual(dist._event_shape, torch.Size())\n self.assertEqual(dist.sample().size(), torch.Size())\n self.assertEqual(dist.sample((5, 4)).size(), torch.Size((5, 4)))\n self.assertEqual(dist.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertEqual(dist.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))\n\n def test_gamma_shape_scalar_params(self):\n gamma = Gamma(1, 
1)\n self.assertEqual(gamma._batch_shape, torch.Size())\n self.assertEqual(gamma._event_shape, torch.Size())\n self.assertEqual(gamma.sample().size(), torch.Size())\n self.assertEqual(gamma.sample((3, 2)).size(), torch.Size((3, 2)))\n self.assertEqual(gamma.log_prob(self.scalar_sample).size(), torch.Size())\n self.assertEqual(gamma.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertEqual(gamma.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))\n\n def test_gamma_shape_tensor_params(self):\n gamma = Gamma(torch.tensor([1., 1.]), torch.tensor([1., 1.]))\n self.assertEqual(gamma._batch_shape, torch.Size((2,)))\n self.assertEqual(gamma._event_shape, torch.Size(()))\n self.assertEqual(gamma.sample().size(), torch.Size((2,)))\n self.assertEqual(gamma.sample((3, 2)).size(), torch.Size((3, 2, 2)))\n self.assertEqual(gamma.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertRaises(ValueError, gamma.log_prob, self.tensor_sample_2)\n self.assertEqual(gamma.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))\n\n def test_chi2_shape_scalar_params(self):\n chi2 = Chi2(1)\n self.assertEqual(chi2._batch_shape, torch.Size())\n self.assertEqual(chi2._event_shape, torch.Size())\n self.assertEqual(chi2.sample().size(), torch.Size())\n self.assertEqual(chi2.sample((3, 2)).size(), torch.Size((3, 2)))\n self.assertEqual(chi2.log_prob(self.scalar_sample).size(), torch.Size())\n self.assertEqual(chi2.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))\n self.assertEqual(chi2.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))\n\n def test_chi2_shape_tensor_params(self):\n chi2 = Chi2(torch.tensor([1., 1.]))\n self.assertEqual(chi2._batch_shape, torch.Size((2,)))\n self.assertEqual(chi2._event_shape, torch.Size(()))\n self.assertEqual(chi2.sample().size(), torch.Size((2,)))\n self.assertEqual(chi2.sample((3, 2)).size(), torch.Size((3, 2, 2)))\n self.assertEqual(chi2.log_prob(self.tensor_sample_1).size(), torch.Size((3, 
2)))
        self.assertRaises(ValueError, chi2.log_prob, self.tensor_sample_2)
        self.assertEqual(chi2.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))

    def test_studentT_shape_scalar_params(self):
        # Scalar df: batch and event shapes are both empty; log_prob rejects
        # Python scalars and broadcasts against sample tensors.
        st = StudentT(1)
        self.assertEqual(st._batch_shape, torch.Size())
        self.assertEqual(st._event_shape, torch.Size())
        self.assertEqual(st.sample().size(), torch.Size())
        self.assertEqual(st.sample((3, 2)).size(), torch.Size((3, 2)))
        self.assertRaises(ValueError, st.log_prob, self.scalar_sample)
        self.assertEqual(st.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertEqual(st.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))

    def test_studentT_shape_tensor_params(self):
        # Vector df of length 2: batch shape is (2,); incompatible sample
        # shapes raise, broadcastable ones expand.
        st = StudentT(torch.tensor([1., 1.]))
        self.assertEqual(st._batch_shape, torch.Size((2,)))
        self.assertEqual(st._event_shape, torch.Size(()))
        self.assertEqual(st.sample().size(), torch.Size((2,)))
        self.assertEqual(st.sample((3, 2)).size(), torch.Size((3, 2, 2)))
        self.assertEqual(st.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertRaises(ValueError, st.log_prob, self.tensor_sample_2)
        self.assertEqual(st.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))

    def test_pareto_shape_scalar_params(self):
        # Samples are shifted by +1 so they land inside Pareto's support.
        pareto = Pareto(1, 1)
        self.assertEqual(pareto._batch_shape, torch.Size())
        self.assertEqual(pareto._event_shape, torch.Size())
        self.assertEqual(pareto.sample().size(), torch.Size())
        self.assertEqual(pareto.sample((3, 2)).size(), torch.Size((3, 2)))
        self.assertEqual(pareto.log_prob(self.tensor_sample_1 + 1).size(), torch.Size((3, 2)))
        self.assertEqual(pareto.log_prob(self.tensor_sample_2 + 1).size(), torch.Size((3, 2, 3)))

    def test_gumbel_shape_scalar_params(self):
        gumbel = Gumbel(1, 1)
        self.assertEqual(gumbel._batch_shape, torch.Size())
        self.assertEqual(gumbel._event_shape, torch.Size())
        self.assertEqual(gumbel.sample().size(), torch.Size())
        self.assertEqual(gumbel.sample((3, 2)).size(), torch.Size((3, 2)))
        self.assertEqual(gumbel.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertEqual(gumbel.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))

    def test_vonmises_shape_tensor_params(self):
        von_mises = VonMises(torch.tensor([0., 0.]), torch.tensor([1., 1.]))
        self.assertEqual(von_mises._batch_shape, torch.Size((2,)))
        self.assertEqual(von_mises._event_shape, torch.Size(()))
        self.assertEqual(von_mises.sample().size(), torch.Size((2,)))
        self.assertEqual(von_mises.sample(torch.Size((3, 2))).size(), torch.Size((3, 2, 2)))
        self.assertEqual(von_mises.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertEqual(von_mises.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))

    def test_vonmises_shape_scalar_params(self):
        von_mises = VonMises(0., 1.)
        self.assertEqual(von_mises._batch_shape, torch.Size())
        self.assertEqual(von_mises._event_shape, torch.Size())
        self.assertEqual(von_mises.sample().size(), torch.Size())
        self.assertEqual(von_mises.sample(torch.Size((3, 2))).size(),
                         torch.Size((3, 2)))
        self.assertEqual(von_mises.log_prob(self.tensor_sample_1).size(),
                         torch.Size((3, 2)))
        self.assertEqual(von_mises.log_prob(self.tensor_sample_2).size(),
                         torch.Size((3, 2, 3)))

    def test_weibull_scale_scalar_params(self):
        weibull = Weibull(1, 1)
        self.assertEqual(weibull._batch_shape, torch.Size())
        self.assertEqual(weibull._event_shape, torch.Size())
        self.assertEqual(weibull.sample().size(), torch.Size())
        self.assertEqual(weibull.sample((3, 2)).size(), torch.Size((3, 2)))
        self.assertEqual(weibull.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertEqual(weibull.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))

    def test_normal_shape_scalar_params(self):
        normal = Normal(0, 1)
        self.assertEqual(normal._batch_shape, torch.Size())
        self.assertEqual(normal._event_shape, torch.Size())
        self.assertEqual(normal.sample().size(), torch.Size())
        self.assertEqual(normal.sample((3, 2)).size(), torch.Size((3, 2)))
        self.assertRaises(ValueError, normal.log_prob, self.scalar_sample)
        self.assertEqual(normal.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertEqual(normal.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))

    def test_normal_shape_tensor_params(self):
        normal = Normal(torch.tensor([0., 0.]), torch.tensor([1., 1.]))
        self.assertEqual(normal._batch_shape, torch.Size((2,)))
        self.assertEqual(normal._event_shape, torch.Size(()))
        self.assertEqual(normal.sample().size(), torch.Size((2,)))
        self.assertEqual(normal.sample((3, 2)).size(), torch.Size((3, 2, 2)))
        self.assertEqual(normal.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertRaises(ValueError, normal.log_prob, self.tensor_sample_2)
        self.assertEqual(normal.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))

    def test_uniform_shape_scalar_params(self):
        uniform = Uniform(0, 1)
        self.assertEqual(uniform._batch_shape, torch.Size())
        self.assertEqual(uniform._event_shape, torch.Size())
        self.assertEqual(uniform.sample().size(), torch.Size())
        self.assertEqual(uniform.sample(torch.Size((3, 2))).size(), torch.Size((3, 2)))
        self.assertRaises(ValueError, uniform.log_prob, self.scalar_sample)
        self.assertEqual(uniform.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertEqual(uniform.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))

    def test_uniform_shape_tensor_params(self):
        uniform = Uniform(torch.tensor([0., 0.]), torch.tensor([1., 1.]))
        self.assertEqual(uniform._batch_shape, torch.Size((2,)))
        self.assertEqual(uniform._event_shape, torch.Size(()))
        self.assertEqual(uniform.sample().size(), torch.Size((2,)))
        self.assertEqual(uniform.sample(torch.Size((3, 2))).size(), torch.Size((3, 2, 2)))
        self.assertEqual(uniform.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertRaises(ValueError, uniform.log_prob, self.tensor_sample_2)
        self.assertEqual(uniform.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))

    def test_exponential_shape_scalar_param(self):
        expon = Exponential(1.)
        self.assertEqual(expon._batch_shape, torch.Size())
        self.assertEqual(expon._event_shape, torch.Size())
        self.assertEqual(expon.sample().size(), torch.Size())
        self.assertEqual(expon.sample((3, 2)).size(), torch.Size((3, 2)))
        self.assertRaises(ValueError, expon.log_prob, self.scalar_sample)
        self.assertEqual(expon.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertEqual(expon.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))

    def test_exponential_shape_tensor_param(self):
        expon = Exponential(torch.tensor([1., 1.]))
        self.assertEqual(expon._batch_shape, torch.Size((2,)))
        self.assertEqual(expon._event_shape, torch.Size(()))
        self.assertEqual(expon.sample().size(), torch.Size((2,)))
        self.assertEqual(expon.sample((3, 2)).size(), torch.Size((3, 2, 2)))
        self.assertEqual(expon.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertRaises(ValueError, expon.log_prob, self.tensor_sample_2)
        self.assertEqual(expon.log_prob(torch.ones(2, 2)).size(), torch.Size((2, 2)))

    def test_laplace_shape_scalar_params(self):
        laplace = Laplace(0, 1)
        self.assertEqual(laplace._batch_shape, torch.Size())
        self.assertEqual(laplace._event_shape, torch.Size())
        self.assertEqual(laplace.sample().size(), torch.Size())
        self.assertEqual(laplace.sample((3, 2)).size(), torch.Size((3, 2)))
        self.assertRaises(ValueError, laplace.log_prob, self.scalar_sample)
        self.assertEqual(laplace.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertEqual(laplace.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))

    def test_laplace_shape_tensor_params(self):
        laplace = Laplace(torch.tensor([0., 0.]), torch.tensor([1., 1.]))
        self.assertEqual(laplace._batch_shape, torch.Size((2,)))
        self.assertEqual(laplace._event_shape, torch.Size(()))
        self.assertEqual(laplace.sample().size(), torch.Size((2,)))
        self.assertEqual(laplace.sample((3, 2)).size(), torch.Size((3, 2, 2)))
        self.assertEqual(laplace.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertRaises(ValueError, laplace.log_prob, self.tensor_sample_2)
        self.assertEqual(laplace.log_prob(torch.ones(2, 1)).size(), torch.Size((2, 2)))

    def test_continuous_bernoulli_shape_scalar_params(self):
        continuous_bernoulli = ContinuousBernoulli(0.3)
        self.assertEqual(continuous_bernoulli._batch_shape, torch.Size())
        self.assertEqual(continuous_bernoulli._event_shape, torch.Size())
        self.assertEqual(continuous_bernoulli.sample().size(), torch.Size())
        self.assertEqual(continuous_bernoulli.sample((3, 2)).size(), torch.Size((3, 2)))
        self.assertRaises(ValueError, continuous_bernoulli.log_prob, self.scalar_sample)
        self.assertEqual(continuous_bernoulli.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertEqual(continuous_bernoulli.log_prob(self.tensor_sample_2).size(), torch.Size((3, 2, 3)))

    def test_continuous_bernoulli_shape_tensor_params(self):
        # (3, 2) batch of probs; log_prob broadcasts (3, 1, 1) samples
        # against the batch shape.
        continuous_bernoulli = ContinuousBernoulli(torch.tensor([[0.6, 0.3], [0.6, 0.3], [0.6, 0.3]]))
        self.assertEqual(continuous_bernoulli._batch_shape, torch.Size((3, 2)))
        self.assertEqual(continuous_bernoulli._event_shape, torch.Size(()))
        self.assertEqual(continuous_bernoulli.sample().size(), torch.Size((3, 2)))
        self.assertEqual(continuous_bernoulli.sample((3, 2)).size(), torch.Size((3, 2, 3, 2)))
        self.assertEqual(continuous_bernoulli.log_prob(self.tensor_sample_1).size(), torch.Size((3, 2)))
        self.assertRaises(ValueError, continuous_bernoulli.log_prob, self.tensor_sample_2)
        self.assertEqual(continuous_bernoulli.log_prob(torch.ones(3, 1, 1)).size(), torch.Size((3, 3, 2)))


class TestKL(TestCase):
    # Checks analytic kl_divergence implementations against Monte Carlo
    # estimates (finite pairs) and against known-infinite pairs.

    def setUp(self):
        super(TestKL, self).setUp()

        # Binomial with the total_count fixed to 30, so it can be driven by
        # the single-parameter `pairwise` helper like the other families.
        class Binomial30(Binomial):
            def __init__(self, probs):
                super(Binomial30,
                      self).__init__(30, probs)

        # These are pairs of distributions with 4 x 4 parameters as specified.
        # The first of the pair e.g. bernoulli[0] varies column-wise and the second
        # e.g. bernoulli[1] varies row-wise; that way we test all param pairs.
        bernoulli = pairwise(Bernoulli, [0.1, 0.2, 0.6, 0.9])
        binomial30 = pairwise(Binomial30, [0.1, 0.2, 0.6, 0.9])
        binomial_vectorized_count = (Binomial(torch.tensor([3, 4]), torch.tensor([0.4, 0.6])),
                                     Binomial(torch.tensor([3, 4]), torch.tensor([0.5, 0.8])))
        beta = pairwise(Beta, [1.0, 2.5, 1.0, 2.5], [1.5, 1.5, 3.5, 3.5])
        categorical = pairwise(Categorical, [[0.4, 0.3, 0.3],
                                             [0.2, 0.7, 0.1],
                                             [0.33, 0.33, 0.34],
                                             [0.2, 0.2, 0.6]])
        cauchy = pairwise(Cauchy, [-2.0, 2.0, -3.0, 3.0], [1.0, 2.0, 1.0, 2.0])
        chi2 = pairwise(Chi2, [1.0, 2.0, 2.5, 5.0])
        dirichlet = pairwise(Dirichlet, [[0.1, 0.2, 0.7],
                                         [0.5, 0.4, 0.1],
                                         [0.33, 0.33, 0.34],
                                         [0.2, 0.2, 0.4]])
        exponential = pairwise(Exponential, [1.0, 2.5, 5.0, 10.0])
        gamma = pairwise(Gamma, [1.0, 2.5, 1.0, 2.5], [1.5, 1.5, 3.5, 3.5])
        gumbel = pairwise(Gumbel, [-2.0, 4.0, -3.0, 6.0], [1.0, 2.5, 1.0, 2.5])
        halfnormal = pairwise(HalfNormal, [1.0, 2.0, 1.0, 2.0])
        laplace = pairwise(Laplace, [-2.0, 4.0, -3.0, 6.0], [1.0, 2.5, 1.0, 2.5])
        lognormal = pairwise(LogNormal, [-2.0, 2.0, -3.0, 3.0], [1.0, 2.0, 1.0, 2.0])
        normal = pairwise(Normal, [-2.0, 2.0, -3.0, 3.0], [1.0, 2.0, 1.0, 2.0])
        independent = (Independent(normal[0], 1), Independent(normal[1], 1))
        onehotcategorical = pairwise(OneHotCategorical, [[0.4, 0.3, 0.3],
                                                         [0.2, 0.7, 0.1],
                                                         [0.33, 0.33, 0.34],
                                                         [0.2, 0.2, 0.6]])
        pareto = pairwise(Pareto, [2.5, 4.0, 2.5, 4.0], [2.25, 3.75, 2.25, 3.75])
        poisson = pairwise(Poisson, [0.3, 1.0, 5.0, 10.0])
        # Uniform variants chosen so their supports nest inside the support
        # of the paired target family (unit interval, positive reals, etc.).
        uniform_within_unit = pairwise(Uniform, [0.15, 0.95, 0.2, 0.8], [0.1, 0.9, 0.25, 0.75])
        uniform_positive = pairwise(Uniform, [1, 1.5, 2, 4], [1.2, 2.0, 3, 7])
        uniform_real = pairwise(Uniform, [-2., -1, 0, 2], [-1., 1, 1, 4])
        uniform_pareto = pairwise(Uniform, [6.5, 8.5, 6.5, 8.5], [7.5, 7.5, 9.5, 9.5])
        continuous_bernoulli = pairwise(ContinuousBernoulli, [0.1, 0.2, 0.5, 0.9])

        # These tests should pass with precision = 0.01, but that makes tests very expensive.
        # Instead, we test with precision = 0.1 and only test with higher precision locally
        # when adding a new KL implementation.
        # The following pairs are not tested due to very high variance of the monte carlo
        # estimator; their implementations have been reviewed with extra care:
        # - (pareto, normal)
        self.precision = 0.1  # Set this to 0.01 when testing a new KL implementation.
        self.max_samples = int(1e07)  # Increase this when testing at smaller precision.
        self.samples_per_batch = int(1e04)
        # Pairs (p, q) whose analytic KL(p, q) is finite and checked against
        # a Monte Carlo estimate in test_kl_monte_carlo.
        self.finite_examples = [
            (bernoulli, bernoulli),
            (bernoulli, poisson),
            (beta, beta),
            (beta, chi2),
            (beta, exponential),
            (beta, gamma),
            (beta, normal),
            (binomial30, binomial30),
            (binomial_vectorized_count, binomial_vectorized_count),
            (categorical, categorical),
            (cauchy, cauchy),
            (chi2, chi2),
            (chi2, exponential),
            (chi2, gamma),
            (chi2, normal),
            (dirichlet, dirichlet),
            (exponential, chi2),
            (exponential, exponential),
            (exponential, gamma),
            (exponential, gumbel),
            (exponential, normal),
            (gamma, chi2),
            (gamma, exponential),
            (gamma, gamma),
            (gamma, gumbel),
            (gamma, normal),
            (gumbel, gumbel),
            (gumbel, normal),
            (halfnormal, halfnormal),
            (independent, independent),
            (laplace, laplace),
            (lognormal, lognormal),
            (laplace, normal),
            (normal, gumbel),
            (normal, normal),
            (onehotcategorical, onehotcategorical),
            (pareto, chi2),
            (pareto, pareto),
            (pareto, exponential),
            (pareto, gamma),
            (poisson, poisson),
            (uniform_within_unit, beta),
            (uniform_positive, chi2),
            (uniform_positive, exponential),
            (uniform_positive, gamma),
            (uniform_real, gumbel),
            (uniform_real, normal),
            (uniform_pareto, pareto),
            (continuous_bernoulli, continuous_bernoulli),
            (continuous_bernoulli, exponential),
            (continuous_bernoulli, normal),
            (beta, continuous_bernoulli)
        ]

        # Pairs (p, q) where the support of p is not contained in the support
        # of q, so KL(p, q) must be infinite (checked in test_kl_infinite).
        self.infinite_examples = [
            (Bernoulli(0), Bernoulli(1)),
            (Bernoulli(1), Bernoulli(0)),
            (Categorical(torch.tensor([0.9, 0.1])), Categorical(torch.tensor([1., 0.]))),
            (Categorical(torch.tensor([[0.9, 0.1], [.9, .1]])), Categorical(torch.tensor([1., 0.]))),
            (Beta(1, 2), Uniform(0.25, 1)),
            (Beta(1, 2), Uniform(0, 0.75)),
            (Beta(1, 2), Uniform(0.25, 0.75)),
            (Beta(1, 2), Pareto(1, 2)),
            (Binomial(31, 0.7), Binomial(30, 0.3)),
            (Binomial(torch.tensor([3, 4]), torch.tensor([0.4, 0.6])),
             Binomial(torch.tensor([2, 3]), torch.tensor([0.5, 0.8]))),
            (Chi2(1), Beta(2, 3)),
            (Chi2(1), Pareto(2, 3)),
            (Chi2(1), Uniform(-2, 3)),
            (Exponential(1), Beta(2, 3)),
            (Exponential(1), Pareto(2, 3)),
            (Exponential(1), Uniform(-2, 3)),
            (Gamma(1, 2), Beta(3, 4)),
            (Gamma(1, 2), Pareto(3, 4)),
            (Gamma(1, 2), Uniform(-3, 4)),
            (Gumbel(-1, 2), Beta(3, 4)),
            (Gumbel(-1, 2), Chi2(3)),
            (Gumbel(-1, 2), Exponential(3)),
            (Gumbel(-1, 2), Gamma(3, 4)),
            (Gumbel(-1, 2), Pareto(3, 4)),
            (Gumbel(-1, 2), Uniform(-3, 4)),
            (Laplace(-1, 2), Beta(3, 4)),
            (Laplace(-1, 2), Chi2(3)),
            (Laplace(-1, 2), Exponential(3)),
            (Laplace(-1, 2), Gamma(3, 4)),
            (Laplace(-1, 2), Pareto(3, 4)),
            (Laplace(-1, 2), Uniform(-3, 4)),
            (Normal(-1, 2), Beta(3, 4)),
            (Normal(-1, 2), Chi2(3)),
            (Normal(-1, 2), Exponential(3)),
            (Normal(-1, 2), Gamma(3, 4)),
            (Normal(-1, 2), Pareto(3, 4)),
            (Normal(-1, 2), Uniform(-3, 4)),
            (Pareto(2, 1), Chi2(3)),
            (Pareto(2, 1), Exponential(3)),
            (Pareto(2, 1), Gamma(3, 4)),
            (Pareto(1, 2), Normal(-3, 4)),
            (Pareto(1, 2), Pareto(3, 4)),
            (Poisson(2), Bernoulli(0.5)),
            (Poisson(2.3), Binomial(10, 0.2)),
            (Uniform(-1, 1), Beta(2, 2)),
            (Uniform(0, 2), Beta(3, 4)),
            (Uniform(-1, 2), Beta(3, 4)),
            (Uniform(-1, 2), Chi2(3)),
            (Uniform(-1, 2), Exponential(3)),
            (Uniform(-1, 2), Gamma(3, 4)),
            (Uniform(-1, 2), Pareto(3, 4)),
            (ContinuousBernoulli(0.25), Uniform(0.25, 1)),
            (ContinuousBernoulli(0.25), Uniform(0, 0.75)),
            (ContinuousBernoulli(0.25), Uniform(0.25, 0.75)),
            (ContinuousBernoulli(0.25), Pareto(1, 2)),
            (Exponential(1), ContinuousBernoulli(0.75)),
            (Gamma(1, 2), ContinuousBernoulli(0.75)),
            (Gumbel(-1, 2), ContinuousBernoulli(0.75)),
            (Laplace(-1, 2), ContinuousBernoulli(0.75)),
            (Normal(-1, 2), ContinuousBernoulli(0.75)),
            (Uniform(-1, 1), ContinuousBernoulli(0.75)),
            (Uniform(0, 2), ContinuousBernoulli(0.75)),
            (Uniform(-1, 2), ContinuousBernoulli(0.75))
        ]

    def test_kl_monte_carlo(self):
        # Compare analytic KL(p, q) against a batched Monte Carlo estimate of
        # E_p[log p(x) - log q(x)], drawing more samples until the relative
        # error falls below self.precision or self.max_samples is reached.
        set_rng_seed(0)  # see Note [Randomized statistical tests]
        for (p, _), (_, q) in self.finite_examples:
            actual = kl_divergence(p, q)
            numerator = 0
            denominator = 0
            while denominator < self.max_samples:
                x = p.sample(sample_shape=(self.samples_per_batch,))
                numerator += (p.log_prob(x) - q.log_prob(x)).sum(0)
                denominator += x.size(0)
                expected = numerator / denominator
                error = torch.abs(expected - actual) / (1 + expected)
                # error == error filters out NaN entries before taking max().
                if error[error == error].max() < self.precision:
                    break
            self.assertLess(error[error == error].max(), self.precision, '\n'.join([
                'Incorrect KL({}, {}).'.format(type(p).__name__, type(q).__name__),
                'Expected ({} Monte Carlo samples): {}'.format(denominator, expected),
                'Actual (analytic): {}'.format(actual),
            ]))

    # Multivariate normal has a separate Monte Carlo based test due to the requirement of random generation of
    # positive (semi) definite matrices. n is set to 5, but can be increased during testing.
    def test_kl_multivariate_normal(self):
        set_rng_seed(0)  # see Note [Randomized statistical tests]
        n = 5  # Number of tests for multivariate_normal
        for i in range(0, n):
            loc = [torch.randn(4) for _ in range(0, 2)]
            scale_tril = [transform_to(constraints.lower_cholesky)(torch.randn(4, 4)) for _ in range(0, 2)]
            p = MultivariateNormal(loc=loc[0], scale_tril=scale_tril[0])
            q = MultivariateNormal(loc=loc[1], scale_tril=scale_tril[1])
            actual = kl_divergence(p, q)
            numerator = 0
            denominator = 0
            while denominator < self.max_samples:
                x = p.sample(sample_shape=(self.samples_per_batch,))
                numerator += (p.log_prob(x) - q.log_prob(x)).sum(0)
                denominator += x.size(0)
                expected = numerator / denominator
                error = torch.abs(expected - actual) / (1 + expected)
                if error[error == error].max() < self.precision:
                    break
            self.assertLess(error[error == error].max(), self.precision, '\n'.join([
                'Incorrect KL(MultivariateNormal, MultivariateNormal) instance {}/{}'.format(i + 1, n),
                'Expected ({} Monte Carlo sample): {}'.format(denominator, expected),
                'Actual (analytic): {}'.format(actual),
            ]))

    def test_kl_multivariate_normal_batched(self):
        # Batched KL must agree with per-element KL computed in a loop.
        b = 7  # Number of batches
        loc = [torch.randn(b, 3) for _ in range(0, 2)]
        scale_tril = [transform_to(constraints.lower_cholesky)(torch.randn(b, 3, 3)) for _ in range(0, 2)]
        expected_kl = torch.stack([
            kl_divergence(MultivariateNormal(loc[0][i], scale_tril=scale_tril[0][i]),
                          MultivariateNormal(loc[1][i], scale_tril=scale_tril[1][i])) for i in range(0, b)])
        actual_kl = kl_divergence(MultivariateNormal(loc[0], scale_tril=scale_tril[0]),
                                  MultivariateNormal(loc[1], scale_tril=scale_tril[1]))
        self.assertEqual(expected_kl, actual_kl)

    def test_kl_multivariate_normal_batched_broadcasted(self):
        # Same as above, but q's scale_tril is unbatched and must broadcast
        # against p's batch of b.
        b = 7  # Number of batches
        loc = [torch.randn(b, 3) for _ in range(0, 2)]
        scale_tril = [transform_to(constraints.lower_cholesky)(torch.randn(b, 3, 3)),
                      transform_to(constraints.lower_cholesky)(torch.randn(3, 3))]
        expected_kl = torch.stack([
            kl_divergence(MultivariateNormal(loc[0][i], scale_tril=scale_tril[0][i]),
                          MultivariateNormal(loc[1][i], scale_tril=scale_tril[1])) for i in range(0, b)])
        actual_kl = kl_divergence(MultivariateNormal(loc[0], scale_tril=scale_tril[0]),
                                  MultivariateNormal(loc[1], scale_tril=scale_tril[1]))
        self.assertEqual(expected_kl, actual_kl)

    def test_kl_lowrank_multivariate_normal(self):
        # Low-rank MVN KL (in all lowrank/full combinations) must agree with
        # the full-covariance MVN KL built from the same parameters.
        set_rng_seed(0)  # see Note [Randomized statistical tests]
        n = 5  # Number of tests for lowrank_multivariate_normal
        for i in range(0, n):
            loc = [torch.randn(4) for _ in range(0, 2)]
            cov_factor = [torch.randn(4, 3) for _ in range(0, 2)]
            cov_diag = [transform_to(constraints.positive)(torch.randn(4)) for _ in range(0, 2)]
            covariance_matrix = [cov_factor[i].matmul(cov_factor[i].t()) +
                                 cov_diag[i].diag() for i in range(0, 2)]
            p = LowRankMultivariateNormal(loc[0], cov_factor[0], cov_diag[0])
            q = LowRankMultivariateNormal(loc[1], cov_factor[1], cov_diag[1])
            p_full = MultivariateNormal(loc[0], covariance_matrix[0])
            q_full = MultivariateNormal(loc[1], covariance_matrix[1])
            expected = kl_divergence(p_full, q_full)

            actual_lowrank_lowrank = kl_divergence(p, q)
            actual_lowrank_full = kl_divergence(p, q_full)
            actual_full_lowrank = kl_divergence(p_full, q)

            error_lowrank_lowrank = torch.abs(actual_lowrank_lowrank - expected).max()
            self.assertLess(error_lowrank_lowrank, self.precision, '\n'.join([
                'Incorrect KL(LowRankMultivariateNormal, LowRankMultivariateNormal) instance {}/{}'.format(i + 1, n),
                'Expected (from KL MultivariateNormal): {}'.format(expected),
                'Actual (analytic): {}'.format(actual_lowrank_lowrank),
            ]))

            error_lowrank_full = torch.abs(actual_lowrank_full - expected).max()
            self.assertLess(error_lowrank_full, self.precision, '\n'.join([
                'Incorrect KL(LowRankMultivariateNormal, MultivariateNormal) instance {}/{}'.format(i + 1, n),
                'Expected (from KL MultivariateNormal): {}'.format(expected),
                'Actual (analytic): {}'.format(actual_lowrank_full),
            ]))

            error_full_lowrank = torch.abs(actual_full_lowrank - expected).max()
            self.assertLess(error_full_lowrank, self.precision, '\n'.join([
                'Incorrect KL(MultivariateNormal, LowRankMultivariateNormal) instance {}/{}'.format(i + 1, n),
                'Expected (from KL MultivariateNormal): {}'.format(expected),
                'Actual (analytic): {}'.format(actual_full_lowrank),
            ]))

    def test_kl_lowrank_multivariate_normal_batched(self):
        # Batched low-rank KL must agree with per-element KL in a loop.
        b = 7  # Number of batches
        loc = [torch.randn(b, 3) for _ in range(0, 2)]
        cov_factor = [torch.randn(b, 3, 2) for _ in range(0, 2)]
        cov_diag = [transform_to(constraints.positive)(torch.randn(b, 3)) for _ in range(0, 2)]
        expected_kl = torch.stack([
            kl_divergence(LowRankMultivariateNormal(loc[0][i], cov_factor[0][i], cov_diag[0][i]),
                          LowRankMultivariateNormal(loc[1][i], cov_factor[1][i], cov_diag[1][i]))
            for i in range(0, b)])
        actual_kl = kl_divergence(LowRankMultivariateNormal(loc[0], cov_factor[0], cov_diag[0]),
                                  LowRankMultivariateNormal(loc[1], cov_factor[1], cov_diag[1]))
        self.assertEqual(expected_kl, actual_kl)

    def test_kl_exponential_family(self):
        # For same-family exponential-family pairs, the specialized KL must
        # match the generic Bregman-divergence implementation.
        for (p, _), (_, q) in self.finite_examples:
            if type(p) == type(q) and issubclass(type(p), ExponentialFamily):
                actual = kl_divergence(p, q)
                expected = _kl_expfamily_expfamily(p, q)
                self.assertEqual(actual, expected, msg='\n'.join([
                    'Incorrect KL({}, {}).'.format(type(p).__name__, type(q).__name__),
                    'Expected (using Bregman Divergence) {}'.format(expected),
                    'Actual (analytic) {}'.format(actual),
                    'max error = {}'.format(torch.abs(actual - expected).max())
                ]))

    def test_kl_infinite(self):
        # Support mismatch (see self.infinite_examples) must give KL == inf.
        for p, q in self.infinite_examples:
            self.assertTrue((kl_divergence(p, q) == inf).all(),
                            'Incorrect KL({}, {})'.format(type(p).__name__, type(q).__name__))

    def test_kl_edgecases(self):
        # KL of a degenerate distribution with itself is exactly zero.
        self.assertEqual(kl_divergence(Bernoulli(0), Bernoulli(0)), 0)
        self.assertEqual(kl_divergence(Bernoulli(1), Bernoulli(1)), 0)
        self.assertEqual(kl_divergence(Categorical(torch.tensor([0., 1.])), Categorical(torch.tensor([0., 1.]))), 0)

    def test_kl_shape(self):
        # KL of every example distribution with itself must have exactly the
        # distribution's batch shape.
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(**param)
                try:
                    kl = kl_divergence(dist, dist)
                except NotImplementedError:
                    continue
                expected_shape = dist.batch_shape if dist.batch_shape else torch.Size()
                self.assertEqual(kl.shape, expected_shape, msg='\n'.join([
                    '{} example {}/{}'.format(Dist.__name__, i + 1, len(params)),
                    'Expected {}'.format(expected_shape),
                    'Actual {}'.format(kl.shape),
                ]))

    def test_entropy_monte_carlo(self):
        # Analytic entropy() vs Monte Carlo estimate -E[log_prob(x)] over
        # 60000 samples; +/-inf estimates are excluded from the comparison.
        set_rng_seed(0)  # see Note [Randomized statistical tests]
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(**param)
                try:
                    actual = dist.entropy()
                except NotImplementedError:
                    continue
                x = dist.sample(sample_shape=(60000,))
                expected = -dist.log_prob(x).mean(0)
                ignore = (expected == inf) | (expected == -inf)
                expected[ignore] = actual[ignore]
                self.assertEqual(actual, expected, atol=0.2, rtol=0, msg='\n'.join([
                    '{} example {}/{}, incorrect .entropy().'.format(Dist.__name__, i + 1, len(params)),
                    'Expected (monte carlo) {}'.format(expected),
                    'Actual (analytic) {}'.format(actual),
                    'max error = {}'.format(torch.abs(actual - expected).max()),
                ]))

    def test_entropy_exponential_family(self):
        # Specialized entropy() must match the generic exponential-family
        # (Bregman-divergence based) implementation.
        for Dist, params in EXAMPLES:
            if not issubclass(Dist, ExponentialFamily):
                continue
            for i, param in enumerate(params):
                dist = Dist(**param)
                try:
                    actual = dist.entropy()
                except NotImplementedError:
                    continue
                try:
                    expected = ExponentialFamily.entropy(dist)
                except NotImplementedError:
                    continue
                self.assertEqual(actual, expected, msg='\n'.join([
                    '{} example {}/{}, incorrect .entropy().'.format(Dist.__name__, i + 1, len(params)),
                    'Expected (Bregman Divergence) {}'.format(expected),
                    'Actual (analytic) {}'.format(actual),
                    'max error = {}'.format(torch.abs(actual - expected).max())
                ]))


class TestConstraints(TestCase):
    def test_params_constraints(self):
        # Every example parameter value must satisfy the constraint declared
        # for it in the distribution's arg_constraints.
        for Dist, params in EXAMPLES:
            for i, param in enumerate(params):
                dist = Dist(**param)
                for name, value in param.items():
                    if isinstance(value, numbers.Number):
                        value = torch.tensor([value])
                    if Dist in (Categorical, OneHotCategorical, Multinomial) and name == 'probs':
                        # These distributions accept positive probs, but elsewhere we
                        # use a stricter constraint to the simplex.
                        value = value / value.sum(-1, True)
                    try:
                        constraint = dist.arg_constraints[name]
                    except KeyError:
                        continue  # ignore optional parameters

                    if is_dependent(constraint):
                        continue

                    message = '{} example {}/{} parameter {} = {}'.format(
                        Dist.__name__, i + 1, len(params), name, value)
                    self.assertTrue(constraint.check(value).all(), msg=message)

    def test_support_constraints(self):
        # Samples drawn from every example distribution must lie inside the
        # distribution's declared support constraint.
        for Dist, params in EXAMPLES:
            self.assertIsInstance(Dist.support, Constraint)
            for i, param in enumerate(params):
                dist = Dist(**param)
                value = dist.sample()
                constraint = dist.support
                message = '{} example {}/{} sample = {}'.format(
                    Dist.__name__, i + 1, len(params), value)
                self.assertTrue(constraint.check(value).all(), msg=message)


class TestNumericalStability(TestCase):
    def _test_pdf_score(self,
                        dist_class,
                        x,
                        expected_value,
                        probs=None,
                        logits=None,
                        expected_gradient=None,
                        atol=1e-5):
        # Shared helper: build dist_class from `probs` or `logits`, compute
        # log_prob(x), backprop, and check both the value and (optionally)
        # the gradient w.r.t. the parameter against expectations.
        if probs is not None:
            p = probs.detach().requires_grad_()
            dist = dist_class(p)
        else:
            p = logits.detach().requires_grad_()
            dist = dist_class(logits=p)
        log_pdf = dist.log_prob(x)
        log_pdf.sum().backward()
        self.assertEqual(log_pdf,
                         expected_value,
                         atol=atol,
                         rtol=0,
                         msg='Incorrect value for tensor type: {}. Expected = {}, Actual = {}'
                         .format(type(x), expected_value, log_pdf))
        if expected_gradient is not None:
            self.assertEqual(p.grad,
                             expected_gradient,
                             atol=atol,
                             rtol=0,
                             msg='Incorrect gradient for tensor type: {}. Expected = {}, Actual = {}'
                             .format(type(x), expected_gradient, p.grad))

    def test_bernoulli_gradient(self):
        # log_prob and its gradient must stay finite at/near the extreme
        # probabilities 0 and 1 in both float32 and float64.
        for tensor_type in [torch.FloatTensor, torch.DoubleTensor]:
            self._test_pdf_score(dist_class=Bernoulli,
                                 probs=tensor_type([0]),
                                 x=tensor_type([0]),
                                 expected_value=tensor_type([0]),
                                 expected_gradient=tensor_type([0]))

            self._test_pdf_score(dist_class=Bernoulli,
                                 probs=tensor_type([0]),
                                 x=tensor_type([1]),
                                 expected_value=tensor_type([torch.finfo(tensor_type([]).dtype).eps]).log(),
                                 expected_gradient=tensor_type([0]))

            self._test_pdf_score(dist_class=Bernoulli,
                                 probs=tensor_type([1e-4]),
                                 x=tensor_type([1]),
                                 expected_value=tensor_type([math.log(1e-4)]),
                                 expected_gradient=tensor_type([10000]))

            # Lower precision due to:
            # >>> 1 / (1 - torch.FloatTensor([0.9999]))
            # 9998.3408
            # [torch.FloatTensor of size 1]
            self._test_pdf_score(dist_class=Bernoulli,
                                 probs=tensor_type([1 - 1e-4]),
                                 x=tensor_type([0]),
                                 expected_value=tensor_type([math.log(1e-4)]),
                                 expected_gradient=tensor_type([-10000]),
                                 atol=2)

            self._test_pdf_score(dist_class=Bernoulli,
                                 logits=tensor_type([math.log(9999)]),
                                 x=tensor_type([0]),
                                 expected_value=tensor_type([math.log(1e-4)]),
                                 expected_gradient=tensor_type([-1]),
                                 atol=1e-3)

    def test_bernoulli_with_logits_underflow(self):
        # Extremely negative logits must not produce NaN/inf values or grads.
        for tensor_type, lim in ([(torch.FloatTensor, -1e38),
                                  (torch.DoubleTensor, -1e308)]):
            self._test_pdf_score(dist_class=Bernoulli,
                                 logits=tensor_type([lim]),
                                 x=tensor_type([0]),
                                 expected_value=tensor_type([0]),
                                 expected_gradient=tensor_type([0]))

    def test_bernoulli_with_logits_overflow(self):
        # Extremely positive logits must not produce NaN/inf values or grads.
        for tensor_type, lim in ([(torch.FloatTensor, 1e38),
                                  (torch.DoubleTensor, 1e308)]):
            self._test_pdf_score(dist_class=Bernoulli,
                                 logits=tensor_type([lim]),
                                 x=tensor_type([1]),
                                 expected_value=tensor_type([0]),
                                 expected_gradient=tensor_type([0]))

    def test_categorical_log_prob(self):
        # A category with probability exactly 1 must give log_prob == 0.
        for dtype in ([torch.float, torch.double]):
            p = torch.tensor([0, 1], dtype=dtype, requires_grad=True)
            categorical = OneHotCategorical(p)
            log_pdf = categorical.log_prob(torch.tensor([0, 1], dtype=dtype))
            self.assertEqual(log_pdf.item(), 0)

    def test_categorical_log_prob_with_logits(self):
        # -inf logits: the certain category gives log_prob 0, the impossible
        # category gives log_prob == -inf, without NaNs.
        for dtype in ([torch.float, torch.double]):
            p = torch.tensor([-inf, 0], dtype=dtype, requires_grad=True)
            categorical = OneHotCategorical(logits=p)
            log_pdf_prob_1 = categorical.log_prob(torch.tensor([0, 1], dtype=dtype))
            self.assertEqual(log_pdf_prob_1.item(), 0)
            log_pdf_prob_0 = categorical.log_prob(torch.tensor([1, 0], dtype=dtype))
            self.assertEqual(log_pdf_prob_0.item(), -inf)

    def test_multinomial_log_prob(self):
        # All counts on the probability-1 category must give log_prob == 0.
        for dtype in ([torch.float, torch.double]):
            p = torch.tensor([0, 1], dtype=dtype, requires_grad=True)
            s = torch.tensor([0, 10], dtype=dtype)
            multinomial = Multinomial(10, p)
            log_pdf = multinomial.log_prob(s)
            self.assertEqual(log_pdf.item(), 0)

    def test_multinomial_log_prob_with_logits(self):
        # Same as above but parameterized by logits containing -inf.
        for dtype in ([torch.float, torch.double]):
            p = torch.tensor([-inf, 0], dtype=dtype, requires_grad=True)
            multinomial = Multinomial(10, logits=p)
            log_pdf_prob_1 = multinomial.log_prob(torch.tensor([0, 10], dtype=dtype))
            self.assertEqual(log_pdf_prob_1.item(), 0)
            log_pdf_prob_0 = multinomial.log_prob(torch.tensor([10, 0], dtype=dtype))
            self.assertEqual(log_pdf_prob_0.item(), -inf)

    def test_continuous_bernoulli_gradient(self):

        def expec_val(x, probs=None, logits=None):
            # Closed-form ContinuousBernoulli log-likelihood, used as the
            # reference value for _test_pdf_score below.
            assert not (probs is None and logits is None)
            if logits is not None:
                probs = 1. / (1. + math.exp(-logits))
            bern_log_lik = x * math.log(probs) + (1.
- x) * math.log1p(-probs)\n if probs < 0.499 or probs > 0.501: # using default values of lims here\n log_norm_const = math.log(\n math.fabs(math.atanh(1. - 2. * probs))) - math.log(math.fabs(1. - 2. * probs)) + math.log(2.)\n else:\n aux = math.pow(probs - 0.5, 2)\n log_norm_const = math.log(2.0) + (4.0 / 3.0 + 104.0 / 45.0 * aux) * aux\n log_lik = bern_log_lik + log_norm_const\n return log_lik\n\n def expec_grad(x, probs=None, logits=None):\n assert not (probs is None and logits is None)\n if logits is not None:\n probs = 1. / (1. + math.exp(-logits))\n grad_bern_log_lik = x / probs - (1. - x) / (1. - probs)\n if probs < 0.499 or probs > 0.501: # using default values of lims here\n grad_log_c = 2. * probs - 4. * (probs - 1.) * probs * math.atanh(1. - 2. * probs) - 1.\n grad_log_c /= 2. * (probs - 1.) * probs * (2. * probs - 1.) * math.atanh(1. - 2. * probs)\n else:\n grad_log_c = 8. / 3. * (probs - 0.5) + 416. / 45. * math.pow(probs - 0.5, 3)\n grad = grad_bern_log_lik + grad_log_c\n if logits is not None:\n grad *= 1. / (1. + math.exp(logits)) - 1. / math.pow(1. 
+ math.exp(logits), 2)\n return grad\n\n for tensor_type in [torch.FloatTensor, torch.DoubleTensor]:\n self._test_pdf_score(dist_class=ContinuousBernoulli,\n probs=tensor_type([0.1]),\n x=tensor_type([0.1]),\n expected_value=tensor_type([expec_val(0.1, probs=0.1)]),\n expected_gradient=tensor_type([expec_grad(0.1, probs=0.1)]))\n\n self._test_pdf_score(dist_class=ContinuousBernoulli,\n probs=tensor_type([0.1]),\n x=tensor_type([1.]),\n expected_value=tensor_type([expec_val(1., probs=0.1)]),\n expected_gradient=tensor_type([expec_grad(1., probs=0.1)]))\n\n self._test_pdf_score(dist_class=ContinuousBernoulli,\n probs=tensor_type([0.4999]),\n x=tensor_type([0.9]),\n expected_value=tensor_type([expec_val(0.9, probs=0.4999)]),\n expected_gradient=tensor_type([expec_grad(0.9, probs=0.4999)]))\n\n self._test_pdf_score(dist_class=ContinuousBernoulli,\n probs=tensor_type([1e-4]),\n x=tensor_type([1]),\n expected_value=tensor_type([expec_val(1, probs=1e-4)]),\n expected_gradient=tensor_type(tensor_type([expec_grad(1, probs=1e-4)])),\n atol=1e-3)\n\n self._test_pdf_score(dist_class=ContinuousBernoulli,\n probs=tensor_type([1 - 1e-4]),\n x=tensor_type([0.1]),\n expected_value=tensor_type([expec_val(0.1, probs=1 - 1e-4)]),\n expected_gradient=tensor_type([expec_grad(0.1, probs=1 - 1e-4)]),\n atol=2)\n\n self._test_pdf_score(dist_class=ContinuousBernoulli,\n logits=tensor_type([math.log(9999)]),\n x=tensor_type([0]),\n expected_value=tensor_type([expec_val(0, logits=math.log(9999))]),\n expected_gradient=tensor_type([expec_grad(0, logits=math.log(9999))]),\n atol=1e-3)\n\n self._test_pdf_score(dist_class=ContinuousBernoulli,\n logits=tensor_type([0.001]),\n x=tensor_type([0.5]),\n expected_value=tensor_type([expec_val(0.5, logits=0.001)]),\n expected_gradient=tensor_type([expec_grad(0.5, logits=0.001)]))\n\n def test_continuous_bernoulli_with_logits_underflow(self):\n for tensor_type, lim, expected in ([(torch.FloatTensor, -1e38, 2.76898),\n (torch.DoubleTensor, -1e308, 
3.58473)]):\n self._test_pdf_score(dist_class=ContinuousBernoulli,\n logits=tensor_type([lim]),\n x=tensor_type([0]),\n expected_value=tensor_type([expected]),\n expected_gradient=tensor_type([0.]))\n\n def test_continuous_bernoulli_with_logits_overflow(self):\n for tensor_type, lim, expected in ([(torch.FloatTensor, 1e38, 2.76898),\n (torch.DoubleTensor, 1e308, 3.58473)]):\n self._test_pdf_score(dist_class=ContinuousBernoulli,\n logits=tensor_type([lim]),\n x=tensor_type([1]),\n expected_value=tensor_type([expected]),\n expected_gradient=tensor_type([0.]))\n\n\nclass TestLazyLogitsInitialization(TestCase):\n def setUp(self):\n super(TestLazyLogitsInitialization, self).setUp()\n # ContinuousBernoulli is not tested because log_prob is not computed simply\n # from 'logits', but 'probs' is also needed\n self.examples = [e for e in EXAMPLES if e.Dist in\n (Categorical, OneHotCategorical, Bernoulli, Binomial, Multinomial)]\n\n def test_lazy_logits_initialization(self):\n for Dist, params in self.examples:\n param = params[0]\n if 'probs' in param:\n probs = param.pop('probs')\n param['logits'] = probs_to_logits(probs)\n dist = Dist(**param)\n shape = (1,) if not dist.event_shape else dist.event_shape\n dist.log_prob(torch.ones(shape))\n message = 'Failed for {} example 0/{}'.format(Dist.__name__, len(params))\n self.assertFalse('probs' in vars(dist), msg=message)\n try:\n dist.enumerate_support()\n except NotImplementedError:\n pass\n self.assertFalse('probs' in vars(dist), msg=message)\n batch_shape, event_shape = dist.batch_shape, dist.event_shape\n self.assertFalse('probs' in vars(dist), msg=message)\n\n def test_lazy_probs_initialization(self):\n for Dist, params in self.examples:\n param = params[0]\n if 'probs' in param:\n dist = Dist(**param)\n dist.sample()\n message = 'Failed for {} example 0/{}'.format(Dist.__name__, len(params))\n self.assertFalse('logits' in vars(dist), msg=message)\n try:\n dist.enumerate_support()\n except NotImplementedError:\n pass\n 
self.assertFalse('logits' in vars(dist), msg=message)\n batch_shape, event_shape = dist.batch_shape, dist.event_shape\n self.assertFalse('logits' in vars(dist), msg=message)\n\n\n@unittest.skipIf(not TEST_NUMPY, \"NumPy not found\")\nclass TestAgainstScipy(TestCase):\n def setUp(self):\n super(TestAgainstScipy, self).setUp()\n positive_var = torch.randn(20).exp()\n positive_var2 = torch.randn(20).exp()\n random_var = torch.randn(20)\n simplex_tensor = softmax(torch.randn(20), dim=-1)\n self.distribution_pairs = [\n (\n Bernoulli(simplex_tensor),\n scipy.stats.bernoulli(simplex_tensor)\n ),\n (\n Beta(positive_var, positive_var2),\n scipy.stats.beta(positive_var, positive_var2)\n ),\n (\n Binomial(10, simplex_tensor),\n scipy.stats.binom(10 * np.ones(simplex_tensor.shape), simplex_tensor.numpy())\n ),\n (\n Cauchy(random_var, positive_var),\n scipy.stats.cauchy(loc=random_var, scale=positive_var)\n ),\n (\n Dirichlet(positive_var),\n scipy.stats.dirichlet(positive_var)\n ),\n (\n Exponential(positive_var),\n scipy.stats.expon(scale=positive_var.reciprocal())\n ),\n (\n FisherSnedecor(positive_var, 4 + positive_var2), # var for df2<=4 is undefined\n scipy.stats.f(positive_var, 4 + positive_var2)\n ),\n (\n Gamma(positive_var, positive_var2),\n scipy.stats.gamma(positive_var, scale=positive_var2.reciprocal())\n ),\n (\n Geometric(simplex_tensor),\n scipy.stats.geom(simplex_tensor, loc=-1)\n ),\n (\n Gumbel(random_var, positive_var2),\n scipy.stats.gumbel_r(random_var, positive_var2)\n ),\n (\n HalfCauchy(positive_var),\n scipy.stats.halfcauchy(scale=positive_var)\n ),\n (\n HalfNormal(positive_var2),\n scipy.stats.halfnorm(scale=positive_var2)\n ),\n (\n Laplace(random_var, positive_var2),\n scipy.stats.laplace(random_var, positive_var2)\n ),\n (\n # Tests fail 1e-5 threshold if scale > 3\n LogNormal(random_var, positive_var.clamp(max=3)),\n scipy.stats.lognorm(s=positive_var.clamp(max=3), scale=random_var.exp())\n ),\n (\n LowRankMultivariateNormal(random_var, 
torch.zeros(20, 1), positive_var2),\n scipy.stats.multivariate_normal(random_var, torch.diag(positive_var2))\n ),\n (\n Multinomial(10, simplex_tensor),\n scipy.stats.multinomial(10, simplex_tensor)\n ),\n (\n MultivariateNormal(random_var, torch.diag(positive_var2)),\n scipy.stats.multivariate_normal(random_var, torch.diag(positive_var2))\n ),\n (\n Normal(random_var, positive_var2),\n scipy.stats.norm(random_var, positive_var2)\n ),\n (\n OneHotCategorical(simplex_tensor),\n scipy.stats.multinomial(1, simplex_tensor)\n ),\n (\n Pareto(positive_var, 2 + positive_var2),\n scipy.stats.pareto(2 + positive_var2, scale=positive_var)\n ),\n (\n Poisson(positive_var),\n scipy.stats.poisson(positive_var)\n ),\n (\n StudentT(2 + positive_var, random_var, positive_var2),\n scipy.stats.t(2 + positive_var, random_var, positive_var2)\n ),\n (\n Uniform(random_var, random_var + positive_var),\n scipy.stats.uniform(random_var, positive_var)\n ),\n (\n VonMises(random_var, positive_var),\n scipy.stats.vonmises(positive_var, loc=random_var)\n ),\n (\n Weibull(positive_var[0], positive_var2[0]), # scipy var for Weibull only supports scalars\n scipy.stats.weibull_min(c=positive_var2[0], scale=positive_var[0])\n )\n ]\n\n def test_mean(self):\n for pytorch_dist, scipy_dist in self.distribution_pairs:\n if isinstance(pytorch_dist, (Cauchy, HalfCauchy)):\n # Cauchy, HalfCauchy distributions' mean is nan, skipping check\n continue\n elif isinstance(pytorch_dist, (LowRankMultivariateNormal, MultivariateNormal)):\n self.assertEqual(pytorch_dist.mean, scipy_dist.mean, msg=pytorch_dist)\n else:\n self.assertEqual(pytorch_dist.mean, scipy_dist.mean(), msg=pytorch_dist)\n\n def test_variance_stddev(self):\n for pytorch_dist, scipy_dist in self.distribution_pairs:\n if isinstance(pytorch_dist, (Cauchy, HalfCauchy, VonMises)):\n # Cauchy, HalfCauchy distributions' standard deviation is nan, skipping check\n # VonMises variance is circular and scipy doesn't produce a correct result\n continue\n 
elif isinstance(pytorch_dist, (Multinomial, OneHotCategorical)):\n self.assertEqual(pytorch_dist.variance, np.diag(scipy_dist.cov()), msg=pytorch_dist)\n self.assertEqual(pytorch_dist.stddev, np.diag(scipy_dist.cov()) ** 0.5, msg=pytorch_dist)\n elif isinstance(pytorch_dist, (LowRankMultivariateNormal, MultivariateNormal)):\n self.assertEqual(pytorch_dist.variance, np.diag(scipy_dist.cov), msg=pytorch_dist)\n self.assertEqual(pytorch_dist.stddev, np.diag(scipy_dist.cov) ** 0.5, msg=pytorch_dist)\n else:\n self.assertEqual(pytorch_dist.variance, scipy_dist.var(), msg=pytorch_dist)\n self.assertEqual(pytorch_dist.stddev, scipy_dist.var() ** 0.5, msg=pytorch_dist)\n\n def test_cdf(self):\n for pytorch_dist, scipy_dist in self.distribution_pairs:\n samples = pytorch_dist.sample((5,))\n try:\n cdf = pytorch_dist.cdf(samples)\n except NotImplementedError:\n continue\n self.assertEqual(cdf, scipy_dist.cdf(samples), msg=pytorch_dist)\n\n def test_icdf(self):\n for pytorch_dist, scipy_dist in self.distribution_pairs:\n samples = torch.rand((5,) + pytorch_dist.batch_shape)\n try:\n icdf = pytorch_dist.icdf(samples)\n except NotImplementedError:\n continue\n self.assertEqual(icdf, scipy_dist.ppf(samples), msg=pytorch_dist)\n\n\nclass TestTransforms(TestCase):\n def setUp(self):\n super(TestTransforms, self).setUp()\n self.transforms = []\n transforms_by_cache_size = {}\n for cache_size in [0, 1]:\n transforms = [\n AbsTransform(cache_size=cache_size),\n ExpTransform(cache_size=cache_size),\n PowerTransform(exponent=2,\n cache_size=cache_size),\n PowerTransform(exponent=torch.tensor(5.).normal_(),\n cache_size=cache_size),\n SigmoidTransform(cache_size=cache_size),\n TanhTransform(cache_size=cache_size),\n AffineTransform(0, 1, cache_size=cache_size),\n AffineTransform(1, -2, cache_size=cache_size),\n AffineTransform(torch.randn(5),\n torch.randn(5),\n cache_size=cache_size),\n AffineTransform(torch.randn(4, 5),\n torch.randn(4, 5),\n cache_size=cache_size),\n 
SoftmaxTransform(cache_size=cache_size),\n StickBreakingTransform(cache_size=cache_size),\n LowerCholeskyTransform(cache_size=cache_size),\n ComposeTransform([\n AffineTransform(torch.randn(4, 5),\n torch.randn(4, 5),\n cache_size=cache_size),\n ]),\n ComposeTransform([\n AffineTransform(torch.randn(4, 5),\n torch.randn(4, 5),\n cache_size=cache_size),\n ExpTransform(cache_size=cache_size),\n ]),\n ComposeTransform([\n AffineTransform(0, 1, cache_size=cache_size),\n AffineTransform(torch.randn(4, 5),\n torch.randn(4, 5),\n cache_size=cache_size),\n AffineTransform(1, -2, cache_size=cache_size),\n AffineTransform(torch.randn(4, 5),\n torch.randn(4, 5),\n cache_size=cache_size),\n ]),\n ]\n for t in transforms[:]:\n transforms.append(t.inv)\n transforms.append(identity_transform)\n self.transforms += transforms\n if cache_size == 0:\n self.unique_transforms = transforms[:]\n\n def _generate_data(self, transform):\n domain = transform.domain\n codomain = transform.codomain\n x = torch.empty(4, 5)\n if domain is constraints.lower_cholesky or codomain is constraints.lower_cholesky:\n x = torch.empty(6, 6)\n x = x.normal_()\n return x\n elif domain is constraints.real:\n return x.normal_()\n elif domain is constraints.positive:\n return x.normal_().exp()\n elif domain is constraints.unit_interval:\n return x.uniform_()\n elif isinstance(domain, constraints.interval):\n x = x.uniform_()\n x = x.mul_(domain.upper_bound - domain.lower_bound).add_(domain.lower_bound)\n return x\n elif domain is constraints.simplex:\n x = x.normal_().exp()\n x /= x.sum(-1, True)\n return x\n raise ValueError('Unsupported domain: {}'.format(domain))\n\n def test_inv_inv(self):\n for t in self.transforms:\n self.assertTrue(t.inv.inv is t)\n\n def test_equality(self):\n transforms = self.unique_transforms\n for x, y in product(transforms, transforms):\n if x is y:\n self.assertTrue(x == y)\n self.assertFalse(x != y)\n else:\n self.assertFalse(x == y)\n self.assertTrue(x != y)\n\n 
self.assertTrue(identity_transform == identity_transform.inv)\n self.assertFalse(identity_transform != identity_transform.inv)\n\n def test_with_cache(self):\n for transform in self.transforms:\n if transform._cache_size == 0:\n transform = transform.with_cache(1)\n self.assertTrue(transform._cache_size == 1)\n\n x = self._generate_data(transform).requires_grad_()\n try:\n y = transform(x)\n except NotImplementedError:\n continue\n y2 = transform(x)\n self.assertTrue(y2 is y)\n\n def test_forward_inverse_cache(self):\n for transform in self.transforms:\n x = self._generate_data(transform).requires_grad_()\n try:\n y = transform(x)\n except NotImplementedError:\n continue\n x2 = transform.inv(y) # should be implemented at least by caching\n y2 = transform(x2) # should be implemented at least by caching\n if transform.bijective:\n # verify function inverse\n self.assertEqual(x2, x, msg='\\n'.join([\n '{} t.inv(t(-)) error'.format(transform),\n 'x = {}'.format(x),\n 'y = t(x) = {}'.format(y),\n 'x2 = t.inv(y) = {}'.format(x2),\n ]))\n else:\n # verify weaker function pseudo-inverse\n self.assertEqual(y2, y, msg='\\n'.join([\n '{} t(t.inv(t(-))) error'.format(transform),\n 'x = {}'.format(x),\n 'y = t(x) = {}'.format(y),\n 'x2 = t.inv(y) = {}'.format(x2),\n 'y2 = t(x2) = {}'.format(y2),\n ]))\n\n def test_forward_inverse_no_cache(self):\n for transform in self.transforms:\n x = self._generate_data(transform).requires_grad_()\n try:\n y = transform(x)\n x2 = transform.inv(y.clone()) # bypass cache\n y2 = transform(x2)\n except NotImplementedError:\n continue\n if transform.bijective:\n # verify function inverse\n self.assertEqual(x2, x, msg='\\n'.join([\n '{} t.inv(t(-)) error'.format(transform),\n 'x = {}'.format(x),\n 'y = t(x) = {}'.format(y),\n 'x2 = t.inv(y) = {}'.format(x2),\n ]))\n else:\n # verify weaker function pseudo-inverse\n self.assertEqual(y2, y, msg='\\n'.join([\n '{} t(t.inv(t(-))) error'.format(transform),\n 'x = {}'.format(x),\n 'y = t(x) = 
{}'.format(y),\n 'x2 = t.inv(y) = {}'.format(x2),\n 'y2 = t(x2) = {}'.format(y2),\n ]))\n\n def test_univariate_forward_jacobian(self):\n for transform in self.transforms:\n if transform.event_dim > 0:\n continue\n x = self._generate_data(transform).requires_grad_()\n try:\n y = transform(x)\n actual = transform.log_abs_det_jacobian(x, y)\n except NotImplementedError:\n continue\n expected = torch.abs(grad([y.sum()], [x])[0]).log()\n self.assertEqual(actual, expected, msg='\\n'.join([\n 'Bad {}.log_abs_det_jacobian() disagrees with ()'.format(transform),\n 'Expected: {}'.format(expected),\n 'Actual: {}'.format(actual),\n ]))\n\n def test_univariate_inverse_jacobian(self):\n for transform in self.transforms:\n if transform.event_dim > 0:\n continue\n y = self._generate_data(transform.inv).requires_grad_()\n try:\n x = transform.inv(y)\n actual = transform.log_abs_det_jacobian(x, y)\n except NotImplementedError:\n continue\n expected = -torch.abs(grad([x.sum()], [y])[0]).log()\n self.assertEqual(actual, expected, msg='\\n'.join([\n '{}.log_abs_det_jacobian() disagrees with .inv()'.format(transform),\n 'Expected: {}'.format(expected),\n 'Actual: {}'.format(actual),\n ]))\n\n def test_jacobian_shape(self):\n for transform in self.transforms:\n x = self._generate_data(transform)\n try:\n y = transform(x)\n actual = transform.log_abs_det_jacobian(x, y)\n except NotImplementedError:\n continue\n self.assertEqual(actual.shape, x.shape[:x.dim() - transform.event_dim])\n\n def test_transform_shapes(self):\n transform0 = ExpTransform()\n transform1 = SoftmaxTransform()\n transform2 = LowerCholeskyTransform()\n\n self.assertEqual(transform0.event_dim, 0)\n self.assertEqual(transform1.event_dim, 1)\n self.assertEqual(transform2.event_dim, 2)\n self.assertEqual(ComposeTransform([transform0, transform1]).event_dim, 1)\n self.assertEqual(ComposeTransform([transform0, transform2]).event_dim, 2)\n self.assertEqual(ComposeTransform([transform1, transform2]).event_dim, 2)\n\n def 
test_transformed_distribution_shapes(self):\n transform0 = ExpTransform()\n transform1 = SoftmaxTransform()\n transform2 = LowerCholeskyTransform()\n base_dist0 = Normal(torch.zeros(4, 4), torch.ones(4, 4))\n base_dist1 = Dirichlet(torch.ones(4, 4))\n base_dist2 = Normal(torch.zeros(3, 4, 4), torch.ones(3, 4, 4))\n examples = [\n ((4, 4), (), base_dist0),\n ((4,), (4,), base_dist1),\n ((4, 4), (), TransformedDistribution(base_dist0, [transform0])),\n ((4,), (4,), TransformedDistribution(base_dist0, [transform1])),\n ((4,), (4,), TransformedDistribution(base_dist0, [transform0, transform1])),\n ((), (4, 4), TransformedDistribution(base_dist0, [transform0, transform2])),\n ((4,), (4,), TransformedDistribution(base_dist0, [transform1, transform0])),\n ((), (4, 4), TransformedDistribution(base_dist0, [transform1, transform2])),\n ((), (4, 4), TransformedDistribution(base_dist0, [transform2, transform0])),\n ((), (4, 4), TransformedDistribution(base_dist0, [transform2, transform1])),\n ((4,), (4,), TransformedDistribution(base_dist1, [transform0])),\n ((4,), (4,), TransformedDistribution(base_dist1, [transform1])),\n ((), (4, 4), TransformedDistribution(base_dist1, [transform2])),\n ((4,), (4,), TransformedDistribution(base_dist1, [transform0, transform1])),\n ((), (4, 4), TransformedDistribution(base_dist1, [transform0, transform2])),\n ((4,), (4,), TransformedDistribution(base_dist1, [transform1, transform0])),\n ((), (4, 4), TransformedDistribution(base_dist1, [transform1, transform2])),\n ((), (4, 4), TransformedDistribution(base_dist1, [transform2, transform0])),\n ((), (4, 4), TransformedDistribution(base_dist1, [transform2, transform1])),\n ((3, 4, 4), (), base_dist2),\n ((3,), (4, 4), TransformedDistribution(base_dist2, [transform2])),\n ((3,), (4, 4), TransformedDistribution(base_dist2, [transform0, transform2])),\n ((3,), (4, 4), TransformedDistribution(base_dist2, [transform1, transform2])),\n ((3,), (4, 4), TransformedDistribution(base_dist2, [transform2, 
transform0])),\n ((3,), (4, 4), TransformedDistribution(base_dist2, [transform2, transform1])),\n ]\n for batch_shape, event_shape, dist in examples:\n self.assertEqual(dist.batch_shape, batch_shape)\n self.assertEqual(dist.event_shape, event_shape)\n x = dist.rsample()\n try:\n dist.log_prob(x) # this should not crash\n except NotImplementedError:\n continue\n\n def test_jit_fwd(self):\n for transform in self.unique_transforms:\n x = self._generate_data(transform).requires_grad_()\n\n def f(x):\n return transform(x)\n\n try:\n traced_f = torch.jit.trace(f, (x,))\n except NotImplementedError:\n continue\n\n # check on different inputs\n x = self._generate_data(transform).requires_grad_()\n self.assertEqual(f(x), traced_f(x))\n\n def test_jit_inv(self):\n for transform in self.unique_transforms:\n y = self._generate_data(transform.inv).requires_grad_()\n\n def f(y):\n return transform.inv(y)\n\n try:\n traced_f = torch.jit.trace(f, (y,))\n except NotImplementedError:\n continue\n\n # check on different inputs\n y = self._generate_data(transform.inv).requires_grad_()\n self.assertEqual(f(y), traced_f(y))\n\n def test_jit_jacobian(self):\n for transform in self.unique_transforms:\n x = self._generate_data(transform).requires_grad_()\n\n def f(x):\n y = transform(x)\n return transform.log_abs_det_jacobian(x, y)\n\n try:\n traced_f = torch.jit.trace(f, (x,))\n except NotImplementedError:\n continue\n\n # check on different inputs\n x = self._generate_data(transform).requires_grad_()\n self.assertEqual(f(x), traced_f(x))\n\n\nclass TestFunctors(TestCase):\n def test_cat_transform(self):\n x1 = -1 * torch.arange(1, 101, dtype=torch.float).view(-1, 100)\n x2 = (torch.arange(1, 101, dtype=torch.float).view(-1, 100) - 1) / 100\n x3 = torch.arange(1, 101, dtype=torch.float).view(-1, 100)\n t1, t2, t3 = ExpTransform(), AffineTransform(1, 100), identity_transform\n dim = 0\n x = torch.cat([x1, x2, x3], dim=dim)\n t = CatTransform([t1, t2, t3], dim=dim)\n actual_dom_check = 
t.domain.check(x)\n expected_dom_check = torch.cat([t1.domain.check(x1),\n t2.domain.check(x2),\n t3.domain.check(x3)], dim=dim)\n self.assertEqual(expected_dom_check, actual_dom_check)\n actual = t(x)\n expected = torch.cat([t1(x1), t2(x2), t3(x3)], dim=dim)\n self.assertEqual(expected, actual)\n y1 = torch.arange(1, 101, dtype=torch.float).view(-1, 100)\n y2 = torch.arange(1, 101, dtype=torch.float).view(-1, 100)\n y3 = torch.arange(1, 101, dtype=torch.float).view(-1, 100)\n y = torch.cat([y1, y2, y3], dim=dim)\n actual_cod_check = t.codomain.check(y)\n expected_cod_check = torch.cat([t1.codomain.check(y1),\n t2.codomain.check(y2),\n t3.codomain.check(y3)], dim=dim)\n self.assertEqual(actual_cod_check, expected_cod_check)\n actual_inv = t.inv(y)\n expected_inv = torch.cat([t1.inv(y1), t2.inv(y2), t3.inv(y3)], dim=dim)\n self.assertEqual(expected_inv, actual_inv)\n actual_jac = t.log_abs_det_jacobian(x, y)\n expected_jac = torch.cat([t1.log_abs_det_jacobian(x1, y1),\n t2.log_abs_det_jacobian(x2, y2),\n t3.log_abs_det_jacobian(x3, y3)], dim=dim)\n self.assertEqual(actual_jac, expected_jac)\n\n def test_cat_transform_non_uniform(self):\n x1 = -1 * torch.arange(1, 101, dtype=torch.float).view(-1, 100)\n x2 = torch.cat([(torch.arange(1, 101, dtype=torch.float).view(-1, 100) - 1) / 100,\n torch.arange(1, 101, dtype=torch.float).view(-1, 100)])\n t1 = ExpTransform()\n t2 = CatTransform([AffineTransform(1, 100), identity_transform], dim=0)\n dim = 0\n x = torch.cat([x1, x2], dim=dim)\n t = CatTransform([t1, t2], dim=dim, lengths=[1, 2])\n actual_dom_check = t.domain.check(x)\n expected_dom_check = torch.cat([t1.domain.check(x1),\n t2.domain.check(x2)], dim=dim)\n self.assertEqual(expected_dom_check, actual_dom_check)\n actual = t(x)\n expected = torch.cat([t1(x1), t2(x2)], dim=dim)\n self.assertEqual(expected, actual)\n y1 = torch.arange(1, 101, dtype=torch.float).view(-1, 100)\n y2 = torch.cat([torch.arange(1, 101, dtype=torch.float).view(-1, 100),\n torch.arange(1, 
101, dtype=torch.float).view(-1, 100)])\n y = torch.cat([y1, y2], dim=dim)\n actual_cod_check = t.codomain.check(y)\n expected_cod_check = torch.cat([t1.codomain.check(y1),\n t2.codomain.check(y2)], dim=dim)\n self.assertEqual(actual_cod_check, expected_cod_check)\n actual_inv = t.inv(y)\n expected_inv = torch.cat([t1.inv(y1), t2.inv(y2)], dim=dim)\n self.assertEqual(expected_inv, actual_inv)\n actual_jac = t.log_abs_det_jacobian(x, y)\n expected_jac = torch.cat([t1.log_abs_det_jacobian(x1, y1),\n t2.log_abs_det_jacobian(x2, y2)], dim=dim)\n self.assertEqual(actual_jac, expected_jac)\n\n def test_stack_transform(self):\n x1 = -1 * torch.arange(1, 101, dtype=torch.float)\n x2 = (torch.arange(1, 101, dtype=torch.float) - 1) / 100\n x3 = torch.arange(1, 101, dtype=torch.float)\n t1, t2, t3 = ExpTransform(), AffineTransform(1, 100), identity_transform\n dim = 0\n x = torch.stack([x1, x2, x3], dim=dim)\n t = StackTransform([t1, t2, t3], dim=dim)\n actual_dom_check = t.domain.check(x)\n expected_dom_check = torch.stack([t1.domain.check(x1),\n t2.domain.check(x2),\n t3.domain.check(x3)], dim=dim)\n self.assertEqual(expected_dom_check, actual_dom_check)\n actual = t(x)\n expected = torch.stack([t1(x1), t2(x2), t3(x3)], dim=dim)\n self.assertEqual(expected, actual)\n y1 = torch.arange(1, 101, dtype=torch.float)\n y2 = torch.arange(1, 101, dtype=torch.float)\n y3 = torch.arange(1, 101, dtype=torch.float)\n y = torch.stack([y1, y2, y3], dim=dim)\n actual_cod_check = t.codomain.check(y)\n expected_cod_check = torch.stack([t1.codomain.check(y1),\n t2.codomain.check(y2),\n t3.codomain.check(y3)], dim=dim)\n self.assertEqual(actual_cod_check, expected_cod_check)\n actual_inv = t.inv(x)\n expected_inv = torch.stack([t1.inv(x1), t2.inv(x2), t3.inv(x3)], dim=dim)\n self.assertEqual(expected_inv, actual_inv)\n actual_jac = t.log_abs_det_jacobian(x, y)\n expected_jac = torch.stack([t1.log_abs_det_jacobian(x1, y1),\n t2.log_abs_det_jacobian(x2, y2),\n t3.log_abs_det_jacobian(x3, y3)], 
dim=dim)\n self.assertEqual(actual_jac, expected_jac)\n\n\nclass TestConstraintRegistry(TestCase):\n def get_constraints(self, is_cuda=False):\n tensor = torch.cuda.DoubleTensor if is_cuda else torch.DoubleTensor\n return [\n constraints.real,\n constraints.positive,\n constraints.greater_than(tensor([-10., -2, 0, 2, 10])),\n constraints.greater_than(0),\n constraints.greater_than(2),\n constraints.greater_than(-2),\n constraints.greater_than_eq(0),\n constraints.greater_than_eq(2),\n constraints.greater_than_eq(-2),\n constraints.less_than(tensor([-10., -2, 0, 2, 10])),\n constraints.less_than(0),\n constraints.less_than(2),\n constraints.less_than(-2),\n constraints.unit_interval,\n constraints.interval(tensor([-4., -2, 0, 2, 4]),\n tensor([-3., 3, 1, 5, 5])),\n constraints.interval(-2, -1),\n constraints.interval(1, 2),\n constraints.half_open_interval(tensor([-4., -2, 0, 2, 4]),\n tensor([-3., 3, 1, 5, 5])),\n constraints.half_open_interval(-2, -1),\n constraints.half_open_interval(1, 2),\n constraints.simplex,\n constraints.lower_cholesky,\n ]\n\n def test_biject_to(self):\n for constraint in self.get_constraints():\n try:\n t = biject_to(constraint)\n except NotImplementedError:\n continue\n self.assertTrue(t.bijective, \"biject_to({}) is not bijective\".format(constraint))\n x = torch.randn(5, 5)\n y = t(x)\n self.assertTrue(constraint.check(y).all(), '\\n'.join([\n \"Failed to biject_to({})\".format(constraint),\n \"x = {}\".format(x),\n \"biject_to(...)(x) = {}\".format(y),\n ]))\n x2 = t.inv(y)\n self.assertEqual(x, x2, msg=\"Error in biject_to({}) inverse\".format(constraint))\n\n j = t.log_abs_det_jacobian(x, y)\n self.assertEqual(j.shape, x.shape[:x.dim() - t.event_dim])\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA not found\")\n def test_biject_to_cuda(self):\n for constraint in self.get_constraints(is_cuda=True):\n try:\n t = biject_to(constraint)\n except NotImplementedError:\n continue\n self.assertTrue(t.bijective, \"biject_to({}) is not 
bijective\".format(constraint))\n # x = torch.randn(5, 5, device=\"cuda\")\n x = torch.randn(5, 5).cuda()\n y = t(x)\n self.assertTrue(constraint.check(y).all(), '\\n'.join([\n \"Failed to biject_to({})\".format(constraint),\n \"x = {}\".format(x),\n \"biject_to(...)(x) = {}\".format(y),\n ]))\n x2 = t.inv(y)\n self.assertEqual(x, x2, msg=\"Error in biject_to({}) inverse\".format(constraint))\n\n j = t.log_abs_det_jacobian(x, y)\n self.assertEqual(j.shape, x.shape[:x.dim() - t.event_dim])\n\n def test_transform_to(self):\n for constraint in self.get_constraints():\n t = transform_to(constraint)\n x = torch.randn(5, 5)\n y = t(x)\n self.assertTrue(constraint.check(y).all(), \"Failed to transform_to({})\".format(constraint))\n x2 = t.inv(y)\n y2 = t(x2)\n self.assertEqual(y, y2, msg=\"Error in transform_to({}) pseudoinverse\".format(constraint))\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA not found\")\n def test_transform_to_cuda(self):\n for constraint in self.get_constraints(is_cuda=True):\n t = transform_to(constraint)\n # x = torch.randn(5, 5, device=\"cuda\")\n x = torch.randn(5, 5).cuda()\n y = t(x)\n self.assertTrue(constraint.check(y).all(), \"Failed to transform_to({})\".format(constraint))\n x2 = t.inv(y)\n y2 = t(x2)\n self.assertEqual(y, y2, msg=\"Error in transform_to({}) pseudoinverse\".format(constraint))\n\n\nclass TestValidation(TestCase):\n def setUp(self):\n super(TestCase, self).setUp()\n Distribution.set_default_validate_args(True)\n\n def test_valid(self):\n for Dist, params in EXAMPLES:\n for param in params:\n Dist(validate_args=True, **param)\n\n @unittest.skipIf(TEST_WITH_UBSAN, \"division-by-zero error with UBSAN\")\n def test_invalid(self):\n for Dist, params in BAD_EXAMPLES:\n for i, param in enumerate(params):\n try:\n with self.assertRaises(ValueError):\n Dist(validate_args=True, **param)\n except AssertionError:\n fail_string = 'ValueError not raised for {} example {}/{}'\n raise AssertionError(fail_string.format(Dist.__name__, i + 1, 
len(params)))\n\n def tearDown(self):\n super(TestValidation, self).tearDown()\n Distribution.set_default_validate_args(False)\n\n\nclass TestJit(TestCase):\n def _examples(self):\n for Dist, params in EXAMPLES:\n for param in params:\n keys = param.keys()\n values = tuple(param[key] for key in keys)\n if not all(isinstance(x, torch.Tensor) for x in values):\n continue\n sample = Dist(**param).sample()\n yield Dist, keys, values, sample\n\n def _perturb_tensor(self, value, constraint):\n if isinstance(constraint, constraints._IntegerGreaterThan):\n return value + 1\n if isinstance(constraint, constraints._PositiveDefinite):\n return value + torch.eye(value.shape[-1])\n if value.dtype in [torch.float, torch.double]:\n transform = transform_to(constraint)\n delta = value.new(value.shape).normal_()\n return transform(transform.inv(value) + delta)\n if value.dtype == torch.long:\n result = value.clone()\n result[value == 0] = 1\n result[value == 1] = 0\n return result\n raise NotImplementedError\n\n def _perturb(self, Dist, keys, values, sample):\n with torch.no_grad():\n if Dist is Uniform:\n param = dict(zip(keys, values))\n param['low'] = param['low'] - torch.rand(param['low'].shape)\n param['high'] = param['high'] + torch.rand(param['high'].shape)\n values = [param[key] for key in keys]\n else:\n values = [self._perturb_tensor(value, Dist.arg_constraints.get(key, constraints.real))\n for key, value in zip(keys, values)]\n param = dict(zip(keys, values))\n sample = Dist(**param).sample()\n return values, sample\n\n def test_sample(self):\n for Dist, keys, values, sample in self._examples():\n\n def f(*values):\n param = dict(zip(keys, values))\n dist = Dist(**param)\n return dist.sample()\n\n traced_f = torch.jit.trace(f, values, check_trace=False)\n\n # FIXME Schema not found for node\n xfail = [\n Cauchy, # aten::cauchy(Double(2,1), float, float, Generator)\n HalfCauchy, # aten::cauchy(Double(2, 1), float, float, Generator)\n VonMises # Variance is not Euclidean\n 
]\n if Dist in xfail:\n continue\n\n with torch.random.fork_rng():\n sample = f(*values)\n traced_sample = traced_f(*values)\n self.assertEqual(sample, traced_sample)\n\n # FIXME no nondeterministic nodes found in trace\n xfail = [Beta, Dirichlet]\n if Dist not in xfail:\n self.assertTrue(any(n.isNondeterministic() for n in traced_f.graph.nodes()))\n\n def test_rsample(self):\n for Dist, keys, values, sample in self._examples():\n if not Dist.has_rsample:\n continue\n\n def f(*values):\n param = dict(zip(keys, values))\n dist = Dist(**param)\n return dist.rsample()\n\n traced_f = torch.jit.trace(f, values, check_trace=False)\n\n # FIXME Schema not found for node\n xfail = [\n Cauchy, # aten::cauchy(Double(2,1), float, float, Generator)\n HalfCauchy, # aten::cauchy(Double(2, 1), float, float, Generator)\n ]\n if Dist in xfail:\n continue\n\n with torch.random.fork_rng():\n sample = f(*values)\n traced_sample = traced_f(*values)\n self.assertEqual(sample, traced_sample)\n\n # FIXME no nondeterministic nodes found in trace\n xfail = [Beta, Dirichlet]\n if Dist not in xfail:\n self.assertTrue(any(n.isNondeterministic() for n in traced_f.graph.nodes()))\n\n def test_log_prob(self):\n for Dist, keys, values, sample in self._examples():\n # FIXME traced functions produce incorrect results\n xfail = [LowRankMultivariateNormal, MultivariateNormal]\n if Dist in xfail:\n continue\n\n def f(sample, *values):\n param = dict(zip(keys, values))\n dist = Dist(**param)\n return dist.log_prob(sample)\n\n traced_f = torch.jit.trace(f, (sample,) + values)\n\n # check on different data\n values, sample = self._perturb(Dist, keys, values, sample)\n expected = f(sample, *values)\n actual = traced_f(sample, *values)\n self.assertEqual(expected, actual,\n msg='{}\\nExpected:\\n{}\\nActual:\\n{}'.format(Dist.__name__, expected, actual))\n\n def test_enumerate_support(self):\n for Dist, keys, values, sample in self._examples():\n # FIXME traced functions produce incorrect results\n xfail = 
[Binomial]\n if Dist in xfail:\n continue\n\n def f(*values):\n param = dict(zip(keys, values))\n dist = Dist(**param)\n return dist.enumerate_support()\n\n try:\n traced_f = torch.jit.trace(f, values)\n except NotImplementedError:\n continue\n\n # check on different data\n values, sample = self._perturb(Dist, keys, values, sample)\n expected = f(*values)\n actual = traced_f(*values)\n self.assertEqual(expected, actual,\n msg='{}\\nExpected:\\n{}\\nActual:\\n{}'.format(Dist.__name__, expected, actual))\n\n def test_mean(self):\n for Dist, keys, values, sample in self._examples():\n\n def f(*values):\n param = dict(zip(keys, values))\n dist = Dist(**param)\n return dist.mean\n\n try:\n traced_f = torch.jit.trace(f, values)\n except NotImplementedError:\n continue\n\n # check on different data\n values, sample = self._perturb(Dist, keys, values, sample)\n expected = f(*values)\n actual = traced_f(*values)\n expected[expected == float('inf')] = 0.\n actual[actual == float('inf')] = 0.\n self.assertEqual(expected, actual,\n msg='{}\\nExpected:\\n{}\\nActual:\\n{}'.format(Dist.__name__, expected, actual))\n\n def test_variance(self):\n for Dist, keys, values, sample in self._examples():\n if Dist in [Cauchy, HalfCauchy]:\n continue # infinite variance\n\n def f(*values):\n param = dict(zip(keys, values))\n dist = Dist(**param)\n return dist.variance\n\n try:\n traced_f = torch.jit.trace(f, values)\n except NotImplementedError:\n continue\n\n # check on different data\n values, sample = self._perturb(Dist, keys, values, sample)\n expected = f(*values)\n actual = traced_f(*values)\n expected[expected == float('inf')] = 0.\n actual[actual == float('inf')] = 0.\n self.assertEqual(expected, actual,\n msg='{}\\nExpected:\\n{}\\nActual:\\n{}'.format(Dist.__name__, expected, actual))\n\n def test_entropy(self):\n for Dist, keys, values, sample in self._examples():\n # FIXME traced functions produce incorrect results\n xfail = [LowRankMultivariateNormal, MultivariateNormal]\n if 
Dist in xfail:\n continue\n\n def f(*values):\n param = dict(zip(keys, values))\n dist = Dist(**param)\n return dist.entropy()\n\n try:\n traced_f = torch.jit.trace(f, values)\n except NotImplementedError:\n continue\n\n # check on different data\n values, sample = self._perturb(Dist, keys, values, sample)\n expected = f(*values)\n actual = traced_f(*values)\n self.assertEqual(expected, actual,\n msg='{}\\nExpected:\\n{}\\nActual:\\n{}'.format(Dist.__name__, expected, actual))\n\n def test_cdf(self):\n for Dist, keys, values, sample in self._examples():\n\n def f(sample, *values):\n param = dict(zip(keys, values))\n dist = Dist(**param)\n cdf = dist.cdf(sample)\n return dist.icdf(cdf)\n\n try:\n traced_f = torch.jit.trace(f, (sample,) + values)\n except NotImplementedError:\n continue\n\n # check on different data\n values, sample = self._perturb(Dist, keys, values, sample)\n expected = f(sample, *values)\n actual = traced_f(sample, *values)\n self.assertEqual(expected, actual,\n msg='{}\\nExpected:\\n{}\\nActual:\\n{}'.format(Dist.__name__, expected, actual))\n\n\nif __name__ == '__main__' and torch._C.has_lapack:\n run_tests()\n" ]
[ [ "torch.Size", "torch.nn.Linear", "torch.true_divide", "torch.testing._internal.jit_utils.clear_class_registry", "torch.floor_divide", "torch.jit.load", "torch.jit.save", "torch.tensor", "torch.empty", "torch.full", "torch.div", "torch._test_serialization_subcmul", "torch.randn" ], [ "torch.nn.Linear", "torch.rand", "torch.arange", "torch.no_grad", "torch.testing._internal.jit_utils.clear_class_registry", "torch.testing.FileCheck", "torch.ones", "torch.jit.load", "torch.randn", "torch.tensor", "torch.jit.export_opnames", "torch.jit.script", "torch.testing._internal.common_utils.run_tests", "torch._C._jit_pass_lower_all_tuples" ], [ "torch.backends.cudnn.version", "torch.cuda.get_device_properties", "torch.testing._internal.common_utils.skipCUDANonDefaultStreamIf", "torch.is_deterministic", "torch.set_deterministic", "torch.cuda.current_device", "torch.cuda.device_count", "torch.ones", "torch.cuda.is_available", "torch.backends.cudnn.is_acceptable" ], [ "torch.distributions.transforms.LowerCholeskyTransform", "torch.cat", "torch.distributions.Chi2", "torch.distributions.constraint_registry.transform_to", "torch.distributions.Bernoulli", "numpy.log", "torch.distributions.Pareto", "torch.isfinite", "torch.distributions.constraints.greater_than", "torch.tensor", "torch.distributions.Beta", "torch.distributions.constraints.half_open_interval", "torch.distributions.Independent", "torch.cholesky", "torch.distributions.HalfCauchy", "torch.distributions.transforms.CatTransform", "torch.distributions.transforms.AbsTransform", "torch.pow", "torch.distributions.transforms.SigmoidTransform", "torch.distributions.Uniform", "numpy.cumprod", "torch.distributions.transforms.PowerTransform", "torch.distributions.Dirichlet", "torch.testing._internal.common_utils.run_tests", "torch.distributions.transforms.ComposeTransform", "torch.distributions.transforms.SoftmaxTransform", "numpy.dot", "torch.distributions.LowRankMultivariateNormal", "torch.set_default_dtype", 
"torch.distributions.LogNormal", "torch.Size", "numpy.linalg.norm", "torch.distributions.transforms.StickBreakingTransform", "torch.distributions.transforms.StackTransform", "torch.distributions.OneHotCategorical", "torch.abs", "numpy.arange", "torch.distributions.transforms.AffineTransform", "torch.zeros_like", "torch.distributions.Binomial", "torch.zeros", "numpy.array", "torch.distributions.Exponential", "numpy.zeros", "torch.max", "torch.distributions.transforms.ExpTransform", "torch.distributions.constraints.greater_than_eq", "torch.distributions.RelaxedBernoulli", "torch.distributions.kl_divergence", "torch.distributions.constraints.is_dependent", "torch.rand", "numpy.ones", "torch.distributions.Geometric", "torch.set_rng_state", "torch.distributions.ExponentialFamily.entropy", "torch.random.fork_rng", "torch.distributions.Poisson", "numpy.diag", "torch.distributions.FisherSnedecor", "torch.distributions.Categorical", "torch.stack", "torch.distributions.RelaxedOneHotCategorical", "numpy.cos", "torch.distributions.MultivariateNormal", "torch.distributions.utils.probs_to_logits", "torch.distributions.TransformedDistribution", "numpy.max", "torch.get_rng_state", "torch.distributions.HalfNormal", "torch.distributions.NegativeBinomial", "torch.autograd.grad", "torch.testing._internal.common_utils.set_rng_seed", "torch.distributions.Multinomial", "torch.distributions.Distribution.set_default_validate_args", "torch.distributions.Laplace", "torch.distributions.ContinuousBernoulli", "torch.distributions.constraints.interval", "numpy.asarray", "torch.distributions.Normal", "torch.no_grad", "numpy.abs", "torch.distributions.LogisticNormal", "torch.distributions.kl._kl_expfamily_expfamily", "numpy.ones_like", "numpy.exp", "torch.ones", "torch.distributions.Gumbel", "torch.eye", "torch.jit.trace", "numpy.random.normal", "torch.distributions.Weibull", "torch.distributions.transforms.TanhTransform", "numpy.isfinite", "numpy.sqrt", "torch.empty", "numpy.pad", 
"torch.distributions.constraints.less_than", "torch.distributions.Cauchy", "torch.autograd.gradcheck", "torch.full", "torch.distributions.constraint_registry.biject_to", "torch.distributions.StudentT", "torch.distributions.Gamma", "torch.arange", "torch.distributions.VonMises", "torch.ones_like", "torch.diag", "torch.randn", "numpy.unique" ] ]
Eravalord/OpenPNM
[ "1546fa1ac2204443bde916f2037fac383c5069ae", "031894a53650c3193d767b0460c8ea2e66799825", "8298608a38f6aae4a96b0f3b9dd9b9e4460e8692" ]
[ "openpnm/phases/GenericPhase.py", "openpnm/io/CSV.py", "scripts/example_Nernst_Planck_multiphysics_2d.py" ]
[ "from openpnm.core import Base, ModelsMixin\nfrom openpnm.utils import Workspace, logging, Docorator\nfrom numpy import ones\nimport openpnm.models as mods\ndocstr = Docorator()\nlogger = logging.getLogger(__name__)\nws = Workspace()\n\n\n@docstr.get_sectionsf('GenericPhase', sections=['Parameters'])\n@docstr.dedent\nclass GenericPhase(Base, ModelsMixin):\n r\"\"\"\n This generic class is meant as a starter for custom Phase objects\n\n This class produces a blank-slate object with no pore-scale models for\n calculating any thermophysical properties. Users must add models and\n specify parameters for all the properties they require.\n\n Parameters\n ----------\n network : OpenPNM network object\n The network with which this object is associated\n %(Base.parameters)s\n\n Examples\n --------\n Create a new empty phase:\n\n >>> import openpnm as op\n >>> pn = op.network.Cubic([10, 10, 10])\n >>> phase = op.phases.GenericPhase(network=pn)\n\n And add a model:\n\n >>> phase.add_model(propname='pore.molar_density',\n ... model=op.models.phases.molar_density.ideal_gas)\n\n Now confirm that the model was added and data was calculated. 
The\n ``models`` attribute can be printed:\n\n >>> print(phase.models)\n ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――\n # Property Name Parameter Value\n ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――\n 1 pore.molar_density model: ideal_gas\n pressure: pore.pressure\n temperature: pore.temperature\n regeneration mode: normal\n ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――\n\n And the Phase itself has a nice printout using ``print(phase)``.\n\n \"\"\"\n\n def __init__(self, network=None, project=None, settings={}, **kwargs):\n # Define some default settings\n self.settings.update({'prefix': 'phase'})\n # Overwrite with user supplied settings, if any\n self.settings.update(settings)\n\n # Deal with network or project arguments\n if network is not None:\n if project is not None:\n assert network is project.network\n else:\n project = network.project\n\n super().__init__(project=project, **kwargs)\n\n # If project has a network object, adjust pore and throat sizes\n network = self.project.network\n if network:\n self['pore.all'] = ones((network.Np, ), dtype=bool)\n self['throat.all'] = ones((network.Nt, ), dtype=bool)\n\n # Set standard conditions on the fluid to get started\n self['pore.temperature'] = 298.0\n self['pore.pressure'] = 101325.0\n\n def __getitem__(self, key):\n element, prop = key.split('.', 1)\n # Deal with special keys first\n if prop == '_id':\n net = self.project.network\n return net[element+'._id']\n if prop == self.name:\n return self[element+'.all']\n # An attempt at automatic interpolation if key not found\n if key not in self.keys():\n not_el = list(set(['pore', 'throat']).difference(set([element])))[0]\n if (not_el + '.' + prop) in self.keys():\n mod = {'pore': mods.misc.from_neighbor_throats,\n 'throat': mods.misc.from_neighbor_pores}\n self.add_model(propname=key,\n model=mod[element],\n prop=not_el + '.' 
+ prop,\n mode='mean')\n vals = super().__getitem__(key)\n return vals\n", "import re\nimport numpy as np\nimport scipy as sp\nfrom openpnm.io.Pandas import Pandas\nfrom openpnm.io import GenericIO, Dict\nfrom openpnm.utils import logging, Workspace\nlogger = logging.getLogger(__name__)\nws = Workspace()\n\n\nclass CSV(GenericIO):\n r\"\"\"\n Reads and writes CSV (comma-separated-value files) containing pore and\n throat data\n\n Notes\n -----\n There are a few rules governing how the data is be stored:\n\n 1. The first row of the file (column headers) must contain the\n property names. The subsequent rows contain the data.\n\n 2. The property names should be in the usual OpenPNM format, such as\n of ``pore.volume`` or ``throat.surface_area``.\n\n 3. Each column represents a specific property. For Np x 1 or Nt x 1\n data such as *pore.volume* this is straightforward. For Np x *m* or\n Nt x *m* data, each of the *m* columns should have their own column in\n in the CSV file, with a numpy-style index indicating which axis it\n corresponds to. For instance, the *pore.coords* values should be stored\n as three separate columns with the headings: *pore.coords[0]*,\n *pore.coords[1]*, and *pore.coords[2]*. OpenPNM will convert that back\n into an Np x *m* array upon loading.\n\n 4. The file can contain both or either pore and throat data.\n\n 5. Labels can be imported by placing the characters TRUE and FALSE\n in a column corresponding to the label name (i.e. *pore.front*). TRUE\n indicates where the label applies and FALSE otherwise.\n\n \"\"\"\n\n @classmethod\n def save(cls, *args, **kwargs):\n r\"\"\"\n This method is to be deprecated. 
Use ``export_data`` instead.\n \"\"\"\n cls.export_data(*args, **kwargs)\n\n @classmethod\n def export_data(cls, network=None, phases=[], filename='', delim=' | '):\n r\"\"\"\n Save all the pore and throat property data on the Network (and\n optionally on any Phases objects) to CSV files.\n\n Parameters\n ----------\n network : OpenPNM Network\n The Network containing the data to be stored\n\n phases : list of OpenPNM Phases (optional)\n The Phases whose data should be stored.\n\n filename : string or path object\n The name of the file to store the data\n\n Notes\n -----\n The data from all Geometry objects is added to the file automatically.\n\n \"\"\"\n project, network, phases = cls._parse_args(network=network,\n phases=phases)\n df = Pandas.to_dataframe(network=network, phases=phases,\n join=True, delim=delim)\n # Write to file\n if filename == '':\n filename = project.name\n fname = cls._parse_filename(filename=filename, ext='csv')\n df.to_csv(fname, index=False)\n\n @classmethod\n def load(cls, *args, **kwargs):\n r\"\"\"\n This method will be deprecated. Use ``import_data`` instead.\n \"\"\"\n proj = cls.import_data(*args, **kwargs)\n return proj\n\n @classmethod\n def import_data(cls, filename, project=None, delim=' | '):\n r\"\"\"\n Opens a 'csv' file, reads in the data, and adds it to the **Network**\n\n Parameters\n ----------\n filename : string (optional)\n The name of the file containing the data to import. 
The formatting\n of this file is outlined below.\n\n project : OpenPNM Project object\n A GenericNetwork is created and added to the specified Project.\n If no Project object is supplied then one will be created and\n returned.\n\n Returns\n -------\n project : list\n An OpenPNM project containing the data assigned to Generic\n versions of the objects from which it was exported.\n\n \"\"\"\n from pandas import read_table\n\n if project is None:\n project = ws.new_project()\n\n fname = cls._parse_filename(filename, ext='csv')\n a = read_table(filepath_or_buffer=fname,\n sep=',',\n skipinitialspace=True,\n index_col=False,\n true_values=['T', 't', 'True', 'true', 'TRUE'],\n false_values=['F', 'f', 'False', 'false', 'FALSE'])\n\n dct = {}\n # First parse through all the items and re-merge columns\n keys = sorted(list(a.keys()))\n for item in keys:\n m = re.search(r'\\[.\\]', item) # The dot '.' is a wildcard\n if m: # m is None if pattern not found, otherwise merge cols\n pname = re.split(r'\\[.\\]', item)[0] # Get base propname\n # Find all other keys with same base propname\n merge_keys = [k for k in a.keys() if k.startswith(pname)]\n # Rerieve and remove arrays with same base propname\n merge_cols = [a.pop(k) for k in merge_keys]\n # Merge arrays into multi-column array and store in DataFrame\n dct[pname] = np.vstack(merge_cols).T\n # Remove key from list of keys\n for k in keys:\n if k.startswith(pname):\n keys.pop(keys.index(k))\n else:\n dct[item] = np.array(a.pop(item))\n\n project = Dict.from_dict(dct, project=project, delim=delim)\n\n return project\n", "import openpnm as op\nfrom openpnm.phases import mixtures\nimport numpy as np\nws = op.Workspace()\nproj = ws.new_project()\n# ws.settings['loglevel'] = 20\n\n\n\"\"\"\n Details about the continum and numerical model equations can be found on:\n Agnaou, M., Sadeghi, M. A., Tranter, T. G., & Gostick, J. 
(2020).\n Modeling transport of charged species in pore networks: solution of the\n Nernst-Planck equations coupled with fluid flow and charge conservation\n equations.\n Computers & Geosciences, 104505.\n\"\"\"\n\n\n# network, geometry, phase\nnp.random.seed(0)\n\nnet = op.network.Cubic(shape=[23, 15, 1], spacing=1e-6, project=proj)\nprs = (net['pore.back'] * net['pore.right'] + net['pore.back']\n * net['pore.left'] + net['pore.front'] * net['pore.right']\n + net['pore.front'] * net['pore.left'])\nprs = net.Ps[prs]\n\nthrts = net['throat.surface']\nthrts = net.Ts[thrts]\n\nop.topotools.trim(network=net, pores=prs, throats=thrts)\n\nnp.random.seed(0)\nop.topotools.reduce_coordination(net, 3)\n\nnp.random.seed(0)\ngeo = op.geometry.StickAndBall(network=net, pores=net.Ps, throats=net.Ts)\n\n\nsw = mixtures.SalineWater(network=net)\n# Retrieve handles to each species for use below\nNa = sw.components['Na_' + sw.name]\nCl = sw.components['Cl_' + sw.name]\nH2O = sw.components['H2O_' + sw.name]\n\n# physics\nphys = op.physics.GenericPhysics(network=net, phase=sw, geometry=geo)\n\nflow = op.models.physics.hydraulic_conductance.hagen_poiseuille_2D\nphys.add_model(propname='throat.hydraulic_conductance',\n pore_viscosity='pore.viscosity',\n throat_viscosity='throat.viscosity',\n model=flow, regen_mode='normal')\n\ncurrent = op.models.physics.ionic_conductance.electroneutrality_2D\nphys.add_model(propname='throat.ionic_conductance', ions=[Na.name, Cl.name],\n model=current, regen_mode='normal')\n\neA_dif = op.models.physics.diffusive_conductance.ordinary_diffusion_2D\nphys.add_model(propname='throat.diffusive_conductance.' + Na.name,\n pore_diffusivity='pore.diffusivity.' + Na.name,\n throat_diffusivity='throat.diffusivity.' + Na.name,\n model=eA_dif, regen_mode='normal')\n\neB_dif = op.models.physics.diffusive_conductance.ordinary_diffusion_2D\nphys.add_model(propname='throat.diffusive_conductance.' + Cl.name,\n pore_diffusivity='pore.diffusivity.' 
+ Cl.name,\n throat_diffusivity='throat.diffusivity.' + Cl.name,\n model=eB_dif, regen_mode='normal')\n\nscheme = 'powerlaw'\nad_dif_mig_Na = op.models.physics.ad_dif_mig_conductance.ad_dif_mig\nphys.add_model(propname='throat.ad_dif_mig_conductance.' + Na.name,\n pore_pressure='pore.pressure', model=ad_dif_mig_Na,\n ion=Na.name, s_scheme=scheme)\n\nad_dif_mig_Cl = op.models.physics.ad_dif_mig_conductance.ad_dif_mig\nphys.add_model(propname='throat.ad_dif_mig_conductance.' + Cl.name,\n pore_pressure='pore.pressure', model=ad_dif_mig_Cl,\n ion=Cl.name, s_scheme=scheme)\n\n# settings for algorithms\nsetts1 = {'solver_max_iter': 5, 'solver_tol': 1e-08, 'solver_rtol': 1e-08,\n 'nlin_max_iter': 10, 'cache_A': False, 'cache_b': False}\nsetts2 = {'g_tol': 1e-4, 'g_max_iter': 100}\n\n# algorithms\nsf = op.algorithms.StokesFlow(network=net, phase=sw, settings=setts1)\nsf.set_value_BC(pores=net.pores('back'), values=11)\nsf.set_value_BC(pores=net.pores('front'), values=10)\nsf.run()\nsw.update(sf.results())\n\np = op.algorithms.IonicConduction(network=net, phase=sw, settings=setts1)\np.set_value_BC(pores=net.pores('left'), values=0.02)\np.set_value_BC(pores=net.pores('right'), values=0.01)\np.settings['charge_conservation'] = 'electroneutrality_2D'\n\neA = op.algorithms.NernstPlanck(network=net, phase=sw, ion=Na.name,\n settings=setts1)\neA.set_value_BC(pores=net.pores('back'), values=20)\neA.set_value_BC(pores=net.pores('front'), values=10)\n\neB = op.algorithms.NernstPlanck(network=net, phase=sw, ion=Cl.name,\n settings=setts1)\neB.set_value_BC(pores=net.pores('back'), values=20)\neB.set_value_BC(pores=net.pores('front'), values=10)\n\npnp = op.algorithms.NernstPlanckMultiphysicsSolver(network=net, phase=sw,\n settings=setts2)\npnp.setup(potential_field=p.name, ions=[eA.name, eB.name])\npnp.run()\n\nsw.update(sf.results())\nsw.update(p.results())\nsw.update(eA.results())\nsw.update(eB.results())\n\n# output data to Paraview\n# proj.export_data(phases=[sw], filename='OUT', 
filetype='xdmf')\n" ]
[ [ "numpy.ones" ], [ "pandas.read_table", "numpy.vstack" ], [ "numpy.random.seed" ] ]
ffangsong/Retrieval_Based_Chatbot
[ "4e804a4a733f37e15a3e4c8f42f8334e9a32173c" ]
[ "src/word2vec_LSTM.py" ]
[ "import itertools\nimport re\nimport nltk\nimport numpy as np\nimport pandas as pd\nfrom gensim.models import KeyedVectors\nfrom nltk import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom string import punctuation\n\nnltk.download('stopwords')\nnltk.download('wordnet')\nnltk.download('punkt')\n\nfrom sklearn.model_selection import train_test_split\nfrom keras import backend as K\nfrom keras.layers import Input, Embedding, LSTM, Lambda\nfrom keras.optimizers import Adadelta\nfrom keras.models import Model\nfrom keras.preprocessing.sequence import pad_sequences\n\nDATA_FILE_PATH = './data/train_data.txt'\nEMBEDDING_FILE_PATH = './docs/pretrained/GoogleNews-vectors-negative300.bin.gz'\nEMBEDDING_DIM = 300\nMAX_SEQ_LENGTH = 130\nGRADIENT_CLIPPING_NORM = 1.25\n\n\nclass word2vec_LSTM():\n def __init__(self):\n self.MAX_SEQ_LENGTH = 0\n self.vocabulary = {}\n self.inverse_vocabulary = inverse_vocabulary = ['<unk']\n self.w2v_model = KeyedVectors.load_word2vec_format(\n EMBEDDING_FILE_PATH,\n binary=True)\n\n def load_dataset(self):\n ''' load_dataset '''\n df = pd.read_csv(DATA_FILE_PATH, header=0, sep=' ').dropna()\n return df\n\n def text_to_word(self, text):\n ''' clean_text, and lemmatize the words'''\n text = str(text)\n text = text.lower()\n text = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", text)\n text = re.sub(r\"what's\", 'what is', text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"cannot \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\",\", \" \", text)\n text = re.sub(r\"\\.\", \" \", text)\n text = re.sub(r\"!\", \" \", text)\n text = re.sub(r\"\\/\", \" \", text)\n text = re.sub(r\"\\^\", \" ^ \", text)\n text = re.sub(r\"\\+\", \" + \", 
text)\n text = re.sub(r\"\\-\", \" - \", text)\n text = re.sub(r\"\\=\", \" = \", text)\n text = re.sub(r\"'\", \" \", text)\n text = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", text)\n text = re.sub(r\":\", \" : \", text)\n text = re.sub(r\" e g \", \" eg \", text)\n text = re.sub(r\" b g \", \" bg \", text)\n text = re.sub(r\" u s \", \" american \", text)\n text = re.sub(r\"\\0s\", \"0\", text)\n text = re.sub(r\" 9 11 \", \"911\", text)\n text = re.sub(r\"e - mail\", \"email\", text)\n text = re.sub(r\"j k\", \"jk\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n text = re.sub('[0-9]+\\.[0-9]+', \" 87 \", text)\n # Remove punctuation from text\n text = ''.join([c for c in text if c not in punctuation]).lower()\n # Lemmatize\n lemmatizer = WordNetLemmatizer()\n text = str(text)\n \n # split text into individual words''\n words = word_tokenize(text)\n # Lemmatize\n words = list(map(lambda x: lemmatizer.lemmatize(x, 'v'), words))\n return words\n\n def get_indicies(self, df):\n '''Replace questions with of lists of indices,\n include stopwords if they have embedding'''\n stop_words = set(stopwords.words('english'))\n questions_cols = ['question1', 'question2']\n for index, row in df.iterrows():\n for question in questions_cols:\n q2n = []\n for word in self.text_to_word(row[question]):\n if word in stop_words and word not in self.w2v_model.vocab:\n continue\n if word not in self.vocabulary:\n self.vocabulary[word] = len(self.inverse_vocabulary)\n q2n.append(len(self.inverse_vocabulary))\n self.inverse_vocabulary.append(word)\n else:\n q2n.append(self.vocabulary[word])\n df.set_value(index, question, q2n)\n return df\n\n def creat_embedding_matrix(self):\n '''create the embedding matrix'''\n embeddings = 1 * np.random.randn(len(self.vocabulary) + 1, EMBEDDING_DIM)\n embeddings[0] = 0\n for word, index in self.vocabulary.items():\n if word in self.w2v_model.vocab:\n embeddings[index] = self.w2v_model.word_vec(word)\n return embeddings\n\n def trainTestSplit(self, 
df):\n X = df[['question1', ' question2']]\n y = df['is_duplicate']\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n X_train = {'left': X_train.question1, 'right': X_train.question2}\n X_test = {'left': X_test.question1, 'right': X_test.question2}\n y_train = y_train.values\n y_test = y_test.values\n return X_train, X_test, y_train, y_test\n\n def pad_sequence(self, X_train, X_test):\n '''pad the sequence'''\n for dataset, side in itertools.product([X_train, X_test], ['left', 'right']):\n dataset[side] = pad_sequences(dataset[side], maxlen=MAX_SEQ_LENGTH)\n return X_train, X_test\n\n def build_model(self, embeddings):\n def exponent_neg_manhattan_distance(left, right):\n return K.exp(-K.sum(K.abs(left - right), axis=1, keepdims=True))\n\n # The Input layer\n left_input = Input(shape=(MAX_SEQ_LENGTH,), dtype='int32')\n right_input = Input(shape=(MAX_SEQ_LENGTH,), dtype='int32')\n embedding_layer = Embedding(len(embeddings), EMBEDDING_DIM, weights=[embeddings], input_length=MAX_SEQ_LENGTH,\n trainable=False)\n\n # Embedded version of the inputs\n encoded_left = embedding_layer(left_input)\n encoded_right = embedding_layer(right_input)\n\n # Since this is a siamese network, both sides share the same LSTM\n shared_lstm = LSTM(50)\n\n left_output = shared_lstm(encoded_left)\n right_output = shared_lstm(encoded_right)\n\n # Calculates the distance as defined by the MaLSTM model\n malstm_distance = Lambda(function=lambda x: exponent_neg_manhattan_distance(x[0], x[1]),\n output_shape=lambda x: (x[0][0], 1))([left_output, right_output])\n\n # Pack it all up into a model\n model = Model(input=[left_input, right_input], output=[malstm_distance])\n\n # Adadelta optimizer, with gradient clipping by norm\n optimizer = Adadelta(clipnorm=GRADIENT_CLIPPING_NORM)\n model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['accuracy'])\n return model\n" ]
[ [ "sklearn.model_selection.train_test_split", "pandas.read_csv" ] ]
syfung/laminate
[ "72b728f223817aac7a9608beb70f7518b8261fc9" ]
[ "deform.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 27 17:22:11 2021\n\n@author: Joshua Fung\n\"\"\"\nimport numpy as np\n\ndef deformation(abd_inv, load):\n return abd_inv.dot(load)\n\ndef strain_mid_ply(laminate, mid_plane_deformation):\n ply_deformation = []\n for z in laminate.mid_ply_zs:\n strain = mid_plane_deformation[:3]\n curvature = mid_plane_deformation[3:]\n ply_deformation.append(strain + z * curvature)\n return ply_deformation\n\ndef strain_top_bottom_ply(laminate, mid_plane_deformation):\n ply_deformation = []\n for z in laminate.z_s:\n strain = mid_plane_deformation[:3]\n curvature = mid_plane_deformation[3:]\n ply_deformation.append((strain + z[0] * curvature, strain + z[1] * curvature))\n return ply_deformation\n\ndef stress_mid_ply(laminate, mid_plane_deformation, local=False):\n strain = strain_mid_ply(laminate, mid_plane_deformation)\n stress = []\n if local:\n for k in range(len(laminate.plies)):\n stress.append(laminate.plies[k].t.dot(laminate.plies[k].q_bar.dot(strain[k])))\n else: \n for k in range(len(laminate.plies)):\n stress.append(laminate.plies[k].q_bar.dot(strain[k]))\n return stress\n\ndef stress_top_bottom_ply(laminate, mid_plane_deformation, local=False):\n strain = strain_top_bottom_ply(laminate, mid_plane_deformation)\n stress = []\n if local:\n for k in range(len(laminate.plies)):\n s = (laminate.plies[k].t.dot(laminate.plies[k].q_bar.dot(strain[k][0])),\n laminate.plies[k].t.dot(laminate.plies[k].q_bar.dot(strain[k][1])))\n stress.append(s)\n else:\n for k in range(len(laminate.plies)):\n s = (laminate.plies[k].q_bar.dot(strain[k][0]), laminate.plies[k].q_bar.dot(strain[k][1]))\n stress.append(s)\n return stress\n \nif __name__ == \"__main__\":\n import stiffmat\n import matplotlib.pyplot as plt\n np.set_printoptions(precision=5)\n\n # =============================================================================\n # Ply properties\n # =============================================================================\n # Elastic 
properties\n E1 = 125 * 1e3 # MPa\n E2 = 9.8 * 1e3 # MPa\n G12 = 5.5 * 1e3 # MPa\n nu12 = 0.24 # -\n \n # Failure properties\n sigma_lp = 900 # MPa\n sigma_ln = 800 # MPa\n sigma_tp = 55 # MPa\n sigma_tn = 170 # MPa\n tau_lt = 90 # MPa\n \n # Thickness\n t = 0.125 # mm\n \n # Layup angle\n a = [0, 0, 0, 90, 0, 0, 45, 0] # 45 increment\n # a = [0, 0, 0, 90, 0, 0, 30, 0] # 30 increment\n # a = [0, 0, 0, 75, 75, 45, 45, 45] # 15 increment\n # =============================================================================\n # Load Vector\n # =============================================================================\n load = np.matrix((240., 82., 4., -63., 0., 0.)).T\n print(\"Applied Load\")\n print(\"({0[0]:2.2f}N/mm, {1[0]:2.2f}N/mm, {2[0]:2.2f}N/mm, {3[0]:2.2f}N, {4[0]:2.2f}N,{5[0]:2.2f}N).T\\n\".format(*np.array(load)))\n \n lam = stiffmat.Laminate(a, t, E1, E2, nu12, G12)\n print(\"ABD Matrix:\")\n print(lam.abd)\n print(\"Unit:\")\n print(np.matrix([[\"N/mm (MPa.mm)\", \"N (MPa.mm2)\"],[\"N (MPa.mm2)\", \"N.mm(MPa.mm3)\"]]), \"\\n\")\n \n mid_plane_deformation = deformation(lam.abd_inv, load)\n print(\"Mid plane deforamtion:\")\n print(\"({0[0]:2.4f} {1[0]:2.4f} {2[0]:2.4f} {3[0]:2.4f}1/mm {4[0]:2.4f}1/mm {5[0]:2.4f}1/mm).T\".format(*np.array(mid_plane_deformation)))\n m_deform = mid_plane_deformation.copy()\n m_deform[3:6] = m_deform[3:6]*1000\n print(\"({0[0]:2.4f} {1[0]:2.4f} {2[0]:2.4f} {3[0]:2.4f}1/m {4[0]:2.4f}1/m {5[0]:2.2f}1/m).T\\n\".format(*np.array(m_deform)))\n \n strain = strain_mid_ply(lam, mid_plane_deformation)\n print(strain)\n strain_top_bottom = strain_top_bottom_ply(lam, mid_plane_deformation)\n # print(strain_top_bottom)\n plt.figure()\n plt.plot(lam.mid_ply_zs, [s.item(0) for s in strain], \"x\")\n plt.plot(list(sum(lam.z_s, ())), [s.item(0) for s in (list(sum(strain_top_bottom, ())))])\n \n stress = stress_mid_ply(lam, mid_plane_deformation)\n stress_top_bottom = stress_top_bottom_ply(lam, mid_plane_deformation)\n \n plt.figure()\n 
plt.subplot(1, 3, 1)\n plt.plot([s.item(0) for s in (list(sum(stress_top_bottom, ())))], list(sum(lam.z_s, ())), \"c-x\")\n plt.plot([s.item(0) for s in stress], lam.mid_ply_zs, \"kx\")\n ax = plt.gca()\n ax.invert_yaxis()\n \n plt.subplot(1, 3, 2)\n plt.plot([s.item(1) for s in (list(sum(stress_top_bottom, ())))], list(sum(lam.z_s, ())), \"c-x\")\n plt.plot([s.item(1) for s in stress], lam.mid_ply_zs, \"kx\")\n ax = plt.gca()\n ax.invert_yaxis()\n \n plt.subplot(1, 3, 3)\n plt.plot([s.item(2) for s in (list(sum(stress_top_bottom, ())))], list(sum(lam.z_s, ())), \"c-x\")\n plt.plot([s.item(2) for s in stress], lam.mid_ply_zs, \"kx\")\n ax = plt.gca()\n ax.invert_yaxis()\n \n print()\n \n stress_local = stress_mid_ply(lam, mid_plane_deformation, local=True)\n stress_top_bottom_local = stress_top_bottom_ply(lam, mid_plane_deformation, local=True)\n import failure\n for s in stress_local:\n print(\"Tsai-Wu Failed {}, with T = {}\".format(*failure.tsai_wu(s, sigma_lp, sigma_ln, sigma_tp, sigma_tn, tau_lt)))\n \n print()\n \n for s in stress_local:\n print(\"Tsai-Hill Failed {}, with T = {}\".format(*failure.tsai_hill(s, sigma_lp, sigma_ln, sigma_tp, sigma_tn, tau_lt)))\n \n print()\n \n for s in list(sum(stress_top_bottom_local, ())):\n print(\"Tsai-Wu Failed {}, with T = {}\".format(*failure.tsai_wu(s, sigma_lp, sigma_ln, sigma_tp, sigma_tn, tau_lt)))\n " ]
[ [ "numpy.matrix", "numpy.array", "numpy.set_printoptions", "matplotlib.pyplot.figure", "matplotlib.pyplot.gca", "matplotlib.pyplot.subplot" ] ]
amalolan/vehicle-counter
[ "c48c3f897107e4b7c8442529d38a141f127fd778" ]
[ "deep_sort/tracker.py" ]
[ "# vim: expandtab:ts=4:sw=4\r\nfrom __future__ import absolute_import\r\nimport numpy as np\r\nfrom . import kalman_filter\r\nfrom . import linear_assignment\r\nfrom . import iou_matching\r\nfrom .track import Track\r\n\r\n\r\nclass Tracker:\r\n \"\"\"\r\n This is the multi-target tracker.\r\n\r\n Parameters\r\n ----------\r\n metric : nn_matching.NearestNeighborDistanceMetric\r\n A distance metric for measurement-to-track association.\r\n max_age : int\r\n Maximum number of missed misses before a track is deleted.\r\n n_init : int\r\n Number of consecutive detections before the track is confirmed. The\r\n track state is set to `Deleted` if a miss occurs within the first\r\n `n_init` frames.\r\n\r\n Attributes\r\n ----------\r\n metric : nn_matching.NearestNeighborDistanceMetric\r\n The distance metric used for measurement to track association.\r\n max_age : int\r\n Maximum number of missed misses before a track is deleted.\r\n n_init : int\r\n Number of frames that a track remains in initialization phase.\r\n kf : kalman_filter.KalmanFilter\r\n A Kalman filter to filter target trajectories in image space.\r\n tracks : List[Track]\r\n The list of active tracks at the current time step.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, metric, max_iou_distance=0.7, max_age=60, n_init=3):\r\n self.metric = metric\r\n self.max_iou_distance = max_iou_distance\r\n self.max_age = max_age\r\n self.n_init = n_init\r\n\r\n self.kf = kalman_filter.KalmanFilter()\r\n self.tracks = []\r\n self._next_id = 1\r\n\r\n def predict(self):\r\n \"\"\"Propagate track state distributions one time step forward.\r\n\r\n This function should be called once every time step, before `update`.\r\n \"\"\"\r\n for track in self.tracks:\r\n track.predict(self.kf)\r\n\r\n def update(self, detections):\r\n \"\"\"Perform measurement update and track management.\r\n\r\n Parameters\r\n ----------\r\n detections : List[deep_sort.detection.Detection]\r\n A list of detections at the current time step.\r\n\r\n 
\"\"\"\r\n # Run matching cascade.\r\n matches, unmatched_tracks, unmatched_detections = \\\r\n self._match(detections)\r\n\r\n # Update track set.\r\n for track_idx, detection_idx in matches:\r\n self.tracks[track_idx].update(\r\n self.kf, detections[detection_idx])\r\n for track_idx in unmatched_tracks:\r\n self.tracks[track_idx].mark_missed()\r\n for detection_idx in unmatched_detections:\r\n self._initiate_track(detections[detection_idx])\r\n self.tracks = [t for t in self.tracks if not t.is_deleted()]\r\n\r\n # Update distance metric.\r\n active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]\r\n features, targets = [], []\r\n for track in self.tracks:\r\n if not track.is_confirmed():\r\n continue\r\n features += track.features\r\n targets += [track.track_id for _ in track.features]\r\n track.features = []\r\n self.metric.partial_fit(\r\n np.asarray(features), np.asarray(targets), active_targets)\r\n\r\n def _match(self, detections):\r\n\r\n def gated_metric(tracks, dets, track_indices, detection_indices):\r\n features = np.array([dets[i].feature for i in detection_indices])\r\n targets = np.array([tracks[i].track_id for i in track_indices])\r\n cost_matrix = self.metric.distance(features, targets)\r\n cost_matrix = linear_assignment.gate_cost_matrix(\r\n self.kf, cost_matrix, tracks, dets, track_indices,\r\n detection_indices)\r\n\r\n return cost_matrix\r\n\r\n # Split track set into confirmed and unconfirmed tracks.\r\n confirmed_tracks = [\r\n i for i, t in enumerate(self.tracks) if t.is_confirmed()]\r\n unconfirmed_tracks = [\r\n i for i, t in enumerate(self.tracks) if not t.is_confirmed()]\r\n\r\n # Associate confirmed tracks using appearance features.\r\n matches_a, unmatched_tracks_a, unmatched_detections = \\\r\n linear_assignment.matching_cascade(\r\n gated_metric, self.metric.matching_threshold, self.max_age,\r\n self.tracks, detections, confirmed_tracks)\r\n\r\n # Associate remaining tracks together with unconfirmed tracks using 
IOU.\r\n iou_track_candidates = unconfirmed_tracks + [\r\n k for k in unmatched_tracks_a if\r\n self.tracks[k].time_since_update == 1]\r\n unmatched_tracks_a = [\r\n k for k in unmatched_tracks_a if\r\n self.tracks[k].time_since_update != 1]\r\n matches_b, unmatched_tracks_b, unmatched_detections = \\\r\n linear_assignment.min_cost_matching(\r\n iou_matching.iou_cost, self.max_iou_distance, self.tracks,\r\n detections, iou_track_candidates, unmatched_detections)\r\n\r\n matches = matches_a + matches_b\r\n unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))\r\n return matches, unmatched_tracks, unmatched_detections\r\n\r\n def _initiate_track(self, detection):\r\n mean, covariance = self.kf.initiate(detection.to_xyah())\r\n class_name = detection.get_class()\r\n self.tracks.append(Track(\r\n mean, covariance, self._next_id, self.n_init, self.max_age,\r\n detection.feature, class_name, confidence_value=detection.confidence))\r\n self._next_id += 1\r\n" ]
[ [ "numpy.array", "numpy.asarray" ] ]
lavanyashukla/ray
[ "fece8db70d703da1aad192178bd50923e83cc99a" ]
[ "python/ray/util/sgd/torch/torch_trainer.py" ]
[ "import inspect\nimport time\n\nimport numpy as np\nimport logging\nimport os\nimport numbers\nimport tempfile\nimport torch\nimport torch.distributed as dist\n\nimport ray\nfrom ray.tune import Trainable\nfrom ray.tune.resources import Resources\nfrom ray.tune.utils.util import merge_dicts\nfrom ray.util import log_once\nfrom ray.util.sgd.torch.worker_group import LocalWorkerGroup, \\\n RemoteWorkerGroup, DeactivatedWorkerGroup\nfrom ray.util.sgd.utils import NUM_SAMPLES, BATCH_SIZE\nfrom ray.util.sgd.torch.constants import VALID_SCHEDULER_STEP, NCCL_TIMEOUT_S\nfrom ray.util.sgd.data import Dataset\n\nlogger = logging.getLogger(__name__)\n\n\ndef _validate_scheduler_step_freq(scheduler_step_freq):\n \"\"\"This validation check only happens if a scheduler is passed in.\"\"\"\n if scheduler_step_freq not in VALID_SCHEDULER_STEP:\n raise ValueError(\"Scheduler step freq must be in {}. Got {}\".format(\n VALID_SCHEDULER_STEP, scheduler_step_freq))\n\n\ndef _remind_gpu_usage(use_gpu):\n if not use_gpu and torch.cuda.is_available():\n logger.info(\"GPUs detected but not using them. Set `use_gpu` to \"\n \"enable GPU usage. \")\n\n\nclass TorchTrainer:\n \"\"\"Train a PyTorch model using distributed PyTorch.\n\n Launches a set of actors which connect via distributed PyTorch and\n coordinate gradient updates to train the provided model. If Ray is not\n initialized, TorchTrainer will automatically initialize a local Ray\n cluster for you. Be sure to run `ray.init(address=\"auto\")` to leverage\n multi-node training.\n\n .. 
code-block:: python\n\n class MyTrainingOperator(TrainingOperator):\n\n def setup(self, config):\n model = nn.Linear(1, 1)\n optimizer = torch.optim.SGD(\n model.parameters(), lr=config.get(\"lr\", 1e-4))\n loss = torch.nn.MSELoss()\n\n batch_size = config[\"batch_size\"]\n train_data, val_data = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_data, batch_size=batch_size)\n val_loader = DataLoader(val_data, batch_size=batch_size)\n\n self.model, self.optimizer = self.register(\n models=model,\n optimizers=optimizer,\n criterion=loss)\n\n self.register_data(\n train_loader=train_loader,\n validation_loader=val_loader)\n\n\n trainer = TorchTrainer(\n training_operator_cls=MyTrainingOperator,\n config={\"batch_size\": 32},\n use_gpu=True\n )\n for i in range(4):\n trainer.train()\n\n Args:\n training_operator_cls (type): Custom training operator class\n that subclasses the TrainingOperator class. This class\n will be copied onto all remote workers and used to specify\n training components and custom training and validation operations.\n initialization_hook (function): A function to call on all training\n workers when they are first initialized. This could be useful to\n set environment variables for all the worker processes.\n config (dict): Custom configuration value to be passed to\n all operator constructors.\n num_workers (int): the number of workers used in distributed\n training. If 1, the worker will not be wrapped with\n DistributedDataParallel. TorchTrainer will scale down the number\n of workers if enough resources are not available, and will scale\n back up once they are. The total number of\n workers will never exceed `num_workers` amount.\n num_cpus_per_worker (int): Sets the cpu requirement for each worker.\n use_gpu (bool): Sets resource allocation for workers to 1 GPU\n if true, and automatically moves both the model and optimizer\n to the available CUDA device.\n backend (string): backend used by distributed PyTorch. 
Currently\n support \"nccl\", \"gloo\", and \"auto\". If \"auto\", RaySGD will\n automatically use \"nccl\" if `use_gpu` is True, and \"gloo\"\n otherwise.\n wrap_ddp (bool): Whether to automatically wrap DistributedDataParallel\n over each model. If False, you are expected to call it yourself.\n timeout_s (float): Seconds before the torch process group\n times out. Useful when machines are unreliable. If not set, default\n to 30 min, which is the same default as\n ``torch.init_process_group(...)``.\n add_dist_sampler (bool): Whether to automatically add a\n DistributedSampler to all created dataloaders. Only applicable\n if num_workers > 1.\n use_fp16 (bool): Enables mixed precision training via apex if apex\n is installed. This is automatically done after the model and\n optimizers are constructed and will work for multi-model training.\n Please see https://github.com/NVIDIA/apex for more details.\n scheduler_step_freq: \"batch\", \"epoch\", \"manual\", or None. This will\n determine when ``scheduler.step`` is called. If \"batch\",\n ``step`` will be called after every optimizer step. If \"epoch\",\n ``step`` will be called after one pass of the DataLoader. If\n \"manual\", the scheduler will not be incremented automatically -\n you are expected to call ``trainer.update_scheduler`` manually.\n If a scheduler is passed in, this value is expected to not be None.\n use_local (bool): If True, 1 worker will be a local worker running\n on the driver process, and all other workers will be remote. If\n False, all workers will be remote. Set this to True for easy\n debugging of worker on driver process, but could also\n lead to issues with Cuda devices. Defaults to False.\n \"\"\"\n\n # TODO: Implement autoscaling. If num_workers=-1, the trainer will use as\n # many resources as available. 
Upon each train call, TorchTrainer will\n # query the Ray global state for total available resources and resize\n # its remote workers to consume all available resources.\n\n def __init__(\n self,\n *,\n training_operator_cls,\n initialization_hook=None,\n config=None,\n num_workers=1,\n num_cpus_per_worker=1,\n use_gpu=\"auto\",\n backend=\"auto\",\n wrap_ddp=True,\n timeout_s=1800,\n use_fp16=False,\n use_tqdm=False,\n add_dist_sampler=True,\n scheduler_step_freq=None,\n use_local=False,\n # Deprecated Args.\n num_replicas=None,\n batch_size=None,\n model_creator=None,\n data_creator=None,\n optimizer_creator=None,\n scheduler_creator=None,\n loss_creator=None,\n serialize_data_creation=None,\n data_loader_args=None,\n apex_args=None,\n ):\n if (model_creator or data_creator or optimizer_creator\n or scheduler_creator or loss_creator):\n raise DeprecationWarning(\n \"Creator functions are deprecated. You should create a \"\n \"custom TrainingOperator, override setup, and register all \"\n \"training state there. See TrainingOperator for more info. \"\n \"If you would still like to use creator functions, you can \"\n \"do CustomOperator = TrainingOperator.from_creators(\"\n \"model_creator, ...) and pass in CustomOperator into \"\n \"TorchTrainer.\")\n\n if use_local and log_once(\"use_local\"):\n logger.warning(\"use_local is set to True. This could lead to \"\n \"issues with Cuda devices. If you are seeing this \"\n \"issue, try setting use_local to False. For more \"\n \"information, see \"\n \"https://github.com/ray-project/ray/issues/9202.\")\n\n if num_workers > 1 and not dist.is_available():\n raise ValueError(\n (\"Distributed PyTorch is not supported on macOS. \"\n \"To run without distributed PyTorch, set 'num_workers=1'. \"\n \"For more information, see \"\n \"https://github.com/pytorch/examples/issues/467.\"))\n\n if num_replicas is not None:\n raise DeprecationWarning(\n \"num_replicas is deprecated. 
Use num_workers instead.\")\n\n if batch_size is not None:\n raise DeprecationWarning(\n \"batch_size is deprecated. Use config={'batch_size': N} \"\n \"specify a batch size for each worker or \"\n \"config={ray.util.sgd.utils.BATCH_SIZE: N} to specify a \"\n \"batch size to be used across all workers.\")\n\n if apex_args is not None:\n raise DeprecationWarning(\n \"apex_args is deprecated. Pass in apex_args when calling \"\n \"`register` in the `setup` method of your `TrainingOperator` \"\n \"instead.\")\n\n if serialize_data_creation is True:\n if log_once(\"serialize_data_creation\"):\n logging.warning(\n \"serialize_data_creation is deprecated and will be \"\n \"ignored. If you require serialized data loading you \"\n \"should implement this in TrainingOperator.setup. \"\n \"You may find FileLock useful here.\")\n\n if data_loader_args:\n raise DeprecationWarning(\n \"data_loader_args is deprecated. You can return a \"\n \"torch.utils.data.DataLoader in data_creator. Ray will \"\n \"automatically set a DistributedSampler if a DataLoader is \"\n \"returned and num_workers > 1.\")\n\n self.training_operator_cls = training_operator_cls\n\n self.initialization_hook = initialization_hook\n self.config = {} if config is None else config\n if use_gpu == \"auto\":\n use_gpu = torch.cuda.is_available()\n\n _remind_gpu_usage(use_gpu)\n\n if backend == \"auto\":\n backend = \"nccl\" if use_gpu else \"gloo\"\n\n if backend == \"nccl\":\n timeout_s = NCCL_TIMEOUT_S\n\n logger.debug(f\"Using {backend} as backend.\")\n self.backend = backend\n self.num_cpus_per_worker = num_cpus_per_worker\n self.use_gpu = use_gpu\n self.max_replicas = num_workers\n\n self.serialize_data_creation = serialize_data_creation\n self.wrap_ddp = wrap_ddp\n self.timeout_s = timeout_s\n self.use_fp16 = use_fp16\n self.use_tqdm = use_tqdm\n self.add_dist_sampler = add_dist_sampler\n self.use_local = use_local\n\n self.temp_dir = tempfile.mkdtemp(prefix=\"raysgd\")\n self._num_failures = 0\n 
self._last_resize = float(\"-inf\")\n\n if scheduler_step_freq:\n _validate_scheduler_step_freq(scheduler_step_freq)\n\n self.scheduler_step_freq = scheduler_step_freq\n\n if not ray.is_initialized() and self.max_replicas > 1:\n logger.info(\"Automatically initializing single-node Ray. To use \"\n \"multi-node training, be sure to run `ray.init(\"\n \"address='auto')` before instantiating the Trainer.\")\n ray.init()\n self._start_workers(self.max_replicas)\n\n def _configure_and_split_batch(self, num_workers):\n \"\"\"If sgd.utils.BATCH_SIZE is provided, split among workers.\"\"\"\n if BATCH_SIZE not in self.config:\n return\n # Compute batch size per worker\n logger.debug(\"BATCH_SIZE parameter detected. Splitting among workers.\")\n batch_size = self.config[BATCH_SIZE]\n batch_size_per_worker = batch_size // num_workers\n if batch_size % num_workers > 0:\n new_batch_size = batch_size_per_worker * num_workers\n logger.warning(\n (\"Changing batch size from {old_batch_size} to \"\n \"{new_batch_size} to evenly distribute batches across \"\n \"{num_workers} workers.\").format(\n old_batch_size=batch_size,\n new_batch_size=new_batch_size,\n num_workers=num_workers))\n self.config[BATCH_SIZE] = new_batch_size\n return batch_size_per_worker\n\n def _start_workers(self, num_workers):\n worker_config = self.config.copy()\n batch_size_per_worker = self._configure_and_split_batch(num_workers)\n if batch_size_per_worker:\n worker_config[BATCH_SIZE] = batch_size_per_worker\n params = dict(\n training_operator_cls=self.training_operator_cls,\n config=worker_config,\n serialize_data_creation=self.serialize_data_creation,\n use_fp16=self.use_fp16,\n use_gpu=self.use_gpu,\n use_tqdm=self.use_tqdm,\n scheduler_step_freq=self.scheduler_step_freq)\n\n dist_params = dict(\n backend=self.backend,\n add_dist_sampler=self.add_dist_sampler,\n wrap_ddp=self.wrap_ddp)\n\n worker_args = {\n \"max_workers\": self.max_replicas,\n \"params\": params,\n \"dist_params\": dist_params,\n 
\"initialization_hook\": self.initialization_hook,\n \"num_cpus_per_worker\": self.num_cpus_per_worker,\n \"use_gpu\": self.use_gpu,\n \"timeout_s\": self.timeout_s\n }\n\n if self.use_local:\n self.worker_group = LocalWorkerGroup(**worker_args)\n else:\n self.worker_group = RemoteWorkerGroup(**worker_args)\n\n # TODO(amogkam): If not enough resources are available to create\n # num_workers workers, this command will hang. Instead,\n # start_workers should take into account available resources when\n # determining how many workers to create.\n self.worker_group.start_workers(num_workers)\n\n def _resize_worker_group(self, max_retries=10):\n \"\"\"Resizes the number of remote workers based on available resources.\n Total number of workers will never exceed `num_workers` amount.\n\n Args:\n max_retries (int): How many times to attempt to resize workers\n before failing.\n \"\"\"\n state_dict = self.state_dict()\n old_workers = self.worker_group.num_workers\n self.worker_group.reset()\n\n time.sleep(1)\n for i in range(max_retries):\n new_workers = self.worker_group.new_workers_size()\n if new_workers:\n self._last_resize = time.time()\n self._start_workers(int(new_workers))\n self.load_state_dict(state_dict, blocking=True)\n if self.use_local and new_workers == 1 and old_workers > 1:\n # Major hack. If we go from LocalDistributedRunner to a\n # standard TorchRunner we have to manually reset the\n # dummy actor handle global vars.\n # TODO(amog): Refactor LocalDistributedTorchRunner to\n # not use global variables for resource reservation.\n ray.util.sgd.torch.distributed_torch_runner\\\n ._dummy_cuda_actor = None\n ray.util.sgd.torch.distributed_torch_runner\\\n ._dummy_cpu_actor = None\n return\n else:\n delay = 2**i\n logger.warning(\n \"No new workers found. 
Retrying in %d sec.\" % delay)\n time.sleep(delay)\n raise RuntimeError(\"Exceeded max_retries for relaunching workers.\")\n\n def train(self,\n num_steps=None,\n profile=False,\n reduce_results=True,\n max_retries=3,\n info=None,\n dataset=None):\n \"\"\"Runs a training epoch.\n\n Calls `operator.train_epoch()` on N parallel workers simultaneously\n underneath the hood.\n\n Set `max_retries` to enable fault handling in case of\n instance preemption.\n\n Args:\n num_steps (int): Number of batches to compute update steps on\n per worker. This corresponds also to the number of times\n ``TrainingOperator.train_batch`` is called per worker.\n profile (bool): Returns time stats for the training procedure.\n reduce_results (bool): Whether to average all metrics across\n all workers into one dict. If a metric is a non-numerical\n value (or nested dictionaries), one value will be randomly\n selected among the workers. If False, returns a list of dicts.\n max_retries (int): Must be non-negative. If set to N, TorchTrainer\n will detect and recover from training failure. The recovery\n process will kill all current workers, query the Ray\n global state for total available resources, and re-launch up to\n the available resources. Behavior is not well-defined\n in case of shared cluster usage. Defaults to 3.\n info (dict): Optional dictionary passed to the training\n operator for ``train_epoch`` and ``train_batch``.\n dataset (Dataset): Optional dataset to train with. If specified,\n the dataloader passed in via data_creator will be ignored.\n\n Returns:\n (dict | list) A dictionary of metrics for training.\n You can provide custom metrics by implementing a custom\n training loop. 
If ``reduce_results=False``, this will return a\n list of metric dictionaries whose length will be equal to\n ``num_workers``.\n \"\"\"\n assert max_retries >= 0, \"`max_retries` must be non-negative.\"\n assert isinstance(dataset, Dataset) is not None \\\n or self.data_creator, \\\n \"Must specify either a data creator or a dataset\"\n if self.worker_group.should_scale_up():\n logger.info(\"Resize opportunity detected. Attempting to scale up.\")\n self._resize_worker_group()\n success, worker_stats = self.worker_group.train(\n num_steps=num_steps, profile=profile, info=info, dataset=dataset)\n # Fault handling\n for i in range(max_retries):\n if success:\n break\n else:\n self._num_failures += 1\n self._resize_worker_group()\n logger.info(\"Retrying training step with %d workers.\" %\n self.worker_group.num_workers)\n success, worker_stats = self.worker_group.train(\n num_steps=num_steps,\n profile=profile,\n info=info,\n dataset=dataset)\n if not success:\n raise RuntimeError(\"Training run failed.\")\n\n if reduce_results:\n return self._process_stats(worker_stats)\n else:\n return worker_stats\n\n def _process_stats(self, worker_stats):\n stats = {\n NUM_SAMPLES: sum(\n stats.pop(NUM_SAMPLES, np.nan) for stats in worker_stats)\n }\n for stat_key in worker_stats[0]:\n if isinstance(worker_stats[0][stat_key], numbers.Number):\n stats[stat_key] = np.nanmean(\n [s.get(stat_key, np.nan) for s in worker_stats])\n else:\n stats[stat_key] = worker_stats[0][stat_key]\n return stats\n\n def apply_all_workers(self, fn):\n \"\"\"Run a function on all operators on the workers.\n\n Args:\n fn (Callable): A function that takes in no arguments.\n\n Returns:\n A list of objects returned by ``fn`` on each worker.\n\n \"\"\"\n return self.worker_group.apply_all_workers(fn)\n\n def apply_all_operators(self, fn):\n \"\"\"Run a function on all operators on the workers.\n\n Args:\n fn (Callable[TrainingOperator]): A function that takes in a\n TrainingOperator.\n\n Returns:\n A list 
of objects returned by ``fn`` on each operator.\n\n \"\"\"\n return self.worker_group.apply_all_operators(fn)\n\n def validate(self,\n num_steps=None,\n profile=False,\n reduce_results=True,\n info=None):\n \"\"\"Evaluates the model on the validation data set.\n\n Args:\n num_steps (int): Number of batches to compute update steps on\n per worker. This corresponds also to the number of times\n ``TrainingOperator.validate_batch`` is called per worker.\n profile (bool): Returns time stats for the evaluation procedure.\n reduce_results (bool): Whether to average all metrics across\n all workers into one dict. If a metric is a non-numerical\n value (or nested dictionaries), one value will be randomly\n selected among the workers. If False, returns a list of dicts.\n info (dict): Optional dictionary passed to the training\n operator for `validate` and `validate_batch`.\n\n Returns:\n A dictionary of metrics for validation.\n You can provide custom metrics by passing in a custom\n ``training_operator_cls``.\n \"\"\"\n worker_stats = self.worker_group.validate(\n num_steps=num_steps, profile=profile, info=info)\n\n if reduce_results:\n return self._process_stats(worker_stats)\n else:\n return worker_stats\n\n def update_scheduler(self, metric):\n \"\"\"Calls ``scheduler.step(metric)`` on all registered schedulers.\n\n This is useful for lr_schedulers such as ``ReduceLROnPlateau``.\n \"\"\"\n self.worker_group.apply_all_operators(\n lambda op: [sched.step(metric) for sched in op._schedulers])\n\n def get_model(self):\n \"\"\"Returns the learned model(s).\"\"\"\n unwrapped = []\n models = self.worker_group.get_model()\n for model in models:\n unwrapped += [model.module if hasattr(model, \"module\") else model]\n if len(unwrapped) == 1:\n return unwrapped[0]\n return unwrapped\n\n def get_local_operator(self):\n \"\"\"Returns the local TrainingOperator object.\n\n Be careful not to perturb its state, or else you can cause the system\n to enter an inconsistent state.\n\n 
Returns:\n TrainingOperator: The local TrainingOperator object.\n \"\"\"\n return self.worker_group.get_local_operator()\n\n def state_dict(self):\n return self.worker_group.state_dict()\n\n def load_state_dict(self, state_dict, blocking=False):\n self.worker_group.load_state_dict(state_dict, blocking=blocking)\n\n def save(self, checkpoint):\n \"\"\"Saves the Trainer state to the provided checkpoint path.\n\n Args:\n checkpoint (str): Path to target checkpoint file.\n \"\"\"\n torch.save(self.state_dict(), checkpoint)\n return checkpoint\n\n def load(self, checkpoint):\n \"\"\"Loads the Trainer and all workers from the provided checkpoint.\n\n Args:\n checkpoint (str): Path to target checkpoint file.\n \"\"\"\n state_dict = torch.load(checkpoint)\n self.load_state_dict(state_dict)\n\n def restore(self, *args):\n raise DeprecationWarning(\"Use `TorchTrainer.load()` instead.\")\n\n def shutdown(self, force=False):\n \"\"\"Shuts down workers and releases resources.\n\n Args:\n force (bool): If True, forcefully kill all workers. If False,\n attempt a graceful shutdown first, and then forcefully kill if\n unsuccessful.\n\n \"\"\"\n self.worker_group.shutdown(force=force)\n self.worker_group = DeactivatedWorkerGroup()\n\n @classmethod\n def as_trainable(cls, *args, override_tune_step=None, **kwargs):\n \"\"\"Creates a BaseTorchTrainable class compatible with Tune.\n\n Any configuration parameters will be overridden by the Tune\n Trial configuration. You can also pass in a custom\n ``override_tune_step`` to implement your own iterative optimization\n routine and override the default implementation.\n\n .. 
code-block:: python\n\n def step(trainer, info):\n # Implement custom objective function here.\n train_stats = trainer.train()\n ...\n # Return the metrics to report to tune.\n # Do not call tune.report here.\n return train_stats\n\n TorchTrainable = TorchTrainer.as_trainable(\n training_operator_cls=MyTrainingOperator,\n num_gpus=2,\n override_tune_step=step\n )\n analysis = tune.run(\n TorchTrainable,\n config={\"lr\": tune.grid_search([0.01, 0.1])}\n )\n\n Args:\n override_tune_step (Callable[[TorchTrainer, Dict], Dict]): A\n function to override the default training step to be used\n for Ray Tune. It accepts two arguments: the first one is an\n instance of your TorchTrainer, and the second one is a info\n dictionary, containing information about the Trainer\n state. If None is passed in, the default step\n function will be\n used: run 1 epoch of training, 1 epoch of validation,\n and report both results to Tune. Passing in\n ``override_tune_step`` is useful to define\n custom step functions, for example if you need to\n manually update the scheduler or want to run more than 1\n training epoch for each tune iteration.\n\n \"\"\"\n if override_tune_step is not None:\n callback_args = inspect.signature(override_tune_step)\n if not len(callback_args.parameters) == 2:\n raise ValueError(\"override_tune_step must take in exactly 2 \"\n \"arguments. 
The passed in function \"\n \"currently takes in {} \"\n \"args\".format(\n str(len(callback_args.parameters))))\n\n class TorchTrainable(BaseTorchTrainable):\n @classmethod\n def default_resource_request(cls, config):\n num_workers = config.get(\"num_workers\",\n kwargs.get(\"num_workers\", 1))\n num_cpus_per_worker = config.get(\n \"num_cpus_per_worker\", kwargs.get(\"num_cpus_per_worker\",\n 1))\n use_gpu = config.get(\"use_gpu\", kwargs.get(\"use_gpu\"))\n use_local = config.get(\"use_local\",\n kwargs.get(\"use_local\", False))\n\n if use_local:\n remote_worker_count = num_workers - 1\n local_cpus = 1\n local_gpus = int(use_gpu)\n else:\n remote_worker_count = num_workers\n local_cpus = 0\n local_gpus = 0\n\n return Resources(\n cpu=int(local_cpus * num_cpus_per_worker),\n gpu=int(local_gpus),\n extra_cpu=int(remote_worker_count * num_cpus_per_worker),\n extra_gpu=int(int(use_gpu) * remote_worker_count))\n\n def step(self):\n if override_tune_step is not None:\n output = override_tune_step(\n self._trainer, {\"iteration\": self.training_iteration})\n return output\n else:\n return super(TorchTrainable, self).step()\n\n def _create_trainer(self, tune_config):\n \"\"\"Overrides the provided config with Tune config.\"\"\"\n provided_config = kwargs.get(\"config\", {}).copy()\n provided_config.update(tune_config)\n kwargs[\"config\"] = provided_config\n trainer = TorchTrainer(*args, **kwargs)\n return trainer\n\n return TorchTrainable\n\n\nclass BaseTorchTrainable(Trainable):\n \"\"\"Base class for converting TorchTrainer to a Trainable class.\n\n This class is produced when you call ``TorchTrainer.as_trainable(...)``.\n\n By default one step of training runs ``trainer.train()`` once and\n ``trainer.validate()`` once. You can implement custom iterative\n training procedures by passing in a ``override_tune_step`` function to\n ``as_trainable``:\n\n .. 
code-block:: python\n\n def custom_step(trainer, info):\n for i in range(5):\n train_stats = trainer.train()\n validation_stats = trainer.validate()\n train_stats.update(validation_stats)\n return train_stats\n\n # TorchTrainable is subclass of BaseTorchTrainable.\n TorchTrainable = TorchTrainer.as_trainable(\n training_operator_cls=MyTrainingOperator,\n num_gpus=2,\n override_tune_step=custom_step\n )\n\n analysis = tune.run(\n TorchTrainable,\n config={\"lr\": tune.grid_search([0.01, 0.1])}\n )\n\n \"\"\"\n\n def setup(self, config):\n \"\"\"Constructs a TorchTrainer object as `self.trainer`.\"\"\"\n self._trainer = self._create_trainer(config)\n\n def step(self):\n \"\"\"Calls `self.trainer.train()` and `self.trainer.validate()` once.\"\"\"\n if self._is_overridden(\"_train\"):\n raise DeprecationWarning(\n \"Trainable._train is deprecated and will be \"\n \"removed in \"\n \"a future version of Ray. Override Trainable.step instead.\")\n\n train_stats = self.trainer.train(max_retries=0, profile=True)\n validation_stats = self.trainer.validate(profile=True)\n stats = merge_dicts(train_stats, validation_stats)\n return stats\n\n def save_checkpoint(self, checkpoint_dir):\n \"\"\"Returns a path containing the trainer state.\"\"\"\n checkpoint_path = os.path.join(checkpoint_dir, \"trainer.checkpoint\")\n self.trainer.save(checkpoint_path)\n return checkpoint_path\n\n def load_checkpoint(self, checkpoint_path):\n \"\"\"Restores the trainer state.\n\n Override this if you have state external to the Trainer object.\n \"\"\"\n return self.trainer.load(checkpoint_path)\n\n def cleanup(self):\n \"\"\"Shuts down the trainer.\"\"\"\n self.trainer.shutdown()\n\n def _create_trainer(self, config):\n raise NotImplementedError\n\n @property\n def trainer(self):\n \"\"\"An instantiated TorchTrainer object.\n\n Use this when specifying custom training procedures for Tune.\n \"\"\"\n return self._trainer\n" ]
[ [ "torch.distributed.is_available", "torch.cuda.is_available", "torch.load" ] ]
HanSeokhyeon/Speech_recogniton_for_English_and_Korean
[ "e0eaf1da1e1ac15f34402fea8cb330d008140d61", "e0eaf1da1e1ac15f34402fea8cb330d008140d61" ]
[ "util/timit/old/timit_preprocess_mfcc40_spikegram32.py", "figure/figure5.py" ]
[ "# reference : https://github.com/Faur/TIMIT\n# \t\t\t https://github.com/jameslyons/python_speech_features/issues/53\nimport os\nimport sys\nimport timeit; program_start_time = timeit.default_timer()\nimport random; random.seed(int(timeit.default_timer()))\nfrom six.moves import cPickle\nimport numpy as np\nimport librosa\n# a python package for speech features at https://github.com/jameslyons/python_speech_features\n\nif len(sys.argv) != 3:\n print('Usage: python3 preprocess.py <timit directory> <output_file>')\n\n##### SCRIPT META VARIABLES #####\nphn_file_postfix = '.PHN'\nwav_file_postfix = '.WAV'\ndata_type = 'float32'\n\nwork_dir = os.getcwd()\n\npaths = sys.argv[1]\nlast_paths = paths.split('/')[-1]\n\n# Train 3696 valid 400 test 192\ntrain_path\t= np.loadtxt(\"timit_dataset_list/TRAIN_list.csv\", dtype=str)\nvalid_path\t= np.loadtxt(\"timit_dataset_list/TEST_developmentset_list.csv\", dtype=str)\ntest_path\t= np.loadtxt(\"timit_dataset_list/TEST_coreset_list.csv\", dtype=str)\ntarget_path\t= os.path.join(paths, sys.argv[2])\n\nspike_frame = 2048 * 6\nn_band = 32\nn_time = 8\nn_structure = 4\n\n# 61 different phonemes\nphonemes = [\"b\", \"bcl\", \"d\", \"dcl\", \"g\", \"gcl\", \"p\", \"pcl\", \"t\", \"tcl\", \"k\", \"kcl\", \"dx\", \"q\", \"jh\", \"ch\", \"s\", \"sh\", \"z\", \"zh\",\n \"f\", \"th\", \"v\", \"dh\", \"m\", \"n\", \"ng\", \"em\", \"en\", \"eng\", \"nx\", \"l\", \"r\", \"w\", \"y\",\n \"hh\", \"hv\", \"el\", \"iy\", \"ih\", \"eh\", \"ey\", \"ae\", \"aa\", \"aw\", \"ay\", \"ah\", \"ao\", \"oy\",\n \"ow\", \"uh\", \"uw\", \"ux\", \"er\", \"ax\", \"ix\", \"axr\", \"ax-h\", \"pau\", \"epi\", \"h#\"]\n\nphonemes2index = {k:v for v,k in enumerate(phonemes)}\n\ni_max = 0\n\n\ndef get_total_duration(file):\n \"\"\"Get the length of the phoneme file, i.e. 
the 'time stamp' of the last phoneme\"\"\"\n for line in reversed(list(open(file))):\n [_, val, _] = line.split()\n return int(val)\n\n\ndef get_delta(x, N):\n pad_x = np.pad(x, ((0, 0), (N, N)), 'edge')\n delta = np.zeros(np.shape(x))\n iterator = [i + 1 for i in range(N)]\n for t in range(np.shape(x)[1]):\n tmp1, tmp2 = 0, 0\n for n in iterator:\n tmp1 += n * (pad_x[:, (t + N) + n] - pad_x[:, (t + N) - n])\n tmp2 += 2 * n * n\n delta[:, t] = np.divide(tmp1, tmp2)\n\n return delta\n\n\ndef create_spikegram(filename):\n \"\"\"Perform standard preprocessing, as described by Alex Graves (2012)\n http://www.cs.toronto.edu/~graves/preprint.pdf\n Output consists of 12 MFCC and 1 energy, as well as the first derivative of these.\n [1 energy, 12 MFCC, 1 diff(energy), 12 diff(MFCC)\n \"\"\"\n rate, sample = 16000, np.fromfile(filename, dtype=np.int16)[512:]\n sample = sample / 32767.5\n mfcc = librosa.feature.mfcc(sample,\n sr=rate,\n n_fft=400,\n hop_length=160,\n n_mfcc=40,\n center=False)\n\n filename_spikegram = filename.replace('TIMIT', 'TIMIT_spikegram')\n rate, spikegram = 16000, get_data(filename_spikegram[:-4], sample.shape[0])\n\n feature = make_feature(y=spikegram,\n frame=400,\n hop_length=160)\n feature = np.concatenate((mfcc, feature), axis=0)\n d_feature = get_delta(feature, 2)\n a_feature = get_delta(d_feature, 2)\n\n out = np.concatenate([feature.T, d_feature.T, a_feature.T], axis=1)\n\n return out, out.shape[0]\n\n\ndef get_data(filename, wav_length):\n raw_filename = filename + \"_spike.raw\"\n num_filename = filename + \"_num.raw\"\n\n x = np.fromfile(raw_filename, dtype=np.float64)\n x = np.reshape(x, (-1, n_structure))\n num = np.fromfile(num_filename, dtype=np.int32)\n\n n_data = np.shape(num)[0]\n acc_num = [sum(num[:i]) for i in range(n_data + 1)]\n\n for k in range(n_data):\n x[acc_num[k]:acc_num[k + 1], 2] += k * spike_frame\n\n spikegram = get_spikegram(x=x, num=num, acc_num=acc_num, n_data=n_data)\n spikegram = spikegram[:, :wav_length]\n\n 
return spikegram\n\n\ndef get_delay():\n gammatone_filter = np.fromfile(\"../timit_dataset_list/Gammatone_Filter_Order4.raw\", dtype=np.float64)\n gammatone_filter = np.reshape(gammatone_filter, (n_band, -1))\n gammatone_filter = gammatone_filter[:, 1:-1]\n\n max_point = np.argmax(np.abs(gammatone_filter), axis=1)\n\n return max_point\n\n\nmax_point = get_delay()\n\n\ndef get_spikegram(x, num, acc_num, n_data):\n # get spikegram_old by SNR\n spikegram = np.zeros((n_band, spike_frame * n_data))\n for k in range(n_data):\n for n in range(num[k]):\n spikegram[int(x[acc_num[k] + n, 0])][int(x[acc_num[k] + n, 2])] \\\n += np.abs(x[acc_num[k] + n, 1])\n\n for idx, point in enumerate(max_point):\n spikegram[idx, point:] = spikegram[idx, :-point]\n\n return spikegram\n\n\ndef make_feature(y, frame, hop_length):\n feature = []\n feature_tmp = np.zeros(n_band+n_time)\n num_of_frame = int((y.shape[1] - frame) / hop_length + 1)\n start, end = 0, frame\n\n if y.shape[1] % frame != 0:\n y = np.pad(y, ((0, 0), (0, frame - y.shape[1] % frame)), 'constant', constant_values=0)\n\n for i in range(num_of_frame):\n feature_tmp[:n_band] = librosa.power_to_db(np.sum(y[:, start:end], axis=1)+1)\n tmp_sum = np.reshape(np.sum(y[:, start:end], axis=0), (n_time, -1))\n feature_tmp[n_band:] = librosa.power_to_db(np.sum(tmp_sum, axis=1)+1)\n start += hop_length\n end += hop_length\n feature.append(np.copy(feature_tmp.reshape(1, -1)))\n\n feature = np.concatenate(feature, axis=0).transpose()\n return feature[:n_band]\n\n\ndef calc_norm_param(X):\n \"\"\"Assumes X to be a list of arrays (of differing sizes)\"\"\"\n total_len = 0\n mean_val = np.zeros(X[0].shape[1]) # 39\n std_val = np.zeros(X[0].shape[1]) # 39\n for obs in X:\n obs_len = obs.shape[0]\n mean_val += np.mean(obs,axis=0) * obs_len\n std_val += np.std(obs, axis=0) * obs_len\n total_len += obs_len\n\n mean_val /= total_len\n std_val /= total_len\n\n return mean_val, std_val, total_len\n\ndef normalize(X, mean_val, std_val):\n for i in 
range(len(X)):\n X[i] = (X[i] - mean_val)/std_val\n return X\n\ndef set_type(X, type):\n for i in range(len(X)):\n X[i] = X[i].astype(type)\n return X\n\n\ndef preprocess_dataset(file_list):\n \"\"\"Preprocess data, ignoring compressed files and files starting with 'SA'\"\"\"\n i = 0\n X = []\n Y = []\n\n for fname in file_list:\n phn_fname = \"{}/{}{}\".format(paths, fname, phn_file_postfix)\n wav_fname = \"{}/{}{}\".format(paths, fname, wav_file_postfix)\n\n total_duration = get_total_duration(phn_fname)\n fr = open(phn_fname)\n\n X_val, total_frames = create_spikegram(wav_fname)\n total_frames = int(total_frames)\n\n X.append(X_val)\n\n y_val = np.zeros(total_frames) - 1\n start_ind = 0\n for j, line in enumerate(fr):\n [start_time, end_time, phoneme] = line.rstrip('\\n').split()\n start_time = int(start_time)\n end_time = int(end_time)\n\n phoneme_num = phonemes2index[phoneme] if phoneme in phonemes2index else -1\n end_ind = int(np.round((end_time) / total_duration * total_frames))\n y_val[start_ind:end_ind] = phoneme_num\n\n start_ind = end_ind\n fr.close()\n\n global i_max\n i_max = max(j, i_max)\n\n if -1 in y_val:\n print('WARNING: -1 detected in TARGET')\n print(y_val)\n\n Y.append(y_val.astype('int32'))\n\n i += 1\n print('file No.', i, end='\\r', flush=True)\n\n print('Done')\n return X, Y\n\n\n##### PREPROCESSING #####\nprint()\n\nprint('Preprocessing train data...')\nX_train, y_train = preprocess_dataset(train_path)\nmax_length1 = np.shape(max(X_train, key=lambda x: np.shape(x)))[0]\nprint('Preprocessing valid data...')\nX_valid, y_valid = preprocess_dataset(valid_path)\nmax_length2 = np.shape(max(X_valid, key=lambda x: np.shape(x)))[0]\nprint('Preprocessing test data...')\nX_test, y_test = preprocess_dataset(test_path)\nmax_length3 = np.shape(max(X_test, key=lambda x: np.shape(x)))[0]\nprint('Preprocessing completed.')\nmax_length = max(max_length1, max_length2, max_length3)\nprint(\"{} {} {} {}\".format(max_length1, max_length2, max_length3, 
max_length))\nprint(i_max)\n\nprint()\nprint('Collected {} training instances (should be 3696 in complete TIMIT )'.format(len(X_train)))\nprint('Collected {} validating instances (should be 400 in complete TIMIT )'.format(len(X_valid)))\nprint('Collected {} testing instances (should be 192 in complete TIMIT )'.format(len(X_test)))\n\nprint()\nprint('Normalizing data to let mean=0, sd=1 for each channel.')\n\nmean_val, std_val, _ = calc_norm_param(X_train)\n\nX_train = normalize(X_train, mean_val, std_val)\nX_valid\t= normalize(X_valid, mean_val, std_val)\nX_test \t= normalize(X_test, mean_val, std_val)\n\nX_train = set_type(X_train, data_type)\nX_valid\t= set_type(X_valid, data_type)\nX_test \t= set_type(X_test, data_type)\n\nprint()\nprint('Saving data to ',target_path)\nwith open(target_path + '.pkl', 'wb') as cPickle_file:\n cPickle.dump(\n [X_train, y_train, X_valid, y_valid, X_test, y_test],\n cPickle_file,\n protocol=cPickle.HIGHEST_PROTOCOL)\n\nprint()\nprint('Preprocessing completed in {:.3f} secs.'.format(timeit.default_timer() - program_start_time))\n", "\"\"\"\nκ·Έλ¦Ό 5. 
32-λ°΄λ“œ 멜-μŠ€νŽ™νŠΈλ‘œκ·Έλž¨(μœ„)κ³Ό 32-λ°΄λ“œ 슀파이크그램(μ•„λž˜) νŠΉμ„±μ˜ ν™•λ₯  뢄포\n\"\"\"\n\nfrom util.timit_dataset import load_dataset\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nplt.rcParams['font.family'] = 'Times New Roman'\nplt.rcParams['font.size'] = 20\nfig = plt.figure(figsize=(8, 8), edgecolor='k')\n\n# 32 λ°΄λ“œ 멜 μŠ€νŽ™νŠΈλ‘œκ·Έλž¨\nX_mel, _, _, _, _, _ = load_dataset(data_path='../dataset/TIMIT/timit_mel_spectrogram_96.pkl')\nmel = np.concatenate(X_mel, axis=0)[:, :32].T\n\n# plt.hist2dλ₯Ό μœ„ν•œ mel x, mel y\nmel_x = np.repeat(range(0, 32), mel.shape[1])\nmel_y = np.reshape(mel, -1)\n\n# 32 λ°΄λ“œ 슀파이크그램 νŠΉμ„±\nX_train, _, _, _, _, _ = load_dataset(data_path='../dataset/TIMIT/timit_mel_spikegram_240.pkl')\n\nX = np.concatenate(X_train, axis=0)[:, :72]\nspike = X[:, 40:].T # 40~72κ°€ 32λ°΄λ“œ μ£ΌνŒŒμˆ˜νŠΉμ„±\n\n# plt.hist2dλ₯Ό μœ„ν•œ spike x, spike y\nspike_x = np.repeat(range(0, 32), spike.shape[1])\nspike_y = np.reshape(spike, -1)\n\n# νžˆμŠ€ν† κ·Έλž¨ 멜, -3μ—μ„œ 3\nhist_mel, _, _, _ = plt.hist2d(mel_x, mel_y, bins=(32, 1000), range=[[0, 32], [-3, 3]], cmap='binary', vmax=4000)\nhist_mel[np.where(hist_mel > 4000)] = 4000 # 4000보닀 λ„˜λŠ” 값듀은 4000으둜 클리핑\nhist_mel = (hist_mel / 4000).T # 1둜 normalize\n\n# νžˆμŠ€ν† κ·Έλž¨ 슀파이크그램\nhist_spike, _, _, _ = plt.hist2d(spike_x, spike_y, bins=(32, 1000), range=[[0, 32], [-3, 3]], cmap='binary', vmax=4000)\nhist_spike[np.where(hist_spike > 4000)] = 4000\nhist_spike = (hist_spike / 4000).T\n\nplt.clf()\n\nplt.subplot(2, 1, 1)\n\nres = sns.heatmap(hist_mel, cmap='binary') # 히트맡\nres.invert_yaxis()\n\nfor _, spine in res.spines.items():\n spine.set_visible(True)\n\n\nplt.title(\"Mel-spectrogram\")\nplt.xlabel(\"Band\")\nplt.xticks([0, 10, 20, 30], [0, 10, 20, 30])\nplt.ylabel(\"Value\")\nplt.yticks([1000//6, 1000//6*3, 1000//6*5], [-2, 0, 2])\n\nplt.subplot(2, 1, 2)\n\nres = sns.heatmap(hist_spike, cmap='binary')\nres.invert_yaxis()\n\nfor _, spine in res.spines.items():\n 
spine.set_visible(True)\n\n\nplt.title(\"Spikegram\")\nplt.xlabel(\"Band\")\nplt.xticks([0, 10, 20, 30], [0, 10, 20, 30])\nplt.ylabel(\"Value\")\nplt.yticks([1000//6, 1000//6*3, 1000//6*5], [-2, 0, 2])\n\nfig1 = plt.gcf()\nplt.show()\n\nfig1.savefig(\"figures/figure5.png\")\n" ]
[ [ "numpy.concatenate", "numpy.divide", "numpy.pad", "numpy.reshape", "numpy.zeros", "numpy.round", "numpy.sum", "numpy.shape", "numpy.mean", "numpy.std", "numpy.loadtxt", "numpy.fromfile", "numpy.abs" ], [ "numpy.concatenate", "numpy.reshape", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.hist2d", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "numpy.where", "matplotlib.pyplot.show", "matplotlib.pyplot.gcf", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf", "matplotlib.pyplot.xticks", "matplotlib.pyplot.subplot" ] ]
JakeNeyer/MLServer
[ "a283d3c0008c944b28cdd39c2ffec73f59296603" ]
[ "runtimes/alibi-explain/tests/test_black_box.py" ]
[ "import json\nimport os\nfrom pathlib import Path\nfrom unittest.mock import patch, MagicMock\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\nfrom alibi.saving import load_explainer\nfrom numpy.testing import assert_array_equal\n\nfrom helpers.tf_model import get_tf_mnist_model_uri\nfrom mlserver import MLModel\nfrom mlserver.codecs import NumpyCodec, StringCodec\nfrom mlserver.types import (\n InferenceRequest,\n Parameters,\n RequestInput,\n MetadataModelResponse,\n MetadataTensor,\n RequestOutput,\n)\nfrom mlserver_alibi_explain import AlibiExplainRuntime\nfrom mlserver_alibi_explain.common import (\n convert_from_bytes,\n to_v2_inference_request,\n _DEFAULT_INPUT_NAME,\n)\n\nTESTS_PATH = Path(os.path.dirname(__file__))\n_DEFAULT_ID_NAME = \"dummy_id\"\n\n\n@pytest.fixture\ndef payload() -> InferenceRequest:\n data = np.random.randn(1, 28, 28, 1) * 255\n\n # now we go via the inference model and see if we get the same results\n inference_request = InferenceRequest(\n parameters=Parameters(content_type=NumpyCodec.ContentType),\n inputs=[\n RequestInput(\n name=\"predict\",\n shape=data.shape,\n data=data.tolist(),\n datatype=\"FP32\",\n )\n ],\n )\n return inference_request\n\n\nasync def test_predict_impl(\n anchor_image_runtime_with_remote_predict_patch: AlibiExplainRuntime,\n custom_runtime_tf: MLModel,\n):\n # note: custom_runtime_tf is the underlying inference runtime\n # we want to test that the underlying impl predict is functionally correct\n # anchor_image_runtime fixture is already mocking\n # `remote_predict` -> custom_runtime_tf.predict\n\n # [batch, image_x, image_y, channel]\n data = np.random.randn(10, 28, 28, 1) * 255\n actual_result = anchor_image_runtime_with_remote_predict_patch._rt._infer_impl(data)\n\n # now we go via the inference model and see if we get the same results\n inference_request = InferenceRequest(\n parameters=Parameters(content_type=NumpyCodec.ContentType),\n inputs=[\n RequestInput(\n name=\"predict\",\n 
shape=data.shape,\n data=data.tolist(),\n datatype=\"FP32\",\n )\n ],\n )\n expected_result = await custom_runtime_tf.predict(inference_request)\n expected_result_numpy = NumpyCodec.decode_response_output(\n expected_result.outputs[0]\n )\n\n assert_array_equal(actual_result, expected_result_numpy)\n\n\n@pytest.fixture()\ndef alibi_anchor_image_model(anchor_image_directory):\n inference_model = tf.keras.models.load_model(get_tf_mnist_model_uri())\n model = load_explainer(anchor_image_directory, inference_model.predict)\n return model\n\n\nasync def test_end_2_end(\n anchor_image_runtime_with_remote_predict_patch: AlibiExplainRuntime,\n alibi_anchor_image_model,\n payload: InferenceRequest,\n):\n # in this test we are getting explanation and making sure that is the same one\n # as returned by alibi directly\n runtime_result = await anchor_image_runtime_with_remote_predict_patch.predict(\n payload\n )\n decoded_runtime_results = json.loads(\n convert_from_bytes(runtime_result.outputs[0], ty=str)\n )\n alibi_result = alibi_anchor_image_model.explain(\n NumpyCodec.decode(payload.inputs[0])[0] # payload has batch dimension,\n # we remove it for alibi\n )\n\n assert_array_equal(\n np.array(decoded_runtime_results[\"data\"][\"anchor\"]), alibi_result.data[\"anchor\"]\n )\n\n\nasync def test_end_2_end_explain_v1_output(\n anchor_image_runtime_with_remote_predict_patch: AlibiExplainRuntime,\n alibi_anchor_image_model,\n payload: InferenceRequest,\n):\n # in this test we get raw explanation as opposed to v2\n\n response = (\n await anchor_image_runtime_with_remote_predict_patch._rt.explain_v1_output(\n payload\n )\n )\n\n response_body = json.loads(response.body)\n assert \"meta\" in response_body\n assert \"data\" in response_body\n\n\n@pytest.mark.parametrize(\n \"payload, metadata, expected_v2_request\",\n [\n # numpy payload\n (\n np.zeros([2, 4]),\n None,\n InferenceRequest(\n id=_DEFAULT_ID_NAME,\n parameters=Parameters(content_type=NumpyCodec.ContentType),\n 
inputs=[\n RequestInput(\n parameters=Parameters(content_type=NumpyCodec.ContentType),\n name=_DEFAULT_INPUT_NAME,\n data=np.zeros([2, 4]).flatten().tolist(),\n shape=[2, 4],\n datatype=\"FP64\", # default for np.zeros\n )\n ],\n outputs=[],\n ),\n ),\n # numpy with metadata\n (\n np.zeros([2, 4]),\n MetadataModelResponse(\n name=\"dummy\",\n platform=\"dummy\",\n inputs=[MetadataTensor(name=\"input_name\", datatype=\"dummy\", shape=[])],\n outputs=[\n MetadataTensor(name=\"output_name\", datatype=\"dummy\", shape=[])\n ],\n ),\n InferenceRequest(\n id=_DEFAULT_ID_NAME,\n parameters=Parameters(content_type=NumpyCodec.ContentType),\n inputs=[\n RequestInput(\n parameters=Parameters(content_type=NumpyCodec.ContentType),\n name=\"input_name\", # inserted from metadata above\n data=np.zeros([2, 4]).flatten().tolist(),\n shape=[2, 4],\n datatype=\"FP64\", # default for np.zeros\n )\n ],\n outputs=[\n RequestOutput(name=\"output_name\")\n ], # inserted from metadata above\n ),\n ),\n # List[str] payload\n (\n [\"dummy\", \"dummy text\"],\n None,\n InferenceRequest(\n id=_DEFAULT_ID_NAME,\n parameters=Parameters(content_type=StringCodec.ContentType),\n inputs=[\n RequestInput(\n parameters=Parameters(content_type=StringCodec.ContentType),\n name=_DEFAULT_INPUT_NAME,\n data=[\"dummy\", \"dummy text\"],\n shape=[2],\n datatype=\"BYTES\",\n )\n ],\n outputs=[],\n ),\n ),\n ],\n)\n@patch(\n \"mlserver_alibi_explain.common.generate_uuid\",\n MagicMock(return_value=_DEFAULT_ID_NAME),\n)\ndef test_encode_inference_request__as_expected(payload, metadata, expected_v2_request):\n encoded_request = to_v2_inference_request(payload, metadata)\n assert encoded_request == expected_v2_request\n" ]
[ [ "numpy.array", "numpy.testing.assert_array_equal", "numpy.random.randn", "numpy.zeros" ] ]
rjdirisio/pyvibdmc
[ "9eaf05f42e95c3f98b51399e1453073b53169b4d" ]
[ "pyvibdmc/simulation_utilities/imp_samp_manager.py" ]
[ "import numpy as np\nimport os, sys\nimport importlib\nimport itertools as itt\nfrom itertools import repeat\n\nfrom .potential_manager import Potential, Potential_NoMP, NN_Potential\nfrom .imp_samp import *\n\n\nclass ImpSampManager:\n \"\"\"Imports and Wraps around the user-provided trial wfn and (optionally) the first and second derivatives.\n Parallelized using multiprocessing, which is considered the default for pyvibdmc.\"\"\"\n\n def __init__(self,\n trial_function,\n trial_directory,\n python_file,\n pot_manager,\n new_pool_num_cores=None,\n deriv_function=None,\n trial_kwargs=None,\n deriv_kwargs=None):\n self.trial_func = trial_function\n self.trial_dir = trial_directory\n self.python_file = python_file\n self.deriv_func = deriv_function\n self.trial_kwargs = trial_kwargs\n self.deriv_kwargs = deriv_kwargs\n self.pot_manager = pot_manager\n self.nomp_pool_cores = new_pool_num_cores # Only when one wants to do multiprocessing importance sampling with noMP potential (like NN-DMC)\n if isinstance(self.pot_manager, Potential):\n self.pool = self.pot_manager.pool\n self.num_cores = self.pot_manager.num_cores\n self._reinit_pool()\n elif (isinstance(self.pot_manager, Potential_NoMP) or isinstance(self.pot_manager,\n NN_Potential)) and self.nomp_pool_cores is not None:\n \"\"\"Really only for NN_Potential using multi-core imp samp\"\"\"\n from multiprocessing import Pool\n self.pool = Pool(self.nomp_pool_cores)\n self.num_cores = self.nomp_pool_cores\n self._reinit_pool()\n\n def __getstate__(self):\n \"\"\"Since pool is a variable inside this class, the object cannot be pickled + used for multiprocessing.\n The solution is to use __getstate__/__setstate, which will delete the pool and pot_manager internally\n when needed.\"\"\"\n self_dict = self.__dict__.copy()\n del self_dict['pool']\n del self_dict['pot_manager']\n return self_dict\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\n def _init_wfn_mp(self, chdir=False):\n \"\"\"Import the 
python functions of the pool workers on the pool.\n For when you have a Potential object. For simplicity, efficiency, and restrictiveness, the imp samp stuff\n should be in the same directory as the potential energy callers.\"\"\"\n if chdir:\n # For main process\n cur_dir = os.getcwd()\n os.chdir(self.trial_dir)\n sys.path.insert(0, os.getcwd())\n module = self.python_file.split(\".\")[0]\n x = importlib.import_module(module)\n self.trial_wfn = getattr(x, self.trial_func)\n if self.deriv_func is None:\n # bool for pyvibdmc sim code to do both derivs at once.\n self.all_finite = True\n self.derivs = ImpSamp.finite_diff\n else: # Supplied derivatives, just import them\n self.all_finite = False\n self.derivs = getattr(x, self.deriv_func)\n if chdir:\n # For main process\n os.chdir(cur_dir)\n\n def _reinit_pool(self):\n \"\"\"Imports the appropriate modules that are in the potential_manager directory\"\"\"\n empt = [() for _ in range(self.num_cores)]\n self._init_wfn_mp(chdir=True)\n self.pot_manager.pool.starmap(self._init_wfn_mp, empt, chunksize=1)\n\n def call_trial(self, cds):\n \"\"\"Get trial wave function using multiprocessing\"\"\"\n cds = np.array_split(cds, self.pot_manager.num_cores)\n if self.trial_kwargs is not None:\n res = self.pool.starmap(self.trial_wfn, zip(cds, repeat(self.trial_kwargs, len(cds))))\n else:\n res = self.pool.map(self.trial_wfn, cds)\n res = np.concatenate(res)\n return res\n\n def call_trial_no_mp(self, cds):\n \"\"\"For call_derivs (finite diff), get trial wave function.\n Still used in the mp.pool context, just doesn't call pool itself\"\"\"\n if self.trial_kwargs is None:\n trial = self.trial_wfn(cds)\n else:\n trial = self.trial_wfn(cds, self.trial_kwargs)\n return trial\n\n def call_derivs(self, cds):\n \"\"\"For when derivatives are not supplied, call finite difference function.\n This is still parallelized.\"\"\"\n cds = np.array_split(cds, self.num_cores)\n if self.all_finite:\n # Divide by trial wfn if finite difference\n 
derivz, sderivz, trial_wfn = zip(*self.pool.starmap(self.derivs,\n zip(cds, repeat(self.call_trial_no_mp, len(cds)))))\n derivz = np.concatenate(derivz) / np.concatenate(trial_wfn)[:, np.newaxis, np.newaxis]\n sderivz = np.concatenate(sderivz) / np.concatenate(trial_wfn)[:, np.newaxis, np.newaxis]\n else:\n if self.deriv_kwargs is None:\n derivz, sderivz = zip(*self.pool.map(self.derivs, cds))\n else:\n derivz, sderivz = zip(*self.pool.starmap(self.derivs, zip(cds, repeat(self.deriv_kwargs, len(cds)))))\n derivz = np.concatenate(derivz)\n sderivz = np.concatenate(sderivz)\n ##Testing\n # fderivz, fsderivz, trial_wfn = ImpSamp.finite_diff(np.concatenate(cds), trial_func=self.call_trial_no_mp)\n # fderivz = fderivz / trial_wfn[:, np.newaxis, np.newaxis]\n # fsderivz = fsderivz / trial_wfn[:, np.newaxis, np.newaxis]\n # print('deriv:', np.average(fderivz-derivz))\n # print('sderiv:', np.average(fsderivz-sderivz))\n # print('hi')\n return derivz, sderivz\n\n\nclass ImpSampManager_NoMP:\n \"\"\"Version of the manager that does not use any multiprocessing. If we ever evaluate the trial wfns with GPUs\n this could be useful. 
Could also be useful if multiprocessing is incompatible with your workflow.\"\"\"\n\n def __init__(self,\n trial_function,\n trial_directory,\n python_file,\n chdir=False,\n deriv_function=None,\n trial_kwargs=None,\n deriv_kwargs=None, ):\n self.trial_fuc = trial_function\n self.trial_dir = trial_directory\n self.python_file = python_file\n self.deriv_func = deriv_function\n self.trial_kwargs = trial_kwargs\n self.deriv_kwargs = deriv_kwargs\n self.chdir = chdir\n self._import_modz()\n\n def _import_modz(self):\n self._curdir = os.getcwd()\n os.chdir(self.trial_dir)\n sys.path.insert(0, os.getcwd())\n module = self.python_file.split(\".\")[0]\n x = importlib.import_module(module)\n self.trial = getattr(x, self.trial_fuc)\n if self.deriv_func is None:\n self.all_finite = True\n self.derivs = ImpSamp.finite_diff\n else: # Supplied derivatives, just import them\n self.all_finite = False\n self.derivs = getattr(x, self.deriv_func)\n os.chdir(self._curdir)\n\n def call_imp_func(self, func, cds, func_kwargs=None):\n \"\"\"Convenience function for trial, deriv, and sderiv so I don't have to have triplicates of code\"\"\"\n if self.chdir:\n os.chdir(self.trial_dir)\n if func_kwargs is None:\n ret_val = func(cds)\n else:\n ret_val = func(cds, func_kwargs)\n if self.chdir:\n os.chdir(self._curdir)\n return ret_val\n\n def call_trial(self, cds):\n \"\"\"Call trial wave function.\"\"\"\n trial = self.call_imp_func(self.trial, cds, self.trial_kwargs)\n return trial\n\n def call_derivs(self, cds):\n \"\"\"For when derivatives are not supplied, call finite difference function.\n Returns derivatives divided by psi already\"\"\"\n if self.all_finite:\n derivz, sderivz, trial_wfn = self.derivs(cds, trial_func=self.call_trial)\n derivz = derivz / trial_wfn[:, np.newaxis, np.newaxis]\n sderivz = sderivz / trial_wfn[:, np.newaxis, np.newaxis]\n else:\n derivz, sderivz = self.call_imp_func(self.derivs, cds, self.deriv_kwargs)\n ###Testing\n # fderivz, fsderivz, trial_wfn = 
ImpSamp.finite_diff(cds, trial_func=self.call_trial)\n # fderivz = fderivz / trial_wfn[:, np.newaxis, np.newaxis]\n # fsderivz = fsderivz / trial_wfn[:, np.newaxis, np.newaxis]\n # max_d = np.average(fderivz - derivz)\n # max_sd = np.average(fsderivz - sderivz)\n # print(f\"Avg Psi: {max_d}\")\n # print(f\"Avg 2Psi: {max_sd}\")\n ###/Testing\n return derivz, sderivz\n" ]
[ [ "numpy.concatenate", "numpy.array_split" ] ]
linxdcn/tsi-microservice
[ "23d4f034e4afe32bacbbf611c02416eb5ec87e19" ]
[ "hello-ms/src/main/resources/py/test.py" ]
[ "import numpy as np\n\na = np.array([[1., 7., 0.], [-2., 1., 2.]])\nprint(a)" ]
[ [ "numpy.array" ] ]
jyuno426/KCSS
[ "0d23130e9e79f6089d2f942ff96cb414e17448f8" ]
[ "utils/lstm_model.py" ]
[ "import unidecode\nimport functools\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import load_model\n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\n\n\ndef is_alpha(name):\n \"\"\"\n name이 μ•ŒνŒŒλ²³μœΌλ‘œλ§Œ 이루어진 λ¬Έμžμ—΄μΈμ§€ νŒλ³„\n \"\"\"\n for c in name:\n if c.lower() not in alphabet:\n return False\n return True\n\n\ndef normalize(name):\n \"\"\"\n μœ λ‹ˆμ½”λ“œ 문자(ex. ν•œμž or Γ€ 이런 μ• λ“€)λ₯Ό ascii μ•ŒνŒŒλ²³μœΌλ‘œ λ°”κΎΈκ³ \n μ•ŒνŒŒλ²³λ§Œ 남김\n \"\"\"\n result = \"\"\n for c in unidecode.unidecode(name):\n if c.lower() in alphabet:\n result += c\n return result\n\n\ndef scale(x):\n \"\"\"\n first name이 kr인지 계산할 λ•Œ μ“°μ΄λŠ” scaleν•¨μˆ˜\n \"\"\"\n if x < 1 / 3:\n return x * 1.5\n else:\n return 0.75 * x + 0.25\n\n\ndef name_one_hot(name, max_seq_len):\n \"\"\"\n name의 각 μ•ŒνŒŒλ²³μ„ one_hot_vector둜 인코딩\n max_seq_lenκΉŒμ§€ zero padding μΆ”κ°€\n \"\"\"\n if not is_alpha(name):\n raise Exception(\"input name is not alphabet string!: \" + name)\n\n result = []\n for char in name.lower()[:max_seq_len]:\n v = np.zeros(26, dtype=np.int)\n try:\n v[alphabet.index(char)] = 1\n result.append(v)\n except ValueError:\n pass\n\n while len(result) < max_seq_len:\n result.append(np.zeros(26, dtype=np.int))\n\n return np.array(result)\n\n\ndef as_keras_metric(method):\n @functools.wraps(method)\n def wrapper(self, args, **kwargs):\n \"\"\" Wrapper for turning tensorflow metrics into keras metrics \"\"\"\n value, update_op = method(self, args, **kwargs)\n K.get_session().run(tf.local_variables_initializer())\n with tf.control_dependencies([update_op]):\n value = tf.identity(value)\n return value\n\n return wrapper\n\n\nauc = {\"auc\": as_keras_metric(tf.metrics.AUC)}\n\n\nclass LSTM_Model:\n def __init__(self, model_path):\n if \"korean_prob\" in model_path:\n self.type = \"korean_prob\"\n elif \"woman_prob\" in model_path:\n self.type = \"woman_prob\"\n else:\n raise Exception(\"wrong model path:\", 
model_path)\n\n self.model = load_model(model_path, custom_objects=auc)\n\n def _pred(self, string):\n return self.model.predict(name_one_hot(string, 15).reshape(1, 15, 26))[0]\n\n def pred(self, string):\n if self.type == \"korean_prob\":\n return self.korean_prob_first_name(string)\n elif self.type == \"woman_prob\":\n return self.woman_prob_first_name(string)\n else:\n raise Exception(\"wrong type:\", self.type)\n\n def korean_prob_first_name(self, first_name):\n \"\"\"\n Calculate korean_prob of first_name\n Basically the model predicts (kr/ch/en) probs.\n \"\"\"\n\n normalized_first_name = \"\"\n nationalities_of_parts = set()\n\n max_korean_prob = 0\n max_chinese_prob = 0\n min_korean_prob = 0.5\n\n for _part in first_name.split():\n part = normalize(_part)\n if len(part) > 1:\n probs = self._pred(part)\n maxarg = np.argmax(probs)\n\n if maxarg == 0:\n max_korean_prob = max(max_korean_prob, scale(probs[0]))\n nationalities_of_parts.add(\"korean\")\n elif maxarg == 1:\n max_chinese_prob = max(max_chinese_prob, scale(probs[1]))\n nationalities_of_parts.add(\"chinese\")\n\n min_korean_prob = min(min_korean_prob, scale(probs[0]))\n normalized_first_name += part\n\n if len(normalized_first_name) > 1:\n probs = self._pred(normalized_first_name)\n maxarg = np.argmax(probs)\n\n if maxarg == 0:\n \"\"\"\n If normalized first name is most likely korean,\n its prob >= 1/3 so that scale(prob) >= 1/2.\n => Classify it as korean directly. 
\n \"\"\"\n return scale(probs[0])\n\n elif maxarg == 1:\n max_chinese_prob = max(max_chinese_prob, scale(probs[1]))\n nationalities_of_parts.add(\"chinese\")\n\n min_korean_prob = min(min_korean_prob, scale(probs[0]))\n\n if \"korean\" in nationalities_of_parts:\n if \"chinese\" in nationalities_of_parts:\n return 1 - max_chinese_prob\n else:\n return max_korean_prob\n else:\n return min_korean_prob\n\n def woman_prob_first_name(self, first_name):\n \"\"\"\n Calculate korean_female_prob of korean first_name\n \"\"\"\n return self._pred(normalize(first_name))[0]\n\n def build_model(self):\n model = Sequential()\n model.add(Bidirectional(LSTM(64, input_shape=(15, 26))))\n model.add(Dense(3, activation=\"softmax\", kernel_initializer=\"normal\"))\n model.compile(\n loss=\"categorical_crossentropy\",\n optimizer=Adam(0.0005),\n metrics=[\"accuracy\", as_keras_metric(tf.metrics.auc)],\n )\n self.model = model\n\n def show_train_graph(self, hist):\n fig, loss_ax = plt.subplots()\n acc_ax = loss_ax.twinx()\n\n loss_ax.plot(hist.history[\"loss\"], \"y\", label=\"train loss\")\n loss_ax.plot(hist.history[\"val_loss\"], \"r\", label=\"val loss\")\n\n acc_ax.plot(hist.history[\"acc\"], \"b\", label=\"train acc\")\n acc_ax.plot(hist.history[\"val_acc\"], \"g\", label=\"val acc\")\n\n acc_ax.plot(hist.history[\"auc\"], \"m\", label=\"train auc\")\n acc_ax.plot(hist.history[\"val_auc\"], \"k\", label=\"val auc\")\n\n loss_ax.set_xlabel(\"epoch\")\n loss_ax.set_ylabel(\"loss\")\n acc_ax.set_ylabel(\"auc_roc\")\n\n loss_ax.legend(loc=\"upper left\")\n acc_ax.legend(loc=\"lower left\")\n\n plt.show()\n fig.savefig(\"./static/data/train_graph.png\")\n\n def train_korean_prob_model(self):\n # ------------------------------------\n max_seq_len = 15\n np.random.seed(5)\n # ------------------------------------\n\n kr_list = get_file(\"./static/data/kr_first_names.txt\")\n ch_list = get_file(\"./static/data/ch_first_names.txt\")\n us_list = 
get_file(\"./static/data/us_first_names.txt\")\n\n a = len(kr_list)\n b = len(ch_list)\n c = len(us_list)\n data_len = a + b + c\n\n X, Y = [], []\n\n for _ in range(1):\n for name in kr_list:\n X.append(name_one_hot(name, max_seq_len))\n Y.append(np.array([1, 0, 0]))\n for _ in range(1):\n for name in ch_list:\n X.append(name_one_hot(name, max_seq_len))\n Y.append(np.array([0, 1, 0]))\n for name in us_list:\n X.append(name_one_hot(name, max_seq_len))\n Y.append(np.array([0, 0, 1]))\n\n X, Y = np.array(X), np.array(Y)\n\n np.reshape(X, (data_len, max_seq_len, 26))\n np.reshape(Y, (data_len, 1, 3))\n\n permutation = np.random.permutation(X.shape[0])\n X = X[permutation]\n Y = Y[permutation]\n\n train_len = int(data_len * 0.99)\n\n x_train = X[:train_len]\n y_train = Y[:train_len]\n x_val = X[train_len:]\n y_val = Y[train_len:]\n\n loss_CP = ModelCheckpoint(\n \"./static/temp/loss.h5\",\n monitor=\"val_loss\",\n mode=\"min\",\n verbose=0,\n save_best_only=True,\n )\n acc_CP = ModelCheckpoint(\n \"./static/temp/acc.h5\",\n monitor=\"val_acc\",\n mode=\"max\",\n verbose=0,\n save_best_only=True,\n )\n auc_CP = ModelCheckpoint(\n \"./static/temp/auc.h5\",\n monitor=\"val_auc\",\n mode=\"max\",\n verbose=0,\n save_best_only=True,\n )\n\n self.build_model()\n model = self.model\n hist = model.fit(\n x_train,\n y_train,\n epochs=300,\n batch_size=512,\n validation_data=(x_val, y_val),\n verbose=2,\n callbacks=[loss_CP, acc_CP, auc_CP],\n )\n\n # score = model.evaluate(x_test, y_test)\n # print(\"%s: %.2f%%\" %(model.metrics_names[1], score[1] * 100))\n\n # model.save(self.model_path)\n self.show_train_graph(hist)\n\n # def train_woman_prob_model(self):\n # # ------------------------------------\n # max_seq_len = 15\n # np.random.seed(5)\n # # ------------------------------------\n # # \n # names_by_gender = json.load(open(\"./names_by_gender.json\"))\n\n # female_list = names_by_gender[\"female\"]\n # male_list = names_by_gender[\"male\"]\n\n # a = len(female_list)\n # b = 
len(male_list)\n\n # data_len = a + b\n\n # X, Y = [], []\n\n # for _ in range(1):\n # for name in female_list:\n # X.append(name_one_hot(name, max_seq_len))\n # Y.append(np.array([1, 0]))\n # for _ in range(1):\n # for name in male_list:\n # X.append(name_one_hot(name, max_seq_len))\n # Y.append(np.array([0, 1]))\n\n # X, Y = np.array(X), np.array(Y)\n\n # np.reshape(X, (data_len, max_seq_len, 26))\n # np.reshape(Y, (data_len, 1, 2))\n\n # permutation = np.random.permutation(X.shape[0])\n # X = X[permutation]\n # Y = Y[permutation]\n\n # train_len = int(data_len * 0.99)\n\n # x_train = X[:train_len]\n # y_train = Y[:train_len]\n # x_val = X[train_len:]\n # y_val = Y[train_len:]\n\n # loss_CP = ModelCheckpoint(\n # \"./model/gender_loss.h5\",\n # monitor=\"val_loss\",\n # mode=\"min\",\n # verbose=0,\n # save_best_only=True,\n # )\n # acc_CP = ModelCheckpoint(\n # \"./model/gender_acc.h5\",\n # monitor=\"val_acc\",\n # mode=\"max\",\n # verbose=0,\n # save_best_only=True,\n # )\n # auc_CP = ModelCheckpoint(\n # \"./model/gender_auc.h5\",\n # monitor=\"val_auc\",\n # mode=\"max\",\n # verbose=0,\n # save_best_only=True,\n # )\n\n # self.build_model()\n # model = self.model\n # hist = model.fit(\n # x_train,\n # y_train,\n # epochs=100,\n # batch_size=512,\n # validation_data=(x_val, y_val),\n # verbose=2,\n # callbacks=[loss_CP, acc_CP, auc_CP],\n # )\n\n # # score = model.evaluate(x_test, y_test)\n # # print(\"%s: %.2f%%\" %(model.metrics_names[1], score[1] * 100))\n\n # # model.save(self.model_path)\n # self.show_train_graph(hist)\n" ]
[ [ "numpy.array", "numpy.reshape", "numpy.zeros", "numpy.random.seed", "numpy.random.permutation", "tensorflow.keras.models.load_model", "tensorflow.keras.backend.get_session", "numpy.argmax", "tensorflow.control_dependencies", "tensorflow.local_variables_initializer", "tensorflow.identity" ] ]
jetbrains-academy/Python-Libraries-NumPy
[ "7ce0f2d08f87502d5d97bbc6921f0566184d4ebb", "7ce0f2d08f87502d5d97bbc6921f0566184d4ebb", "7ce0f2d08f87502d5d97bbc6921f0566184d4ebb" ]
[ "NumPy/Array Basics/Create an Array from Range/task.py", "NumPy/Arrays of String and Unicode Values/Translate/tests/test_task.py", "Projects/SVD/SVD on One Matrix/tests/test_task.py" ]
[ "import numpy as np\n\n\ndef array_from_range(start, stop, step=1):\n return np.arange(start, stop, step)\n\n\nif __name__ == '__main__':\n up = array_from_range(100, 110)\n down = array_from_range(100, 0, -10)\n print(up) # Should print '[100 101 102 103 104 105 106 107 108 109]'\n print(down) # Should print '[100 90 80 70 60 50 40 30 20 10]'\n", "import unittest\nimport numpy as np\nimport string\n\nfrom task import remove_extra_stuff, text\n\ntest_text = np.array(['Method #1:', 'Using isdigit() method', ' Method #2:', 'Using regex'])\n\nclass TestCase(unittest.TestCase):\n def test_array(self):\n txt = remove_extra_stuff(text)\n np.testing.assert_array_equal(txt,\n np.char.translate(text, str.maketrans('AEIOUY', 'aeiouy', string.punctuation + string.digits + string.whitespace)),\n err_msg=\"Your function does something else. Please check out the expected result in the\"\n \"task description.\")\n\n def test_punctuation(self):\n txt = remove_extra_stuff(test_text)\n check_punctuation = [char not in element for char in string.punctuation for element in txt]\n self.assertTrue(all(check_punctuation), msg='Your result still contains punctuation.')\n\n def test_digits(self):\n txt = remove_extra_stuff(test_text)\n check_digits = [char not in element for char in string.digits for element in txt]\n self.assertTrue(all(check_digits), msg='Your result still contains digits.')\n\n def test_whitespaces(self):\n txt = remove_extra_stuff(test_text)\n check_whitespace = [char not in element for char in string.whitespace for element in txt]\n self.assertTrue(all(check_whitespace), msg='Your result still contains whitespaces.')\n", "import unittest\nimport numpy as np\nfrom numpy import linalg\n\nfrom task import img_gray, U, s, Vt\n\n\nclass TestCase(unittest.TestCase):\n def test_svd(self):\n test_U, test_s, test_Vt = linalg.svd(img_gray)\n np.testing.assert_array_equal(U, test_U,\n 'Matrix U doesn\\'t match the expected. 
Use linalg.svd to complete the task.')\n np.testing.assert_array_equal(s, test_s,\n 'Matrix s doesn\\'t match the expected. Use linalg.svd to complete the task.')\n np.testing.assert_array_equal(Vt, test_Vt,\n 'Matrix Vt doesn\\'t match the expected.Use linalg.svd to complete the task.')\n" ]
[ [ "numpy.arange" ], [ "numpy.array" ], [ "numpy.testing.assert_array_equal", "numpy.linalg.svd" ] ]
jesussantana/Machine-Learning-Stanford-University
[ "7ee8527d8a2df43c674757e060d1c7ccad2926a4" ]
[ "notebooks/Ex2-Logistic-Regression/ex2-py/ex2.py" ]
[ "import numpy as np\r\nfrom scipy.optimize import fmin_bfgs\r\n\r\nfrom sigmoid import sigmoid\r\nfrom plotData import plotData\r\nfrom costFunction import costFunction\r\nfrom plotDecisionBoundary import plotDecisionBoundary\r\nfrom predict import predict\r\n\r\n\r\ndata = np.loadtxt('ex2data1.txt', delimiter=',')\r\nX = data[:, [0, 1]]\r\ny = data[:, [2]]\r\n\r\n\r\n# ==================== Part 1: Plotting ====================\r\n# We start the exercise by first plotting the data to understand the\r\n# the problem we are working with.\r\n\r\nprint('Plotting data with + indicating (y = 1) examples,',\r\n 'and o indicating (y = 0) examples.\\n')\r\nplotData(X, y, xlabel='Exam 1 score', ylabel='Exam 2 score',\r\n legends=['Admitted', 'Not Admitted'])\r\n\r\n\r\n# ============ Part 2: Compute Cost and Gradient ============\r\n# In this part of the exercise, you will implement the cost and gradient\r\n# for logistic regression. You neeed to complete the code in\r\n# costFunction.py\r\n\r\nm, n = X.shape\r\nX = np.hstack((np.ones((m, 1)), X))\r\ninitial_theta = np.zeros(n + 1)\r\n\r\ncost, grad = costFunction(initial_theta, X, y)\r\nprint('Cost at initial theta (zeros):', cost)\r\nprint('Gradient at initial theta (zeros):', grad, '\\n')\r\n\r\n\r\n# =========== Part 3: Optimizing using fmin_bfgs ===========\r\n# In this exercise, you will use a built-in function (fminunc) to find the\r\n# optimal parameters theta.\r\n\r\ncost_function = lambda p: costFunction(p, X, y)[0]\r\ngrad_function = lambda p: costFunction(p, X, y)[1]\r\n\r\ntheta = fmin_bfgs(cost_function, initial_theta, fprime=grad_function)\r\nprint('theta:', theta, '\\n')\r\n\r\nplotDecisionBoundary(theta, X[:, 1:], y, xlabel='Exam 1 score', ylabel='Exam 2 score',\r\n legends=['Admitted', 'Not Admitted', 'Decision Boundary'])\r\n\r\n\r\n# ============== Part 4: Predict and Accuracies ==============\r\n# After learning the parameters, you'll like to use it to predict the outcomes\r\n# on unseen data. 
In this part, you will use the logistic regression model\r\n# to predict the probability that a student with score 45 on exam 1 and\r\n# score 85 on exam 2 will be admitted.\r\n#\r\n# Furthermore, you will compute the training and test set accuracies of\r\n# our model.\r\n#\r\n# Your task is to complete the code in predict.py\r\n\r\nprob = sigmoid(np.array([1, 45, 85]).dot(theta))\r\nprint('For a student with scores 45 and 85, we predict an admission',\r\n 'probability of %f' % prob)\r\n\r\np = predict(theta, X)\r\np = np.mean(p == y) * 100\r\nprint('Train Accuracy: %.2f %%' % p)\r\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.ones", "scipy.optimize.fmin_bfgs", "numpy.mean", "numpy.loadtxt" ] ]
visym/keynet
[ "7e3aeb0fd35955ef7ca7499170a337f261655f6e", "7e3aeb0fd35955ef7ca7499170a337f261655f6e" ]
[ "keynet/vgg.py", "test/test_keynet.py" ]
[ "import os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\n\n\ndef prepare_vgg16_image(img):\n \"\"\"\n Convert an RGB byte image to a FloatTensor suitable for processing with the network.\n This function assumes the image has already been resized, cropped, jittered, etc.\n \"\"\"\n # Convert to BGR\n img_bgr = np.array(img)[...,[2,1,0]]\n # Subtract mean pixel value\n img_bgr_fp = img_bgr - np.array((93.5940, 104.7624, 129.1863))\n # Permute dimensions so output is 3xRxC\n img_bgr_fp = np.rollaxis(img_bgr_fp, 2, 0)\n return torch.from_numpy(img_bgr_fp).float()\n\n\ndef vgg16_preprocess(jitter=False, blur_radius=None, blur_prob=1.0):\n transform_list = [transforms.Resize(256),]\n if jitter:\n transform_list.append(transforms.RandomCrop((224,224)))\n transform_list.append(transforms.RandomHorizontalFlip())\n #transform_list.append(transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1))\n else:\n transform_list.append(transforms.CenterCrop((224,224)))\n if blur_radius is not None and blur_prob > 0:\n transform_list.append(transforms.Lambda(generate_random_blur(blur_radius, blur_prob)))\n # finally, convert PIL RGB image to FloatTensor\n transform_list.append(transforms.Lambda(prepare_vgg16_image))\n return transforms.Compose(transform_list)\n\n\nclass VGG16(nn.Module):\n \"\"\"\n The VGG16 network, with average pooling replacing maxpooling\n \"\"\"\n def __init__(self, num_classes=2622, avgpool=True):\n super(VGG16, self).__init__()\n\n # Layers must be repeated in order for netshape to work\n self.conv1_1 = nn.Conv2d(3,64,(3, 3),(1, 1),(1, 1))\n self.relu1_1 = nn.ReLU() \n self.conv1_2 = nn.Conv2d(64,64,(3, 3),(1, 1),(1, 1))\n self.relu1_2 = nn.ReLU()\n self.pool1_2 = nn.AvgPool2d((3, 3),(2, 2),(0, 0),ceil_mode=True) if avgpool else nn.MaxPool2d((2, 2),(2, 2),(0, 0),ceil_mode=True)\n \n self.conv2_1 = nn.Conv2d(64,128,(3, 3),(1, 1),(1, 1))\n self.relu2_1 
= nn.ReLU()\n self.conv2_2 = nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1))\n self.relu2_2 = nn.ReLU()\n self.pool2_2 = nn.AvgPool2d((3, 3),(2, 2),(0, 0),ceil_mode=True) if avgpool else nn.MaxPool2d((2, 2),(2, 2),(0, 0),ceil_mode=True)\n \n self.conv3_1 = nn.Conv2d(128,256,(3, 3),(1, 1),(1, 1))\n self.relu3_1 = nn.ReLU() \n self.conv3_2 = nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1))\n self.relu3_2 = nn.ReLU() \n self.conv3_3 = nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1))\n self.relu3_3 = nn.ReLU()\n self.pool3_3 = nn.AvgPool2d((3, 3),(2, 2),(0, 0),ceil_mode=True) if avgpool else nn.MaxPool2d((2, 2),(2, 2),(0, 0),ceil_mode=True)\n \n self.conv4_1 = nn.Conv2d(256,512,(3, 3),(1, 1),(1, 1))\n self.relu4_1 = nn.ReLU() \n self.conv4_2 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))\n self.relu4_2 = nn.ReLU() \n self.conv4_3 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))\n self.relu4_3 = nn.ReLU()\n self.pool4_3 = nn.AvgPool2d((3, 3),(2, 2),(0, 0),ceil_mode=True) if avgpool else nn.MaxPool2d((2, 2),(2, 2),(0, 0),ceil_mode=True)\n\n self.conv5_1 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))\n self.relu5_1 = nn.ReLU() \n self.conv5_2 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))\n self.relu5_2 = nn.ReLU() \n self.conv5_3 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))\n self.relu5_3 = nn.ReLU()\n self.pool5_3 = nn.AvgPool2d((3, 3),(2, 2),(0, 0),ceil_mode=True) if avgpool else nn.MaxPool2d((2, 2),(2, 2),(0, 0),ceil_mode=True)\n\n self.fc6 = nn.Linear(25088,4096)\n self.relu6 = nn.ReLU()\n \n self.dropout7 = nn.Dropout(0.5) \n self.fc7 = nn.Linear(4096,4096) \n self.relu7 = nn.ReLU()\n\n self.dropout8 = nn.Dropout(0.5) \n self.fc8 = nn.Linear(4096, num_classes)\n\n\n def forward(self, input):\n assert len(input.size()) == 4\n assert input.shape[1] == 3 and input.shape[2] == 224 and input.shape[3] == 224, \"Invalid input shape - must be Nx3n224x224\"\n\n e1_1 = self.relu1_1(self.conv1_1(input))\n e1_2 = self.pool1_2(self.relu1_2(self.conv1_2(e1_1)))\n\n e2_1 = self.relu2_1(self.conv2_1(e1_2))\n e2_2 = 
self.pool2_2(self.relu2_2(self.conv2_2(e2_1)))\n\n e3_1 = self.relu3_1(self.conv3_1(e2_2))\n e3_2 = self.relu3_2(self.conv3_2(e3_1))\n e3_3 = self.pool3_3(self.relu3_3(self.conv3_3(e3_2)))\n\n e4_1 = self.relu4_1(self.conv4_1(e3_3))\n e4_2 = self.relu4_2(self.conv4_2(e4_1))\n e4_3 = self.pool4_3(self.relu4_3(self.conv4_3(e4_2)))\n\n e5_1 = self.relu5_1(self.conv5_1(e4_3))\n e5_2 = self.relu5_2(self.conv5_2(e5_1))\n e5_3 = self.pool5_3(self.relu5_3(self.conv5_3(e5_2)))\n\n e5_3_flat = e5_3.view(e5_3.size(0), -1)\n\n e6 = self.relu6(self.fc6(e5_3_flat))\n e7_pre = self.fc7(self.dropout7(e6))\n e7 = self.relu7(e7_pre)\n\n e8 = self.fc8(self.dropout8(e7))\n return e8\n\n\ndef vgg16(pthfile):\n \"\"\"\n Constructs a VGG-16 model\n \"\"\"\n model = VGG16()\n model.load_state_dict(torch.load(pthfile))\n return model\n", "import sys\nimport numpy as np\nimport scipy.linalg\nimport PIL\nimport copy\nimport torch \nfrom torch import nn\nimport torch.nn.functional as F\nimport keynet.sparse\nfrom keynet.sparse import sparse_permutation_matrix, sparse_identity_matrix, sparse_identity_matrix_like\nfrom keynet.torch import affine_to_linear, linear_to_affine, affine_to_linear_matrix\nfrom keynet.sparse import sparse_toeplitz_conv2d, sparse_toeplitz_avgpool2d\nfrom keynet.util import torch_avgpool2d_in_scipy, torch_conv2d_in_scipy\nfrom keynet.dense import uniform_random_diagonal_matrix, random_positive_definite_matrix\nimport keynet.util\nimport keynet.mnist\nimport keynet.cifar10\nimport keynet.torch\nimport keynet.system\nimport keynet.vgg\nimport vipy\nfrom vipy.util import Stopwatch\nfrom keynet.globals import GLOBAL\n\n\ndef test_identity_keynet():\n inshape = (1,28,28)\n x = torch.randn(1, *inshape)\n net = keynet.mnist.LeNet_AvgPool()\n net.load_state_dict(torch.load('./models/mnist_lenet_avgpool.pth'))\n\n (sensor, knet) = keynet.system.IdentityKeynet(inshape, net)\n assert np.allclose(knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten(), 
net.forward(x).detach().numpy().flatten(), atol=1E-5)\n print('[test_keynet]: IdentityKeynet PASSED')\n\n \ndef test_tiled_keynet():\n inshape = (1,28,28)\n x = torch.randn(1, *inshape)\n net = keynet.mnist.LeNet_AvgPool()\n (sensor, knet) = keynet.system.Keynet(inshape, net, backend='scipy', tileshape=None)\n (sensor, knet_tiled) = keynet.system.Keynet(inshape, net, backend='scipy', tileshape=(28,28))\n\n yh = knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten()\n y = net.forward(x).detach().numpy().flatten()\n print(yh,y)\n assert np.allclose(knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten(), net.forward(x).detach().numpy().flatten(), atol=1E-5)\n print('[test_keynet]: tiled IdentityKeynet PASSED') \n \ndef test_permutation_keynet():\n inshape = (1,28,28)\n x = torch.randn(1, *inshape)\n net = keynet.mnist.LeNet_AvgPool()\n net.load_state_dict(torch.load('./models/mnist_lenet_avgpool.pth'))\n\n (sensor, knet) = keynet.system.PermutationKeynet(inshape, net)\n assert np.allclose(knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten(), net.forward(x).detach().numpy().flatten(), atol=1E-5)\n print('[test_keynet]: global PermutationKeynet - PASSED') \n \n (sensor, knet) = keynet.system.Keynet(inshape, net, global_geometric='permutation', memoryorder='block', blocksize=14)\n assert np.allclose(knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten(), net.forward(x).detach().numpy().flatten(), atol=1E-5) \n print('[test_keynet]: global PermutationKeynet - PASSED')\n\n\ndef test_photometric_keynet():\n inshape = (1,28,28)\n x = torch.randn(1, *inshape)\n net = keynet.mnist.LeNet_AvgPool()\n\n (sensor, knet) = keynet.system.Keynet(inshape, net, global_photometric='uniform_random_gain', beta=1.0)\n assert np.allclose(knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten(), net.forward(x).detach().numpy().flatten(), atol=1E-5)\n 
print('[test_keynet]: Analog Gain Keynet - PASSED')\n\n (sensor, knet) = keynet.system.Keynet(inshape, net, global_photometric='uniform_random_bias', gamma=1.0)\n assert np.allclose(knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten(), net.forward(x).detach().numpy().flatten(), atol=1E-5)\n print('[test_keynet]: Analog Bias Keynet - PASSED')\n\n (sensor, knet) = keynet.system.Keynet(inshape, net, global_photometric='uniform_random_affine', beta=1.0, gamma=1.0)\n assert np.allclose(knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten(), net.forward(x).detach().numpy().flatten(), atol=1E-4)\n print('[test_keynet]: Analog Affine Keynet - PASSED')\n \n\ndef test_vgg16_identity():\n\n inshape = (3,224,224)\n x = torch.randn(1, *inshape)\n net = keynet.vgg.VGG16()\n\n print('vgg16: num parameters=%d' % keynet.torch.count_parameters(net))\n (sensor, knet) = keynet.system.IdentityKeynet(inshape, net)\n\n yh = knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten()\n y = net.forward(x).detach().numpy().flatten()\n assert np.allclose(yh, y, atol=1E-3)\n print('vgg16: keynet-56 num parameters=%d' % knet.num_parameters())\n\n\ndef test_vgg16_identity_tiled():\n\n inshape = (3,224,224)\n x = torch.randn(1, *inshape)\n net = keynet.vgg.VGG16()\n\n print('vgg16: num parameters=%d' % keynet.torch.count_parameters(net))\n (sensor, knet) = keynet.system.TiledIdentityKeynet(inshape, net, 224//4)\n print(vipy.util.save((sensor, knet), 'test_vgg16.pkl'))\n #(sensor, knet) = vipy.util.load('test_vgg16.pkl')\n\n yh = knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten()\n y = net.forward(x).detach().numpy().flatten()\n \n assert np.allclose(yh, y, atol=1E-3)\n print('vgg16: keynet-56 num parameters=%d' % knet.num_parameters())\n\n\ndef test_vgg16_stochastic():\n inshape = (3,224,224)\n x = torch.randn(1, *inshape)\n net = keynet.vgg.VGG16()\n print('vgg16: num parameters=%d' 
% keynet.torch.count_parameters(net))\n\n keynet.globals.num_processes(48)\n (sensor, knet) = keynet.system.Keynet(inshape, net, tileshape=(224//16, 224//16), \n global_geometric='hierarchical_permutation', hierarchical_blockshape=(2,2), hierarchical_permute_at_level=(0,1,2),\n local_geometric='doubly_stochastic', alpha=2.0, blocksize=224//16,\n local_photometric='uniform_random_affine', beta=1.0, gamma=1.0,\n memoryorder='channel')\n \n assert np.allclose(knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten(), net.forward(x).detach().numpy().flatten(), atol=1E-5)\n print('vgg16: keynet-orthogonal-56 num parameters=%d' % knet.num_parameters())\n\n\ndef test_vgg16_orthogonal():\n inshape = (3,224,224)\n x = torch.randn(1, *inshape)\n net = keynet.vgg.VGG16()\n print('vgg16: num parameters=%d' % keynet.torch.count_parameters(net))\n\n (sensor, knet) = keynet.system.Keynet(inshape, net, tileshape=(224//16, 224//16), \n global_geometric='identity', hierarchical_blockshape=(2,2), hierarchical_permute_at_level=(0,1,2),\n local_geometric='givens_orthogonal', alpha=2.0, blocksize=224//16,\n local_photometric='uniform_random_affine', beta=1.0, gamma=1.0,\n memoryorder='channel')\n print(vipy.util.save((sensor, knet, net), 'test_vgg16_orthogonal.pkl'))\n \n #(sensor, knet) = vipy.util.load('test_vgg16_orthogonal.pkl')\n yh = knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten()\n y = net.forward(x).detach().numpy().flatten() \n print(y)\n print(yh)\n assert np.allclose(yh, y, atol=1E-3)\n #assert np.allclose(knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten(), net.forward(x).detach().numpy().flatten(), atol=1E-5)\n print('vgg16: keynet-orthogonal-56 num parameters=%d' % knet.num_parameters())\n\ndef test_vgg16_orthogonal_8():\n inshape = (3,224,224)\n x = torch.randn(1, *inshape)\n net = keynet.vgg.VGG16()\n print('vgg16: num parameters=%d' % keynet.torch.count_parameters(net))\n\n 
(sensor, knet) = keynet.system.Keynet(inshape, net, tileshape=(224//8, 224//8), \n global_geometric='identity', hierarchical_blockshape=(2,2), hierarchical_permute_at_level=(0,1,2),\n local_geometric='givens_orthogonal', alpha=2.0, blocksize=224//8,\n local_photometric='uniform_random_affine', beta=1.0, gamma=1.0,\n memoryorder='channel')\n print(vipy.util.save((sensor, knet, net), 'test_vgg16_orthogonal_8.pkl'))\n \n #(sensor, knet) = vipy.util.load('test_vgg16_orthogonal_4.pkl')\n yh = knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten()\n y = net.forward(x).detach().numpy().flatten() \n print(y)\n print(yh)\n assert np.allclose(yh, y, atol=1E-3)\n #assert np.allclose(knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten(), net.forward(x).detach().numpy().flatten(), atol=1E-5)\n print('vgg16: keynet-orthogonal-56 num parameters=%d' % knet.num_parameters())\n\n\ndef test_lenet_orthogonal():\n inshape = (1,28,28)\n x = torch.randn(1, *inshape)\n net = keynet.mnist.LeNet_AvgPool()\n print('lenet: num parameters=%d' % keynet.torch.count_parameters(net))\n\n (sensor, knet) = keynet.system.Keynet(inshape, net, tileshape=None, \n global_geometric='hierarchical_rotation', hierarchical_blockshape=(2,2), hierarchical_permute_at_level=(0),\n global_photometric='uniform_random_bias', \n local_geometric='givens_orthogonal', alpha=2.0, blocksize=8,\n local_photometric='uniform_random_affine', beta=1.0, gamma=1.0,\n memoryorder='block')\n\n print(vipy.util.save((sensor, knet), 'test_lenet_orthogonal.pkl'))\n yh = knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten()\n y = net.forward(x).detach().numpy().flatten() \n print(y)\n print(yh)\n assert np.allclose(y, yh, atol=1E-5)\n print('lenet: keynet-orthogonal-8 num parameters=%d' % knet.num_parameters())\n\n\ndef test_lenet_orthogonal_tiled():\n inshape = (1,28,28)\n x = torch.randn(1, *inshape)\n net = keynet.mnist.LeNet_AvgPool()\n 
print('lenet: num parameters=%d' % keynet.torch.count_parameters(net))\n\n (sensor, knet) = keynet.system.Keynet(inshape, net, tileshape=(4,4), \n global_geometric='hierarchical_permutation', hierarchical_blockshape=(2,2), hierarchical_permute_at_level=(0,1),\n global_photometric='identity',\n local_geometric='givens_orthogonal', alpha=2.0, blocksize=4,\n local_photometric='uniform_random_affine', beta=1.0, gamma=1.0,\n memoryorder='block')\n\n yh = knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten()\n y = net.forward(x).detach().numpy().flatten()\n \n print(y)\n print(yh)\n assert np.allclose(y, yh, atol=1E-5)\n print('lenet-keyed: orthogonal-tiled-4 num parameters=%d' % knet.num_parameters())\n\n\ndef test_allconvnet_orthogonal_tiled():\n inshape = (3,32,32)\n x = torch.randn(1, *inshape)\n net = keynet.cifar10.AllConvNet(batchnorm=False)\n net.eval() \n print('allconvnet: num parameters=%d' % keynet.torch.count_parameters(net))\n\n (sensor, knet) = keynet.system.Keynet(inshape, net, tileshape=(8,8), \n global_geometric='hierarchical_permutation', hierarchical_blockshape=(2,2), hierarchical_permute_at_level=(0,1),\n global_photometric='identity',\n local_geometric='givens_orthogonal', alpha=8, blocksize=8,\n local_photometric='uniform_random_affine', beta=1.0, gamma=1.0,\n memoryorder='block')\n\n yh = knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten()\n y = net.forward(x).detach().numpy().flatten() \n print(y, yh)\n assert np.allclose(y, yh, atol=1E-5)\n print('allconvnet-keyed: orthogonal-tiled-8 num parameters=%d' % knet.num_parameters())\n\n\ndef test_allconvnet_identity(tiled=False):\n inshape = (3,32,32)\n x = torch.randn(1, *inshape)\n net = keynet.cifar10.AllConvNet(batchnorm=True)\n net.eval() \n print('allconvnet: num parameters=%d' % keynet.torch.count_parameters(net))\n\n (sensor, knet) = keynet.system.Keynet(inshape, net, tileshape=None if not tiled else (8,8), \n 
global_geometric='identity', hierarchical_blockshape=(2,2), hierarchical_permute_at_level=(0,1),\n global_photometric='identity',\n local_geometric='identity', alpha=2.0, blocksize=8,\n local_photometric='identity', beta=1.0, gamma=1.0,\n memoryorder='channel')\n\n yh = knet.forward(sensor.fromtensor(x).encrypt().astensor()).detach().numpy().flatten()\n y = net.forward(x).detach().numpy().flatten() \n print(y, yh)\n assert np.allclose(y, yh, atol=1E-5)\n print('allconvnet-keyed: identity%s num parameters=%d' % ('-tiled' if tiled else '', knet.num_parameters()))\n\n\nif __name__ == '__main__':\n \n if len(sys.argv) == 1:\n test_tiled_keynet()\n test_identity_keynet()\n test_permutation_keynet()\n test_photometric_keynet()\n\n test_lenet_orthogonal()\n test_lenet_orthogonal_tiled()\n\n test_allconvnet_identity()\n test_allconvnet_identity(tiled=True)\n test_allconvnet_orthogonal_tiled()\n\n elif sys.argv[1] == 'vgg16-identity-tiled':\n test_vgg16_identity_tiled()\n elif sys.argv[1] == 'vgg16-identity':\n test_vgg16_identity()\n elif sys.argv[1] == 'vgg16-orthogonal-8':\n test_vgg16_orthogonal_8()\n elif sys.argv[1] == 'lenet-orthogonal-tiled':\n test_lenet_orthogonal_tiled()\n elif sys.argv[1] == 'lenet-orthogonal':\n test_lenet_orthogonal()\n elif sys.argv[1] == 'allconvnet-orthogonal-tiled':\n test_allconvnet_orthogonal_tiled()\n elif sys.argv[1] == 'allconvnet-identity':\n test_allconvnet_identity()\n elif sys.argv[1] == 'allconvnet-identity-tiled':\n test_allconvnet_identity(tiled=True)\n else:\n raise ValueError('unknown option \"%s\"' % sys.argv[1])\n\n" ]
[ [ "torch.nn.Linear", "numpy.array", "torch.nn.Dropout", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "numpy.rollaxis", "torch.from_numpy", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.load" ], [ "numpy.allclose", "torch.randn", "torch.load" ] ]
herrmannlab/highdicom
[ "95f39dd722ae6d357af3f942e2130d0ff4d68bfc" ]
[ "src/highdicom/pr/content.py" ]
[ "\"\"\"Data Elements that are specific to the Presentation State IODs.\"\"\"\nimport datetime\nimport logging\nfrom collections import defaultdict\nfrom io import BytesIO\n\nimport numpy as np\nfrom PIL.ImageCms import ImageCmsProfile\nfrom pydicom.dataset import Dataset\nfrom pydicom.sr.coding import Code\nfrom pydicom.multival import MultiValue\nfrom pydicom.valuerep import DA, PersonName, TM\nfrom typing import Optional, Union, Sequence, Tuple\n\nfrom highdicom.color import CIELabColor\nfrom highdicom.content import (\n ContentCreatorIdentificationCodeSequence,\n ModalityLUTTransformation,\n PaletteColorLUTTransformation,\n PresentationLUTTransformation,\n ReferencedImageSequence,\n VOILUT,\n VOILUTTransformation,\n)\nfrom highdicom.enum import (\n RescaleTypeValues,\n VOILUTFunctionValues,\n)\nfrom highdicom.pr.enum import (\n AnnotationUnitsValues,\n BlendingModeValues,\n GraphicTypeValues,\n TextJustificationValues,\n)\nfrom highdicom.sr.coding import CodedConcept\nfrom highdicom.uid import UID\nfrom highdicom.utils import is_tiled_image\nfrom highdicom.valuerep import (\n check_person_name,\n _check_code_string,\n _check_long_string,\n _check_short_text\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass GraphicLayer(Dataset):\n\n \"\"\"A layer of graphic annotations that should be rendered together.\"\"\"\n\n def __init__(\n self,\n layer_name: str,\n order: int,\n description: Optional[str] = None,\n display_color: Optional[CIELabColor] = None\n ):\n \"\"\"\n\n Parameters\n ----------\n layer_name: str\n Name for the layer. 
Should be a valid DICOM Code String (CS), i.e.\n 16 characters or fewer containing only uppercase letters, spaces\n and underscores.\n order: int\n Integer indicating the order in which this layer should be rendered.\n Lower values are rendered first.\n description: Union[str, None], optional\n A description of the contents of this graphic layer.\n display_color: Union[CIELabColor, None], optional\n A default color value for rendering this layer.\n\n \"\"\"\n super().__init__()\n _check_code_string(layer_name)\n self.GraphicLayer = layer_name\n if not isinstance(order, int):\n raise TypeError('\"order\" must be an integer.')\n self.GraphicLayerOrder = order\n if description is not None:\n _check_long_string(description)\n self.GraphicLayerDescription = description\n if display_color is not None:\n if not isinstance(display_color, CIELabColor):\n raise TypeError(\n '\"recommended_display_color\" must be of type '\n 'highdicom.color.CIELabColor.'\n )\n self.GraphicLayerRecommendedDisplayCIELabValue = list(\n display_color.value\n )\n\n\nclass GraphicGroup(Dataset):\n\n \"\"\"Dataset describing a grouping of annotations.\n\n Note\n ----\n ``GraphicGroup`` s represent an independent concept from ``GraphicLayer``\n s. 
Where a ``GraphicLayer`` (:class:`highdicom.pr.GraphicLayer`) specifies\n which annotations are rendered first, a ``GraphicGroup`` specifies which\n annotations belong together and shall be handled together (e.g., rotate,\n move) independent of the ``GraphicLayer`` to which they are assigned.\n\n Each annotation (:class:`highdicom.pr.GraphicObject` or\n :class:`highdicom.pr.TextObject`) may optionally be assigned to a single\n ``GraphicGroup`` upon construction, whereas assignment to a\n :class:`highdicom.pr.GraphicLayer` is required.\n\n For example, suppose a presentation state is to include two\n ``GraphicObject`` s, each accompanied by a corresponding ``TextObject`` that\n indicates the meaning of the graphic and should be rendered above the\n ``GraphicObject`` if they overlap. In this situation, it may be useful to\n group each ``TextObject`` with the corresponding ``GraphicObject`` as a\n distinct ``GraphicGroup`` (giving two ``GraphicGroup`` s each containing one\n ``TextObject`` and one ``GraphicObject``) and also place both\n ``GraphicObject`` s in one ``GraphicLayer`` and both ``TextObject`` s in a\n second ``GraphicLayer`` with a higher ``order`` to control rendering.\n\n \"\"\"\n def __init__(\n self,\n graphic_group_id: int,\n label: str,\n description: Optional[str] = None\n ):\n \"\"\"\n\n Parameters\n ----------\n graphic_group_id: int\n A positive integer that uniquely identifies this graphic group.\n label: str\n Name used to identify the Graphic Group (maximum 64 characters).\n description: Union[str, None], optional\n Description of the group (maxiumum 10240 characters).\n\n \"\"\"\n super().__init__()\n if not isinstance(graphic_group_id, int):\n raise TypeError(\n 'Argument \"graphic_group_id\" must be an integer.'\n )\n if graphic_group_id < 1:\n raise ValueError(\n 'Argument \"graphic_group_id\" must be a positive integer.'\n )\n self.GraphicGroupID = graphic_group_id\n _check_long_string(label)\n self.GraphicGroupLabel = label\n if 
description is not None:\n _check_short_text(description)\n self.GraphicGroupDescription = description\n\n @property\n def graphic_group_id(self) -> int:\n \"\"\"int: The ID of the graphic group.\"\"\"\n return self.GraphicGroupID\n\n\nclass GraphicObject(Dataset):\n\n \"\"\"Dataset describing a graphic annotation object.\"\"\"\n\n def __init__(\n self,\n graphic_type: Union[GraphicTypeValues, str],\n graphic_data: np.ndarray,\n units: Union[AnnotationUnitsValues, str],\n is_filled: bool = False,\n tracking_id: Optional[str] = None,\n tracking_uid: Optional[str] = None,\n graphic_group: Optional[GraphicGroup] = None,\n ):\n \"\"\"\n\n Parameters\n ----------\n graphic_type: Union[highdicom.pr.GraphicTypeValues, str]\n Type of the graphic data.\n graphic_data: numpy.ndarray\n Graphic data contained in a 2D NumPy array. The shape of the array\n should be (N, 2), where N is the number of 2D points in this\n graphic object. Each row of the array therefore describes a\n (column, row) value for a single 2D point, and the interpretation\n of the points depends upon the graphic type. See\n :class:`highdicom.pr.enum.GraphicTypeValues` for details.\n units: Union[highdicom.pr.AnnotationUnitsValues, str]\n The units in which each point in graphic data is expressed.\n is_filled: bool, optional\n Whether the graphic object should be rendered as a solid shape\n (``True``), or just an outline (``False``). 
Using ``True`` is only\n valid when the graphic type is ``'CIRCLE'`` or ``'ELLIPSE'``, or\n the graphic type is ``'INTERPOLATED'`` or ``'POLYLINE'`` and the\n first and last points are equal giving a closed shape.\n tracking_id: str, optional\n User defined text identifier for tracking this finding or feature.\n Shall be unique within the domain in which it is used.\n tracking_uid: str, optional\n Unique identifier for tracking this finding or feature.\n graphic_group: Union[highdicom.pr.GraphicGroup, None]\n Graphic group to which this annotation belongs.\n\n \"\"\"\n super().__init__()\n\n self.GraphicDimensions = 2\n graphic_type = GraphicTypeValues(graphic_type)\n self.GraphicType = graphic_type.value\n units = AnnotationUnitsValues(units)\n self.GraphicAnnotationUnits = units.value\n\n if not isinstance(graphic_data, np.ndarray):\n raise TypeError('Argument \"graphic_data\" must be a numpy array.')\n if graphic_data.ndim != 2:\n raise ValueError('Argument \"graphic_data\" must be a 2D array.')\n if graphic_data.shape[1] != 2:\n raise ValueError(\n 'Argument \"graphic_data\" must be an array of shape (N, 2).'\n )\n num_points = graphic_data.shape[0]\n self.NumberOfGraphicPoints = num_points\n\n if graphic_type == GraphicTypeValues.POINT:\n if num_points != 1:\n raise ValueError(\n 'Graphic data of type \"POINT\" '\n 'must be a single (column, row)'\n 'pair.'\n )\n if is_filled:\n raise ValueError(\n 'Setting \"is_filled\" to True is invalid when using a '\n '\"POINT\" graphic type.'\n )\n elif graphic_type == GraphicTypeValues.CIRCLE:\n if num_points != 2:\n raise ValueError(\n 'Graphic data of type \"CIRCLE\" '\n 'must be two (column, row) pairs.'\n )\n elif graphic_type == GraphicTypeValues.ELLIPSE:\n if num_points != 4:\n raise ValueError(\n 'Graphic data of type \"ELLIPSE\" '\n 'must be four (column, row) pairs.'\n )\n elif graphic_type in (\n GraphicTypeValues.POLYLINE,\n GraphicTypeValues.INTERPOLATED,\n ):\n if num_points < 2:\n raise ValueError(\n 
'Graphic data of type \"POLYLINE\" or \"INTERPOLATED\" '\n 'must be two or more (column, row) pairs.'\n )\n if is_filled:\n if not np.array_equal(graphic_data[0, :], graphic_data[-1, :]):\n raise ValueError(\n 'Setting \"is_filled\" to True when using a '\n '\"POLYLINE\" or \"INTERPOLATED\" graphic type requires '\n 'that the first and last points are equal, '\n 'i.e., that the graphic has a closed contour. '\n )\n if (\n units == AnnotationUnitsValues.PIXEL or\n units == AnnotationUnitsValues.MATRIX\n ):\n if graphic_data.min() < 0.0:\n raise ValueError('Graphic data must be non-negative.')\n elif units == AnnotationUnitsValues.DISPLAY:\n if graphic_data.min() < 0.0 or graphic_data.max() > 1.0:\n raise ValueError(\n 'Graphic data must be in the range 0.0 to 1.0 when using '\n '\"DISPLAY\" units.'\n )\n self.GraphicData = graphic_data.flatten().tolist()\n self.GraphicFilled = 'Y' if is_filled else 'N'\n\n if (tracking_id is None) != (tracking_uid is None):\n raise TypeError(\n 'If either \"tracking_id\" or \"tracking_uid\" is provided, the '\n 'other must also be provided.'\n )\n if tracking_id is not None:\n self.TrackingID = tracking_id\n self.TrackingUID = tracking_uid\n\n if graphic_group is not None:\n if not isinstance(graphic_group, GraphicGroup):\n raise TypeError(\n 'Argument \"graphic_group\" should be of type '\n 'highdicom.pr.GraphicGroup.'\n )\n self.GraphicGroupID = graphic_group.graphic_group_id\n\n @property\n def graphic_data(self) -> np.ndarray:\n \"\"\"numpy.ndarray: n x 2 array of 2D coordinates\"\"\"\n return np.array(self.GraphicData).reshape(-1, 2)\n\n @property\n def graphic_type(self) -> GraphicTypeValues:\n \"\"\"highdicom.pr.GraphicTypeValues: graphic type\"\"\"\n return GraphicTypeValues(self.GraphicType)\n\n @property\n def units(self) -> AnnotationUnitsValues:\n \"\"\"highdicom.pr.AnnotationUnitsValues: annotation units\"\"\"\n return AnnotationUnitsValues(self.GraphicAnnotationUnits)\n\n @property\n def tracking_id(self) -> Union[str, 
None]:\n \"\"\"Union[str, None]: tracking identifier\"\"\"\n return getattr(self, 'TrackingID', None)\n\n @property\n def tracking_uid(self) -> Union[UID, None]:\n \"\"\"Union[highdicom.UID, None]: tracking UID\"\"\"\n if hasattr(self, 'TrackingUID'):\n return UID(self.TrackingUID)\n return None\n\n @property\n def graphic_group_id(self) -> Union[int, None]:\n \"\"\"Union[int, None]: The ID of the graphic group, if any.\"\"\"\n return getattr(self, 'GraphicGroupID', None)\n\n\nclass TextObject(Dataset):\n\n \"\"\"Dataset describing a text annotation object.\"\"\"\n\n def __init__(\n self,\n text_value: str,\n units: Union[AnnotationUnitsValues, str],\n bounding_box: Optional[Tuple[float, float, float, float]] = None,\n anchor_point: Optional[Tuple[float, float]] = None,\n text_justification: Union[\n TextJustificationValues, str\n ] = TextJustificationValues.CENTER,\n anchor_point_visible: bool = True,\n tracking_id: Optional[str] = None,\n tracking_uid: Optional[str] = None,\n graphic_group: Optional[GraphicGroup] = None,\n ):\n \"\"\"\n\n Parameters\n ----------\n text_value: str\n The unformatted text value.\n units: Union[highdicom.pr.AnnotationUnitsValues, str]\n The units in which the coordinates of the bounding box and/or\n anchor point are expressed.\n bounding_box: Union[Tuple[float, float, float, float], None], optional\n Coordinates of the bounding box in which the text should be\n displayed, given in the following order [left, top, right, bottom],\n where 'left' and 'right' are the horizontal offsets of the left and\n right sides of the box, respectively, and 'top' and 'bottom' are\n the vertical offsets of the upper and lower sides of the box.\n anchor_point: Union[Tuple[float, float], None], optional\n Location of a point in the image to which the text value is related,\n given as a (Column, Row) pair.\n anchor_point_visible: bool, optional\n Whether the relationship between the anchor point and the text\n should be displayed in the image, for example 
via a line or arrow.\n This parameter is ignored if the anchor_point is not provided.\n tracking_id: str, optional\n User defined text identifier for tracking this finding or feature.\n Shall be unique within the domain in which it is used.\n tracking_uid: str, optional\n Unique identifier for tracking this finding or feature.\n graphic_group: Union[highdicom.pr.GraphicGroup, None], optional\n Graphic group to which this annotation belongs.\n\n Note\n ----\n Either the ``anchor_point`` or the ``bounding_box`` parameter (or both)\n must be provided to localize the text in the image.\n\n \"\"\"\n super().__init__()\n _check_short_text(text_value)\n self.UnformattedTextValue = text_value\n\n units = AnnotationUnitsValues(units)\n\n if bounding_box is None and anchor_point is None:\n raise TypeError(\n 'Either an anchor point or a bounding box (or both) must be '\n 'specified.'\n )\n\n if bounding_box is not None:\n if len(bounding_box) != 4:\n raise ValueError('Bounding box must contain four values.')\n if min(bounding_box) < 0.0:\n raise ValueError(\n 'All coordinates in the bounding box must be non-negative.'\n )\n if (\n bounding_box[0] >= bounding_box[2] or\n bounding_box[1] >= bounding_box[3]\n ):\n raise ValueError(\n 'The bottom right hand corner of the bounding box must be '\n 'below and to the right of the top left hand corner.'\n )\n self.BoundingBoxTopLeftHandCorner = list(bounding_box[:2])\n self.BoundingBoxBottomRightHandCorner = list(bounding_box[2:])\n text_justification = TextJustificationValues(text_justification)\n self.BoundingBoxTextHorizontalJustification = \\\n text_justification.value\n self.BoundingBoxAnnotationUnits = units.value\n if units == AnnotationUnitsValues.DISPLAY:\n if max(bounding_box) > 1.0:\n raise ValueError(\n 'All coordinates in the bounding box must be less '\n 'than or equal to 1 when using DISPLAY units.'\n )\n\n if anchor_point is not None:\n if len(anchor_point) != 2:\n raise ValueError('Anchor point must contain two 
values.')\n if min(anchor_point) < 0.0:\n raise ValueError(\n 'All coordinates in the bounding box must be non-negative.'\n )\n self.AnchorPoint = anchor_point\n self.AnchorPointAnnotationUnits = units.value\n self.AnchorPointVisibility = 'Y' if anchor_point_visible else 'N'\n if units == AnnotationUnitsValues.DISPLAY:\n if max(anchor_point) > 1.0:\n raise ValueError(\n 'All coordinates in the anchor point must be less '\n 'than or equal to 1 when using DISPLAY units.'\n )\n\n if (tracking_id is None) != (tracking_uid is None):\n raise TypeError(\n 'If either \"tracking_id\" or \"tracking_uid\" is provided, the '\n 'other must also be provided.'\n )\n if tracking_id is not None:\n self.TrackingID = tracking_id\n self.TrackingUID = tracking_uid\n\n if graphic_group is not None:\n if not isinstance(graphic_group, GraphicGroup):\n raise TypeError(\n 'Argument \"graphic_group\" should be of type '\n 'highdicom.pr.GraphicGroup.'\n )\n self.GraphicGroupID = graphic_group.graphic_group_id\n\n @property\n def text_value(self) -> str:\n \"\"\"str: unformatted text value\"\"\"\n return self.UnformattedTextValue\n\n @property\n def bounding_box(self) -> Union[Tuple[float, float, float, float], None]:\n \"\"\"Union[Tuple[float, float, float, float], None]:\n bounding box in the format [left, top, right, bottom]\n\n \"\"\"\n if not hasattr(self, 'BoundingBoxTopLeftHandCorner'):\n return None\n return tuple(self.BoundingBoxTopLeftHandCorner) + tuple(\n self.BoundingBoxBottomRightHandCorner\n )\n\n @property\n def anchor_point(self) -> Union[Tuple[float, float], None]:\n \"\"\"Union[Tuple[float, float], None]:\n anchor point as a (Row, Column) pair of image coordinates\n\n \"\"\"\n if not hasattr(self, 'AnchorPoint'):\n return None\n return tuple(self.AnchorPoint)\n\n @property\n def units(self) -> AnnotationUnitsValues:\n \"\"\"highdicom.pr.AnnotationUnitsValues: annotation units\"\"\"\n if hasattr(self, 'BoundingBoxAnnotationUnits'):\n return 
AnnotationUnitsValues(self.BoundingBoxAnnotationUnits)\n return AnnotationUnitsValues(self.AnchorPointAnnotationUnits)\n\n @property\n def tracking_id(self) -> Union[str, None]:\n \"\"\"Union[str, None]: tracking identifier\"\"\"\n return getattr(self, 'TrackingID', None)\n\n @property\n def tracking_uid(self) -> Union[UID, None]:\n \"\"\"Union[highdicom.UID, None]: tracking UID\"\"\"\n if hasattr(self, 'TrackingUID'):\n return UID(self.TrackingUID)\n return None\n\n @property\n def graphic_group_id(self) -> Union[int, None]:\n \"\"\"Union[int, None]: The ID of the graphic group, if any.\"\"\"\n return getattr(self, 'GraphicGroupID', None)\n\n\nclass GraphicAnnotation(Dataset):\n\n \"\"\"Dataset describing related graphic and text objects.\"\"\"\n\n def __init__(\n self,\n referenced_images: Sequence[Dataset],\n graphic_layer: GraphicLayer,\n referenced_frame_number: Union[int, Sequence[int], None] = None,\n referenced_segment_number: Union[int, Sequence[int], None] = None,\n graphic_objects: Optional[Sequence[GraphicObject]] = None,\n text_objects: Optional[Sequence[TextObject]] = None,\n ):\n \"\"\"\n Parameters\n ----------\n referenced_images: Sequence[pydicom.dataset.Dataset]\n Sequence of referenced datasets. 
Graphic and text objects shall be\n rendered on all images in this list.\n graphic_layer: highdicom.pr.GraphicLayer\n Graphic layer to which this annotation should belong.\n referenced_frame_number: Union[int, Sequence[int], None], optional\n Frame number(s) in a multiframe image upon which annotations shall\n be rendered.\n referenced_segment_number: Union[int, Sequence[int], None], optional\n Frame number(s) in a multi-frame image upon which annotations shall\n be rendered.\n graphic_objects: Union[Sequence[highdicom.pr.GraphicObject], None], optional\n Graphic objects to render over the referenced images.\n text_objects: Union[Sequence[highdicom.pr.TextObject], None], optional\n Text objects to render over the referenced images.\n\n \"\"\" # noqa: E501\n super().__init__()\n if len(referenced_images) == 0:\n raise ValueError('List of referenced images must not be empty.')\n referenced_series_uid = referenced_images[0].SeriesInstanceUID\n rows = referenced_images[0].Rows\n columns = referenced_images[0].Columns\n if not isinstance(graphic_layer, GraphicLayer):\n raise TypeError(\n 'Argument \"graphic_layer\" should be of type '\n 'highdicom.pr.GraphicLayer.'\n )\n self.GraphicLayer = graphic_layer.GraphicLayer\n\n is_multiframe = hasattr(referenced_images[0], 'NumberOfFrames')\n if is_multiframe and len(referenced_images) > 1:\n raise ValueError(\n 'If referenced images are multi-frame, only a single image '\n 'should be passed.'\n )\n if is_multiframe:\n if (\n referenced_frame_number is not None and\n referenced_segment_number is not None\n ):\n raise TypeError(\n 'At most one of \"referenced_frame_number\" or '\n '\"referenced_segment_number\" should be provided.'\n )\n for ref_im in referenced_images:\n if ref_im.SeriesInstanceUID != referenced_series_uid:\n raise ValueError(\n 'All referenced images must belong to the same series.'\n )\n if not is_tiled_image(ref_im):\n if ref_im.Columns != columns or ref_im.Rows != rows:\n raise ValueError(\n 'All 
referenced images must have the same number '\n 'of rows and columns.'\n )\n self.ReferencedImageSequence = ReferencedImageSequence(\n referenced_images=referenced_images,\n referenced_frame_number=referenced_frame_number,\n referenced_segment_number=referenced_segment_number\n )\n\n have_graphics = graphic_objects is not None and len(graphic_objects) > 0\n have_text = text_objects is not None and len(text_objects) > 0\n if not have_graphics and not have_text:\n raise TypeError(\n 'Either \"graphic_objects\" or \"text_objects\" must contain at '\n 'least one item.'\n )\n if have_graphics:\n for go in graphic_objects:\n if not isinstance(go, GraphicObject):\n raise TypeError(\n 'All items in \"graphic_objects\" must be of type '\n 'highdicom.pr.GraphicObject'\n )\n if go.units == AnnotationUnitsValues.MATRIX:\n if not is_tiled_image(referenced_images[0]):\n raise ValueError(\n 'Graphic Objects may only use MATRIX units if the '\n 'referenced images are tiled images. '\n )\n self._check_coords(\n go.graphic_data,\n referenced_images[0],\n go.units,\n )\n self.GraphicObjectSequence = graphic_objects\n if have_text:\n for to in text_objects:\n if not isinstance(to, TextObject):\n raise TypeError(\n 'All items in text_objects must be of type '\n 'highdicom.pr.TextObject'\n )\n if to.units == AnnotationUnitsValues.MATRIX:\n if not is_tiled_image(referenced_images[0]):\n raise ValueError(\n 'Text Objects may only use MATRIX units if the '\n 'referenced images are tiled images. 
'\n )\n if to.bounding_box is not None:\n graphic_data = np.array(to.bounding_box).reshape((2, 2))\n self._check_coords(\n graphic_data,\n referenced_images[0],\n to.units,\n )\n if to.anchor_point is not None:\n graphic_data = np.array(to.anchor_point).reshape((1, 2))\n self._check_coords(\n graphic_data,\n referenced_images[0],\n to.units,\n )\n self.TextObjectSequence = text_objects\n\n @staticmethod\n def _check_coords(\n graphic_data: np.ndarray,\n referenced_image: Dataset,\n units: AnnotationUnitsValues,\n ) -> None:\n \"\"\"Check whether graphic data is valid for an image.\n\n Parameters\n ----------\n graphic_data: np.ndarray\n Graphic data as stored within a GraphicObject.\n referenced_image: pydicom.Dataset\n Image to which the graphic data refers.\n units: highdicom.pr.AnnotationUnitsValues\n Units in which the graphic data are expressed.\n\n Raises\n ------\n ValueError:\n Raises an exception if any value in graphic_data is outside the\n valid range of coordinates for referenced_image when using the\n units specified by the units parameter.\n\n \"\"\"\n min_col = graphic_data[:, 1].min()\n max_col = graphic_data[:, 0].max()\n min_row = graphic_data[:, 1].min()\n max_row = graphic_data[:, 1].max()\n\n if units == AnnotationUnitsValues.DISPLAY:\n col_limit = 1.0\n row_limit = 1.0\n col_limit_msg = '1.0'\n row_limit_msg = '1.0'\n elif units == AnnotationUnitsValues.PIXEL:\n col_limit = float(referenced_image.Columns)\n row_limit = float(referenced_image.Rows)\n col_limit_msg = 'Columns'\n row_limit_msg = 'Rows'\n elif units == AnnotationUnitsValues.MATRIX:\n col_limit = float(referenced_image.TotalPixelMatrixColumns)\n row_limit = float(referenced_image.TotalPixelMatrixRows)\n col_limit_msg = 'TotalPixelMatrixColumns'\n row_limit_msg = 'TotalPixelMatrixRows'\n\n if (\n min_col < 0.0 or\n min_row < 0.0 or\n max_col > col_limit or\n max_row > row_limit\n ):\n raise ValueError(\n 'Found graphic data outside the valid range within one or '\n 'more 
GraphicObjects or TextObjects. When using units '\n f'of type {units.value}, all column coordinates must lie in '\n f'the range 0.0 to {col_limit_msg} and all row coordinates '\n f'must lie in the range 0.0 to {row_limit_msg}.'\n )\n\n\nclass SoftcopyVOILUTTransformation(VOILUTTransformation):\n\n \"\"\"Dataset describing the VOI LUT Transformation as part of the Pixel\n Transformation Sequence to transform the modality pixel values into\n pixel values that are of interest to a user or an application.\n\n The description is specific to the application of the VOI LUT\n Transformation in the context of a Softcopy Presentation State, where\n potentially only a subset of explicitly referenced images should be\n transformed.\n\n \"\"\"\n\n def __init__(\n self,\n window_center: Union[float, Sequence[float], None] = None,\n window_width: Union[float, Sequence[float], None] = None,\n window_explanation: Union[str, Sequence[str], None] = None,\n voi_lut_function: Union[VOILUTFunctionValues, str, None] = None,\n voi_luts: Optional[Sequence[VOILUT]] = None,\n referenced_images: Optional[ReferencedImageSequence] = None,\n ):\n \"\"\"\n\n Parameters\n ----------\n window_center: Union[float, Sequence[float], None], optional\n Center value of the intensity window used for display.\n window_width: Union[float, Sequence[float], None], optional\n Width of the intensity window used for display.\n window_explanation: Union[str, Sequence[str], None], optional\n Free-form explanation of the window center and width.\n voi_lut_function: Union[highdicom.VOILUTFunctionValues, str, None], optional\n Description of the LUT function parametrized by ``window_center``.\n and ``window_width``.\n voi_luts: Union[Sequence[highdicom.VOILUT], None], optional\n Intensity lookup tables used for display.\n referenced_images: Union[highdicom.ReferencedImageSequence, None], optional\n Images to which the VOI LUT Transformation described in this\n dataset applies. 
Note that if unspecified, the VOI LUT\n Transformation applies to every frame of every image referenced in\n the presentation state object that this dataset is included in.\n\n Note\n ----\n Either ``window_center`` and ``window_width`` should be provided or\n ``voi_luts`` should be provided, or both. ``window_explanation`` should\n only be provided if ``window_center`` is provided.\n\n \"\"\" # noqa: E501\n super().__init__(\n window_center=window_center,\n window_width=window_width,\n window_explanation=window_explanation,\n voi_lut_function=voi_lut_function,\n voi_luts=voi_luts\n )\n if referenced_images is not None:\n if not isinstance(referenced_images, ReferencedImageSequence):\n raise TypeError(\n 'Argument \"referenced_images\" must be of type '\n 'highdicom.ReferencedImageSequence.'\n )\n self.ReferencedImageSequence = referenced_images\n\n\ndef _add_equipment_attributes(\n dataset: Dataset,\n manufacturer: str,\n manufacturer_model_name: str,\n software_versions: Union[str, Tuple[str]],\n device_serial_number: str,\n institution_name: Optional[str] = None,\n institutional_department_name: Optional[str] = None,\n) -> None:\n \"\"\"Add attributes of module General Equipment.\n\n Parameters\n ----------\n dataset: pydicom.Dataset\n Dataset to which attributes should be added\n manufacturer: str\n Name of the manufacturer of the device (developer of the software)\n that creates the instance\n manufacturer_model_name: str\n Name of the device model (name of the software library or\n application) that creates the instance\n software_versions: Union[str, Tuple[str]]\n Version(s) of the software that creates the instance\n device_serial_number: Union[str, None]\n Manufacturer's serial number of the device\n institution_name: Union[str, None], optional\n Name of the institution of the person or device that creates the\n SR document instance.\n institutional_department_name: Union[str, None], optional\n Name of the department of the person or device that creates 
the\n SR document instance.\n\n \"\"\"\n dataset.Manufacturer = manufacturer\n if institution_name is not None:\n dataset.InstitutionName = institution_name\n if institutional_department_name is not None:\n dataset.InstitutionalDepartmentName = institutional_department_name\n dataset.DeviceSerialNumber = device_serial_number\n dataset.ManufacturerModelName = manufacturer_model_name\n dataset.SoftwareVersions = software_versions\n\n\ndef _add_presentation_state_identification_attributes(\n dataset: Dataset,\n content_label: str,\n content_description: Optional[str] = None,\n concept_name: Union[Code, CodedConcept, None] = None,\n content_creator_name: Optional[Union[str, PersonName]] = None,\n content_creator_identification: Optional[\n ContentCreatorIdentificationCodeSequence\n ] = None,\n) -> None:\n \"\"\"Add attributes of module Presentation State Identification.\n\n Parameters\n ----------\n dataset: pydicom.Dataset\n Dataset to which attributes should be added\n content_label: str\n A label used to describe the content of this presentation state.\n Must be a valid DICOM code string consisting only of capital\n letters, underscores and spaces.\n content_description: Union[str, None], optional\n Description of the content of this presentation state.\n concept_name: Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept], optional\n A coded description of the content of this presentation state.\n content_creator_name: Union[str, pydicom.valuerep.PersonName, None], optional\n Name of the person who created the content of this presentation\n state.\n content_creator_identification: Union[highdicom.ContentCreatorIdentificationCodeSequence, None], optional\n Identifying information for the person who created the content of\n this presentation state.\n\n \"\"\" # noqa: E501\n _check_code_string(content_label)\n dataset.ContentLabel = content_label\n if content_description is not None:\n if len(content_description) > 64:\n raise ValueError(\n 'Argument 
\"content_description\" must not exceed 64 characters.'\n )\n dataset.ContentDescription = content_description\n now = datetime.datetime.now()\n dataset.PresentationCreationDate = DA(now.date())\n dataset.PresentationCreationTime = TM(now.time())\n\n if concept_name is not None:\n if not isinstance(concept_name, (Code, CodedConcept)):\n raise TypeError(\n 'Argument \"concept_name\" should be of type '\n 'pydicom.sr.coding.Code or '\n 'highdicom.sr.CodedConcept.'\n )\n dataset.ConceptNameCodeSequence = [\n CodedConcept(\n concept_name.value,\n concept_name.scheme_designator,\n concept_name.meaning,\n concept_name.scheme_version\n )\n ]\n\n if content_creator_name is not None:\n check_person_name(content_creator_name)\n dataset.ContentCreatorName = content_creator_name\n\n if content_creator_identification is not None:\n if not isinstance(\n content_creator_identification,\n ContentCreatorIdentificationCodeSequence\n ):\n raise TypeError(\n 'Argument \"content_creator_identification\" must be of type '\n 'ContentCreatorIdentificationCodeSequence.'\n )\n dataset.ContentCreatorIdentificationCodeSequence = \\\n content_creator_identification\n\n # Not technically part of PR IODs, but we include anyway\n now = datetime.datetime.now()\n dataset.ContentDate = DA(now.date())\n dataset.ContentTime = TM(now.time())\n\n\ndef _add_presentation_state_relationship_attributes(\n dataset: Dataset,\n referenced_images: Sequence[Dataset]\n) -> None:\n \"\"\"Add attributes of module Presentation State Relationship.\n\n Also perform checks that the referenced images are suitable.\n\n Parameters\n ----------\n dataset: pydicom.Dataset\n Dataset to which attributes should be added\n referenced_images: Sequence[pydicom.Dataset]\n Images that should be referenced\n\n \"\"\"\n # Assert referenced images are from the same series and have the same size\n ref_im = referenced_images[0]\n ref_im_items_mapping = defaultdict(list)\n for im in referenced_images:\n if im.Rows != ref_im.Rows or 
im.Columns != ref_im.Columns:\n raise ValueError(\n 'All referenced images must have the same dimensions.'\n )\n item = Dataset()\n item.ReferencedSOPClassUID = im.SOPClassUID\n item.ReferencedSOPInstanceUID = im.SOPInstanceUID\n ref_im_items_mapping[im.SeriesInstanceUID].append(item)\n\n dataset.ReferencedSeriesSequence = []\n for series_instance_uid, ref_images in ref_im_items_mapping.items():\n item = Dataset()\n item.SeriesInstanceUID = series_instance_uid\n item.ReferencedImageSequence = ref_images\n dataset.ReferencedSeriesSequence.append(item)\n\n\ndef _add_graphic_group_annotation_layer_attributes(\n dataset: Dataset,\n referenced_images: Sequence[Dataset],\n graphic_groups: Optional[Sequence[GraphicGroup]] = None,\n graphic_annotations: Optional[Sequence[GraphicAnnotation]] = None,\n graphic_layers: Optional[Sequence[GraphicLayer]] = None\n) -> None:\n \"\"\"Add attributes of modules Graphic Group/Annotation/Layer.\n\n Parameters\n ----------\n dataset: pydicom.Dataset\n Dataset to which attributes should be added\n referenced_images: Sequence[pydicom.Dataset]\n Images that should be referenced\n graphic_groups: Union[Sequence[highdicom.pr.GraphicGroup], None], optional\n Description of graphic groups used in this presentation state.\n graphic_annotations: Union[Sequence[highdicom.pr.GraphicAnnotation], None], optional\n Graphic annotations to include in this presentation state.\n graphic_layers: Union[Sequence[highdicom.pr.GraphicLayer], None], optional\n Graphic layers to include in this presentation state. 
All graphic\n layers referenced in \"graphic_annotations\" must be included.\n\n \"\"\" # noqa: E501\n # Graphic Group\n group_ids = []\n if graphic_groups is not None:\n for grp in graphic_groups:\n if not isinstance(grp, GraphicGroup):\n raise TypeError(\n 'Items of \"graphic_groups\" must be of type '\n 'highdicom.pr.GraphicGroup.'\n )\n group_ids.append(grp.graphic_group_id)\n described_groups_ids = set(group_ids)\n if len(described_groups_ids) != len(group_ids):\n raise ValueError(\n 'Each item in \"graphic_groups\" must have a unique graphic '\n 'group ID.'\n )\n dataset.GraphicGroupSequence = graphic_groups\n else:\n described_groups_ids = set()\n\n # Graphic Annotation and Graphic Layer\n ref_images_lut = {\n (ds.SOPClassUID, ds.SOPInstanceUID): ds\n for ds in referenced_images\n }\n if graphic_layers is not None:\n labels = [layer.GraphicLayer for layer in graphic_layers]\n if len(labels) != len(set(labels)):\n raise ValueError(\n 'Labels of graphic layers must be unique.'\n )\n labels_unique = set(labels)\n dataset.GraphicLayerSequence = graphic_layers\n\n if graphic_annotations is not None:\n for i, ann in enumerate(graphic_annotations):\n if not isinstance(ann, GraphicAnnotation):\n raise TypeError(\n f'Item #{i} of \"graphic_annotations\" must be of type '\n 'highdicom.pr.GraphicAnnotation.'\n )\n if ann.GraphicLayer not in labels_unique:\n raise ValueError(\n f'Graphic layer with name \"{ann.GraphicLayer}\" is '\n f'referenced in item #{i} of \"graphic_annotations\", '\n 'but not included \"graphic_layers\".'\n )\n for item in ann.ReferencedImageSequence:\n uids = (\n item.ReferencedSOPClassUID,\n item.ReferencedSOPInstanceUID\n )\n if uids not in ref_images_lut:\n raise ValueError(\n f'Instance with SOP Instance UID {uids[1]} and '\n f'SOP Class UID {uids[0]} is referenced in item #{i} '\n f'of \"graphic_annotations\", but not included '\n 'in \"referenced_images\".'\n )\n for obj in getattr(ann, 'GraphicObjectSequence', []):\n grp_id = 
obj.graphic_group_id\n if grp_id is not None:\n if grp_id not in described_groups_ids:\n raise ValueError(\n 'Found graphic object with graphic group '\n f'ID \"{grp_id}\", but no such group is '\n 'described in the \"graphic_groups\" '\n 'argument.'\n )\n for obj in getattr(ann, 'TextObjectSequence', []):\n grp_id = obj.graphic_group_id\n if grp_id is not None:\n if grp_id not in described_groups_ids:\n raise ValueError(\n 'Found text object with graphic group ID '\n f'\"{grp_id}\", but no such group is '\n 'described in the \"graphic_groups\" '\n 'argument.'\n )\n dataset.GraphicAnnotationSequence = graphic_annotations\n\n\ndef _add_displayed_area_attributes(\n dataset: Dataset,\n referenced_images: Sequence[Dataset],\n) -> None:\n \"\"\"Add attributes of module Displayed Area.\n\n Parameters\n ----------\n dataset: pydicom.Dataset\n Dataset to which attributes should be added\n referenced_images: Sequence[pydicom.Dataset]\n Images that should be referenced\n\n \"\"\"\n # This implements the simplest case - the entire area is selected for\n # display and the selection applies to all referenced images.\n # We may want to generalize this later.\n ref_im = referenced_images[0]\n display_area_item = Dataset()\n display_area_item.PixelOriginInterpretation = 'VOLUME'\n display_area_item.DisplayedAreaTopLeftHandCorner = [1, 1]\n if is_tiled_image(ref_im):\n # In case the images form a multi-resolution pyramid, select the image\n # at lowest resolution (top of the pyramid).\n sorted_images = sorted(\n referenced_images,\n key=lambda im: im.TotalPixelMatrixRows * im.TotalPixelMatrixColumns\n )\n low_res_im = sorted_images[0]\n display_area_item.ReferencedImageSequence = ReferencedImageSequence(\n referenced_images=[low_res_im],\n )\n display_area_item.DisplayedAreaBottomRightHandCorner = [\n low_res_im.TotalPixelMatrixColumns,\n low_res_im.TotalPixelMatrixRows,\n ]\n else:\n display_area_item.DisplayedAreaBottomRightHandCorner = [\n ref_im.Columns,\n ref_im.Rows,\n ]\n 
display_area_item.PresentationSizeMode = 'SCALE TO FIT'\n display_area_item.PresentationPixelAspectRatio = [1, 1]\n dataset.DisplayedAreaSelectionSequence = [display_area_item]\n\n\ndef _add_modality_lut_attributes(\n dataset: Dataset,\n modality_lut_transformation: ModalityLUTTransformation,\n) -> None:\n \"\"\"Add attributes of module Modality LUT.\n\n Parameters\n ----------\n dataset: pydicom.Dataset\n Dataset to which attributes should be added\n modality_lut_transformation: highdicom.ModalityLUTTransformation\n Description of the Modality LUT Transformation for transforming modality\n dependent into modality independent pixel values\n\n \"\"\"\n if not isinstance(modality_lut_transformation, ModalityLUTTransformation):\n raise ValueError(\n 'Argument \"modality_lut_transformation\" must have type '\n 'ModalityLUTTransformation.'\n )\n for element in modality_lut_transformation:\n dataset[element.tag] = element\n\n\ndef _get_modality_lut_transformation(\n referenced_images: Sequence[Dataset]\n) -> Union[ModalityLUTTransformation, None]:\n \"\"\"Get Modality LUT Transformation from the referenced images.\n\n Parameters\n ----------\n referenced_images: Sequence[pydicom.Dataset]\n The referenced images from which the attributes should be copied.\n\n Returns\n -------\n Union[highdicom.ModalityLUTTransformation, None]\n Description of the Modality LUT Transformation for transforming modality\n dependent into modality independent pixel values. 
None if no such\n attributes are found in the referenced images.\n\n Raises\n ------\n ValueError\n In case the presence or value of the RescaleSlope, RescaleIntercept,\n or RescaleType attributes are inconsistent between referenced images.\n\n \"\"\"\n # Multframe images\n if any(hasattr(im, 'NumberOfFrames') for im in referenced_images):\n im = referenced_images[0]\n if len(referenced_images) > 1 and not is_tiled_image(im):\n raise ValueError(\n \"Attributes of Modality LUT module are not available when \"\n \"multiple images are passed and any of them are multiframe.\"\n )\n\n # Check only the Shared Groups, as PRs require all frames to have\n # the same Modality LUT\n slope = None\n intercept = None\n rescale_type = None\n shared_grps = im.SharedFunctionalGroupsSequence[0]\n if hasattr(shared_grps, 'PixelValueTransformationSequence'):\n trans_seq = shared_grps.PixelValueTransformationSequence[0]\n if hasattr(trans_seq, 'RescaleSlope'):\n slope = trans_seq.RescaleSlope\n if hasattr(trans_seq, 'RescaleIntercept'):\n intercept = trans_seq.RescaleIntercept\n if hasattr(trans_seq, 'RescaleType'):\n rescale_type = trans_seq.RescaleType\n\n # Modality LUT data in the Per Frame Functional Groups will not\n # be copied, but we should check for it rather than silently\n # failing to copy it\n if hasattr(im, 'PerFrameFunctionalGroupsSequence'):\n perframe_grps = im.PerFrameFunctionalGroupsSequence\n if any(\n hasattr(frm_grps, 'PixelValueTransformationSequence')\n for frm_grps in perframe_grps\n ):\n raise ValueError(\n 'This multiframe image contains modality LUT '\n 'table data in the Per-Frame Functional Groups '\n 'Sequence. 
This is not compatible with the '\n 'Modality LUT module.'\n )\n\n else:\n have_slopes = [\n hasattr(ds, 'RescaleSlope') for ds in referenced_images\n ]\n have_intercepts = [\n hasattr(ds, 'RescaleIntercept') for ds in referenced_images\n ]\n have_type = [\n hasattr(ds, 'RescaleType') for ds in referenced_images\n ]\n\n if any(have_slopes) and not all(have_slopes):\n raise ValueError(\n 'Error while copying Modality LUT attributes: presence of '\n '\"RescaleSlope\" is inconsistent among referenced images.'\n )\n if any(have_intercepts) and not all(have_intercepts):\n raise ValueError(\n 'Error while copying Modality LUT attributes: presence of '\n '\"RescaleIntercept\" is inconsistent among referenced '\n 'images.'\n )\n if any(have_type) and not all(have_type):\n raise ValueError(\n 'Error while copying Modality LUT attributes: presence of '\n '\"RescaleType\" is inconsistent among referenced images.'\n )\n\n if all(have_intercepts) != all(have_slopes):\n raise ValueError(\n 'Error while copying Modality LUT attributes: datasets '\n 'should have both \"RescaleIntercept\" and \"RescaleSlope\", '\n 'or neither.'\n )\n\n if all(have_intercepts):\n if any(\n ds.RescaleSlope != referenced_images[0].RescaleSlope\n for ds in referenced_images\n ):\n raise ValueError(\n 'Error while copying Modality LUT attributes: values '\n 'of \"RescaleSlope\" are inconsistent among referenced '\n 'images.'\n )\n if any(\n ds.RescaleIntercept != referenced_images[0].RescaleIntercept\n for ds in referenced_images\n ):\n raise ValueError(\n 'Error while copying Modality LUT attributes: values '\n 'of \"RescaleIntercept\" are inconsistent among '\n 'referenced images.'\n )\n slope = referenced_images[0].RescaleSlope\n intercept = referenced_images[0].RescaleIntercept\n else:\n slope = None\n intercept = None\n\n if all(have_type):\n if any(\n ds.RescaleType != referenced_images[0].RescaleType\n for ds in referenced_images\n ):\n raise ValueError(\n 'Error while copying Modality LUT 
attributes: values '\n 'of \"RescaleType\" are inconsistent among referenced '\n 'images.'\n )\n rescale_type = referenced_images[0].RescaleType\n else:\n if intercept is None:\n rescale_type = None\n else:\n rescale_type = RescaleTypeValues.HU.value\n\n if intercept is None:\n return None\n\n return ModalityLUTTransformation(\n rescale_intercept=intercept,\n rescale_slope=slope,\n rescale_type=rescale_type\n )\n\n\ndef _add_softcopy_voi_lut_attributes(\n dataset: Dataset,\n referenced_images: Sequence[Dataset],\n voi_lut_transformations: Sequence[SoftcopyVOILUTTransformation]\n) -> None:\n \"\"\"Add attributes of module Softcopy VOI LUT.\n\n Parameters\n ----------\n dataset: pydicom.Dataset\n Dataset to which attributes should be added\n referenced_images: Sequence[pydicom.Dataset]\n Images that should be referenced\n voi_lut_transformations: Sequence[highdicom.pr.SoftcopyVOILUTTransformation]\n Description of the VOI LUT Transformation for transforming modality\n pixel values into pixel values that are of interest to a user or an\n application\n\n \"\"\" # noqa: E501\n if len(voi_lut_transformations) == 0:\n raise ValueError(\n 'Argument \"voi_lut_transformations\" must not be empty.'\n )\n for i, v in enumerate(voi_lut_transformations):\n if not isinstance(v, SoftcopyVOILUTTransformation):\n raise TypeError(\n f'Item #{i} of \"voi_lut_transformations\" must have '\n 'highdicom.pr.SoftcopyVOILUTTransformation.'\n )\n\n if len(voi_lut_transformations) > 1:\n if not all(\n hasattr(v, 'ReferencedImageSequence')\n for v in voi_lut_transformations\n ):\n raise ValueError(\n 'If multiple items of argument '\n '\"voi_lut_transformations\" are passed, '\n 'each must reference the images that it applies to.'\n )\n\n ref_images_lut = {\n (ds.SOPClassUID, ds.SOPInstanceUID): ds\n for ds in referenced_images\n }\n prev_ref_frames = defaultdict(list)\n prev_ref_segs = defaultdict(list)\n for transformation in voi_lut_transformations:\n # If the softcopy VOI LUT references 
specific images,\n # check that the references are valid\n if hasattr(transformation, 'ReferencedImageSequence'):\n for item in transformation.ReferencedImageSequence:\n uids = (\n item.ReferencedSOPClassUID,\n item.ReferencedSOPInstanceUID\n )\n if uids not in ref_images_lut:\n raise ValueError(\n f'Instance with SOP Instance UID {uids[1]} and '\n f'SOP Class UID {uids[0]} is referenced in '\n 'items of \"voi_lut_transformations\", but not '\n 'included in \"referenced_images\".'\n )\n ref_im = ref_images_lut[uids]\n is_multiframe = hasattr(\n ref_im,\n 'NumberOfFrames',\n )\n if uids in prev_ref_frames and not is_multiframe:\n raise ValueError(\n f'Instance with SOP Instance UID {uids[1]} '\n 'is referenced in more than one item of the '\n '\"softcopy_voi_luts\".'\n )\n nframes = getattr(ref_im, 'NumberOfFrames', 1)\n if hasattr(item, 'ReferencedFrameNumber'):\n ref_frames = item.ReferencedFrameNumber\n if not isinstance(ref_frames, MultiValue):\n ref_frames = [ref_frames]\n else:\n if hasattr(item, 'ReferencedSegmentNumber'):\n # Do not check frames if segments are specified\n ref_frames = []\n else:\n # If ReferencedFrameNumber is not present, the\n # reference refers to all frames\n ref_frames = list(range(1, nframes + 1))\n\n for f in ref_frames:\n if f in prev_ref_frames[uids]:\n raise ValueError(\n f'Frame {f} in image with SOP Instance '\n f'UID {uids[1]} is referenced in more '\n 'than one item of the '\n '\"softcopy_voi_luts\".'\n )\n prev_ref_frames[uids].append(f)\n\n if hasattr(item, 'ReferencedSegmentNumber'):\n ref_segs = item.ReferencedSegmentNumber\n if not isinstance(ref_segs, MultiValue):\n ref_segs = [ref_segs]\n\n if hasattr(ref_im, 'SegmentSequence'):\n nsegments = len(ref_im.SegmentSequence)\n if not hasattr(item, 'ReferencedSegmentNumber'):\n ref_segs = list(range(1, nsegments))\n for s in ref_segs:\n if s in prev_ref_segs[uids]:\n raise ValueError(\n f'Segment {s} in image with SOP '\n f'Instance UID {uids[1]} is '\n 'referenced in more 
than one item of '\n 'the \"softcopy_voi_luts\".'\n )\n prev_ref_segs[uids].append(s)\n\n dataset.SoftcopyVOILUTSequence = voi_lut_transformations\n\n\ndef _get_softcopy_voi_lut_transformations(\n referenced_images: Sequence[Dataset]\n) -> Sequence[SoftcopyVOILUTTransformation]:\n \"\"\"Get Softcopy VOI LUT Transformation from referenced images.\n\n Any Window Center, Window Width, Window Explanation, VOI LUT Function,\n or VOI LUT Sequence attributes the referenced images are copied to the\n new sequence. Missing values will cause no errors, and\n will result in the relevant (optional) attributes being omitted from\n the presentation state object. Inconsistent values between\n referenced images will result in multiple different items of the\n Softcopy VOI LUT Sequence in the presentation state object.\n\n Parameters\n ----------\n referenced_images: Sequence[pydicom.Dataset]\n The referenced images from which the attributes should be copied.\n\n Returns\n -------\n Sequence[highdicom.SoftcopyVOILUTTransformation]\n Dataset containing attributes of module Softcopy VOI LUT\n\n \"\"\"\n transformations = []\n if any(hasattr(im, 'NumberOfFrames') for im in referenced_images):\n if len(referenced_images) > 1:\n raise ValueError(\n \"If multiple images are passed and any of them are multiframe, \"\n \"a 'softcopy_voi_lut_transformation' must be explicitly \"\n \"provided.\"\n )\n\n im = referenced_images[0]\n shared_grps = im.SharedFunctionalGroupsSequence[0]\n perframe_grps = im.PerFrameFunctionalGroupsSequence\n if hasattr(shared_grps, 'FrameVOILUTSequence'):\n # Simple case where VOI information is in the Shared functional\n # groups and therefore are consistent between frames\n voi_seq = shared_grps.FrameVOILUTSequence[0]\n\n softcopy_voi_lut_transformation = SoftcopyVOILUTTransformation(\n window_center=voi_seq.WindowCenter,\n window_width=voi_seq.WindowWidth,\n window_explanation=getattr(\n voi_seq,\n 'WindowCenterWidthExplanation',\n None\n ),\n 
voi_lut_function=getattr(voi_seq, 'VOILUTFunction', None),\n )\n transformations.append(softcopy_voi_lut_transformation)\n\n else:\n # Check the per-frame functional groups, which may be\n # inconsistent between frames and require multiple entries\n # in the GSPS SoftcopyVOILUTSequence\n by_window = defaultdict(list)\n for frame_number, frm_grp in enumerate(perframe_grps, 1):\n if hasattr(frm_grp, 'FrameVOILUTSequence'):\n voi_seq = frm_grp.FrameVOILUTSequence[0]\n # Create unique ID for this VOI lookup as a tuple\n # of the contents\n by_window[(\n voi_seq.WindowWidth,\n voi_seq.WindowCenter,\n getattr(\n voi_seq,\n 'WindowCenterWidthExplanation',\n None\n ),\n getattr(voi_seq, 'VOILUTFunction', None),\n )].append(frame_number)\n\n for (width, center, exp, func), frame_list in by_window.items():\n if len(frame_list) == im.NumberOfFrames:\n # All frames included, no need to include the\n # referenced frames explicitly\n refs_to_include = None\n else:\n # Include specific references\n refs_to_include = ReferencedImageSequence(\n referenced_images=referenced_images,\n referenced_frame_number=frame_list,\n )\n\n transformations.append(\n SoftcopyVOILUTTransformation(\n window_center=center,\n window_width=width,\n window_explanation=exp,\n voi_lut_function=func,\n referenced_images=refs_to_include\n )\n )\n\n else: # single frame\n by_window = defaultdict(list)\n by_lut = defaultdict(list)\n for ref_im in referenced_images:\n has_width = hasattr(ref_im, 'WindowWidth')\n has_center = hasattr(ref_im, 'WindowCenter')\n has_lut = hasattr(ref_im, 'VOILUTSequence')\n\n if has_width != has_center:\n raise ValueError(\n 'Error while copying VOI LUT attributes: found dataset '\n 'with mismatched WindowWidth and WindowCenter '\n 'attributes.'\n )\n\n if has_width and has_lut:\n raise ValueError(\n 'Error while copying VOI LUT attributes: found dataset '\n 'with both window width/center and VOI LUT Sequence '\n 'attributes.'\n )\n\n if has_width:\n by_window[(\n 
ref_im.WindowWidth,\n ref_im.WindowCenter,\n getattr(ref_im, 'WindowCenterWidthExplanation', None),\n getattr(ref_im, 'VOILUTFunction', None),\n )].append(ref_im)\n elif has_lut:\n # Create a unique identifier for this list of LUTs\n lut_info = []\n for voi_lut in ref_im.VOILUTSequence:\n lut_info.append((\n voi_lut.LUTDescriptor[1],\n voi_lut.LUTDescriptor[2],\n getattr(voi_lut, 'LUTExplanation', None),\n voi_lut.LUTData\n ))\n lut_id = tuple(lut_info)\n by_lut[lut_id].append(ref_im)\n\n for (width, center, exp, func), im_list in by_window.items():\n if len(im_list) == len(referenced_images):\n # All datasets included, no need to include the referenced\n # images explicitly\n refs_to_include = None\n else:\n # Include specific references\n refs_to_include = ReferencedImageSequence(im_list)\n\n transformations.append(\n SoftcopyVOILUTTransformation(\n window_center=center,\n window_width=width,\n window_explanation=exp,\n voi_lut_function=func,\n referenced_images=refs_to_include\n )\n )\n\n for lut_id, im_list in by_lut.items():\n if len(im_list) == len(referenced_images):\n # All datasets included, no need to include the referenced\n # images explicitly\n refs_to_include = None\n else:\n # Include specific references\n refs_to_include = ReferencedImageSequence(im_list)\n\n luts = [\n VOILUT(\n first_mapped_value=fmv,\n lut_data=np.frombuffer(\n data,\n np.uint8 if ba == 8 else np.uint16\n ),\n lut_explanation=exp\n )\n for (fmv, ba, exp, data) in lut_id\n ]\n transformations.append(\n SoftcopyVOILUTTransformation(\n referenced_images=refs_to_include,\n voi_luts=luts\n )\n )\n\n return transformations\n\n\ndef _get_icc_profile(referenced_images: Sequence[Dataset]) -> bytes:\n \"\"\"Get ICC Profile from a referenced image.\n\n Parameters\n ----------\n referenced_images: Sequence[pydicom.Dataset]\n Image datasets from which to extract an ICC profile\n\n Returns\n -------\n bytes\n ICC Profile\n\n Raises\n ------\n ValueError:\n When no ICC profile is found in any 
of the referenced images or if\n more than one unique profile is found.\n\n \"\"\"\n icc_profiles = []\n for im in referenced_images:\n if hasattr(referenced_images, 'ICCProfile'):\n icc_profiles.append(im.ICCProfile)\n elif hasattr(im, 'OpticalPathSequence'):\n if len(im.OpticalPathSequence) > 1:\n raise ValueError(\n 'Cannot extract ICC Profile from referenced image. '\n 'Color image is expected to contain only a single optical '\n 'path.'\n )\n icc_profiles.append(im.OpticalPathSequence[0].ICCProfile)\n\n if len(icc_profiles) == 0:\n raise ValueError(\n 'Could not find an ICC Profile in any of the referenced images.'\n )\n if len(set(icc_profiles)) > 1:\n raise ValueError(\n 'Found more than one ICC Profile in referenced images.'\n )\n\n return icc_profiles[0]\n\n\ndef _add_icc_profile_attributes(\n dataset: Dataset,\n icc_profile: bytes\n) -> None:\n \"\"\"Add attributes of module ICC Profile.\n\n Parameters\n ----------\n dataset: pydicom.Dataset\n Dataset to which attributes should be added\n icc_profile: bytes\n ICC color profile to include in the presentation state.\n The profile must follow the constraints listed in :dcm:`C.11.15\n <part03/sect_C.11.15.html>`.\n\n \"\"\"\n if icc_profile is None:\n raise TypeError('Argument \"icc_profile\" is required.')\n\n cms_profile = ImageCmsProfile(BytesIO(icc_profile))\n device_class = cms_profile.profile.device_class.strip()\n if device_class not in ('scnr', 'spac'):\n raise ValueError(\n 'The device class of the ICC Profile must be \"scnr\" or \"spac\", '\n f'got \"{device_class}\".'\n )\n color_space = cms_profile.profile.xcolor_space.strip()\n if color_space != 'RGB':\n raise ValueError(\n 'The color space of the ICC Profile must be \"RGB\", '\n f'got \"{color_space}\".'\n )\n pcs = cms_profile.profile.connection_space.strip()\n if pcs not in ('Lab', 'XYZ'):\n raise ValueError(\n 'The profile connection space of the ICC Profile must '\n f'be \"Lab\" or \"XYZ\", got \"{pcs}\".'\n )\n\n dataset.ICCProfile = 
icc_profile\n\n\ndef _add_palette_color_lookup_table_attributes(\n dataset: Dataset,\n palette_color_lut_transformation: PaletteColorLUTTransformation\n) -> None:\n \"\"\"Add attributes from the Palette Color Lookup Table module.\n\n Parameters\n ----------\n dataset: pydicom.Dataset\n Dataset to which attributes should be added\n palette_color_lut_transformation: highdicom.PaletteColorLUTTransformation\n Description of the Palette Color LUT Transformation for transforming\n grayscale into RGB color pixel values\n\n \"\"\" # noqa: E501\n if not isinstance(\n palette_color_lut_transformation,\n PaletteColorLUTTransformation\n ):\n raise TypeError(\n 'Argument \"palette_color_lut_transformation\" must be of type '\n 'PaletteColorLUTTransformation.'\n )\n\n for element in palette_color_lut_transformation:\n dataset[element.tag] = element\n\n\ndef _add_softcopy_presentation_lut_attributes(\n dataset: Dataset,\n presentation_lut_transformation: PresentationLUTTransformation,\n) -> None:\n \"\"\"Add attributes of module Softcopy Presentation LUT.\n\n Parameters\n ----------\n dataset: pydicom.Dataset\n Dataset to which attributes should be added\n presentation_lut_transformation: highdicom.PresentationLUTTransformation\n Description of the Modality LUT Transformation for transforming modality\n dependent into modality independent pixel values\n\n \"\"\"\n if not isinstance(\n presentation_lut_transformation,\n PresentationLUTTransformation\n ):\n raise ValueError(\n 'Argument \"presenation_lut_transformation\" must have type '\n 'PresentationLUTTransformation.'\n )\n for element in presentation_lut_transformation:\n dataset[element.tag] = element\n\n\nclass AdvancedBlending(Dataset):\n\n \"\"\"Class for an item of the Advanced Blending Sequence.\"\"\"\n\n def __init__(\n self,\n referenced_images: Sequence[Dataset],\n blending_input_number: int,\n modality_lut_transformation: Optional[\n ModalityLUTTransformation\n ] = None,\n voi_lut_transformations: Optional[\n 
Sequence[SoftcopyVOILUTTransformation]\n ] = None,\n palette_color_lut_transformation: Optional[\n PaletteColorLUTTransformation\n ] = None,\n ) -> None:\n \"\"\"\n\n Parameters\n ----------\n referenced_images: Sequence[pydicom.Dataset]\n Images that should be referenced\n blending_input_number: int\n Relative one-based index of the item for input into the blending\n operation\n modality_lut_transformation: Union[highdicom.ModalityLUTTransformation, None], optional\n Description of the Modality LUT Transformation for transforming modality\n dependent into modality independent pixel values\n voi_lut_transformations: Union[Sequence[highdicom.pr.SoftcopyVOILUTTransformation], None], optional\n Description of the VOI LUT Transformation for transforming\n modality pixel values into pixel values that are of interest to a\n user or an application\n palette_color_lut_transformation: Union[highdicom.PaletteColorLUTTransformation, None], optional\n Description of the Palette Color LUT Transformation for transforming\n grayscale into RGB color pixel values\n\n \"\"\" # noqa: E501\n super().__init__()\n ref_im = referenced_images[0]\n if ref_im.SamplesPerPixel == 1:\n if palette_color_lut_transformation is None:\n raise ValueError(\n 'For advanced blending presentation, if referenced images '\n 'are grayscale a palette color lookup table must be '\n 'provided to pseudo-color the image prior to blending.'\n )\n for im in referenced_images:\n if im.SamplesPerPixel != ref_im.SamplesPerPixel:\n raise ValueError(\n 'For advanced blending presentation, all referenced '\n 'images of an advanced blending item must have the same '\n 'number of samples per pixel.'\n )\n if im.StudyInstanceUID != ref_im.StudyInstanceUID:\n raise ValueError(\n 'For advanced blending presentation, all referenced '\n 'images of an advanced blending item must be part of the '\n 'same study.'\n )\n if im.SeriesInstanceUID != ref_im.SeriesInstanceUID:\n raise ValueError(\n 'For advanced blending presentation, 
all referenced '\n 'images of an advanced blending item must be part of the '\n 'same series.'\n )\n\n self.BlendingInputNumber = blending_input_number\n\n ref_im = referenced_images[0]\n ref_series_uid = ref_im.SeriesInstanceUID\n ref_im_seq = []\n for im in referenced_images:\n series_uid = im.SeriesInstanceUID\n if series_uid != ref_series_uid:\n raise ValueError(\n 'All referenced images must belong to the same series.'\n )\n if not is_tiled_image(im):\n if im.Rows != ref_im.Rows or im.Columns != ref_im.Columns:\n raise ValueError(\n 'All referenced images must have the same dimensions.'\n )\n ref_im_item = Dataset()\n ref_im_item.ReferencedSOPClassUID = im.SOPClassUID\n ref_im_item.ReferencedSOPInstanceUID = im.SOPInstanceUID\n ref_im_seq.append(ref_im_item)\n self.ReferencedImageSequence = ref_im_seq\n self.StudyInstanceUID = ref_im.StudyInstanceUID\n self.SeriesInstanceUID = ref_im.SeriesInstanceUID\n\n if modality_lut_transformation is not None:\n _add_modality_lut_attributes(\n self,\n modality_lut_transformation=modality_lut_transformation\n )\n else:\n modality_lut_transformation = _get_modality_lut_transformation(\n referenced_images\n )\n if modality_lut_transformation is None:\n logger.debug(\n 'no Modality LUT attributes found in referenced images'\n )\n else:\n logger.debug(\n 'use Modality LUT attributes from referenced images'\n )\n _add_modality_lut_attributes(\n self,\n modality_lut_transformation=modality_lut_transformation\n )\n\n # Softcopy VOI LUT\n if voi_lut_transformations is not None:\n if len(voi_lut_transformations) == 0:\n raise ValueError(\n 'Argument \"voi_lut_transformations\" must not be '\n 'empty.'\n )\n for v in voi_lut_transformations:\n if not isinstance(v, SoftcopyVOILUTTransformation):\n raise TypeError(\n 'Items of argument \"voi_lut_transformations\" '\n 'must be of type SoftcopyVOILUTTransformation.'\n )\n\n if len(voi_lut_transformations) > 1:\n if not all(\n hasattr(v, 'ReferencedImageSequence')\n for v in 
voi_lut_transformations\n ):\n raise ValueError(\n 'If argument \"voi_lut_transformations\" '\n 'contains multiple items, each item must reference the '\n 'images that it applies to.'\n )\n _add_softcopy_voi_lut_attributes(\n self,\n referenced_images=referenced_images,\n voi_lut_transformations=voi_lut_transformations\n )\n else:\n voi_lut_transformations = _get_softcopy_voi_lut_transformations(\n referenced_images\n )\n if len(voi_lut_transformations) > 0:\n logger.debug('use VOI LUT attributes from referenced images')\n _add_softcopy_voi_lut_attributes(\n self,\n referenced_images=referenced_images,\n voi_lut_transformations=voi_lut_transformations\n )\n else:\n logger.debug('no VOI LUT attributes found in referenced images')\n\n # Palette Color Lookup Table\n palette_color_lut_item = Dataset()\n _add_palette_color_lookup_table_attributes(\n palette_color_lut_item,\n palette_color_lut_transformation=palette_color_lut_transformation\n )\n self.PaletteColorLookupTableSequence = [palette_color_lut_item]\n\n\nclass BlendingDisplayInput(Dataset):\n\n \"\"\"Class for an item of the Blending Display Input Sequence attribute.\"\"\"\n\n def __init__(\n self,\n blending_input_number: int\n ) -> None:\n \"\"\"\n\n Parameters\n ----------\n blending_input_number: int\n One-based identification index number of the input series to which\n the blending information should be applied\n\n \"\"\"\n super().__init__()\n self.BlendingInputNumber = blending_input_number\n\n\nclass BlendingDisplay(Dataset):\n\n \"\"\"Class for an item of the Blending Display Sequence attribute.\"\"\"\n\n def __init__(\n self,\n blending_mode: Union[BlendingModeValues, str],\n blending_display_inputs: Sequence[BlendingDisplayInput],\n blending_input_number: Optional[int] = None,\n relative_opacity: Optional[float] = None,\n ) -> None:\n \"\"\"\n\n Parameters\n ----------\n blending_mode: Union[str, highdicom.pr.BlendingModeValues]\n Method for weighting the different input images during the blending\n 
operation using alpha composition with premultiplication\n blending_display_inputs: Sequence[highdicom.pr.BlendingDisplayInput]\n Inputs for the blending operation. The order of items determines\n the order in which images will be blended.\n blending_input_number: Union[int, None], optional\n One-based identification index number of the result. Required if\n the output of the blending operation should not be directly\n displayed but used as input for a subsequent blending operation.\n relative_opacity: Union[float, None], optional\n Relative opacity (alpha value) that should be premultiplied with\n pixel values of the foreground image. Pixel values of the background\n image will be premultilied with 1 - `relative_opacity`.\n Required if `blending_mode` is ``\"FOREGROUND\"``. Will be ignored\n otherwise.\n\n \"\"\"\n super().__init__()\n blending_mode = BlendingModeValues(blending_mode)\n self.BlendingMode = blending_mode.value\n\n if not isinstance(blending_display_inputs, Sequence):\n raise TypeError(\n 'Argument \"blending_display_inputs\" must be a sequence.'\n )\n\n if blending_mode == BlendingModeValues.FOREGROUND:\n if len(blending_display_inputs) != 2:\n raise ValueError(\n 'Argument \"blending_display_inputs\" must contain exactly '\n 'two items if blending mode is \"FOREGROUND\".'\n )\n if relative_opacity is None:\n raise TypeError(\n 'Argument \"relative_opacity\" is required if blending mode '\n 'is \"FOREGROUND\".'\n )\n self.RelativeOpacity = float(relative_opacity)\n elif blending_mode == BlendingModeValues.EQUAL:\n if len(blending_display_inputs) == 0:\n raise ValueError(\n 'Argument \"blending_display_input\" must contain one or '\n 'more items if blending mode is \"EQUAL\".'\n )\n for item in blending_display_inputs:\n if not isinstance(item, BlendingDisplayInput):\n raise TypeError(\n 'Items of argument \"blending_display_input\" must have '\n 'type BlendingDisplayInput.'\n )\n self.BlendingDisplayInputSequence = blending_display_inputs\n" ]
[ [ "numpy.array", "numpy.array_equal", "numpy.frombuffer" ] ]
XiaoguangHu01/models
[ "a95d49323ed504e5a9164586f171f408954fd43a", "a95d49323ed504e5a9164586f171f408954fd43a", "a95d49323ed504e5a9164586f171f408954fd43a", "a95d49323ed504e5a9164586f171f408954fd43a" ]
[ "PaddleNLP/Research/MRQA2019-D-NET/server/xlnet_server/model/xlnet.py", "PaddleNLP/Research/ACL2019-KTNET/reading_comprehension/src/run_record.py", "PaddleNLP/Research/ACL2019-JEMT/train.py", "PaddleNLP/Research/ACL2019-KTNET/reading_comprehension/src/model/layers.py" ]
[ "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\nimport json\nimport numpy as np\nimport paddle.fluid as fluid\nfrom model.transformer_encoder import encoder, pre_process_layer\nimport modeling\n\ndef _get_initiliaizer(args):\n if args.init == \"uniform\":\n param_initializer = fluid.initializer.Uniform(\n low=-args.init_range, high=args.init_range)\n elif args.init == \"normal\":\n param_initializer = fluid.initializer.Normal(scale=args.init_std)\n else:\n raise ValueError(\"Initializer {} not supported\".format(args.init))\n return param_initializer\n \ndef init_attn_mask(args, place):\n \"\"\"create causal attention mask.\"\"\"\n qlen = args.max_seq_length\n mlen=0 if 'mem_len' not in args else args.mem_len\n same_length=False if 'same_length' not in args else args.same_length\n dtype = 'float16' if args.use_fp16 else 'float32'\n attn_mask = np.ones([qlen, qlen], dtype=dtype)\n mask_u = np.triu(attn_mask)\n mask_dia = np.diag(np.diag(attn_mask))\n attn_mask_pad = np.zeros([qlen, mlen], dtype=dtype)\n attn_mask = np.concatenate([attn_mask_pad, mask_u - mask_dia], 1)\n if same_length:\n mask_l = np.tril(attn_mask)\n attn_mask = np.concatenate([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1)\n attn_mask = attn_mask[:, :, None, None]\n 
attn_mask_t = fluid.global_scope().find_var(\"attn_mask\").get_tensor()\n attn_mask_t.set(attn_mask, place)\n\nclass XLNetConfig(object):\n def __init__(self, config_path):\n self._config_dict = self._parse(config_path)\n\n def _parse(self, config_path):\n try:\n with open(config_path) as json_file:\n config_dict = json.load(json_file)\n except Exception:\n raise IOError(\"Error in parsing xlnet model config file '%s'\" %\n config_path)\n else:\n return config_dict\n\n def __getitem__(self, key):\n return self._config_dict[key]\n\n def has_key(self, key):\n return self._config_dict.has_key(key)\n\n def print_config(self):\n for arg, value in sorted(six.iteritems(self._config_dict)):\n print('%s: %s' % (arg, value))\n print('------------------------------------------------')\n\n\nclass XLNetModel(object):\n def __init__(self,\n xlnet_config,\n input_ids,\n seg_ids,\n input_mask,\n args,\n mems=None,\n perm_mask=None,\n target_mapping=None,\n inp_q=None):\n self._tie_weight = True\n\n self._d_head = xlnet_config['d_head']\n self._d_inner = xlnet_config['d_inner']\n self._d_model = xlnet_config['d_model']\n self._ff_activation = xlnet_config['ff_activation']\n self._n_head = xlnet_config['n_head']\n self._n_layer = xlnet_config['n_layer']\n self._n_token = xlnet_config['n_token']\n self._untie_r = xlnet_config['untie_r']\n\n self._mem_len=None if 'mem_len' not in args else args.mem_len\n self._reuse_len=None if 'reuse_len' not in args else args.reuse_len\n self._bi_data=False if 'bi_data' not in args else args.bi_data\n self._clamp_len=args.clamp_len\n self._same_length=False if 'same_length' not in args else args.same_length\n # Initialize all weigths by the specified initializer, and all biases \n # will be initialized by constant zero by default.\n self._param_initializer = _get_initiliaizer(args)\n\n tfm_args = dict(\n n_token=self._n_token,\n initializer=self._param_initializer,\n attn_type=\"bi\",\n n_layer=self._n_layer,\n 
d_model=self._d_model,\n\t\tn_head=self._n_head,\n\t\td_head=self._d_head,\n\t\td_inner=self._d_inner,\n\t\tff_activation=self._ff_activation,\n\t\tuntie_r=self._untie_r,\n\n\t\tuse_bfloat16=args.use_fp16,\n\t\tdropout=args.dropout,\n\t\tdropatt=args.dropatt,\n\n\t\tmem_len=self._mem_len,\n\t\treuse_len=self._reuse_len,\n\t\tbi_data=self._bi_data,\n\t\tclamp_len=args.clamp_len,\n\t\tsame_length=self._same_length,\n name='model_transformer')\n input_args = dict(\n inp_k=input_ids,\n seg_id=seg_ids,\n input_mask=input_mask,\n mems=mems,\n perm_mask=perm_mask,\n target_mapping=target_mapping,\n inp_q=inp_q)\n tfm_args.update(input_args)\n self.output, self.new_mems, self.lookup_table = modeling.transformer_xl(**tfm_args)\n #self._build_model(input_ids, sentence_ids, input_mask)\n\n def get_initializer(self):\n return self._param_initializer\n\n \n \n def get_sequence_output(self):\n return self.output\n\n def get_pooled_output(self):\n \"\"\"Get the first feature of each sequence for classification\"\"\"\n\n next_sent_feat = fluid.layers.slice(\n input=self._enc_out, axes=[1], starts=[0], ends=[1])\n next_sent_feat = fluid.layers.fc(\n input=next_sent_feat,\n size=self._emb_size,\n act=\"tanh\",\n param_attr=fluid.ParamAttr(\n name=\"pooled_fc.w_0\", initializer=self._param_initializer),\n bias_attr=\"pooled_fc.b_0\")\n return next_sent_feat\n\n def get_pretraining_output(self, mask_label, mask_pos, labels):\n \"\"\"Get the loss & accuracy for pretraining\"\"\"\n\n mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32')\n\n # extract the first token feature in each sentence\n next_sent_feat = self.get_pooled_output()\n reshaped_emb_out = fluid.layers.reshape(\n x=self._enc_out, shape=[-1, self._emb_size])\n # extract masked tokens' feature\n mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos)\n\n # transform: fc\n mask_trans_feat = fluid.layers.fc(\n input=mask_feat,\n size=self._emb_size,\n act=self._hidden_act,\n param_attr=fluid.ParamAttr(\n 
name='mask_lm_trans_fc.w_0',\n initializer=self._param_initializer),\n bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))\n # transform: layer norm \n mask_trans_feat = pre_process_layer(\n mask_trans_feat, 'n', name='mask_lm_trans')\n\n mask_lm_out_bias_attr = fluid.ParamAttr(\n name=\"mask_lm_out_fc.b_0\",\n initializer=fluid.initializer.Constant(value=0.0))\n if self._weight_sharing:\n word_emb = fluid.default_main_program().global_block().var(\n self._word_emb_name)\n if self._emb_dtype != self._dtype:\n word_emb = fluid.layers.cast(word_emb, self._dtype)\n fc_out = fluid.layers.matmul(\n x=mask_trans_feat, y=word_emb, transpose_y=True)\n fc_out += fluid.layers.create_parameter(\n shape=[self._voc_size],\n dtype=self._dtype,\n attr=mask_lm_out_bias_attr,\n is_bias=True)\n\n else:\n fc_out = fluid.layers.fc(input=mask_trans_feat,\n size=self._voc_size,\n param_attr=fluid.ParamAttr(\n name=\"mask_lm_out_fc.w_0\",\n initializer=self._param_initializer),\n bias_attr=mask_lm_out_bias_attr)\n\n mask_lm_loss = fluid.layers.softmax_with_cross_entropy(\n logits=fc_out, label=mask_label)\n mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)\n\n next_sent_fc_out = fluid.layers.fc(\n input=next_sent_feat,\n size=2,\n param_attr=fluid.ParamAttr(\n name=\"next_sent_fc.w_0\", initializer=self._param_initializer),\n bias_attr=\"next_sent_fc.b_0\")\n\n next_sent_loss, next_sent_softmax = fluid.layers.softmax_with_cross_entropy(\n logits=next_sent_fc_out, label=labels, return_softmax=True)\n\n next_sent_acc = fluid.layers.accuracy(\n input=next_sent_softmax, label=labels)\n\n mean_next_sent_loss = fluid.layers.mean(next_sent_loss)\n\n loss = mean_next_sent_loss + mean_mask_lm_loss\n return next_sent_acc, mean_mask_lm_loss, loss\n", "# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Finetuning on ReCoRD.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nimport multiprocessing\nimport os\nimport time\nimport logging\nimport random\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\n\nfrom reader.record import DataProcessor, write_predictions\nfrom model.bert import BertConfig, BertModel\nfrom model.layers import MemoryLayer, TriLinearTwoTimeSelfAttentionLayer\nfrom utils.args import ArgumentGroup, print_arguments\nfrom optimization import optimization\nfrom utils.init import init_pretraining_params, init_checkpoint\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', \n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogging.getLogger().setLevel(logging.INFO) \nlogger = logging.getLogger(__name__)\n\n# yapf: disable\nparser = argparse.ArgumentParser()\nmodel_g = ArgumentGroup(parser, \"model\", \"model configuration and paths.\")\nmodel_g.add_arg(\"bert_config_path\", str, None, \"Path to the json file for bert model config.\")\nmodel_g.add_arg(\"init_checkpoint\", str, None, \"Init checkpoint to resume training from.\")\nmodel_g.add_arg(\"init_pretraining_params\", str, None,\n \"Init pre-training params which preforms fine-tuning from. 
If the \"\n \"arg 'init_checkpoint' has been set, this argument wouldn't be valid.\")\nmodel_g.add_arg(\"checkpoints\", str, \"checkpoints\", \"Path to save checkpoints.\")\n\ntrain_g = ArgumentGroup(parser, \"training\", \"training options.\")\ntrain_g.add_arg(\"epoch\", int, 3, \"Number of epoches for fine-tuning.\")\ntrain_g.add_arg(\"learning_rate\", float, 5e-5, \"Learning rate used to train with warmup.\")\ntrain_g.add_arg(\"lr_scheduler\", str, \"linear_warmup_decay\",\n \"scheduler of learning rate.\", choices=['linear_warmup_decay', 'noam_decay'])\ntrain_g.add_arg(\"weight_decay\", float, 0.01, \"Weight decay rate for L2 regularizer.\")\ntrain_g.add_arg(\"warmup_proportion\", float, 0.1,\n \"Proportion of training steps to perform linear learning rate warmup for.\")\ntrain_g.add_arg(\"save_steps\", int, 1000, \"The steps interval to save checkpoints.\")\ntrain_g.add_arg(\"validation_steps\", int, 1000, \"The steps interval for validation (effective only when do_val is True).\")\ntrain_g.add_arg(\"use_ema\", bool, True, \"Whether to use ema.\")\ntrain_g.add_arg(\"ema_decay\", float, 0.9999, \"Decay rate for expoential moving average.\")\ntrain_g.add_arg(\"use_fp16\", bool, False, \"Whether to use fp16 mixed precision training.\")\ntrain_g.add_arg(\"loss_scaling\", float, 1.0,\n \"Loss scaling factor for mixed precision training, only valid when use_fp16 is enabled.\")\n\nlog_g = ArgumentGroup(parser, \"logging\", \"logging related.\")\nlog_g.add_arg(\"skip_steps\", int, 10, \"The steps interval to print loss.\")\nlog_g.add_arg(\"verbose\", bool, False, \"Whether to output verbose log.\")\n\ndata_g = ArgumentGroup(parser, \"data\", \"Data paths, vocab paths and data processing options\")\ndata_g.add_arg(\"train_file\", str, None, \"ReCoRD json for training. E.g., train.json.\")\ndata_g.add_arg(\"predict_file\", str, None, \"ReCoRD json for predictions. E.g. 
dev.json.\")\ndata_g.add_arg(\"vocab_path\", str, None, \"Vocabulary path.\")\ndata_g.add_arg(\"version_2_with_negative\", bool, False,\n \"If true, the SQuAD examples contain some that do not have an answer. If using squad v2.0, it should be set true.\")\ndata_g.add_arg(\"max_seq_len\", int, 512, \"Number of words of the longest seqence.\")\ndata_g.add_arg(\"max_query_length\", int, 64, \"Max query length.\")\ndata_g.add_arg(\"max_answer_length\", int, 30, \"Max answer length.\")\ndata_g.add_arg(\"batch_size\", int, 12, \"Total examples' number in batch for training. see also --in_tokens.\")\ndata_g.add_arg(\"in_tokens\", bool, False,\n \"If set, the batch size will be the maximum number of tokens in one batch. \"\n \"Otherwise, it will be the maximum number of examples in one batch.\")\ndata_g.add_arg(\"do_lower_case\", bool, True,\n \"Whether to lower case the input text. Should be True for uncased models and False for cased models.\")\ndata_g.add_arg(\"doc_stride\", int, 128,\n \"When splitting up a long document into chunks, how much stride to take between chunks.\")\ndata_g.add_arg(\"n_best_size\", int, 20,\n \"The total number of n-best predictions to generate in the nbest_predictions.json output file.\")\ndata_g.add_arg(\"null_score_diff_threshold\", float, 0.0,\n \"If null_score - best_non_null is greater than the threshold predict null.\")\ndata_g.add_arg(\"random_seed\", int, 42, \"Random seed.\")\n\nrun_type_g = ArgumentGroup(parser, \"run_type\", \"running type options.\")\nrun_type_g.add_arg(\"use_cuda\", bool, True, \"If set, use GPU for training.\")\nrun_type_g.add_arg(\"use_fast_executor\", bool, False, \"If set, use fast parallel executor (in experiment).\")\nrun_type_g.add_arg(\"num_iteration_per_drop_scope\", int, 1, \"Ihe iteration intervals to clean up temporary variables.\")\nrun_type_g.add_arg(\"do_train\", bool, False, \"Whether to perform training.\")\nrun_type_g.add_arg(\"do_val\", bool, False, \"Whether to perform validation during 
training.\")\nrun_type_g.add_arg(\"do_predict\", bool, False, \"Whether to perform prediction.\")\nrun_type_g.add_arg(\"freeze\", bool, False, \"freeze bert parameters\")\n\nmem_settings_g = ArgumentGroup(parser, \"memory\", \"memory settings.\")\nmem_settings_g.add_arg('concept_embedding_path', str, None, 'path of pretrained concept file')\nmem_settings_g.add_arg('use_wordnet', bool, False, 'whether to use wordnet memory')\nmem_settings_g.add_arg('retrieved_synset_path', str, '../retrieve_concepts/retrieve_wordnet/output_record/retrived_synsets.data', 'path of retrieved synsets')\nmem_settings_g.add_arg('use_nell', bool, False, 'whether to use nell memory')\nmem_settings_g.add_arg('train_retrieved_nell_concept_path', str, '../retrieve_concepts/retrieve_nell/output_record/train.retrieved_nell_concepts.data', 'path of retrieved concepts for trainset')\nmem_settings_g.add_arg('dev_retrieved_nell_concept_path', str, '../retrieve_concepts/retrieve_nell/output_record/dev.retrieved_nell_concepts.data', 'path of retrieved concepts for devset')\n\nargs = parser.parse_args()\n# yapf: enable.\n\ndef create_model(pyreader_name, bert_config, max_concept_length, concept_embedding_mat, is_training=False, freeze=False):\n if is_training:\n pyreader = fluid.layers.py_reader(\n capacity=50,\n shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],\n [-1, args.max_seq_len, 1],\n [-1, args.max_seq_len, max_concept_length, 1],\n [-1, args.max_seq_len, 1], [-1, 1], [-1, 1]],\n dtypes=[\n 'int64', 'int64', 'int64', 'int64', 'float32', 'int64', 'int64'],\n lod_levels=[0, 0, 0, 0, 0, 0, 0],\n name=pyreader_name,\n use_double_buffer=True)\n (src_ids, pos_ids, sent_ids, concept_ids, input_mask, start_positions,\n end_positions) = fluid.layers.read_file(pyreader)\n else:\n pyreader = fluid.layers.py_reader(\n capacity=50,\n shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],\n [-1, args.max_seq_len, 1],\n [-1, args.max_seq_len, max_concept_length, 1],\n [-1, args.max_seq_len, 
1], [-1, 1]],\n dtypes=['int64', 'int64', 'int64', 'int64', 'float32', 'int64'],\n lod_levels=[0, 0, 0, 0, 0, 0],\n name=pyreader_name,\n use_double_buffer=True)\n (src_ids, pos_ids, sent_ids, concept_ids, input_mask, unique_id) = fluid.layers.read_file(pyreader)\n\n '''1st Layer: BERT Layer'''\n bert = BertModel(\n src_ids=src_ids,\n position_ids=pos_ids,\n sentence_ids=sent_ids,\n input_mask=input_mask,\n config=bert_config,\n use_fp16=args.use_fp16)\n\n enc_out = bert.get_sequence_output()\n if freeze:\n enc_out.stop_gradient=True\n logger.info(\"enc_out.stop_gradient: {}\".format(enc_out.stop_gradient))\n\n '''2nd layer: Memory Layer'''\n # get memory embedding\n concept_vocab_size = concept_embedding_mat.shape[0]\n concept_dim = concept_embedding_mat.shape[1]\n memory_embs = fluid.layers.embedding(concept_ids,\n size=(concept_vocab_size, concept_dim),\n param_attr=fluid.ParamAttr(name=\"concept_emb_mat\",\n do_model_average=False,\n trainable=False),\n dtype='float32')\n \n # get memory length\n concept_ids_reduced = fluid.layers.equal(concept_ids,\n fluid.layers.fill_constant(shape=[1], value=0, dtype=\"int64\")) # [batch_size, sent_size, concept_size, 1]\n concept_ids_reduced = fluid.layers.cast(concept_ids_reduced, dtype=\"float32\") # [batch_size, sent_size, concept_size, 1]\n concept_ids_reduced = fluid.layers.scale(\n fluid.layers.elementwise_sub(\n concept_ids_reduced,\n fluid.layers.fill_constant([1], \"float32\", 1)\n ),\n scale=-1\n )\n mem_length = fluid.layers.reduce_sum(concept_ids_reduced, dim=2) # [batch_size, sent_size, 1] \n\n # select and integrate\n memory_layer = MemoryLayer(bert_config, max_concept_length, concept_dim, mem_method='cat')\n memory_output = memory_layer.forward(enc_out, memory_embs, mem_length, ignore_no_memory_token=True) \n\n '''3rd layer: Self-Matching Layer'''\n # calculate input dim for self-matching layer\n if memory_layer.mem_method == 'add':\n memory_output_size = bert_config['hidden_size']\n elif 
memory_layer.mem_method == 'cat':\n memory_output_size = bert_config['hidden_size'] + concept_dim\n else:\n raise ValueError(\"memory_layer.mem_method must be 'add' or 'cat'\") \n logger.info(\"memory_output_size: {}\".format(memory_output_size))\n\n # do matching\n self_att_layer = TriLinearTwoTimeSelfAttentionLayer(\n memory_output_size, dropout_rate=0.0, \n cat_mul=True, cat_sub=True, cat_twotime=True,\n cat_twotime_mul=False, cat_twotime_sub=True) # [bs, sq, concat_hs]\n att_output = self_att_layer.forward(memory_output, input_mask) # [bs, sq, concat_hs]\n\n '''4th layer: Output Layer'''\n logits = fluid.layers.fc(\n input=att_output,\n size=2,\n num_flatten_dims=2,\n param_attr=fluid.ParamAttr(\n name=\"cls_squad_out_w\",\n initializer=fluid.initializer.NormalInitializer(loc=0.0, scale=bert_config['initializer_range'])),\n bias_attr=fluid.ParamAttr(\n name=\"cls_squad_out_b\", initializer=fluid.initializer.Constant(0.)))\n\n logits = fluid.layers.transpose(x=logits, perm=[2, 0, 1])\n start_logits, end_logits = fluid.layers.unstack(x=logits, axis=0)\n\n batch_ones = fluid.layers.fill_constant_batch_size_like(\n input=start_logits, dtype='int64', shape=[1], value=1)\n num_seqs = fluid.layers.reduce_sum(input=batch_ones)\n\n if is_training:\n\n def compute_loss(logits, positions):\n loss = fluid.layers.softmax_with_cross_entropy(\n logits=logits, label=positions)\n loss = fluid.layers.mean(x=loss)\n return loss\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2.0\n if args.use_fp16 and args.loss_scaling > 1.0:\n total_loss = total_loss * args.loss_scaling\n\n return pyreader, total_loss, num_seqs\n else:\n return pyreader, unique_id, start_logits, end_logits, num_seqs\n\n\nRawResult = collections.namedtuple(\"RawResult\",\n [\"unique_id\", \"start_logits\", \"end_logits\"])\n\n\ndef predict(test_exe, test_program, test_pyreader, fetch_list, processor, 
eval_concept_settings, eval_output_name='eval_result.json'):\n if not os.path.exists(args.checkpoints):\n os.makedirs(args.checkpoints)\n output_prediction_file = os.path.join(args.checkpoints, \"predictions.json\")\n output_nbest_file = os.path.join(args.checkpoints, \"nbest_predictions.json\")\n output_null_log_odds_file = os.path.join(args.checkpoints, \"null_odds.json\")\n output_evaluation_result_file = os.path.join(args.checkpoints, eval_output_name)\n\n test_pyreader.start()\n all_results = []\n time_begin = time.time()\n while True:\n try:\n np_unique_ids, np_start_logits, np_end_logits, np_num_seqs = test_exe.run(\n fetch_list=fetch_list, program=test_program)\n for idx in range(np_unique_ids.shape[0]):\n if len(all_results) % 1000 == 0:\n logger.info(\"Processing example: %d\" % len(all_results))\n unique_id = int(np_unique_ids[idx])\n start_logits = [float(x) for x in np_start_logits[idx].flat]\n end_logits = [float(x) for x in np_end_logits[idx].flat]\n all_results.append(\n RawResult(\n unique_id=unique_id,\n start_logits=start_logits,\n end_logits=end_logits))\n except fluid.core.EOFException:\n test_pyreader.reset()\n break\n time_end = time.time()\n\n features = processor.get_features(\n processor.predict_examples, is_training=False, **eval_concept_settings)\n eval_result = write_predictions(processor.predict_examples, features, all_results,\n args.n_best_size, args.max_answer_length,\n args.do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file,\n args.version_2_with_negative,\n args.null_score_diff_threshold, args.verbose, args.predict_file, output_evaluation_result_file)\n return eval_result\n\ndef read_concept_embedding(embedding_path):\n fin = open(embedding_path, encoding='utf-8')\n info = [line.strip() for line in fin]\n dim = len(info[0].split(' ')[1:])\n n_concept = len(info)\n embedding_mat = []\n id2concept, concept2id = [], {}\n # add padding concept into vocab\n id2concept.append('<pad_concept>')\n 
concept2id['<pad_concept>'] = 0\n embedding_mat.append([0.0 for _ in range(dim)])\n for line in info:\n concept_name = line.split(' ')[0]\n embedding = [float(value_str) for value_str in line.split(' ')[1:]] \n assert len(embedding) == dim and not np.any(np.isnan(embedding))\n embedding_mat.append(embedding)\n concept2id[concept_name] = len(id2concept)\n id2concept.append(concept_name)\n embedding_mat = np.array(embedding_mat, dtype=np.float32)\n return id2concept, concept2id, embedding_mat\n\ndef train(args):\n bert_config = BertConfig(args.bert_config_path)\n bert_config.print_config()\n\n if not (args.do_train or args.do_predict or args.do_val):\n raise ValueError(\"For args `do_train` and `do_predict`, at \"\n \"least one of them must be True.\")\n\n if args.use_cuda:\n place = fluid.CUDAPlace(0)\n dev_count = fluid.core.get_cuda_device_count()\n else:\n place = fluid.CPUPlace()\n dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))\n exe = fluid.Executor(place)\n\n id2concept, concept2id, concept_embedding_mat = read_concept_embedding(\n args.concept_embedding_path)\n\n processor = DataProcessor(\n vocab_path=args.vocab_path,\n do_lower_case=args.do_lower_case,\n max_seq_length=args.max_seq_len,\n in_tokens=args.in_tokens,\n doc_stride=args.doc_stride,\n max_query_length=args.max_query_length)\n\n startup_prog = fluid.Program()\n if args.random_seed is not None:\n startup_prog.random_seed = args.random_seed\n random.seed(args.random_seed)\n np.random.seed(args.random_seed)\n\n if args.do_train:\n train_concept_settings = {\n 'tokenization_path': '../retrieve_concepts/tokenization_record/tokens/train.tokenization.{}.data'.format('uncased' if args.do_lower_case else 'cased'),\n 'concept2id': concept2id,\n 'use_wordnet': args.use_wordnet,\n 'retrieved_synset_path': args.retrieved_synset_path,\n 'use_nell': args.use_nell,\n 'retrieved_nell_concept_path': args.train_retrieved_nell_concept_path, \n } \n train_data_generator = 
processor.data_generator(\n data_path=args.train_file,\n batch_size=args.batch_size,\n phase='train',\n shuffle=True,\n dev_count=dev_count,\n version_2_with_negative=args.version_2_with_negative,\n epoch=args.epoch,\n **train_concept_settings)\n\n num_train_examples = processor.get_num_examples(phase='train')\n if args.in_tokens:\n max_train_steps = args.epoch * num_train_examples // (\n args.batch_size // args.max_seq_len) // dev_count\n else:\n max_train_steps = args.epoch * num_train_examples // (\n args.batch_size) // dev_count\n warmup_steps = int(max_train_steps * args.warmup_proportion)\n logger.info(\"Device count: %d\" % dev_count)\n logger.info(\"Num train examples: %d\" % num_train_examples)\n logger.info(\"Max train steps: %d\" % max_train_steps)\n logger.info(\"Num warmup steps: %d\" % warmup_steps)\n\n train_program = fluid.Program()\n # if args.random_seed is not None:\n # train_program.random_seed = args.random_seed\n with fluid.program_guard(train_program, startup_prog):\n with fluid.unique_name.guard():\n train_pyreader, loss, num_seqs = create_model(\n pyreader_name='train_reader',\n bert_config=bert_config,\n max_concept_length=processor.train_max_concept_length,\n concept_embedding_mat=concept_embedding_mat,\n is_training=True,\n freeze=args.freeze)\n\n scheduled_lr = optimization(\n loss=loss,\n warmup_steps=warmup_steps,\n num_train_steps=max_train_steps,\n learning_rate=args.learning_rate,\n train_program=train_program,\n startup_prog=startup_prog,\n weight_decay=args.weight_decay,\n scheduler=args.lr_scheduler,\n use_fp16=args.use_fp16,\n loss_scaling=args.loss_scaling)\n \n if args.use_ema:\n ema = fluid.optimizer.ExponentialMovingAverage(args.ema_decay)\n ema.update()\n\n fluid.memory_optimize(train_program, skip_opt_set=[loss.name, num_seqs.name])\n\n if args.verbose:\n if args.in_tokens:\n lower_mem, upper_mem, unit = fluid.contrib.memory_usage(\n program=train_program,\n batch_size=args.batch_size // args.max_seq_len)\n else:\n 
lower_mem, upper_mem, unit = fluid.contrib.memory_usage(\n program=train_program, batch_size=args.batch_size)\n logger.info(\"Theoretical memory usage in training: %.3f - %.3f %s\" %\n (lower_mem, upper_mem, unit))\n\n if args.do_predict or args.do_val:\n eval_concept_settings = {\n 'tokenization_path': '../retrieve_concepts/tokenization_record/tokens/dev.tokenization.{}.data'.format('uncased' if args.do_lower_case else 'cased'),\n 'concept2id': concept2id,\n 'use_wordnet': args.use_wordnet,\n 'retrieved_synset_path': args.retrieved_synset_path,\n 'use_nell': args.use_nell,\n 'retrieved_nell_concept_path': args.dev_retrieved_nell_concept_path, \n } \n eval_data_generator = processor.data_generator(\n data_path=args.predict_file,\n batch_size=args.batch_size,\n phase='predict',\n shuffle=False,\n dev_count=1,\n epoch=1,\n **eval_concept_settings)\n\n test_prog = fluid.Program()\n # if args.random_seed is not None:\n # test_prog.random_seed = args.random_seed\n with fluid.program_guard(test_prog, startup_prog):\n with fluid.unique_name.guard():\n test_pyreader, unique_ids, start_logits, end_logits, num_seqs = create_model(\n pyreader_name='test_reader',\n bert_config=bert_config,\n max_concept_length=processor.predict_max_concept_length,\n concept_embedding_mat=concept_embedding_mat, \n is_training=False)\n \n if args.use_ema and 'ema' not in dir():\n ema = fluid.optimizer.ExponentialMovingAverage(args.ema_decay)\n\n fluid.memory_optimize(test_prog, skip_opt_set=[unique_ids.name,\n start_logits.name, end_logits.name, num_seqs.name])\n\n test_prog = test_prog.clone(for_test=True)\n # if args.random_seed is not None:\n # test_prog.random_seed = args.random_seed\n\n exe.run(startup_prog)\n\n if args.do_train:\n logger.info('load pretrained concept embedding')\n fluid.global_scope().find_var('concept_emb_mat').get_tensor().set(concept_embedding_mat, place)\n\n if args.init_checkpoint and args.init_pretraining_params:\n logger.info(\n \"WARNING: args 'init_checkpoint' and 
'init_pretraining_params' \"\n \"both are set! Only arg 'init_checkpoint' is made valid.\")\n if args.init_checkpoint:\n init_checkpoint(\n exe,\n args.init_checkpoint,\n main_program=startup_prog,\n use_fp16=args.use_fp16)\n elif args.init_pretraining_params:\n init_pretraining_params(\n exe,\n args.init_pretraining_params,\n main_program=startup_prog,\n use_fp16=args.use_fp16)\n elif args.do_predict or args.do_val:\n if not args.init_checkpoint:\n raise ValueError(\"args 'init_checkpoint' should be set if\"\n \"only doing prediction!\")\n init_checkpoint(\n exe,\n args.init_checkpoint,\n main_program=startup_prog,\n use_fp16=args.use_fp16)\n\n if args.do_train:\n exec_strategy = fluid.ExecutionStrategy()\n exec_strategy.use_experimental_executor = args.use_fast_executor\n exec_strategy.num_threads = dev_count\n exec_strategy.num_iteration_per_drop_scope = args.num_iteration_per_drop_scope\n\n train_exe = fluid.ParallelExecutor(\n use_cuda=args.use_cuda,\n loss_name=loss.name,\n exec_strategy=exec_strategy,\n main_program=train_program)\n\n train_pyreader.decorate_tensor_provider(train_data_generator)\n\n train_pyreader.start()\n steps = 0\n total_cost, total_num_seqs = [], []\n time_begin = time.time()\n while steps < max_train_steps:\n try:\n steps += 1\n if steps % args.skip_steps == 0:\n if warmup_steps <= 0:\n fetch_list = [loss.name, num_seqs.name]\n else:\n fetch_list = [\n loss.name, scheduled_lr.name, num_seqs.name\n ]\n else:\n fetch_list = []\n\n outputs = train_exe.run(fetch_list=fetch_list)\n\n if steps % args.skip_steps == 0:\n if warmup_steps <= 0:\n np_loss, np_num_seqs = outputs\n else:\n np_loss, np_lr, np_num_seqs = outputs\n total_cost.extend(np_loss * np_num_seqs)\n total_num_seqs.extend(np_num_seqs)\n\n if args.verbose:\n verbose = \"train pyreader queue size: %d, \" % train_pyreader.queue.size(\n )\n verbose += \"learning rate: %f\" % (\n np_lr[0]\n if warmup_steps > 0 else args.learning_rate)\n logger.info(verbose)\n\n time_end = 
time.time()\n used_time = time_end - time_begin\n current_example, epoch = processor.get_train_progress()\n\n logger.info(\"epoch: %d, progress: %d/%d, step: %d, loss: %f, \"\n \"speed: %f steps/s\" %\n (epoch, current_example, num_train_examples, steps,\n np.sum(total_cost) / np.sum(total_num_seqs),\n args.skip_steps / used_time))\n total_cost, total_num_seqs = [], []\n time_begin = time.time()\n\n if steps % args.save_steps == 0 or steps == max_train_steps:\n save_path = os.path.join(args.checkpoints,\n \"step_\" + str(steps))\n fluid.io.save_persistables(exe, save_path, train_program)\n \n if steps % args.validation_steps == 0 or steps == max_train_steps:\n if args.do_val:\n test_pyreader.decorate_tensor_provider(\n processor.data_generator(\n data_path=args.predict_file,\n batch_size=args.batch_size,\n phase='predict',\n shuffle=False,\n dev_count=1,\n epoch=1,\n **eval_concept_settings)\n )\n val_performance = predict(exe, test_prog, test_pyreader, [\n unique_ids.name, start_logits.name, end_logits.name, num_seqs.name\n ], processor, eval_concept_settings, 'validate_result_step_{}.json'.format(steps))\n logger.info(\"Validation performance after step {}:\\n* Exact_match: {}\\n* F1: {}\".format(steps, val_performance['exact_match'], val_performance['f1']))\n \n except fluid.core.EOFException:\n save_path = os.path.join(args.checkpoints,\n \"step_\" + str(steps) + \"_final\")\n fluid.io.save_persistables(exe, save_path, train_program)\n train_pyreader.reset()\n break\n\n if args.do_predict:\n test_pyreader.decorate_tensor_provider(eval_data_generator)\n\n if args.use_ema:\n with ema.apply(exe):\n eval_performance = predict(exe, test_prog, test_pyreader, [\n unique_ids.name, start_logits.name, end_logits.name, num_seqs.name\n ], processor, eval_concept_settings)\n else:\n eval_performance = predict(exe, test_prog, test_pyreader, [\n unique_ids.name, start_logits.name, end_logits.name, num_seqs.name\n ], processor, eval_concept_settings)\n\n logger.info(\"Eval 
performance:\\n* Exact_match: {}\\n* F1: {}\".format(eval_performance['exact_match'], eval_performance['f1']))\n\n\nif __name__ == '__main__':\n print_arguments(args)\n train(args)\n", "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport argparse\nimport ast\nimport copy\nimport logging\nimport multiprocessing\nimport os\nimport six\nimport sys\nimport time\n\nimport numpy as np\nimport paddle.fluid as fluid\n\nimport reader\nfrom config import *\nfrom desc import *\nfrom model import transformer, position_encoding_init\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Training for Transformer.\")\n parser.add_argument(\n \"--src_vocab_fpath\",\n type=str,\n required=True,\n help=\"The path of vocabulary file of source language.\")\n parser.add_argument(\n \"--trg_vocab_fpath\",\n type=str,\n required=True,\n help=\"The path of vocabulary file of target language.\")\n parser.add_argument(\n \"--phoneme_vocab_fpath\",\n type=str,\n required=True,\n help=\"The path of vocabulary file of phonemes.\")\n parser.add_argument(\n \"--lexicon_fpath\",\n type=str,\n required=True,\n help=\"The path of lexicon of source language.\")\n parser.add_argument(\n \"--train_file_pattern\",\n type=str,\n required=True,\n help=\"The pattern to match training data files.\")\n parser.add_argument(\n \"--val_file_pattern\",\n type=str,\n help=\"The pattern to match validation data files.\")\n 
parser.add_argument(\n \"--use_token_batch\",\n type=ast.literal_eval,\n default=True,\n help=\"The flag indicating whether to \"\n \"produce batch data according to token number.\")\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=4096,\n help=\"The number of sequences contained in a mini-batch, or the maximum \"\n \"number of tokens (include paddings) contained in a mini-batch. Note \"\n \"that this represents the number on single device and the actual batch \"\n \"size for multi-devices will multiply the device number.\")\n parser.add_argument(\n \"--pool_size\",\n type=int,\n default=200000,\n help=\"The buffer size to pool data.\")\n parser.add_argument(\n \"--sort_type\",\n default=\"pool\",\n choices=(\"global\", \"pool\", \"none\"),\n help=\"The grain to sort by length: global for all instances; pool for \"\n \"instances in pool; none for no sort.\")\n parser.add_argument(\n \"--shuffle\",\n type=ast.literal_eval,\n default=True,\n help=\"The flag indicating whether to shuffle instances in each pass.\")\n parser.add_argument(\n \"--shuffle_batch\",\n type=ast.literal_eval,\n default=True,\n help=\"The flag indicating whether to shuffle the data batches.\")\n parser.add_argument(\n \"--special_token\",\n type=str,\n default=[\"<s>\", \"<e>\", \"<unk>\"],\n nargs=3,\n help=\"The <bos>, <eos> and <unk> tokens in the dictionary.\")\n parser.add_argument(\n \"--token_delimiter\",\n type=lambda x: str(x.encode().decode(\"unicode-escape\")),\n default=\" \",\n help=\"The delimiter used to split tokens in source or target sentences. \"\n \"For EN-DE BPE data we provided, use spaces as token delimiter. 
\")\n parser.add_argument(\n 'opts',\n help='See config.py for all options',\n default=None,\n nargs=argparse.REMAINDER)\n parser.add_argument(\n '--local',\n type=ast.literal_eval,\n default=True,\n help='Whether to run as local mode.')\n parser.add_argument(\n '--device',\n type=str,\n default='GPU',\n choices=['CPU', 'GPU'],\n help=\"The device type.\")\n parser.add_argument(\n '--update_method',\n choices=(\"pserver\", \"nccl2\"),\n default=\"pserver\",\n help='Update method.')\n parser.add_argument(\n '--sync', type=ast.literal_eval, default=True, help=\"sync mode.\")\n parser.add_argument(\n \"--enable_ce\",\n type=ast.literal_eval,\n default=False,\n help=\"The flag indicating whether to run the task \"\n \"for continuous evaluation.\")\n parser.add_argument(\n \"--use_py_reader\",\n type=ast.literal_eval,\n default=True,\n help=\"The flag indicating whether to use py_reader.\")\n parser.add_argument(\n \"--fetch_steps\",\n type=int,\n default=100,\n help=\"The frequency to fetch and print output.\")\n\n args = parser.parse_args()\n # Append args related to dict\n src_dict = reader.DataReader.load_dict(args.src_vocab_fpath)\n trg_dict = reader.DataReader.load_dict(args.trg_vocab_fpath)\n phone_dict = reader.DataReader.load_dict(args.phoneme_vocab_fpath)\n dict_args = [\n \"src_vocab_size\", str(len(src_dict)), \"trg_vocab_size\",\n str(len(trg_dict)), \"phone_vocab_size\", str(len(phone_dict)), \"bos_idx\",\n str(src_dict[args.special_token[0]]), \"eos_idx\",\n str(src_dict[args.special_token[1]]), \"unk_idx\",\n str(src_dict[args.special_token[2]])\n ]\n merge_cfg_from_list(args.opts + dict_args,\n [TrainTaskConfig, ModelHyperParams])\n\n return args\n\n\ndef append_nccl2_prepare(startup_prog, trainer_id, worker_endpoints,\n current_endpoint):\n assert (trainer_id >= 0 and len(worker_endpoints) > 1 and\n current_endpoint in worker_endpoints)\n eps = copy.deepcopy(worker_endpoints)\n eps.remove(current_endpoint)\n nccl_id_var = 
startup_prog.global_block().create_var(\n name=\"NCCLID\", persistable=True, type=fluid.core.VarDesc.VarType.RAW)\n startup_prog.global_block().append_op(\n type=\"gen_nccl_id\",\n inputs={},\n outputs={\"NCCLID\": nccl_id_var},\n attrs={\n \"endpoint\": current_endpoint,\n \"endpoint_list\": eps,\n \"trainer_id\": trainer_id\n })\n return nccl_id_var\n\n\ndef pad_phoneme_data(phoneme_seqs, pad_idx, max_seq_len):\n \"\"\"\n Pad the instances to the max sequence length in batch, and generate the\n corresponding position data and attention bias.\n \"\"\"\n ph_seq_lens = []\n for ps in phoneme_seqs:\n cur_seq_lens = [len(x) for x in ps]\n ph_seq_lens.append(max(cur_seq_lens))\n max_ph_seq_len = max(ph_seq_lens)\n\n batch_size = len(phoneme_seqs)\n phoneme_data = pad_idx * np.ones(\n (batch_size, max_seq_len, max_ph_seq_len), dtype=np.int64)\n phoneme_mask = np.zeros(\n (batch_size, max_seq_len, max_ph_seq_len), dtype=np.int64)\n\n for i in range(batch_size):\n cur_ph_seq = phoneme_seqs[i]\n for j, cur_word_phs in enumerate(cur_ph_seq):\n word_phs_len = len(cur_word_phs)\n phoneme_data[i, j, :word_phs_len] = cur_word_phs\n phoneme_mask[i, j, :word_phs_len] = 1\n\n phoneme_data = np.reshape(phoneme_data, [batch_size, max_seq_len, -1, 1])\n\n return phoneme_data, phoneme_mask, max_ph_seq_len\n\n\ndef pad_batch_data(insts,\n pad_idx,\n n_head,\n is_target=False,\n is_label=False,\n return_attn_bias=True,\n return_max_len=True,\n return_num_token=False):\n \"\"\"\n Pad the instances to the max sequence length in batch, and generate the\n corresponding position data and attention bias.\n \"\"\"\n return_list = []\n max_len = max(len(inst) for inst in insts)\n # Any token included in dict can be used to pad, since the paddings' loss\n # will be masked out by weights and make no effect on parameter gradients.\n inst_data = np.array(\n [inst + [pad_idx] * (max_len - len(inst)) for inst in insts])\n return_list += [inst_data.astype(\"int64\").reshape([-1, 1])]\n if is_label: # 
label weight\n inst_weight = np.array([[1.] * len(inst) + [0.] * (max_len - len(inst))\n for inst in insts])\n return_list += [inst_weight.astype(\"float32\").reshape([-1, 1])]\n else: # position data\n inst_pos = np.array([\n list(range(0, len(inst))) + [0] * (max_len - len(inst))\n for inst in insts\n ])\n return_list += [inst_pos.astype(\"int64\").reshape([-1, 1])]\n if return_attn_bias:\n if is_target:\n # This is used to avoid attention on paddings and subsequent\n # words.\n slf_attn_bias_data = np.ones((inst_data.shape[0], max_len, max_len))\n slf_attn_bias_data = np.triu(slf_attn_bias_data,\n 1).reshape([-1, 1, max_len, max_len])\n slf_attn_bias_data = np.tile(slf_attn_bias_data,\n [1, n_head, 1, 1]) * [-1e9]\n else:\n # This is used to avoid attention on paddings.\n slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] *\n (max_len - len(inst))\n for inst in insts])\n slf_attn_bias_data = np.tile(\n slf_attn_bias_data.reshape([-1, 1, 1, max_len]),\n [1, n_head, max_len, 1])\n return_list += [slf_attn_bias_data.astype(\"float32\")]\n if return_max_len:\n return_list += [max_len]\n if return_num_token:\n num_token = 0\n for inst in insts:\n num_token += len(inst)\n return_list += [num_token]\n return return_list if len(return_list) > 1 else return_list[0]\n\n\ndef prepare_batch_input(insts, data_input_names, src_pad_idx, phone_pad_idx,\n trg_pad_idx, n_head, d_model):\n \"\"\"\n Put all padded data needed by training into a dict.\n \"\"\"\n src_word, src_pos, src_slf_attn_bias, src_max_len = pad_batch_data(\n [inst[0] for inst in insts], src_pad_idx, n_head, is_target=False)\n src_word = src_word.reshape(-1, src_max_len, 1)\n src_pos = src_pos.reshape(-1, src_max_len, 1)\n src_phone, src_phone_mask, max_phone_len = pad_phoneme_data(\n [inst[1] for inst in insts], phone_pad_idx, src_max_len)\n trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = pad_batch_data(\n [inst[2] for inst in insts], trg_pad_idx, n_head, is_target=True)\n trg_word = 
trg_word.reshape(-1, trg_max_len, 1)\n trg_pos = trg_pos.reshape(-1, trg_max_len, 1)\n\n trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :],\n [1, 1, trg_max_len, 1]).astype(\"float32\")\n\n lbl_word, lbl_weight, num_token = pad_batch_data(\n [inst[3] for inst in insts],\n trg_pad_idx,\n n_head,\n is_target=False,\n is_label=True,\n return_attn_bias=False,\n return_max_len=False,\n return_num_token=True)\n\n data_input_dict = dict(\n zip(data_input_names, [\n src_word, src_pos, src_slf_attn_bias, src_phone, src_phone_mask,\n trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias, lbl_word,\n lbl_weight\n ]))\n\n return data_input_dict, np.asarray([num_token], dtype=\"float32\")\n\n\ndef prepare_data_generator(args,\n is_test,\n count,\n pyreader,\n py_reader_provider_wrapper,\n place=None):\n \"\"\"\n Data generator wrapper for DataReader. If use py_reader, set the data\n provider for py_reader\n \"\"\"\n data_reader = reader.DataReader(\n phoneme_vocab_fpath=args.phoneme_vocab_fpath,\n lexicon_fpath=args.lexicon_fpath,\n fpattern=args.val_file_pattern if is_test else args.train_file_pattern,\n src_vocab_fpath=args.src_vocab_fpath,\n trg_vocab_fpath=args.trg_vocab_fpath,\n token_delimiter=args.token_delimiter,\n use_token_batch=args.use_token_batch,\n batch_size=args.batch_size * (1 if args.use_token_batch else count),\n pool_size=args.pool_size,\n sort_type=args.sort_type,\n shuffle=args.shuffle,\n shuffle_batch=args.shuffle_batch,\n start_mark=args.special_token[0],\n end_mark=args.special_token[1],\n unk_mark=args.special_token[2],\n # count start and end tokens out\n max_length=ModelHyperParams.max_length - 2,\n clip_last_batch=False).batch_generator\n\n def stack(data_reader, count, clip_last=True):\n def __impl__():\n res = []\n for item in data_reader():\n res.append(item)\n if len(res) == count:\n yield res\n res = []\n if len(res) == count:\n yield res\n elif not clip_last:\n data = []\n for item in res:\n data += item\n if len(data) > 
count:\n inst_num_per_part = len(data) // count\n yield [\n data[inst_num_per_part * i:inst_num_per_part * (i + 1)]\n for i in range(count)\n ]\n\n return __impl__\n\n def split(data_reader, count):\n def __impl__():\n for item in data_reader():\n inst_num_per_part = len(item) // count\n for i in range(count):\n yield item[inst_num_per_part * i:inst_num_per_part * (i + 1\n )]\n\n return __impl__\n\n if not args.use_token_batch:\n # to make data on each device have similar token number\n data_reader = split(data_reader, count)\n if args.use_py_reader:\n pyreader.decorate_tensor_provider(\n py_reader_provider_wrapper(data_reader, place))\n data_reader = None\n else: # Data generator for multi-devices\n data_reader = stack(data_reader, count)\n return data_reader\n\n\ndef prepare_feed_dict_list(data_generator, init_flag, count):\n \"\"\"\n Prepare the list of feed dict for multi-devices.\n \"\"\"\n feed_dict_list = []\n if data_generator is not None: # use_py_reader == False\n data_input_names = encoder_data_input_fields + \\\n decoder_data_input_fields[:-1] + label_data_input_fields\n data = next(data_generator)\n for idx, data_buffer in enumerate(data):\n data_input_dict, num_token = prepare_batch_input(\n data_buffer, data_input_names, ModelHyperParams.eos_idx,\n ModelHyperParams.phone_pad_idx, ModelHyperParams.eos_idx,\n ModelHyperParams.n_head, ModelHyperParams.d_model)\n feed_dict_list.append(data_input_dict)\n if init_flag:\n for idx in range(count):\n pos_enc_tables = dict()\n for pos_enc_param_name in pos_enc_param_names:\n pos_enc_tables[pos_enc_param_name] = position_encoding_init(\n ModelHyperParams.max_length + 1, ModelHyperParams.d_model)\n if len(feed_dict_list) <= idx:\n feed_dict_list.append(pos_enc_tables)\n else:\n feed_dict_list[idx] = dict(\n list(pos_enc_tables.items()) + list(feed_dict_list[idx]\n .items()))\n return feed_dict_list if len(feed_dict_list) == count else None\n\n\ndef py_reader_provider_wrapper(data_reader, place):\n \"\"\"\n Data 
provider needed by fluid.layers.py_reader.\n \"\"\"\n\n def py_reader_provider():\n data_input_names = encoder_data_input_fields + \\\n decoder_data_input_fields[:-1] + label_data_input_fields\n for batch_id, data in enumerate(data_reader()):\n data_input_dict, num_token = prepare_batch_input(\n data, data_input_names, ModelHyperParams.eos_idx,\n ModelHyperParams.phone_pad_idx, ModelHyperParams.eos_idx,\n ModelHyperParams.n_head, ModelHyperParams.d_model)\n yield [data_input_dict[item] for item in data_input_names]\n\n return py_reader_provider\n\n\ndef test_context(exe, train_exe, dev_count):\n # Context to do validation.\n test_prog = fluid.Program()\n startup_prog = fluid.Program()\n if args.enable_ce:\n test_prog.random_seed = 1000\n startup_prog.random_seed = 1000\n with fluid.program_guard(test_prog, startup_prog):\n with fluid.unique_name.guard():\n sum_cost, avg_cost, predict, token_num, pyreader = transformer(\n ModelHyperParams.src_vocab_size,\n ModelHyperParams.trg_vocab_size,\n ModelHyperParams.max_length + 1,\n ModelHyperParams.n_layer,\n ModelHyperParams.n_head,\n ModelHyperParams.d_key,\n ModelHyperParams.d_value,\n ModelHyperParams.d_model,\n ModelHyperParams.d_inner_hid,\n ModelHyperParams.prepostprocess_dropout,\n ModelHyperParams.attention_dropout,\n ModelHyperParams.relu_dropout,\n ModelHyperParams.preprocess_cmd,\n ModelHyperParams.postprocess_cmd,\n ModelHyperParams.weight_sharing,\n TrainTaskConfig.label_smooth_eps,\n use_py_reader=args.use_py_reader,\n beta=ModelHyperParams.beta,\n is_test=True)\n test_prog = test_prog.clone(for_test=True)\n test_data = prepare_data_generator(\n args,\n is_test=True,\n count=dev_count,\n pyreader=pyreader,\n py_reader_provider_wrapper=py_reader_provider_wrapper)\n\n exe.run(startup_prog) # to init pyreader for testing\n if TrainTaskConfig.ckpt_path:\n fluid.io.load_persistables(\n exe, TrainTaskConfig.ckpt_path, main_program=test_prog)\n\n exec_strategy = fluid.ExecutionStrategy()\n 
exec_strategy.use_experimental_executor = True\n build_strategy = fluid.BuildStrategy()\n test_exe = fluid.ParallelExecutor(\n use_cuda=TrainTaskConfig.use_gpu,\n main_program=test_prog,\n build_strategy=build_strategy,\n exec_strategy=exec_strategy,\n share_vars_from=train_exe)\n\n def test(exe=test_exe, pyreader=pyreader):\n test_total_cost = 0\n test_total_token = 0\n\n if args.use_py_reader:\n pyreader.start()\n data_generator = None\n else:\n data_generator = test_data()\n while True:\n try:\n feed_dict_list = prepare_feed_dict_list(data_generator, False,\n dev_count)\n outs = test_exe.run(fetch_list=[sum_cost.name, token_num.name],\n feed=feed_dict_list)\n except (StopIteration, fluid.core.EOFException):\n # The current pass is over.\n if args.use_py_reader:\n pyreader.reset()\n break\n sum_cost_val, token_num_val = np.array(outs[0]), np.array(outs[1])\n test_total_cost += sum_cost_val.sum()\n test_total_token += token_num_val.sum()\n test_avg_cost = test_total_cost / test_total_token\n test_ppl = np.exp([min(test_avg_cost, 100)])\n return test_avg_cost, test_ppl\n\n return test\n\n\ndef train_loop(exe,\n train_prog,\n startup_prog,\n dev_count,\n sum_cost,\n avg_cost,\n token_num,\n predict,\n pyreader,\n nccl2_num_trainers=1,\n nccl2_trainer_id=0):\n # Initialize the parameters.\n if TrainTaskConfig.ckpt_path:\n exe.run(startup_prog) # to init pyreader for training\n logging.info(\"load checkpoint from {}\".format(\n TrainTaskConfig.ckpt_path))\n fluid.io.load_persistables(\n exe, TrainTaskConfig.ckpt_path, main_program=train_prog)\n else:\n logging.info(\"init fluid.framework.default_startup_program\")\n exe.run(startup_prog)\n\n logging.info(\"begin reader\")\n train_data = prepare_data_generator(\n args,\n is_test=False,\n count=dev_count,\n pyreader=pyreader,\n py_reader_provider_wrapper=py_reader_provider_wrapper)\n\n # For faster executor\n exec_strategy = fluid.ExecutionStrategy()\n exec_strategy.use_experimental_executor = True\n 
exec_strategy.num_iteration_per_drop_scope = int(args.fetch_steps)\n build_strategy = fluid.BuildStrategy()\n # Since the token number differs among devices, customize gradient scale to\n # use token average cost among multi-devices. and the gradient scale is\n # `1 / token_number` for average cost.\n # build_strategy.gradient_scale_strategy = fluid.BuildStrategy.GradientScaleStrategy.Customized\n\n logging.info(\"begin executor\")\n train_exe = fluid.ParallelExecutor(\n use_cuda=TrainTaskConfig.use_gpu,\n loss_name=avg_cost.name,\n main_program=train_prog,\n build_strategy=build_strategy,\n exec_strategy=exec_strategy,\n num_trainers=nccl2_num_trainers,\n trainer_id=nccl2_trainer_id)\n\n if args.val_file_pattern is not None:\n test = test_context(exe, train_exe, dev_count)\n\n # the best cross-entropy value with label smoothing\n loss_normalizer = -((1. - TrainTaskConfig.label_smooth_eps) * np.log(\n (1. - TrainTaskConfig.label_smooth_eps\n )) + TrainTaskConfig.label_smooth_eps *\n np.log(TrainTaskConfig.label_smooth_eps / (\n ModelHyperParams.trg_vocab_size - 1) + 1e-20))\n\n step_idx = 0\n init_flag = True\n\n logging.info(\"begin train\")\n for pass_id in six.moves.xrange(TrainTaskConfig.pass_num):\n pass_start_time = time.time()\n\n if args.use_py_reader:\n pyreader.start()\n data_generator = None\n else:\n data_generator = train_data()\n\n batch_id = 0\n while True:\n try:\n feed_dict_list = prepare_feed_dict_list(data_generator,\n init_flag, dev_count)\n outs = train_exe.run(\n fetch_list=[sum_cost.name, token_num.name]\n if step_idx % args.fetch_steps == 0 else [],\n feed=feed_dict_list)\n\n if step_idx % args.fetch_steps == 0:\n sum_cost_val, token_num_val = np.array(outs[0]), np.array(\n outs[1])\n # sum the cost from multi-devices\n total_sum_cost = sum_cost_val.sum()\n total_token_num = token_num_val.sum()\n total_avg_cost = total_sum_cost / total_token_num\n\n if step_idx == 0:\n logging.info(\n \"step_idx: %d, epoch: %d, batch: %d, avg loss: %f, \"\n 
\"normalized loss: %f, ppl: %f\" %\n (step_idx, pass_id, batch_id, total_avg_cost,\n total_avg_cost - loss_normalizer,\n np.exp([min(total_avg_cost, 100)])))\n avg_batch_time = time.time()\n else:\n logging.info(\n \"step_idx: %d, epoch: %d, batch: %d, avg loss: %f, \"\n \"normalized loss: %f, ppl: %f, speed: %.2f step/s\" %\n (step_idx, pass_id, batch_id, total_avg_cost,\n total_avg_cost - loss_normalizer, np.exp(\n [min(total_avg_cost, 100)]),\n args.fetch_steps / (time.time() - avg_batch_time)))\n avg_batch_time = time.time()\n\n if step_idx % TrainTaskConfig.save_freq == 0 and step_idx > 0:\n fluid.io.save_persistables(\n exe,\n os.path.join(TrainTaskConfig.ckpt_dir,\n \"latest.checkpoint\"), train_prog)\n fluid.io.save_params(\n exe,\n os.path.join(TrainTaskConfig.model_dir,\n \"iter_\" + str(step_idx) + \".infer.model\"),\n train_prog)\n\n init_flag = False\n batch_id += 1\n step_idx += 1\n except (StopIteration, fluid.core.EOFException):\n # The current pass is over.\n if args.use_py_reader:\n pyreader.reset()\n break\n\n time_consumed = time.time() - pass_start_time\n # Validate and save the persistable.\n if args.val_file_pattern is not None:\n val_avg_cost, val_ppl = test()\n logging.info(\n \"epoch: %d, val avg loss: %f, val normalized loss: %f, val ppl: %f,\"\n \" consumed %fs\" % (pass_id, val_avg_cost,\n val_avg_cost - loss_normalizer, val_ppl,\n time_consumed))\n else:\n logging.info(\"epoch: %d, consumed %fs\" % (pass_id, time_consumed))\n if not args.enable_ce:\n fluid.io.save_persistables(\n exe,\n os.path.join(TrainTaskConfig.ckpt_dir,\n \"pass_\" + str(pass_id) + \".checkpoint\"),\n train_prog)\n\n if args.enable_ce: # For CE\n print(\"kpis\\ttrain_cost_card%d\\t%f\" % (dev_count, total_avg_cost))\n if args.val_file_pattern is not None:\n print(\"kpis\\ttest_cost_card%d\\t%f\" % (dev_count, val_avg_cost))\n print(\"kpis\\ttrain_duration_card%d\\t%f\" % (dev_count, time_consumed))\n\n\ndef train(args):\n # priority: ENV > args > config\n is_local 
= os.getenv(\"PADDLE_IS_LOCAL\", \"1\")\n if is_local == '0':\n args.local = False\n logging.info(args)\n\n if args.device == 'CPU':\n TrainTaskConfig.use_gpu = False\n\n training_role = os.getenv(\"TRAINING_ROLE\", \"TRAINER\")\n\n if training_role == \"PSERVER\" or (not TrainTaskConfig.use_gpu):\n place = fluid.CPUPlace()\n dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))\n else:\n place = fluid.CUDAPlace(0)\n dev_count = fluid.core.get_cuda_device_count()\n\n exe = fluid.Executor(place)\n\n train_prog = fluid.Program()\n startup_prog = fluid.Program()\n\n if args.enable_ce:\n train_prog.random_seed = 1000\n startup_prog.random_seed = 1000\n\n with fluid.program_guard(train_prog, startup_prog):\n with fluid.unique_name.guard():\n sum_cost, avg_cost, predict, token_num, pyreader = transformer(\n ModelHyperParams.src_vocab_size,\n ModelHyperParams.trg_vocab_size,\n ModelHyperParams.phone_vocab_size,\n ModelHyperParams.max_length + 1,\n ModelHyperParams.n_layer,\n ModelHyperParams.n_head,\n ModelHyperParams.d_key,\n ModelHyperParams.d_value,\n ModelHyperParams.d_model,\n ModelHyperParams.d_inner_hid,\n ModelHyperParams.prepostprocess_dropout,\n ModelHyperParams.attention_dropout,\n ModelHyperParams.relu_dropout,\n ModelHyperParams.preprocess_cmd,\n ModelHyperParams.postprocess_cmd,\n ModelHyperParams.weight_sharing,\n TrainTaskConfig.label_smooth_eps,\n ModelHyperParams.beta,\n ModelHyperParams.bos_idx,\n use_py_reader=args.use_py_reader,\n is_test=False)\n\n optimizer = None\n if args.sync:\n lr_decay = fluid.layers.learning_rate_scheduler.noam_decay(\n ModelHyperParams.d_model, TrainTaskConfig.warmup_steps)\n logging.info(\"before adam\")\n\n with fluid.default_main_program()._lr_schedule_guard():\n learning_rate = lr_decay * TrainTaskConfig.learning_rate\n\n optimizer = fluid.optimizer.Adam(\n learning_rate=learning_rate,\n beta1=TrainTaskConfig.beta1,\n beta2=TrainTaskConfig.beta2,\n epsilon=TrainTaskConfig.eps)\n else:\n optimizer = 
fluid.optimizer.SGD(0.003)\n optimizer.minimize(avg_cost)\n\n if args.local:\n logging.info(\"local start_up:\")\n train_loop(exe, train_prog, startup_prog, dev_count, sum_cost, avg_cost,\n token_num, predict, pyreader)\n else:\n if args.update_method == \"nccl2\":\n trainer_id = int(os.getenv(\"PADDLE_TRAINER_ID\", \"0\"))\n port = os.getenv(\"PADDLE_PORT\")\n worker_ips = os.getenv(\"PADDLE_TRAINERS\")\n worker_endpoints = []\n for ip in worker_ips.split(\",\"):\n worker_endpoints.append(':'.join([ip, port]))\n trainers_num = len(worker_endpoints)\n current_endpoint = os.getenv(\"POD_IP\") + \":\" + port\n if trainer_id == 0:\n logging.info(\"train_id == 0, sleep 60s\")\n time.sleep(60)\n logging.info(\"trainers_num:{}\".format(trainers_num))\n logging.info(\"worker_endpoints:{}\".format(worker_endpoints))\n logging.info(\"current_endpoint:{}\".format(current_endpoint))\n append_nccl2_prepare(startup_prog, trainer_id, worker_endpoints,\n current_endpoint)\n train_loop(exe, train_prog, startup_prog, dev_count, sum_cost,\n avg_cost, token_num, predict, pyreader, trainers_num,\n trainer_id)\n return\n\n port = os.getenv(\"PADDLE_PORT\", \"6174\")\n pserver_ips = os.getenv(\"PADDLE_PSERVERS\") # ip,ip...\n eplist = []\n for ip in pserver_ips.split(\",\"):\n eplist.append(':'.join([ip, port]))\n pserver_endpoints = \",\".join(eplist) # ip:port,ip:port...\n trainers = int(os.getenv(\"PADDLE_TRAINERS_NUM\", \"0\"))\n current_endpoint = os.getenv(\"POD_IP\") + \":\" + port\n trainer_id = int(os.getenv(\"PADDLE_TRAINER_ID\"))\n\n logging.info(\"pserver_endpoints:{}\".format(pserver_endpoints))\n logging.info(\"current_endpoint:{}\".format(current_endpoint))\n logging.info(\"trainer_id:{}\".format(trainer_id))\n logging.info(\"pserver_ips:{}\".format(pserver_ips))\n logging.info(\"port:{}\".format(port))\n\n t = fluid.DistributeTranspiler()\n t.transpile(\n trainer_id,\n pservers=pserver_endpoints,\n trainers=trainers,\n program=train_prog,\n 
startup_program=startup_prog)\n\n if training_role == \"PSERVER\":\n logging.info(\"distributed: pserver started\")\n current_endpoint = os.getenv(\"POD_IP\") + \":\" + os.getenv(\n \"PADDLE_PORT\")\n if not current_endpoint:\n logging.critical(\"need env SERVER_ENDPOINT\")\n exit(1)\n pserver_prog = t.get_pserver_program(current_endpoint)\n pserver_startup = t.get_startup_program(current_endpoint,\n pserver_prog)\n\n exe.run(pserver_startup)\n exe.run(pserver_prog)\n elif training_role == \"TRAINER\":\n logging.info(\"distributed: trainer started\")\n trainer_prog = t.get_trainer_program()\n\n train_loop(exe, train_prog, startup_prog, dev_count, sum_cost,\n avg_cost, token_num, predict, pyreader)\n else:\n logging.critical(\n \"environment var TRAINER_ROLE should be TRAINER os PSERVER\")\n exit(1)\n\n\nif __name__ == \"__main__\":\n LOG_FORMAT = \"[%(asctime)s %(levelname)s %(filename)s:%(lineno)d] %(message)s\"\n logging.basicConfig(\n stream=sys.stdout, level=logging.DEBUG, format=LOG_FORMAT)\n logging.getLogger().setLevel(logging.INFO)\n\n args = parse_args()\n train(args)\n", "# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"bert model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport sys\nimport six\nimport logging\nimport numpy as np\nimport paddle.fluid as fluid\nfrom paddle.fluid.layers import shape\n\nfrom model.transformer_encoder import encoder, pre_process_layer\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', \n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogging.getLogger().setLevel(logging.INFO) \nlogger = logging.getLogger(__name__)\n\ndef dynamic_expand(dynamic_tensor, smaller_tensor):\n \"\"\"\n :param dynamic_tensor:\n :param smaller_tensor:\n :return:\n \"\"\"\n assert len(dynamic_tensor.shape) > len(smaller_tensor.shape)\n if type(smaller_tensor.shape) == list:\n for dim_idx, dim in smaller_tensor.shape:\n dynamic_tensor_dim_idx = len(dynamic_tensor) - len(smaller_tensor) + dim_idx\n assert dynamic_tensor.shape[dynamic_tensor_dim_idx] % dim == 0\n elif type(smaller_tensor.shape) == int:\n assert dynamic_tensor.shape[-1] % smaller_tensor.shape == 0\n memory_embs_zero = fluid.layers.scale(dynamic_tensor, scale=0.0)\n smaller_tensor = fluid.layers.elementwise_add(memory_embs_zero, smaller_tensor)\n return smaller_tensor\n\n\ndef print_tensor(tensor, message, print_runtime=False):\n logger.info(\"{}: {}\".format(message, tensor.shape))\n if 
print_runtime:\n fluid.layers.Print(tensor, summarize=10, message=message)\n\n\nclass MemoryLayer(object):\n def __init__(self, bert_config, concept_size, mem_emb_size, mem_method='cat', prefix=None):\n self.initializer_range = bert_config['initializer_range']\n self.bert_size = bert_config['hidden_size']\n self.concept_size = concept_size\n self.mem_emb_size = mem_emb_size\n assert mem_method in ['add', 'cat', 'raw']\n self.mem_method = mem_method\n self.prefix = prefix\n\n def forward(self, bert_output, memory_embs, mem_length, ignore_no_memory_token=True):\n \"\"\"\n :param bert_output: [batch_size, seq_size, bert_size]\n :param memory_embs: [batch_size, seq_size, concept_size, mem_emb_size]\n :param mem_length: [batch_size, sent_size, 1]\n :return: \n \"\"\"\n\n bert_size = self.bert_size\n concept_size = self.concept_size\n mem_emb_size = self.mem_emb_size\n\n print_tensor(bert_output, \"bert_output\")\n print_tensor(memory_embs, \"memory_embs\")\n print_tensor(mem_length, \"mem_length\")\n\n \n projected_bert = fluid.layers.fc(bert_output, size=mem_emb_size, num_flatten_dims=2,\n param_attr=fluid.ParamAttr(\n name='{}_memory_layer_projection.w_0'.format(self.prefix) if self.prefix else 'memory_layer_projection.w_0',\n initializer=fluid.initializer.NormalInitializer(\n loc=0.0, scale=self.initializer_range)),\n bias_attr=False) # [batch_size *seq_size, mem_emb_size]\n logger.info(\"projected_bert: {}\".format(projected_bert.shape))\n\n expanded_bert = fluid.layers.unsqueeze(projected_bert, axes=[2]) # [batch_size, seq_size, 1, mem_emb_size]\n\n \n extended_memory, memory_score = self.add_sentinel(expanded_bert, memory_embs, mem_emb_size)\n # extended_memory: [batch_size, seq_size, 1+concept_size, mem_emb_size]\n # memory_score: [batch_size, seq_size, 1+concept_size]\n\n\n concept_ordinal = self.get_concept_oridinal(concept_size, memory_score) # [bs,sq,1+cs]\n\n memory_reverse_mask = fluid.layers.less_than(\n fluid.layers.expand(mem_length, expand_times=[1, 1, 
1 + concept_size])\n , concept_ordinal)\n # [batch_size, seq_size, 1+concept_size]\n memory_reverse_mask = fluid.layers.cast(memory_reverse_mask, dtype=\"float32\")\n print_tensor(memory_reverse_mask, \"memory_reverse_mask\")\n\n memory_reverse_masked_infinity = fluid.layers.scale(memory_reverse_mask, scale=-1e6)\n # [batch_size, seq_size, 1+concept_size]\n print_tensor(memory_reverse_masked_infinity, \"memory_reverse_masked_infinity\")\n\n memory_score = fluid.layers.elementwise_add(memory_score, memory_reverse_masked_infinity)\n # [batch_size, seq_size, 1+concept_size]\n logger.info(\"memory_score:{}\".format(memory_score.shape))\n\n memory_att = fluid.layers.softmax(memory_score) # [batch_size, seq_size, 1+concept_size]\n memory_att = fluid.layers.unsqueeze(memory_att, axes=[2]) # [batch_size, seq_size, 1, 1+concept_size]\n logger.info(\"memory_att: {}\".format(memory_att.shape))\n logger.info(\"extended_memory: {}\".format(extended_memory.shape))\n summ = fluid.layers.matmul(memory_att,extended_memory) # [batch_size, seq_size,1, mem_emb_size]\n summ = fluid.layers.squeeze(summ, axes=[2]) # [batch_size, seq_size,mem_emb_size]\n\n if ignore_no_memory_token:\n condition = fluid.layers.less_than(\n dynamic_expand(mem_length, fluid.layers.zeros([1],\"float32\")),\n mem_length) # [bs, sq]\n # summ_true = fluid.layers.elementwise_mul(\n # summ,\n # fluid.layers.cast(condition, \"float32\")) # [bs, sq, ms]\n # summ_false = fluid.layers.elementwise_mul(\n # summ,\n # fluid.layers.scale(fluid.layers.cast(condition, \"float32\"), -1)) # [bs, sq, ms]\n # summ = fluid.layers.elementwise_add(summ_true, summ_false) # [bs, sq, ms]\n summ = fluid.layers.elementwise_mul(\n summ,\n fluid.layers.cast(condition, \"float32\")) # [bs, sq, ms]\n\n print_tensor(summ, \"summ\")\n\n if self.mem_method == \"add\":\n summ_transform = fluid.layers.fc(summ, size=bert_size, num_flatten_dims=2) # [batch_size, seq_size, bert_size]\n output = fluid.layers.sums(input=[summ_transform, 
bert_output]) # [batch_size, seq_size, bert_size]\n elif self.mem_method == \"cat\":\n logger.info(\"bert_output: {}\".format(bert_output.shape))\n logger.info(\"summ: {}\".format(summ.shape))\n output = fluid.layers.concat(input=[bert_output, summ], axis=2) # [batch_size, seq_size, bert_size + mem_emb_size]\n elif self.mem_method == \"raw\":\n logger.info(\"bert_output: {}\".format(bert_output.shape))\n logger.info(\"summ: {}\".format(summ.shape))\n output = summ # [batch_size, seq_size, mem_emb_size]\n else:\n raise ValueError(\"mem_method not supported\")\n logger.info(\"output: {}\".format(output.shape))\n return output\n\n def get_concept_oridinal(self, concept_size, memory_score):\n \"\"\"\n\n :param concept_size:\n :param memory_score: [batch_size, seq_size, 1+concept_size]\n :return:\n \"\"\"\n concept_ordinal = fluid.layers.create_tensor(dtype=\"float32\")\n fluid.layers.assign(np.arange(start=0, stop=(1 + concept_size), step=1, dtype=np.float32),\n concept_ordinal) # [1+cs]\n print_tensor(concept_ordinal, \"concept_ordinal\")\n print_tensor(memory_score, \"memory_score\")\n\n concept_ordinal = dynamic_expand(memory_score, concept_ordinal) # [bs,sq,1+cs]\n\n logger.info(\"concept_ordinal: {}\".format(concept_ordinal.shape))\n return concept_ordinal\n\n def add_sentinel(self, expanded_bert, memory_embs, mem_emb_size):\n \"\"\"\n\n :param expanded_bert: [batch_size, seq_size, 1, mem_emb_size]\n :param memory_embs: [batch_size, seq_size, concept_size, mem_emb_size]\n :param mem_emb_size:\n :return:\n \"\"\"\n sentinel = fluid.layers.create_parameter(\n name='{}_memory_layer_sentinel'.format(self.prefix) if self.prefix else 'memory_layer_sentinel',\n dtype=\"float32\",\n shape=[mem_emb_size],\n default_initializer=fluid.initializer.ConstantInitializer(0)) # [mem_emb_size]\n print_tensor(sentinel, \"sentinel\")\n\n memory_embs_squeeze = fluid.layers.slice(memory_embs, axes=[2], starts=[0],\n ends=[1]) # [bs,sq,1,ms]\n print_tensor(memory_embs_squeeze, 
\"memory_embs_squeeze\")\n\n sentinel = dynamic_expand(memory_embs_squeeze, sentinel) # [bs,sq,1,ms]\n print_tensor(sentinel, \"sentinel\")\n print_tensor(memory_embs, \"memory_embs\")\n\n extended_memory = fluid.layers.concat([sentinel, memory_embs],\n axis=2) # [batch_size, seq_size, 1+concept_size, mem_emb_size]\n extended_memory = fluid.layers.transpose(extended_memory, perm=[0, 1, 3, 2])\n # [batch_size, seq_size, mem_emb_size, 1+concept_size]\n logger.info(\"extended_memory: {}\".format(extended_memory.shape))\n memory_score = fluid.layers.matmul(expanded_bert,\n extended_memory) # [batch_size, seq_size, 1, 1+concept_size]\n memory_score = fluid.layers.squeeze(memory_score, axes=[2])\n # [batch_size, seq_size, 1+concept_size]\n extended_memory = fluid.layers.transpose(extended_memory, perm=[0, 1, 3, 2])\n # [batch_size, seq_size, 1+concept_size, mem_emb_size]\n return extended_memory, memory_score\n\n\nclass TriLinearTwoTimeSelfAttentionLayer(object):\n def __init__(self, hidden_size, dropout_rate=0.0,\n cat_mul=False, cat_sub=False, cat_twotime=False, cat_twotime_mul=False, cat_twotime_sub=False):\n self.hidden_size = hidden_size\n self.dropout_rate = dropout_rate\n self.cat_mul = cat_mul\n self.cat_sub = cat_sub\n self.cat_twotime = cat_twotime\n self.cat_twotime_mul = cat_twotime_mul\n self.cat_twotime_sub = cat_twotime_sub\n\n def forward(self, hidden_emb, sequence_mask):\n \"\"\"\n :param hidden_emb: [batch_size, seq_size, hidden_size]\n :param sequence_mask: [batch_size, seq_size, 1]\n :return:\n \"\"\"\n assert len(hidden_emb.shape) ==3 and len(sequence_mask.shape) == 3 \\\n and sequence_mask.shape[-1] == 1\n assert hidden_emb.shape[:2] == sequence_mask.shape[:2] \n\n hidden_size = self.hidden_size\n\n bias = fluid.layers.create_parameter(name='self_matching_layer_bias', shape=[1], dtype=\"float32\",\n default_initializer=fluid.initializer.ConstantInitializer(0))\n\n weight_1 = fluid.layers.create_parameter(name='self_matching_layer_weight1', 
shape=[hidden_size], dtype=\"float32\",\n default_initializer=fluid.initializer.XavierInitializer(uniform=True, fan_in=1, fan_out=hidden_size)) # [HS]\n bs_1_hs = fluid.layers.slice(hidden_emb, axes=[1], starts=[0], ends=[1]) # [bs, 1, hs]\n print_tensor(bs_1_hs, \"bs_1_hs\")\n bs_hs_1 = fluid.layers.transpose(bs_1_hs, perm=[0, 2, 1]) # [bs, hs, 1]\n print_tensor(bs_hs_1, \"bs_hs_1\")\n print_tensor(weight_1, \"weight_1\")\n weight_1 = dynamic_expand(bs_1_hs, weight_1) # [BS, 1, HS] (a)jk\n weight_1 = fluid.layers.transpose(weight_1, perm=[0, 2, 1])\n print_tensor(hidden_emb, \"hidden_emb\")\n print_tensor(weight_1, \"weight_1\")\n r1 = fluid.layers.matmul(hidden_emb, weight_1) # [BS, SQ, 1] aik\n print_tensor(r1, \"r1\")\n\n weight_2 = fluid.layers.create_parameter(name='self_matching_layer_weight2', shape=[hidden_size], dtype=\"float32\",\n default_initializer=fluid.initializer.XavierInitializer(uniform=True, fan_in=1, fan_out=hidden_size)) # [HS]\n weight_2 = dynamic_expand(bs_1_hs, weight_2) # # [BS, 1, HS] (a)jk\n hidden_emb_transpose = fluid.layers.transpose(hidden_emb, perm=[0, 2, 1]) # [BS, HS, SQ] aji\n r2 = fluid.layers.matmul(weight_2, hidden_emb_transpose) # [BS, 1, SQ] aki\n print_tensor(r2, \"r2\")\n\n weight_mul = fluid.layers.create_parameter(name='self_matching_layer_weightmul', shape=[hidden_size], dtype=\"float32\",\n default_initializer=fluid.initializer.XavierInitializer(uniform=True)) # [HS]\n\n \n weight_mul = dynamic_expand(hidden_emb, weight_mul)\n rmul_1 = fluid.layers.elementwise_mul(hidden_emb, weight_mul) # for \"hidden * self.weight_mul\". 
[bs, sq(i), hs(j)]\n print_tensor(rmul_1, \"rmul_1\")\n rmul_2 = fluid.layers.matmul(rmul_1, hidden_emb_transpose) # [bs, sq(i), hs(j)] mul [bs, hs(j), sq(k)] = [bs, sq(i), sq(k)]\n print_tensor(rmul_2, \"rmul_2\")\n\n r1 = fluid.layers.squeeze(r1, axes=[2]) # [BS, SQ] aik\n r1 = dynamic_expand(\n fluid.layers.transpose(rmul_2, [1, 0, 2]), # [sq, bs, sq]\n r1) # [ SQ(from 1), bs, SQ]\n r1 = fluid.layers.transpose(r1, [1, 2, 0]) # [bs, sq, sq(from 1)]\n\n r2 = fluid.layers.squeeze(r2, axes=[1]) # [BS, SQ] aik\n r2 = dynamic_expand(\n fluid.layers.transpose(rmul_2, [1, 0, 2]), # [sq, bs, sq]\n r2) # [ SQ(from 1), bs, SQ]\n r2 = fluid.layers.transpose(r2, [1, 0, 2]) # [bs,sq(from 1),sq]\n\n bias = dynamic_expand(rmul_2, bias) # [BS, SQ, SQ]\n sim_score = fluid.layers.sums(input=[r1, r2, rmul_2, bias])\n # [bs,sq,1]+[bs,1,sq]+[bs,sq,sq]+[bs,sq,sq]=[BS,SQ,SQ]\n print_tensor(sim_score, \"sim_score\")\n\n sequence_mask = fluid.layers.cast(sequence_mask, dtype=\"float32\") # [BS,SQ,1]\n softmax_mask = fluid.layers.elementwise_sub(\n sequence_mask,\n fluid.layers.fill_constant([1], \"float32\", 1)) # [BS,SQ,1]\n softmax_mask = fluid.layers.scale(softmax_mask, -1)\n very_negative_number = fluid.layers.fill_constant([1], value=-1e6, dtype=\"float32\")\n logger.info(\"softmax_mask: {}\".format(softmax_mask.shape))\n logger.info(\"very_negative_number: {}\".format(very_negative_number.shape))\n\n softmax_mask = fluid.layers.elementwise_mul(softmax_mask, very_negative_number) # [BS,SQ,1]\n\n softmax_mask = fluid.layers.squeeze(softmax_mask, axes=[2]) # [BS,SQ]\n softmax_mask = dynamic_expand(fluid.layers.transpose(sim_score, perm=[2, 0, 1]), softmax_mask) # [sq(1),bs,sq]\n softmax_mask = fluid.layers.transpose(softmax_mask, perm=[1, 0, 2]) # [BS,sq(1),SQ]\n print_tensor(softmax_mask, \"softmax_mask\")\n sim_score = fluid.layers.elementwise_add(sim_score, softmax_mask) # [bs,sq,sq]+[bs,sq(1),sq]=[BS,SQ,SQ]\n print_tensor(sim_score, \"sim_score\")\n\n attn_prob = 
fluid.layers.softmax(sim_score) # [BS,SQ,SQ]\n weighted_sum = fluid.layers.matmul(attn_prob, hidden_emb) # [bs,sq,sq]*[bs,sq,hs]=[BS,SQ,HS]\n if any([self.cat_twotime, self.cat_twotime_mul, self.cat_twotime_sub]):\n twotime_att_prob = fluid.layers.matmul(attn_prob, attn_prob) # [bs,sq,sq]*[bs,sq,sq]=[BS,SQ,SQ]\n twotime_weited_sum = fluid.layers.matmul(twotime_att_prob, hidden_emb) # [BS,SQ,HS]\n\n out_tensors = [hidden_emb, weighted_sum]\n if self.cat_mul:\n out_tensors.append(fluid.layers.elementwise_mul(hidden_emb, weighted_sum))\n if self.cat_sub:\n out_tensors.append(fluid.layers.elementwise_sub(hidden_emb, weighted_sum))\n if self.cat_twotime:\n out_tensors.append(twotime_weited_sum)\n if self.cat_twotime_mul:\n out_tensors.append(fluid.layers.elementwise_mul(hidden_emb, twotime_weited_sum))\n if self.cat_twotime_sub:\n out_tensors.append(fluid.layers.elementwise_sub(hidden_emb, twotime_weited_sum))\n output = fluid.layers.concat(out_tensors, axis=2) # [BS,SQ, HS+HS+....]\n print_tensor(output, \"output\")\n return output\n\n\n\n" ]
[ [ "numpy.concatenate", "numpy.zeros", "numpy.ones", "numpy.triu", "numpy.diag", "numpy.tril" ], [ "numpy.random.seed", "numpy.sum", "numpy.array", "numpy.isnan" ], [ "numpy.array", "numpy.asarray", "numpy.reshape", "numpy.zeros", "numpy.log", "numpy.ones", "numpy.tile", "numpy.triu" ], [ "numpy.arange" ] ]
PulkitSinghDev/Black-Hole-Classifier
[ "41808776f9852e6ef1a6646e6b40e4cb858c0944" ]
[ "TF_(source_code)/Keras/main.py" ]
[ "from keras.models import load_model\nfrom PIL import Image, ImageOps\nimport numpy as np\n\n# Load the model\nmodel = load_model('keras_model.h5')\n\n# Create the array of the right shape to feed into the keras model\n# The 'length' or number of images you can put into the array is\n# determined by the first position in the shape tuple, in this case 1.\ndata = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\n# Replace this with the path to your image\nimage = Image.open('<IMAGE_PATH>')\n#resize the image to a 224x224 with the same strategy as in TM2:\n#resizing the image to be at least 224x224 and then cropping from the center\nsize = (224, 224)\nimage = ImageOps.fit(image, size, Image.ANTIALIAS)\n\n#turn the image into a numpy array\nimage_array = np.asarray(image)\n# Normalize the image\nnormalized_image_array = (image_array.astype(np.float32) / 127.0) - 1\n# Load the image into the array\ndata[0] = normalized_image_array\n\n# run the inference\nprediction = model.predict(data)\nprint(prediction)\n" ]
[ [ "numpy.ndarray", "numpy.asarray" ] ]
jlinkemeyer/MLinPractice
[ "ef96888ef0565b84fece4283fe8dd8dc809c5e99" ]
[ "code/feature_extraction/cap_words_count.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nFeature that counts the number of capitalized words within a tweet.\n\nCreated on Fri Oct 8 12:51:27 2021\n\n@author: jlinkemeyer\n\"\"\"\n\nimport numpy as np\nfrom code.feature_extraction.feature_extractor import FeatureExtractor\nfrom code.util import TOKEN_DELIMITER, ALL_CAPS_EXCLUDE_LIST\n\nclass CapitalizedWordsCount(FeatureExtractor):\n \"\"\"Class for extracting the number of capitalized words in a tweet\"\"\"\n \n # constructor\n def __init__(self, input_column):\n \"\"\"Initialize CapitalizedWordsCount with the given input and output column.\"\"\"\n \n super().__init__([input_column], \"{0}_cap_words_count\".format(input_column))\n \n # don't need to fit, so don't overwrite _set_variables()\n \n def _get_values(self, inputs):\n \"\"\"Compute the number of capitalized words based on the 'tweet_tokenized' string provided after preprocessing\"\"\"\n \n # tokenized tweet is provided as a string\n result = []\n for tokenized_tweet in inputs[0]:\n \n # from the provided tokenized string, create a list of tokens\n tokens_list = tokenized_tweet[1:-1].split(TOKEN_DELIMITER)\n \n # iterate over the tokens to count the number of all fully \n # capitalized words\n number_cap_words = sum([1 for token in tokens_list if token.isupper() and not token in ALL_CAPS_EXCLUDE_LIST])\n result.append(number_cap_words)\n \n result = np.array(result).reshape(-1,1)\n return result\n" ]
[ [ "numpy.array" ] ]
vishalbelsare/jina
[ "ae72cc5ce1f7e7f4c662e72e96ea21dddc28bf43", "ae72cc5ce1f7e7f4c662e72e96ea21dddc28bf43" ]
[ "tests/unit/types/arrays/mixins/test_content.py", "jina/math/distance/numpy.py" ]
[ "import numpy as np\nimport pytest\n\nfrom jina import DocumentArray, DocumentArrayMemmap\n\n\n@pytest.mark.parametrize('cls', [DocumentArray, DocumentArrayMemmap])\n@pytest.mark.parametrize(\n 'content_attr', ['texts', 'embeddings', 'blobs', 'buffers', 'contents']\n)\ndef test_content_empty_getter_return_none(cls, content_attr):\n da = cls()\n assert getattr(da, content_attr) is None\n\n\n@pytest.mark.parametrize('cls', [DocumentArray, DocumentArrayMemmap])\n@pytest.mark.parametrize(\n 'content_attr',\n [\n ('texts', ''),\n ('embeddings', np.array([])),\n ('blobs', np.array([])),\n ('buffers', []),\n ('contents', []),\n ],\n)\ndef test_content_empty_setter(cls, content_attr):\n da = cls()\n setattr(da, content_attr[0], content_attr[1])\n assert getattr(da, content_attr[0]) is None\n\n\n@pytest.mark.parametrize('cls', [DocumentArray, DocumentArrayMemmap])\n@pytest.mark.parametrize(\n 'content_attr',\n [\n ('texts', ['s'] * 10),\n ('blobs', np.random.random([10, 2])),\n ('buffers', [b's'] * 10),\n ],\n)\ndef test_content_getter_setter(cls, content_attr):\n da = cls.empty(10)\n setattr(da, content_attr[0], content_attr[1])\n np.testing.assert_equal(da.contents, content_attr[1])\n da.contents = content_attr[1]\n np.testing.assert_equal(da.contents, content_attr[1])\n np.testing.assert_equal(getattr(da, content_attr[0]), content_attr[1])\n da.contents = None\n assert da.contents is None\n", "from typing import TYPE_CHECKING\n\nimport numpy as np\n\nif TYPE_CHECKING:\n from ...types.ndarray import ArrayType\n\n\ndef cosine(x_mat: 'np.ndarray', y_mat: 'np.ndarray', eps: float = 1e-7) -> 'np.ndarray':\n \"\"\"Cosine distance between each row in x_mat and each row in y_mat.\n\n :param x_mat: np.ndarray with ndim=2\n :param y_mat: np.ndarray with ndim=2\n :param eps: a small jitter to avoid divde by zero\n :return: np.ndarray with ndim=2\n \"\"\"\n return 1 - np.clip(\n (np.dot(x_mat, y_mat.T) + eps)\n / (\n np.outer(np.linalg.norm(x_mat, axis=1), np.linalg.norm(y_mat, 
axis=1)) + eps\n ),\n -1,\n 1,\n )\n\n\ndef sqeuclidean(x_mat: 'np.ndarray', y_mat: 'np.ndarray') -> 'np.ndarray':\n \"\"\"Squared Euclidean distance between each row in x_mat and each row in y_mat.\n :param x_mat: np.ndarray with ndim=2\n :param y_mat: np.ndarray with ndim=2\n :return: np.ndarray with ndim=2\n \"\"\"\n return (\n np.sum(y_mat ** 2, axis=1)\n + np.sum(x_mat ** 2, axis=1)[:, np.newaxis]\n - 2 * np.dot(x_mat, y_mat.T)\n )\n\n\ndef sparse_cosine(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':\n \"\"\"Cosine distance between each row in x_mat and each row in y_mat.\n :param x_mat: scipy.sparse like array with ndim=2\n :param y_mat: scipy.sparse like array with ndim=2\n :return: np.ndarray with ndim=2\n \"\"\"\n from scipy.sparse.linalg import norm\n\n # we need the np.asarray otherwise we get a np.matrix object that iterates differently\n return 1 - np.clip(\n np.asarray(\n x_mat.dot(y_mat.T) / (np.outer(norm(x_mat, axis=1), norm(y_mat, axis=1)))\n ),\n -1,\n 1,\n )\n\n\ndef sparse_sqeuclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':\n \"\"\"Cosine distance between each row in x_mat and each row in y_mat.\n\n :param x_mat: scipy.sparse like array with ndim=2\n :param y_mat: scipy.sparse like array with ndim=2\n :return: np.ndarray with ndim=2\n \"\"\"\n # we need the np.asarray otherwise we get a np.matrix object that iterates differently\n return np.asarray(\n y_mat.power(2).sum(axis=1).flatten()\n + x_mat.power(2).sum(axis=1)\n - 2 * x_mat.dot(y_mat.T)\n )\n\n\ndef sparse_euclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':\n \"\"\"Sparse euclidean distance between each row in x_mat and each row in y_mat.\n\n :param x_mat: scipy.sparse like array with ndim=2\n :param y_mat: scipy.sparse like array with ndim=2\n :return: np.ndarray with ndim=2\n \"\"\"\n return np.sqrt(sparse_sqeuclidean(x_mat, y_mat))\n\n\ndef euclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':\n \"\"\"Euclidean distance between 
each row in x_mat and each row in y_mat.\n\n :param x_mat: scipy.sparse like array with ndim=2\n :param y_mat: scipy.sparse like array with ndim=2\n :return: np.ndarray with ndim=2\n \"\"\"\n return np.sqrt(sqeuclidean(x_mat, y_mat))\n" ]
[ [ "numpy.random.random", "numpy.array", "numpy.testing.assert_equal" ], [ "numpy.sum", "scipy.sparse.linalg.norm", "numpy.dot", "numpy.linalg.norm" ] ]
SJTU-IPADS/fgnn-artifacts
[ "5c73564e4a9bd5deeff7eed0b923c115ccba34d7" ]
[ "example/samgraph/train_pinsage.py" ]
[ "import argparse\nimport time\nimport torch\nimport sys\nimport torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport dgl.function as fn\nimport torch.optim as optim\nimport numpy as np\n\nimport samgraph.torch as sam\nfrom common_config import *\n\n\"\"\"\n We have made the following modification(or say, simplification) on PinSAGE,\n because we only want to focus on the core algorithm of PinSAGE:\n 1. we modify PinSAGE to make it be able to be trained on homogenous graph.\n 2. we use cross-entropy loss instead of max-margin ranking loss describe in the paper.\n\"\"\"\n\n\nclass WeightedSAGEConv(nn.Module):\n def __init__(self, input_dims, hidden_dims, output_dims, dropout, act=F.relu):\n super().__init__()\n\n self.act = act\n self.Q = nn.Linear(input_dims, hidden_dims)\n self.W = nn.Linear(input_dims + hidden_dims, output_dims)\n self.reset_parameters()\n self.dropout = nn.Dropout(dropout)\n\n def reset_parameters(self):\n gain = nn.init.calculate_gain('relu')\n nn.init.xavier_uniform_(self.Q.weight, gain=gain)\n nn.init.xavier_uniform_(self.W.weight, gain=gain)\n nn.init.constant_(self.Q.bias, 0)\n nn.init.constant_(self.W.bias, 0)\n\n def forward(self, g, h, weights):\n \"\"\"\n g : graph\n h : node features\n weights : scalar edge weights\n \"\"\"\n h_src, h_dst = h\n with g.local_scope():\n g.srcdata['n'] = self.act(self.Q(self.dropout(h_src)))\n g.edata['w'] = weights.float()\n g.update_all(fn.u_mul_e('n', 'w', 'm'), fn.sum('m', 'n'))\n g.update_all(fn.copy_e('w', 'm'), fn.sum('m', 'ws'))\n n = g.dstdata['n']\n ws = g.dstdata['ws'].unsqueeze(1).clamp(min=1)\n z = self.act(self.W(self.dropout(torch.cat([n / ws, h_dst], 1))))\n z_norm = z.norm(2, 1, keepdim=True)\n z_norm = torch.where(\n z_norm == 0, torch.tensor(1.).to(z_norm), z_norm)\n z = z / z_norm\n return z\n\n\nclass PinSAGE(nn.Module):\n def __init__(self,\n in_feats,\n n_hidden,\n n_classes,\n n_layers,\n activation,\n dropout):\n super().__init__()\n self.n_layers = 
n_layers\n self.n_hidden = n_hidden\n self.n_classes = n_classes\n self.layers = nn.ModuleList()\n\n self.layers.append(WeightedSAGEConv(\n in_feats, n_hidden, n_hidden, dropout, activation))\n for _ in range(1, n_layers - 1):\n self.layers.append(WeightedSAGEConv(\n n_hidden, n_hidden, n_hidden, dropout, activation))\n self.layers.append(WeightedSAGEConv(\n n_hidden, n_hidden, n_classes, dropout, activation))\n\n def forward(self, blocks, h):\n for layer, block in zip(self.layers, blocks):\n h_dst = h[:block.number_of_nodes('DST/' + block.ntypes[0])]\n h = layer(block, (h, h_dst), block.edata['weights'])\n return h\n\n\ndef parse_args(default_run_config):\n argparser = argparse.ArgumentParser(\"PinSAGE Training\")\n\n add_common_arguments(argparser, default_run_config)\n\n argparser.add_argument('--random-walk-length', type=int,\n default=default_run_config['random_walk_length'])\n argparser.add_argument('--random-walk-restart-prob',\n type=float, default=default_run_config['random_walk_restart_prob'])\n argparser.add_argument('--num-random-walk', type=int,\n default=default_run_config['num_random_walk'])\n argparser.add_argument('--num-neighbor', type=int,\n default=default_run_config['num_neighbor'])\n argparser.add_argument('--num-layer', type=int,\n default=default_run_config['num_layer'])\n\n argparser.add_argument(\n '--lr', type=float, default=default_run_config['lr'])\n argparser.add_argument('--dropout', type=float,\n default=default_run_config['dropout'])\n\n return vars(argparser.parse_args())\n\n\ndef get_run_config():\n run_config = {}\n\n run_config.update(get_default_common_config())\n run_config['arch'] = 'arch3'\n run_config['sample_type'] = 'random_walk'\n\n run_config['random_walk_length'] = 3\n run_config['random_walk_restart_prob'] = 0.5\n run_config['num_random_walk'] = 4\n run_config['num_neighbor'] = 5\n run_config['num_layer'] = 3\n\n run_config['lr'] = 0.003\n run_config['dropout'] = 0.5\n\n run_config.update(parse_args(run_config))\n\n 
process_common_config(run_config)\n assert(run_config['arch'] != 'arch5')\n assert(run_config['sample_type'] == 'random_walk')\n\n print_run_config(run_config)\n\n if run_config['validate_configs']:\n sys.exit()\n\n return run_config\n\n\ndef run():\n run_config = get_run_config()\n\n sam.config(run_config)\n sam.init()\n\n train_device = th.device(run_config['trainer_ctx'])\n\n in_feat = sam.feat_dim()\n num_class = sam.num_class()\n num_layer = run_config['num_layer']\n\n model = PinSAGE(in_feat, run_config['num_hidden'], num_class,\n num_layer, F.relu, run_config['dropout'])\n model = model.to(train_device)\n\n loss_fcn = nn.CrossEntropyLoss()\n loss_fcn.to(train_device)\n optimizer = optim.Adam(model.parameters(), lr=run_config['lr'])\n\n num_epoch = sam.num_epoch()\n num_step = sam.steps_per_epoch()\n\n model.train()\n\n epoch_sample_times = [0 for i in range(num_epoch)]\n epoch_copy_times = [0 for i in range(num_epoch)]\n epoch_convert_times = [0 for i in range(num_epoch)]\n epoch_train_times = [0 for i in range(num_epoch)]\n epoch_total_times = [0 for i in range(num_epoch)]\n\n # sample_times = [0 for i in range(num_epoch * num_step)]\n # copy_times = [0 for i in range(num_epoch * num_step)]\n # convert_times = [0 for i in range(num_epoch * num_step)]\n # train_times = [0 for i in range(num_epoch * num_step)]\n # total_times = [0 for i in range(num_epoch * num_step)]\n # num_nodes = [0 for i in range(num_epoch * num_step)]\n # num_samples = [0 for i in range(num_epoch * num_step)]\n\n cur_step_key = 0\n for epoch in range(num_epoch):\n for step in range(num_step):\n t0 = time.time()\n sam.trace_step_begin_now(\n epoch * num_step + step, sam.kL0Event_Train_Step)\n if not run_config['pipeline']:\n sam.sample_once()\n elif epoch + step == 0:\n sam.start()\n batch_key = sam.get_next_batch()\n t1 = time.time()\n sam.trace_step_begin_now(batch_key, sam.kL1Event_Convert)\n blocks, batch_input, batch_label = sam.get_dgl_blocks_with_weights(\n batch_key, num_layer)\n 
t2 = time.time()\n sam.trace_step_end_now(batch_key, sam.kL1Event_Convert)\n sam.trace_step_begin_now(batch_key, sam.kL1Event_Train)\n batch_pred = model(blocks, batch_input)\n loss = loss_fcn(batch_pred, batch_label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # wait for the train finish then we can free the data safely\n event_sync()\n\n batch_input = None\n batch_label = None\n blocks = None\n\n sam.trace_step_end_now(batch_key, sam.kL1Event_Train)\n t3 = time.time()\n sam.trace_step_end_now(\n epoch * num_step + step, sam.kL0Event_Train_Step)\n\n # sample_time = sam.get_log_step_value(epoch, step, sam.kLogL1SampleTime)\n # copy_time = sam.get_log_step_value(epoch, step, sam.kLogL1CopyTime)\n convert_time = t2 - t1\n train_time = t3 - t2\n total_time = t3 - t0\n\n # num_node = sam.get_log_step_value(epoch, step, sam.kLogL1NumNode)\n # num_sample = sam.get_log_step_value(epoch, step, sam.kLogL1NumSample)\n\n sam.log_step(epoch, step, sam.kLogL1TrainTime, train_time)\n sam.log_step(epoch, step, sam.kLogL1ConvertTime, convert_time)\n sam.log_epoch_add(epoch, sam.kLogEpochConvertTime, convert_time)\n sam.log_epoch_add(epoch, sam.kLogEpochTrainTime, train_time)\n sam.log_epoch_add(epoch, sam.kLogEpochTotalTime, total_time)\n\n # sample_times [cur_step_key] = sample_time\n # copy_times [cur_step_key] = copy_time\n # convert_times [cur_step_key] = convert_time\n # train_times [cur_step_key] = train_time\n # total_times [cur_step_key] = total_time\n\n # num_nodes [cur_step_key] = num_node\n # num_samples [cur_step_key] = num_sample\n\n # print('Epoch {:05d} | Step {:05d} | Nodes {:.0f} | Samples {:.0f} | Time {:.4f} secs | Sample Time {:.4f} secs | Copy Time {:.4f} secs | Train Time {:.4f} secs (Convert Time {:.4f} secs) | Loss {:.4f} '.format(\n # epoch, step, num_node, num_sample, total_time,\n # sample_time, copy_time, train_time, convert_time, loss\n # ))\n\n # sam.report_step_average(epoch, step)\n # sam.report_step(epoch, step)\n cur_step_key 
+= 1\n\n # sam.report_epoch_average(epoch)\n\n epoch_sample_times[epoch] = sam.get_log_epoch_value(\n epoch, sam.kLogEpochSampleTime)\n epoch_copy_times[epoch] = sam.get_log_epoch_value(\n epoch, sam.kLogEpochCopyTime)\n epoch_convert_times[epoch] = sam.get_log_epoch_value(\n epoch, sam.kLogEpochConvertTime)\n epoch_train_times[epoch] = sam.get_log_epoch_value(\n epoch, sam.kLogEpochTrainTime)\n epoch_total_times[epoch] = sam.get_log_epoch_value(\n epoch, sam.kLogEpochTotalTime)\n sam.forward_barrier()\n\n sam.report_step_average(num_epoch - 1, num_step - 1)\n print('Avg Epoch Time {:.4f} | Sample Time {:.4f} | Copy Time {:.4f} | Convert Time {:.4f} | Train Time {:.4f}'.format(\n np.mean(epoch_total_times[1:]), np.mean(epoch_sample_times[1:]), np.mean(epoch_copy_times[1:]), np.mean(epoch_convert_times[1:]), np.mean(epoch_train_times[1:])))\n\n sam.report_node_access()\n sam.dump_trace()\n sam.shutdown()\n\n\nif __name__ == '__main__':\n run()\n" ]
[ [ "torch.nn.Linear", "torch.device", "torch.nn.Dropout", "torch.cat", "torch.nn.ModuleList", "torch.nn.init.constant_", "torch.nn.init.xavier_uniform_", "numpy.mean", "torch.tensor", "torch.nn.init.calculate_gain", "torch.nn.CrossEntropyLoss" ] ]
mrazizi/TextGAIL
[ "18ba72c6d63c3c3db1f195d118267c6e8243b4ff" ]
[ "TorchFly/torchfly/text/rl/textrl_replay_buffer.py" ]
[ "from typing import Callable, Iterator\nfrom omegaconf import DictConfig\nimport numpy as np\nfrom operator import itemgetter\nfrom collections import namedtuple\n\nTextRLSample = namedtuple('TextRLSample', ['state', 'action', 'action_log_prob', 'reward', 'normalized_reward'])\n\n\nclass TextRLReplayBuffer:\n \"\"\"\n We need to store (state, action, action_log_probs, reward, and normalized_reward)\n All rewards are normalized with running mean and std (Important for RL)\n We use momentum so that the running stats only depends on the recent data\n \"\"\"\n def __init__(self, max_buffer_size=512, momentum=0.90):\n self.max_buffer_size = max_buffer_size\n #self.buffer = [deque(maxlen=self.max_buffer_size)]\n self.buffer = []\n self.momentum = momentum\n self.reward_mean = 0.0\n self.reward_mean_sq = 0.0\n self.reward_std = 1.0\n\n def update_batch(self, states, actions, action_log_probs, rewards, normalize_reward=True):\n if normalize_reward:\n batch_momentum = self.momentum**len(rewards)\n self.reward_mean = self.reward_mean * batch_momentum + np.mean(rewards) * (1 - batch_momentum)\n self.reward_mean_sq = self.reward_mean_sq * batch_momentum + np.mean(rewards**2) * (1 - batch_momentum)\n self.reward_std = np.abs(self.reward_mean_sq - self.reward_mean**2)**0.5\n normalized_rewards = (rewards - self.reward_mean) / (self.reward_std + 1e-5)\n normalized_rewards = np.clip(normalized_rewards, -2.0, 2.0)\n else:\n normalized_rewards = rewards\n\n self.buffer.extend(zip(states, actions, action_log_probs, rewards, normalized_rewards))\n\n def update(self, state, action, action_log_prob, reward, normalize_reward=True):\n if normalize_reward:\n self.reward_mean = self.reward_mean * self.momentum + reward * (1 - self.momentum)\n self.reward_mean_sq = self.reward_mean_sq * self.momentum + (reward**2) * (1 - self.momentum)\n self.reward_std = np.abs(self.reward_mean_sq - self.reward_mean**2)**0.5\n normalized_reward = (reward - self.reward_mean) / (self.reward_std + 1e-5)\n 
normalized_reward = np.clip(normalized_reward, -2.0, 2.0)\n else:\n normalize_reward = reward\n\n self.buffer.append((state, action, action_log_prob, reward, normalized_reward))\n\n def __getitem__(self, index):\n return self.buffer[index]\n\n def __len__(self):\n return len(self.buffer)\n\n def clear(self):\n self.buffer = []\n\n def iterate_sample(self, mini_batch_size, shuffle=False) -> Iterator:\n \"\"\"\n A mini batch iterator\n \"\"\"\n indices = np.arange(len(self.buffer))\n if shuffle:\n np.random.shuffle(indices)\n\n for i in range(0, len(self.buffer), mini_batch_size):\n sampled_indices = indices[i:i + mini_batch_size]\n # get sampled batch\n yield itemgetter(*sampled_indices)(self.buffer)\n" ]
[ [ "numpy.abs", "numpy.mean", "numpy.random.shuffle", "numpy.clip" ] ]
garudlab/mother_infant
[ "98a27c83bf5ece9497d5a030c6c9396a8c514781", "98a27c83bf5ece9497d5a030c6c9396a8c514781" ]
[ "pickle_pnps.py", "utils/substitution_rates_utils.py" ]
[ "import matplotlib\r\nmatplotlib.use('Agg')\r\nfrom utils import parse_midas_data, sample_utils, config, sfs_utils, diversity_utils, stats_utils\r\nimport sys, os.path, numpy\r\nfrom math import log10,ceil\r\nimport matplotlib.pyplot as plt\r\nfrom collections import defaultdict\r\nfrom utils.classes import Interval\r\n\r\ntype = sys.argv[1] # common, rare, etc.\r\n\r\n# Parameters\r\nmin_coverage = 20 # orig 20\r\ncommon_freqrange = [Interval('[0.2, 0.8]')]\r\nrare_freqrange = [Interval('(0, 0.1]'), Interval('[0.9, 1)')]\r\nseg_freqrange = [Interval('(0, 1)')]\r\n\r\nif type == 'common':\r\n\tfreqrange = common_freqrange\r\nelif type == 'rare':\r\n\tfreqrange = rare_freqrange\r\nelif type == 'seg':\r\n\tfreqrange = seg_freqrange\r\n\r\n# Good species list\r\ngood_species_list = parse_midas_data.load_pickled_good_species_list()\r\n\r\n# Dictionary: sample -> species -> (within_1D, total_1D, within_4D, total_4D)\r\nwithin_total_sites_QP = defaultdict(dict)\r\nwithin_total_sites_nonQP = defaultdict(dict)\r\n\r\n# Dictionary: sample -> species -> pN/pS\r\npNpS_QP = defaultdict(dict)\r\npNpS_nonQP = defaultdict(dict)\r\n\r\nfor species in good_species_list:\r\n\t\r\n\t# Load SNP information for this species\r\n\tsys.stderr.write(\"Loading SFSs for %s...\\t\" % species)\r\n\tsamples_4D, sfs_map_4D = parse_midas_data.parse_within_sample_sfs(species, allowed_variant_types=set(['4D'])) # synonymous\r\n\tsamples_1D, sfs_map_1D = parse_midas_data.parse_within_sample_sfs(species, allowed_variant_types=set(['1D'])) # nonsynonymous\r\n\tsys.stderr.write(\"Done!\\n\")\r\n\t\r\n\t# Load genomic coverage distributions\r\n\tsample_coverage_histograms, samples = parse_midas_data.parse_coverage_distribution(species)\r\n\tsamples = numpy.array(samples)\r\n\tmedian_coverages = numpy.array([stats_utils.calculate_nonzero_median_from_histogram(hist) for hist in sample_coverage_histograms])\r\n\tsample_median_coverage_map = {samples[i]: median_coverages[i] for i in 
range(len(samples))}\r\n\t\r\n\t# Get QP samples (note: low coverage samples are excluded)\r\n\tqp_sample_dict = sample_utils.calculate_qp_samples(samples, species)\r\n\tsamples_QP = qp_sample_dict['qp']\r\n\tsamples_nonQP = qp_sample_dict['non-qp']\r\n\t\r\n\t# Only plot samples above a certain median coverage threshold (100)\r\n\tdesired_samples = samples[(median_coverages >= min_coverage)]\r\n\tdesired_median_coverages = numpy.array([sample_median_coverage_map[sample] for sample in desired_samples])\r\n\t\r\n\tif len(desired_samples) <= 0:\r\n\t\tcontinue\r\n\t\r\n\t# ====================================\r\n\t# Calculate within polymorphism rates\r\n\t# ====================================\r\n\t\r\n\t# Final list of samples used (filtered for nonzero total site counts)\r\n\tsample_names = []\r\n\t\r\n\t# Sites with freq 0-0.05 (rare alleles)\r\n\tbetween_rates_1D = []\r\n\tbetween_rates_4D = []\r\n\t\r\n\t# Sites with freq = 0, freq > 0.05\r\n\twithin_rates_1D = []\r\n\twithin_rates_4D = []\r\n\t\r\n\twithin_sites_1D_array=[]\r\n\ttotal_sites_1D_array=[]\r\n\twithin_sites_4D_array=[]\r\n\ttotal_sites_4D_array=[]\r\n\t\r\n\tfor sample in desired_samples:\r\n\t\t\r\n\t\ttotal_sites_1D, within_sites_1D = sfs_utils.calculate_sites_within_freq_range_from_sfs_map(sfs_map_1D[sample], freqrange)\r\n\t\ttotal_sites_4D, within_sites_4D = sfs_utils.calculate_sites_within_freq_range_from_sfs_map(sfs_map_4D[sample], freqrange)\r\n\t\t\r\n\t\t# Skip if zero of either syn. or nonsyn. 
total sites\r\n\t\tif total_sites_1D <= 0 or total_sites_4D <= 0:\r\n\t\t\tcontinue\r\n\t\t\r\n\t\t# Fraction of all nonsynonymous sites with minor allele frequency > 0.05\r\n\t\tpN = (within_sites_1D*1.0 + 1.0)/(total_sites_1D + 1.0)\t\t\r\n\t\t# Fraction of all synonymous sites with minor allele frequency > 0.05\r\n\t\tpS = (within_sites_4D*1.0 + 1.0)/(total_sites_4D + 1.0)\r\n\t\t\r\n\t\t# Store within and total sites, pN/pS for each sample-species pair\r\n\t\tif sample in samples_QP:\r\n\t\t\twithin_total_sites_QP[sample][species] = (within_sites_1D, total_sites_1D, within_sites_4D, total_sites_4D)\r\n\t\t\tpNpS_QP[sample][species] = pN/pS\r\n\t\t\r\n\t\telif sample in samples_nonQP:\r\n\t\t\twithin_total_sites_nonQP[sample][species] = (within_sites_1D, total_sites_1D, within_sites_4D, total_sites_4D)\r\n\t\t\tpNpS_nonQP[sample][species] = pN/pS\r\n\r\n# Pickle!!\r\nimport pickle\r\n\r\npdir = \"%s/pickles\" % config.data_directory\r\n\r\npickle.dump(within_total_sites_nonQP, open(\"%s/within_total_sites_%s_nonQP_cov20_rare10pct.pkl\" % (pdir, type), 'wb'))\r\npickle.dump(within_total_sites_QP, open(\"%s/within_total_sites_%s_QP_cov20_rare10pct.pkl\" % (pdir, type), 'wb'))\r\npickle.dump(pNpS_nonQP, open(\"%s/pNpS_%s_nonQP.pkl\" % (pdir, type), 'wb'))\r\npickle.dump(pNpS_QP, open(\"%s/pNpS_%s_QP.pkl\" % (pdir, type), 'wb'))\r\n", "import numpy\r\nimport gzip\r\nimport os\r\nimport config\r\n\r\nsubstitution_rate_directory = '%s/substitution_rates/' % (config.data_directory)\r\nintermediate_filename_template = '%s/%s/%s.txt.gz'\r\n\r\ndef load_substitution_rate_map(species_name, prev_cohort='all'):\r\n\r\n intermediate_filename = intermediate_filename_template % (substitution_rate_directory, prev_cohort, species_name)\r\n\r\n substitution_rate_map = {}\r\n\r\n if not os.path.isfile(intermediate_filename):\r\n return substitution_rate_map\r\n \r\n file = gzip.open(intermediate_filename,\"r\")\r\n file.readline() # header\r\n for line in file:\r\n items = 
line.split(\",\")\r\n if items[0].strip()!=species_name:\r\n continue\r\n \r\n record_strs = [\", \".join(['Species', 'Sample1', 'Sample2', 'Type', 'Num_muts', 'Num_revs', 'Num_mut_opportunities', 'Num_rev_opportunities'])] \r\n \r\n sample_1 = items[1].strip()\r\n sample_2 = items[2].strip()\r\n type = items[3].strip()\r\n num_muts = float(items[4])\r\n num_revs = float(items[5])\r\n num_mut_opportunities = float(items[6])\r\n num_rev_opportunities = float(items[7])\r\n \r\n num_changes = num_muts+num_revs\r\n num_opportunities = num_mut_opportunities+num_rev_opportunities\r\n \r\n sample_pair = (sample_1, sample_2)\r\n \r\n if type not in substitution_rate_map:\r\n substitution_rate_map[type] = {}\r\n \r\n substitution_rate_map[type][sample_pair] = (num_muts, num_revs, num_mut_opportunities, num_rev_opportunities)\r\n \r\n return substitution_rate_map\r\n\r\ndef calculate_mutrev_matrices_from_substitution_rate_map(substitution_rate_map, type, allowed_samples=[]): \r\n # Rewritten to preserve order of allowed samples\r\n # If allowed samples contains things that are not in DB, it returns zero opportunities\r\n\r\n total_sample_set = set([])\r\n for sample_1, sample_2 in substitution_rate_map[type].keys():\r\n total_sample_set.add(sample_1)\r\n total_sample_set.add(sample_2)\r\n\r\n if len(allowed_samples)==0:\r\n allowed_samples = list(sorted(total_sample_set)) \r\n \r\n # allows us to go from sample name to idx in allowed samples (to preserve order)\r\n sample_idx_map = {allowed_samples[i]:i for i in xrange(0,len(allowed_samples))}\r\n \r\n mut_difference_matrix = numpy.zeros((len(allowed_samples), len(allowed_samples)))*1.0\r\n rev_difference_matrix = numpy.zeros_like(mut_difference_matrix)\r\n \r\n mut_opportunity_matrix = numpy.zeros_like(mut_difference_matrix)\r\n rev_opportunity_matrix = numpy.zeros_like(mut_difference_matrix)\r\n \r\n for sample_pair in substitution_rate_map[type].keys():\r\n \r\n sample_i = sample_pair[0]\r\n sample_j = sample_pair[1]\r\n 
\r\n if not ((sample_i in sample_idx_map) and (sample_j in sample_idx_map)):\r\n continue\r\n \r\n i = sample_idx_map[sample_i]\r\n j = sample_idx_map[sample_j]\r\n \r\n num_muts, num_revs, num_mut_opportunities, num_rev_opportunities = substitution_rate_map[type][sample_pair]\r\n \r\n mut_difference_matrix[i,j] = num_muts\r\n rev_difference_matrix[i,j] = num_revs\r\n \r\n mut_opportunity_matrix[i,j] = num_mut_opportunities\r\n rev_opportunity_matrix[i,j] = num_rev_opportunities\r\n \r\n return allowed_samples, mut_difference_matrix, rev_difference_matrix, mut_opportunity_matrix, rev_opportunity_matrix\r\n\r\n \r\ndef calculate_matrices_from_substitution_rate_map(substitution_rate_map, type, allowed_samples=[]):\r\n# once the map is loaded, then we can compute rate matrices in this definition (so, it relies on the previous def) \r\n\r\n samples, mut_difference_matrix, rev_difference_matrix, mut_opportunity_matrix, rev_opportunity_matrix = calculate_mutrev_matrices_from_substitution_rate_map( substitution_rate_map, type, allowed_samples)\r\n\r\n difference_matrix = mut_difference_matrix+rev_difference_matrix\r\n opportunity_matrix = mut_opportunity_matrix+rev_opportunity_matrix\r\n \r\n return samples, difference_matrix, opportunity_matrix\r\n" ]
[ [ "matplotlib.use", "numpy.array" ], [ "numpy.zeros_like" ] ]
ardhani31/SIR-Feb
[ "f60d76f73ee8ecd2aa2a319fb0cd20b5fdbffa15" ]
[ "covsirphy/loading/db_cs_japan.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nfrom covsirphy.util.term import Term\nfrom covsirphy.loading.db_base import _RemoteDatabase\n\n\nclass _CSJapan(_RemoteDatabase):\n \"\"\"\n Access \"COVID-19 Dataset in Japan.\n https://github.com/lisphilar/covid19-sir/tree/master/data\n\n Args:\n filename (str): CSV filename to save records\n \"\"\"\n # URL\n GITHUB_URL = \"https://raw.githubusercontent.com\"\n URL_C = f\"{GITHUB_URL}/lisphilar/covid19-sir/master/data/japan/covid_jpn_total.csv\"\n URL_P = f\"{GITHUB_URL}/lisphilar/covid19-sir/master/data/japan/covid_jpn_prefecture.csv\"\n # Citation\n CITATION = \"Hirokazu Takaya (2020-2022), COVID-19 dataset in Japan, GitHub repository, \" \\\n \"https://github.com/lisphilar/covid19-sir/data/japan\"\n # Column names and data types\n # {\"name in database\": \"name defined in Term class\"}\n COL_DICT = {\n \"Date\": Term.DATE,\n Term.COUNTRY: Term.COUNTRY,\n \"Area\": Term.PROVINCE,\n Term.ISO3: Term.ISO3,\n \"Positive\": Term.C,\n \"Fatal\": Term.F,\n \"Discharged\": Term.R,\n \"Hosp_require\": \"Hosp_require\",\n Term.MODERATE: Term.MODERATE,\n \"Hosp_severe\": Term.SEVERE,\n \"Tested\": Term.TESTS,\n Term.VAC: Term.VAC,\n Term.VAC_BOOSTERS: Term.VAC_BOOSTERS,\n Term.V_ONCE: Term.V_ONCE,\n Term.V_FULL: Term.V_FULL,\n }\n\n def download(self, verbose):\n \"\"\"\n Download the dataset from the server and set the list of primary sources.\n\n Args:\n verbose (int): level of verbosity\n\n Returns:\n pandas.DataFrame\n Index\n reset index\n Columns\n defined by the first values of self.COL_DICT.values()\n\n Note:\n If @verbose is equal to or over 1, how to show the list will be explained.\n \"\"\"\n\n # Download datasets\n if verbose:\n print(\"Retrieving COVID-19 dataset in Japan from https://github.com/lisphilar/covid19-sir/data/japan\")\n # Domestic/Airport/Returnee\n dar_value_cols = [\"Positive\", \"Tested\", \"Discharged\", \"Fatal\", \"Hosp_require\", \"Hosp_severe\"]\n dar_cols = 
[*dar_value_cols, \"Date\", \"Location\", \"Vaccinated_1st\", \"Vaccinated_2nd\", \"Vaccinated_3rd\"]\n dar_df = pd.read_csv(self.URL_C, usecols=dar_cols)\n dar_df = dar_df.rename(columns={\"Location\": \"Area\"}).set_index(\"Date\")\n # Country level data\n c_df = dar_df.groupby(\"Date\").sum().reset_index()\n c_df[\"Area\"] = self.UNKNOWN\n # Entering (= Airport + Returnee)\n e_df = dar_df.loc[dar_df[\"Area\"].isin([\"Airport\", \"Returnee\"])].groupby(\"Date\").sum().reset_index()\n e_df[\"Area\"] = \"Entering\"\n # Province level data\n p_cols = [*dar_value_cols, \"Date\", \"Prefecture\"]\n p_df = pd.read_csv(self.URL_P, usecols=p_cols)\n p_df = p_df.rename(columns={\"Prefecture\": \"Area\"})\n # Combine\n df = pd.concat([c_df, e_df, p_df], axis=0, ignore_index=True, sort=True)\n # Set additional columns\n df[self.COUNTRY] = \"Japan\"\n df[self.ISO3] = \"JPN\"\n df[self.MODERATE] = df[\"Hosp_require\"] - df[\"Hosp_severe\"]\n df[self.V_ONCE] = df[\"Vaccinated_1st\"].cumsum()\n df[self.V_FULL] = df[\"Vaccinated_2nd\"].cumsum()\n df[self.VAC_BOOSTERS] = df[\"Vaccinated_3rd\"].cumsum()\n df[self.VAC] = df[[self.V_ONCE, self.V_FULL, self.VAC_BOOSTERS]].sum(axis=1)\n return df\n" ]
[ [ "pandas.read_csv", "pandas.concat" ] ]
anuragdw710/jina
[ "520fc0794fb43d96e1fc85534e9df3cf9c89c42e" ]
[ "jina/types/arrays/neural_ops.py" ]
[ "from typing import Optional, Union, Callable, Tuple\n\nimport numpy as np\n\nfrom ... import Document\nfrom ...importer import ImportExtensions\nfrom ...math.helper import top_k, minmax_normalize, update_rows_x_mat_best\n\nif False:\n from .document import DocumentArray\n from .memmap import DocumentArrayMemmap\n\n\nclass DocumentArrayNeuralOpsMixin:\n \"\"\" A mixin that provides match functionality to DocumentArrays \"\"\"\n\n def match(\n self,\n darray: Union['DocumentArray', 'DocumentArrayMemmap'],\n metric: Union[\n str, Callable[['np.ndarray', 'np.ndarray'], 'np.ndarray']\n ] = 'cosine',\n limit: Optional[int] = 20,\n normalization: Optional[Tuple[int, int]] = None,\n use_scipy: bool = False,\n metric_name: Optional[str] = None,\n batch_size: Optional[int] = None,\n ) -> None:\n \"\"\"Compute embedding based nearest neighbour in `another` for each Document in `self`,\n and store results in `matches`.\n .. note::\n 'cosine', 'euclidean', 'sqeuclidean' are supported natively without extra dependency.\n You can use other distance metric provided by ``scipy``, such as β€˜braycurtis’, β€˜canberra’, β€˜chebyshev’,\n β€˜cityblock’, β€˜correlation’, β€˜cosine’, β€˜dice’, β€˜euclidean’, β€˜hamming’, β€˜jaccard’, β€˜jensenshannon’,\n β€˜kulsinski’, β€˜mahalanobis’, β€˜matching’, β€˜minkowski’, β€˜rogerstanimoto’, β€˜russellrao’, β€˜seuclidean’,\n β€˜sokalmichener’, β€˜sokalsneath’, β€˜sqeuclidean’, β€˜wminkowski’, β€˜yule’.\n To use scipy metric, please set ``use_scipy=True``.\n - To make all matches values in [0, 1], use ``dA.match(dB, normalization=(0, 1))``\n - To invert the distance as score and make all values in range [0, 1],\n use ``dA.match(dB, normalization=(1, 0))``. 
Note, how ``normalization`` differs from the previous.\n :param darray: the other DocumentArray or DocumentArrayMemmap to match against\n :param metric: the distance metric\n :param limit: the maximum number of matches, when not given defaults to 20.\n :param normalization: a tuple [a, b] to be used with min-max normalization,\n the min distance will be rescaled to `a`, the max distance will be rescaled to `b`\n all values will be rescaled into range `[a, b]`.\n :param use_scipy: use Scipy as the computation backend\n :param metric_name: if provided, then match result will be marked with this string.\n :param batch_size: if provided, then `darray` is loaded in chunks of, at most, batch_size elements. This option\n will be slower but more memory efficient. Specialy indicated if `darray` is a big\n DocumentArrayMemmap.\n \"\"\"\n\n if callable(metric):\n cdist = metric\n elif isinstance(metric, str):\n if use_scipy:\n from scipy.spatial.distance import cdist as cdist\n else:\n from ...math.distance import cdist as cdist\n else:\n raise TypeError(\n f'metric must be either string or a 2-arity function, received: {metric!r}'\n )\n\n metric_name = metric_name or (metric.__name__ if callable(metric) else metric)\n limit = len(darray) if limit is None else limit\n\n if batch_size:\n dist, idx = self._match_online(\n darray, cdist, limit, normalization, metric_name, batch_size\n )\n else:\n dist, idx = self._match(darray, cdist, limit, normalization, metric_name)\n\n for _q, _ids, _dists in zip(self, idx, dist):\n _q.matches.clear()\n for _id, _dist in zip(_ids, _dists):\n # Note, when match self with other, or both of them share the same Document\n # we might have recursive matches .\n # checkout https://github.com/jina-ai/jina/issues/3034\n d = darray[int(_id)]\n if d.id in self:\n d = Document(d, copy=True)\n d.pop('matches')\n _q.matches.append(d, scores={metric_name: _dist})\n\n def _match(self, darray, cdist, limit, normalization, metric_name):\n \"\"\"\n Computes 
the matches between self and `darray` loading `darray` into main memory.\n :param darray: the other DocumentArray or DocumentArrayMemmap to match against\n :param cdist: the distance metric\n :param limit: the maximum number of matches, when not given\n all Documents in `darray` are considered as matches\n :param normalization: a tuple [a, b] to be used with min-max normalization,\n the min distance will be rescaled to `a`, the max distance will be rescaled to `b`\n all values will be rescaled into range `[a, b]`.\n :param metric_name: if provided, then match result will be marked with this string.\n :return: distances and indices\n \"\"\"\n is_sparse = False\n\n if isinstance(darray[0].embedding, np.ndarray):\n x_mat = self.embeddings\n y_mat = darray.embeddings\n\n else:\n import scipy.sparse as sp\n\n if sp.issparse(darray[0].embedding):\n x_mat = sp.vstack(self.get_attributes('embedding'))\n y_mat = sp.vstack(darray.get_attributes('embedding'))\n is_sparse = True\n\n if is_sparse:\n dists = cdist(x_mat, y_mat, metric_name, is_sparse=is_sparse)\n else:\n dists = cdist(x_mat, y_mat, metric_name)\n\n dist, idx = top_k(dists, min(limit, len(darray)), descending=False)\n if isinstance(normalization, (tuple, list)) and normalization is not None:\n\n # normalization bound uses original distance not the top-k trimmed distance\n if is_sparse:\n min_d = dists.min(axis=-1).toarray()\n max_d = dists.max(axis=-1).toarray()\n else:\n min_d = np.min(dists, axis=-1, keepdims=True)\n max_d = np.max(dists, axis=-1, keepdims=True)\n\n dist = minmax_normalize(dist, normalization, (min_d, max_d))\n\n return dist, idx\n\n def _match_online(\n self, darray, cdist, limit, normalization, metric_name, batch_size\n ):\n \"\"\"\n Computes the matches between self and `darray` loading `darray` into main memory in chunks of size `batch_size`.\n\n :param darray: the other DocumentArray or DocumentArrayMemmap to match against\n :param cdist: the distance metric\n :param limit: the maximum 
number of matches, when not given\n all Documents in `another` are considered as matches\n :param normalization: a tuple [a, b] to be used with min-max normalization,\n the min distance will be rescaled to `a`, the max distance will be rescaled to `b`\n all values will be rescaled into range `[a, b]`.\n :param batch_size: length of the chunks loaded into memory from darray.\n :param metric_name: if provided, then match result will be marked with this string.\n :return: distances and indices\n \"\"\"\n assert isinstance(\n darray[0].embedding, np.ndarray\n ), f'expected embedding of type np.ndarray but received {type(darray[0].embedding)}'\n\n x_mat = self.embeddings\n n_x = x_mat.shape[0]\n\n def batch_generator(y_darray: 'DocumentArrayMemmap', n_batch: int):\n for i in range(0, len(y_darray), n_batch):\n y_mat = y_darray._get_embeddings(slice(i, i + n_batch))\n yield y_mat, i\n\n y_batch_generator = batch_generator(darray, batch_size)\n top_dists = np.inf * np.ones((n_x, limit))\n top_inds = np.zeros((n_x, limit), dtype=int)\n\n for y_batch, y_batch_start_pos in y_batch_generator:\n distances = cdist(x_mat, y_batch, metric_name)\n dists, inds = top_k(distances, limit, descending=False)\n\n if isinstance(normalization, (tuple, list)) and normalization is not None:\n dists = minmax_normalize(dists, normalization)\n\n inds = y_batch_start_pos + inds\n top_dists, top_inds = update_rows_x_mat_best(\n top_dists, top_inds, dists, inds, limit\n )\n\n # sort final the final `top_dists` and `top_inds` per row\n permutation = np.argsort(top_dists, axis=1)\n dist = np.take_along_axis(top_dists, permutation, axis=1)\n idx = np.take_along_axis(top_inds, permutation, axis=1)\n\n return dist, idx\n\n def visualize(\n self,\n output: Optional[str] = None,\n title: Optional[str] = None,\n colored_tag: Optional[str] = None,\n colormap: str = 'rainbow',\n method: str = 'pca',\n show_axis: bool = False,\n ):\n \"\"\"Visualize embeddings in a 2D projection with the PCA algorithm. 
This function requires ``matplotlib`` installed.\n\n If `tag_name` is provided the plot uses a distinct color for each unique tag value in the\n documents of the DocumentArray.\n\n :param output: Optional path to store the visualization. If not given, show in UI\n :param title: Optional title of the plot. When not given, the default title is used.\n :param colored_tag: Optional str that specifies tag used to color the plot\n :param colormap: the colormap string supported by matplotlib.\n :param method: the visualization method, available `pca`, `tsne`. `pca` is fast but may not well represent\n nonlinear relationship of high-dimensional data. `tsne` requires scikit-learn to be installed and is\n much slower.\n :param show_axis: If set, axis and bounding box of the plot will be printed.\n\n \"\"\"\n\n x_mat = self.embeddings\n assert isinstance(\n x_mat, np.ndarray\n ), f'Type {type(x_mat)} not currently supported, use np.ndarray embeddings'\n\n if method == 'tsne':\n from sklearn.manifold import TSNE\n\n x_mat_2d = TSNE(n_components=2).fit_transform(x_mat)\n else:\n from ...math.dimensionality_reduction import PCA\n\n x_mat_2d = PCA(n_components=2).fit_transform(x_mat)\n\n plt_kwargs = {\n 'x': x_mat_2d[:, 0],\n 'y': x_mat_2d[:, 1],\n 'alpha': 0.2,\n 'marker': '.',\n }\n\n with ImportExtensions(required=True):\n import matplotlib.pyplot as plt\n\n plt.figure(figsize=(8, 8))\n\n plt.title(title or f'{len(x_mat)} Documents with PCA')\n\n if colored_tag:\n tags = [x[colored_tag] for x in self.get_attributes('tags')]\n tag_to_num = {tag: num for num, tag in enumerate(set(tags))}\n plt_kwargs['c'] = np.array([tag_to_num[ni] for ni in tags])\n plt_kwargs['cmap'] = plt.get_cmap(colormap)\n\n plt.scatter(**plt_kwargs)\n\n if not show_axis:\n plt.gca().set_axis_off()\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n\n if output:\n plt.savefig(output, bbox_inches='tight', pad_inches=0.1)\n else:\n plt.show()\n\n 
def _get_embeddings(self, indices: Optional[slice] = None) -> np.ndarray:\n \"\"\"Return a `np.ndarray` stacking the `embedding` attributes as rows.\n If indices is passed the embeddings from the indices are retrieved, otherwise\n all indices are retrieved.\n\n Example: `self._get_embeddings(10:20)` will return 10 embeddings from positions 10 to 20\n in the `DocumentArray` or `DocumentArrayMemmap`\n\n .. warning:: This operation assumes all embeddings have the same shape and dtype.\n All dtype and shape values are assumed to be equal to the values of the\n first element in the DocumentArray / DocumentArrayMemmap\n\n .. warning:: This operation currently does not support sparse arrays.\n\n :param indices: slice of data from where to retrieve embeddings.\n :return: embeddings stacked per row as `np.ndarray`.\n \"\"\"\n if indices is None:\n indices = slice(0, len(self))\n\n x_mat = bytearray()\n len_slice = 0\n for d in self[indices]:\n x_mat += d.proto.embedding.dense.buffer\n len_slice += 1\n\n return np.frombuffer(x_mat, dtype=self[0].proto.embedding.dense.dtype).reshape(\n (len_slice, self[0].proto.embedding.dense.shape[0])\n )\n" ]
[ [ "numpy.max", "scipy.sparse.issparse", "numpy.array", "numpy.zeros", "matplotlib.pyplot.savefig", "matplotlib.pyplot.get_cmap", "numpy.ones", "numpy.min", "sklearn.manifold.TSNE", "matplotlib.pyplot.figure", "numpy.take_along_axis", "matplotlib.pyplot.show", "numpy.argsort", "numpy.frombuffer", "matplotlib.pyplot.scatter", "matplotlib.pyplot.gca", "matplotlib.pyplot.NullLocator", "scipy.spatial.distance.cdist" ] ]
walshb/numpy
[ "073bc39c58a6788ffda6aaa7549955cc3d4fdc93" ]
[ "numpy/lib/_iotools.py" ]
[ "\"\"\"A collection of functions designed to help I/O with ascii files.\"\"\"\n__docformat__ = \"restructuredtext en\"\n\nimport sys\nimport numpy as np\nimport numpy.core.numeric as nx\nfrom __builtin__ import bool, int, long, float, complex, object, unicode, str\n\nfrom numpy.compat import asbytes, bytes, asbytes_nested\n\nif sys.version_info[0] >= 3:\n def _bytes_to_complex(s):\n return complex(s.decode('ascii'))\n def _bytes_to_name(s):\n return s.decode('ascii')\nelse:\n _bytes_to_complex = complex\n _bytes_to_name = str\n\ndef _is_string_like(obj):\n \"\"\"\n Check whether obj behaves like a string.\n \"\"\"\n try:\n obj + ''\n except (TypeError, ValueError):\n return False\n return True\n\ndef _is_bytes_like(obj):\n \"\"\"\n Check whether obj behaves like a bytes object.\n \"\"\"\n try:\n obj + asbytes('')\n except (TypeError, ValueError):\n return False\n return True\n\n\ndef _to_filehandle(fname, flag='r', return_opened=False):\n \"\"\"\n Returns the filehandle corresponding to a string or a file.\n If the string ends in '.gz', the file is automatically unzipped.\n\n Parameters\n ----------\n fname : string, filehandle\n Name of the file whose filehandle must be returned.\n flag : string, optional\n Flag indicating the status of the file ('r' for read, 'w' for write).\n return_opened : boolean, optional\n Whether to return the opening status of the file.\n \"\"\"\n if _is_string_like(fname):\n if fname.endswith('.gz'):\n import gzip\n fhd = gzip.open(fname, flag)\n elif fname.endswith('.bz2'):\n import bz2\n fhd = bz2.BZ2File(fname)\n else:\n fhd = file(fname, flag)\n opened = True\n elif hasattr(fname, 'seek'):\n fhd = fname\n opened = False\n else:\n raise ValueError('fname must be a string or file handle')\n if return_opened:\n return fhd, opened\n return fhd\n\n\ndef has_nested_fields(ndtype):\n \"\"\"\n Returns whether one or several fields of a dtype are nested.\n\n Parameters\n ----------\n ndtype : dtype\n Data-type of a structured array.\n\n 
Raises\n ------\n AttributeError : If `ndtype` does not have a `names` attribute.\n\n Examples\n --------\n >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])\n >>> np.lib._iotools.has_nested_fields(dt)\n False\n\n \"\"\"\n for name in ndtype.names or ():\n if ndtype[name].names:\n return True\n return False\n\n\ndef flatten_dtype(ndtype, flatten_base=False):\n \"\"\"\n Unpack a structured data-type by collapsing nested fields and/or fields\n with a shape.\n\n Note that the field names are lost.\n\n Parameters\n ----------\n ndtype : dtype\n The datatype to collapse\n flatten_base : {False, True}, optional\n Whether to transform a field with a shape into several fields or not.\n\n Examples\n --------\n >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),\n ... ('block', int, (2, 3))])\n >>> np.lib._iotools.flatten_dtype(dt)\n [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')]\n >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)\n [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'),\n dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'),\n dtype('int32')]\n\n \"\"\"\n names = ndtype.names\n if names is None:\n if flatten_base:\n return [ndtype.base] * int(np.prod(ndtype.shape))\n return [ndtype.base]\n else:\n types = []\n for field in names:\n info = ndtype.fields[field]\n flat_dt = flatten_dtype(info[0], flatten_base)\n types.extend(flat_dt)\n return types\n\n\n\n\n\n\nclass LineSplitter(object):\n \"\"\"\n Object to split a string at a given delimiter or at given places.\n\n Parameters\n ----------\n delimiter : str, int, or sequence of ints, optional\n If a string, character used to delimit consecutive fields.\n If an integer or a sequence of integers, width(s) of each field.\n comment : str, optional\n Character used to mark the beginning of a comment. Default is '#'.\n autostrip : bool, optional\n Whether to strip each individual field. 
Default is True.\n\n \"\"\"\n\n def autostrip(self, method):\n \"\"\"\n Wrapper to strip each member of the output of `method`.\n\n Parameters\n ----------\n method : function\n Function that takes a single argument and returns a sequence of\n strings.\n\n Returns\n -------\n wrapped : function\n The result of wrapping `method`. `wrapped` takes a single input\n argument and returns a list of strings that are stripped of\n white-space.\n\n \"\"\"\n return lambda input: [_.strip() for _ in method(input)]\n #\n def __init__(self, delimiter=None, comments=asbytes('#'), autostrip=True):\n self.comments = comments\n # Delimiter is a character\n if isinstance(delimiter, unicode):\n delimiter = delimiter.encode('ascii')\n if (delimiter is None) or _is_bytes_like(delimiter):\n delimiter = delimiter or None\n _handyman = self._delimited_splitter\n # Delimiter is a list of field widths\n elif hasattr(delimiter, '__iter__'):\n _handyman = self._variablewidth_splitter\n idx = np.cumsum([0] + list(delimiter))\n delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]\n # Delimiter is a single integer\n elif int(delimiter):\n (_handyman, delimiter) = (self._fixedwidth_splitter, int(delimiter))\n else:\n (_handyman, delimiter) = (self._delimited_splitter, None)\n self.delimiter = delimiter\n if autostrip:\n self._handyman = self.autostrip(_handyman)\n else:\n self._handyman = _handyman\n #\n def _delimited_splitter(self, line):\n line = line.split(self.comments)[0].strip(asbytes(\" \\r\\n\"))\n if not line:\n return []\n return line.split(self.delimiter)\n #\n def _fixedwidth_splitter(self, line):\n line = line.split(self.comments)[0].strip(asbytes(\"\\r\\n\"))\n if not line:\n return []\n fixed = self.delimiter\n slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]\n return [line[s] for s in slices]\n #\n def _variablewidth_splitter(self, line):\n line = line.split(self.comments)[0]\n if not line:\n return []\n slices = self.delimiter\n return [line[s] for s 
in slices]\n #\n def __call__(self, line):\n return self._handyman(line)\n\n\n\nclass NameValidator(object):\n \"\"\"\n Object to validate a list of strings to use as field names.\n\n The strings are stripped of any non alphanumeric character, and spaces\n are replaced by '_'. During instantiation, the user can define a list of\n names to exclude, as well as a list of invalid characters. Names in the\n exclusion list are appended a '_' character.\n\n Once an instance has been created, it can be called with a list of names,\n and a list of valid names will be created.\n The `__call__` method accepts an optional keyword \"default\" that sets\n the default name in case of ambiguity. By default this is 'f', so\n that names will default to `f0`, `f1`, etc.\n\n Parameters\n ----------\n excludelist : sequence, optional\n A list of names to exclude. This list is appended to the default list\n ['return', 'file', 'print']. Excluded names are appended an underscore:\n for example, `file` becomes `file_` if supplied.\n deletechars : str, optional\n A string combining invalid characters that must be deleted from the\n names.\n casesensitive : {True, False, 'upper', 'lower'}, optional\n * If True, field names are case-sensitive.\n * If False or 'upper', field names are converted to upper case.\n * If 'lower', field names are converted to lower case.\n\n The default value is True.\n replace_space: '_', optional\n Character(s) used in replacement of white spaces.\n\n Notes\n -----\n Calling an instance of `NameValidator` is the same as calling its method\n `validate`.\n\n Examples\n --------\n >>> validator = np.lib._iotools.NameValidator()\n >>> validator(['file', 'field2', 'with space', 'CaSe'])\n ['file_', 'field2', 'with_space', 'CaSe']\n\n >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],\n deletechars='q',\n case_sensitive='False')\n >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])\n ['excl_', 'field2', 'no_', 'with_space', 'case']\n\n 
\"\"\"\n #\n defaultexcludelist = ['return', 'file', 'print']\n defaultdeletechars = set(\"\"\"~!@#$%^&*()-=+~\\|]}[{';: /?.>,<\"\"\")\n #\n def __init__(self, excludelist=None, deletechars=None,\n case_sensitive=None, replace_space='_'):\n # Process the exclusion list ..\n if excludelist is None:\n excludelist = []\n excludelist.extend(self.defaultexcludelist)\n self.excludelist = excludelist\n # Process the list of characters to delete\n if deletechars is None:\n delete = self.defaultdeletechars\n else:\n delete = set(deletechars)\n delete.add('\"')\n self.deletechars = delete\n # Process the case option .....\n if (case_sensitive is None) or (case_sensitive is True):\n self.case_converter = lambda x: x\n elif (case_sensitive is False) or ('u' in case_sensitive):\n self.case_converter = lambda x: x.upper()\n elif 'l' in case_sensitive:\n self.case_converter = lambda x: x.lower()\n else:\n self.case_converter = lambda x: x\n #\n self.replace_space = replace_space\n\n def validate(self, names, defaultfmt=\"f%i\", nbfields=None):\n \"\"\"\n Validate a list of strings to use as field names for a structured array.\n\n Parameters\n ----------\n names : sequence of str\n Strings to be validated.\n defaultfmt : str, optional\n Default format string, used if validating a given string reduces its\n length to zero.\n nboutput : integer, optional\n Final number of validated names, used to expand or shrink the initial\n list of names.\n\n Returns\n -------\n validatednames : list of str\n The list of validated field names.\n\n Notes\n -----\n A `NameValidator` instance can be called directly, which is the same as\n calling `validate`. 
For examples, see `NameValidator`.\n\n \"\"\"\n # Initial checks ..............\n if (names is None):\n if (nbfields is None):\n return None\n names = []\n if isinstance(names, basestring):\n names = [names, ]\n if nbfields is not None:\n nbnames = len(names)\n if (nbnames < nbfields):\n names = list(names) + [''] * (nbfields - nbnames)\n elif (nbnames > nbfields):\n names = names[:nbfields]\n # Set some shortcuts ...........\n deletechars = self.deletechars\n excludelist = self.excludelist\n case_converter = self.case_converter\n replace_space = self.replace_space\n # Initializes some variables ...\n validatednames = []\n seen = dict()\n nbempty = 0\n #\n for item in names:\n item = case_converter(item).strip()\n if replace_space:\n item = item.replace(' ', replace_space)\n item = ''.join([c for c in item if c not in deletechars])\n if item == '':\n item = defaultfmt % nbempty\n while item in names:\n nbempty += 1\n item = defaultfmt % nbempty\n nbempty += 1\n elif item in excludelist:\n item += '_'\n cnt = seen.get(item, 0)\n if cnt > 0:\n validatednames.append(item + '_%d' % cnt)\n else:\n validatednames.append(item)\n seen[item] = cnt + 1\n return tuple(validatednames)\n #\n def __call__(self, names, defaultfmt=\"f%i\", nbfields=None):\n return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)\n\n\n\ndef str2bool(value):\n \"\"\"\n Tries to transform a string supposed to represent a boolean to a boolean.\n\n Parameters\n ----------\n value : str\n The string that is transformed to a boolean.\n\n Returns\n -------\n boolval : bool\n The boolean representation of `value`.\n\n Raises\n ------\n ValueError\n If the string is not 'True' or 'False' (case independent)\n\n Examples\n --------\n >>> np.lib._iotools.str2bool('TRUE')\n True\n >>> np.lib._iotools.str2bool('false')\n False\n\n \"\"\"\n value = value.upper()\n if value == asbytes('TRUE'):\n return True\n elif value == asbytes('FALSE'):\n return False\n else:\n raise ValueError(\"Invalid 
boolean\")\n\n\nclass ConverterError(Exception):\n \"\"\"\n Exception raised when an error occurs in a converter for string values.\n\n \"\"\"\n pass\n\nclass ConverterLockError(ConverterError):\n \"\"\"\n Exception raised when an attempt is made to upgrade a locked converter.\n\n \"\"\"\n pass\n\nclass ConversionWarning(UserWarning):\n \"\"\"\n Warning issued when a string converter has a problem.\n\n Notes\n -----\n In `genfromtxt` a `ConversionWarning` is issued if raising exceptions\n is explicitly suppressed with the \"invalid_raise\" keyword.\n\n \"\"\"\n pass\n\n\n\nclass StringConverter(object):\n \"\"\"\n Factory class for function transforming a string into another object (int,\n float).\n\n After initialization, an instance can be called to transform a string\n into another object. If the string is recognized as representing a missing\n value, a default value is returned.\n\n Attributes\n ----------\n func : function\n Function used for the conversion.\n default : any\n Default value to return when the input corresponds to a missing value.\n type : type\n Type of the output.\n _status : int\n Integer representing the order of the conversion.\n _mapper : sequence of tuples\n Sequence of tuples (dtype, function, default value) to evaluate in\n order.\n _locked : bool\n Holds `locked` parameter.\n\n Parameters\n ----------\n dtype_or_func : {None, dtype, function}, optional\n If a `dtype`, specifies the input data type, used to define a basic\n function and a default value for missing data. For example, when\n `dtype` is float, the `func` attribute is set to `float` and the\n default value to `np.nan`.\n If a function, this function is used to convert a string to another\n object. In this case, it is recommended to give an associated default\n value as input.\n default : any, optional\n Value to return by default, that is, when the string to be converted\n is flagged as missing. 
If not given, `StringConverter` tries to supply\n a reasonable default value.\n missing_values : sequence of str, optional\n Sequence of strings indicating a missing value.\n locked : bool, optional\n Whether the StringConverter should be locked to prevent automatic\n upgrade or not. Default is False.\n\n \"\"\"\n #\n _mapper = [(nx.bool_, str2bool, False),\n (nx.integer, int, -1),\n (nx.floating, float, nx.nan),\n (complex, _bytes_to_complex, nx.nan + 0j),\n (nx.string_, bytes, asbytes('???'))]\n (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)\n #\n @classmethod\n def _getdtype(cls, val):\n \"\"\"Returns the dtype of the input variable.\"\"\"\n return np.array(val).dtype\n #\n @classmethod\n def _getsubdtype(cls, val):\n \"\"\"Returns the type of the dtype of the input variable.\"\"\"\n return np.array(val).dtype.type\n #\n # This is a bit annoying. We want to return the \"general\" type in most cases\n # (ie. \"string\" rather than \"S10\"), but we want to return the specific type\n # for datetime64 (ie. 
\"datetime64[us]\" rather than \"datetime64\").\n @classmethod\n def _dtypeortype(cls, dtype):\n \"\"\"Returns dtype for datetime64 and type of dtype otherwise.\"\"\"\n if dtype.type == np.datetime64:\n return dtype\n return dtype.type\n #\n @classmethod\n def upgrade_mapper(cls, func, default=None):\n \"\"\"\n Upgrade the mapper of a StringConverter by adding a new function and its\n corresponding default.\n\n The input function (or sequence of functions) and its associated default\n value (if any) is inserted in penultimate position of the mapper.\n The corresponding type is estimated from the dtype of the default value.\n\n Parameters\n ----------\n func : var\n Function, or sequence of functions\n\n Examples\n --------\n >>> import dateutil.parser\n >>> import datetime\n >>> dateparser = datetustil.parser.parse\n >>> defaultdate = datetime.date(2000, 1, 1)\n >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)\n \"\"\"\n # Func is a single functions\n if hasattr(func, '__call__'):\n cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))\n return\n elif hasattr(func, '__iter__'):\n if isinstance(func[0], (tuple, list)):\n for _ in func:\n cls._mapper.insert(-1, _)\n return\n if default is None:\n default = [None] * len(func)\n else:\n default = list(default)\n default.append([None] * (len(func) - len(default)))\n for (fct, dft) in zip(func, default):\n cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))\n #\n def __init__(self, dtype_or_func=None, default=None, missing_values=None,\n locked=False):\n # Convert unicode (for Py3)\n if isinstance(missing_values, unicode):\n missing_values = asbytes(missing_values)\n elif isinstance(missing_values, (list, tuple)):\n missing_values = asbytes_nested(missing_values)\n # Defines a lock for upgrade\n self._locked = bool(locked)\n # No input dtype: minimal initialization\n if dtype_or_func is None:\n self.func = str2bool\n self._status = 0\n self.default = default or False\n dtype = 
np.dtype('bool')\n else:\n # Is the input a np.dtype ?\n try:\n self.func = None\n dtype = np.dtype(dtype_or_func)\n except TypeError:\n # dtype_or_func must be a function, then\n if not hasattr(dtype_or_func, '__call__'):\n errmsg = \"The input argument `dtype` is neither a function\"\\\n \" or a dtype (got '%s' instead)\"\n raise TypeError(errmsg % type(dtype_or_func))\n # Set the function\n self.func = dtype_or_func\n # If we don't have a default, try to guess it or set it to None\n if default is None:\n try:\n default = self.func(asbytes('0'))\n except ValueError:\n default = None\n dtype = self._getdtype(default)\n # Set the status according to the dtype\n _status = -1\n for (i, (deftype, func, default_def)) in enumerate(self._mapper):\n if np.issubdtype(dtype.type, deftype):\n _status = i\n if default is None:\n self.default = default_def\n else:\n self.default = default\n break\n if _status == -1:\n # We never found a match in the _mapper...\n _status = 0\n self.default = default\n self._status = _status\n # If the input was a dtype, set the function to the last we saw\n if self.func is None:\n self.func = func\n # If the status is 1 (int), change the function to\n # something more robust.\n if self.func == self._mapper[1][1]:\n if issubclass(dtype.type, np.uint64):\n self.func = np.uint64\n elif issubclass(dtype.type, np.int64):\n self.func = np.int64\n else:\n self.func = lambda x : int(float(x))\n # Store the list of strings corresponding to missing values.\n if missing_values is None:\n self.missing_values = set([asbytes('')])\n else:\n if isinstance(missing_values, bytes):\n missing_values = missing_values.split(asbytes(\",\"))\n self.missing_values = set(list(missing_values) + [asbytes('')])\n #\n self._callingfunction = self._strict_call\n self.type = self._dtypeortype(dtype)\n self._checked = False\n self._initial_default = default\n #\n def _loose_call(self, value):\n try:\n return self.func(value)\n except ValueError:\n return self.default\n #\n 
def _strict_call(self, value):\n try:\n return self.func(value)\n except ValueError:\n if value.strip() in self.missing_values:\n if not self._status:\n self._checked = False\n return self.default\n raise ValueError(\"Cannot convert string '%s'\" % value)\n #\n def __call__(self, value):\n return self._callingfunction(value)\n #\n def upgrade(self, value):\n \"\"\"\n Try to find the best converter for a given string, and return the result.\n\n The supplied string `value` is converted by testing different\n converters in order. First the `func` method of the `StringConverter`\n instance is tried, if this fails other available converters are tried.\n The order in which these other converters are tried is determined by the\n `_status` attribute of the instance.\n\n Parameters\n ----------\n value : str\n The string to convert.\n\n Returns\n -------\n out : any\n The result of converting `value` with the appropriate converter.\n\n \"\"\"\n self._checked = True\n try:\n self._strict_call(value)\n except ValueError:\n # Raise an exception if we locked the converter...\n if self._locked:\n errmsg = \"Converter is locked and cannot be upgraded\"\n raise ConverterLockError(errmsg)\n _statusmax = len(self._mapper)\n # Complains if we try to upgrade by the maximum\n _status = self._status\n if _status == _statusmax:\n errmsg = \"Could not find a valid conversion function\"\n raise ConverterError(errmsg)\n elif _status < _statusmax - 1:\n _status += 1\n (self.type, self.func, default) = self._mapper[_status]\n self._status = _status\n if self._initial_default is not None:\n self.default = self._initial_default\n else:\n self.default = default\n self.upgrade(value)\n\n def iterupgrade(self, value):\n self._checked = True\n if not hasattr(value, '__iter__'):\n value = (value,)\n _strict_call = self._strict_call\n try:\n map(_strict_call, value)\n except ValueError:\n # Raise an exception if we locked the converter...\n if self._locked:\n errmsg = \"Converter is locked and cannot 
be upgraded\"\n raise ConverterLockError(errmsg)\n _statusmax = len(self._mapper)\n # Complains if we try to upgrade by the maximum\n _status = self._status\n if _status == _statusmax:\n raise ConverterError(\"Could not find a valid conversion function\")\n elif _status < _statusmax - 1:\n _status += 1\n (self.type, self.func, default) = self._mapper[_status]\n if self._initial_default is not None:\n self.default = self._initial_default\n else:\n self.default = default\n self._status = _status\n self.iterupgrade(value)\n\n def update(self, func, default=None, testing_value=None,\n missing_values=asbytes(''), locked=False):\n \"\"\"\n Set StringConverter attributes directly.\n\n Parameters\n ----------\n func : function\n Conversion function.\n default : any, optional\n Value to return by default, that is, when the string to be converted\n is flagged as missing. If not given, `StringConverter` tries to supply\n a reasonable default value.\n testing_value : str, optional\n A string representing a standard input value of the converter.\n This string is used to help defining a reasonable default value.\n missing_values : sequence of str, optional\n Sequence of strings indicating a missing value.\n locked : bool, optional\n Whether the StringConverter should be locked to prevent automatic\n upgrade or not. 
Default is False.\n\n Notes\n -----\n `update` takes the same parameters as the constructor of `StringConverter`,\n except that `func` does not accept a `dtype` whereas `dtype_or_func` in\n the constructor does.\n\n \"\"\"\n self.func = func\n self._locked = locked\n # Don't reset the default to None if we can avoid it\n if default is not None:\n self.default = default\n self.type = self._dtypeortype(self._getdtype(default))\n else:\n try:\n tester = func(testing_value or asbytes('1'))\n except (TypeError, ValueError):\n tester = None\n self.type = self._dtypeortype(self._getdtype(tester))\n # Add the missing values to the existing set\n if missing_values is not None:\n if _is_bytes_like(missing_values):\n self.missing_values.add(missing_values)\n elif hasattr(missing_values, '__iter__'):\n for val in missing_values:\n self.missing_values.add(val)\n else:\n self.missing_values = []\n\n\n\ndef easy_dtype(ndtype, names=None, defaultfmt=\"f%i\", **validationargs):\n \"\"\"\n Convenience function to create a `np.dtype` object.\n\n The function processes the input `dtype` and matches it with the given\n names.\n\n Parameters\n ----------\n ndtype : var\n Definition of the dtype. 
Can be any string or dictionary\n recognized by the `np.dtype` function, or a sequence of types.\n names : str or sequence, optional\n Sequence of strings to use as field names for a structured dtype.\n For convenience, `names` can be a string of a comma-separated list of\n names.\n defaultfmt : str, optional\n Format string used to define missing names, such as ``\"f%i\"``\n (default) or ``\"fields_%02i\"``.\n validationargs : optional\n A series of optional arguments used to initialize a `NameValidator`.\n\n Examples\n --------\n >>> np.lib._iotools.easy_dtype(float)\n dtype('float64')\n >>> np.lib._iotools.easy_dtype(\"i4, f8\")\n dtype([('f0', '<i4'), ('f1', '<f8')])\n >>> np.lib._iotools.easy_dtype(\"i4, f8\", defaultfmt=\"field_%03i\")\n dtype([('field_000', '<i4'), ('field_001', '<f8')])\n\n >>> np.lib._iotools.easy_dtype((int, float, float), names=\"a,b,c\")\n dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])\n >>> np.lib._iotools.easy_dtype(float, names=\"a,b,c\")\n dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])\n\n \"\"\"\n try:\n ndtype = np.dtype(ndtype)\n except TypeError:\n validate = NameValidator(**validationargs)\n nbfields = len(ndtype)\n if names is None:\n names = [''] * len(ndtype)\n elif isinstance(names, basestring):\n names = names.split(\",\")\n names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)\n ndtype = np.dtype(dict(formats=ndtype, names=names))\n else:\n nbtypes = len(ndtype)\n # Explicit names\n if names is not None:\n validate = NameValidator(**validationargs)\n if isinstance(names, basestring):\n names = names.split(\",\")\n # Simple dtype: repeat to match the nb of names\n if nbtypes == 0:\n formats = tuple([ndtype.type] * len(names))\n names = validate(names, defaultfmt=defaultfmt)\n ndtype = np.dtype(zip(names, formats))\n # Structured dtype: just validate the names as needed\n else:\n ndtype.names = validate(names, nbfields=nbtypes,\n defaultfmt=defaultfmt)\n # No implicit names\n elif (nbtypes > 0):\n validate 
= NameValidator(**validationargs)\n # Default initial names : should we change the format ?\n if (ndtype.names == tuple(\"f%i\" % i for i in range(nbtypes))) and \\\n (defaultfmt != \"f%i\"):\n ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt)\n # Explicit initial names : just validate\n else:\n ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)\n return ndtype\n" ]
[ [ "numpy.array", "numpy.prod", "numpy.compat.asbytes_nested", "numpy.issubdtype", "numpy.dtype", "numpy.compat.asbytes" ] ]
jialeiY/pytorch-ssd
[ "3372664b14540a53b941404a20efe8e6b4aaf9ad", "3372664b14540a53b941404a20efe8e6b4aaf9ad" ]
[ "vision/transforms/transforms.py", "vision/utils/box_utils.py" ]
[ "# from https://github.com/amdegroot/ssd.pytorch\n\n\nimport torch\nfrom torchvision import transforms\nimport cv2\nimport numpy as np\nimport types\nfrom numpy import random\n\n\ndef intersect(box_a, box_b):\n max_xy = np.minimum(box_a[:, 2:], box_b[2:])\n min_xy = np.maximum(box_a[:, :2], box_b[:2])\n inter = np.clip((max_xy - min_xy), a_min=0, a_max=np.inf)\n return inter[:, 0] * inter[:, 1]\n\n\ndef jaccard_numpy(box_a, box_b):\n \"\"\"Compute the jaccard overlap of two sets of boxes. The jaccard overlap\n is simply the intersection over union of two boxes.\n E.g.:\n A ∩ B / A βˆͺ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n Args:\n box_a: Multiple bounding boxes, Shape: [num_boxes,4]\n box_b: Single bounding box, Shape: [4]\n Return:\n jaccard overlap: Shape: [box_a.shape[0], box_a.shape[1]]\n \"\"\"\n inter = intersect(box_a, box_b)\n area_a = ((box_a[:, 2]-box_a[:, 0]) *\n (box_a[:, 3]-box_a[:, 1])) # [A,B]\n area_b = ((box_b[2]-box_b[0]) *\n (box_b[3]-box_b[1])) # [A,B]\n union = area_a + area_b - inter\n return inter / union # [A,B]\n\n\nclass Compose(object):\n \"\"\"Composes several augmentations together.\n Args:\n transforms (List[Transform]): list of transforms to compose.\n Example:\n >>> augmentations.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img, boxes=None, labels=None):\n for t in self.transforms:\n img, boxes, labels = t(img, boxes, labels)\n return img, boxes, labels\n\n\nclass Lambda(object):\n \"\"\"Applies a lambda as a transform.\"\"\"\n\n def __init__(self, lambd):\n assert isinstance(lambd, types.LambdaType)\n self.lambd = lambd\n\n def __call__(self, img, boxes=None, labels=None):\n return self.lambd(img, boxes, labels)\n\n\nclass ConvertFromInts(object):\n def __call__(self, image, boxes=None, labels=None):\n return image.astype(np.float32), boxes, labels\n\n\nclass SubtractMeans(object):\n def 
__init__(self, mean):\n self.mean = np.array(mean, dtype=np.float32)\n\n def __call__(self, image, boxes=None, labels=None):\n image = image.astype(np.float32)\n image -= self.mean\n return image.astype(np.float32), boxes, labels\n\n\nclass ToAbsoluteCoords(object):\n def __call__(self, image, boxes=None, labels=None):\n height, width, channels = image.shape\n boxes[:, 0] *= width\n boxes[:, 2] *= width\n boxes[:, 1] *= height\n boxes[:, 3] *= height\n\n return image, boxes, labels\n\n\nclass ToPercentCoords(object):\n def __call__(self, image, boxes=None, labels=None):\n height, width, channels = image.shape\n if len(labels)>0:\n boxes[:, 0] /= width\n boxes[:, 2] /= width\n boxes[:, 1] /= height\n boxes[:, 3] /= height\n\n return image, boxes, labels\n\n\nclass Resize(object):\n def __init__(self, size=300):\n self.size = size\n\n def __call__(self, image, boxes=None, labels=None):\n image = cv2.resize(image, (self.size,\n self.size))\n return image, boxes, labels\n\n\nclass RandomSaturation(object):\n def __init__(self, lower=0.5, upper=1.5):\n self.lower = lower\n self.upper = upper\n assert self.upper >= self.lower, \"contrast upper must be >= lower.\"\n assert self.lower >= 0, \"contrast lower must be non-negative.\"\n\n def __call__(self, image, boxes=None, labels=None):\n if random.randint(2):\n image[:, :, 1] *= random.uniform(self.lower, self.upper)\n\n return image, boxes, labels\n\n\nclass RandomHue(object):\n def __init__(self, delta=18.0):\n assert delta >= 0.0 and delta <= 360.0\n self.delta = delta\n\n def __call__(self, image, boxes=None, labels=None):\n if random.randint(2):\n image[:, :, 0] += random.uniform(-self.delta, self.delta)\n image[:, :, 0][image[:, :, 0] > 360.0] -= 360.0\n image[:, :, 0][image[:, :, 0] < 0.0] += 360.0\n return image, boxes, labels\n\n\nclass RandomLightingNoise(object):\n def __init__(self):\n self.perms = ((0, 1, 2), (0, 2, 1),\n (1, 0, 2), (1, 2, 0),\n (2, 0, 1), (2, 1, 0))\n\n def __call__(self, image, boxes=None, 
labels=None):\n if random.randint(2):\n swap = self.perms[random.randint(len(self.perms))]\n shuffle = SwapChannels(swap) # shuffle channels\n image = shuffle(image)\n return image, boxes, labels\n\n\nclass ConvertColor(object):\n def __init__(self, current, transform):\n self.transform = transform\n self.current = current\n\n def __call__(self, image, boxes=None, labels=None):\n if self.current == 'BGR' and self.transform == 'HSV':\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n elif self.current == 'RGB' and self.transform == 'HSV':\n image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n elif self.current == 'BGR' and self.transform == 'RGB':\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n elif self.current == 'HSV' and self.transform == 'BGR':\n image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)\n elif self.current == 'HSV' and self.transform == \"RGB\":\n image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)\n else:\n raise NotImplementedError\n return image, boxes, labels\n\n\nclass RandomContrast(object):\n def __init__(self, lower=0.5, upper=1.5):\n self.lower = lower\n self.upper = upper\n assert self.upper >= self.lower, \"contrast upper must be >= lower.\"\n assert self.lower >= 0, \"contrast lower must be non-negative.\"\n\n # expects float image\n def __call__(self, image, boxes=None, labels=None):\n if random.randint(2):\n alpha = random.uniform(self.lower, self.upper)\n image *= alpha\n return image, boxes, labels\n\n\nclass RandomBrightness(object):\n def __init__(self, delta=32):\n assert delta >= 0.0\n assert delta <= 255.0\n self.delta = delta\n\n def __call__(self, image, boxes=None, labels=None):\n if random.randint(2):\n delta = random.uniform(-self.delta, self.delta)\n image += delta\n return image, boxes, labels\n\n\nclass ToCV2Image(object):\n def __call__(self, tensor, boxes=None, labels=None):\n return tensor.cpu().numpy().astype(np.float32).transpose((1, 2, 0)), boxes, labels\n\n\nclass ToTensor(object):\n def __call__(self, cvimage, boxes=None, 
labels=None):\n return torch.from_numpy(cvimage.astype(np.float32)).permute(2, 0, 1), boxes, labels\n\n\nclass RandomSampleCrop(object):\n \"\"\"Crop\n Arguments:\n img (Image): the image being input during training\n boxes (Tensor): the original bounding boxes in pt form\n labels (Tensor): the class labels for each bbox\n mode (float tuple): the min and max jaccard overlaps\n Return:\n (img, boxes, classes)\n img (Image): the cropped image\n boxes (Tensor): the adjusted bounding boxes in pt form\n labels (Tensor): the class labels for each bbox\n \"\"\"\n def __init__(self):\n self.sample_options = (\n # using entire original input image\n None,\n # sample a patch s.t. MIN jaccard w/ obj in .1,.3,.4,.7,.9\n (0.1, None),\n (0.3, None),\n (0.7, None),\n (0.9, None),\n # randomly sample a patch\n (None, None),\n )\n\n def __call__(self, image, boxes=None, labels=None):\n height, width, _ = image.shape\n while True:\n # randomly choose a mode\n #mode = random.choice(self.sample_options) # throws numpy deprecation warning\n mode = self.sample_options[random.randint(len(self.sample_options))]\n \n if mode is None:\n return image, boxes, labels\n\n min_iou, max_iou = mode\n if min_iou is None:\n min_iou = float('-inf')\n if max_iou is None:\n max_iou = float('inf')\n\n # max trails (50)\n for _ in range(50):\n current_image = image\n\n w = random.uniform(0.3 * width, width)\n h = random.uniform(0.3 * height, height)\n\n # aspect ratio constraint b/t .5 & 2\n if h / w < 0.5 or h / w > 2:\n continue\n\n left = random.uniform(width - w)\n top = random.uniform(height - h)\n\n # convert to integer rect x1,y1,x2,y2\n rect = np.array([int(left), int(top), int(left+w), int(top+h)])\n\n # calculate IoU (jaccard overlap) b/t the cropped and gt boxes\n overlap = jaccard_numpy(boxes, rect)\n\n # is min and max overlap constraint satisfied? 
if not try again\n if overlap.min() < min_iou and max_iou < overlap.max():\n continue\n\n # cut the crop from the image\n current_image = current_image[rect[1]:rect[3], rect[0]:rect[2],\n :]\n\n # keep overlap with gt box IF center in sampled patch\n centers = (boxes[:, :2] + boxes[:, 2:]) / 2.0\n\n # mask in all gt boxes that above and to the left of centers\n m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])\n\n # mask in all gt boxes that under and to the right of centers\n m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])\n\n # mask in that both m1 and m2 are true\n mask = m1 * m2\n\n # have any valid boxes? try again if not\n if not mask.any():\n continue\n\n # take only matching gt boxes\n current_boxes = boxes[mask, :].copy()\n\n # take only matching gt labels\n current_labels = labels[mask]\n\n # should we use the box left and top corner or the crop's\n current_boxes[:, :2] = np.maximum(current_boxes[:, :2],\n rect[:2])\n # adjust to crop (by substracting crop's left,top)\n current_boxes[:, :2] -= rect[:2]\n\n current_boxes[:, 2:] = np.minimum(current_boxes[:, 2:],\n rect[2:])\n # adjust to crop (by substracting crop's left,top)\n current_boxes[:, 2:] -= rect[:2]\n\n return current_image, current_boxes, current_labels\n\n\nclass Expand(object):\n def __init__(self, mean):\n self.mean = mean\n\n def __call__(self, image, boxes, labels):\n if random.randint(2):\n return image, boxes, labels\n\n height, width, depth = image.shape\n ratio = random.uniform(1, 4)\n left = random.uniform(0, width*ratio - width)\n top = random.uniform(0, height*ratio - height)\n\n expand_image = np.zeros(\n (int(height*ratio), int(width*ratio), depth),\n dtype=image.dtype)\n expand_image[:, :, :] = self.mean\n expand_image[int(top):int(top + height),\n int(left):int(left + width)] = image\n image = expand_image\n\n boxes = boxes.copy()\n boxes[:, :2] += (int(left), int(top))\n boxes[:, 2:] += (int(left), int(top))\n\n return image, boxes, labels\n\n\nclass 
RandomMirror(object):\n def __call__(self, image, boxes, classes):\n _, width, _ = image.shape\n if random.randint(2):\n image = image[:, ::-1]\n boxes = boxes.copy()\n boxes[:, 0::2] = width - boxes[:, 2::-2]\n return image, boxes, classes\n\n\nclass SwapChannels(object):\n \"\"\"Transforms a tensorized image by swapping the channels in the order\n specified in the swap tuple.\n Args:\n swaps (int triple): final order of channels\n eg: (2, 1, 0)\n \"\"\"\n\n def __init__(self, swaps):\n self.swaps = swaps\n\n def __call__(self, image):\n \"\"\"\n Args:\n image (Tensor): image tensor to be transformed\n Return:\n a tensor with channels swapped according to swap\n \"\"\"\n # if torch.is_tensor(image):\n # image = image.data.cpu().numpy()\n # else:\n # image = np.array(image)\n image = image[:, :, self.swaps]\n return image\n\n\nclass PhotometricDistort(object):\n def __init__(self):\n self.pd = [\n RandomContrast(), # RGB\n ConvertColor(current=\"RGB\", transform='HSV'), # HSV\n RandomSaturation(), # HSV\n RandomHue(), # HSV\n ConvertColor(current='HSV', transform='RGB'), # RGB\n RandomContrast() # RGB\n ]\n self.rand_brightness = RandomBrightness()\n self.rand_light_noise = RandomLightingNoise()\n\n def __call__(self, image, boxes, labels):\n im = image.copy()\n im, boxes, labels = self.rand_brightness(im, boxes, labels)\n if random.randint(2):\n distort = Compose(self.pd[:-1])\n else:\n distort = Compose(self.pd[1:])\n im, boxes, labels = distort(im, boxes, labels)\n return self.rand_light_noise(im, boxes, labels)\n\n", "import collections\nimport torch\nimport itertools\nfrom typing import List\nimport math\nimport numpy as np\n\nSSDBoxSizes = collections.namedtuple('SSDBoxSizes', ['min', 'max'])\n\nSSDSpec = collections.namedtuple('SSDSpec', ['feature_map_size', 'shrinkage', 'box_sizes', 'aspect_ratios'])\n\n\ndef generate_ssd_priors(specs: List[SSDSpec], image_size, clamp=True) -> torch.Tensor:\n \"\"\"Generate SSD Prior Boxes.\n\n It returns the center, height 
and width of the priors. The values are relative to the image size\n Args:\n specs: SSDSpecs about the shapes of sizes of prior boxes. i.e.\n specs = [\n SSDSpec(38, 8, SSDBoxSizes(30, 60), [2]),\n SSDSpec(19, 16, SSDBoxSizes(60, 111), [2, 3]),\n SSDSpec(10, 32, SSDBoxSizes(111, 162), [2, 3]),\n SSDSpec(5, 64, SSDBoxSizes(162, 213), [2, 3]),\n SSDSpec(3, 100, SSDBoxSizes(213, 264), [2]),\n SSDSpec(1, 300, SSDBoxSizes(264, 315), [2])\n ]\n image_size: image size.\n clamp: if true, clamp the values to make fall between [0.0, 1.0]\n Returns:\n priors (num_priors, 4): The prior boxes represented as [[center_x, center_y, w, h]]. All the values\n are relative to the image size.\n \"\"\"\n priors = []\n for spec in specs:\n scale = image_size / spec.shrinkage\n for j, i in itertools.product(range(spec.feature_map_size), repeat=2):\n x_center = (i + 0.5) / scale\n y_center = (j + 0.5) / scale\n\n # small sized square box\n size = spec.box_sizes.min\n h = w = size / image_size\n priors.append([\n x_center,\n y_center,\n w,\n h\n ])\n\n # big sized square box\n size = math.sqrt(spec.box_sizes.max * spec.box_sizes.min)\n h = w = size / image_size\n priors.append([\n x_center,\n y_center,\n w,\n h\n ])\n\n # change h/w ratio of the small sized box\n size = spec.box_sizes.min\n h = w = size / image_size\n for ratio in spec.aspect_ratios:\n ratio = math.sqrt(ratio)\n priors.append([\n x_center,\n y_center,\n w * ratio,\n h / ratio\n ])\n priors.append([\n x_center,\n y_center,\n w / ratio,\n h * ratio\n ])\n\n priors = torch.tensor(priors)\n if clamp:\n torch.clamp(priors, 0.0, 1.0, out=priors)\n return priors\n\n\ndef convert_locations_to_boxes(locations, priors, center_variance,\n size_variance):\n \"\"\"Convert regressional location results of SSD into boxes in the form of (center_x, center_y, h, w).\n\n The conversion:\n $$predicted\\_center * center_variance = \\frac {real\\_center - prior\\_center} {prior\\_hw}$$\n $$exp(predicted\\_hw * size_variance) = \\frac {real\\_hw} 
{prior\\_hw}$$\n We do it in the inverse direction here.\n Args:\n locations (batch_size, num_priors, 4): the regression output of SSD. It will contain the outputs as well.\n priors (num_priors, 4) or (batch_size/1, num_priors, 4): prior boxes.\n center_variance: a float used to change the scale of center.\n size_variance: a float used to change of scale of size.\n Returns:\n boxes: priors: [[center_x, center_y, h, w]]. All the values\n are relative to the image size.\n \"\"\"\n # priors can have one dimension less.\n if priors.dim() + 1 == locations.dim():\n priors = priors.unsqueeze(0)\n return torch.cat([\n locations[..., :2] * center_variance * priors[..., 2:] + priors[..., :2],\n torch.exp(locations[..., 2:] * size_variance) * priors[..., 2:]\n ], dim=locations.dim() - 1)\n\n\ndef convert_boxes_to_locations(center_form_boxes, center_form_priors, center_variance, size_variance):\n # priors can have one dimension less\n if center_form_priors.dim() + 1 == center_form_boxes.dim():\n center_form_priors = center_form_priors.unsqueeze(0)\n return torch.cat([\n (center_form_boxes[..., :2] - center_form_priors[..., :2]) / center_form_priors[..., 2:] / center_variance,\n torch.log(center_form_boxes[..., 2:] / center_form_priors[..., 2:]) / size_variance\n ], dim=center_form_boxes.dim() - 1)\n\n\ndef area_of(left_top, right_bottom) -> torch.Tensor:\n \"\"\"Compute the areas of rectangles given two corners.\n\n Args:\n left_top (N, 2): left top corner.\n right_bottom (N, 2): right bottom corner.\n\n Returns:\n area (N): return the area.\n \"\"\"\n hw = torch.clamp(right_bottom - left_top, min=0.0)\n return hw[..., 0] * hw[..., 1]\n\n\ndef iou_of(boxes0, boxes1, eps=1e-5):\n \"\"\"Return intersection-over-union (Jaccard index) of boxes.\n\n Args:\n boxes0 (N, 4): ground truth boxes.\n boxes1 (N or 1, 4): predicted boxes.\n eps: a small number to avoid 0 as denominator.\n Returns:\n iou (N): IoU values.\n \"\"\"\n overlap_left_top = torch.max(boxes0[..., :2], boxes1[..., 
:2])\n overlap_right_bottom = torch.min(boxes0[..., 2:], boxes1[..., 2:])\n\n overlap_area = area_of(overlap_left_top, overlap_right_bottom)\n area0 = area_of(boxes0[..., :2], boxes0[..., 2:])\n area1 = area_of(boxes1[..., :2], boxes1[..., 2:])\n return overlap_area / (area0 + area1 - overlap_area + eps)\n\n\ndef assign_priors(gt_boxes, gt_labels, corner_form_priors,\n iou_threshold):\n \"\"\"Assign ground truth boxes and targets to priors.\n\n Args:\n gt_boxes (num_targets, 4): ground truth boxes.\n gt_labels (num_targets): labels of targets.\n priors (num_priors, 4): corner form priors\n Returns:\n boxes (num_priors, 4): real values for priors.\n labels (num_priros): labels for priors.\n \"\"\"\n # size: num_priors x num_targets\n num_priors=corner_form_priors.shape[0]\n if len(gt_labels)==0:\n boxes=corner_form_priors\n labels=torch.from_numpy(np.zeros(num_priors,dtype=np.int64))\n else:\n ious = iou_of(gt_boxes.unsqueeze(0), corner_form_priors.unsqueeze(1))\n # size: num_priors\n best_target_per_prior, best_target_per_prior_index = ious.max(1)\n # size: num_targets\n best_prior_per_target, best_prior_per_target_index = ious.max(0)\n\n for target_index, prior_index in enumerate(best_prior_per_target_index):\n best_target_per_prior_index[prior_index] = target_index\n # 2.0 is used to make sure every target has a prior assigned\n best_target_per_prior.index_fill_(0, best_prior_per_target_index, 2)\n # size: num_priors\n labels = gt_labels[best_target_per_prior_index]\n labels[best_target_per_prior < iou_threshold] = 0 # the backgournd id\n boxes = gt_boxes[best_target_per_prior_index]\n\n return boxes, labels\n\n\ndef hard_negative_mining(loss, labels, neg_pos_ratio):\n \"\"\"\n It used to suppress the presence of a large number of negative prediction.\n It works on image level not batch level.\n For any example/image, it keeps all the positive predictions and\n cut the number of negative predictions to make sure the ratio\n between the negative examples and 
positive examples is no more\n the given ratio for an image.\n\n Args:\n loss (N, num_priors): the loss for each example.\n labels (N, num_priors): the labels.\n neg_pos_ratio: the ratio between the negative examples and positive examples.\n \"\"\"\n pos_mask = labels > 0\n num_pos = pos_mask.long().sum(dim=1, keepdim=True)\n num_pos[num_pos==0]=1\n\n\n num_neg = num_pos * neg_pos_ratio\n\n loss[pos_mask] = -math.inf\n _, indexes = loss.sort(dim=1, descending=True)\n _, orders = indexes.sort(dim=1)\n neg_mask = orders < num_neg\n return pos_mask | neg_mask\n\n\ndef center_form_to_corner_form(locations):\n return torch.cat([locations[..., :2] - locations[..., 2:]/2,\n locations[..., :2] + locations[..., 2:]/2], locations.dim() - 1) \n\n\ndef corner_form_to_center_form(boxes):\n return torch.cat([\n (boxes[..., :2] + boxes[..., 2:]) / 2,\n boxes[..., 2:] - boxes[..., :2]\n ], boxes.dim() - 1)\n\n\ndef hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):\n \"\"\"\n\n Args:\n box_scores (N, 5): boxes in corner-form and probabilities.\n iou_threshold: intersection over union threshold.\n top_k: keep top_k results. 
If k <= 0, keep all the results.\n candidate_size: only consider the candidates with the highest scores.\n Returns:\n picked: a list of indexes of the kept boxes\n \"\"\"\n scores = box_scores[:, -1]\n boxes = box_scores[:, :-1]\n picked = []\n _, indexes = scores.sort(descending=True)\n indexes = indexes[:candidate_size]\n while len(indexes) > 0:\n current = indexes[0]\n picked.append(current.item())\n if 0 < top_k == len(picked) or len(indexes) == 1:\n break\n current_box = boxes[current, :]\n indexes = indexes[1:]\n rest_boxes = boxes[indexes, :]\n iou = iou_of(\n rest_boxes,\n current_box.unsqueeze(0),\n )\n indexes = indexes[iou <= iou_threshold]\n\n return box_scores[picked, :]\n\n\ndef nms(box_scores, nms_method=None, score_threshold=None, iou_threshold=None,\n sigma=0.5, top_k=-1, candidate_size=200):\n if nms_method == \"soft\":\n return soft_nms(box_scores, score_threshold, sigma, top_k)\n else:\n return hard_nms(box_scores, iou_threshold, top_k, candidate_size=candidate_size)\n\n\ndef soft_nms(box_scores, score_threshold, sigma=0.5, top_k=-1):\n \"\"\"Soft NMS implementation.\n\n References:\n https://arxiv.org/abs/1704.04503\n https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/cython_nms.pyx\n\n Args:\n box_scores (N, 5): boxes in corner-form and probabilities.\n score_threshold: boxes with scores less than value are not considered.\n sigma: the parameter in score re-computation.\n scores[i] = scores[i] * exp(-(iou_i)^2 / simga)\n top_k: keep top_k results. 
If k <= 0, keep all the results.\n Returns:\n picked_box_scores (K, 5): results of NMS.\n \"\"\"\n picked_box_scores = []\n while box_scores.size(0) > 0:\n max_score_index = torch.argmax(box_scores[:, 4])\n cur_box_prob = torch.tensor(box_scores[max_score_index, :])\n picked_box_scores.append(cur_box_prob)\n if len(picked_box_scores) == top_k > 0 or box_scores.size(0) == 1:\n break\n cur_box = cur_box_prob[:-1]\n box_scores[max_score_index, :] = box_scores[-1, :]\n box_scores = box_scores[:-1, :]\n ious = iou_of(cur_box.unsqueeze(0), box_scores[:, :-1])\n box_scores[:, -1] = box_scores[:, -1] * torch.exp(-(ious * ious) / sigma)\n box_scores = box_scores[box_scores[:, -1] > score_threshold, :]\n if len(picked_box_scores) > 0:\n return torch.stack(picked_box_scores)\n else:\n return torch.tensor([])\n\n\n\n" ]
[ [ "numpy.array", "numpy.minimum", "numpy.random.uniform", "numpy.random.randint", "numpy.clip", "numpy.maximum" ], [ "torch.stack", "torch.min", "numpy.zeros", "torch.max", "torch.clamp", "torch.tensor", "torch.log", "torch.exp", "torch.argmax" ] ]
ptrebert/reference-data
[ "7bca069b8995660252d4f601976f9f7abaaf063b" ]
[ "scripts/process_mapping.py" ]
[ "#!/usr/bin/env python3\n# coding=utf-8\n\nimport os as os\nimport sys as sys\nimport traceback as trb\nimport argparse as argp\nimport collections as col\nimport multiprocessing as mp\nimport functools as fnt\nimport gzip as gz\n\nimport pandas as pd\n\n\nCrossBlock = col.namedtuple('CrossBlock', ['chrom', 'start', 'end', 'block'])\n\nSymmBlock = col.namedtuple('SymmBlock', ['trg_chrom', 'trg_start', 'trg_end', 'trg_strand',\n 'block',\n 'qry_chrom', 'qry_start', 'qry_end', 'qry_strand'])\n\n\ndef _parse_crossmap_line(num, explode, line):\n \"\"\"\n :param line:\n :return:\n \"\"\"\n chrom, start, end, block = explode(line)\n cb = CrossBlock(chrom, num(start), num(end), num(block))\n return cb\n\n\ndef _parse_symm_line(num, explode, line):\n \"\"\"\n :param line:\n :return:\n \"\"\"\n tchrom, tstart, tend, tstrand, block, qchrom, qstart, qend, qstrand = explode(line)\n sb = SymmBlock(tchrom, num(tstart), num(tend), tstrand,\n num(block),\n qchrom, num(qstart), num(qend), qstrand)\n return sb\n\n\ndef _compare_blocks(symblock, crossblock):\n \"\"\"\n :param symblock:\n :param crossblock:\n :return:\n \"\"\"\n v = symblock.qry_chrom == crossblock.chrom\n v &= symblock.qry_start == crossblock.start\n v &= symblock.qry_end == crossblock.end\n return v\n\n\ndef parse_command_line():\n \"\"\"\n :return:\n \"\"\"\n parser = argp.ArgumentParser()\n parser.add_argument('--symm-map', '-s', type=str, dest='symmmap')\n parser.add_argument('--cross-map', '-c', type=str, dest='crossmap')\n parser.add_argument('--chain-lut', '-lu', type=str, dest='chainlut')\n parser.add_argument('--output', '-o', type=str, dest='output')\n args = parser.parse_args()\n return args\n\n\ndef confirm_mapping(lut, symb, cmb):\n \"\"\"\n :param lut:\n :param symb:\n :param cmb:\n :return:\n \"\"\"\n chains = lut.query('qchrom == @cmb.chrom and qstart < @cmb.end and '\n 'qend > @cmb.start and score > 1000 and '\n 'tlength > 100 and qlength > 100', inplace=False)\n assert not chains.empty, 'Blocks {} 
and {} resulted in empty lookup'.format(symb, cmb)\n if chains.shape[0] == 1:\n assert symb.trg_chrom == chains['tchrom'].item(), 'Chromosome mismatch: {} vs {}'.format(symb, chains)\n assert symb.trg_start < chains['tend'].item() and symb.trg_end > chains['tstart'].item(),\\\n 'Coordinates incompatible: {} vs {}'.format(symb, chains)\n assert symb.qry_strand == chains['qstrand'].item(), 'Strand mismatch: {} vs {}'.format(symb, chains)\n # only one chain matches - optimal case\n new_symb = SymmBlock(symb.trg_chrom, symb.trg_start, symb.trg_end, symb.trg_strand,\n symb.block,\n symb.qry_chrom, cmb.start, cmb.end, symb.qry_strand)\n return new_symb\n elif chains.shape[0] == 2:\n min_val = chains['score'].min()\n chains = chains.query('score == @min_val', inplace=False)\n assert symb.trg_chrom == chains['tchrom'].item(), 'Chromosome mismatch: {} vs {}'.format(symb, chains)\n assert symb.trg_start < chains['tend'].item() and symb.trg_end > chains['tstart'].item(),\\\n 'Coordinates incompatible: {} vs {}'.format(symb, chains)\n new_symb = SymmBlock(symb.trg_chrom, symb.trg_start, symb.trg_end, symb.trg_strand,\n symb.block,\n symb.qry_chrom, cmb.start, cmb.end, chains['qstrand'].item())\n return new_symb\n else:\n raise ValueError('Query blocks\\n{}\\nand\\n{}\\nreturned several matches:\\n\\n{}'.format(symb, cmb, chains))\n\n\ndef lookup_chain(chainlut, recv_lines, send_checked):\n \"\"\"\n :param chainlut:\n :param recv_lines:\n :param send_checked:\n :return:\n \"\"\"\n with pd.HDFStore(chainlut, 'r') as hdf:\n lut = hdf['chainlut']\n check_map = fnt.partial(confirm_mapping, *(lut, ))\n while 1:\n if recv_lines.poll():\n item = recv_lines.recv()\n if item is None:\n break\n symb, cmb = item\n try:\n new_symb = check_map(symb, cmb)\n send_checked.send(new_symb)\n except Exception as err:\n send_checked.send(None)\n raise err\n send_checked.send(None)\n return\n\n\ndef read_map_files(symmap, crossmap, send_lines, recv_checked, send_blocks):\n \"\"\"\n :param symmap:\n 
:param crossmap:\n :param send_lines:\n :param recv_checked:\n :param send_blocks:\n :return:\n \"\"\"\n read_symm = fnt.partial(_parse_symm_line, *(int, str.split))\n read_cross = fnt.partial(_parse_crossmap_line, *(int, str.split))\n comp = _compare_blocks\n clean = str.strip\n read_lines = 0\n try:\n with gz.open(symmap, 'rt') as symmfile:\n with open(crossmap, 'r') as crossfile:\n while 1:\n try:\n crossblock = read_cross(crossfile.readline())\n except ValueError:\n break\n symblock = read_symm(clean(symmfile.readline()))\n read_lines += 1\n assert symblock.block == crossblock.block,\\\n 'Block ID mismatch - files not sorted? {} vs {}'.format(symblock, crossblock)\n if comp(symblock, crossblock):\n send_blocks.send(symblock)\n else:\n send_lines.send((symblock, crossblock))\n while recv_checked.poll():\n item = recv_checked.recv()\n if item is None:\n raise RuntimeError('Reader received None before completion, read {} lines'.format(read_lines))\n send_blocks.send(item)\n except Exception as err:\n send_lines.send(None)\n send_blocks.send(None)\n raise err\n else:\n send_lines.send(None)\n while 1:\n if recv_checked.poll():\n item = recv_checked.recv()\n if item is None:\n break\n send_blocks.send(item)\n return\n\n\ndef dump_buffer(outbuffer):\n \"\"\"\n :param outbuffer:\n :return:\n \"\"\"\n conv = str\n for tup in outbuffer:\n _ = sys.stdout.write(tup[0] + '\\t' + conv(tup[1]) + '\\t' + conv(tup[2]) + '\\t' +\n tup[3] + '\\t' + conv(tup[4]) + '\\t' + tup[5] + '\\t' +\n conv(tup[6]) + '\\t' + conv(tup[7]) + '\\t' + tup[8] + '\\n')\n return\n\n\ndef main():\n \"\"\"\n :return:\n \"\"\"\n mp.set_start_method('forkserver')\n args = parse_command_line()\n outbuffer = []\n children = []\n recv_lines, send_lines = mp.Pipe(duplex=False)\n recv_checked, send_checked = mp.Pipe(duplex=False)\n recv_blocks, send_blocks = mp.Pipe(duplex=False)\n deadcount = 0\n try:\n\n reader = mp.Process(target=read_map_files, args=(args.symmmap, args.crossmap, send_lines,\n 
recv_checked, send_blocks),\n name='read')\n reader.daemon = True\n checker = mp.Process(target=lookup_chain, args=(args.chainlut, recv_lines, send_checked),\n name='check')\n checker.daemon = True\n children = [reader, checker]\n for p in children:\n p.start()\n p.join(0.01)\n while 1:\n if not (reader.is_alive() and checker.is_alive()):\n deadcount += 1\n if deadcount > 9:\n raise RuntimeError('I think he is dead, Jim.\\n'\n 'Reader process is alive: {}\\n'\n 'Checker process is alive {}'.format(reader.is_alive(), checker.is_alive()))\n if recv_blocks.poll():\n item = recv_blocks.recv()\n if item is None:\n break\n outbuffer.append(item)\n if len(outbuffer) > 100000:\n dump_buffer(outbuffer)\n outbuffer = []\n except Exception as err:\n for p in children:\n if p.name == 'check':\n try:\n send_lines.send(None)\n except:\n pass\n elif p.name == 'read':\n try:\n send_checked.send(None)\n except:\n pass\n raise err\n else:\n dump_buffer(outbuffer)\n return\n\n\nif __name__ == '__main__':\n try:\n main()\n except Exception as err:\n trb.print_exc(file=sys.stderr)\n sys.stderr.write('\\nError: {}\\n'.format(str(err)))\n sys.exit(1)\n else:\n sys.exit(0)\n" ]
[ [ "pandas.HDFStore" ] ]
MatthijsBrem/wheelchair-design-platform
[ "7d1d574bec793a1737669e134b650c0f16eefd5e" ]
[ "Archive/aupyom-master/aupyom/sampler.py" ]
[ "from queue import Empty, Queue\nfrom threading import Condition, Event, Thread\n\nimport numpy\n\n\nclass Sampler(object):\n \"\"\" Sampler used to play, stop and mix multiple sounds.\n\n .. warning:: A single sampler instance should be used at a time.\n\n \"\"\"\n\n def __init__(self, sr=22050, backend='sounddevice', timeout=1):\n \"\"\"\n :param int sr: samplerate used - all sounds added to the sampler will automatically be resampled if needed (- his can be a CPU consumming task, try to use sound with all identical sampling rate if possible.\n :param str backend: backend used for playing sound. Can be either 'sounddevice' or 'dummy'.\n\n \"\"\"\n self.sr = sr\n self.sounds = []\n\n self.chunks = Queue(1)\n self.chunk_available = Condition()\n self.is_done = Event() # new event to prevent play to be called again before the sound is actually played\n self.timeout = timeout # timeout value for graceful exit of the BackendStream\n\n if backend == 'dummy':\n from .dummy_stream import DummyStream\n self.BackendStream = DummyStream\n elif backend == 'sounddevice':\n from sounddevice import OutputStream\n self.BackendStream = OutputStream\n else:\n raise ValueError(\"Backend can either be 'sounddevice' or 'dummy'\")\n\n # TODO: use a process instead?\n self.play_thread = Thread(target=self.run)\n self.play_thread.daemon = True\n self.play_thread.start()\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.play_thread.join()\n\n def play(self, sound):\n \"\"\" Adds and plays a new Sound to the Sampler.\n\n :param sound: sound to play\n\n .. note:: If the sound is already playing, it will restart from the beginning.\n\n \"\"\"\n self.is_done.clear() # hold is_done until the sound is played\n if self.sr != sound.sr:\n raise ValueError('You can only play sound with a samplerate of {} (here {}). 
Use the Sound.resample method for instance.', self.sr, sound.sr)\n\n if sound in self.sounds:\n self.remove(sound)\n\n with self.chunk_available:\n self.sounds.append(sound)\n sound.playing = True\n\n self.chunk_available.notify()\n self.is_done.wait() # wait for the sound to be entirely played\n\n def remove(self, sound):\n \"\"\" Remove a currently played sound. \"\"\"\n with self.chunk_available:\n sound.playing = False\n self.sounds.remove(sound)\n\n # Play loop\n\n def next_chunks(self):\n \"\"\" Gets a new chunk from all played sound and mix them together. \"\"\"\n with self.chunk_available:\n while True:\n playing_sounds = [s for s in self.sounds if s.playing]\n\n chunks = []\n for s in playing_sounds:\n try:\n chunks.append(next(s.chunks))\n except StopIteration:\n s.playing = False\n self.sounds.remove(s)\n self.is_done.set() # sound was played, release is_done to end the wait in play\n\n if chunks:\n break\n\n self.chunk_available.wait()\n\n return numpy.mean(chunks, axis=0)\n\n def run(self):\n \"\"\" Play loop, i.e. send all sound chunk by chunk to the soundcard. \"\"\"\n self.running = True\n\n def chunks_producer():\n while self.running:\n self.chunks.put(self.next_chunks())\n\n t = Thread(target=chunks_producer)\n t.start()\n\n with self.BackendStream(samplerate=self.sr, channels=1) as stream:\n while self.running:\n try:\n stream.write(self.chunks.get(timeout=self.timeout)) # timeout so stream.write() thread can exit\n except Empty:\n self.running = False # let play_thread exit\n" ]
[ [ "numpy.mean" ] ]
gistvision/DIPsureWithSTE
[ "853faac97a451e6430b47f4d4da54c6d08a7ee50", "853faac97a451e6430b47f4d4da54c6d08a7ee50" ]
[ "main.py", "models/downsampler.py" ]
[ "import os\nimport argparse\nimport glob\nimport json\n\nimport cv2\nimport torch\nimport numpy as np\nimport pandas as pd\n\nimport loss\nimport models\nimport config_parser\n\n\nfrom utils.common_utils import *\nfrom utils.denoising_utils import *\nfrom torch.utils.tensorboard import SummaryWriter\n\n# beta version code\nimport additional_utils\n\ndef get_net(img_np, noise_np, args):\n net = models.get_net(args)\n\n if args.dip_type in [\"dip_sure\", \"eSURE\", \"NCV_y\", \"eSURE_fixed\", 'eSURE_new', 'eSURE_alpha', \"eSURE_uniform\", \"eSURE_clip\",\"eSURE_real\", \"no_div\", \"PURE\", \"PURE_dc\", \"dip_sure_new\"]:\n net_input = cv2_to_torch(noise_np, dtype)\n print(\"[*] input_type : noisy image\")\n else:\n INPUT = 'noise'\n input_depth = 1 if args.gray else 3\n # For SR, the get_noise should be same as img_np\n net_input = get_noise(input_depth, INPUT, (img_np.shape[1], img_np.shape[2])).type(dtype).detach()\n print(\"[*] input_type : noise\")\n\n return net, net_input\n\ndef get_optim(name, net, lr, beta):\n if name == \"adam\":\n print(\"[*] optim_type : Adam\")\n return torch.optim.Adam(net.parameters(), lr, beta)\n elif name == \"adamw\":\n print(\"[*] optim_type : AdamW (wd : 1e-2)\")\n return torch.optim.AdamW(net.parameters(), lr, beta) # default weight decay is 1e-2.\n elif name == \"RAdam\":\n return additional_utils.RAdam(net.parameters(), lr, beta)\n else:\n raise NotImplementedError\n\ndef image_restorazation(file, args):\n # MAIN\n stat = {}\n task_type = args.task_type\n\n # Step 1. 
prepare clean & degradation(noisy) pair\n img_np, noisy_np = load_image_pair(file, task_type, args)\n if args.GT_noise:\n args.sigma = (img_np.astype(np.float) - noisy_np.astype(np.float)).std()\n # np_to_torch function from utils.common_utils.\n # _np : C,H,W [0, 255] -> _torch : C,H,W [0,1] scale\n img_torch = cv2_to_torch(img_np, args.dtype)\n noise_torch = cv2_to_torch(noisy_np, args.dtype)\n\n # For PSNR measure.\n noisy_clip_np = np.clip(noisy_np, 0, 255)\n # Step 2. make model and model input\n net, net_input = get_net(img_np, noisy_np, args)\n net.train()\n\n # Step 3. set loss function.\n cal_loss = loss.get_loss(net, net_input, args)\n optimizer = get_optim(args.optim, net, args.lr, (args.beta1, args.beta2))\n if args.force_steplr:\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, gamma=.9, step_size=300)\n else:\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[2000, 3000], gamma=0.5)\n\n # Step 4. optimization and inference.\n # Hyper_param for Learning\n psnr_noisy_last = 0\n psnr_gt_running = 0\n\n save_dir = args.save_dir\n\n # Ensemble methods.\n running_avg = None\n running_avg_ratio = args.running_avg_ratio\n\n image_name = file.split(\"/\")[-1][:-4]\n np_save_dir = os.path.join(args.save_dir, image_name)\n os.makedirs(np_save_dir, exist_ok=True)\n\n stat[\"max_psnr\"] = 0\n stat[\"max_ssim\"] = 0\n stat[\"NUM_Backtracking\"] = 0\n\n args.writer = SummaryWriter(log_dir=\"runs/%s/%s\" % (args.exp_tag, args.desc + image_name))\n for ep in range(args.epoch):\n optimizer.zero_grad()\n total_loss, out = cal_loss(net_input, noise_torch)\n with torch.no_grad():\n mse_loss = torch.nn.functional.mse_loss(out, img_torch).item()\n diff_loss = total_loss.item() - mse_loss\n args.writer.add_scalar(\"loss/used_loss\", total_loss.item(), global_step=ep)\n args.writer.add_scalar(\"loss/MSE_loss\", mse_loss, global_step=ep)\n args.writer.add_scalar(\"loss/diff\", diff_loss, global_step=ep)\n\n # _torch : C,H,W [0,1] scale => _np : 
C,H,W [0, 255]\n #out = torch_to_cv2(net(net_input))\n out = torch_to_cv2(out)\n psnr_noisy = calculate_psnr(noisy_clip_np, out)\n psnr_gt = calculate_psnr(img_np, out)\n lpips_noisy = calculate_lpips(noisy_clip_np, out, args.lpips)\n lpips_gt = calculate_lpips(img_np, out, args.lpips)\n args.writer.add_scalar(\"psnr/noisy_to_out\", psnr_noisy, global_step=ep)\n args.writer.add_scalar(\"psnr/clean_to_out\", psnr_gt, global_step=ep)\n args.writer.add_scalar(\"lpips/noisy_to_out\", lpips_noisy, global_step=ep)\n args.writer.add_scalar(\"lpips/clean_to_out\", lpips_gt, global_step=ep)\n\n if total_loss < 0:\n print('\\nLoss is less than 0')\n for new_param, net_param in zip(last_net, net.parameters()):\n net_param.data.copy_(new_param.cuda())\n break\n if (psnr_noisy - psnr_noisy_last < -5) and (ep > 5) :\n print('\\nFalling back to previous checkpoint.')\n for new_param, net_param in zip(last_net, net.parameters()):\n net_param.data.copy_(new_param.cuda())\n stat[\"NUM_Backtracking\"] += 1\n if stat[\"NUM_Backtracking\"] > 10:\n break\n # continue\n else:\n # Running ensemble\n if True: #(ep % 50 == 0) and\n if running_avg is None:\n running_avg = out\n else:\n running_avg = running_avg * running_avg_ratio + out * (1 - running_avg_ratio)\n psnr_gt_running = calculate_psnr(img_np, running_avg)\n lpips_gt_running = calculate_lpips(img_np, running_avg, args.lpips, color=\"BGR\")\n args.writer.add_scalar(\"psnr/clean_to_avg\", psnr_gt_running, global_step=ep)\n args.writer.add_scalar(\"lpips/clean_to_avg\", lpips_gt_running, global_step=ep)\n\n if (stat[\"max_psnr\"] <= psnr_gt):\n stat[\"max_step\"] = ep\n stat[\"max_psnr\"] = psnr_gt\n stat[\"max_psnr_avg\"] = psnr_gt_running\n stat[\"max_lpips_avg\"] = lpips_gt_running\n stat[\"max_lpips\"] = lpips_gt \n max_out, maxavg_out = out.copy(),running_avg.copy()\n\n #save file\n if args.save_np:\n state_dict = net.state_dict()\n torch.save(state_dict, os.path.join(np_save_dir, \"max_psnr_state_dict.pth\"))\n\n if (ep == 200 
or ep == 10) and (psnr_gt_running < psnr_gt):\n running_avg = None\n\n # args.writer.add_image(\"result/gt_noise_out_avg\", np.concatenate([img_np, noisy_np, out, running_avg], axis=2), ep)\n print('Iteration %05d total loss / MSE / diff %f / %f / %f PSNR_noisy: %f psnr_gt: %f PSNR_gt_sm: %f' % (\n ep, total_loss.item(), mse_loss, diff_loss, psnr_noisy, psnr_gt, psnr_gt_running), end='\\r')\n\n last_net = [x.detach().cpu() for x in net.parameters()]\n psnr_noisy_last=psnr_noisy\n total_loss.backward()\n optimizer.step()\n scheduler.step()\n torch.cuda.empty_cache()\n\n if args.optim_init > 0:\n if ep % args.optim_init == 0:\n additional_utils.init_optim(net, optimizer)\n\n stat[\"final_ep\"] = ep\n stat[\"final_psnr\"] = psnr_gt\n stat[\"final_psnr_avg\"] = psnr_gt_running\n stat[\"final_lpips_avg\"]= lpips_gt_running\n stat[\"final_lpips\"] = lpips_gt\n\n\n # Make final images\n if True:\n save_CHW_np(save_dir + \"/%s.png\" % (image_name), out)\n save_CHW_np(save_dir + \"/%s_avg.png\" % (image_name), running_avg)\n save_CHW_np(save_dir + \"/%s_max.png\" % (image_name), max_out)\n save_CHW_np(save_dir + \"/%s_max_avg.png\" % (image_name), maxavg_out)\n\n if args.gray:\n stat[\"final_ssim\"] = calculate_ssim(img_np, out)\n stat[\"final_ssim_avg\"] = calculate_ssim(img_np, running_avg)\n stat[\"max_ssim\"] = calculate_ssim(img_np, max_out)\n stat[\"max_ssim_avg\"] = calculate_ssim(img_np, maxavg_out)\n log_file = open(save_dir + \"/%s_log.txt\" % (image_name), \"w\")\n print(stat, file=log_file)\n print(\"%s psnr clean_out : %.2f, %.2f noise_out : %.2f, max %.2f, %.2f\" % (\n image_name, psnr_gt_running, lpips_gt_running, psnr_noisy, stat[\"max_psnr\"], stat[\"max_lpips\"]), \" \" * 100)\n print(stat)\n args.writer.close()\n torch.cuda.empty_cache()\n return stat\n\n\ndef read_dataset_file_list(eval_data):\n dataset_dir = \"./testset/%s/\" % eval_data\n file_list1 = glob.glob(dataset_dir + \"*.tif\")\n file_list2 = glob.glob(dataset_dir + \"*.png\")\n file_list3 = 
glob.glob(dataset_dir + \"*.JPG\")\n file_list = file_list1 + file_list2 + file_list3\n return file_list\n\n\nif __name__ == \"__main__\":\n # For REPRODUCIBILITY\n print(\"[*] reproduce mode On\")\n torch.manual_seed(0)\n np.random.seed(0)\n if torch.cuda.is_available():\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n dtype = torch.cuda.FloatTensor\n lpips = get_lpips(\"cuda\")\n else:\n dtype = torch.FloatTensor\n lpips = get_lpips(\"cpu\")\n args = config_parser.main_parser()\n args.save_dir = \"./result/%s/%s/%s\" % (args.task_type, args.exp_tag, args.dip_type + args.desc)\n os.makedirs(args.save_dir, exist_ok = True)\n\n # default epoch setup.\n if args.task_type == \"denoising\":\n args.epoch = 3000 if args.epoch == 0 else args.epoch\n args.save_point = [1, 10, 100, 500, 1000, 2000, 3000, 4000]\n elif args.task_type == \"poisson\":\n args.epoch = 3000 if args.epoch == 0 else args.epoch\n args.save_point = [1, 10, 100, 500, 1000, 2000, 3000, 4000]\n\n\n with open(os.path.join(args.save_dir, 'args.json'), 'w') as f:\n json.dump(args.__dict__, f, indent=2)\n args.dtype = dtype\n args.lpips = lpips\n\n # file_list.\n file_list = read_dataset_file_list(args.eval_data)\n file_list = sorted(file_list)\n stat_list = []\n for file in file_list:\n print(\"[*] process image file : %s\" % file)\n stat = image_restorazation(file, args)\n stat_list.append(stat)\n\n data = pd.DataFrame(stat_list, index= [i.split(\"/\")[-1] for i in file_list])\n os.makedirs(\"./csv/%s/%s/\" % (args.task_type, args.exp_tag), exist_ok=True)\n data.to_csv(\"./csv/%s/%s/%s.csv\" % ( args.task_type, args.exp_tag ,args.dip_type+args.desc))\n print(\"experiment done\")\n print(data)\n", "import numpy as np\nimport torch\nimport torch.nn as nn\nfrom utils.REDutils import fspecial_gauss\n\n\nclass Downsampler(nn.Module):\n \"\"\"\n http://www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf\n \"\"\"\n\n def 
__init__(self, n_planes, factor, kernel_type, phase=0, kernel_width=None, support=None, sigma=None,\n preserve_size=False, pad_type='reflection', transpose_conv=False):\n super(Downsampler, self).__init__()\n\n assert phase in [0, 0.5], 'phase should be 0 or 0.5'\n\n if kernel_type == 'lanczos2':\n support = 2\n kernel_width = 4 * factor + 1\n kernel_type_ = 'lanczos'\n\n elif kernel_type == 'lanczos3':\n support = 3\n kernel_width = 6 * factor + 1\n kernel_type_ = 'lanczos'\n\n elif kernel_type == 'gauss12':\n kernel_width = 7\n sigma = 1 / 2\n kernel_type_ = 'gauss'\n\n elif kernel_type == 'gauss1sq2':\n kernel_width = 9\n sigma = 1. / np.sqrt(2)\n kernel_type_ = 'gauss'\n\n elif kernel_type == 'uniform_blur':\n kernel_width = 9\n kernel_type_ = 'uniform'\n pad_type = 'circular'\n\n elif kernel_type == 'gauss_blur':\n kernel_width = 25\n sigma = 1.6\n kernel_type_ = 'gauss'\n pad_type = 'circular'\n\n elif kernel_type in {'lanczos', 'gauss', 'box'}:\n kernel_type_ = kernel_type\n\n else:\n assert False, 'wrong name kernel'\n\n # note that `kernel width` will be different to actual size for phase = 1/2\n self.kernel = get_kernel(factor, kernel_type_, phase, kernel_width, support=support, sigma=sigma)\n if transpose_conv:\n if self.kernel.shape[0] % 2 == 1:\n pad = int((self.kernel.shape[0] - 1) // 2.)\n else:\n pad = int((self.kernel.shape[0] - factor) // 2.)\n downsampler = nn.ConvTranspose2d(n_planes, n_planes, kernel_size=self.kernel.shape,\n stride=factor, padding=pad)\n else:\n downsampler = nn.Conv2d(n_planes, n_planes, kernel_size=self.kernel.shape, stride=factor, padding=0)\n downsampler.weight.data[:] = 0\n downsampler.bias.data[:] = 0\n\n kernel_torch = torch.from_numpy(self.kernel)\n for i in range(n_planes):\n downsampler.weight.data[i, i] = kernel_torch\n\n self.downsampler_ = downsampler\n\n if preserve_size:\n if pad_type == 'circular':\n self.padding = lambda torch_in: pad_circular(torch_in, kernel_width // 2)\n elif pad_type == 'reflection':\n if 
self.kernel.shape[0] % 2 == 1:\n pad = int((self.kernel.shape[0] - 1) // 2.)\n else:\n pad = int((self.kernel.shape[0] - factor) // 2.)\n self.padding = nn.ReplicationPad2d(pad)\n else:\n assert False, \"pad_type have only circular or reflection options\"\n self.preserve_size = preserve_size\n\n def forward(self, input):\n if self.preserve_size:\n x = self.padding(input)\n else:\n x = input\n self.x = x\n return self.downsampler_(x)\n\n\ndef get_kernel(factor, kernel_type, phase, kernel_width, support=None, sigma=None):\n assert kernel_type in ['lanczos', 'gauss', 'box', 'uniform', 'blur']\n\n # factor = float(factor)\n if phase == 0.5 and kernel_type != 'box':\n kernel = np.zeros([kernel_width - 1, kernel_width - 1])\n else:\n kernel = np.zeros([kernel_width, kernel_width])\n\n if kernel_type == 'box':\n assert phase == 0.5, 'Box filter is always half-phased'\n kernel[:] = 1. / (kernel_width * kernel_width)\n\n elif kernel_type == 'gauss':\n assert sigma, 'sigma is not specified'\n assert phase != 0.5, 'phase 1/2 for gauss not implemented'\n return fspecial_gauss(kernel_width, sigma)\n\n elif kernel_type == 'uniform':\n kernel = np.ones([kernel_width, kernel_width])\n\n elif kernel_type == 'lanczos':\n assert support, 'support is not specified'\n center = (kernel_width + 1) / 2.\n\n for i in range(1, kernel.shape[0] + 1):\n for j in range(1, kernel.shape[1] + 1):\n\n if phase == 0.5:\n di = abs(i + 0.5 - center) / factor\n dj = abs(j + 0.5 - center) / factor\n else:\n di = abs(i - center) / factor\n dj = abs(j - center) / factor\n\n pi_sq = np.pi * np.pi\n\n val = 1\n if di != 0:\n val = val * support * np.sin(np.pi * di) * np.sin(np.pi * di / support)\n val = val / (np.pi * np.pi * di * di)\n\n if dj != 0:\n val = val * support * np.sin(np.pi * dj) * np.sin(np.pi * dj / support)\n val = val / (np.pi * np.pi * dj * dj)\n kernel[i - 1][j - 1] = val\n else:\n assert False, 'wrong method name'\n kernel /= kernel.sum()\n return kernel\n\n\ndef pad_circular(x, pad):\n 
\"\"\"\n :param x: pytorch tensor of shape: [batch, ch, h, w]\n :param pad: uint\n :return:\n \"\"\"\n x = torch.cat([x, x[:, :, 0:pad]], dim=2)\n x = torch.cat([x, x[:, :, :, 0:pad]], dim=3)\n x = torch.cat([x[:, :, -2 * pad:-pad], x], dim=2)\n x = torch.cat([x[:, :, :, -2 * pad:-pad], x], dim=3)\n return x" ]
[ [ "torch.optim.lr_scheduler.StepLR", "numpy.random.seed", "torch.no_grad", "torch.optim.lr_scheduler.MultiStepLR", "torch.manual_seed", "torch.nn.functional.mse_loss", "torch.cuda.empty_cache", "torch.cuda.is_available", "numpy.clip", "torch.utils.tensorboard.SummaryWriter" ], [ "torch.cat", "numpy.sin", "numpy.zeros", "torch.nn.ReplicationPad2d", "torch.nn.ConvTranspose2d", "numpy.ones", "torch.from_numpy", "torch.nn.Conv2d", "numpy.sqrt" ] ]
makarandtapaswi/SlowFast
[ "39ef35c9a086443209b458cceaec86a02e27b369", "39ef35c9a086443209b458cceaec86a02e27b369" ]
[ "slowfast/utils/ava_evaluation/np_box_list_ops.py", "slowfast/models/common.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Bounding Box List operations for Numpy BoxLists.\n\nExample box operations that are supported:\n * Areas: compute bounding box areas\n * IOU: pairwise intersection-over-union scores\n\"\"\"\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\nimport numpy as np\n\nfrom . 
import np_box_list, np_box_ops\n\n\nclass SortOrder(object):\n \"\"\"Enum class for sort order.\n\n Attributes:\n ascend: ascend order.\n descend: descend order.\n \"\"\"\n\n ASCEND = 1\n DESCEND = 2\n\n\ndef area(boxlist):\n \"\"\"Computes area of boxes.\n\n Args:\n boxlist: BoxList holding N boxes\n\n Returns:\n a numpy array with shape [N*1] representing box areas\n \"\"\"\n y_min, x_min, y_max, x_max = boxlist.get_coordinates()\n return (y_max - y_min) * (x_max - x_min)\n\n\ndef intersection(boxlist1, boxlist2):\n \"\"\"Compute pairwise intersection areas between boxes.\n\n Args:\n boxlist1: BoxList holding N boxes\n boxlist2: BoxList holding M boxes\n\n Returns:\n a numpy array with shape [N*M] representing pairwise intersection area\n \"\"\"\n return np_box_ops.intersection(boxlist1.get(), boxlist2.get())\n\n\ndef iou(boxlist1, boxlist2):\n \"\"\"Computes pairwise intersection-over-union between box collections.\n\n Args:\n boxlist1: BoxList holding N boxes\n boxlist2: BoxList holding M boxes\n\n Returns:\n a numpy array with shape [N, M] representing pairwise iou scores.\n \"\"\"\n return np_box_ops.iou(boxlist1.get(), boxlist2.get())\n\n\ndef ioa(boxlist1, boxlist2):\n \"\"\"Computes pairwise intersection-over-area between box collections.\n\n Intersection-over-area (ioa) between two boxes box1 and box2 is defined as\n their intersection area over box2's area. Note that ioa is not symmetric,\n that is, IOA(box1, box2) != IOA(box2, box1).\n\n Args:\n boxlist1: BoxList holding N boxes\n boxlist2: BoxList holding M boxes\n\n Returns:\n a numpy array with shape [N, M] representing pairwise ioa scores.\n \"\"\"\n return np_box_ops.ioa(boxlist1.get(), boxlist2.get())\n\n\ndef gather(boxlist, indices, fields=None):\n \"\"\"Gather boxes from BoxList according to indices and return new BoxList.\n\n By default, gather returns boxes corresponding to the input index list, as\n well as all additional fields stored in the boxlist (indexing into the\n first dimension). 
However one can optionally only gather from a\n subset of fields.\n\n Args:\n boxlist: BoxList holding N boxes\n indices: a 1-d numpy array of type int_\n fields: (optional) list of fields to also gather from. If None (default),\n all fields are gathered from. Pass an empty fields list to only gather\n the box coordinates.\n\n Returns:\n subboxlist: a BoxList corresponding to the subset of the input BoxList\n specified by indices\n\n Raises:\n ValueError: if specified field is not contained in boxlist or if the\n indices are not of type int_\n \"\"\"\n if indices.size:\n if np.amax(indices) >= boxlist.num_boxes() or np.amin(indices) < 0:\n raise ValueError(\"indices are out of valid range.\")\n subboxlist = np_box_list.BoxList(boxlist.get()[indices, :])\n if fields is None:\n fields = boxlist.get_extra_fields()\n for field in fields:\n extra_field_data = boxlist.get_field(field)\n subboxlist.add_field(field, extra_field_data[indices, ...])\n return subboxlist\n\n\ndef sort_by_field(boxlist, field, order=SortOrder.DESCEND):\n \"\"\"Sort boxes and associated fields according to a scalar field.\n\n A common use case is reordering the boxes according to descending scores.\n\n Args:\n boxlist: BoxList holding N boxes.\n field: A BoxList field for sorting and reordering the BoxList.\n order: (Optional) 'descend' or 'ascend'. 
Default is descend.\n\n Returns:\n sorted_boxlist: A sorted BoxList with the field in the specified order.\n\n Raises:\n ValueError: if specified field does not exist or is not of single dimension.\n ValueError: if the order is not either descend or ascend.\n \"\"\"\n if not boxlist.has_field(field):\n raise ValueError(\"Field \" + field + \" does not exist\")\n if len(boxlist.get_field(field).shape) != 1:\n raise ValueError(\"Field \" + field + \"should be single dimension.\")\n if order != SortOrder.DESCEND and order != SortOrder.ASCEND:\n raise ValueError(\"Invalid sort order\")\n\n field_to_sort = boxlist.get_field(field)\n sorted_indices = np.argsort(field_to_sort)\n if order == SortOrder.DESCEND:\n sorted_indices = sorted_indices[::-1]\n return gather(boxlist, sorted_indices)\n\n\ndef non_max_suppression(\n boxlist, max_output_size=10000, iou_threshold=1.0, score_threshold=-10.0\n):\n \"\"\"Non maximum suppression.\n\n This op greedily selects a subset of detection bounding boxes, pruning\n away boxes that have high IOU (intersection over union) overlap (> thresh)\n with already selected boxes. In each iteration, the detected bounding box with\n highest score in the available pool is selected.\n\n Args:\n boxlist: BoxList holding N boxes. Must contain a 'scores' field\n representing detection scores. All scores belong to the same class.\n max_output_size: maximum number of retained boxes\n iou_threshold: intersection over union threshold.\n score_threshold: minimum score threshold. Remove the boxes with scores\n less than this value. Default value is set to -10. 
A very\n low threshold to pass pretty much all the boxes, unless\n the user sets a different score threshold.\n\n Returns:\n a BoxList holding M boxes where M <= max_output_size\n Raises:\n ValueError: if 'scores' field does not exist\n ValueError: if threshold is not in [0, 1]\n ValueError: if max_output_size < 0\n \"\"\"\n if not boxlist.has_field(\"scores\"):\n raise ValueError(\"Field scores does not exist\")\n if iou_threshold < 0.0 or iou_threshold > 1.0:\n raise ValueError(\"IOU threshold must be in [0, 1]\")\n if max_output_size < 0:\n raise ValueError(\"max_output_size must be bigger than 0.\")\n\n boxlist = filter_scores_greater_than(boxlist, score_threshold)\n if boxlist.num_boxes() == 0:\n return boxlist\n\n boxlist = sort_by_field(boxlist, \"scores\")\n\n # Prevent further computation if NMS is disabled.\n if iou_threshold == 1.0:\n if boxlist.num_boxes() > max_output_size:\n selected_indices = np.arange(max_output_size)\n return gather(boxlist, selected_indices)\n else:\n return boxlist\n\n boxes = boxlist.get()\n num_boxes = boxlist.num_boxes()\n # is_index_valid is True only for all remaining valid boxes,\n is_index_valid = np.full(num_boxes, 1, dtype=bool)\n selected_indices = []\n num_output = 0\n for i in range(num_boxes):\n if num_output < max_output_size:\n if is_index_valid[i]:\n num_output += 1\n selected_indices.append(i)\n is_index_valid[i] = False\n valid_indices = np.where(is_index_valid)[0]\n if valid_indices.size == 0:\n break\n\n intersect_over_union = np_box_ops.iou(\n np.expand_dims(boxes[i, :], axis=0), boxes[valid_indices, :]\n )\n intersect_over_union = np.squeeze(intersect_over_union, axis=0)\n is_index_valid[valid_indices] = np.logical_and(\n is_index_valid[valid_indices],\n intersect_over_union <= iou_threshold,\n )\n return gather(boxlist, np.array(selected_indices))\n\n\ndef multi_class_non_max_suppression(\n boxlist, score_thresh, iou_thresh, max_output_size\n):\n \"\"\"Multi-class version of non maximum suppression.\n\n 
This op greedily selects a subset of detection bounding boxes, pruning\n away boxes that have high IOU (intersection over union) overlap (> thresh)\n with already selected boxes. It operates independently for each class for\n which scores are provided (via the scores field of the input box_list),\n pruning boxes with score less than a provided threshold prior to\n applying NMS.\n\n Args:\n boxlist: BoxList holding N boxes. Must contain a 'scores' field\n representing detection scores. This scores field is a tensor that can\n be 1 dimensional (in the case of a single class) or 2-dimensional, which\n which case we assume that it takes the shape [num_boxes, num_classes].\n We further assume that this rank is known statically and that\n scores.shape[1] is also known (i.e., the number of classes is fixed\n and known at graph construction time).\n score_thresh: scalar threshold for score (low scoring boxes are removed).\n iou_thresh: scalar threshold for IOU (boxes that that high IOU overlap\n with previously selected boxes are removed).\n max_output_size: maximum number of retained boxes per class.\n\n Returns:\n a BoxList holding M boxes with a rank-1 scores field representing\n corresponding scores for each box with scores sorted in decreasing order\n and a rank-1 classes field representing a class label for each box.\n Raises:\n ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have\n a valid scores field.\n \"\"\"\n if not 0 <= iou_thresh <= 1.0:\n raise ValueError(\"thresh must be between 0 and 1\")\n if not isinstance(boxlist, np_box_list.BoxList):\n raise ValueError(\"boxlist must be a BoxList\")\n if not boxlist.has_field(\"scores\"):\n raise ValueError(\"input boxlist must have 'scores' field\")\n scores = boxlist.get_field(\"scores\")\n if len(scores.shape) == 1:\n scores = np.reshape(scores, [-1, 1])\n elif len(scores.shape) == 2:\n if scores.shape[1] is None:\n raise ValueError(\n \"scores field must have statically defined second \" 
\"dimension\"\n )\n else:\n raise ValueError(\"scores field must be of rank 1 or 2\")\n num_boxes = boxlist.num_boxes()\n num_scores = scores.shape[0]\n num_classes = scores.shape[1]\n\n if num_boxes != num_scores:\n raise ValueError(\"Incorrect scores field length: actual vs expected.\")\n\n selected_boxes_list = []\n for class_idx in range(num_classes):\n boxlist_and_class_scores = np_box_list.BoxList(boxlist.get())\n class_scores = np.reshape(scores[0:num_scores, class_idx], [-1])\n boxlist_and_class_scores.add_field(\"scores\", class_scores)\n boxlist_filt = filter_scores_greater_than(\n boxlist_and_class_scores, score_thresh\n )\n nms_result = non_max_suppression(\n boxlist_filt,\n max_output_size=max_output_size,\n iou_threshold=iou_thresh,\n score_threshold=score_thresh,\n )\n nms_result.add_field(\n \"classes\", np.zeros_like(nms_result.get_field(\"scores\")) + class_idx\n )\n selected_boxes_list.append(nms_result)\n selected_boxes = concatenate(selected_boxes_list)\n sorted_boxes = sort_by_field(selected_boxes, \"scores\")\n return sorted_boxes\n\n\ndef scale(boxlist, y_scale, x_scale):\n \"\"\"Scale box coordinates in x and y dimensions.\n\n Args:\n boxlist: BoxList holding N boxes\n y_scale: float\n x_scale: float\n\n Returns:\n boxlist: BoxList holding N boxes\n \"\"\"\n y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)\n y_min = y_scale * y_min\n y_max = y_scale * y_max\n x_min = x_scale * x_min\n x_max = x_scale * x_max\n scaled_boxlist = np_box_list.BoxList(\n np.hstack([y_min, x_min, y_max, x_max])\n )\n\n fields = boxlist.get_extra_fields()\n for field in fields:\n extra_field_data = boxlist.get_field(field)\n scaled_boxlist.add_field(field, extra_field_data)\n\n return scaled_boxlist\n\n\ndef clip_to_window(boxlist, window):\n \"\"\"Clip bounding boxes to a window.\n\n This op clips input bounding boxes (represented by bounding box\n corners) to a window, optionally filtering out boxes that do not\n overlap at all with the 
window.\n\n Args:\n boxlist: BoxList holding M_in boxes\n window: a numpy array of shape [4] representing the\n [y_min, x_min, y_max, x_max] window to which the op\n should clip boxes.\n\n Returns:\n a BoxList holding M_out boxes where M_out <= M_in\n \"\"\"\n y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)\n win_y_min = window[0]\n win_x_min = window[1]\n win_y_max = window[2]\n win_x_max = window[3]\n y_min_clipped = np.fmax(np.fmin(y_min, win_y_max), win_y_min)\n y_max_clipped = np.fmax(np.fmin(y_max, win_y_max), win_y_min)\n x_min_clipped = np.fmax(np.fmin(x_min, win_x_max), win_x_min)\n x_max_clipped = np.fmax(np.fmin(x_max, win_x_max), win_x_min)\n clipped = np_box_list.BoxList(\n np.hstack([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped])\n )\n clipped = _copy_extra_fields(clipped, boxlist)\n areas = area(clipped)\n nonzero_area_indices = np.reshape(\n np.nonzero(np.greater(areas, 0.0)), [-1]\n ).astype(np.int32)\n return gather(clipped, nonzero_area_indices)\n\n\ndef prune_non_overlapping_boxes(boxlist1, boxlist2, minoverlap=0.0):\n \"\"\"Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.\n\n For each box in boxlist1, we want its IOA to be more than minoverlap with\n at least one of the boxes in boxlist2. 
If it does not, we remove it.\n\n Args:\n boxlist1: BoxList holding N boxes.\n boxlist2: BoxList holding M boxes.\n minoverlap: Minimum required overlap between boxes, to count them as\n overlapping.\n\n Returns:\n A pruned boxlist with size [N', 4].\n \"\"\"\n intersection_over_area = ioa(boxlist2, boxlist1) # [M, N] tensor\n intersection_over_area = np.amax(\n intersection_over_area, axis=0\n ) # [N] tensor\n keep_bool = np.greater_equal(intersection_over_area, np.array(minoverlap))\n keep_inds = np.nonzero(keep_bool)[0]\n new_boxlist1 = gather(boxlist1, keep_inds)\n return new_boxlist1\n\n\ndef prune_outside_window(boxlist, window):\n \"\"\"Prunes bounding boxes that fall outside a given window.\n\n This function prunes bounding boxes that even partially fall outside the given\n window. See also ClipToWindow which only prunes bounding boxes that fall\n completely outside the window, and clips any bounding boxes that partially\n overflow.\n\n Args:\n boxlist: a BoxList holding M_in boxes.\n window: a numpy array of size 4, representing [ymin, xmin, ymax, xmax]\n of the window.\n\n Returns:\n pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in.\n valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes\n in the input tensor.\n \"\"\"\n\n y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)\n win_y_min = window[0]\n win_x_min = window[1]\n win_y_max = window[2]\n win_x_max = window[3]\n coordinate_violations = np.hstack(\n [\n np.less(y_min, win_y_min),\n np.less(x_min, win_x_min),\n np.greater(y_max, win_y_max),\n np.greater(x_max, win_x_max),\n ]\n )\n valid_indices = np.reshape(\n np.where(np.logical_not(np.max(coordinate_violations, axis=1))), [-1]\n )\n return gather(boxlist, valid_indices), valid_indices\n\n\ndef concatenate(boxlists, fields=None):\n \"\"\"Concatenate list of BoxLists.\n\n This op concatenates a list of input BoxLists into a larger BoxList. 
It also\n handles concatenation of BoxList fields as long as the field tensor shapes\n are equal except for the first dimension.\n\n Args:\n boxlists: list of BoxList objects\n fields: optional list of fields to also concatenate. By default, all\n fields from the first BoxList in the list are included in the\n concatenation.\n\n Returns:\n a BoxList with number of boxes equal to\n sum([boxlist.num_boxes() for boxlist in BoxList])\n Raises:\n ValueError: if boxlists is invalid (i.e., is not a list, is empty, or\n contains non BoxList objects), or if requested fields are not contained in\n all boxlists\n \"\"\"\n if not isinstance(boxlists, list):\n raise ValueError(\"boxlists should be a list\")\n if not boxlists:\n raise ValueError(\"boxlists should have nonzero length\")\n for boxlist in boxlists:\n if not isinstance(boxlist, np_box_list.BoxList):\n raise ValueError(\n \"all elements of boxlists should be BoxList objects\"\n )\n concatenated = np_box_list.BoxList(\n np.vstack([boxlist.get() for boxlist in boxlists])\n )\n if fields is None:\n fields = boxlists[0].get_extra_fields()\n for field in fields:\n first_field_shape = boxlists[0].get_field(field).shape\n first_field_shape = first_field_shape[1:]\n for boxlist in boxlists:\n if not boxlist.has_field(field):\n raise ValueError(\"boxlist must contain all requested fields\")\n field_shape = boxlist.get_field(field).shape\n field_shape = field_shape[1:]\n if field_shape != first_field_shape:\n raise ValueError(\n \"field %s must have same shape for all boxlists \"\n \"except for the 0th dimension.\" % field\n )\n concatenated_field = np.concatenate(\n [boxlist.get_field(field) for boxlist in boxlists], axis=0\n )\n concatenated.add_field(field, concatenated_field)\n return concatenated\n\n\ndef filter_scores_greater_than(boxlist, thresh):\n \"\"\"Filter to keep only boxes with score exceeding a given threshold.\n\n This op keeps the collection of boxes whose corresponding scores are\n greater than the input 
threshold.\n\n Args:\n boxlist: BoxList holding N boxes. Must contain a 'scores' field\n representing detection scores.\n thresh: scalar threshold\n\n Returns:\n a BoxList holding M boxes where M <= N\n\n Raises:\n ValueError: if boxlist not a BoxList object or if it does not\n have a scores field\n \"\"\"\n if not isinstance(boxlist, np_box_list.BoxList):\n raise ValueError(\"boxlist must be a BoxList\")\n if not boxlist.has_field(\"scores\"):\n raise ValueError(\"input boxlist must have 'scores' field\")\n scores = boxlist.get_field(\"scores\")\n if len(scores.shape) > 2:\n raise ValueError(\"Scores should have rank 1 or 2\")\n if len(scores.shape) == 2 and scores.shape[1] != 1:\n raise ValueError(\n \"Scores should have rank 1 or have shape \"\n \"consistent with [None, 1]\"\n )\n high_score_indices = np.reshape(\n np.where(np.greater(scores, thresh)), [-1]\n ).astype(np.int32)\n return gather(boxlist, high_score_indices)\n\n\ndef change_coordinate_frame(boxlist, window):\n \"\"\"Change coordinate frame of the boxlist to be relative to window's frame.\n\n Given a window of the form [ymin, xmin, ymax, xmax],\n changes bounding box coordinates from boxlist to be relative to this window\n (e.g., the min corner maps to (0,0) and the max corner maps to (1,1)).\n\n An example use case is data augmentation: where we are given groundtruth\n boxes (boxlist) and would like to randomly crop the image to some\n window (window). 
In this case we need to change the coordinate frame of\n each groundtruth box to be relative to this new window.\n\n Args:\n boxlist: A BoxList object holding N boxes.\n window: a size 4 1-D numpy array.\n\n Returns:\n Returns a BoxList object with N boxes.\n \"\"\"\n win_height = window[2] - window[0]\n win_width = window[3] - window[1]\n boxlist_new = scale(\n np_box_list.BoxList(\n boxlist.get() - [window[0], window[1], window[0], window[1]]\n ),\n 1.0 / win_height,\n 1.0 / win_width,\n )\n _copy_extra_fields(boxlist_new, boxlist)\n\n return boxlist_new\n\n\ndef _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):\n \"\"\"Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.\n\n Args:\n boxlist_to_copy_to: BoxList to which extra fields are copied.\n boxlist_to_copy_from: BoxList from which fields are copied.\n\n Returns:\n boxlist_to_copy_to with extra fields.\n \"\"\"\n for field in boxlist_to_copy_from.get_extra_fields():\n boxlist_to_copy_to.add_field(\n field, boxlist_to_copy_from.get_field(field)\n )\n return boxlist_to_copy_to\n\n\ndef _update_valid_indices_by_removing_high_iou_boxes(\n selected_indices, is_index_valid, intersect_over_union, threshold\n):\n max_iou = np.max(intersect_over_union[:, selected_indices], axis=1)\n return np.logical_and(is_index_valid, max_iou <= threshold)\n", "# Copyright (c) Facebook, Inc. 
and its affiliates.\n\nimport torch\nimport torch.nn as nn\n\n\nclass Mlp(nn.Module):\n def __init__(\n self,\n in_features,\n hidden_features=None,\n out_features=None,\n act_layer=nn.GELU,\n drop_rate=0.0,\n ):\n super().__init__()\n self.drop_rate = drop_rate\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n if self.drop_rate > 0.0:\n self.drop = nn.Dropout(drop_rate)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n if self.drop_rate > 0.0:\n x = self.drop(x)\n x = self.fc2(x)\n if self.drop_rate > 0.0:\n x = self.drop(x)\n return x\n\n\nclass Permute(nn.Module):\n def __init__(self, dims):\n super().__init__()\n self.dims = dims\n\n def forward(self, x):\n return x.permute(*self.dims)\n\n\ndef drop_path(x, drop_prob: float = 0.0, training: bool = False):\n \"\"\"\n Stochastic Depth per sample.\n \"\"\"\n if drop_prob == 0.0 or not training:\n return x\n keep_prob = 1 - drop_prob\n shape = (x.shape[0],) + (1,) * (\n x.ndim - 1\n ) # work with diff dim tensors, not just 2D ConvNets\n mask = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)\n mask.floor_() # binarize\n output = x.div(keep_prob) * mask\n return output\n\n\nclass DropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\"\"\"\n\n def __init__(self, drop_prob=None):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training)\n" ]
[ [ "numpy.max", "numpy.full", "numpy.array", "numpy.less", "numpy.reshape", "numpy.logical_and", "numpy.nonzero", "numpy.where", "numpy.amin", "numpy.amax", "numpy.arange", "numpy.argsort", "numpy.greater", "numpy.hstack", "numpy.squeeze", "numpy.fmin", "numpy.expand_dims" ], [ "torch.nn.Linear", "torch.rand", "torch.nn.Dropout" ] ]
dwhitena/vachan-graph
[ "63aafc03c6077b805ef3f90fbef094493791c656" ]
[ "dgraph/dGraph_readOnly_server.py" ]
[ "from fastapi import FastAPI, Query, Path, Body\nfrom fastapi.responses import FileResponse, JSONResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.exceptions import RequestValidationError\nfrom starlette.exceptions import HTTPException as StarletteHTTPException\n\nfrom dGraph_conn import dGraph_conn\nimport logging, csv, urllib, json, itertools, re\nfrom enum import Enum\nfrom pydantic import BaseModel, AnyUrl\nfrom typing import Optional, List\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\napp = FastAPI()\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\ngraph_conn = None\nrel_db_name = 'AutographaMT_Staging'\nlogging.basicConfig(filename='readOnly_server.log',level=logging.DEBUG)\nbase_URL = 'http://localhost:7000'\nDEFAULT_RETURN_LIMIT = 10\n\nbook_num_map = { \"mat\": 40 ,\"mrk\": 41 ,\"luk\": 42 ,\"jhn\": 43 ,\"act\": 44 ,\"rom\": 45 ,\"1co\": 46 ,\"2co\": 47 ,\"gal\": 48 ,\"eph\": 49 ,\"php\": 50 ,\"col\": 51 ,\"1th\": 52 ,\"2th\": 53 ,\"1ti\": 54 ,\"2ti\": 55 ,\"tit\": 56 ,\"phm\": 57 ,\"heb\": 58 ,\"jas\": 59 ,\"1pe\": 60 ,\"2pe\": 61 ,\"1jn\": 62 ,\"2jn\": 63 ,\"3jn\": 64 ,\"jud\": 65 ,\"rev\": 66}\n\nnum_book_map = {}\nfor key in book_num_map:\n\tnum_book_map[book_num_map[key]] = key\n\nbook_num_map2 = { \"Genesis\": 1, \"GEN\": 1, \"Exodus\": 2, \"EXO\": 2, \"Leviticus\": 3, \"LEV\": 3, \"Numbers\": 4, \"NUM\": 4, \"Deuteronomy\": 5, \"DEU\": 5, \"Joshua\": 6, \"JOS\": 6, \"Judges\": 7, \"JDG\": 7, \"Ruth\": 8, \"RUT\": 8, \"1 Samuel\": 9, \"1SA\": 9, \"2 Samuel\": 10, \"2SA\": 10, \"1 Kings\": 11, \"1KI\": 11, \"2 Kings\": 12, \"2KI\": 12, \"1 Chronicles\": 13, \"1CH\": 13, \"2 Chronicles\": 14, \"2CH\": 14, \"Ezra\": 15, \"EZR\": 15, \"Nehemiah\": 16, \"NEH\": 16, \"Esther\": 17, \"EST\": 17, \"Job\": 18, \"JOB\": 18, \"Psalms\": 19, \"PSA\": 19, \"Proverbs\": 20, \"PRO\": 20, \"Ecclesiastes\": 21, \"ECC\": 21, \"Song of Solomon\": 22, \"SNG\": 22, \"Isaiah\": 23, \"ISA\": 23, \"Jeremiah\": 
24, \"JER\": 24, \"Lamentations\": 25, \"LAM\": 25, \"Ezekiel\": 26, \"EZK\": 26, \"Daniel\": 27, \"DAN\": 27, \"Hosea\": 28, \"HOS\": 28, \"Joel\": 29, \"JOL\": 29, \"Amos\": 30, \"AMO\": 30, \"Obadiah\": 31, \"OBA\": 31, \"Jonah\": 32, \"JON\": 32, \"Micah\": 33, \"MIC\": 33, \"Nahum\": 34, \"NAM\": 34, \"Habakkuk\": 35, \"HAB\": 35, \"Zephaniah\": 36, \"ZEP\": 36, \"Haggai\": 37, \"HAG\": 37, \"Zechariah\": 38, \"ZEC\": 38, \"Malachi\": 39, \"MAL\": 39, \"Matthew\": 40, \"MAT\": 40, \"Mark\": 41, \"MRK\": 41, \"Luke\": 42, \"LUK\": 42, \"John\": 43, \"JHN\": 43, \"Acts\": 44, \"ACT\": 44, \"Romans\": 45, \"ROM\": 45, \"1 Corinthians\": 46, \"1CO\": 46, \"2 Corinthians\": 47, \"2CO\": 47, \"Galatians\": 48, \"GAL\": 48, \"Ephesians\": 49, \"EPH\": 49, \"Philippians\": 50, \"PHP\": 50, \"Colossians\": 51, \"COL\": 51, \"1 Thessalonians\": 52, \"1TH\": 52, \"2 Thessalonians\": 53, \"2TH\": 53, \"1 Timothy\": 54, \"1TI\": 54, \"2 Timothy\": 55, \"2TI\": 55, \"Titus\": 56, \"TIT\": 56, \"Philemon\": 57, \"PHM\": 57, \"Hebrews\": 58, \"HEB\": 58, \"James\": 59, \"JAS\": 59, \"1 Peter\": 60, \"1PE\": 60, \"2 Peter\": 61, \"2PE\": 61, \"1 John\": 62, \"1JN\": 62, \"2 John\": 63, \"2JN\": 63, \"3 John\": 64, \"3JN\": 64, \"Jude\": 65, \"JUD\": 65, \"Revelation\": 66, \"REV\": 66, \"Psalm\": 19, \"PSA\": 19}\nbook_num_map.update(book_num_map2)\n\nclass BibleBook(str, Enum):\n\tmat = \"mat\"\n\tmrk = \"mrk\"\n\tluk = \"luk\"\n\tjhn = \"jhn\"\n\tact = \"act\"\n\trom = \"rom\"\n\tco1 = \"1co\"\n\tco2 = \"2co\"\n\tgal = \"gal\"\n\teph = \"eph\"\n\tphp = \"php\"\n\tcol = \"col\"\n\tth1 = \"1th\"\n\tth2 = \"2th\"\n\tti1 = \"1ti\"\n\tti2 = \"2ti\"\n\ttit = \"tit\"\n\tphm = \"phm\"\n\theb = \"heb\"\n\tjas = \"jas\"\n\tpe1 = \"1pe\"\n\tpe2 = \"2pe\"\n\tjn1 = \"1jn\"\n\tjn2 = \"2jn\"\n\tjn3 = \"3jn\"\n\tjud = \"jud\"\n\trev = \"rev\"\n\nclass verseReference(BaseModel):\n\tbible: str = None\n\tbook : BibleBook\n\tchapter: int\n\tverse: int\n\tverseLink: AnyUrl\n\nclass 
wordReference(BaseModel):\n\tbible: str = None\n\tbook : BibleBook\n\tchapter: int\n\tverse: int\n\tverseLink: AnyUrl\n\tverseText: str = None\n\tword: str\n\tposition: int\n\n\nclass NormalResponse(BaseModel):\n\tmessage: str\n\nclass ErrorResponse(BaseModel):\n\terror: str\n\tdetails: str\n\n\n######### Error Handling ##############\n\nclass GraphException(Exception):\n def __init__(self, detail: str):\n self.name = \"Graph Side Error\"\n self.detail = detail\n self.status_code = 502\n\n@app.exception_handler(GraphException)\nasync def graph_exception_handler(request, exc: GraphException):\n return JSONResponse(\n status_code=exc.status_code,\n content={\"error\": exc.name, \"details\" : exc.detail},\n )\n\nclass NotAvailableException(Exception):\n def __init__(self, detail: str):\n self.name = \"Requested Content Not Available\"\n self.detail = detail\n self.status_code = 404\n\n@app.exception_handler(NotAvailableException)\nasync def NA_exception_handler(request, exc: NotAvailableException):\n return JSONResponse(\n status_code=exc.status_code,\n content={\"error\": exc.name, \"details\" : exc.detail},\n )\n\n\n@app.exception_handler(StarletteHTTPException)\nasync def http_exception_handler(request, exc):\n\treturn JSONResponse(\n\t\tstatus_code=exc.status_code,\n\t\tcontent={\"error\": \"HTTP Error\", \"details\": str(exc.detail)}\n\t)\n\n@app.exception_handler(RequestValidationError)\nasync def validation_exception_handler(request, exc):\n\tlogging.info(str(exc))\n\treturn JSONResponse(\n\t\tstatus_code=422,\n\t\tcontent={\"error\": \"Input Validation Error\" ,\"details\": str(exc).replace(\"\\n\", \". 
\")}\n\t)\n\n\n################## APIS ###################\n\n\n@app.get(\"/\", response_model=NormalResponse, responses={502: {\"model\": ErrorResponse}, 422: {\"model\": ErrorResponse}}, status_code=200 )\ndef test():\n\tglobal graph_conn\n\ttry:\n\t\tgraph_conn = dGraph_conn()\n\texcept Exception as e:\n\t\tlogging.error('At connecting to graph DB')\n\t\tlogging.error(e)\n\t\tif \"details\" in dir(e):\n\t\t\tdetails = e.details() \n\t\telse:\n\t\t\tdetails = str(e)\n\t\traise GraphException(details)\n\treturn {'message': \"server up and running\"}\n\n########### STRONGS NUMBERS #############\n\nall_strongs_query = '''\n\t\tquery strongs($skip: int, $limit: int){\n\t\tstrongs(func: has(StrongsNumber), offset:$skip, first:$limit){\n\t\t\tuid,\n\t\t\tStrongsNumber,\n\t\t\tpronunciation,\n\t\t\tlexeme,\n\t\t\ttransliteration,\n\t\t\tdefinition,\n\t\t\tstrongsNumberExtended,\n\t\t\tenglishWord\n\t\t} }\n'''\n\nstrongs_link_query = '''\n\t\tquery strongs( $strongs: string, $skip: int, $limit: int){\n\t\tstrongs(func: eq(StrongsNumber, $strongs)) {\n\t\t\tStrongsNumber,\n\t\t\tpronunciation,\n\t\t\tlexeme,\n\t\t\ttransliteration,\n\t\t\tdefinition,\n\t\t\tstrongsNumberExtended,\n\t\t\tenglishWord,\n\t\t\toccurences:~strongsLink(offset:$skip, first:$limit) @normalize{\n\t\t\t\t~alignsTo{\n\t\t\t\tposition:position,\n\t\t\t\tword:word,\n\t\t\t\tbelongsTo{\n\t\t\t\t\tverse: verse,\n\t\t\t\t\tbelongsTo{\n\t\t\t\t\t\tchapter:chapter,\n\t\t\t\t\t\tbelongsTo{\n\t\t\t\t\t\t book:bookNumber,\n\t\t\t\t\t\t belongsTo {\n\t\t\t\t\t\t bible:bible \n\t}\t}\t}\t}\t}\t} } }\n'''\n\n\nclass StrongsOut(BaseModel):\n\tStrongsNumber: int\n\tpronunciation: str\n\tlexeme :str\n\ttransliteration: str\n\tdefinition: str\n\tstrongsNumberExtended: str\n\tenglishWord: str\n\tstrongsLink: AnyUrl\n\toccurences: List[wordReference] = None\n\n@app.get(\"/strongs\", response_model=List[StrongsOut], responses = {502:{\"model\":ErrorResponse}, 404:{\"model\": ErrorResponse}, 422:{\"model\": 
ErrorResponse}}, response_model_exclude_unset=True, status_code=200, tags=[\"Dictionaries\"])\ndef get_strongs(strongs_number:int=None, skip: int =0, limit: int=DEFAULT_RETURN_LIMIT):\n\t''' Get the list of strongs nodes and their property values.\n\t* If strongs_number is sepcified, only its properties are returned, along with occurences .\n\t* skip=n: skips the first n objects in return list\n\t* limit=n: limits the no. of items to be returned to n\t'''\n\tif not graph_conn:\n\t\ttest()\n\tresult = []\n\tvariables = {\"$skip\": str(skip), \"$limit\": str(limit)}\n\ttry:\n\t\tif not strongs_number:\n\t\t\tquery_res = graph_conn.query_data(all_strongs_query,variables)\n\t\telse:\n\t\t\tvariables['$strongs'] = str(strongs_number)\n\t\t\tquery_res = graph_conn.query_data(strongs_link_query,variables)\n\texcept Exception as e:\n\t\tlogging.error('At fetching strongs numbers')\n\t\tlogging.error(e)\n\t\tif \"details\" in dir(e):\n\t\t\tdetails = e.details() \n\t\telse:\n\t\t\tdetails = str(e)\n\t\traise GraphException(details)\n\tif len(query_res['strongs']) == 0:\n\t\traise NotAvailableException(\"Strongs Number: \"+str(variables))\n\n\tfor i, strong in enumerate(query_res['strongs']):\n\t\tif 'occurences' in strong:\n\t\t\toccurs = []\n\t\t\tfor j,occur in enumerate(strong['occurences']):\n\t\t\t\tlogging.info(occur)\n\t\t\t\tlogging.info(num_book_map)\n\t\t\t\tverse_link = '%s/bibles/%s/books/%s/chapters/%s/verses/%s/words/%s'%(base_URL, occur['bible'], num_book_map[occur['book']], occur['chapter'], occur['verse'], occur['position'])\n\t\t\t\tlink = urllib.parse.quote(verse_link, safe='/:-')\n\t\t\t\tquery_res['strongs'][i]['occurences'][j]['verseLink'] = link\n\t\t\t\tquery_res['strongs'][i]['occurences'][j]['book'] = num_book_map[occur['book']]\n\t\tif 'StrongsNumber' in strong:\n\t\t\tstrong_link = '%s/strongs?strongs_number=%s'%(base_URL, strong['StrongsNumber'])\n\t\t\tquery_res['strongs'][i]['strongsLink'] = urllib.parse.quote(strong_link, 
safe='/:?=')\n\tresult = query_res['strongs']\n\treturn result\n\n############ TRANSLATION WORDS #################\n\nall_tw_query = '''\n\tquery tw($skip: int, $limit: int){\n\ttw(func: has(translationWord), offset:$skip, first:$limit){\n\t\ttranslationWord,\n\t\tslNo,\n\t\ttwType,\n\t\tdescription,\n\t}\n\t}\n'''\n\ntw_link_query = '''\n\tquery tw($tw: string, $skip: int, $limit: int){\n\ttw(func: eq(translationWord, $tw)){\n\t\ttranslationWord,\n\t\tslNo,\n\t\ttwType,\n\t\tdescription,\n\t\toccurences: ~twLink(offset:$skip, first:$limit) @normalize {\n\t\t\t~alignsTo {\n\t position:position,\n\t word:word,\n\t\t\tbelongsTo{\n\t\t\t\tverse: verse,\n\t\t\t\tverseText: verseText,\n\t\t belongsTo{\n\t\t chapter:chapter,\n\t\t belongsTo{\n\t\t book:bookNumber,\n\t\t belongsTo {\n\t\t bible:bible \n\t}\t}\t}\t}\t} }\t} }\n'''\n\nclass twOut(BaseModel):\n\ttranslationWord: str\n\tslNo: int\n\ttwType: str\n\tdescription: str\n\ttranslationWordLink: AnyUrl\n\toccurences: List[wordReference] = None\n\n@app.get(\"/translation-words\", response_model=List[twOut], responses = {502:{\"model\":ErrorResponse}, 404:{\"model\": ErrorResponse}, 422:{\"model\": ErrorResponse}}, response_model_exclude_unset=True, status_code=200, tags=[\"Dictionaries\"])\ndef get_translationwords(translationWord:str=None, skip: int =0, limit: int=DEFAULT_RETURN_LIMIT):\n\t'''Get the list of translation words and their property values.\n\t* If tw is sepcified, only its properties are returned, along with occurences .\n\t* skip=n: skips the first n objects in return list\n\t* limit=n: limits the no. 
of items to be returned to n\t\n\t'''\n\tif not graph_conn:\n\t\ttest()\n\tresult = []\n\tvariables = {\"$skip\": str(skip), \"$limit\": str(limit)}\n\ttry:\n\t\tif not translationWord:\n\t\t\tquery_res = graph_conn.query_data(all_tw_query,variables)\n\t\telif translationWord:\n\t\t\tvariables['$tw'] = translationWord\n\t\t\tquery_res = graph_conn.query_data(tw_link_query,variables)\n\texcept Exception as e:\n\t\tlogging.error('At fetching translation words')\n\t\tlogging.error(e)\n\t\tif \"details\" in dir(e):\n\t\t\tdetails = e.details() \n\t\telse:\n\t\t\tdetails = str(e)\n\t\traise GraphException(details)\n\tif len(query_res['tw']) == 0:\n\t\traise NotAvailableException(\"Translation Words:\"+ str(variables))\n\n\tfor i, tw in enumerate(query_res['tw']):\n\t\tif 'occurences' in tw:\n\t\t\tfor j,occur in enumerate(tw['occurences']):\n\t\t\t\tverse_link = '%s/bibles/%s/books/%s/chapters/%s/verses/%s/words/%s'%(base_URL, occur['bible'], num_book_map[occur['book']], occur['chapter'], occur['verse'], occur['position'])\n\t\t\t\tlink = urllib.parse.quote(verse_link, safe='/:-')\n\t\t\t\tquery_res['tw'][i]['occurences'][j]['verseLink'] = link\n\t\t\t\tquery_res['tw'][i]['occurences'][j]['book'] = num_book_map[occur['book']]\n\t\tif 'translationWord' in tw:\n\t\t\ttw_link = '%s/translation-words?translationWord=%s'%(base_URL, tw['translationWord'])\n\t\t\tquery_res['tw'][i]['translationWordLink'] = urllib.parse.quote(tw_link, safe='/:?=')\n\tresult = query_res['tw']\n\treturn result\n\n\n############### BIBLE CONTENT ##################\n\nall_bibles_query = '''\n\tquery bibles($skip: int, $limit: int){\n\tbibles(func: has(bible), offset: $skip, first: $limit){\n\t\tbible,\n\t\tlanguage,\n\t\t# versification : ~belongsTo {\n\t\t# \tbook,\n\t\t# \tbookNumber,\n\t\t# \ttotalChapters: count(~belongsTo)\n\t\t# \tchapters: ~belongsTo{\n\t\t# \t\tchapterNumber: chapter,\n\t\t# \t\ttotalVerses: count(~belongsTo)\n\t\t# \t}\n\t\t# }\n\t}\n\t}\n\n'''\n\nbible_name_query = 
'''\n\tquery bibles($bib: string, $skip: int, $limit: int){\n\tbibles(func: eq(bible, $bib), offset: $skip, first: $limit){\n\t\tbible,\n\t\tlanguage,\n\t\t# versification : ~belongsTo {\n\t\t# \tbook,\n\t\t# \tbookNumber,\n\t\t# \ttotalChapters: count(~belongsTo)\n\t\t# \tchapters: ~belongsTo{\n\t\t# \t\tchapterNumber: chapter,\n\t\t# \t\ttotalVerses: count(~belongsTo)\n\t\t# \t}\n\t\t# }\n\t}\n\t}\n\n'''\n\nbible_lang_query = '''\n\tquery bibles($lang: string, $skip: int, $limit: int){\n\tbibles(func: has(bible), offset: $skip, first: $limit) @filter(eq(language, $lang)){\n\t\tbible,\n\t\tlanguage,\n\t\t# versification : ~belongsTo {\n\t\t# \tbook,\n\t\t# \tbookNumber,\n\t\t# \ttotalChapters: count(~belongsTo)\n\t\t# \tchapters: ~belongsTo{\n\t\t# \t\tchapterNumber: chapter,\n\t\t# \t\ttotalVerses: count(~belongsTo)\n\t\t# \t}\n\t\t# }\n\t}\n\t}\n\n'''\nclass ChapterVerification(BaseModel):\n\tchapterNumber: int\n\ttotalVerses: int\n\n# class BookVersification(BaseModel):\n# \tbookCode: BibleBook\n# \tbook: str\n# \tbookNumber: int\n# \ttotalChapters: int\n# \tchapters: List[ChapterVerification]\n\nclass BibleOut(BaseModel):\n\tbible: str\n\tlanguage: str\n\tbibleLink: AnyUrl\n\tversification : dict = None\n\nclass WordOut(BaseModel):\n\tword: str\n\tposition: str\n\tstrongsNumber: int = None\n\ttranslationWord: str = None\n\tname: str = None\n\tstrongsLink: AnyUrl = None\n\ttranslationWordLink: AnyUrl = None\n\tnameLink: AnyUrl = None\n\nclass VerseOut(BaseModel):\n\tverseNumber: int\n\tverseText: str\n\tverseLink: AnyUrl\n\twords: List[WordOut] = None\n\nclass ChapterOut(BaseModel):\n\tchapterNumber: int\n\tverses: List[VerseOut]\n\nclass BibleBookOut(BaseModel):\n\tbook: BibleBook\n\tchapters: List[ChapterOut]\n\nclass BibleContentOut(BaseModel):\n\tbible: str\n\tlanguage: str\n\tbooks : List[BibleBookOut]\n\n\n@app.get('/bibles', response_model=List[BibleOut], responses = {502:{\"model\":ErrorResponse}, 404:{\"model\": ErrorResponse}, 422:{\"model\": 
ErrorResponse}}, response_model_exclude_unset=True, status_code=200, tags=[\"Bible Contents\"])\ndef get_bibles(bibleName : str = None, language: str = None, versification: bool = False, skip: int = 0, limit: int = DEFAULT_RETURN_LIMIT):\n\t''' fetches bibles nodes, properties and available books. \n\t* If no query params are given, all bibles in graph are fetched.\n\t* If bibleName is specified, only that node is returned.\n\t* If only language if given, all bible nodes, and details vavailable in that language is returned\n\t* versification flag can be set if the versification structure of the Bible(list of books, number of chapters and number of verses) needs to be returned\n\t* Number of items returned can be set using the skip and limit parameters.\n\t'''\n\tif not graph_conn:\n\t\ttest()\n\tresult = []\n\tvariables = {\"$skip\": str(skip), \"$limit\": str(limit)}\n\ttry:\n\t\tif not bibleName and not language:\n\t\t\tquery_res = graph_conn.query_data(all_bibles_query,variables)\n\t\telif bibleName:\n\t\t\tvariables['$bib'] = bibleName\n\t\t\tquery_res = graph_conn.query_data(bible_name_query,variables)\n\t\telse:\n\t\t\tvariables[\"$lang\"] = language\n\t\t\tquery_res = graph_conn.query_data(bible_lang_query,variables)\n\texcept Exception as e:\n\t\tlogging.error('At fetching Bibles')\n\t\tlogging.error(e)\n\t\tif \"details\" in dir(e):\n\t\t\tdetails = e.details()\n\t\telse:\n\t\t\tdetails = str(e)\n\t\traise GraphException(details)\n\tif len(query_res['bibles']) == 0:\n\t\traise NotAvailableException(\"Bibles: \"+str(variables))\n\n\tfor i, bib in enumerate(query_res['bibles']):\n\t\tbible_link = \"%s/bibles?bibleName=%s;versification=true\"%(base_URL, urllib.parse.quote(bib['bible']))\n\t\tquery_res['bibles'][i]['bibleLink'] = bible_link\n\t\tif versification:\n\t\t\tversi_obj = get_versification_map(bib['bible'])\n\t\t\tquery_res['bibles'][i]['versification'] = versi_obj\n\tresult = query_res['bibles']\n\treturn result\n\n\nwhole_chapter_query = 
'''\n\tquery chapter($bib: string, $book: int, $chapter: int){\n\tchapter(func: eq(bible, $bib)) {\n\t\tbible\n\t\t~belongsTo @filter(eq(bookNumber, $book)){\n\t\tbook,\n\t\tbookNumber,\n\t\t~belongsTo @filter(eq(chapter, $chapter)){\n\t\tchapterNumber: chapter\n\t\tverses: ~belongsTo {\n\t\t\tverseNumber: verse,\n\t\t\tverseText: verseText,\n\t\t}\n\t\t}\n\t\t}\n\t}\n\t}\n'''\n\nwhole_chapter_detailed_query = '''\n\tquery chapter($bib: string, $book: int, $chapter: int){\n\tchapter(func: eq(bible, $bib)) {\n\t\tbible\n\t\t~belongsTo @filter(eq(bookNumber, $book)){\n\t\tbook,\n\t\tbookNumber,\n\t\t~belongsTo @filter(eq(chapter, $chapter)){\n\t\tchapterNumber: chapter\n\t\tverses: ~belongsTo {\n\t\t\tverseNumber: verse,\n\t\t\tverseText: verseText,\n\t\t\twords: ~belongsTo @normalize{\n\t\t\t\tword:word,\n\t\t\t\tposition: position,\n\t\t\t\ttwLink {\n\t\t\t\t\ttranslationWord: translationWord,\n\t\t\t\t},\n\t\t\t\tstrongsLink {\n\t\t\t\t\tstrongsNumber: StrongsNumber,\n\t\t\t\t},\n\t\t\t\tnameLink {\n\t\t\t\t\tname: name\n\t\t\t\t}\n\t\t\t\talignsTo: alignsTo {\n\t\t\t\t\ttwLink {\n\t\t\t\t\t\ttranslationWord: translationWord,\n\t\t\t\t\t}\n\t\t\t\t\tstrongsLink {\n\t\t\t\t\t\tstrongsNumber: StrongsNumber,\n\t\t\t\t\t}\n\t\t\t\t\t~alignsTo {\n\t\t\t\t\t\tnameLink{\n\t\t\t\t\t\t\tname: name\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t}\n\t\t}\n\t}\n\t}\n'''\n\n@app.get('/bibles/{bibleName}/books/{bookCode}/chapters/{chapter}', response_model=ChapterOut, responses = {502:{\"model\":ErrorResponse}, 404:{\"model\": ErrorResponse}, 422:{\"model\": ErrorResponse}}, response_model_exclude_unset=True, status_code=200, tags=[\"Bible Contents\"])\ndef get_whole_chapter(bibleName: str, bookCode: BibleBook, chapter: int, detailed: bool = False):\n\t''' fetches all verses of the chapter \n\t* if detailed flag if set, the individual words in verses, including their strong number, tw and bible name connections are returned\n\t'''\n\tif not 
graph_conn:\n\t\ttest()\n\tresult = []\n\ttry:\n\t\tvariables = {'$bib': bibleName,\n\t\t\t\t\t'$book': str(book_num_map[bookCode]),\n\t\t\t\t\t'$chapter': str(chapter)}\n\t\tif detailed:\n\t\t\tquery_res = graph_conn.query_data(whole_chapter_detailed_query,variables)\n\t\telse:\n\t\t\tquery_res = graph_conn.query_data(whole_chapter_query,variables)\n\texcept Exception as e:\n\t\tlogging.error('At fetching chapter contents')\n\t\tlogging.error(e)\n\t\tif \"details\" in dir(e):\n\t\t\tdetails = e.details()\n\t\telse:\n\t\t\tdetails = str(e)\n\t\traise GraphException(details)\n\ttry:\n\t\tresult = query_res['chapter'][0]['~belongsTo'][0]['~belongsTo'][0]\n\texcept Exception as e:\n\t\tlogging.error('At parsing chapter contents')\n\t\tlogging.error(e)\n\t\traise NotAvailableException(\"Whole Chapter: \"+str(variables))\n\tfor j,ver in enumerate(result['verses']):\n\t\tresult['verses'][j]['verseLink'] = \"%s/bibles/%s/books/%s/chapters/%s/verses/%s?detailed=True\"%(base_URL, urllib.parse.quote(bibleName), bookCode.value, chapter, ver['verseNumber'])\t\n\tif detailed:\n\t\tfor j,ver in enumerate(result['verses']):\n\t\t\tfor i,wrd in enumerate(ver['words']):\n\t\t\t\tif 'translationWord' in wrd:\n\t\t\t\t\tlink = '%s/translation-words?translationWord=%s'%(base_URL, urllib.parse.quote(wrd['translationWord']))\n\t\t\t\t\tresult['verses'][j]['words'][i]['translationWordLink'] = link\n\t\t\t\tif 'strongsNumber' in wrd:\n\t\t\t\t\tlink = '%s/strongs?strongsNumber=%s'%(base_URL, wrd['strongsNumber'])\n\t\t\t\t\tresult['verses'][j]['words'][i]['strongsLink'] = link\n\t\t\t\tif 'name' in wrd:\n\t\t\t\t\tif isinstance(wrd['name'], list):\n\t\t\t\t\t\tresult['verses'][j]['words'][i]['name'] = ', '.join(wrd['name'])\n\t\t\t\t\t\tresult['verses'][j]['words'][i]['nameLink'] = ', '.join(['%s/names?name=%s'%(base_URL, urllib.parse.quote(nm)) for nm in wrd['name']])\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult['verses'][j]['words'][i]['nameLink'] = '%s/names?name=%s'%(base_URL, 
urllib.parse.quote(wrd['name']))\n\t\n\treturn result\n\none_verse_query = '''\n\tquery verse($bib: string, $book: int, $chapter: int, $verse: int){\n\tverse(func: eq(bible, $bib)) {\n\t\tbible\n\t\t~belongsTo @filter(eq(bookNumber, $book)){\n\t\tbook,\n\t\t~belongsTo @filter(eq(chapter, $chapter)){\n\t\tchapter\n\t\t~belongsTo @filter(eq(verse, $verse)){\n\t\t\tverseNumber: verse,\n\t\t\tverseText: verseText,\n\t\t}\n\t\t}\n\t\t}\n\t}\n\t}\n'''\n\none_verse_detailed_query = '''\n\tquery verse($bib: string, $book: int, $chapter: int, $verse: int){\n\tverse(func: eq(bible, $bib)) {\n\t\tbible\n\t\t~belongsTo @filter(eq(bookNumber, $book)){\n\t\tbook,\n\t\t~belongsTo @filter(eq(chapter, $chapter)){\n\t\tchapter\n\t\t~belongsTo @filter(eq(verse, $verse)){\n\t\t\tverseNumber: verse,\n\t\t\tverseText: verseText,\n\t\t\twords: ~belongsTo @normalize{\n\t\t\t\tword:word,\n\t\t\t\tuid,\n\t\t\t\tposition: position,\n\t\t\t\ttwLink: twLink {\n\t\t\t\t\ttranslationWord: translationWord,\n\t\t\t\t},\n\t\t\t\tstrongsLink {\n\t\t\t\t\tstrongsNumber: StrongsNumber,\n\t\t\t\t},\n\t\t\t\tnameLink{\n\t\t\t\t\tname: name,\n\t\t\t\t\texternalUid: externalUid\n\t\t\t\t},\n\t\t\t\talignsTo: alignsTo {\n\t\t\t\t\ttwLink: twLink {\n\t\t\t\t\t\ttranslationWord: translationWord,\n\t\t\t\t\t}\n\t\t\t\t\tstrongsLink {\n\t\t\t\t\t\tstrongsNumber: StrongsNumber,\n\t\t\t\t\t}\n\t\t\t\t\t~alignsTo{\n\t\t\t\t\t\tnameLink{\n\t\t\t\t\t\t\tname: name,\n\t\t\t\t\t\t\texternalUid: externalUid\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t\t}\n\t\t}\n\t}\n\t}\n'''\n\n\n@app.get('/bibles/{bibleName}/books/{bookCode}/chapters/{chapter}/verses/{verse}', response_model=VerseOut, responses = {502:{\"model\":ErrorResponse}, 404:{\"model\": ErrorResponse}, 422:{\"model\": ErrorResponse}}, response_model_exclude_unset=True, status_code=200, tags=[\"Bible Contents\"])\ndef get_one_verse(bibleName: str, bookCode: BibleBook, chapter: int, verse: int, detailed: bool = False):\n\t''' fetches one 
verse.\n\t* detailed flag can be used to include individual words list and their strong number, tw and bible name connections\n\t'''\n\tif not graph_conn:\n\t\ttest()\n\ttry:\n\t\tvariables = {'$bib': bibleName,\n\t\t\t\t\t'$book': str(book_num_map[bookCode]),\n\t\t\t\t\t'$chapter': str(chapter),\n\t\t\t\t\t'$verse': str(verse)}\n\t\tif detailed:\n\t\t\tquery_res = graph_conn.query_data(one_verse_detailed_query,variables)\n\t\telse:\n\t\t\tquery_res = graph_conn.query_data(one_verse_query,variables)\n\texcept Exception as e:\n\t\tlogging.error('At fetching chapter contents')\n\t\tlogging.error(e)\n\t\tif \"details\" in dir(e):\n\t\t\tdetails = e.details()\n\t\telse:\n\t\t\tdetails = str(e)\n\t\traise GraphException(details)\n\ttry:\n\t\tresult = query_res['verse'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]\n\texcept Exception as e:\n\t\tlogging.error('At parsing verse contents')\n\t\tlogging.error(e)\n\t\traise NotAvailableException(\"One verse: \"+str(variables))\n\tresult['verseLink'] = \"%s/bibles/%s/books/%s/chapters/%s/verses/%s?detailed=True\"%(base_URL, urllib.parse.quote(bibleName), bookCode.value, chapter, result['verseNumber'])\t\n\n\tif detailed:\n\t\tfor i,wrd in enumerate(result['words']):\n\t\t\tif 'translationWord' in wrd:\n\t\t\t\tlink = '%s/translation-words?translationWord=%s'%(base_URL, wrd['translationWord'])\n\t\t\t\tresult['words'][i]['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')\n\t\t\tif 'strongsNumber' in wrd:\n\t\t\t\tlink = '%s/strongs?strongs_number=%s'%(base_URL, wrd['strongsNumber'])\n\t\t\t\tresult['words'][i]['strongsLink'] = urllib.parse.quote(link, safe='/:?=')\n\t\t\tif \"name\" in wrd:\n\t\t\t\tif isinstance(wrd['name'], list):\n\t\t\t\t\tresult['words'][i]['name'] = ', '.join(wrd['name'])\n\t\t\t\t\tresult['words'][i]['nameLink'] = ', '.join([\"%s/names?externalUid=%s\"%(base_URL, urllib.parse.quote(nm)) for nm in wrd['externalUid']])\n\t\t\t\telse:\n\t\t\t\t\tresult['words'][i]['nameLink'] = 
\"%s/names?externalUid=%s\"%(base_URL, urllib.parse.quote(wrd['externalUid']))\n\treturn result\n\nword_query = '''\n\tquery words($bib: string, $book: int, $chapter: int, $verse: int, $pos: int){\n\twords(func: eq(bible, $bib)) {\n\t\tbible\n\t\t~belongsTo @filter(eq(bookNumber, $book)){\n\t\tbook,\n\t\t~belongsTo @filter(eq(chapter, $chapter)){\n\t\tchapter\n\t\t~belongsTo @filter(eq(verse, $verse)){\n\t\t\t~belongsTo @filter(eq(position, $pos))@normalize{\n\t\t\t\tword:word,\n\t\t\t\tuid,\n\t\t\t\tposition: position,\n\t\t\t\ttwLink: twLink {\n\t\t\t\t\ttranslationWord: translationWord,\n\t\t\t\t},\n\t\t\t\tstrongsLink {\n\t\t\t\t\tstrongsNumber: StrongsNumber,\n\t\t\t\t},\n\t\t\t\tnameLink{\n\t\t\t\t\tname: name,\n\t\t\t\t\texternalUid: externalUid\n\t\t\t\t},\n\t\t\t\talignsTo: alignsTo {\n\t\t\t\t\ttwLink: twLink {\n\t\t\t\t\t\ttranslationWord: translationWord,\n\t\t\t\t\t}\n\t\t\t\t\tstrongsLink {\n\t\t\t\t\t\tstrongsNumber: StrongsNumber,\n\t\t\t\t\t}\n\t\t\t\t\t~alignsTo{\n\t\t\t\t\t\tnameLink{\n\t\t\t\t\t\t\tname: name,\n\t\t\t\t\t\t\texternalUid: externalUid\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t\t}\n\t\t}\n\t}\n\t}\n'''\n\n\n@app.get('/bibles/{bible_name}/books/{bookcode}/chapters/{chapter}/verses/{verse}/words/{position}', response_model=WordOut, responses = {502:{\"model\":ErrorResponse}, 404:{\"model\": ErrorResponse}, 422:{\"model\": ErrorResponse}}, response_model_exclude_unset=True, status_code=200, tags=[\"Bible Contents\"])\ndef get_verse_word(bible_name: str, bookcode: BibleBook, chapter: int, verse: int, position: int):\n\t''' fetches all details of a bible word \n\tincluding their strong number, tw and bible name connections\n\t'''\n\tif not graph_conn:\n\t\ttest()\n\ttry:\n\t\tvariables = {'$bib': bible_name,\n\t\t\t\t\t'$book': str(book_num_map[bookcode]),\n\t\t\t\t\t'$chapter': str(chapter),\n\t\t\t\t\t'$verse': str(verse),\n\t\t\t\t\t'$pos': str(position)}\n\t\tquery_res = 
graph_conn.query_data(word_query,variables)\n\t\tlogging.info('query_res: %s' % query_res)\n\texcept Exception as e:\n\t\tlogging.error('At fetching chapter contents')\n\t\tlogging.error(e)\n\t\tif \"details\" in dir(e):\n\t\t\tdetails = e.details()\n\t\telse:\n\t\t\tdetails = str(e)\n\t\traise GraphException(details)\n\ttry:\n\t\tresult = query_res['words'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]['~belongsTo'][0]\n\t\tif 'translationWord' in result:\n\t\t\tlink = '%s/translationwords?translation_word=%s'%(base_URL, result['translationWord'])\n\t\t\tresult['translationWordLink'] = urllib.parse.quote(link, safe='/:?=')\n\t\tif 'strongsNumber' in result:\n\t\t\tlink = '%s/strongs?strongs_number=%s'%(base_URL, result['strongsNumber'])\n\t\t\tresult['strongsLink'] = urllib.parse.quote(link, safe='/:?=')\n\texcept Exception as e:\n\t\tlogging.error('At parsing verse contents')\n\t\tlogging.error(e)\n\t\traise NotAvailableException(\"Bible word: \"+str(variables))\n\treturn result\n\n\n################### BIBLE NAMES #####################\n\nall_names_query = \"\"\"\n\t\t\tquery names($skip: int, $limit: int){\n\t\t\tnames(func: has(externalUid), offset: $skip, first: $limit) {\n\t\t\t\t\tname:name,\n\t\t\t\t\texternalUid: externalUid,\n\t\t\t\t\tdescription: description,\n\t\t\t\t\tgender: gender,\n\t\t\t\t\tbornIn: brithPlace,\n\t\t\t\t\tbrithDate: birthDate,\n\t\t\t\t\tdiedIn:deathPlace,\n\t\t\t\t\tdeathDate: deathDate\n\t\t\t\t\tsameAs{\n\t\t\t\t\t\totherName:name,\n\t\t\t\t\t\texternalUid: externalUid\n\t\t\t\t\t}\n\t\t\t} }\n\"\"\"\n\none_name_xuid_query = \"\"\"\n\t\t\tquery names($skip: int, $limit: int, $xuid: string){\n\t\t\tnames(func: eq(externalUid, $xuid), offset: $skip, first: $limit){\n\t\t\t\t\tname:name,\n\t\t\t\t\texternalUid: externalUid,\n\t\t\t\t\tdescription: description,\n\t\t\t\t\tgender: gender,\n\t\t\t\t\tbornIn: brithPlace,\n\t\t\t\t\tbrithDate: birthDate,\n\t\t\t\t\tdiedIn:deathPlace,\n\t\t\t\t\tdeathDate: 
deathDate\n\t\t\t\t\tsameAs{\n\t\t\t\t\t\totherName:name,\n\t\t\t\t\t\texternalUid: externalUid\n\t\t\t\t\t}\n\t\t\t} } \n\"\"\"\n\nname_match_query = \"\"\"\n\t\t\tquery names($skip: int, $limit: int, $name: string){\n\t\t\tnames(func: eq(name, $name), offset: $skip, first: $limit) {\n\t\t\t\t\tname:name,\n\t\t\t\t\texternalUid: externalUid,\n\t\t\t\t\tdescription: description,\n\t\t\t\t\tgender: gender,\n\t\t\t\t\tbornIn: brithPlace,\n\t\t\t\t\tbrithDate: birthDate,\n\t\t\t\t\tdiedIn:deathPlace,\n\t\t\t\t\tdeathDate: deathDate\n\t\t\t\t\tsameAs{\n\t\t\t\t\t\totherName:name,\n\t\t\t\t\t\texternalUid: externalUid\n\t\t\t\t\t}\n\t\t\t} } \n\"\"\"\n\nfamily_tree_query = '''\n\tquery relations($xuid: string){\n\trelations(func: eq(externalUid,$xuid)){\n\t\tname,\n\t\texternalUid,\n\t\tfather{\n\t\t\tname,\n\t\t\texternalUid,\n\t\t\tsibling: ~father{\n\t\t\t\tname,\n\t\t\t\texternalUid\t} },\n\t\tmother{\n\t\t\tname,\n\t\t\texternalUid,\n\t\t\tsibling: ~mother{\n\t\t\t\tname,\n\t\t\t\texternalUid\t} },\n\t\tspouse{\n\t\t\tname,\n\t\t\texternalUid\t},\n\t\tchildren1:~father{\n\t\t\tname,\n\t\t\texternalUid },\n\t\tchildren2:~mother{\n\t\t\tname,\n\t\t\texternalUid },\n\t\tsameAs{\n\t\t\tname,\n\t\t\texternalUid,\n\t\t\tfather{\n\t\t\t\tname,\n\t\t\t\texternalUid,\n\t\t\t\tsibling: ~father{\n\t\t\t\t\tname,\n\t\t\t\t\texternalUid\t} },\n\t\t\tmother{\n\t\t\t\tname,\n\t\t\t\texternalUid,\n\t\t\t\tsibling: ~mother{\n\t\t\t\t\tname,\n\t\t\t\t\texternalUid\t} },\n\t\t\tspouse{\n\t\t\t\tname,\n\t\t\t\texternalUid\t},\n\t\t\tchildren1:~father{\n\t\t\t\tname,\n\t\t\t\texternalUid },\n\t\t\tchildren2:~mother{\n\t\t\t\tname,\n\t\t\t\texternalUid } }\n\t} }\n'''\n\nnames_link_query = '''\n\tquery occurences($xuid: string, $skip:int, $limit:int){\n\toccurences(func: eq(externalUid, $xuid)){\n\t~nameLink(offset: $skip, first: $limit) @normalize{\n\t\tword: word,\n\t\tposition: position,\n\t\tbelongsTo{\n\t\t\tverse:verse,\n\t\t\tverseText: 
verseText,\n\t\t\tbelongsTo{\n\t\t\t\tchapter:chapter,\n\t\t\t\tbelongsTo{\n\t\t\t\t\tbook: book,\n\t\t\t\t\tbookNumber: bookNumber,\n\t\t\t\t\tbelongsTo{\n\t\t\t\t\t\tbible:bible} }\t} } }\n\tsameAs{\n\t\t~nameLink(offset: $skip, first: $limit) @normalize{\n\t\t\tword: word,\n\t\t\tposition: position,\n\t\t\tbelongsTo{\n\t\t\t\tverse:verse,\n\t\t\t\tverseText: verseText,\n\t\t\t\tbelongsTo{\n\t\t\t\t\tchapter:chapter,\n\t\t\t\t\tbelongsTo{\n\t\t\t\t\t\tbook: book,\n\t\t\t\t\t\tbookNumber: bookNumber,\n\t\t\t\t\t\tbelongsTo{\n\t\t\t\t\t\t\tbible:bible} }\t} } }\n\t}\n\t}\t}\n'''\n\nclass OtherName(BaseModel):\n\totherName: str\n\tnameLink: AnyUrl\n\texternalUid: str\n\nclass NameOut(BaseModel):\n\tname: str\n\tdescription: str\n\tgender: str = None\n\tbornIn: str = None\n\tbrithDate: str = None\n\tdiedIn: str = None\n\tdeathDate: str = None\n\tnameLink: AnyUrl\n\tsameAs: List[OtherName] = None\n\trelations: AnyUrl = None\n\texternalUid: str\n\toccurences: List[wordReference] = None\n\n@app.get('/names', response_model=List[NameOut], responses = {502:{\"model\":ErrorResponse}, 404:{\"model\": ErrorResponse}, 422:{\"model\": ErrorResponse}}, response_model_exclude_unset=True, status_code=200, tags=[\"Dictionaries\"])\ndef get_names(name: str = None, externalUid: str = None, occurences: bool = False, skip:int = 0, limit: int = 10):\n\t''' Fetched the list of Bible Names in Graph\n\t* name or externalUid can be used to obtaine specific names and details\n\t* occurences flag can be used to fetch occurences of the name in bible\n\t* skip and limit can be used for pagination'''\n\tif not graph_conn:\n\t\ttest()\n\tvariables = {\n\t\t\"$skip\": str(skip),\n\t\t\"$limit\": str(limit)\n\t\t}\n\tresult = []\n\ttry:\n\t\tif not name and not externalUid:\n\t\t\tnames_query_res = graph_conn.query_data(all_names_query, variables)\n\t\telif externalUid:\n\t\t\tvariables['$xuid'] = externalUid\n\t\t\tnames_query_res = graph_conn.query_data(one_name_xuid_query, variables)\n\t\telif 
name:\n\t\t\tvariables['$name'] = name\n\t\t\tnames_query_res = graph_conn.query_data(name_match_query, variables)\n\texcept Exception as e:\n\t\tlogging.error('At fetching names')\n\t\tlogging.error(e)\n\t\tif \"details\" in dir(e):\n\t\t\tdetails = e.details()\n\t\telse:\n\t\t\tdetails = str(e)\n\t\traise GraphException(details)\n\n\tif len(names_query_res['names']) == 0:\n\t\traise NotAvailableException(\"Bible Names: \"+str(variables))\n\n\tresult = names_query_res['names']\n\n\tfor i,person in enumerate(result):\n\t\tresult[i]['nameLink'] = '%s/names?externalUid=%s;occurences=True'%(base_URL,urllib.parse.quote(person['externalUid']))\n\t\tresult[i]['relations'] = '%s/names/relations?externalUid=%s'%(base_URL, urllib.parse.quote(person['externalUid']))\n\t\tif \"sameAs\" in person:\n\t\t\tfor j,otherName in enumerate(person['sameAs']):\n\t\t\t\tresult[i]['sameAs'][j]['nameLink'] = '%s/names?externalUid=%s;occurences=True'%(base_URL, urllib.parse.quote(otherName['externalUid']))\n\tif occurences:\n\t\tfor i, person in enumerate(result):\n\t\t\tresult[i]['occurences'] = []\n\t\t\ttry:\n\t\t\t\toccurences_query_res = graph_conn.query_data(names_link_query,{'$xuid': person['externalUid'], \"$skip\": str(skip), \"$limit\": str(limit)})\n\t\t\texcept Exception as e:\n\t\t\t\tlogging.error('At fetching names occurences of %s'%person['name'])\n\t\t\t\tlogging.error(e)\n\t\t\t\tif \"details\" in dir(e):\n\t\t\t\t\tdetails = e.details()\n\t\t\t\telse:\n\t\t\t\t\tdetails = str(e)\n\t\t\t\traise GraphException(details)\n\t\t\tif len(occurences_query_res['occurences']) == 0:\n\t\t\t\tlogging.warn('At fetching names occurences of %s'%person['name'])\n\t\t\t\tlogging.warn(\"Requested contents not available\")\n\t\t\telse:\n\t\t\t\tif '~nameLink' in occurences_query_res['occurences'][0]:\n\t\t\t\t\tresult[i]['occurences'] = occurences_query_res['occurences'][0]['~nameLink']\n\t\t\t\tif 'sameAs' in occurences_query_res['occurences'][0]:\n\t\t\t\t\tfor otherName_occurences in 
occurences_query_res['occurences'][0]['sameAs']:\n\t\t\t\t\t\tresult[i]['occurences'] += otherName_occurences['~nameLink']\n\t\t\t\tfor j,occur in enumerate(result[i]['occurences']):\n\t\t\t\t\tverseLink = '%s/bibles/%s/books/%s/chapters/%s/verses/%s'%(base_URL, urllib.parse.quote(occur['bible']), occur['bookNumber'], occur['chapter'], occur['verse'])\n\t\t\t\t\tresult[i]['occurences'][j]['verseLink'] = verseLink\n\t\t\t\t\tresult[i]['occurences'][j]['book'] = num_book_map[occur['bookNumber']]\n\treturn result\n\n\n@app.get(\"/names/relations\", response_class=FileResponse, responses = {502:{\"model\":ErrorResponse}, 404:{\"model\": ErrorResponse}, 422:{\"model\": ErrorResponse}}, response_model_exclude_unset=True, status_code=200, tags=[\"Extras\"])\ndef get_person_relations(externalUid: str):\n\tif not graph_conn:\n\t\ttest()\n\tresult = {}\n\ttry:\n\t\trelations_query_result = graph_conn.query_data(family_tree_query,{'$xuid': externalUid})\n\texcept Exception as e:\n\t\tlogging.error('At fetching family relations of %s'%extrenalUid)\n\t\tlogging.error(e)\n\t\tif \"details\" in dir(e):\n\t\t\tdetails = e.details()\n\t\telse:\n\t\t\tdetails = str(e)\n\t\traise GraphException(details)\n\tif len(relations_query_result['relations']) == 0:\n\t\traise NotAvailableException(\"Realtions: \"+str(externalUid))\n\n\tresult['person'] = {\"name\": relations_query_result['relations'][0]['name'],\n\t\t\t\t\t\t\"link\": \"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(relations_query_result['relations'][0]['externalUid']))}\n\tresult['siblings'] = []\n\tif \"father\" in relations_query_result['relations'][0]:\n\t\tresult['father'] = {\"name\": relations_query_result['relations'][0]['father'][0]['name'],\n\t\t\t\t\t\t\t\"link\": \"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(relations_query_result['relations'][0]['father'][0]['externalUid'])) \n\t\t\t\t\t\t\t}\n\t\tif 'sibling' in 
relations_query_result['relations'][0]['father'][0]:\n\t\t\tfor sibling in relations_query_result['relations'][0]['father'][0]['sibling']:\n\t\t\t\tif sibling['externalUid'] != externalUid:\n\t\t\t\t\tresult['siblings'].append({\"name\": sibling['name'],\n\t\t\t\t\t\t\t\t\t\t\t\t\"link\":\"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(sibling['externalUid']))\n\t\t\t\t\t\t\t\t\t\t\t\t})\n\telif \"sameAs\" in relations_query_result['relations'][0]:\n\t\tfor otherName in relations_query_result['relations'][0]['sameAs']:\n\t\t\tif 'father' in otherName:\n\t\t\t\tif 'father' not in result:\n\t\t\t\t\tresult['father'] = {\"name\": otherName['father'][0]['name'],\n\t\t\t\t\t\t\t\t\t\t\"link\": \"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(otherName['father'][0]['externalUid']))}\n\t\t\t\tif 'sibling' in otherName['father'][0]:\n\t\t\t\t\tfor sibling in otherName['father'][0]['sibling']:\n\t\t\t\t\t\tresult['siblings'].append({\"name\": sibling['name'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"link\":\"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(sibling['externalUid']))\n\t\t\t\t\t\t\t\t\t\t\t\t\t})\n\t\t\t\tbreak\n\tif \"mother\" in relations_query_result['relations'][0]:\n\t\tresult['mother'] = {\"name\": relations_query_result['relations'][0]['mother'][0]['name'],\n\t\t\t\t\t\t\t\"link\": \"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(relations_query_result['relations'][0]['mother'][0]['externalUid'])) \n\t\t\t\t\t\t\t}\n\t\tif 'sibling' in relations_query_result['relations'][0]['mother'][0]:\n\t\t\tfor sibling in relations_query_result['relations'][0]['mother'][0]['sibling']:\n\t\t\t\tsib = {\"name\": sibling['name'],\n\t\t\t\t\t\t\"link\":\"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(sibling['externalUid']))\n\t\t\t\t\t\t}\n\t\t\t\tif sibling['externalUid'] != externalUid and sib not in result['siblings']:\n\t\t\t\t\tresult['siblings'].append()\n\telif \"sameAs\" in 
relations_query_result['relations'][0]:\n\t\tfor otherName in relations_query_result['relations'][0]['sameAs']:\n\t\t\tif 'mother' in otherName:\n\t\t\t\tif 'mother' not in result:\n\t\t\t\t\tresult['mother'] = {\"name\": otherName['mother'][0]['name'],\n\t\t\t\t\t\t\t\t\t\t\"link\": \"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(otherName['mother'][0]['externalUid']))}\n\t\t\t\tif 'sibling' in otherName['mother'][0]:\n\t\t\t\t\tfor sibling in otherName['mother'][0]['sibling']:\n\t\t\t\t\t\tsib = {\"name\": sibling['name'],\n\t\t\t\t\t\t\t\t\"link\":\"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(sibling['externalUid']))\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\tif sibling['externalUid'] != externalUid and sib not in result['siblings']:\n\t\t\t\t\t\t\tresult['siblings'].append({\"name\": sibling['name'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"link\":\"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(sibling['externalUid']))\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t})\n\t\t\t\tbreak\n\tresult['spouses'] = []\n\tif \"spouse\" in relations_query_result['relations'][0]:\n\t\tfor spouse in relations_query_result['relations'][0]['spouse']:\n\t\t\tsps = {\"name\": spouse['name'],\n\t\t\t\t\"link\": \"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(spouse['externalUid']))}\n\t\t\tif sps not in result['spouses']:\n\t\t\t\tresult['spouses'].append(sps)\n\telif \"sameAs\" in relations_query_result['relations'][0]:\n\t\tfor otherName in relations_query_result['relations'][0]['sameAs']:\n\t\t\tif \"spouse\" in otherName:\n\t\t\t\tfor spouse in otherName['spouse']:\n\t\t\t\t\tsps = {\"name\": spouse['name'],\n\t\t\t\t\t\t\"link\": \"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(spouse['externalUid']))}\n\t\t\t\t\tif sps not in result['spouses']:\n\t\t\t\t\t\tresult['spouses'].append(sps)\n\t\t\t\t\t\t\n\tresult['children'] = []\n\tif \"children1\" in relations_query_result['relations'][0]:\n\t\tfor child in 
relations_query_result['relations'][0]['children1']:\n\t\t\tch = {\"name\": child['name'],\n\t\t\t\t\t\"link\": \"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(child['externalUid']))}\n\t\t\tif ch not in result['children']:\n\t\t\t\tresult['children'].append(ch)\t\n\telif \"children2\" in relations_query_result['relations'][0]:\n\t\tfor child in relations_query_result['relations'][0]['children2']:\n\t\t\tch = {\"name\": child['name'],\n\t\t\t\t\t\"link\": \"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(child['externalUid']))}\n\t\t\tif ch not in result['children']:\n\t\t\t\tresult['children'].append(ch)\t\n\telif \"sameAs\" in relations_query_result['relations'][0]:\n\t\tfor otherName in relations_query_result['relations'][0]['sameAs']:\n\t\t\tif 'children1' in otherName:\n\t\t\t\tfor child in otherName['children1']:\n\t\t\t\t\tch = {\"name\": child['name'],\n\t\t\t\t\t\t\t\"link\": \"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(child['externalUid']))}\n\t\t\t\t\tif ch not in result['children']:\n\t\t\t\t\t\tresult['children'].append(ch)\t\n\t\t\telif 'children2' in otherName:\n\t\t\t\tfor child in otherName['children2']:\n\t\t\t\t\tch = {\"name\": child['name'],\n\t\t\t\t\t\t\t\"link\": \"%s/names/relations?externalUid=%s\"%(base_URL, urllib.parse.quote(child['externalUid']))}\n\t\t\t\t\tif ch not in result['children']:\n\t\t\t\t\t\tresult['children'].append(ch)\t\n\tif len(result['spouses']) == 0:\n\t\tdel result['spouses']\n\tif len(result['siblings']) == 0:\n\t\tdel result['siblings']\n\tif len(result['children']) == 0:\n\t\tdel result['children']\n\n\tfig = plt.figure(figsize=(12,12))\n\tax = plt.subplot(111)\n\tax.set_title('Graph - Shapes', fontsize=10)\n\tG = nx.DiGraph()\n\n\tG.add_node(result['person']['name'], level=3)\n\tcolour_list = [\"blue\"]\n\n\tif 'father' in result:\n\t\tG.add_node(result['father']['name'], level=4)\n\t\tG.add_edge(result['person']['name'], result['father']['name'], 
label='father')\n\t\tcolour_list.append(\"green\")\n\n\tif 'mother' in result:\n\t\tG.add_node(result['mother']['name'], level=4 )\n\t\tG.add_edge(result['person']['name'], result['mother']['name'], label='mother')\n\t\tcolour_list.append(\"green\")\n\n\tif 'siblings' in result:\n\t\tfor sib in result['siblings']:\n\t\t\tG.add_node(sib['name'], level=2)\n\t\t\tG.add_edge(result['person']['name'], sib['name'], label='sibling')\n\t\t\tcolour_list.append(\"purple\")\n\n\tif 'spouses' in result:\n\t\tfor sps in result['spouses']:\n\t\t\tG.add_node(sps['name'], level=2)\n\t\t\tG.add_edge(result['person']['name'], sps['name'], label='spouse')\n\t\t\tcolour_list.append(\"pink\")\n\n\tif 'children' in result:\n\t\tfor child in result['children']:\n\t\t\tG.add_node(child['name'], level=1)\n\t\t\tG.add_edge(result['person']['name'], child['name'], label='child')\n\t\t\tcolour_list.append(\"orange\")\n\n\tpos = nx.multipartite_layout(G, subset_key='level', align='horizontal')\n\tnx.draw(G, pos, node_size=5000, node_color=colour_list, font_size=20, font_weight='bold')\n\tnx.draw_networkx_labels(G, pos)\n\tnx.draw_networkx_edge_labels(G, pos)\n\tplt.tight_layout()\n\tplt.savefig(\"static/Family-tree.png\")\n\n\trels_html = \"\"\n\tfor key in result:\n\t\tif key in ['person', 'mother', 'father']:\n\t\t\trels_html += '%s:<a href=\"%s\">%s</a><br>'%(key.upper(), result[key]['link'], result[key]['name'])\n\t\telse:\n\t\t\titems = \", \".join(['<a href=\"%s\">%s</a>'%(it['link'], it['name']) for it in result[key]])\n\t\t\t# items = ['<a href=\"%s\">%s</a>'%(it['link'], it['name']) for it in result[key]]\n\t\t\trels_html += '%s:%s <br>'%(key.upper(), str(items))\n\n\thtml_file = open(\"Family-tree.html\", \"w\")\n\thtml_content = '''\n\t<html>\n\t<body>\n\t<div style=\"float:left\" width=\"25%%\">\n\t%s\n\t</div>\n\t<div style=\"float:left\" width=\"75%%\">\n\t<img src=\"/static/Family-tree.png\" 
height=\"750px\">\n\t</div>\n\t</body>\n\t</html>\n\t'''%rels_html\n\n\thtml_file.write(html_content)\n\thtml_file.close()\n\treturn FileResponse(\"Family-tree.html\")\n\nexluded_verses_query = '''query verses($bib_uid: string){\n\tverse(func: uid($bib_uid)) @normalize{\n\t\tbible,\n\t\texcludedVerse{\n\t\t\tverse: verseNumber,\n\t\t\tbelongsTo{\n\t\t\t\tchapter: chapter,\n\t\t\t\tbelongsTo{\n\t\t\t\t\tbook: bookcode\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\t\n}\n\n'''\n\nverse_mappings_query = '''\nquery verses($bib_uid: string){\n\tverse(func: uid($bib_uid)) @cascade @normalize{\n\t\tbible,\n\t\t~belongsTo{\n\t\t\tsrcBook:bookNumber,\n\t\t\t~belongsTo{\n\t\t\t\tsrcChapter:chapter,\n\t\t\t\t~belongsTo{\n\t\t\t\t\tsrcVerse: verse,\n\t\t\t\t\tverseMapping{\n\t\t\t\t\t\ttrgVerse: verseNumber,\n\t\t\t\t\t\tbelongsTo{\n\t\t\t\t\t\t\ttrgChapter: chapter,\n\t\t\t\t\t\t\tbelongsTo{\n\t\t\t\t\t\t\t\ttrgBook: bookcode\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\t\n}\n'''\n\nmaxVerse_query = '''query struct($bib_uid: string){\n\tstruct(func: uid($bib_uid)) {\n\t\tbible,\n\t\t~belongsTo{\n\t\t\tbookNumber,\n\t\t\t~belongsTo (orderasc: chapter) @normalize{\n\t\t\t\tchapter: chapter,\n\t \t\t~belongsTo{\n\t\t\t\t\tverseNum as verse\n \t\t\t\t}\n \t\t\t\tmaxVerse: max(val(verseNum))\n\t\t\t}\n\t\t}\n\t}\t\n}\n'''\nbible_uid_query = '''\n\tquery bible($bib: string){\n\tbible(func: eq(bible, $bib)){\n\t\tuid\n\t}\n\t}\n'''\nverse_range_pattern = re.compile(r'([\\w\\d]\\w\\w) (\\d+):(\\d+\\w*)\\-?(\\d+\\w*)?$')\n\n\n@app.get(\"/versification/map\", status_code=200, tags=[\"READ\", \"Versification\"])\ndef get_versification_map(bible_name:str):\n\t'''Gets a text output as given by versification sniffer, if mapping is added for the bible'''\n\tversification = {}\n\tversification[\"maxVerses\"] = {}\n\tversification[\"partialVerses\"] = {}\n\tversification[\"verseMappings\"] = {}\n\tversification[\"excludedVerses\"] = []\n\tversification[\"unexcludedVerses\"] 
= {}\n\tif not graph_conn:\n\t\ttest()\n\tbib_res = graph_conn.query_data(bible_uid_query, {\"$bib\":bible_name})\n\tif len(bib_res['bible']) < 1:\n\t\traise GraphException(\"Bible not found:%s\"%bible_name)\n\tbib_uid = bib_res['bible'][0]['uid']\n\n\t## exlcudedVerses\n\tverses = graph_conn.query_data(exluded_verses_query, {\"$bib_uid\": str(bib_uid)})\n\tfor ver in verses['verse']:\n\t\tref = '%s %s:%s'%(ver['book'], ver['chapter'], ver['verse'])\n\t\tversification[\"excludedVerses\"].append(ref)\n\tprint(versification[\"excludedVerses\"])\n\n\t# verseMappings\n\tmapped_verses = graph_conn.query_data(verse_mappings_query, {\"$bib_uid\": str(bib_uid)})\n\n\tfor ver in mapped_verses['verse']:\n\t\tkey = \"%s %s:%s\"%(num_book_map[ver[\"srcBook\"]], ver[\"srcChapter\"], ver[\"srcVerse\"])\n\t\tval = \"%s %s:%s\"%(ver[\"trgBook\"], ver[\"trgChapter\"], ver[\"trgVerse\"])\n\t\tif key in versification['verseMappings']:\n\t\t\tmatch_obj = re.match(verse_range_pattern, versification['verseMappings'][key])\n\t\t\tbook = match_obj.group(1)\n\t\t\tchapter = match_obj.group(2)\n\t\t\tverse_s = match_obj.group(3)\n\t\t\tverse_e = match_obj.group(4)\n\t\t\tif book == ver[\"trgBook\"] and chapter == ver[\"trgChapter\"]:\n\t\t\t\tif verse_e is None:\n\t\t\t\t\trange_ = sorted([int(verse_s), ver[\"trgVerse\"]])\n\t\t\t\telse:\n\t\t\t\t\trange_ = sorted([int(verse_s), int(verse_e), ver[\"trgVerse\"]])\n\t\t\t\tsorted_range = str(range_[0])+\"-\"+str(range_[-1])\n\t\t\t\tval = \"%s %s:%s\"%(ver[\"trgBook\"], ver[\"trgChapter\"], sorted_range)\n\t\t\telse:\n\t\t\t\tval = versification['verseMappings'][key] +\", \"+ val\n\t\tversification['verseMappings'][key] = val\n\tprint(versification['verseMappings'])\n\n\t# maxVerses\n\tbook_chapters = graph_conn.query_data(maxVerse_query, {\"$bib_uid\": str(bib_uid)})\n\tfor book in book_chapters['struct'][0]['~belongsTo']:\n\t\t# print(book)\n\t\tbook_code = num_book_map[book['bookNumber']]\n\t\tbook_entry = []\n\t\tfor chap in 
book['~belongsTo']:\n\t\t\tbook_entry.append(chap[\"maxVerse\"])\n\t\tversification['maxVerses'][book_code] = book_entry\n\tprint(versification['maxVerses'])\n\n\t# partialVerses: to be implemented\n\t# unExcludedVerses: to be implemented\n\treturn versification\n\nparallel_versi_verses_query = '''query verse($book: string, $chapter:int, $verse:int){\n\tverse(func: eq(versification, \"original\")) @cascade @normalize{\n\t\tversification,\n\t\t~belongsTo @filter(eq(bookcode, $book)){\n\t\t\tbookcode,\n\t\t\t~belongsTo @filter(eq(chapter, $chapter)){\n\t\t\t\tchapter,\n\t\t\t\t~belongsTo @filter(eq(verseNumber, $verse)){\n\t\t\t\t\tuid\n\t\t\t\t\t~verseMapping{\n\t\t\t\t\t\tverse: verseText,\n\t\t\t\t\t\tverseNum: verse,\n\t\t\t\t\t\tbelongsTo{\n\t\t\t\t\t\t\tchapter: chapter,\n\t\t\t\t\t\t\tbelongsTo{\n\t\t\t\t\t\t\t\tbook:book,\n\t\t\t\t\t\t\t\tbookNumber: bookNumber,\n\t\t\t\t\t\t\t\tbelongsTo{\n\t\t\t\t\t\t\t\t\tbible:bible\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\t\n}\n'''\n\nsimple_parallel_verses_query = '''query verse($book: string, $chapter:int, $verse:int){\n\tverse(func: has(bible)) @normalize @cascade{\n\t\tbible:bible,\n\t\t~belongsTo @filter(eq(bookNumber, $book)){\n\t\t\tbook:book,\n\t\t\tbookNumber:bookNumber,\n\t\t\t~belongsTo @filter(eq(chapter, $chapter)){\n\t\t\t\tchapter:chapter,\n\t\t\t\t~belongsTo @filter(eq(verse, $verse)){\n\t\t\t\t\tverseNumber:verse,\n\t\t\t\t\tverseText:verseText\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\t\n}\n'''\n\n@app.get(\"/versification/verse\", status_code=200, tags=[\"READ\", \"Versification\"])\ndef get_verse_map(bookcode: BibleBook, chapter:int, verse:int):\n\t'''Gets all verses mapped to the original verse given by bcv.'''\n\tif not graph_conn:\n\t\ttest()\n\tvar = {\"$book\": bookcode.upper(), \"$chapter\":str(chapter), \"$verse\":str(verse)}\n\tmapped_verses = graph_conn.query_data(parallel_versi_verses_query, var)['verse']\n\t# print(mapped_verses)\n\tres = 
mapped_verses\n\tmapped_bibles = set([item['bible'] for item in mapped_verses])\n\n\tvar['$book'] = str(book_num_map[bookcode])\n\tparallelverses = graph_conn.query_data(simple_parallel_verses_query, var)['verse']\n\tfor ver in parallelverses:\n\t\tif ver['bible'] not in mapped_bibles:\n\t\t\tres.append(ver)\n\n\treturn res\n\n" ]
[ [ "matplotlib.pyplot.subplot", "matplotlib.pyplot.savefig", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.figure" ] ]
shun60s/Wave-DNN
[ "d7d74676ffa540c6159bd16b36ccea3b5b381109" ]
[ "get_fbank.py" ]
[ "#coding: utf-8\r\n\r\n#######################################################\r\n#Description:\r\n# This is a python implement to get FBANK_D_A from a wave file as input.\r\n# warning: this is not complete compatible with julius.\r\n#\r\n# This is based on HTKFeat.py in PyHTK and C program in julius-4.4.2.zip.\r\n#\r\n# PyHTK: <https://github.com/danijel3/PyHTK>\r\n# License: Apache License, Version 2.0 (see LICENSE-PyHTK)\r\n#\r\n#\r\n# julius-4.4.2:\r\n#\r\n# Copyright (c) 1991-2016 Kawahara Lab., Kyoto University\r\n# Copyright (c) 1997-2000 Information-technology Promotion Agency, Japan\r\n# Copyright (c) 2000-2005 Shikano Lab., Nara Institute of Science and Technology\r\n# Copyright (c) 2005-2016 Julius project team, Nagoya Institute of Technology\r\n#\r\n# License: see LICENSE-Julius.txt\r\n#\r\n#Date 2018-03-19\r\n#By Shun\r\n#########################################################\r\n\r\n\r\nimport numpy as np\r\nimport wave\r\n\r\n# Check version\r\n# Python 2.7.12 on win32 (Windows version)\r\n# numpy 1.14.0 \r\n\r\n\r\nclass GetFBANK_D_A:\r\n\tdef __init__(self,NFRAME=25, NSHIFT=10, sampling_rate=16000, num_banks=40,deltawindowlen=2):\r\n\t\t\r\n\t\tself.NFRAME=int(NFRAME * 0.001/ (1.0 /sampling_rate)) # xxx ms is What points ? # 400 sr=16Khz # 551 sr=22.05Khz\r\n\t\tself.NSHIFT=int(NSHIFT * 0.001/ (1.0 /sampling_rate)) # xxx ms is What points ? 
# 160 sr=16khz # 220 sr=22.050khz\r\n\t\tself.fft_len = 2 ** int(np.asscalar((np.floor(np.log2(self.NFRAME)) + 1)))\r\n\t\tself.fft_data_len= self.fft_len / 2 + 1 # np.rfft output is N/2+1\r\n\t\tself.num_banks=num_banks\r\n\t\tself.sampling_rate=sampling_rate\r\n\t\tself.deltawindowlen=deltawindowlen\r\n\t\tprint ('NFRAME ', self.NFRAME, ' NSHIFT ', self.NSHIFT, ' fft_len', self.fft_len)\r\n\r\n\t\t# Windows is Hamming\r\n\t\tself.window = np.hamming(self.NFRAME)\r\n\t\t# make filter bank weight\r\n\t\tself.melmat, self.loChan, self.loWt, self.klo, self.khi = self.cal_melmat()\r\n\t\t# pre-emphasis \r\n\t\tself.preemph=0.97\r\n\r\n\r\n\r\n\tdef cal_delta(self,db0):\r\n\t#\r\n\t# db0[num_frame, num_vec]\r\n\t#\r\n\t\tB = 2.0 * (sum(np.arange(1, self.deltawindowlen + 1) ** 2))\r\n\t\tnum_frame=db0.shape[0]\r\n\t\tnum_vec=db0.shape[1]\r\n\t\tdeltas = np.zeros( (num_frame, num_vec))\r\n\t\tfor n in range(num_frame):\r\n\t\t\tdelta1 = np.zeros(num_vec)\r\n\t\t\tfor t in range(1, self.deltawindowlen + 1):\r\n\t\t\t\ttm = n - t\r\n\t\t\t\ttp = n + t\r\n\t\t\t\tif tm < 0:\r\n\t\t\t\t\ttm = 0\r\n\t\t\t\tif tp >= num_frame:\r\n\t\t\t\t\ttp = num_frame - 1\r\n\r\n\t\t\t\tdelta1 += (t * (db0[tp] - db0[tm])) \r\n\r\n\t\t\t#print (delta1)\r\n\t\t\tdeltas[n,:]= delta1 / B\r\n\t\t\r\n\t\treturn deltas\r\n\r\n\r\n\tdef get_fbank_d_a(self,file_name,fshow=False ):\r\n \t\r\n\t\twaveFile= wave.open( file_name, 'r')\r\n\t\t\r\n\t\tnchannles= waveFile.getnchannels()\r\n\t\tsamplewidth = waveFile.getsampwidth()\r\n\t\tsampling_rate = waveFile.getframerate()\r\n\t\tnframes = waveFile.getnframes()\r\n\t\t\r\n\t\tassert sampling_rate == self.sampling_rate, ' sampling rate is miss match ! 
'\r\n\t\t\r\n\t\t\r\n\t\tif fshow :\r\n\t\t\tprint(\"Channel num : \", nchannles)\r\n\t\t\tprint(\"Sampling rate : \", sampling_rate)\r\n\t\t\tprint(\"Frame num : \", nframes)\r\n\t\t\tprint(\"Sample width : \", samplewidth)\r\n\t\t\r\n\t\tbuf = waveFile.readframes(-1) # read all, or readframes( 1024)\r\n\t\t\r\n\t\twaveFile.close()\r\n\t\t\r\n\t\tif samplewidth == 2:\r\n\t\t\tdata = np.frombuffer(buf, dtype='int16')\r\n\t\t\tfdata = data.astype(np.float32) / 32768.\r\n\t\telif samplewidth == 4:\r\n\t\t\tdata = np.frombuffer(buf, dtype='int32')\r\n\t\t\tfdata = data.astype(np.float32) / np.power(2.0, 31)\r\n\t\t\r\n\t\t# convert to 16bit integer scale\r\n\t\tfdata *= 32768.\r\n\t\t\r\n\t\t# convert to MONO, if stereo input\r\n\t\tif nchannles == 2:\r\n\t\t\t#l_channel = fdata[::nchannles]\r\n\t\t\t#r_channel = fdata[1::nchannles]\r\n\t\t\tfdata= (fdata[::nchannles] + fdata[1::nchannles]) /2.0\r\n\t\t\r\n\t\tcount= ((nframes - ( self.NFRAME - self.NSHIFT)) / self.NSHIFT)\r\n\t\ttime_song = float(nframes) / sampling_rate\r\n\t\ttime_unit = 1 / float(sampling_rate)\r\n\t\t\r\n\t\tif fshow :\r\n\t\t\tprint(\"time song : \", time_song)\r\n\t\t\tprint(\"time unit : \", time_unit)\r\n\t\t\tprint(\"count : \", count)\r\n\t\t\r\n\t\t\r\n\t\t# initi spect \r\n\t\tfbank = np.zeros([count,self.num_banks]) \r\n\t\tpos = 0\r\n\t\tcountr=0\r\n\t\tfor fft_index in range(count):\r\n\t\t\tframe = fdata[pos:pos + self.NFRAME].copy()\r\n\t\t\t\r\n\t\t\t## pre-emphasis\r\n\t\t\tframe -= np.hstack((frame[0], frame[:-1])) * self.preemph\r\n\t\t\t\r\n\t\t\twindowed = self.window * frame\r\n\t\t\tfft_result = np.fft.rfft(windowed, n=self.fft_len) # real input, output dimension N/2+1, zero padding\r\n\t\t\tfft_data = np.abs(fft_result) \r\n\t\t\tfft_data2= np.dot(self.melmat, fft_data)\r\n\t\t\tfft_data2[ fft_data2 < 1.0] = 1.0 # as of julius if(temp < 1.0) temp = 1.0;\r\n\t\t\tfft_data2 = np.log(fft_data2) \r\n\t\t\t\r\n\t\t\tfbank[countr] = fft_data2\r\n\t\t\t# index count 
up\r\n\t\t\tcountr +=1\r\n\t\t\t# next\r\n\t\t\tpos += self.NSHIFT\r\n\r\n\t\t# get delta\r\n\t\tfbank_d= self.cal_delta(fbank)\r\n\t\t# get acceleration\r\n\t\tfbank_a= self.cal_delta(fbank_d)\r\n\t\t\r\n\t\t# con cat three data\r\n\t\tfbankda=np.concatenate( (fbank, fbank_d, fbank_a),axis=1)\r\n\t\t\r\n\t\treturn fbankda \r\n\r\n\r\n\tdef cal_melmat(self,):\r\n\t\tnv2 = self.fft_len/2 #w->fb.fftN / 2; 512/2=256\r\n\t\tfres = self.sampling_rate / (self.fft_len * 700.0) # freq_d700\r\n\t\tmaxChan = self.num_banks + 1\r\n\t\t\r\n\t\tMel = lambda k, freq_d700 : 1127.0 * (np.log(1.0 + (k - 1) * (freq_d700)))\r\n\t\t\r\n\t\tklo = 2\r\n\t\tkhi = nv2\r\n\t\tmlo = 0\r\n\t\tmhi = Mel(nv2 + 1, fres)\r\n\t\t#print (' mlo, mhi ', mlo, mhi)\r\n\t\t\r\n\t\tcf=np.zeros( maxChan+1) # size is numbank+2\r\n\t\tms = mhi - mlo\r\n\t\tfor chan in range(1, maxChan+1): \r\n\t\t\tcf[chan] = (1.0 * chan / maxChan)*ms + mlo\r\n\t\t#print ('center of each channel',cf)\r\n\t\t\r\n\t\tloChan = np.zeros(nv2 + 1 +1)\r\n\t\tchan=1\r\n\t\tfor k in range(1, nv2+1):\r\n\t\t\tif k < klo or k > khi:\r\n\t\t\t\tloChan[k] = -1\r\n\t\t\telse:\r\n\t\t\t\tmelk = Mel(k, fres)\r\n\t\t\t\twhile cf[chan] < melk and chan <= maxChan:\r\n\t\t\t\t\tchan+=1\r\n\t\t\t\tloChan[k] = chan - 1\r\n\t\t\r\n\t\t#print('loChan', loChan)\r\n\t\t\r\n\t\tloWt = np.zeros(nv2 + 1 +1)\r\n\t\tfor k in range (1,nv2+1):\r\n\t\t\tchan = int(loChan[k])\r\n\t\t\tif k < klo or k > khi :\r\n\t\t\t\tloWt[k] = 0.0\r\n\t\t\telse:\r\n\t\t\t\tif chan > 0 :\r\n\t\t\t\t\tloWt[k] = (cf[chan + 1] - Mel(k, fres)) / (cf[chan + 1] - cf[chan])\r\n\t\t\t\telse:\r\n\t\t\t\t\tloWt[k] = (cf[1] - Mel(k, fres)) / (cf[1] - mlo)\r\n\t\t\r\n\t\t#print ('loWt', loWt)\r\n\t\t\r\n\t\tmelmat=np.zeros((self.num_banks, self.fft_data_len))\r\n\t\t\r\n\t\tfor k in range ( klo, khi+1):\r\n\t\t\t#A=spec[k-1] \r\n\t\t\tbin = int(loChan[k])\r\n\t\t\tif bin > 0:\r\n\t\t\t\tmelmat[bin-1][k-1] +=loWt[k]\r\n\t\t\tif bin < self.num_banks : \r\n\t\t\t\tmelmat[bin][k-1] += 
(1.0 - loWt[k])\r\n\t\t#return fbank[1:]\r\n\t\t\r\n\t\treturn melmat, loChan, loWt, klo, khi\r\n\t\r\n\t\r\n\t\r\n\r\n\r\n# this file use TAB\r\n" ]
[ [ "numpy.concatenate", "numpy.fft.rfft", "numpy.dot", "numpy.zeros", "numpy.log", "numpy.hamming", "numpy.arange", "numpy.power", "numpy.abs", "numpy.frombuffer", "numpy.hstack", "numpy.log2" ] ]
Divjyot/kohonen-network
[ "5fda5d7b2541589398f53ba58f008b5dde18edf9" ]
[ "app/utils/utils.py" ]
[ "from settings import NPY_EXT, MODELS_DIR\n\nimport os\nimport math\nimport numpy as np\nfrom datetime import datetime\n\ndef euc(vec:np.array, pC:np.array):\n pC_vec = np.full((pC.shape[0], pC.shape[1]), vec)\n step1 = np.subtract(pC, pC_vec)\n step2 = np.square(step1)\n step3 = np.sum(step2, axis=1, dtype=float).reshape(pC.shape[0],)\n step4 = np.sqrt(step3, dtype=float)\n return step4\n\ndef eucledian_between_point(point1: tuple, point2: tuple):\n \"\"\"\n Return eucledian distance between two points.\n\n Parameters\n ----------\n point1 : tuple\n (x,y) coordinate pair.\n\n point2 : tuple\n (x,y) coordinate pair.\n\n Returns\n -------\n Eucledian distance between both vectors.\n \"\"\"\n point1_x, point1_y = point1\n point2_x, point2_y = point2\n return math.sqrt(((point1_x - point2_x) ** 2) + ((point1_y - point2_y) ** 2))\n\n\ndef eucledian_between_vec(vec1: np.array, vec2: np.array):\n \"\"\"\n Return eucledian distance between two vectors.\n\n Parameters\n ----------\n vec1 : numpy.array\n Array contains coordinate set of points.\n\n vec2 : numpy.array\n Array contains coordinate set of points.\n\n Returns\n -------\n Eucledian distance between both vectors.\n \"\"\"\n return np.sqrt(np.sum(np.square(np.subtract(vec1, vec2))))\n\n\ndef get_model_path(model_name):\n \"\"\"\n Returns a path with extension based on param:model_name.\n\n Parameters\n ----------\n model_name : str\n Name of file under which weights are saved.\n \"\"\"\n model_name = model_name.replace(NPY_EXT, \"\")\n return os.path.join(MODELS_DIR, f\"{model_name}{NPY_EXT}\")\n\n\ndef generate_model_name(grid_size, max_iterations, learning_rate):\n \"\"\"\n Parameters\n ----------\n grid_size : api_params.TrainParameters\n Same parameter object used for training a model.\n\n max_iterations : int\n Max iterations that model training on.\n\n learning_rate : float\n Learning rate that model training on.\n\n Returns\n -------\n model_name : str\n A unique string build using parameters attributes.\n 
\"\"\"\n grid_x, grid_y = grid_size\n return f\"{datetime.now().strftime('%d-%m-%Y_%Hh%Mm%Ss')}T_{grid_x}X{grid_y}_{max_iterations}N_{learning_rate}LR\"\n\n\n##############################################################################\nimport multiprocessing\nimport enum\n\n\nclass ParallelProcessingTargets(enum.Enum):\n \"\"\"\n This enum class helps to facilitate boolean flags in code\n to isolate parallel processing code for conditional execution.\n \"\"\"\n\n FIND_BMU = \"pp_FIND_BMU\"\n INF_BMU_W = \"pp_INF_BMU_W\"\n INF_BMU_POS = \"pp_INF_BMU_POS\"\n\n\ndef apply_along_axis_wrapper(apply_along_axis_args):\n \"\"\"\n Wrapper around numpy.apply_along_axis().\n\n Parameters\n ----------\n apply_along_axis_args : n-tuple\n Tuple containing arguments to numpy.apply_along_axis arguments\n\n\n Returns\n -------\n A numpy array to which func1D has applied.\n \"\"\"\n (func1d, axis, arr, args, kwargs) = apply_along_axis_args\n return np.apply_along_axis(func1d, axis, arr, *args, **kwargs)\n\n\ndef parallel_apply_along_axis(func1d, axis, arr, *args, **kwargs):\n \"\"\"\n A multiprocessing variant of numpy.apply_along_axis() which divides the\n numpy.array into n-chunks based on the number of CPUs. It processes these\n chunks in parallel and later concates the results from each chunk into a array.\n\n\n Parameters\n ----------\n func1d : function\n A function that has to map to numpy array.\n\n axis : int (0,1)\n Axis along which arr is sliced.\n\n arr : ndarray (Ni…, M, Nk…)\n Input array\n\n args : any\n Additional arguments to func1d.\n\n kwargs : any\n Additional named arguments to func1d.\n\n Returns\n -------\n A numpy array to which func1D has applied.\n \"\"\"\n\n pool = multiprocessing.Pool()\n chunks = [\n (func1d, axis, arr_chunk, args, kwargs) for arr_chunk in np.array_split(arr, multiprocessing.cpu_count())\n ]\n chunk_results = pool.map(apply_along_axis_wrapper, chunks)\n pool.close()\n pool.join()\n return np.concatenate(chunk_results)\n" ]
[ [ "numpy.square", "numpy.full", "numpy.concatenate", "numpy.sum", "numpy.subtract", "numpy.apply_along_axis", "numpy.sqrt" ] ]
Alina-Mingchi/TOPO_final
[ "a8983006929b60bda0ed1d2e9a9130427b628431" ]
[ "MSG_TOPO/src/depth_utils/reprojections.py" ]
[ "import torch\n\n\ndef depth_to_absolute_coordinates(depth, depth_type, K=None, calibration=None):\n \"\"\"Convert depth map to absolute coordinates.\n\n Parameters\n ----------\n depth : array_like\n Depth map of shape (h, w) or tensor of depth maps of shape (**, 1, h, w).\n depth_type : str\n Type of the depth map, one of 'perspective' -- meaning the distance from point to camera,\n 'orthogonal' -- meaning the distance from point to image plane, or 'disparity'.\n K : array_like, optional\n Camera projection matrix.\n calibration : dict, optional if `type` is not 'disparity'\n Intrinsic parameters of the camera:\n cx, cy -- coordinates of the principal point in the image coordinate system (i.e in pixels),\n f -- focal length in pixels,\n baseline, required for disparity -- baseline of the camera in metric units.\n Either `K` or `calibration` is required.\n Returns\n -------\n coordinates : torch.Tensor\n Coordinates of the points (**, 3, h, w) in the camera coordinate system.\n X is pointing rightward, Y is pointing downward, Z is pointing forward.\n \"\"\"\n depth = torch.as_tensor(depth)\n dtype = depth.dtype\n h, w = depth.shape[-2:]\n\n if K is not None:\n K = torch.as_tensor(K, dtype=dtype)\n else:\n K = torch.zeros(3, 3, dtype=dtype)\n K[0, 0] = K[1, 1] = float(calibration['f'])\n K[2, 2] = 1\n K[0, 2] = float(calibration['cx'])\n K[1, 2] = float(calibration['cy'])\n\n v, u = torch.meshgrid(torch.arange(h, dtype=dtype) + .5, torch.arange(w, dtype=dtype) + .5)\n if depth.ndim < 3: # ensure depth has channel dimension\n depth = depth[None]\n ones = torch.ones_like(v)\n points = torch.einsum('lk,kij->lij', K.inverse(), torch.stack([u, v, ones]))\n if depth_type == 'perspective':\n points = torch.nn.functional.normalize(points, dim=-3)\n points = points.to(depth) * depth\n elif depth_type == 'orthogonal':\n points = points / points[2:3]\n points = points.to(depth) * depth\n elif depth_type == 'disparity':\n points = points / points[2:3]\n z = 
calibration['baseline'] * K[0, 0] / depth\n points = points.to(depth) * z\n else:\n raise ValueError(f'Unknown type {depth_type}')\n return points\n" ]
[ [ "torch.zeros", "torch.nn.functional.normalize", "torch.stack", "torch.arange", "torch.ones_like", "torch.as_tensor" ] ]
roholazandie/ParlAI
[ "32352cab81ecb666aefd596232c5ed9f33cbaeb9" ]
[ "parlai/agents/programr/nlp/semantic/sentiment_analysis.py" ]
[ "import numpy as np\nimport torch\nfrom transformers import DistilBertTokenizer, DistilBertForSequenceClassification\n\nfrom parlai.agents.programr.config.brain.nlp import BrainNLPConfiguration\nfrom parlai.agents.programr.config.brain.sentiment_analysis import BrainSentimentAnalysisConfiguration\n\n\nclass SentimentAnalysis():\n\n def __init__(self, ):\n pass\n\n @staticmethod\n def factory(config: BrainNLPConfiguration):\n if config.sentiment_analysis.method == \"corenlp\":\n print(\"corenlp is not supported anymore\")\n elif config.sentiment_analysis.method == \"distilbert\":\n print(\"Using distilbert for sentiment analysis\")\n return DistilBertSentimentAnalysis(config.sentiment_analysis)\n elif config.sentiment_analysis.method == \"default\":\n return DefaultSentimentAnalysis()\n\n def get_sentence_sentiment(self, sentence):\n raise NotImplementedError(\"Should be override to be used.\")\n\n def get_sentences_sentiments(self, sentences):\n raise NotImplementedError(\"Should be override to be used.\")\n\n def alpha(self):\n raise NotImplementedError(\"Should be override to be used.\")\n\n def positive_threshold(self):\n raise NotImplementedError(\"Should be override to be used.\")\n\n def negative_threshold(self):\n raise NotImplementedError(\"Should be override to be used.\")\n\n\nclass SentimentClassifer:\n\n def __init__(self, model, tokenizer):\n self.model = model\n self.tokenizer = tokenizer\n\n def __call__(self, text):\n try:\n input_ids = torch.tensor(self.tokenizer.encode(text, add_special_tokens=True)).unsqueeze(0)\n outputs = self.model(input_ids)\n outputs = outputs[0].detach().numpy()\n scores = np.exp(outputs) / np.exp(outputs).sum(-1)\n scores = scores[0].tolist()\n result = {\"negative\": scores[0], \"neutral\": scores[1], \"positive\": scores[2]}\n return result\n except Exception as ex:\n print(\"Exception caught classifying sentiment - {}\".format(ex))\n return {\"negative\": 0, \"neutral\": 1, \"positive\": 0}\n\n\nclass 
DistilBertSentimentAnalysis(SentimentAnalysis):\n\n def __init__(self, semantic_analysis_config: BrainSentimentAnalysisConfiguration):\n super().__init__()\n self._semantic_analysis_config = semantic_analysis_config\n model_dir = semantic_analysis_config.model_dir\n tokenizer = DistilBertTokenizer.from_pretrained(model_dir)\n model = DistilBertForSequenceClassification.from_pretrained(model_dir)\n self.sentiment_classifier = SentimentClassifer(model, tokenizer)\n\n def get_sentence_sentiment(self, sentence):\n sentence = sentence.lower()\n\n result = self.sentiment_classifier(sentence)\n print(\"result: {}\".format(result))\n\n sentiment = max(result, key=result.get)\n sentiment_distribution = list(result.values())\n print(\"sentiment score: {}\".format(sentiment))\n print(\"sentiment_distribution score: {}\".format(sentiment_distribution))\n return sentiment, sentiment_distribution\n\n def get_sentences_sentiments(self, sentences):\n sentiments, sentiment_distributions = [], []\n for sentence in sentences:\n sentiment, sentiment_distribution = self.get_sentence_sentiment(sentence)\n sentiments.append(sentiment)\n sentiment_distributions.append(sentiment_distribution)\n\n return sentiments, sentiment_distributions\n\n def expected_sentiment_value(self, sentiment_distribution):\n value = -sentiment_distribution[0] + sentiment_distribution[2]\n return value\n\n @property\n def alpha(self):\n return self._semantic_analysis_config.alpha\n\n @property\n def positive_threshold(self):\n return self._semantic_analysis_config.positive_threshold\n\n @property\n def negative_threshold(self):\n return self._semantic_analysis_config.negative_threshold\n\n\nclass DefaultSentimentAnalysis(SentimentAnalysis):\n\n def __init__(self):\n super().__init__()\n\n def get_sentence_sentiment(self, sentence):\n return\n\n def get_sentences_sentiments(self, sentences):\n return\n\n def alpha(self):\n return\n\n def positive_threshold(self):\n return\n\n def negative_threshold(self):\n 
return\n\n\nif __name__ == \"__main__\":\n semantic_analysis_config = BrainSentimentAnalysisConfiguration()\n semantic_analysis_config._model_dir = \"/home/rohola/codes/program-r/models/pretrain_distilbert_full_sentiment\"\n s = DistilBertSentimentAnalysis(semantic_analysis_config)\n #a = s.get_sentence_sentiment(\"this is so cute!\")\n a = s.get_sentence_sentiment(\"I am happy\")\n print(a)\n l = s.get_sentences_sentiments([\"this is cute!\", \"that's horrible\"])\n print(l)\n k = s.expected_sentiment_value(a[1])\n print(k)\n" ]
[ [ "numpy.exp" ] ]
auderson/numba
[ "3d67c9850ab56457f418cf40af6245fd9c337705", "3d67c9850ab56457f418cf40af6245fd9c337705" ]
[ "numba/cuda/tests/cudapy/test_datetime.py", "numba/cuda/tests/cudapy/test_array.py" ]
[ "import numpy as np\n\nfrom numba import cuda, vectorize, guvectorize\nfrom numba.np.numpy_support import from_dtype\nfrom numba.cuda.testing import CUDATestCase, skip_on_cudasim\nimport unittest\n\n\nclass TestCudaDateTime(CUDATestCase):\n def test_basic_datetime_kernel(self):\n @cuda.jit\n def foo(start, end, delta):\n for i in range(cuda.grid(1), delta.size, cuda.gridsize(1)):\n delta[i] = end[i] - start[i]\n\n arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')\n arr2 = arr1 + np.random.randint(0, 10000, arr1.size)\n delta = np.zeros_like(arr1, dtype='timedelta64[D]')\n\n foo[1, 32](arr1, arr2, delta)\n\n self.assertPreciseEqual(delta, arr2 - arr1)\n\n def test_scalar_datetime_kernel(self):\n @cuda.jit\n def foo(dates, target, delta, matches, outdelta):\n for i in range(cuda.grid(1), matches.size, cuda.gridsize(1)):\n matches[i] = dates[i] == target\n outdelta[i] = dates[i] - delta\n arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')\n target = arr1[5] # datetime\n delta = arr1[6] - arr1[5] # timedelta\n matches = np.zeros_like(arr1, dtype=np.bool_)\n outdelta = np.zeros_like(arr1, dtype='datetime64[D]')\n\n foo[1, 32](arr1, target, delta, matches, outdelta)\n where = matches.nonzero()\n\n self.assertEqual(list(where), [5])\n self.assertPreciseEqual(outdelta, arr1 - delta)\n\n @skip_on_cudasim('ufunc API unsupported in the simulator')\n def test_ufunc(self):\n datetime_t = from_dtype(np.dtype('datetime64[D]'))\n\n @vectorize([(datetime_t, datetime_t)], target='cuda')\n def timediff(start, end):\n return end - start\n\n arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')\n arr2 = arr1 + np.random.randint(0, 10000, arr1.size)\n\n delta = timediff(arr1, arr2)\n\n self.assertPreciseEqual(delta, arr2 - arr1)\n\n @skip_on_cudasim('ufunc API unsupported in the simulator')\n def test_gufunc(self):\n datetime_t = from_dtype(np.dtype('datetime64[D]'))\n timedelta_t = from_dtype(np.dtype('timedelta64[D]'))\n\n @guvectorize([(datetime_t, 
datetime_t, timedelta_t[:])], '(),()->()',\n target='cuda')\n def timediff(start, end, out):\n out[0] = end - start\n\n arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')\n arr2 = arr1 + np.random.randint(0, 10000, arr1.size)\n\n delta = timediff(arr1, arr2)\n\n self.assertPreciseEqual(delta, arr2 - arr1)\n\n @skip_on_cudasim('no .copy_to_host() in the simulator')\n def test_datetime_view_as_int64(self):\n arr = np.arange('2005-02', '2006-02', dtype='datetime64[D]')\n darr = cuda.to_device(arr)\n viewed = darr.view(np.int64)\n self.assertPreciseEqual(arr.view(np.int64), viewed.copy_to_host())\n self.assertEqual(viewed.gpu_data, darr.gpu_data)\n\n @skip_on_cudasim('no .copy_to_host() in the simulator')\n def test_timedelta_view_as_int64(self):\n arr = np.arange('2005-02', '2006-02', dtype='datetime64[D]')\n arr = arr - (arr - 1)\n self.assertEqual(arr.dtype, np.dtype('timedelta64[D]'))\n darr = cuda.to_device(arr)\n viewed = darr.view(np.int64)\n self.assertPreciseEqual(arr.view(np.int64), viewed.copy_to_host())\n self.assertEqual(viewed.gpu_data, darr.gpu_data)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "import numpy as np\n\nfrom numba.cuda.testing import unittest, CUDATestCase\nfrom numba.cuda.testing import skip_on_cudasim, skip_unless_cudasim\nfrom numba import config, cuda\n\n\nif config.ENABLE_CUDASIM:\n ARRAY_LIKE_FUNCTIONS = (cuda.device_array_like, cuda.pinned_array_like)\nelse:\n ARRAY_LIKE_FUNCTIONS = (cuda.device_array_like, cuda.mapped_array_like,\n cuda.pinned_array_like)\n\n\nclass TestCudaArray(CUDATestCase):\n def test_gpu_array_zero_length(self):\n x = np.arange(0)\n dx = cuda.to_device(x)\n hx = dx.copy_to_host()\n self.assertEqual(x.shape, dx.shape)\n self.assertEqual(x.size, dx.size)\n self.assertEqual(x.shape, hx.shape)\n self.assertEqual(x.size, hx.size)\n\n def test_null_shape(self):\n null_shape = ()\n shape1 = cuda.device_array(()).shape\n shape2 = cuda.device_array_like(np.ndarray(())).shape\n 
self.assertEqual(shape1, null_shape)\n self.assertEqual(shape2, null_shape)\n\n def test_gpu_array_strided(self):\n\n @cuda.jit('void(double[:])')\n def kernel(x):\n i = cuda.grid(1)\n if i < x.shape[0]:\n x[i] = i\n\n x = np.arange(10, dtype=np.double)\n y = np.ndarray(shape=10 * 8, buffer=x, dtype=np.byte)\n z = np.ndarray(9, buffer=y[4:-4], dtype=np.double)\n kernel[10, 10](z)\n self.assertTrue(np.allclose(z, list(range(9))))\n\n def test_gpu_array_interleaved(self):\n\n @cuda.jit('void(double[:], double[:])')\n def copykernel(x, y):\n i = cuda.grid(1)\n if i < x.shape[0]:\n x[i] = i\n y[i] = i\n\n x = np.arange(10, dtype=np.double)\n y = x[:-1:2]\n # z = x[1::2]\n # n = y.size\n try:\n cuda.devicearray.auto_device(y)\n except ValueError:\n pass\n else:\n raise AssertionError(\"Should raise exception complaining the \"\n \"contiguous-ness of the array.\")\n # Should we handle this use case?\n # assert z.size == y.size\n # copykernel[1, n](y, x)\n # print(y, z)\n # assert np.all(y == z)\n # assert np.all(y == list(range(n)))\n\n def test_auto_device_const(self):\n d, _ = cuda.devicearray.auto_device(2)\n self.assertTrue(np.all(d.copy_to_host() == np.array(2)))\n\n def _test_array_like_same(self, like_func, array):\n \"\"\"\n Tests of *_array_like where shape, strides, dtype, and flags should\n all be equal.\n \"\"\"\n array_like = like_func(array)\n self.assertEqual(array.shape, array_like.shape)\n self.assertEqual(array.strides, array_like.strides)\n self.assertEqual(array.dtype, array_like.dtype)\n self.assertEqual(array.flags['C_CONTIGUOUS'],\n array_like.flags['C_CONTIGUOUS'])\n self.assertEqual(array.flags['F_CONTIGUOUS'],\n array_like.flags['F_CONTIGUOUS'])\n\n def test_array_like_1d(self):\n d_a = cuda.device_array(10, order='C')\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n self._test_array_like_same(like_func, d_a)\n\n def test_array_like_2d(self):\n d_a = cuda.device_array((10, 12), order='C')\n for like_func in 
ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n self._test_array_like_same(like_func, d_a)\n\n def test_array_like_2d_transpose(self):\n d_a = cuda.device_array((10, 12), order='C')\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n self._test_array_like_same(like_func, d_a)\n\n def test_array_like_3d(self):\n d_a = cuda.device_array((10, 12, 14), order='C')\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n self._test_array_like_same(like_func, d_a)\n\n def test_array_like_1d_f(self):\n d_a = cuda.device_array(10, order='F')\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n self._test_array_like_same(like_func, d_a)\n\n def test_array_like_2d_f(self):\n d_a = cuda.device_array((10, 12), order='F')\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n self._test_array_like_same(like_func, d_a)\n\n def test_array_like_2d_f_transpose(self):\n d_a = cuda.device_array((10, 12), order='F')\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n self._test_array_like_same(like_func, d_a)\n\n def test_array_like_3d_f(self):\n d_a = cuda.device_array((10, 12, 14), order='F')\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n self._test_array_like_same(like_func, d_a)\n\n def _test_array_like_view(self, like_func, view, d_view):\n \"\"\"\n Tests of device_array_like where the original array is a view - the\n strides should not be equal because a contiguous array is expected.\n \"\"\"\n nb_like = like_func(d_view)\n self.assertEqual(d_view.shape, nb_like.shape)\n self.assertEqual(d_view.dtype, nb_like.dtype)\n\n # Use NumPy as a reference for the expected strides\n np_like = np.zeros_like(view)\n self.assertEqual(nb_like.strides, np_like.strides)\n self.assertEqual(nb_like.flags['C_CONTIGUOUS'],\n np_like.flags['C_CONTIGUOUS'])\n 
self.assertEqual(nb_like.flags['F_CONTIGUOUS'],\n np_like.flags['F_CONTIGUOUS'])\n\n def test_array_like_1d_view(self):\n shape = 10\n view = np.zeros(shape)[::2]\n d_view = cuda.device_array(shape)[::2]\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n self._test_array_like_view(like_func, view, d_view)\n\n def test_array_like_1d_view_f(self):\n shape = 10\n view = np.zeros(shape, order='F')[::2]\n d_view = cuda.device_array(shape, order='F')[::2]\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n self._test_array_like_view(like_func, view, d_view)\n\n def test_array_like_2d_view(self):\n shape = (10, 12)\n view = np.zeros(shape)[::2, ::2]\n d_view = cuda.device_array(shape)[::2, ::2]\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n self._test_array_like_view(like_func, view, d_view)\n\n def test_array_like_2d_view_f(self):\n shape = (10, 12)\n view = np.zeros(shape, order='F')[::2, ::2]\n d_view = cuda.device_array(shape, order='F')[::2, ::2]\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n self._test_array_like_view(like_func, view, d_view)\n\n @skip_on_cudasim('Numba and NumPy stride semantics differ for transpose')\n def test_array_like_2d_view_transpose_device(self):\n shape = (10, 12)\n d_view = cuda.device_array(shape)[::2, ::2].T\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n # This is a special case (see issue #4974) because creating the\n # transpose creates a new contiguous allocation with different\n # strides. 
In this case, rather than comparing against NumPy,\n # we can only compare against expected values.\n like = like_func(d_view)\n self.assertEqual(d_view.shape, like.shape)\n self.assertEqual(d_view.dtype, like.dtype)\n self.assertEqual((40, 8), like.strides)\n self.assertTrue(like.flags['C_CONTIGUOUS'])\n self.assertFalse(like.flags['F_CONTIGUOUS'])\n\n @skip_unless_cudasim('Numba and NumPy stride semantics differ for '\n 'transpose')\n def test_array_like_2d_view_transpose_simulator(self):\n shape = (10, 12)\n view = np.zeros(shape)[::2, ::2].T\n d_view = cuda.device_array(shape)[::2, ::2].T\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n # On the simulator, the transpose has different strides to on a\n # CUDA device (See issue #4974). Here we can compare strides\n # against NumPy as a reference.\n np_like = np.zeros_like(view)\n nb_like = like_func(d_view)\n self.assertEqual(d_view.shape, nb_like.shape)\n self.assertEqual(d_view.dtype, nb_like.dtype)\n self.assertEqual(np_like.strides, nb_like.strides)\n self.assertEqual(np_like.flags['C_CONTIGUOUS'],\n nb_like.flags['C_CONTIGUOUS'])\n self.assertEqual(np_like.flags['F_CONTIGUOUS'],\n nb_like.flags['F_CONTIGUOUS'])\n\n def test_array_like_2d_view_f_transpose(self):\n shape = (10, 12)\n view = np.zeros(shape, order='F')[::2, ::2].T\n d_view = cuda.device_array(shape, order='F')[::2, ::2].T\n for like_func in ARRAY_LIKE_FUNCTIONS:\n with self.subTest(like_func=like_func):\n self._test_array_like_view(like_func, view, d_view)\n\n @skip_on_cudasim('Kernel overloads not created in the simulator')\n def test_issue_4628(self):\n # CUDA Device arrays were reported as always being typed with 'A' order\n # so launching the kernel with a host array and then a device array\n # resulted in two overloads being compiled - one for 'C' order from\n # the host array, and one for 'A' order from the device array. 
With the\n # resolution of this issue, the order of the device array is also 'C',\n # so after the kernel launches there should only be one overload of\n # the function.\n @cuda.jit\n def func(A, out):\n i = cuda.grid(1)\n out[i] = A[i] * 2\n\n n = 128\n a = np.ones((n,))\n d_a = cuda.to_device(a)\n result = np.zeros((n,))\n\n func[1, 128](a, result)\n func[1, 128](d_a, result)\n\n self.assertEqual(1, len(func.overloads))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.zeros_like", "numpy.random.randint", "numpy.arange", "numpy.dtype" ], [ "numpy.zeros_like", "numpy.array", "numpy.zeros", "numpy.ones", "numpy.ndarray", "numpy.arange" ] ]
declare-lab/BBFN
[ "1a251bd2a82193902591a15ae2f28b902c295310" ]
[ "src/main.py" ]
[ "import torch\nimport argparse\nimport numpy as np\n\nfrom utils import *\nfrom torch.utils.data import DataLoader\nfrom solver import Solver\nfrom config import get_config\nfrom data_loader import get_loader\n\nparser = argparse.ArgumentParser(description='MOSEI Sentiment Analysis')\nparser.add_argument('-f', default='', type=str)\n\n# Fixed\nparser.add_argument('--model', type=str, default='MulT',\n help='name of the model to use (Transformer, etc.)')\n\n# Tasks\nparser.add_argument('--vonly', action='store_true',\n help='use the crossmodal fusion into v (default: False)')\nparser.add_argument('--aonly', action='store_true',\n help='use the crossmodal fusion into a (default: False)')\nparser.add_argument('--lonly', action='store_true',\n help='use the crossmodal fusion into l (default: False)')\nparser.add_argument('--aligned', action='store_true',\n help='consider aligned experiment or not (default: False)')\nparser.add_argument('--dataset', type=str, default='mosi', choices=['mosi','mosei','ur_funny'],\n help='dataset to use (default: mosei)')\nparser.add_argument('--data_path', type=str, default='data',\n help='path for storing the dataset')\n\n# Dropouts\nparser.add_argument('--attn_dropout', type=float, default=0.1,\n help='attention dropout')\nparser.add_argument('--attn_dropout_a', type=float, default=0.0,\n help='attention dropout (for audio)')\nparser.add_argument('--attn_dropout_v', type=float, default=0.0,\n help='attention dropout (for visual)')\nparser.add_argument('--relu_dropout', type=float, default=0.1,\n help='relu dropout')\nparser.add_argument('--embed_dropout', type=float, default=0.25,\n help='embedding dropout')\nparser.add_argument('--res_dropout', type=float, default=0.1,\n help='residual block dropout')\nparser.add_argument('--out_dropout', type=float, default=0.0,\n help='output layer dropout')\nparser.add_argument('--div_dropout', type=float, default=0.1)\n\n# Embedding\nparser.add_argument('--use_bert', action='store_true', 
help='whether to use bert \\\n to encode text inputs (default: False)')\n\n# Losses\nparser.add_argument('--lambda_d', type=float, default=0.1, help='portion of discriminator loss added to total loss (default: 0.1)')\n\n# Architecture\nparser.add_argument('--nlevels', type=int, default=5,\n help='number of layers in the network (default: 5)')\nparser.add_argument('--num_heads', type=int, default=5,\n help='number of heads for the transformer network (default: 5)')\nparser.add_argument('--attn_mask', action='store_false',\n help='use attention mask for Transformer (default: true)')\nparser.add_argument('--attn_hidden_size', type=int, default=40,\n help='The size of hiddens in all transformer blocks')\nparser.add_argument('--uni_nlevels', type=int, default=3,\n help='number of transformer blocks for unimodal attention')\nparser.add_argument('--enc_layers', type=int, default=1,\n help='Layers of GRU or LSTM in sequence encoder')\nparser.add_argument('--use_disc', action='store_true',\n help='whether to add a discriminator to the domain-invariant encoder and the corresponding loss to the final training process')\n\nparser.add_argument('--proj_type', type=str, default='cnn',help='network type for input projection', choices=['LINEAR', 'CNN','LSTM','GRU'])\nparser.add_argument('--lksize', type=int, default=3,\n help='Kernel size of language projection CNN')\nparser.add_argument('--vksize', type=int, default=3,\n help='Kernel size of visual projection CNN')\nparser.add_argument('--aksize', type=int, default=3,\n help='Kernel size of accoustic projection CNN')\n\n# Tuning\nparser.add_argument('--batch_size', type=int, default=24, metavar='N',\n help='batch size (default: 24)')\nparser.add_argument('--clip', type=float, default=0.8,\n help='gradient clip value (default: 0.8)')\nparser.add_argument('--lr', type=float, default=5e-4,\n help='initial learning rate (default: 1e-3)')\nparser.add_argument('--optim', type=str, default='Adam',\n help='optimizer to use (default: 
Adam)')\nparser.add_argument('--num_epochs', type=int, default=40,\n help='number of epochs (default: 40)')\nparser.add_argument('--when', type=int, default=20,\n help='when to decay learning rate (default: 20)')\nparser.add_argument('--batch_chunk', type=int, default=1,\n help='number of chunks per batch (default: 1)')\n\n# Logistics\nparser.add_argument('--log_interval', type=int, default=30,\n help='frequency of result logging (default: 30)')\nparser.add_argument('--seed', type=int, default=1111,\n help='random seed')\nparser.add_argument('--no_cuda', action='store_true',\n help='do not use cuda')\nparser.add_argument('--name', type=str, default='mult',\n help='name of the trial (default: \"mult\")')\nargs = parser.parse_args()\n\n# set numpy manual seed\n# np.random.seed(args.seed)\n\ntorch.manual_seed(args.seed)\nvalid_partial_mode = args.lonly + args.vonly + args.aonly\n\n# configurations for data_loader\ndataset = str.lower(args.dataset.strip())\nbatch_size = args.batch_size\n\nif valid_partial_mode == 0:\n args.lonly = args.vonly = args.aonly = True\nelif valid_partial_mode != 1:\n raise ValueError(\"You can only choose one of {l/v/a}only.\")\n\nuse_cuda = False\n\noutput_dim_dict = {\n 'mosi': 1,\n 'mosei_senti': 1,\n 'iemocap': 8,\n # 'ur_funny': 1 # comment this if using BCELoss\n 'ur_funny': 2 # comment this if using CrossEntropyLoss\n}\n\ncriterion_dict = {\n 'mosi': 'L1Loss',\n 'iemocap': 'CrossEntropyLoss',\n 'ur_funny': 'CrossEntropyLoss'\n}\n\ntorch.set_default_tensor_type('torch.FloatTensor')\nif torch.cuda.is_available():\n if args.no_cuda:\n print(\"WARNING: You have a CUDA device, so you should probably not run with --no_cuda\")\n else:\n torch.cuda.manual_seed_all(args.seed)\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n use_cuda = True\n\n####################################################################\n####### Load the dataset (aligned 
or non-aligned) ######\n####################################################################\n\nprint(\"Start loading the data....\")\n\ntrain_config = get_config(dataset, mode='train', batch_size=args.batch_size, use_bert=args.use_bert)\nvalid_config = get_config(dataset, mode='valid', batch_size=args.batch_size, use_bert=args.use_bert)\ntest_config = get_config(dataset, mode='test', batch_size=args.batch_size, use_bert=args.use_bert)\n\n# print(train_config)\n\nhyp_params = args\n\n# pretrained_emb saved in train_config here\ntrain_loader = get_loader(hyp_params, train_config, shuffle=True)\nvalid_loader = get_loader(hyp_params, valid_config, shuffle=False)\ntest_loader = get_loader(hyp_params, test_config, shuffle=False)\n\nprint('Finish loading the data....')\nif not args.aligned:\n print(\"### Note: You are running in unaligned mode.\")\n\n####################################################################\n#\n# Hyperparameters\n#\n####################################################################\n\ntorch.autograd.set_detect_anomaly(True)\n\n# addintional appending\nhyp_params.word2id = train_config.word2id\nhyp_params.pretrained_emb = train_config.pretrained_emb\n\n# architecture parameters\nhyp_params.orig_d_l, hyp_params.orig_d_a, hyp_params.orig_d_v = train_config.lav_dim\nif hyp_params.use_bert:\n hyp_params.orig_d_l = 768\nhyp_params.l_len, hyp_params.a_len, hyp_params.v_len = train_config.lav_len\nhyp_params.layers = args.nlevels\nhyp_params.l_ksize = args.lksize\nhyp_params.v_ksize = args.vksize\nhyp_params.a_ksize = args.aksize\n\nhyp_params.proj_type = args.proj_type.lower()\nhyp_params.num_enc_layers = args.enc_layers\n\nhyp_params.use_cuda = use_cuda\nhyp_params.dataset = hyp_params.data = dataset\nhyp_params.when = args.when\nhyp_params.attn_dim = args.attn_hidden_size\nhyp_params.batch_chunk = args.batch_chunk\n# hyp_params.n_train, hyp_params.n_valid, hyp_params.n_test = train_len, valid_len, test_len\nhyp_params.model = 
str.upper(args.model.strip())\nhyp_params.output_dim = output_dim_dict.get(dataset, 1)\n# hyp_params.criterion = criterion_dict.get(dataset, 'MAELoss')\nhyp_params.criterion = criterion_dict.get(dataset, 'MSELoss')\n\n\nif __name__ == '__main__':\n solver = Solver(hyp_params, train_loader=train_loader, dev_loader=valid_loader,\n test_loader=test_loader, is_train=True)\n solver.train_and_eval()\n exit()\n\n" ]
[ [ "torch.cuda.manual_seed_all", "torch.set_default_tensor_type", "torch.autograd.set_detect_anomaly", "torch.manual_seed", "torch.cuda.is_available" ] ]
Master-cai/C2
[ "cb6dbfdd9a1928139bcea9e926256b29f4fc6e8e" ]
[ "C2Server/yolo/yolo.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nClass definition of YOLO_v3 style detection model on image and video\n\"\"\"\n\nimport colorsys\nimport os\nfrom timeit import default_timer as timer\n\nimport random\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.layers import Input\nfrom PIL import Image, ImageFont, ImageDraw\n\nfrom yolo.yolo3.model import yolo_eval, yolo_body, tiny_yolo_body\nfrom yolo.yolo3.utils import letterbox_image\nimport os\nfrom keras.utils import multi_gpu_model\n\nclass YOLO(object):\n _defaults = {\n \"model_path\": 'yolo/trained_weights_final.h5',\n \"anchors_path\": 'yolo/model_data/yolo_anchors.txt',\n \"classes_path\": 'yolo/model_data/my_class.txt',\n \"score\" : 0.3,\n \"iou\" : 0.45,\n \"model_image_size\" : (416, 416),\n \"gpu_num\" : 1,\n }\n\n @classmethod\n def get_defaults(cls, n):\n if n in cls._defaults:\n return cls._defaults[n]\n else:\n return \"Unrecognized attribute name '\" + n + \"'\"\n\n def __init__(self, **kwargs):\n self.__dict__.update(self._defaults) # set up default values\n self.__dict__.update(kwargs) # and update with user overrides\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.sess = K.get_session()\n self.boxes, self.scores, self.classes = self.generate()\n\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n print('*'*50 + classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def _get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n def generate(self):\n model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\n\n # Load model, or construct model and load 
weights.\n num_anchors = len(self.anchors)\n num_classes = len(self.class_names)\n is_tiny_version = num_anchors==6 # default setting\n try:\n self.yolo_model = load_model(model_path, compile=False)\n except:\n self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \\\n if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)\n self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match\n else:\n assert self.yolo_model.layers[-1].output_shape[-1] == \\\n num_anchors/len(self.yolo_model.output) * (num_classes + 5), \\\n 'Mismatch between model and given anchor and class sizes'\n # print('*' * 50)\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n np.random.seed(10103) # Fixed seed for consistent colors across runs.\n np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\n np.random.seed(None) # Reset seed to default.\n\n # Generate output tensor targets for filtered bounding boxes.\n self.input_image_shape = K.placeholder(shape=(2, ))\n if self.gpu_num>=2:\n self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\n len(self.class_names), self.input_image_shape,\n score_threshold=self.score, iou_threshold=self.iou)\n return boxes, scores, classes\n\n def detect_image(self, image):\n start = timer()\n\n if self.model_image_size != (None, None):\n assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\n assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\n boxed_image = letterbox_image(image, 
tuple(reversed(self.model_image_size)))\n else:\n new_image_size = (image.width - (image.width % 32),\n image.height - (image.height % 32))\n boxed_image = letterbox_image(image, new_image_size)\n image_data = np.array(boxed_image, dtype='float32')\n\n print(image_data.shape)\n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n out_boxes, out_scores, out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: image_data,\n self.input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n\n print('Found {} boxes for {}'.format(len(out_boxes), 'img'))\n\n font = ImageFont.truetype(font='font/FiraMono-Medium.otf',\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = (image.size[0] + image.size[1]) // 300\n \n # for box in out_boxes:\n # print(box)\n\n posList = []\n\n for i, c in reversed(list(enumerate(out_classes))):\n predicted_class = self.class_names[c]\n box = out_boxes[i]\n # print('*'*50)\n # print(out_classes.size)\n # print(i)\n score = out_scores[i]\n \n label = '{} {:.2f}'.format(predicted_class, score)\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n print(label, (left, top), (right, bottom))\n posList.append([i, label, (left, top), (right, bottom)])\n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n # My kingdom for a good redistributable image drawing library.\n for i in range(thickness):\n draw.rectangle(# box\n [left + i, top + i, right - i, bottom - i],\n outline=self.colors[c])\n draw.rectangle( # label background\n 
[tuple(text_origin), tuple(text_origin + label_size)],\n fill=self.colors[c])\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n del draw\n\n end = timer()\n print(end - start)\n image.save('test.jpg')\n return image, posList\n\n def close_session(self):\n self.sess.close()\n\ndef detect_video(yolo, video_path, output_path=\"\"):\n import cv2\n vid = cv2.VideoCapture(video_path)\n if not vid.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))\n video_fps = vid.get(cv2.CAP_PROP_FPS)\n video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n isOutput = True if output_path != \"\" else False\n if isOutput:\n print(\"!!! TYPE:\", type(output_path), type(video_FourCC), type(video_fps), type(video_size))\n out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)\n accum_time = 0\n curr_fps = 0\n fps = \"FPS: ??\"\n prev_time = timer()\n while True:\n return_value, frame = vid.read()\n image = Image.fromarray(frame)\n image = yolo.detect_image(image)\n result = np.asarray(image)\n curr_time = timer()\n exec_time = curr_time - prev_time\n prev_time = curr_time\n accum_time = accum_time + exec_time\n curr_fps = curr_fps + 1\n if accum_time > 1:\n accum_time = accum_time - 1\n fps = \"FPS: \" + str(curr_fps)\n curr_fps = 0\n cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.50, color=(255, 0, 0), thickness=2)\n cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"result\", result)\n if isOutput:\n out.write(result)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n yolo.close_session()\n\n\n\n# if __name__ == \"__main__\":\n# yolo = YOLO(score=0.15)\n# # print('*'*50)\n# # print(yolo.score)\n# # path = 'VOCdevkit\\VOC2007\\JPEGImages\\PartA_00{}.jpg'.format(str(random.randint(100, 1000)))\n# # path = '00002000_640x480.png'\n# path = '1504070977-JCwHA.jpg'\n# try:\n# image = Image.open(path)\n# 
except:\n# print('Open Error! Check path and try again!')\n# else:\n# # image.show()\n# # print(image.size)\n# r_image = yolo.detect_image(image)\n# r_image.show()\n# print(path)\n# yolo.close_session()\n\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.random.seed", "numpy.random.shuffle", "numpy.expand_dims", "numpy.floor" ] ]
airium/pysot
[ "c34158ba7b4a9b73938320e0019b61e6460537fc", "c34158ba7b4a9b73938320e0019b61e6460537fc" ]
[ "pysot/tracker/siammask_tracker.py", "pysot/models/head/rpn.py" ]
[ "# Copyright (c) SenseTime. All Rights Reserved.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport cv2\nimport numpy as np\n\nfrom pysot.core.config import cfg\nfrom pysot.utils.bbox import cxy_wh_2_rect\nfrom pysot.tracker.siamrpn_tracker import SiamRPNTracker\n\n\nclass SiamMaskTracker(SiamRPNTracker):\n def __init__(self, model):\n super(SiamMaskTracker, self).__init__(model)\n assert hasattr(self.model, 'mask_head'), \\\n \"SiamMaskTracker must have mask_head\"\n assert hasattr(self.model, 'refine_head'), \\\n \"SiamMaskTracker must have refine_head\"\n\n def _crop_back(self, image, bbox, out_sz, padding=0):\n a = (out_sz[0] - 1) / bbox[2]\n b = (out_sz[1] - 1) / bbox[3]\n c = -a * bbox[0]\n d = -b * bbox[1]\n mapping = np.array([[a, 0, c], [0, b, d]]).astype(np.float)\n crop = cv2.warpAffine(image,\n mapping, (out_sz[0], out_sz[1]),\n flags=cv2.INTER_LINEAR,\n borderMode=cv2.BORDER_CONSTANT,\n borderValue=padding)\n return crop\n\n def _mask_post_processing(self, mask):\n target_mask = (mask > cfg.TRACK.MASK_THERSHOLD)\n target_mask = target_mask.astype(np.uint8)\n if cv2.__version__[-5] == '4':\n contours, _ = cv2.findContours(target_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n else:\n _, contours, _ = cv2.findContours(target_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n cnt_area = [cv2.contourArea(cnt) for cnt in contours]\n if len(contours) != 0 and np.max(cnt_area) > 100:\n contour = contours[np.argmax(cnt_area)]\n polygon = contour.reshape(-1, 2)\n prbox = cv2.boxPoints(cv2.minAreaRect(polygon))\n rbox_in_img = prbox\n else: # empty mask\n location = cxy_wh_2_rect(self.center_pos, self.size)\n rbox_in_img = np.array([[location[0], location[1]], [location[0] + location[2], location[1]],\n [location[0] + location[2], location[1] + location[3]],\n [location[0], location[1] + location[3]]])\n return rbox_in_img\n\n def track(self, 
img):\n \"\"\"\n args:\n img(np.ndarray): BGR image\n return:\n bbox(list):[x, y, width, height]\n \"\"\"\n w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)\n h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)\n s_z = np.sqrt(w_z * h_z)\n scale_z = cfg.TRACK.EXEMPLAR_SIZE / s_z\n s_x = s_z * (cfg.TRACK.INSTANCE_SIZE / cfg.TRACK.EXEMPLAR_SIZE)\n s_x = round(s_x)\n\n x_crop = self.get_subwindow(img, self.center_pos, cfg.TRACK.INSTANCE_SIZE, s_x, self.channel_average)\n crop_box = [self.center_pos[0] - s_x / 2, self.center_pos[1] - s_x / 2, s_x, s_x]\n\n outputs = self.model.track(x_crop)\n score = self._convert_score(outputs['cls'])\n pred_bbox = self._convert_bbox(outputs['loc'], self.anchors)\n\n def change(r):\n return np.maximum(r, 1. / r)\n\n def sz(w, h):\n pad = (w + h) * 0.5\n return np.sqrt((w + pad) * (h + pad))\n\n # scale penalty\n s_c = change(sz(pred_bbox[2, :], pred_bbox[3, :]) / (sz(self.size[0] * scale_z, self.size[1] * scale_z)))\n # aspect ratio penalty\n r_c = change((self.size[0] / self.size[1]) / (pred_bbox[2, :] / pred_bbox[3, :]))\n penalty = np.exp(-(r_c * s_c - 1) * cfg.TRACK.PENALTY_K)\n pscore = penalty * score\n\n # window penalty\n pscore = pscore * (1 - cfg.TRACK.WINDOW_INFLUENCE) + \\\n self.window * cfg.TRACK.WINDOW_INFLUENCE\n best_idx = np.argmax(pscore)\n\n bbox = pred_bbox[:, best_idx] / scale_z\n lr = penalty[best_idx] * score[best_idx] * cfg.TRACK.LR\n\n cx = bbox[0] + self.center_pos[0]\n cy = bbox[1] + self.center_pos[1]\n\n # smooth bbox\n width = self.size[0] * (1 - lr) + bbox[2] * lr\n height = self.size[1] * (1 - lr) + bbox[3] * lr\n\n # clip boundary\n cx, cy, width, height = self._bbox_clip(cx, cy, width, height, img.shape[:2])\n\n # udpate state\n self.center_pos = np.array([cx, cy])\n self.size = np.array([width, height])\n\n bbox = [cx - width / 2, cy - height / 2, width, height]\n best_score = score[best_idx]\n\n # processing mask\n pos = np.unravel_index(best_idx, (5, self.score_size, 
self.score_size))\n delta_x, delta_y = pos[2], pos[1]\n\n mask = self.model.mask_refine((delta_y, delta_x)).sigmoid().squeeze()\n out_size = cfg.TRACK.MASK_OUTPUT_SIZE\n mask = mask.view(out_size, out_size).cpu().data.numpy()\n\n s = crop_box[2] / cfg.TRACK.INSTANCE_SIZE\n base_size = cfg.TRACK.BASE_SIZE\n stride = cfg.ANCHOR.STRIDE\n sub_box = [\n crop_box[0] + (delta_x - base_size / 2) * stride * s, crop_box[1] + (delta_y - base_size / 2) * stride * s,\n s * cfg.TRACK.EXEMPLAR_SIZE, s * cfg.TRACK.EXEMPLAR_SIZE\n ]\n s = out_size / sub_box[2]\n\n im_h, im_w = img.shape[:2]\n back_box = [-sub_box[0] * s, -sub_box[1] * s, im_w * s, im_h * s]\n mask_in_img = self._crop_back(mask, back_box, (im_w, im_h))\n polygon = self._mask_post_processing(mask_in_img)\n polygon = polygon.flatten().tolist()\n return {\n 'bbox': bbox,\n 'best_score': best_score,\n 'mask': mask_in_img,\n 'polygon': polygon,\n }\n", "# Copyright (c) SenseTime. All Rights Reserved.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom pysot.core.xcorr import xcorr_fast, xcorr_depthwise\nfrom pysot.models.init_weight import init_weights\n\n\nclass RPN(nn.Module):\n def __init__(self):\n super(RPN, self).__init__()\n\n def forward(self, z_f, x_f):\n raise NotImplementedError\n\n\nclass UPChannelRPN(RPN):\n def __init__(self, anchor_num=5, feature_in=256):\n super(UPChannelRPN, self).__init__()\n\n cls_output = 2 * anchor_num\n loc_output = 4 * anchor_num\n\n self.template_cls_conv = nn.Conv2d(feature_in, feature_in * cls_output, kernel_size=3)\n self.template_loc_conv = nn.Conv2d(feature_in, feature_in * loc_output, kernel_size=3)\n\n self.search_cls_conv = nn.Conv2d(feature_in, feature_in, kernel_size=3)\n self.search_loc_conv = nn.Conv2d(feature_in, feature_in, kernel_size=3)\n\n self.loc_adjust = nn.Conv2d(loc_output, 
loc_output, kernel_size=1)\n\n def forward(self, z_f, x_f):\n cls_kernel = self.template_cls_conv(z_f)\n loc_kernel = self.template_loc_conv(z_f)\n\n cls_feature = self.search_cls_conv(x_f)\n loc_feature = self.search_loc_conv(x_f)\n\n cls = xcorr_fast(cls_feature, cls_kernel)\n loc = self.loc_adjust(xcorr_fast(loc_feature, loc_kernel))\n return cls, loc\n\n\nclass DepthwiseXCorr(nn.Module):\n def __init__(self, in_channels, hidden, out_channels, kernel_size=3, hidden_kernel_size=5):\n super(DepthwiseXCorr, self).__init__()\n self.conv_kernel = nn.Sequential(\n nn.Conv2d(in_channels, hidden, kernel_size=kernel_size, bias=False),\n nn.BatchNorm2d(hidden),\n nn.ReLU(inplace=True),\n )\n self.conv_search = nn.Sequential(\n nn.Conv2d(in_channels, hidden, kernel_size=kernel_size, bias=False),\n nn.BatchNorm2d(hidden),\n nn.ReLU(inplace=True),\n )\n self.head = nn.Sequential(nn.Conv2d(hidden, hidden, kernel_size=1, bias=False), nn.BatchNorm2d(hidden),\n nn.ReLU(inplace=True), nn.Conv2d(hidden, out_channels, kernel_size=1))\n\n def forward(self, kernel, search):\n kernel = self.conv_kernel(kernel)\n search = self.conv_search(search)\n feature = xcorr_depthwise(search, kernel)\n out = self.head(feature)\n return out\n\n\nclass DepthwiseRPN(RPN):\n def __init__(self, anchor_num=5, in_channels=256, out_channels=256):\n super(DepthwiseRPN, self).__init__()\n self.cls = DepthwiseXCorr(in_channels, out_channels, 2 * anchor_num)\n self.loc = DepthwiseXCorr(in_channels, out_channels, 4 * anchor_num)\n\n def forward(self, z_f, x_f):\n cls = self.cls(z_f, x_f)\n loc = self.loc(z_f, x_f)\n return cls, loc\n\n\nclass MultiRPN(RPN):\n def __init__(self, anchor_num, in_channels, weighted=False):\n super(MultiRPN, self).__init__()\n self.weighted = weighted\n for i in range(len(in_channels)):\n self.add_module('rpn' + str(i + 2), DepthwiseRPN(anchor_num, in_channels[i], in_channels[i]))\n if self.weighted:\n self.cls_weight = nn.Parameter(torch.ones(len(in_channels)))\n self.loc_weight 
= nn.Parameter(torch.ones(len(in_channels)))\n\n def forward(self, z_fs, x_fs):\n cls = []\n loc = []\n for idx, (z_f, x_f) in enumerate(zip(z_fs, x_fs), start=2):\n rpn = getattr(self, 'rpn' + str(idx))\n c, l = rpn(z_f, x_f)\n cls.append(c)\n loc.append(l)\n\n if self.weighted:\n cls_weight = F.softmax(self.cls_weight, 0)\n loc_weight = F.softmax(self.loc_weight, 0)\n\n def avg(lst):\n return sum(lst) / len(lst)\n\n def weighted_avg(lst, weight):\n s = 0\n for i in range(len(weight)):\n s += lst[i] * weight[i]\n return s\n\n if self.weighted:\n return weighted_avg(cls, cls_weight), weighted_avg(loc, loc_weight)\n else:\n return avg(cls), avg(loc)\n" ]
[ [ "numpy.max", "numpy.array", "numpy.sum", "numpy.exp", "numpy.unravel_index", "numpy.argmax", "numpy.sqrt", "numpy.maximum" ], [ "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.nn.functional.softmax" ] ]
BA-HanseML/NF_Prj_MIMII_Dataset
[ "c9dd130a48c5ee28491a3f9369ace8f7217753d6" ]
[ "utility/feature_extractor/feature_extractor_ICA2.py" ]
[ "print('load feature_extractor_ICA2')\r\n\r\nfrom sklearn.decomposition import FastICA\r\n\r\n# Feature extractor for blind source separation from eight channels \r\n# to two and selecting the main channel based on the estimated mixing matrix\r\n\r\nclass feature_extractor_ICA2(feature_extractor):\r\n def __init__(self, base_folder, name='ICA2'):\r\n super().__init__(base_folder,name,\r\n xlabel = 'time',\r\n ylabel = 'amp',\r\n zlabel = 'none')\r\n \r\n \r\n # set type\r\n self.para_dict['type'] = feature_extractor_type.ICA2\r\n self.para_dict['type_name'] = 'ICA2'\r\n # default hyper\r\n self.set_hyperparamter()\r\n \r\n self.wave_data = None\r\n \r\n def set_hyperparamter(self,\r\n random_state=25):\r\n\r\n self.para_dict['hyperpara']={ \\\r\n 'random_state': random_state}\r\n \r\n\r\n if os.path.isfile(self._full_wave_path()):\r\n self.create_from_wav(self.para_dict['wave_filepath'] )\r\n \r\n \r\n def create_from_wav(self, filepath):\r\n \r\n transformer = FastICA(n_components=2, \r\n random_state=self.para_dict['hyperpara']['random_state'])\r\n \r\n af = np.array(self._read_wav(filepath))\r\n afT = transformer.fit_transform(af.T)\r\n \r\n self.wave_data = afT.T\r\n self.feature_data = transformer.mixing_\r\n \r\n def get_wav_memory_file(self, main=False):\r\n \r\n wmf = memory_wave_file()\r\n wmf.filepath = self.para_dict['wave_filepath']\r\n if main:\r\n ica_range, ica_chnr, in_chnr = self._ICA_2_main_channel(self.feature_data)\r\n wmf.channel = self.wave_data[ica_chnr].reshape(1,-1)\r\n else:\r\n wmf.channel = self.wave_data\r\n wmf.srate= self.para_dict['wave_srate']\r\n wmf.length = self.feature_data.shape[1] \r\n return wmf\r\n \r\n def _ICA_2_main_channel(self, mix_matrix):\r\n # returns dominat ica channel with the most mixing varaiton\r\n # and the dominate input channel mis that is directed to the device\r\n r = np.array([0.,0.])\r\n r[0] = np.abs(np.max(mix_matrix[:,0])- np.min(mix_matrix[:,0]))\r\n r[1] = np.abs(np.max(mix_matrix[:,1])- 
np.min(mix_matrix[:,1]))\r\n #print(mix_matrix[:,1])\r\n ica_range = np.max(r)\r\n ica_chnr = np.argmax(r)\r\n in_chnr = int(np.where(mix_matrix[:,ica_chnr]==np.min(mix_matrix[:,ica_chnr]))[0])\r\n return ica_range, ica_chnr, in_chnr \r\n \r\n def flat_feature(self):\r\n return self.feature_data.flatten()\r\n \r\n def maxrange_feature(self):\r\n ica_range, ica_chnr, in_chnr = self._ICA_2_main_channel(self.feature_data)\r\n return ica_range\r\n \r\n def get_feature(self, feat_para_dict):\r\n if feat_para_dict['function'] == 'flat':\r\n return self.flat_feature()\r\n elif feat_para_dict['function'] == 'maxrange':\r\n return self.maxrange_feature()\r\n else:\r\n raise Exception('feat get function \"' + feat_para_dict['function'] + '\" unknown')" ]
[ [ "sklearn.decomposition.FastICA" ] ]
gasdaf/graph-learn
[ "4a77b39be37bb7507f0e9fb5d4ed40ca623b2ceb" ]
[ "graphlearn/python/nn/tf/test/test_trainer.py" ]
[ "# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\nimport random\nimport numpy as np\nimport tensorflow as tf\nfrom graphlearn.python.data.feature_spec import FeatureSpec\nfrom graphlearn.python.nn.tf.model.ego_sage import EgoGraphSAGE\nfrom graphlearn.python.nn.tf.app.link_predictor import *\nfrom graphlearn.python.nn.tf.data.entity import Vertex\nfrom graphlearn.python.nn.tf.data.ego_graph import EgoGraph\nfrom graphlearn.python.nn.tf.layers.ego_sage_layer import EgoSAGELayer\nfrom graphlearn.python.nn.tf.layers.ego_sage_layer import EgoSAGELayerGroup\nfrom graphlearn.python.nn.tf.trainer import Trainer\n\nclass TrainerTestCase(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.src_emb, cls.dst_emb, cls.loss = cls.model_func()\n\n @classmethod\n def tearDownClass(cls):\n pass\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n @classmethod\n def get_graph(cls, hops, neg=None):\n spec = FeatureSpec(10)\n for i in range(3):\n spec.append_dense()\n\n total_dim = 3\n for i in range(7):\n dim = i + 1\n spec.append_sparse(20 + 10 * i, dim, False)\n total_dim += dim\n\n neg = 1 if not neg else int(neg)\n hops[0] = int(hops[0] * neg)\n # the centric vertices share the 
same spec with 2-hop neighbors\n schema = [(\"nodes\", spec), (\"nodes\", spec), (\"nodes\", spec)]\n\n # batch_size = 2\n # [f_num, batch_size] = [3, 2 * neg]\n batch_floats = np.array([[1.0 * i, 2.0 * i] * neg for i in range(3)])\n batch_floats = tf.convert_to_tensor(batch_floats, dtype=tf.float32)\n # [i_num, batch_size] = [7, 2 * neg]\n batch_ints = np.array([[i, 2 * i] * neg for i in range(7)])\n batch_ints = tf.convert_to_tensor(batch_ints, dtype=tf.int64)\n # [batch_size] = [2]\n batch_labels = np.array([1, 0])\n batch_labels = tf.convert_to_tensor(batch_labels, dtype=tf.int32)\n vertices = Vertex(floats=batch_floats, ints=batch_ints, labels=batch_labels)\n\n # [f_num, batch_size] = [3, 2 * neg * hop0]\n hop1_floats = np.array([[1.0 * i, 2.0 * i] * hops[0] for i in range(3)])\n hop1_floats = tf.convert_to_tensor(hop1_floats, dtype=tf.float32)\n # [i_num, batch_size] = [7, 2 * neg * hop0]\n hop1_ints = np.array([[i, 2 * i] * hops[0] for i in range(7)])\n hop1_ints = tf.convert_to_tensor(hop1_ints, dtype=tf.int64)\n neighbor_hop_1 = Vertex(floats=hop1_floats, ints=hop1_ints)\n\n # [f_num, batch_size] = [3, 2 * neg * hop0 * hop1]\n hop2_floats = np.array([[1.0 * i, 2.0 * i] * hops[0] * hops[1] for i in range(3)])\n hop2_floats = tf.convert_to_tensor(hop2_floats, dtype=tf.float32)\n # [i_num, batch_size] = [7, 2 * neg * hop0 * hop1]\n hop2_ints = np.array([[i, 2 * i] * hops[0] * hops[1] for i in range(7)])\n hop2_ints = tf.convert_to_tensor(hop2_ints, dtype=tf.int64)\n neighbor_hop_2 = Vertex(floats=hop2_floats, ints=hop2_ints)\n\n hops[0] = int(hops[0] / neg)\n g = EgoGraph(vertices, [neighbor_hop_1, neighbor_hop_2], schema, hops)\n return g, total_dim\n\n @classmethod\n def model_func(cls):\n src_hops = [4, 5]\n dst_hops = [2, 6]\n neg = 2\n src_g, src_dim = cls.get_graph(src_hops)\n dst_g, dst_dim = cls.get_graph(dst_hops)\n neg_g, neg_dim = cls.get_graph(dst_hops, neg)\n\n layer_ui = EgoSAGELayer(\"heter_ui\",\n input_dim=(src_dim, dst_dim),\n 
output_dim=12,\n agg_type=\"mean\",\n com_type=\"concat\")\n layer_ii = EgoSAGELayer(\"heter_ii\",\n input_dim=dst_dim,\n output_dim=12,\n agg_type=\"mean\",\n com_type=\"concat\")\n layer_uii = EgoSAGELayer(\"heter_uii\",\n input_dim=(12, 12),\n output_dim=8,\n agg_type=\"sum\",\n com_type=\"concat\")\n layer_iii = EgoSAGELayer(\"heter_iii\",\n input_dim=(12, 12),\n output_dim=8,\n agg_type=\"sum\",\n com_type=\"concat\")\n layer_group_1 = EgoSAGELayerGroup([layer_ui, layer_ii])\n layer_group_2 = EgoSAGELayerGroup([layer_uii])\n src_model = EgoGraphSAGE(\n [layer_group_1, layer_group_2],\n bn_fn=None,\n active_fn=tf.nn.relu,\n droput=0.1)\n\n layer_group_3 = EgoSAGELayerGroup([layer_ii, layer_ii])\n layer_group_4 = EgoSAGELayerGroup([layer_iii])\n dst_model = EgoGraphSAGE(\n [layer_group_3, layer_group_4],\n bn_fn=None,\n active_fn=tf.nn.relu,\n droput=0.1)\n\n src_embeddings = src_model.forward(src_g)\n dst_embeddings = dst_model.forward(dst_g)\n neg_embeddings = dst_model.forward(neg_g)\n neg_embeddings = tf.reshape(neg_embeddings, [-1, neg, 8])\n\n lp = UnsupervisedLinkPredictor(name=\"unlp\", dims=[8, 4])\n loss = lp.forward(src_embeddings, dst_embeddings, neg_embeddings)\n return src_embeddings, dst_embeddings, loss\n\n def test_step(self):\n trainer = Trainer()\n trainer.minimize(TrainerTestCase.loss)\n trainer.step()\n trainer.close()\n\n def test_step_with_args(self):\n trainer = Trainer()\n trainer.minimize(TrainerTestCase.loss)\n\n def trace(ret):\n self.assertEqual(len(ret), 2)\n return 2\n\n ret = trainer.step(\n [TrainerTestCase.loss, TrainerTestCase.src_emb],\n trace)\n self.assertEqual(ret, 2)\n trainer.close()\n\n def test_run(self):\n trainer = Trainer()\n ret = trainer.run([TrainerTestCase.src_emb])\n self.assertEqual(len(ret), 1)\n self.assertEqual(list(ret[0].shape), [2, 8])\n\n def trace(ret):\n self.assertEqual(len(ret), 1)\n return 1\n\n ret = trainer.run([TrainerTestCase.src_emb], trace)\n self.assertEqual(ret, 1)\n trainer.close()\n\nif 
__name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "tensorflow.convert_to_tensor", "tensorflow.reshape" ] ]
Saro00/graphtransformer
[ "3fbbefc9927dac277156485bedc842657214f25d" ]
[ "data/molecules.py" ]
[ "import torch\nimport pickle\nimport torch.utils.data\nimport time\nimport os\nimport numpy as np\n\nimport csv\n\nimport dgl\n\nfrom scipy import sparse as sp\nimport numpy as np\nimport networkx as nx\nimport hashlib\n\n# *NOTE\n# The dataset pickle and index files are in ./zinc_molecules/ dir\n# [<split>.pickle and <split>.index; for split 'train', 'val' and 'test']\n\n\n\n\nclass MoleculeDGL(torch.utils.data.Dataset):\n def __init__(self, data_dir, split, num_graphs=None):\n self.data_dir = data_dir\n self.split = split\n self.num_graphs = num_graphs\n \n with open(data_dir + \"/%s.pickle\" % self.split,\"rb\") as f:\n self.data = pickle.load(f)\n\n if self.num_graphs in [10000, 1000]:\n # loading the sampled indices from file ./zinc_molecules/<split>.index\n with open(data_dir + \"/%s.index\" % self.split,\"r\") as f:\n data_idx = [list(map(int, idx)) for idx in csv.reader(f)]\n self.data = [ self.data[i] for i in data_idx[0] ]\n\n assert len(self.data)==num_graphs, \"Sample num_graphs again; available idx: train/val/test => 10k/1k/1k\"\n \n \"\"\"\n data is a list of Molecule dict objects with following attributes\n \n molecule = data[idx]\n ; molecule['num_atom'] : nb of atoms, an integer (N)\n ; molecule['atom_type'] : tensor of size N, each element is an atom type, an integer between 0 and num_atom_type\n ; molecule['bond_type'] : tensor of size N x N, each element is a bond type, an integer between 0 and num_bond_type\n ; molecule['logP_SA_cycle_normalized'] : the chemical property to regress, a float variable\n \"\"\"\n \n self.graph_lists = []\n self.graph_labels = []\n self.n_samples = len(self.data)\n self._prepare()\n \n def _prepare(self):\n print(\"preparing %d graphs for the %s set...\" % (self.num_graphs, self.split.upper()))\n \n for molecule in self.data:\n node_features = molecule['atom_type'].long()\n \n adj = molecule['bond_type']\n edge_list = (adj != 0).nonzero() # converting adj matrix to edge_list\n \n edge_idxs_in_adj = 
edge_list.split(1, dim=1)\n edge_features = adj[edge_idxs_in_adj].reshape(-1).long()\n \n # Create the DGL Graph\n g = dgl.DGLGraph()\n g.add_nodes(molecule['num_atom'])\n g.ndata['feat'] = node_features\n \n for src, dst in edge_list:\n g.add_edges(src.item(), dst.item())\n g.edata['feat'] = edge_features\n \n self.graph_lists.append(g)\n self.graph_labels.append(molecule['logP_SA_cycle_normalized'])\n \n def __len__(self):\n \"\"\"Return the number of graphs in the dataset.\"\"\"\n return self.n_samples\n\n def __getitem__(self, idx):\n \"\"\"\n Get the idx^th sample.\n Parameters\n ---------\n idx : int\n The sample index.\n Returns\n -------\n (dgl.DGLGraph, int)\n DGLGraph with node feature stored in `feat` field\n And its label.\n \"\"\"\n return self.graph_lists[idx], self.graph_labels[idx]\n \n \nclass MoleculeDatasetDGL(torch.utils.data.Dataset):\n def __init__(self, name='Zinc'):\n t0 = time.time()\n self.name = name\n \n self.num_atom_type = 28 # known meta-info about the zinc dataset; can be calculated as well\n self.num_bond_type = 4 # known meta-info about the zinc dataset; can be calculated as well\n \n data_dir='./data/molecules'\n \n if self.name == 'ZINC-full':\n data_dir='./data/molecules/zinc_full'\n self.train = MoleculeDGL(data_dir, 'train', num_graphs=220011)\n self.val = MoleculeDGL(data_dir, 'val', num_graphs=24445)\n self.test = MoleculeDGL(data_dir, 'test', num_graphs=5000)\n else: \n self.train = MoleculeDGL(data_dir, 'train', num_graphs=10000)\n self.val = MoleculeDGL(data_dir, 'val', num_graphs=1000)\n self.test = MoleculeDGL(data_dir, 'test', num_graphs=1000)\n print(\"Time taken: {:.4f}s\".format(time.time()-t0))\n \n\n\ndef self_loop(g):\n \"\"\"\n Utility function only, to be used only when necessary as per user self_loop flag\n : Overwriting the function dgl.transform.add_self_loop() to not miss ndata['feat'] and edata['feat']\n \n \n This function is called inside a function in MoleculeDataset class.\n \"\"\"\n new_g = 
dgl.DGLGraph()\n new_g.add_nodes(g.number_of_nodes())\n new_g.ndata['feat'] = g.ndata['feat']\n \n src, dst = g.all_edges(order=\"eid\")\n src = dgl.backend.zerocopy_to_numpy(src)\n dst = dgl.backend.zerocopy_to_numpy(dst)\n non_self_edges_idx = src != dst\n nodes = np.arange(g.number_of_nodes())\n new_g.add_edges(src[non_self_edges_idx], dst[non_self_edges_idx])\n new_g.add_edges(nodes, nodes)\n \n # This new edata is not used since this function gets called only for GCN, GAT\n # However, we need this for the generic requirement of ndata and edata\n new_g.edata['feat'] = torch.zeros(new_g.number_of_edges())\n return new_g\n\n\ndef make_full_graph(g):\n \"\"\"\n Converting the given graph to fully connected\n This function just makes full connections\n removes available edge features \n \"\"\"\n\n full_g = dgl.from_networkx(nx.complete_graph(g.number_of_nodes()))\n full_g.ndata['feat'] = g.ndata['feat']\n full_g.edata['feat'] = torch.zeros(full_g.number_of_edges()).long()\n full_g.ndata['lap_pos_enc'] = g.ndata['lap_pos_enc']\n return full_g\n\n\n\ndef laplacian_positional_encoding(g, pos_enc_dim):\n \"\"\"\n Graph positional encoding v/ Laplacian eigenvectors\n \"\"\"\n\n # Laplacian\n A = g.adjacency_matrix_scipy(return_edge_ids=False).astype(float)\n N = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float)\n L = sp.eye(g.number_of_nodes()) - N * A * N\n\n # Eigenvectors with numpy\n EigVal, EigVec = np.linalg.eig(L.toarray())\n idx = EigVal.argsort() # increasing order\n EigVal, EigVec = EigVal[idx], np.real(EigVec[:,idx])\n g.ndata['lap_pos_enc'] = torch.from_numpy(EigVec[:,1:pos_enc_dim+1]).float() \n \n return g\n\ndef wl_positional_encoding(g):\n \"\"\"\n WL-based absolute positional embedding \n adapted from \n \n \"Graph-Bert: Only Attention is Needed for Learning Graph Representations\"\n Zhang, Jiawei and Zhang, Haopeng and Xia, Congying and Sun, Li, 2020\n https://github.com/jwzhanggy/Graph-Bert\n \"\"\"\n max_iter = 2\n 
node_color_dict = {}\n node_neighbor_dict = {}\n\n edge_list = torch.nonzero(g.adj().to_dense() != 0, as_tuple=False).numpy()\n node_list = g.nodes().numpy()\n\n # setting init\n for node in node_list:\n node_color_dict[node] = 1\n node_neighbor_dict[node] = {}\n\n for pair in edge_list:\n u1, u2 = pair\n if u1 not in node_neighbor_dict:\n node_neighbor_dict[u1] = {}\n if u2 not in node_neighbor_dict:\n node_neighbor_dict[u2] = {}\n node_neighbor_dict[u1][u2] = 1\n node_neighbor_dict[u2][u1] = 1\n\n\n # WL recursion\n iteration_count = 1\n exit_flag = False\n while not exit_flag:\n new_color_dict = {}\n for node in node_list:\n neighbors = node_neighbor_dict[node]\n neighbor_color_list = [node_color_dict[neb] for neb in neighbors]\n color_string_list = [str(node_color_dict[node])] + sorted([str(color) for color in neighbor_color_list])\n color_string = \"_\".join(color_string_list)\n hash_object = hashlib.md5(color_string.encode())\n hashing = hash_object.hexdigest()\n new_color_dict[node] = hashing\n color_index_dict = {k: v+1 for v, k in enumerate(sorted(set(new_color_dict.values())))}\n for node in new_color_dict:\n new_color_dict[node] = color_index_dict[new_color_dict[node]]\n if node_color_dict == new_color_dict or iteration_count == max_iter:\n exit_flag = True\n else:\n node_color_dict = new_color_dict\n iteration_count += 1\n \n g.ndata['wl_pos_enc'] = torch.LongTensor(list(node_color_dict.values()))\n return g\n\n\nclass MoleculeDataset(torch.utils.data.Dataset):\n\n def __init__(self, name):\n \"\"\"\n Loading ZINC dataset\n \"\"\"\n start = time.time()\n print(\"[I] Loading dataset %s...\" % (name))\n self.name = name\n data_dir = 'data/molecules/'\n with open(data_dir+name+'.pkl',\"rb\") as f:\n f = pickle.load(f)\n self.train = f[0]\n self.val = f[1]\n self.test = f[2]\n self.num_atom_type = f[3]\n self.num_bond_type = f[4]\n print('train, test, val sizes :',len(self.train),len(self.test),len(self.val))\n print(\"[I] Finished loading.\")\n print(\"[I] 
Data load time: {:.4f}s\".format(time.time()-start))\n\n\n # form a mini batch from a given list of samples = [(graph, label) pairs]\n def collate(self, samples):\n # The input samples is a list of pairs (graph, label).\n graphs, labels = map(list, zip(*samples))\n labels = torch.tensor(np.array(labels)).unsqueeze(1)\n batched_graph = dgl.batch(graphs) \n \n return batched_graph, labels\n \n \n def _add_self_loops(self):\n \n # function for adding self loops\n # this function will be called only if self_loop flag is True\n \n self.train.graph_lists = [self_loop(g) for g in self.train.graph_lists]\n self.val.graph_lists = [self_loop(g) for g in self.val.graph_lists]\n self.test.graph_lists = [self_loop(g) for g in self.test.graph_lists]\n\n def _make_full_graph(self):\n \n # function for converting graphs to full graphs\n # this function will be called only if full_graph flag is True\n self.train.graph_lists = [make_full_graph(g) for g in self.train.graph_lists]\n self.val.graph_lists = [make_full_graph(g) for g in self.val.graph_lists]\n self.test.graph_lists = [make_full_graph(g) for g in self.test.graph_lists]\n \n \n def _add_laplacian_positional_encodings(self, pos_enc_dim):\n \n # Graph positional encoding v/ Laplacian eigenvectors\n self.train.graph_lists = [laplacian_positional_encoding(g, pos_enc_dim) for g in self.train.graph_lists]\n self.val.graph_lists = [laplacian_positional_encoding(g, pos_enc_dim) for g in self.val.graph_lists]\n self.test.graph_lists = [laplacian_positional_encoding(g, pos_enc_dim) for g in self.test.graph_lists]\n\n def _add_wl_positional_encodings(self):\n \n # WL positional encoding from Graph-Bert, Zhang et al 2020.\n self.train.graph_lists = [wl_positional_encoding(g) for g in self.train.graph_lists]\n self.val.graph_lists = [wl_positional_encoding(g) for g in self.val.graph_lists]\n self.test.graph_lists = [wl_positional_encoding(g) for g in self.test.graph_lists]\n\n\n" ]
[ [ "numpy.array", "numpy.real", "torch.from_numpy" ] ]
NREL/K_Road
[ "ec8049cf1b81c58dd3b95f8298a362d863cd4a68" ]
[ "scenario/road/road_observer.py" ]
[ "import math\nfrom typing import Optional\n\nimport numpy as np\nimport pymunk\nfrom gym import spaces\nfrom pymunk import Vec2d\n\nfrom factored_gym import Observer\nfrom k_road.constants import Constants\nfrom k_road.entity.entity_type import EntityType\nfrom k_road.k_road_view import KRoadView\nfrom k_road.scan import pathfinder\nfrom k_road.util import *\nfrom scenario.road import RoadProcess\nfrom scenario.road.ray_scanner import RayScanner\n\n\nclass RoadObserver(Observer):\n\n def __init__(self,\n scaling: float = 1.0,\n forward_scan_angle=(30.0 / 360) * 2 * math.pi,\n forward_scan_resolution: int = 33, # 33,\n forward_scan_radius: float = .1, # 3.7 / 2.0,\n forward_scan_distance: float = 200, # 150-200m industry standard forward scan distance\n rear_scan_resolution: int = 13, # 33,\n rear_scan_radius: float = .1, # 3.7 / 2.0,\n rear_scan_distance: float = 30,\n ):\n self.scaling: float = scaling\n self.forward_scan_angle: float = forward_scan_angle\n self.forward_scan_resolution: int = forward_scan_resolution\n self.rear_scan_resolution: int = rear_scan_resolution\n\n self.observation_length: int = 0\n\n # 0\n self.distance_to_target_index: int = self.observation_length\n self.observation_length += 1\n\n # 1\n self.speed_along_path_index: int = self.observation_length\n self.observation_length += 1\n\n # 2\n self.heading_along_path_index: int = self.observation_length\n self.observation_length += 1\n\n # 3\n self.cross_track_error_index: int = self.observation_length\n self.observation_length += 1\n\n # 4\n self.yaw_rate_index: int = self.observation_length\n self.observation_length += 1\n\n # 5\n self.steer_angle_index: int = self.observation_length\n self.observation_length += 1\n\n # 6\n self.acceleration_index: int = self.observation_length\n self.observation_length += 1\n\n # 7\n self.lateral_velocity_index: int = self.observation_length\n self.observation_length += 1\n\n # 8\n self.longitudinal_velocity_index: int = self.observation_length\n 
self.observation_length += 1\n\n # self.time_left_index: int = self.observation_length\n # self.observation_length += 1\n\n # self.left_lane_marking_yellow_distance_index: int = self.observation_length\n # self.observation_length += 1\n #\n # self.right_lane_marking_yellow_distance_index: int = self.observation_length\n # self.observation_length += 1\n\n self.forward_scan_offset = self.forward_scan_angle / 2\n self.forward_scanner = \\\n RayScanner(forward_scan_distance, self.forward_scan_angle, forward_scan_radius,\n self.forward_scan_resolution)\n\n rear_scan_step = (2 * math.pi - self.forward_scan_angle) / (rear_scan_resolution + 2)\n rear_scan_angle = rear_scan_step * rear_scan_resolution\n self.rear_scan_offset = -self.forward_scan_offset + self.forward_scan_angle + rear_scan_step\n self.rear_scanner = \\\n RayScanner(rear_scan_distance, rear_scan_angle, rear_scan_radius, self.rear_scan_resolution)\n\n # self.scan_resolution: int = 16\n # self.scan_distance: float = 30.0\n # self.scan_radius = .1\n # self.scan_arc: float = 2 * math.pi / self.scan_resolution\n\n # self.forward_scan_lane_marking_yellow_index: int = self.observation_length\n # self.observation_length: int = self.observation_length + self.forward_scan_resolution\n\n num_scan_arrays = 1\n\n self.forward_scan_vehicle_index: int = self.observation_length\n self.observation_length: int = self.observation_length + self.forward_scan_resolution * num_scan_arrays\n\n # self.forward_scan_vehicle_closing_index: int = self.observation_length\n # self.observation_length: int = self.observation_length + self.forward_scan_resolution\n\n # self.forward_scan_lane_marking_white_dashed_scan_index: int = self.observation_length\n # self.observation_length: int = self.observation_length + self.forward_scan_resolution\n\n # self.rear_scan_lane_marking_yellow_index: int = self.observation_length\n # self.observation_length: int = self.observation_length + self.rear_scan_resolution\n\n self.rear_scan_vehicle_index: int 
= self.observation_length\n self.observation_length: int = self.observation_length + self.rear_scan_resolution * num_scan_arrays\n\n # self.rear_scan_vehicle_closing_index: int = self.observation_length\n # self.observation_length: int = self.observation_length + self.rear_scan_resolution\n\n # self.rear_scan_lane_marking_white_dashed_scan_index: int = self.observation_length\n # self.observation_length: int = self.observation_length + self.rear_scan_resolution\n\n self.forward_scan_results = None\n self.rear_scan_results = None\n\n # self.baseline_acceleration_index = self.observation_length\n # self.observation_length += 1\n #\n # self.baseline_steer_angle_index = self.observation_length\n # self.observation_length += 1\n\n self.observation_space = \\\n spaces.Box(low=-self.scaling, high=self.scaling, shape=(self.observation_length,))\n\n self.num = 0\n\n def get_observation_space(self, process: RoadProcess):\n return self.observation_space\n\n def reset(self, process: RoadProcess):\n # self.contacts.clear()\n # self.scan_endpoints.clear()\n # self.lane_marking_yellow_contacts.clear()\n # self.lane_contacts.clear()\n # self.scan_results = None\n\n # attach sensors to the ego vehicle\n # self.sensor_shapes = []\n # for scan_index in range(self.scan_resolution):\n # scan_angle = -math.pi + self.scan_arc * scan_index\n # vertices = [\n # Vec2d(0, 0),\n # Vec2d(self.scan_distance, 0),\n # Vec2d(self.scan_distance * math.cos(self.scan_arc), self.scan_distance * math.sin(self.scan_arc))\n # ]\n #\n # vertices = [v.rotated(scan_angle) for v in vertices]\n #\n # sensor_shape = pymunk.Poly(process.ego_vehicle.body, vertices)\n # sensor_shape.sensor = True\n # self.sensor_shapes.append(sensor_shape)\n # sensor_shape.entity = process.ego_vehicle\n #\n # process.sensor_shapes = self.sensor_shapes\n pass\n\n def get_observation(self, process: RoadProcess):\n # compute observation\n observation = np.empty(self.observation_space.shape)\n\n ego_vehicle = process.ego_vehicle\n 
position = ego_vehicle.position\n velocity = ego_vehicle.velocity\n yaw = ego_vehicle.angle\n\n space = process.space\n\n # delta_angle = math.atan2(process.ego_to_target[1], process.ego_to_target[0])\n # heading = delta_angle(ego.angle, delta_angle)\n\n scan_position: Vec2d = position + Vec2d(ego_vehicle.length / 2, 0).rotated(yaw)\n path_pqi: Optional[pymunk.PointQueryInfo] = \\\n pathfinder.find_best_path(space, scan_position, 1 * Constants.lane_width)[0]\n cross_track_error: float = 0 if path_pqi is None else (path_pqi.point - position).length\n best_path: Optional[Path] = None if path_pqi is None else path_pqi.shape.body.entity\n\n observation[self.distance_to_target_index] = \\\n min(1.0, max(-1.0, 2 * (process.distance_to_target / process.target_offset) - 1))\n\n # maxed at 1\n speed_along_path: float = \\\n velocity.dot(best_path.direction) if best_path is not None else 0\n observation[self.speed_along_path_index] = min(1.0, max(-1.0, speed_along_path / (1.1 * Constants.max_speed)))\n\n angle_agreement_with_path: float = \\\n signed_delta_angle(best_path.direction.angle, velocity.angle) if best_path is not None else 0\n observation[self.heading_along_path_index] = min(1.0, max(-1.0, angle_agreement_with_path / math.pi))\n\n observation[self.cross_track_error_index] = min(1.0, max(-1.0, cross_track_error / (Constants.lane_width * 2)))\n\n # maxed at 1.0?\n observation[self.yaw_rate_index] = \\\n min(1.0, max(-1.0, ego_vehicle.angular_velocity / (.04 * 2 * math.pi))) # 4\n\n observation[self.steer_angle_index] = \\\n min(1.0, max(-1.0, ego_vehicle.steer_angle / Constants.max_steer_angle)) # 5\n\n # minned at -1.0?\n observation[self.acceleration_index] = \\\n min(1.0, max(-1.0,\n ego_vehicle.acceleration / (1.1 * Constants.max_acceleration)\n if ego_vehicle.acceleration >= 0 else\n ego_vehicle.acceleration / (1.1 * Constants.max_deceleration))) # 6\n # print('acc: ', ego_vehicle.acceleration, observation[self.acceleration_index])\n\n # 0'ed\n 
observation[self.lateral_velocity_index] = \\\n min(1.0, max(-1.0, ego_vehicle.lateral_velocity / 1.0)) # 1 m/s cap (7)\n\n # 1'ed\n observation[self.longitudinal_velocity_index] = \\\n min(1.0, max(-1.0, ego_vehicle.longitudinal_velocity / (1.1 * Constants.max_speed)))\n\n # print(process.distance_to_target, process.target_offset)\n # print(observation[self.distance_to_target_index:self.heading_along_path_index + 1])\n # print(ego_vehicle.lateral_velocity)\n # print(observation[self.distance_to_target_index:self.longitudinal_velocity_index + 1])\n\n # observation[self.time_left_index] = 2.0 * (process.time / process.time_limit) - 1.0\n\n yaw = ego_vehicle.angle\n space = process.space\n\n self.forward_scan_results = \\\n self.forward_scanner.scan_closest_of_each_type(\n position,\n yaw - self.forward_scan_offset,\n space,\n lambda rsr: rsr.entity != ego_vehicle)\n self.__extract_scan_data(\n self.forward_scan_results,\n observation,\n self.forward_scan_vehicle_index,\n position,\n velocity)\n\n self.rear_scan_results = \\\n self.rear_scanner.scan_closest_of_each_type(\n position, yaw + self.rear_scan_offset, space,\n lambda rsr: rsr.entity != ego_vehicle)\n self.__extract_scan_data(\n self.rear_scan_results,\n observation,\n self.rear_scan_vehicle_index,\n position,\n velocity)\n\n # baseline_steer_rate, baseline_jerk = self.get_baseline_action()\n # # observation[self.baseline_acceleration_index] = baseline_jerk / Constants.max_jerk\n # # observation[self.baseline_steer_angle_index] = baseline_steer_rate / Constants.max_steer_rate\n # observation[self.baseline_acceleration_index] = \\\n # inverse_two_sided_exponential(2, baseline_jerk / Constants.max_jerk)\n # observation[self.baseline_steer_angle_index] = \\\n # inverse_two_sided_exponential(2, baseline_steer_rate / Constants.max_steer_rate)\n # # observation[self.baseline_acceleration_index] = baseline_acceleration / Constants.max_acceleration\n # # observation[self.baseline_steer_angle_index] = 
max_baseline_steer_angle / Constants.max_steer_angle\n\n if self.num % 1000 == 0:\n print(observation)\n self.num = self.num + 1\n\n observation = np.multiply(self.scaling, observation)\n # print(ego_position, target_position, delta, observation)\n # print(observation)\n return observation\n\n def render(self, process: RoadProcess, view: KRoadView) -> None:\n self.draw_scan(process, view, self.forward_scanner, self.forward_scan_results)\n self.draw_scan(process, view, self.rear_scanner, self.rear_scan_results, (96, 0, 0))\n\n def draw_scan(self, process: RoadProcess, view: KRoadView, ray_scanner, scan_results, color=(64, 64, 64)) -> None:\n if scan_results is None:\n return\n\n position = process.ego_vehicle.position\n contact_size = min(.1, ray_scanner.beam_radius / 2)\n for (endpoint, ray_contacts) in scan_results:\n ray_vector = (endpoint - position)\n view.draw_segment(color, False, position, endpoint, ray_scanner.beam_radius)\n\n def draw_contact(color, type_):\n if type_ in ray_contacts:\n view.draw_circle(\n color,\n position + ray_vector * ray_contacts[type_].alpha,\n contact_size)\n\n draw_contact((255, 255, 255), EntityType.curb)\n # draw_contact((0, 255, 0), EntityType.lane_marking_white_dashed)\n draw_contact((255, 0, 0), EntityType.vehicle)\n\n def __extract_scan_data(self, scan_results, observation, index_offset, position, velocity):\n num_rays = len(scan_results)\n\n for i in range(num_rays):\n (endpoint, ray_contacts) = scan_results[i]\n\n ray_unit = (endpoint - position).normalized()\n\n # def get_contact(type_):\n # if type_ in ray_contacts:\n # return 2 * ray_contacts[type_].alpha - 1\n # else:\n # return 1.0\n\n def get_contact(type_):\n if type_ in ray_contacts:\n return 1 - ray_contacts[type_].alpha\n else:\n return 0.0\n\n def get_contact_closing_speed(type_):\n if type_ not in ray_contacts:\n return 0.0\n rcr = ray_contacts[type_]\n relative_velocity = rcr.entity.velocity - velocity\n return max(-1.0, min(1.0, relative_velocity.dot(ray_unit) 
/ Constants.max_speed))\n\n offset = 0\n\n # observation[index_offset + num_rays * offset + i] = get_contact(EntityType.vehicle)\n # offset = offset + 1\n #\n # observation[index_offset + num_rays * offset + i] = get_contact_closing_speed(EntityType.vehicle)\n # offset = offset + 1\n\n observation[index_offset + num_rays * offset + i] = get_contact(EntityType.curb)\n offset = offset + 1\n\n # observation[index_offset + num_rays * offset + i] = get_contact_closing_speed(EntityType.curb)\n # offset = offset + 1\n\n # try closing time as well?\n # closing time to vehicle vs along ray? maybe essentially the same?\n\n # observation[index_offset + num_rays * offset + i] = get_contact(EntityType.lane_marking_white_dashed)\n # offset = offset + 1\n" ]
[ [ "numpy.multiply", "numpy.empty" ] ]
miterion/self-paced-rl
[ "352c92e0b7b74c807af343211ebf833213626e87" ]
[ "sprl/util/gym_envs/reach_avoid.py" ]
[ "import os\nfrom gym import utils as gym_utils\nfrom gym.envs.robotics import fetch_env, utils\nimport numpy as np\nfrom gym import spaces\nfrom gym.envs.robotics.fetch_env import goal_distance\n\n# Ensure we get the path separator correct on windows\nMODEL_XML_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'fetch', 'reach_avoid.xml')\n\n\nclass FetchReachAvoidEnv(fetch_env.FetchEnv, gym_utils.EzPickle):\n def __init__(self, reward_type='sparse'):\n initial_qpos = {\n 'robot0:slide0': 0.4049,\n 'robot0:slide1': 0.48,\n 'robot0:slide2': 0.0,\n }\n\n self.obstacle_body_id = None\n self.obstacle_geom_id = None\n self.obstacle_information = None\n self.collision_detected = False\n self.goal_reached = False\n\n fetch_env.FetchEnv.__init__(\n self, MODEL_XML_PATH, has_object=False, block_gripper=True, n_substeps=5, # n_substeps=20,\n gripper_extra_height=0.2, target_in_the_air=True, target_offset=0.0,\n obj_range=0.15, target_range=0.15, distance_threshold=0.05,\n initial_qpos=initial_qpos, reward_type=reward_type)\n\n self.obstacle_lo = np.array([0.02, 0.02])\n self.obstacle_hi = np.array([0.14, 0.14])\n self.obstacle_geom_id = self.sim.model._geom_name2id[\"obstacle_geom\"]\n self.obstacle_body_id = self.sim.model._body_name2id[\"obstacle\"]\n\n self.obstacle2_geom_id = self.sim.model._geom_name2id[\"obstacle_geom1\"]\n self.obstacle2_body_id = self.sim.model._body_name2id[\"obstacle1\"]\n\n self.fixed_obstacle_geom_ids = [self.sim.model._geom_name2id[\"obstacle_geom2\"],\n self.sim.model._geom_name2id[\"obstacle_geom3\"]]\n self._sample_goal()\n\n obs = self._get_obs()\n self.observation_space = spaces.Dict(dict(\n desired_goal=spaces.Box(-np.inf, np.inf, shape=obs['achieved_goal'].shape, dtype='float32'),\n achieved_goal=spaces.Box(-np.inf, np.inf, shape=obs['achieved_goal'].shape, dtype='float32'),\n observation=spaces.Box(-np.inf, np.inf, shape=obs['observation'].shape, dtype='float32'),\n ))\n self.reduced_action_space = 
spaces.Box(-1., 1., shape=(2,), dtype='float32')\n\n gym_utils.EzPickle.__init__(self)\n\n # RobotEnv methods\n # ----------------------------\n\n def _step_callback(self):\n for coni in range(0, self.sim.data.ncon):\n con = self.sim.data.contact[coni]\n\n block_collision = con.geom1 == self.obstacle_geom_id or con.geom2 == self.obstacle_geom_id\n block2_collision = con.geom1 == self.obstacle2_geom_id or con.geom2 == self.obstacle2_geom_id\n rem_collision = con.geom1 in self.fixed_obstacle_geom_ids or con.geom2 in self.fixed_obstacle_geom_ids\n\n # This means we have a collision with one of the obstacles\n if rem_collision or block_collision or block2_collision:\n # Now we check if we have a collision with the robot\n if self.sim.model._geom_id2name[con.geom1].startswith(\"robot\") or \\\n self.sim.model._geom_id2name[con.geom2].startswith(\"robot\"):\n self.collision_detected = True\n\n super(FetchReachAvoidEnv, self)._step_callback()\n\n def _get_obs(self):\n obs = super(FetchReachAvoidEnv, self)._get_obs()\n obs[\"obstacle_information\"] = self.obstacle_information\n return obs\n\n def _sample_goal(self):\n goal = np.array([1.3, 0.35, 0.5])\n\n if self.obstacle_body_id is not None:\n self._set_obstacle_information(np.random.uniform(self.obstacle_lo, self.obstacle_hi))\n\n # goal = np.concatenate((np.random.uniform(np.array([1.15, 0.4]), np.array([1.45, 1.1])), [0.5]))\n\n return goal.copy()\n\n def _set_obstacle_information(self, obs_inf):\n self.obstacle_information = obs_inf\n\n size = np.copy(self.sim.model.geom_size[self.obstacle_geom_id])\n size[0] = self.obstacle_information[0]\n\n xp = np.copy(self.sim.data.body_xpos[self.obstacle_body_id])\n correction_x = np.cos(-0.5) * size[0]\n correction_y = np.sin(-0.5) * size[0]\n xp[0] = 1.14 + correction_x\n xp[1] = 0.92 + correction_y\n # xp[0] = 1.12 + size[0]\n self.sim.model.body_pos[self.obstacle_body_id][:] = xp\n\n self.sim.model.geom_rbound[self.obstacle_geom_id] = np.sqrt(np.sum(np.square(size)))\n 
self.sim.model.geom_size[self.obstacle_geom_id][:] = size\n self.sim.data.body_xpos[self.obstacle_body_id][:] = xp\n\n # The same for the second block\n size = np.copy(self.sim.model.geom_size[self.obstacle2_geom_id])\n size[0] = self.obstacle_information[1]\n\n xp = np.copy(self.sim.data.body_xpos[self.obstacle2_body_id])\n correction_x = np.cos(-0.2) * size[0]\n correction_y = np.sin(-0.2) * size[0]\n xp[0] = 1.47 - correction_x\n xp[1] = 0.65 - correction_y\n # xp[0] = 1.48 - size[0]\n self.sim.model.body_pos[self.obstacle2_body_id][:] = xp\n\n self.sim.model.geom_rbound[self.obstacle2_geom_id] = np.sqrt(np.sum(np.square(size)))\n self.sim.model.geom_size[self.obstacle2_geom_id][:] = size\n self.sim.data.body_xpos[self.obstacle2_body_id][:] = xp\n\n def _is_success(self, achieved_goal, desired_goal):\n suc = super(FetchReachAvoidEnv, self)._is_success(achieved_goal, desired_goal)\n return suc and (not self.collision_detected)\n\n def _env_setup(self, initial_qpos):\n for name, value in initial_qpos.items():\n self.sim.data.set_joint_qpos(name, value)\n utils.reset_mocap_welds(self.sim)\n self.sim.forward()\n\n # Move end effector into position.\n gripper_target = np.array([-0.498 - 0.0418, 0.005 + 0.4009, -0.431 - 0.055 + self.gripper_extra_height]) + \\\n self.sim.data.get_site_xpos('robot0:grip')\n gripper_rotation = np.array([1., 0., 1., 0.])\n self.sim.data.set_mocap_pos('robot0:mocap', gripper_target)\n self.sim.data.set_mocap_quat('robot0:mocap', gripper_rotation)\n for _ in range(10):\n self.sim.step()\n\n # Extract information for sampling goals.\n self.initial_gripper_xpos = self.sim.data.get_site_xpos('robot0:grip').copy()\n if self.has_object:\n self.height_offset = self.sim.data.get_site_xpos('object0')[2]\n\n def reset(self):\n obs = super(FetchReachAvoidEnv, self).reset()\n self.collision_detected = False\n self.goal_reached = False\n return obs\n\n def compute_reward(self, achieved_goal, goal, info):\n # Compute distance between goal and the 
achieved goal.\n d = goal_distance(achieved_goal, goal)\n if self.reward_type == 'sparse':\n return -(d > self.distance_threshold).astype(np.float32)\n else:\n return np.exp(-d)\n\n def step(self, action):\n # If a collision has been detected we ignore the action and just return the observation from the step where\n # the collision occurred\n if self.collision_detected or self.goal_reached:\n obs = self._get_obs()\n info = {\n 'is_success': self._is_success(obs['achieved_goal'], self.goal),\n 'is_collision': self.collision_detected\n }\n reward = self.compute_reward(obs['achieved_goal'], self.goal, info)\n return obs, reward, False, info\n else:\n clipped_action = np.clip(action, self.reduced_action_space.low, self.reduced_action_space.high)\n cur_z = self._get_obs()[\"achieved_goal\"][2]\n complete_action = np.concatenate((clipped_action, [2 * (0.5 - cur_z), 0]))\n obs, reward, __, info = super(FetchReachAvoidEnv, self).step(complete_action)\n self.goal_reached = info[\"is_success\"]\n info['is_collision'] = self.collision_detected\n return obs, reward, False, info\n" ]
[ [ "numpy.concatenate", "numpy.square", "numpy.array", "numpy.sin", "numpy.copy", "numpy.exp", "numpy.random.uniform", "numpy.cos", "numpy.clip" ] ]
canggihpw/mlshow
[ "86a7368a3acffc4d3f944b57cc0fa241731aa4db" ]
[ "lib/svm.py" ]
[ "import streamlit as st\n\nfrom lib._plot import plot_decision_regions\nfrom sklearn.svm import SVC\n\ndef svm(X_train, X_test, y_train, y_test,label):\n pil = st.sidebar.radio(\"\",(\"Result\",\"Documentation\"))\n if pil == \"Result\":\n # Controller\n gammaval = st.sidebar.slider('gamma', 0.1, 1.0, 1.0)\n cval = st.sidebar.selectbox(\"C\",(1, 10, 100, 1000),0)\n kernel = st.sidebar.selectbox(\"Kernel\",('linear', 'poly','rbf','sigmoid'),0)\n degree = st.sidebar.number_input(\"Degree (d)\",1)\n coef0 = st.sidebar.number_input(\"Coef0 (r)\",0.0)\n\n clf = SVC(gamma=gammaval, C =cval, kernel=kernel, degree=degree, coef0=coef0,probability=True)\n clf.fit(X_train, y_train)\n\n st.title(\"SVM Classification Result\")\n plot_decision_regions(X_test,y_test,clf,len(y_train),label)\n elif pil == \"Documentation\":\n st.title(\"SVM Documentation\")\n documentation()\n\ndef documentation():\n st.header(\"A. Main theoretical expression\")\n \n st.subheader(\"Basic Decision Rule\")\n st.latex(r'\\mathbf{w} \\cdot \\mathbf{u} + b \\geq 0, \\textnormal{then } \\oplus')\n\n st.subheader(\"Gutter\")\n st.latex(r'y_i(\\mathbf{x}_i \\cdot \\mathbf{w} + b) - 1 = 0, \\textnormal{for } \\mathbf{x}_i \\textnormal{ in gutter}')\n\n st.subheader(\"Boundary Width\")\n st.latex(r'(\\mathbf{x}_{\\oplus} - \\mathbf{x}_{\\ominus}) \\cdot \\frac{\\mathbf{w}} {\\| \\mathbf{w} \\|} = \\frac{2}{\\| \\mathbf{w} \\|}')\n\n st.subheader(\"Optimal w\")\n st.latex(r'\\mathbf{w} = \\sum_i \\alpha_i y_i \\mathbf{x}_i')\n\n st.subheader(\"Objective Function Optimal\")\n st.latex(r'L = \\sum_i \\alpha_i - \\frac{1}{2}\\sum_i\\sum_j \\alpha_i \\alpha_j y_i y_j \\mathbf{x}_i \\cdot \\mathbf{x}_j')\n\n st.subheader(\"Optimal Decision Rule\")\n st.latex(r'\\sum_i \\alpha_i y_i \\mathbf{x}_i \\cdot \\mathbf{v} + b \\geq 0, \\textnormal{then } \\oplus')\n\n st.subheader(\"Kernel Function\")\n st.latex(r'K(\\mathbf{x}_i,\\mathbf{x}_j) = \\phi(\\mathbf{x}_i) \\cdot \\phi(\\mathbf{x}_j)')\n\n 
#------------------\n\n st.header(\"B. Implementation\")\n st.subheader(\"Linear kernel\")\n st.latex(r'K(\\mathbf{x}_i,\\mathbf{x}_j) = \\langle \\mathbf{x}_i,\\mathbf{x}_j \\rangle')\n\n st.subheader(\"Polynomial kernel\")\n st.latex(r'K(\\mathbf{x}_i,\\mathbf{x}_j) = (\\gamma \\langle \\mathbf{x}_i,\\mathbf{x}_j \\rangle + r)^d')\n\n st.subheader(\"RBF kernel\")\n st.latex(r'K(\\mathbf{x}_i,\\mathbf{x}_j) = e^{-\\gamma \\| \\mathbf{x}_i - \\mathbf{x}_j \\|^2}')\n\n st.subheader(\"Sigmoid kernel\")\n st.latex(r'K(\\mathbf{x}_i,\\mathbf{x}_j) = \\tanh(\\gamma \\langle \\mathbf{x}_i,\\mathbf{x}_j \\rangle + r)')\n\n\n #------------------\n\n st.header(\"C. Parameter Description\")\n st.markdown('''\n |parameters|description|\n |-----|-----|\n |gamma|defines how much influence a single training example has. The larger gamma is, the closer other examples must be to be affected.|\n |C|The parameter C, common to all SVM kernels, trades off misclassification of training examples against simplicity of the decision surface. A low C makes the decision surface smooth, while a high C aims at classifying all training examples correctly.|\n |kernel|linear,poly,rbf,sigmoid|\n |degree|degree of polynomial kernel|\n |coef0|independent coefficient in polynomial & sigmoid kernel|\n \n ''')" ]
[ [ "sklearn.svm.SVC" ] ]
zfit/benchmarks
[ "9d76ecce9514438113ebb7a8e487f225e905c0a8" ]
[ "src/wofz.py" ]
[ "# /////////////////////////////////////////////////////////////////////////////\n# //\n# // DATE\n# // 06/22/2015\n# //\n# // AUTHORS\n# // Hannes Bartosik, Adrian Oeftiger\n# //\n# // DESCRIPTION\n# // FADDEEVA error function for GPU in CUDA.\n# // This file is intended to be used as a\n# // preamble to depending kernels, e.g. in PyCUDA\n# // via ElementwiseKernel(..., preamble=open( <this_file> ).read()).\n# //\n# /////////////////////////////////////////////////////////////////////////////\n\n# include <math.h>\nimport time\n\nerrf_const = 1.12837916709551\nxLim = 5.33\nyLim = 4.29\n\nimport tensorflow.experimental.numpy as znp\n\nznp.experimental_enable_numpy_behavior()\n\n# from tensorflow.experimental.numpy import *\nimport tensorflow as tf\nfrom math import sqrt, exp, cos, sin\n\n\n@tf.function\ndef wofz2(in_real, in_imag):\n # /**\n # this function calculates the double precision complex error function\n # based on the algorithm of the FORTRAN function written at CERN by\n # K. Koelbig, Program C335, 1970.\n #\n # See also M. Bassetti and G.A. Erskine, \"Closed expression for the\n # electric field of a two-dimensional Gaussian charge density\",\n # CERN-ISR-TH/80-06.\n # */\n\n x = abs(in_real)\n y = abs(in_imag)\n\n cond = znp.logical_and(y < yLim, x < xLim)\n nevents = tf.shape(x)[0]\n\n def if_true():\n # Rx = znp.zeros([nevents, 33], dtype=znp.float64)\n # Ry = znp.zeros([nevents, 33], dtype=znp.float64)\n q = (1.0 - y / yLim) * sqrt(1.0 - (x / xLim) * (x / xLim))\n h = 1.0 / (3.2 * q)\n nc = 7 + tf.cast(23.0 * q, dtype=znp.int32)\n xl = pow(h, 1. 
- nc)\n xh = y + 0.5 / h\n yh = x\n nu = 10 + tf.cast(21.0 * q, dtype=znp.int32)\n Rx = znp.zeros_like(x, dtype=znp.float64)\n Ry = znp.zeros_like(y, dtype=znp.float64)\n n = nu\n\n n2 = nc\n\n # rxs = []\n # rys = []\n\n Sx = znp.zeros_like(x, dtype=znp.float64)\n Sy = znp.zeros_like(x, dtype=znp.float64)\n while znp.any(n > 0):\n n = znp.maximum(n, 0)\n Tx = xh + n * Rx\n Ty = yh - n * Ry\n Tn = Tx * Tx + Ty * Ty\n # indices = znp.asarray([tf.range(nevents), n - 1])\n # Rx = tf.transpose(Rx)\n # Ry = tf.transpose(Ry)\n # Rx = tf.tensor_scatter_nd_update(Rx, [n - 1], (0.5 * Tx / Tn))\n # Ry = tf.tensor_scatter_nd_update(Ry, [n - 1], (0.5 * Ty / Tn))\n # Rx = tf.transpose(Rx)\n # Ry = tf.transpose(Ry)\n Rx = (0.5 * Tx / Tn)\n Ry = (0.5 * Ty / Tn)\n\n Saux = Sx + xl\n indices = znp.stack([n - 1, tf.range(n.shape[0])], axis=1)\n mask = tf.cast(n2 == n, dtype=float64)\n rx_n1 = Rx * mask\n ry_n1 = Ry * mask\n Sx_tmp = rx_n1 * Saux - ry_n1 * Sy\n Sy_tmp = rx_n1 * Sy + ry_n1 * Saux\n cond_inside = n > 0\n Sx = znp.where(cond_inside, Sx_tmp, Sx)\n Sy = znp.where(cond_inside, Sy_tmp, Sy)\n xl = h * xl\n n -= 1\n n2 = tf.maximum(n, n2 - 1)\n print(znp.max(n))\n\n # Rx = znp.stack(rxs)\n # Ry = znp.stack(rys)\n # # Rx = tf.transpose(Rx)\n # # Ry = tf.transpose(Ry)\n #\n #\n # n = nc\n #\n # while znp.any(n > 0):\n # n = znp.maximum(n, 0)\n # Saux = Sx + xl\n # indices = znp.stack([n - 1, tf.range(n.shape[0])], axis=1)\n # rx_n1 = tf.gather_nd(Rx, indices)\n # ry_n1 = tf.gather_nd(Ry, indices)\n # Sx = rx_n1 * Saux - ry_n1 * Sy\n # Sy = rx_n1 * Sy + ry_n1 * Saux\n # xl = h * xl\n # n -= 1\n\n Wx = errf_const * Sx\n Wy = errf_const * Sy\n return Wx, Wy\n\n def if_false():\n\n xh = y\n yh = x\n rx = znp.zeros_like(x, dtype=znp.float64)\n ry = znp.zeros_like(y, dtype=znp.float64)\n for n in tf.range(1, 10):\n Tx = xh + n * rx\n Ty = yh - n * ry\n Tn = Tx ** 2 + Ty ** 2\n rx = 0.5 * Tx / Tn\n ry = 0.5 * Ty / Tn\n\n Wx = errf_const * rx\n Wy = errf_const * ry\n return Wx, Wy\n\n 
# if y == 0.:\n # Wx = exp(-x * x)\n\n cond2 = in_imag < 0.\n\n def if_true2(Wx, Wy):\n Wx = 2.0 * exp(y * y - x * x) * cos(2.0 * x * y) - Wx\n Wy = - 2.0 * exp(y * y - x * x) * sin(2.0 * x * y) - Wy\n Wy = -Wy * znp.sign(in_real)\n return Wx, Wy\n\n def if_false2(Wx, Wy):\n return Wx, Wy * znp.sign(in_real)\n\n value = znp.where(cond, if_true(), if_false())\n true2 = if_true2(*tf.unstack(value))\n false2 = if_false2(*tf.unstack(value))\n value = znp.where(cond2, true2, false2)\n return value[0] + 1j * value[1]\n\n\nerrf_const = 1.12837916709551\nxLim = 5.33\nyLim = 4.29\n#\n# __device__ void wofz(double in_real, double in_imag,\n# double* out_real, double* out_imag)\n\n# /**\n# this function calculates the double precision complex error function\n# based on the algorithm of the FORTRAN function written at CERN by\n# K. Koelbig, Program C335, 1970.\n# See also M. Bassetti and G.A. Erskine, \"Closed expression for the\n# electric field of a two-dimensional Gaussian charge density\",\n# CERN-ISR-TH/80-06.\n# */\n\n# int n, nc, nu\n# double h, q, Saux, Sx, Sy, Tn, Tx, Ty, Wx, Wy, xh, xl, x, yh, y\nimport numba\n\n\n@numba.vectorize()\ndef wofz(in_real, in_imag) -> complex:\n Rx = []\n Ry = []\n\n x = abs(in_real)\n y = abs(in_imag)\n\n if (y < yLim and x < xLim):\n q = (1.0 - y / yLim) * sqrt(1.0 - (x / xLim) * (x / xLim))\n h = 1.0 / (3.2 * q)\n nc = 7 + int(23.0 * q)\n xl = pow(h, 1. 
- nc)\n xh = y + 0.5 / h\n yh = x\n nu = 10 + int(21.0 * q)\n Rx[nu] = 0.\n Ry[nu] = 0.\n n = nu\n while (n > 0):\n Tx = xh + n * Rx[n]\n Ty = yh - n * Ry[n]\n Tn = Tx * Tx + Ty * Ty\n Rx[n - 1] = 0.5 * Tx / Tn\n Ry[n - 1] = 0.5 * Ty / Tn\n n -= 1\n\n Sx = 0.\n Sy = 0.\n n = nc\n while n > 0:\n Saux = Sx + xl\n Sx = Rx[n - 1] * Saux - Ry[n - 1] * Sy\n Sy = Rx[n - 1] * Sy + Ry[n - 1] * Saux\n xl = h * xl\n n -= 1\n\n Wx = errf_const * Sx\n Wy = errf_const * Sy\n\n else:\n xh = y\n yh = x\n Rx[0] = 0.\n Ry[0] = 0.\n for n in tf.range(9, 0, -1):\n Tx = xh + n * Rx[0]\n Ty = yh - n * Ry[0]\n Tn = Tx * Tx + Ty * Ty\n Rx[0] = 0.5 * Tx / Tn\n Ry[0] = 0.5 * Ty / Tn\n\n Wx = errf_const * Rx[0]\n Wy = errf_const * Ry[0]\n\n if (y == 0.):\n Wx = exp(-x * x)\n\n if (in_imag < 0.):\n Wx = 2.0 * exp(y * y - x * x) * cos(2.0 * x * y) - Wx\n Wy = - 2.0 * exp(y * y - x * x) * sin(2.0 * x * y) - Wy\n if (in_real > 0.):\n Wy = -Wy\n elif (in_real < 0.):\n Wy = -Wy\n\n\nif __name__ == '__main__':\n import scipy.special\n import numpy as np\n\n wofz(\n # znp.array([10.], dtype=znp.float64), znp.array([5.], dtype=znp.float64))\n *np.random.uniform(-10, 10, (2, 1000000)))\n print(\"compiled\")\n start = time.time()\n x = np.random.uniform(-10, 10, (2, 1000000))\n n = 10\n for _ in range(n):\n wofz_our = wofz(\n # znp.array([10.], dtype=znp.float64), znp.array([5.], dtype=znp.float64))\n *x\n )\n print('tensorflow', time.time() - start)\n x = x[0] + 1j * x[1]\n start = time.time()\n for _ in range(n):\n y = scipy.special.wofz(x)\n print('scipy', time.time() - start)\n\n print(abs(wofz_our - y), znp.std(wofz_our - y))\n" ]
[ [ "tensorflow.experimental.numpy.experimental_enable_numpy_behavior", "tensorflow.shape", "tensorflow.experimental.numpy.where", "tensorflow.range", "tensorflow.experimental.numpy.sign", "tensorflow.experimental.numpy.std", "tensorflow.unstack", "tensorflow.experimental.numpy.logical_and", "tensorflow.experimental.numpy.maximum", "tensorflow.experimental.numpy.max", "numpy.random.uniform", "tensorflow.experimental.numpy.zeros_like", "tensorflow.maximum", "tensorflow.experimental.numpy.any", "tensorflow.cast" ] ]
i7p9h9/swishnet
[ "5f2bd9920e980ceaaaca1ae302da5b57d3eb67ff" ]
[ "swishnet.py" ]
[ "from keras import models\nfrom keras import layers\n\n\n# causal conv\ndef __causal_gated_conv1D(x=None, filters=16, length=6, strides=1):\n def causal_gated_conv1D(x, filters, length, strides):\n x_in_1 = layers.Conv1D(filters=filters // 2,\n kernel_size=length,\n dilation_rate=strides, # it's correct, use this instead strides for shape matching\n strides=1,\n padding=\"causal\")(x)\n x_sigmoid = layers.Activation(activation=\"sigmoid\")(x_in_1)\n\n x_in_2 = layers.Conv1D(filters=filters // 2,\n kernel_size=length,\n dilation_rate=strides, # it's correct, use this instead strides for shape matching\n strides=1,\n padding=\"causal\")(x)\n x_tanh = layers.Activation(activation=\"tanh\")(x_in_2)\n\n x_out = layers.Multiply()([x_sigmoid, x_tanh])\n\n return x_out\n\n if x is None:\n return lambda _x: causal_gated_conv1D(x=_x, filters=filters, length=length, strides=strides)\n else:\n return causal_gated_conv1D(x=x, filters=filters, length=length, strides=strides)\n\n\ndef SwishNet(input_shape, classes, width_multiply=1):\n _x_in = layers.Input(shape=input_shape)\n\n # 1 block\n _x_up = __causal_gated_conv1D(filters=16 * width_multiply, length=3)(_x_in)\n _x_down = __causal_gated_conv1D(filters=16 * width_multiply, length=6)(_x_in)\n _x = layers.Concatenate()([_x_up, _x_down])\n\n # 2 block\n _x_up = __causal_gated_conv1D(filters=8 * width_multiply, length=3)(_x)\n _x_down = __causal_gated_conv1D(filters=8 * width_multiply, length=6)(_x)\n _x = layers.Concatenate()([_x_up, _x_down])\n\n # 3 block\n _x_up = __causal_gated_conv1D(filters=8 * width_multiply, length=3)(_x)\n _x_down = __causal_gated_conv1D(filters=8 * width_multiply, length=6)(_x)\n _x_concat = layers.Concatenate()([_x_up, _x_down])\n\n _x = layers.Add()([_x, _x_concat])\n\n # 4 block\n _x_loop1 = __causal_gated_conv1D(filters=16 * width_multiply, length=3, strides=3)(_x)\n _x = layers.Add()([_x, _x_loop1])\n\n # 5 block\n _x_loop2 = __causal_gated_conv1D(filters=16 * width_multiply, length=3, 
strides=2)(_x)\n _x = layers.Add()([_x, _x_loop2])\n\n # 6 block\n _x_loop3 = __causal_gated_conv1D(filters=16 * width_multiply, length=3, strides=2)(_x)\n _x = layers.Add()([_x, _x_loop3])\n\n # 7 block\n _x_forward = __causal_gated_conv1D(filters=16 * width_multiply, length=3, strides=2)(_x)\n\n # 8 block\n _x_loop4 = __causal_gated_conv1D(filters=32 * width_multiply, length=3, strides=2)(_x)\n\n # output\n _x = layers.Concatenate()([_x_loop2, _x_loop3, _x_forward, _x_loop4])\n _x = layers.Conv1D(filters=classes, kernel_size=1)(_x)\n _x = layers.GlobalAveragePooling1D()(_x)\n _x = layers.Activation(\"softmax\")(_x)\n\n model = models.Model(inputs=_x_in, outputs=_x)\n\n return model\n\n\ndef SwishNetWide(input_shape, classes):\n return SwishNet(input_shape=input_shape, classes=classes, width_multiply=2)\n\n\ndef SwishnetSlim(input_shape, classes):\n return SwishNet(input_shape=input_shape, classes=classes, width_multiply=1)\n\n\nif __name__ == \"__main__\":\n import numpy as np\n\n net = SwishNet(input_shape=(16, 20), classes=2)\n net.summary()\n print(net.predict(np.random.randn(2, 16, 20)))\n\n" ]
[ [ "numpy.random.randn" ] ]
ChayanBansal/tapas
[ "32f90d608f59a203fa4e0a8470c89b2cd8305ba6" ]
[ "tapas/models/tapas_pretraining_model.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Lint as: python3\n\"\"\"TAPAS bert model.\"\"\"\n\nfrom typing import Iterable, Text, Optional, Set\nfrom tapas.datasets import dataset\nfrom tapas.datasets import table_dataset\nfrom tapas.models.bert import modeling\nfrom tapas.models.bert import optimization\nfrom tapas.models.bert import table_bert\n\nimport tensorflow.compat.v1 as tf\n\n\ndef _gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return output_tensor\n\n\ndef _get_next_sentence_output(bert_config, input_tensor, labels):\n \"\"\"Get loss and log probs for the next sentence prediction.\"\"\"\n\n # Simple binary classification. Note that 0 is \"next sentence\" and 1 is\n # \"random sentence\". 
This weight matrix is not used after pre-training.\n with tf.variable_scope(\"cls/seq_relationship\"):\n output_weights = tf.get_variable(\n \"output_weights\",\n shape=[2, bert_config.hidden_size],\n initializer=modeling.create_initializer(bert_config.initializer_range))\n output_bias = tf.get_variable(\n \"output_bias\", shape=[2], initializer=tf.zeros_initializer())\n\n logits = tf.nn.bias_add(\n tf.matmul(input_tensor, output_weights, transpose_b=True), output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n labels = tf.reshape(labels, [-1])\n one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, per_example_loss, log_probs)\n\n\ndef _get_masked_lm_output(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights):\n \"\"\"Get loss and log probs for the masked LM.\"\"\"\n input_tensor = _gather_indexes(input_tensor, positions)\n\n with tf.variable_scope(\"cls/predictions\"):\n # We apply one more non-linear transformation before the output layer.\n # This matrix is not used after pre-training.\n with tf.variable_scope(\"transform\"):\n input_tensor = tf.layers.dense(\n input_tensor,\n units=bert_config.hidden_size,\n activation=modeling.get_activation(bert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n bert_config.initializer_range))\n input_tensor = modeling.layer_norm(input_tensor)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n output_bias = tf.get_variable(\n \"output_bias\",\n shape=[bert_config.vocab_size],\n initializer=tf.zeros_initializer())\n logits = tf.nn.bias_add(\n tf.matmul(input_tensor, output_weights, transpose_b=True), output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n predictions = tf.argmax(logits, axis=-1)\n\n label_ids = tf.reshape(label_ids, [-1])\n label_weights = 
tf.reshape(label_weights, [-1])\n\n one_hot_labels = tf.one_hot(\n label_ids, depth=bert_config.vocab_size, dtype=tf.float32)\n\n # The `positions` tensor might be zero-padded (if the sequence is too\n # short to have the maximum number of predictions). The `label_weights`\n # tensor has a value of 1.0 for every real prediction and 0.0 for the\n # padding predictions.\n per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])\n numerator = tf.reduce_sum(label_weights * per_example_loss)\n denominator = tf.reduce_sum(label_weights) + 1e-5\n loss = numerator / denominator\n\n return (loss, per_example_loss, log_probs, predictions)\n\n\ndef model_fn_builder(bert_config,\n init_checkpoint,\n learning_rate,\n num_train_steps,\n num_warmup_steps,\n use_tpu,\n disabled_features = None,\n disable_position_embeddings = False):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n del labels, params # Unused.\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features):\n tf.logging.info(\" name = %s, shape = %s\", name, features[name].shape)\n\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n next_sentence_labels = features[\"next_sentence_labels\"]\n\n model = table_bert.create_model(\n features=features,\n mode=mode,\n bert_config=bert_config,\n disabled_features=disabled_features,\n disable_position_embeddings=disable_position_embeddings)\n\n (masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs,\n masked_lm_predictions) = _get_masked_lm_output(bert_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights)\n\n (next_sentence_loss, next_sentence_example_loss,\n next_sentence_log_probs) = _get_next_sentence_output(\n bert_config, model.get_pooled_output(), 
next_sentence_labels)\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n init_tvars = [\n tvar for tvar in tvars if \"position_embeddings\" not in tvar.name\n ]\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(init_tvars,\n init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(total_loss, learning_rate,\n num_train_steps,\n num_warmup_steps, use_tpu)\n\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n 
weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n next_sentence_log_probs = tf.reshape(\n next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])\n next_sentence_predictions = tf.argmax(\n next_sentence_log_probs, axis=-1, output_type=tf.int32)\n next_sentence_labels = tf.reshape(next_sentence_labels, [-1])\n next_sentence_accuracy = tf.metrics.accuracy(\n labels=next_sentence_labels, predictions=next_sentence_predictions)\n next_sentence_mean_loss = tf.metrics.mean(\n values=next_sentence_example_loss)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n \"next_sentence_accuracy\": next_sentence_accuracy,\n \"next_sentence_loss\": next_sentence_mean_loss,\n }\n\n eval_metrics = (metric_fn, [\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights, next_sentence_example_loss,\n next_sentence_log_probs, next_sentence_labels\n ])\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"masked_lm_predictions\": masked_lm_predictions,\n }\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n predictions=predictions,\n scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\"Unsupported mode: %s\" % mode)\n return output_spec\n\n return model_fn\n\n\ndef input_fn(\n name,\n file_patterns,\n data_format,\n compression_type,\n is_training,\n max_seq_length,\n max_predictions_per_seq,\n params,\n):\n \"\"\"Returns an input_fn compatible with the tf.estimator API.\"\"\"\n parse_example_fn = table_dataset.parse_table_examples(\n max_seq_length=max_seq_length,\n max_predictions_per_seq=max_predictions_per_seq,\n task_type=table_dataset.TableTask.PRETRAINING,\n add_aggregation_function_id=False,\n add_classification_labels=False,\n 
add_answer=False,\n include_id=False,\n add_candidate_answers=False,\n max_num_candidates=0,\n params=params)\n ds = dataset.read_dataset(\n parse_example_fn,\n name=name,\n file_patterns=file_patterns,\n data_format=data_format,\n compression_type=compression_type,\n is_training=is_training,\n params=params)\n return ds\n" ]
[ [ "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.train.Scaffold", "tensorflow.compat.v1.one_hot", "tensorflow.compat.v1.logging.info", "tensorflow.compat.v1.metrics.mean", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.zeros_initializer", "tensorflow.compat.v1.matmul", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.estimator.tpu.TPUEstimatorSpec", "tensorflow.compat.v1.metrics.accuracy", "tensorflow.compat.v1.gather", "tensorflow.compat.v1.argmax", "tensorflow.compat.v1.reduce_sum", "tensorflow.compat.v1.train.init_from_checkpoint", "tensorflow.compat.v1.nn.log_softmax", "tensorflow.compat.v1.range", "tensorflow.compat.v1.reduce_mean" ] ]
richmanbtc/bot_snippets
[ "a498cdb97f8568c1e05c117462a85b877d7dcf7d" ]
[ "fear_greedy.py" ]
[ "import pandas as pd\nimport requests\nimport json\n\ndef fetch_fear_greedy():\n url = 'https://api.alternative.me/fng/?limit=3000'\n df = pd.DataFrame(json.loads(requests.get(url).text)['data'])\n df = df[df['time_until_update'].isna()]\n df = df.drop(columns=['time_until_update', 'value_classification'])\n df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s', utc=True)\n df['value'] = df['value'].astype('float')\n df = df.sort_values('timestamp')\n df = df.set_index('timestamp')\n df = df.rename(columns={ 'value': 'fear_greedy_index' })\n return df\n" ]
[ [ "pandas.to_datetime" ] ]
Omodaka9375/Custom-Mask-Detection-Model
[ "864ba58c50b4baeddbd5bbc6e1c6c22c2a409b98" ]
[ "detect_mask_on_image.py" ]
[ "# python detect_mask_image.py --image examples/example_01.png\n\n# import the necessary packages\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.models import load_model\nimport numpy as np\nimport argparse\nimport cv2\nimport os\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True,\n\thelp=\"path to input image\")\nap.add_argument(\"-f\", \"--face\", type=str,\n\tdefault=\"face_detector\",\n\thelp=\"path to face detector model directory\")\nap.add_argument(\"-m\", \"--model\", type=str,\n\tdefault=\"./models/mask_detector_98.h5\",\n\thelp=\"path to trained face mask detector model\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n\thelp=\"minimum probability to filter weak detections\")\nargs = vars(ap.parse_args())\n\n# load our serialized face detector model from disk\nprint(\"[INFO] loading face detector model...\")\nprototxtPath = os.path.sep.join([args[\"face\"], \"deploy.prototxt\"])\nweightsPath = os.path.sep.join([args[\"face\"],\n\t\"res10_300x300_ssd_iter_140000.caffemodel\"])\nnet = cv2.dnn.readNet(prototxtPath, weightsPath)\n\n# load the face mask detector model from disk\nprint(\"[INFO] loading face mask detector model...\")\nmodel = load_model(args[\"model\"])\n\ntry: \n # creating a folder named data \n if not os.path.exists('result'): \n os.makedirs('result') \n \n# if not created then raise error \nexcept OSError: \n print ('Error: Creating directory of data') \n\n\n# load the input image from disk, clone it, and grab the image spatial\n# dimensions\nimage = cv2.imread(args[\"image\"])\n(h, w) = image.shape[:2]\n\n# construct a blob from the image\nblob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),\n\t(104.0, 177.0, 123.0))\n\n# pass the blob through the network and obtain the face detections\nprint(\"[INFO] computing face 
detections...\")\nnet.setInput(blob)\ndetections = net.forward()\n\n# loop over the detections\nfor i in range(0, detections.shape[2]):\n\t# extract the confidence (i.e., probability) associated with\n\t# the detection\n\tconfidence = detections[0, 0, i, 2]\n\n\t# filter out weak detections by ensuring the confidence is\n\t# greater than the minimum confidence\n\tif confidence > args[\"confidence\"]:\n\t\t# compute the (x, y)-coordinates of the bounding box for\n\t\t# the object\n\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t# ensure the bounding boxes fall within the dimensions of\n\t\t# the frame\n\t\t(startX, startY) = (max(0, startX), max(0, startY))\n\t\t(endX, endY) = (min(w - 1, endX), min(h - 1, endY))\n\n\t\t# extract the face ROI, convert it from BGR to RGB channel\n\t\t# ordering, resize it to 224x224, and preprocess it\n\t\tface = image[startY:endY, startX:endX]\n\t\tface = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n\t\tface = cv2.resize(face, (224, 224))\n\t\tface = img_to_array(face)\n\t\tface = preprocess_input(face)\n\t\tface = np.expand_dims(face, axis=0)\n\n\t\t# pass the face through the model to determine if the face\n\t\t# has a mask or not\n\t\t(mask, withoutMask) = model.predict(face)[0]\n\n\t\t# determine the class label and color we'll use to draw\n\t\t# the bounding box and text\n\t\tlabel = \"Mask\" if mask > withoutMask else \"No Mask\"\n\t\tcolor = (0, 255, 0) if label == \"Mask\" else (0, 0, 255)\n\n\t\t# include the probability in the label\n\t\tlabel = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\n\n\t\t# display the label and bounding box rectangle on the output\n\t\t# frame\n\t\tcv2.putText(image, label, (startX, startY - 10),\n\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\n\t\tcv2.rectangle(image, (startX, startY), (endX, endY), color, 2)\n\n\n# show the output image\nname = args[\"image\"][-1]\ncv2.imwrite(\"result/\" + name + \".jpg\", 
image)\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.array", "tensorflow.keras.applications.mobilenet_v2.preprocess_input", "tensorflow.keras.models.load_model", "tensorflow.keras.preprocessing.image.img_to_array", "numpy.expand_dims" ] ]
anandharaju/Echelon_TF2
[ "4349b8bcc46ca90b277900b2c6f85521c2f4e5df" ]
[ "src/trend/activation_trend_identification.py" ]
[ "import numpy as np\nfrom keras.models import Model\nfrom keras.models import load_model, model_from_json\nfrom os.path import join\nimport config.settings as cnst\nimport plots.plots as plots\nfrom predict.predict import predict_byte, predict_byte_by_section\nfrom predict.predict_args import DefaultPredictArguments, Predict as pObj\nfrom .ati_args import SectionActivationDistribution\nimport pandas as pd\nfrom analyzers.collect_exe_files import get_partition_data, store_partition_data\nimport gc\nimport logging\nimport pefile\n\n\ndef find_qualified_sections(sd, trend, common_trend, support, fold_index):\n \"\"\" Function for training Tier-1 model with whole byte sequence data\n Args:\n sd: object to hold activation distribution of PE sections\n trend: plain activation trend found by core ATI process\n common_trend: not used here\n support: not used here\n fold_index: current fold index of cross validation\n Returns:\n q_sections_by_q_criteria: a dict with q_criterion found for each percentile supplied and\n their respective list of sections qualified.\n \"\"\"\n btrend = trend.loc[\"BENIGN_ACTIVATION_MAGNITUDE\"]\n mtrend = trend.loc[\"MALWARE_ACTIVATION_MAGNITUDE\"]\n\n # Averaging based on respective benign and malware population\n btrend = btrend / sd.b1_b_truth_count\n mtrend = mtrend / sd.b1_m_truth_count\n\n btrend[btrend == 0] = 1\n mtrend[mtrend == 0] = 1\n\n malfluence = mtrend / btrend\n benfluence = btrend / mtrend\n\n mal_q_criteria_by_percentiles = np.percentile(malfluence, q=cnst.PERCENTILES)\n ben_q_criteria_by_percentiles = np.percentile(benfluence, q=cnst.PERCENTILES)\n\n q_sections_by_q_criteria = {}\n for i, _ in enumerate(cnst.PERCENTILES):\n # Uncomment [:50] for unqualified sections. 
Set percentile to 48\n q_sections_by_q_criteria[mal_q_criteria_by_percentiles[i]] = np.unique(np.concatenate([trend.columns[malfluence > mal_q_criteria_by_percentiles[i]], trend.columns[benfluence > ben_q_criteria_by_percentiles[i]]])) # [:50]\n if i == 0: # Do once for lowest percentile\n list_qsec = np.concatenate([trend.columns[malfluence > mal_q_criteria_by_percentiles[i]], trend.columns[benfluence > ben_q_criteria_by_percentiles[i]]]) # [:50]\n list_avg_act_mag_signed = np.concatenate([malfluence[malfluence > mal_q_criteria_by_percentiles[i]] * -1, benfluence[benfluence > ben_q_criteria_by_percentiles[i]]]) # [:50]\n\n available_sec = pd.read_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + 'data' + cnst.ESC + 'available_sections.csv', header=None)\n available_sec = list(available_sec.iloc[0])\n sec_emb = pd.read_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + 'data' + cnst.ESC + 'section_embeddings.csv')\n\n list_qsec_id = []\n list_qsec_emb = []\n for q in list_qsec:\n try:\n list_qsec_emb.append(sec_emb[q][0])\n list_qsec_id.append(available_sec.index(q) + 1)\n except Exception as e:\n if not (cnst.LEAK in str(e) or cnst.PADDING in str(e)):\n logging.debug(\"The section [\"+str(q)+\"] is not present in available_sections.csv/section_embeddings.csv\")\n\n influence = np.concatenate([malfluence[malfluence > mal_q_criteria_by_percentiles[i]], benfluence[benfluence > ben_q_criteria_by_percentiles[i]]])\n qdf = pd.DataFrame([list_qsec, list_qsec_id, list_qsec_emb, list_avg_act_mag_signed, influence], columns=list_qsec, index=['a', 'b', 'c', 'd', 'e'])\n qdf = qdf.transpose().sort_values(by='e', ascending=False).transpose()\n qdf.to_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + 'data' + cnst.ESC + 'qsections_meta_'+str(fold_index)+'.csv', header=None, index=False)\n # print(\"Mal Sections:\", trend.columns[malfluence > mal_q_criteria_by_percentiles[i]])\n # print(\"Ben Sections:\", trend.columns[benfluence > ben_q_criteria_by_percentiles[i]])\n logging.info(\"Qsections found - \" + 
str(len(q_sections_by_q_criteria.keys())))\n logging.info(q_sections_by_q_criteria.keys())\n return q_sections_by_q_criteria\n\n\ndef parse_pe_pkl(file_index, file_id, fjson, unprocessed):\n \"\"\" Function to parse pickle file to find the boundaries of PE sections in a sample's pickle representation\n Args:\n file_index: PE sample index\n file_id: PE name\n fjson: pickle data representation of PE sample\n unprocessed: keeps track of count of samples not processed properly\n Returns:\n section_bounds: PE section boundaries\n unprocessed: keeps track of count of samples not processed properly\n file_byte_size: size of full sample\n \"\"\"\n section_bounds = []\n file_byte_size = None\n max_section_end_offset = 0\n try:\n # file_byte_size = fjson['size_byte']\n with open(cnst.RAW_SAMPLE_DIR + file_id, 'rb') as f:\n file_byte_size = len(list(f.read()))\n pe = pefile.PE(cnst.RAW_SAMPLE_DIR + file_id)\n for pkl_section in pe.sections:\n section_bounds.append(\n (pkl_section.Name.strip(b'\\x00').decode(\"utf-8\").strip(),\n pkl_section.PointerToRawData,\n pkl_section.PointerToRawData + pkl_section.SizeOfRawData))\n if (pkl_section.PointerToRawData + pkl_section.SizeOfRawData) > max_section_end_offset:\n max_section_end_offset = (pkl_section.PointerToRawData + pkl_section.SizeOfRawData)\n\n # Placeholder section \"padding\" - for activations in padding region\n # if max_section_end_offset < fjson[\"size_byte\"]:\n # section_bounds.append((cnst.TAIL, max_section_end_offset + 1, fjson[\"size_byte\"]))\n # section_bounds.append((cnst.PADDING, fjson[\"size_byte\"] + 1, cnst.MAX_FILE_SIZE_LIMIT))\n except Exception as e:\n logging.Exception(\"parse failed . . . 
[FILE INDEX - \" + str(file_index) + \"] [\" + str(file_id) + \"] \")\n unprocessed += 1\n return section_bounds, unprocessed, file_byte_size\n\n\ndef map_act_to_sec(ftype, fmap, sbounds, sd):\n \"\"\"\n Function to map each hidden layer activation found to corresponding PE section\n Params:\n ftype: Benign or Malware\n fmap: Hidden layer activation map\n sbounds: Dict of PE sections and their boundaries\n Return:\n sd: Object to hold computed activation distribution of PE sections\n\n Description of other variables/objects used:\n section_support: Information about how many samples in a given category has a section <Influence by presence>\n activation_histogram: Information about total count of activations occurred in a given section for all samples\n of given category <Influence by activation count>\n activation_magnitude: Information about total sum of magnitude of activations occurred in a given section\n for all samples of given category <Influence by activation strength>\n \"\"\"\n # fmap = fmap // 1 # print(\"FEATURE MAP \", len(feature_map), \" : \\n\", feature_map)\n idx = np.argsort(fmap)[::-1][:len(fmap)] # Sort activations in descending order -- Helpful to find top activations\n if sbounds is not None:\n\n for j in range(0, len(sbounds)):\n section = sbounds[j][0]\n sd.a_section_support[section] = (\n sd.a_section_support[section] + 1) if section in sd.a_section_support.keys() else 1\n if ftype == cnst.BENIGN:\n sd.b_section_support[section] = (\n sd.b_section_support[section] + 1) if section in sd.b_section_support.keys() else 1\n if section not in sd.m_section_support.keys():\n sd.m_section_support[section] = 0\n else:\n if section not in sd.b_section_support.keys():\n sd.b_section_support[section] = 0\n sd.m_section_support[section] = (\n sd.m_section_support[section] + 1) if section in sd.m_section_support.keys() else 1\n\n for current_activation_window in range(0, len(fmap)): # range(0, int(cnst.MAX_FILE_SIZE_LIMIT / cnst.CONV_STRIDE_SIZE)):\n 
section = None\n offset = idx[current_activation_window] * cnst.CONV_WINDOW_SIZE\n act_val = fmap[idx[current_activation_window]]\n\n ######################################################################################\n # Change for Pooling layer based Activation trend - Only Max activation is traced back\n if act_val == 0:\n continue\n ######################################################################################\n for j in range(0, len(sbounds)):\n cur_section = sbounds[j]\n if cur_section[1] <= offset <= cur_section[2]:\n section = cur_section[0]\n break\n\n if section is not None:\n # if \".\" not in section: section = \".\" + section #Same section's name with and without dot are different\n # Sum of Magnitude of Activations\n if section in sd.a_activation_magnitude.keys():\n sd.a_activation_magnitude[section] += act_val\n sd.a_activation_histogram[section] += 1\n if ftype == cnst.BENIGN:\n if sd.b_activation_magnitude[section] is None:\n sd.b_activation_magnitude[section] = act_val\n sd.b_activation_histogram[section] = 1\n else:\n sd.b_activation_magnitude[section] += act_val\n sd.b_activation_histogram[section] += 1\n else:\n if sd.m_activation_magnitude[section] is None:\n sd.m_activation_magnitude[section] = act_val\n sd.m_activation_histogram[section] = 1\n else:\n sd.m_activation_magnitude[section] += act_val\n sd.m_activation_histogram[section] += 1\n else:\n sd.a_activation_magnitude[section] = act_val\n sd.a_activation_histogram[section] = 1\n if ftype == cnst.BENIGN:\n sd.b_activation_magnitude[section] = act_val\n sd.b_activation_histogram[section] = 1\n sd.m_activation_magnitude[section] = None\n sd.m_activation_histogram[section] = None\n else:\n sd.b_activation_magnitude[section] = None\n sd.b_activation_histogram[section] = None\n sd.m_activation_magnitude[section] = act_val\n sd.m_activation_histogram[section] = 1\n else:\n # !!! 
VERIFY ALL OFFSET IS MATCHED AND CHECK FOR LEAKAGE !!!\n # print(\"No matching section found for OFFSET:\", offset)\n sd.a_activation_magnitude[cnst.LEAK] += act_val\n sd.a_activation_histogram[cnst.LEAK] += 1\n if ftype == cnst.BENIGN:\n sd.b_activation_magnitude[cnst.LEAK] += act_val\n sd.b_activation_histogram[cnst.LEAK] += 1\n else:\n sd.m_activation_magnitude[cnst.LEAK] += act_val\n sd.m_activation_histogram[cnst.LEAK] += 1\n return sd\n\n\ndef get_feature_maps(smodel, partition, files):\n \"\"\"\n Function to obtain hidden layer activation (feature) maps using given stunted model\n Params:\n smodel: stunted model to use\n partition: partition for current set of B1 samples under process\n files: IDs of the samples to be processed from the partition\n Returns:\n raw_feature_maps: hidden layer activation (feature) maps\n \"\"\"\n predict_args = DefaultPredictArguments()\n predict_args.verbose = cnst.ATI_PREDICT_VERBOSE\n\n xlen = len(files)\n predict_args.pred_steps = xlen // predict_args.batch_size if xlen % predict_args.batch_size == 0 else xlen // predict_args.batch_size + 1\n\n raw_feature_maps = predict_byte(smodel, files, predict_args)\n return raw_feature_maps\n\n\ndef process_files(stunted_model, args, sd):\n \"\"\"\n Function to process the B1 samples to obtain hidden layer activation maps and trace back their PE sections\n Params:\n stunted_model: Tier-1 model that is stunted up to required hidden layer where activation maps are collected.\n args: contains various config data\n Returns:\n sd: Object to hold computed activation distribution of PE sections\n \"\"\"\n unprocessed = 0\n samplewise_feature_maps = []\n files = args.t2_x_train\n files_type = args.t2_y_train\n\n logging.info(\"FMAP MODULE Total B1 [{0}]\\tGroundTruth [{1}:{2}]\".format(len(args.t2_y_train), len(np.where(args.t2_y_train == cnst.BENIGN)[0]), len(np.where(args.t2_y_train == cnst.MALWARE)[0])))\n\n # file_type = pObj_fmap.ytrue[i] # Using Ground Truth to get trend of actual benign 
and malware files\n # file_whole_bytes = {file[:-4]: args.whole_b1_train_partition[file[:-4]]}\n raw_feature_maps = get_feature_maps(stunted_model, args.whole_b1_train_partition, files)\n del args.whole_b1_train_partition\n gc.collect()\n\n for i in range(0, len(files)):\n section_bounds, unprocessed, fsize = parse_pe_pkl(i, files[i], args.section_b1_train_partition[files[i]], unprocessed)\n if cnst.USE_POOLING_LAYER:\n try:\n pooled_max_1D_map = np.sum(raw_feature_maps[i] == np.amax(raw_feature_maps[i], axis=0), axis=1)[:np.min([cnst.MAX_FILE_CONVOLUTED_SIZE,int(fsize/cnst.CONV_STRIDE_SIZE)+2])]\n sd = map_act_to_sec(files_type[i], pooled_max_1D_map, section_bounds, sd)\n except Exception as e:\n logging.exception(\"$$$$$$$$ \" + str(np.shape(raw_feature_maps[i]))) # .size, files[i], args.whole_b1_train_partition[files[i][:-4]])\n else:\n feature_map = raw_feature_maps[i].sum(axis=1).ravel()\n # feature_map_histogram(feature_map, prediction)\n samplewise_feature_maps.append(feature_map)\n sd = map_act_to_sec(files_type[i], feature_map, section_bounds, sd)\n\n del args.section_b1_train_partition\n gc.collect()\n return sd\n\n # print(section_stat)\n # print(\"Unprocessed file count: \", unprocessed)\n\n # Find activation distribution\n # raw_arr = np.array(np.squeeze(temp_feature_map_list))\n # print(len(raw_arr), raw_arr.max())\n # raw_arr = raw_arr[raw_arr > 0.3]\n # print(len(raw_arr))\n # plt.hist(raw_arr, 10)#range(0, len(raw_arr)))\n # plt.show()\n\n '''for key in act.keys():\n # key = \".\"+key if \".\" not in key else key\n if key is not None and key != '' and key != '.padding':\n with open(\"BENIGN\" if \"benign\" in section_stat_file else \"MALWARE\" + \"_activation_\" + key[1:] + \".csv\", mode='a+') as f:\n f.write(str(act[key]))\n '''\n '''\n #overall_stat.append(section_stat)\n for x in pcs_keys:\n overall_stat_str += str(section_stat[x]) + \",\"\n overall_stat_str = overall_stat_str[:-1] + \"\\n\"\n\n print(\"\\n[Unprocessed Files : \", unprocessed, 
\"] Overall Stats: \", overall_stat_str)\n\n processed_file_count = len(fn_list) - unprocessed\n normalized_stats_str = str(section_stat[\"header\"]/processed_file_count) + \",\" \\\n + str(section_stat[\"text\"]/processed_file_count) + \",\" \\\n + str(section_stat[\"data\"]/processed_file_count) + \",\" \\\n + str(section_stat[\"rsrc\"]/processed_file_count) + \",\" \\\n + str(section_stat[\"pdata\"]/processed_file_count) + \",\" \\\n + str(section_stat[\"rdata\"]/processed_file_count) + \"\\n\"\n #+ str(section_stat[\"padding\"]/processed_file_count) \\\n\n print(\"Normalized Stats: \", normalized_stats_str)\n #plt.show()\n\n with open(section_stat_file, 'w+') as f:\n f.write(overall_stat_str)\n f.write(\"\\n\")\n f.write(normalized_stats_str)\n '''\n\n\ndef change_model(model, new_input_shape=(None, cnst.SAMPLE_SIZE)):\n \"\"\" Function to transfer weights of pre-trained Malconv to the block based model with reduced input shape.\n Args:\n model: An object with required parameters/hyper-parameters for loading, configuring and compiling\n new_input_shape: a value <= Tier-1 model's input shape. Typically, ( Num of Conv. Filters * Size of Conv. 
Stride )\n Returns:\n new_model: new model with reduced input shape and weights updated\n \"\"\"\n model._layers[0].batch_input_shape = new_input_shape\n new_model = model_from_json(model.to_json())\n for layer in new_model.layers:\n try:\n layer.set_weights(model.get_layer(name=layer.name).get_weights())\n # logging.info(\"Loaded and weights set for layer {}\".format(layer.name))\n except Exception as e:\n logging.exception(\"Could not transfer weights for layer {}\".format(layer.name))\n return new_model\n\n\ndef get_stunted_model(args, tier):\n \"\"\" Function to stunt the given model up to the required hidden layer\n based on the supplied hidden layer number\n \"\"\"\n complete_model = load_model(join(args.save_path, args.t1_model_name if tier == 1 else args.t2_model_name))\n complete_model = change_model(complete_model, new_input_shape=(None, cnst.SAMPLE_SIZE))\n # model.summary()\n # redefine model to output right after the sixth hidden layer\n # (ReLU activation layer after convolution - before max pooling)\n\n stunted_outputs = [complete_model.layers[x].output for x in [args.layer_num]]\n # stunted_outputs = complete_model.get_layer('multiply_1').output\n stunted_model = Model(inputs=complete_model.inputs, outputs=stunted_outputs)\n # stunted_model.summary()\n logging.debug(\"Model stunted upto \" + str(stunted_outputs[0]) + \" Layer number passed to stunt:\" + str(args.layer_num))\n return stunted_model\n\n\ndef save_activation_trend(sd):\n \"\"\"\n Function to save the various activation trends identified in CSV format files.\n Params:\n sd: Object containing computed activation distribution of PE sections\n Returns:\n fmaps_trend: used to identify the qualified sections in subsequent steps\n others: Not in use currently\n \"\"\"\n fmaps_trend = pd.DataFrame()\n fmaps_common_trend = pd.DataFrame()\n fmaps_section_support = pd.DataFrame()\n\n fmaps_trend[\"ACTIVATION / HISTOGRAM\"] = [\"ALL_ACTIVATION_MAGNITUDE\", \"BENIGN_ACTIVATION_MAGNITUDE\",\n 
\"MALWARE_ACTIVATION_MAGNITUDE\", \"HISTOGRAM_ALL\", \"HISTOGRAM_BENIGN\",\n \"HISTOGRAM_MALWARE\"]\n fmaps_common_trend[\"COMMON\"] = [\"ALL_ACTIVATION_MAGNITUDE\", \"BENIGN_ACTIVATION_MAGNITUDE\",\n \"MALWARE_ACTIVATION_MAGNITUDE\", \"HISTOGRAM_ALL\", \"HISTOGRAM_BENIGN\",\n \"HISTOGRAM_MALWARE\"]\n fmaps_section_support[\"SUPPORT\"] = [\"PRESENCE_IN_ALL\", \"PRESENCE_IN_BENIGN\", \"PRESENCE_IN_MALWARE\",\n \"SUPPORT_IN_ALL\", \"SUPPORT_IN_BENIGN\", \"SUPPORT_IN_MALWARE\"]\n\n for key in sd.a_activation_histogram.keys():\n fmaps_trend[key] = [int(sd.a_activation_magnitude[key]) if sd.a_activation_magnitude[key] is not None else\n sd.a_activation_magnitude[key],\n int(sd.b_activation_magnitude[key]) if sd.b_activation_magnitude[key] is not None else\n sd.b_activation_magnitude[key],\n int(sd.m_activation_magnitude[key]) if sd.m_activation_magnitude[key] is not None else\n sd.m_activation_magnitude[key],\n int(sd.a_activation_histogram[key]) if sd.a_activation_histogram[key] is not None else\n sd.a_activation_histogram[key],\n int(sd.b_activation_histogram[key]) if sd.b_activation_histogram[key] is not None else\n sd.b_activation_histogram[key],\n int(sd.m_activation_histogram[key]) if sd.m_activation_histogram[key] is not None else\n sd.m_activation_histogram[key]]\n\n if sd.b_activation_histogram[key] is not None and sd.m_activation_histogram[key] is not None:\n fmaps_common_trend[key] = [\n int(sd.a_activation_magnitude[key]) if sd.a_activation_magnitude[key] is not None else\n sd.a_activation_magnitude[key],\n int(sd.b_activation_magnitude[key]) if sd.b_activation_magnitude[key] is not None else\n sd.b_activation_magnitude[key],\n int(sd.m_activation_magnitude[key]) if sd.m_activation_magnitude[key] is not None else\n sd.m_activation_magnitude[key],\n int(sd.a_activation_histogram[key]) if sd.a_activation_histogram[key] is not None else\n sd.a_activation_histogram[key],\n int(sd.b_activation_histogram[key]) if sd.b_activation_histogram[key] is not None else\n 
sd.b_activation_histogram[key],\n int(sd.m_activation_histogram[key]) if sd.m_activation_histogram[key] is not None else\n sd.m_activation_histogram[key]]\n\n if sd.b1_count > 0 and sd.b1_b_truth_count > 0 and sd.b1_m_truth_count > 0:\n for key in sd.a_section_support.keys():\n fmaps_section_support[key] = [sd.a_section_support[key], sd.b_section_support[key],\n sd.m_section_support[key],\n \"{:0.1f}%\".format(sd.a_section_support[key] / sd.b1_count * 100),\n \"{:0.1f}%\".format(sd.b_section_support[key] / sd.b1_b_truth_count * 100),\n \"{:0.1f}%\".format(sd.m_section_support[key] / sd.b1_m_truth_count * 100)]\n\n fmaps_trend.fillna(-1, inplace=True)\n\n fmaps_trend.set_index('ACTIVATION / HISTOGRAM', inplace=True)\n fmaps_common_trend.set_index('COMMON', inplace=True)\n fmaps_section_support.set_index('SUPPORT', inplace=True)\n\n # Store activation trend identified\n fmaps_trend.to_csv(cnst.COMBINED_FEATURE_MAP_STATS_FILE, index=True)\n fmaps_common_trend.to_csv(cnst.COMMON_COMBINED_FEATURE_MAP_STATS_FILE, index=True)\n fmaps_section_support.to_csv(cnst.SECTION_SUPPORT, index=True)\n\n # Drop padding and leak information after saving - not useful for further processing\n try:\n fmaps_trend.drop([cnst.PADDING], axis=1, inplace=True)\n fmaps_common_trend.drop([cnst.PADDING], axis=1, inplace=True)\n fmaps_section_support.drop([cnst.PADDING], axis=1, inplace=True)\n fmaps_trend.drop([cnst.LEAK], axis=1, inplace=True)\n fmaps_common_trend.drop([cnst.LEAK], axis=1, inplace=True)\n except:\n logging.info(\"Proceeding after trying to clean fmap data.\")\n return fmaps_trend, fmaps_common_trend, fmaps_section_support\n\n\ndef start_ati_process(args, fold_index, partition_count, sd):\n \"\"\"\n Function to perform the ATI process over all partitions of B1 training set\n Params:\n args: contains various config data\n fold_index: current fold index of cross validation\n partition_count: count of train B1 partitions\n Returns:\n sd: Object containing computed activation 
distribution of PE sections\n \"\"\"\n args.layer_num = cnst.LAYER_NUM_TO_STUNT\n stunted_model = get_stunted_model(args, tier=1)\n for pcount in range(0, partition_count):\n logging.info(\"ATI for partition: %s\", pcount)\n b1datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + \"b1_train_\" + str(fold_index) + \"_p\" + str(pcount) + \".csv\", header=None)\n args.t2_x_train, args.t2_y_train = b1datadf.iloc[:, 0], b1datadf.iloc[:, 1]\n args.whole_b1_train_partition = get_partition_data(\"b1_train\", fold_index, pcount, \"t1\")\n args.section_b1_train_partition = get_partition_data(\"b1_train\", fold_index, pcount, \"t2\")\n\n sd = process_files(stunted_model, args, sd)\n del stunted_model\n gc.collect()\n return sd\n\n\ndef get_top_act_blocks(top_acts_idx, sbounds, q_sections, whole_bytes):\n \"\"\"\n Function to map the top activation back to Qualified section's byte blocks and collating them to form block dataset\n Params:\n top_acts_idx: act as offsets of top activations in the hidden layer activation (feature) map\n sbounds: Pe section boundaries\n q_sections: qualified sections\n whole_bytes: Entire byte content of a PE sample\n Returns:\n top_blocks: single sequence of all top blocks found\n \"\"\"\n top_blocks = []\n top_acts_idx.sort()\n if sbounds is not None:\n for idx, offset in enumerate(top_acts_idx * cnst.CONV_STRIDE_SIZE):\n for sname, low, upp in sbounds:\n if low <= offset <= upp:\n if sname in q_sections:\n try:\n top_blocks.extend(whole_bytes[offset:offset+cnst.CONV_STRIDE_SIZE])\n break\n except Exception as e:\n logging.exception(\"[MODULE: get_section_id_vector()] Error occurred while mapping section id: %s %s %s %s %s %s\",\n idx, low, offset, upp, sname, sname in q_sections)\n # else:\n # print(sname, sname in q_sections, sname in available_sections)\n else:\n logging.info(\"Sections bounds not available. 
Returning a vector of Zeroes for section id vector.\")\n return top_blocks\n\n\ndef collect_b1_block_dataset(args, fold_index, partition_count, mode, qcnt='X'):\n \"\"\"\n Function to generate the top ativation blocks based dataset from B1 sample set\n Params:\n args: an object containing various config data\n fold_index: current fold index of cross validation\n partition_count: count of B1 train partitions\n mode: phase of data collection - Train / Val / Test\n qcnt: index of the current q_criterion. 'X' for Testing phase\n Returns:\n None (collected data is persisted directly to disk storage)\n \"\"\"\n args.layer_num = cnst.LAYER_NUM_TO_COLLECT_NN_DATASET\n stunted_model = get_stunted_model(args, tier=cnst.TIER_TO_COLLECT_BLOCK_DATA)\n for pcount in range(0, partition_count):\n logging.info(\"Collecting Block data for partition: %s\", pcount)\n b1datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + \"b1_\"+mode+\"_\" + str(fold_index) + \"_p\" + str(pcount) + \".csv\", header=None)\n files, files_type = b1datadf.iloc[:, 0], b1datadf.iloc[:, 1]\n args.whole_b1_partition = get_partition_data(\"b1_\"+mode, fold_index, pcount, \"t1\")\n args.section_b1_partition = get_partition_data(\"b1_\"+mode, fold_index, pcount, \"t2\")\n unprocessed = 0\n logging.info(\"Block Module Total B1 [{0}]\\tGroundTruth [{1}:{2}]\".format(len(files_type), len(np.where(files_type == cnst.BENIGN)[0]), len(np.where(files_type == cnst.MALWARE)[0])))\n nn_predict_args = DefaultPredictArguments()\n nn_predict_args.verbose = cnst.ATI_PREDICT_VERBOSE\n raw_feature_maps = predict_byte_by_section(stunted_model, args.section_b1_partition, files, args.q_sections, None, nn_predict_args)\n logging.info(\"Raw feature maps found.\")\n for i in range(0, len(files)):\n section_bounds, unprocessed, fsize = parse_pe_pkl(i, files[i][:-4], args.section_b1_partition[files[i][:-4]], unprocessed)\n if cnst.USE_POOLING_LAYER:\n try:\n cur_fmap = raw_feature_maps[i]\n top_acts_idx = np.argmax(cur_fmap, 
axis=0)\n top_blocks = get_top_act_blocks(top_acts_idx, section_bounds, args.q_sections, args.whole_b1_partition[files[i][:-4]][\"whole_bytes\"])\n if sum(top_blocks) == 0:\n logging.debug(\"No useful top block data added for sample \" + files[i])\n except Exception as e:\n logging.exception(\"$$$$ Error occurred in Top Activation Block Module. $$$$\")\n args.whole_b1_partition[files[i][:-4]][\"whole_bytes\"] = top_blocks\n store_partition_data(\"block_b1_\"+mode, fold_index, pcount, \"t1\", args.whole_b1_partition)\n del args.section_b1_partition\n del args.whole_b1_partition\n gc.collect()\n del stunted_model\n gc.collect()\n\n\ndef init(args, fold_index, partition_count, b1_all_file_cnt, b1b_all_truth_cnt, b1m_all_truth_cnt):\n \"\"\" Activation Trend Identification (ATI) Module\n Args:\n args: various data required for ATI\n fold_index: current fold of cross-validation\n partition_count: number of partitions created for b1 training set\n b1_all_file_cnt: count of samples in b1 set\n b1b_all_truth_cnt: count of benign samples in b1 training set\n b1m_all_truth_cnt: count of malware samples in b1 training set\n Returns:\n None (Resultant data are stored in CSV for further use)\n \"\"\"\n sd = SectionActivationDistribution()\n sd.b1_count = b1_all_file_cnt\n sd.b1_b_truth_count = b1b_all_truth_cnt\n sd.b1_m_truth_count = b1m_all_truth_cnt\n\n sd = start_ati_process(args, fold_index, partition_count, sd)\n trend, common_trend, support = save_activation_trend(sd)\n # select sections for Tier-2 based on identified activation trend\n q_sections_by_q_criteria = find_qualified_sections(sd, trend, common_trend, support, fold_index)\n\n # select, drop = plots.save_stats_as_plot(fmaps, qualification_criteria)\n\n # Save qualified sections by Q_criteria\n qdata = [np.concatenate([[str(q_criterion)], q_sections_by_q_criteria[q_criterion]]) for q_criterion in q_sections_by_q_criteria]\n pd.DataFrame(qdata).to_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + \"out\" + cnst.ESC + 
\"result\" + cnst.ESC + \"qsections_by_qcriteria_\" + str(fold_index) + \".csv\", index=False, header=None)\n return # select, drop\n\n\nif __name__ == '__main__':\n # start_visualization_process(args)\n plots.save_stats_as_plot()\n\n # pe = pefile.PE(\"D:\\\\08_Dataset\\\\benign\\\\git-gui.exe\")\n # parse_pe(0, \"D:\\\\08_Dataset\\\\benign\\\\git-gui.exe\", 204800, 0)\n # for section in pe.sections:\n # print(section)\n # print(pe.OPTIONAL_HEADER, \"\\n\", pe.NT_HEADERS, \"\\n\", pe.FILE_HEADER, \"\\n\", pe.RICH_HEADER, \"\\n\", pe.DOS_HEADER,\n # \\\"\\n\", pe.__IMAGE_DOS_HEADER_format__, \"\\n\", pe.header, \"\\n\", \"LENGTH\", len(pe.header))\n\n'''def display_edit_distance():\n sections = []\n top_sections = []\n malware_edit_distance = []\n print(\"\\n SECTION [EDIT DISTANCE SCORE]\")\n df = pd.read_csv(combined_stat_file)\n df.set_index(\"type\", inplace=True)\n for i in range(0, len(keys)):\n a = df.loc['FN'].values[i]\n b = df.loc['BENIGN'].values[i]\n c = df.loc['MALWARE'].values[i]\n dist1 = norm(a-b) // 1\n dist2 = norm(a-c) // 1\n print(keys[i], dist1, dist2, \"[MALWARE]\" if dist2 < dist1 else \"[BENIGN]\", dist1 - dist2)\n if dist2 < dist1:\n malware_edit_distance.append(dist1 - dist2)\n sections.append(keys[i])\n idx = np.argsort(malware_edit_distance)[::-1]\n for t in idx:\n print(\"%10s\" % sections[t], \"%20s\" % str(malware_edit_distance[t]))\n top_sections.append(sections[t])\n return top_sections[:3]\n\n def ks(cutoff):\n from scipy import stats\n keys = ['header', 'text', 'data', 'rsrc', 'pdata', 'rdata']\n for key in keys:\n b = pd.read_csv('D:\\\\03_GitWorks\\\\echelon\\\\out\\\\result_multi\\\\benign.csv' + \".activation_\" + key + \".csv\", header=None)\n m = pd.read_csv('D:\\\\03_GitWorks\\\\echelon\\\\out\\\\result_multi\\\\malware.csv' + \".activation_\" + key + \".csv\", header=None)\n b = np.squeeze((b.get_values()))\n m = np.squeeze((m.get_values()))\n b = (b - b.min()) / (b.max() - b.min())\n m = (m - m.min()) / (m.max() - 
m.min())\n print(key, b.max(), len(b), len(b[b > cutoff]))\n print(key, m.max(), len(m), len(m[m > cutoff]))\n print(\"Section: \", key[:4], \"\\t\\t\", stats.ks_2samp(np.array(b), np.array(m)))\n plt.hist(b[b > cutoff], 100)\n plt.hist(m[m > cutoff], 100)\n plt.legend(['benign', 'malware'])\n plt.show()\n # break\n '''\n" ]
[ [ "numpy.concatenate", "numpy.percentile", "pandas.DataFrame", "numpy.shape", "numpy.where", "numpy.argmax", "numpy.amax", "numpy.argsort", "pandas.read_csv" ] ]
mispchallenge/MISP2021-AVSR
[ "e26ab87bd3134d08d9571632bbffb0b5e731a830", "e26ab87bd3134d08d9571632bbffb0b5e731a830" ]
[ "NN-HMM/optimizer/optimizer_AdaBound.py", "NN-HMM/network/network_mixup.py" ]
[ "# coding:utf-8\nimport torch\nimport math\nimport torch.optim as optim\n\n\nclass AdaBound(optim.Optimizer):\n \"\"\"Implements AdaBound algorithm.\n It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of Learning Rate`_.\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): Adam learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n final_lr (float, optional): final (SGD) learning rate (default: 0.1)\n gamma (float, optional): convergence speed of the bound functions (default: 1e-3)\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsbound (boolean, optional): whether to use the AMSBound variant of this algorithm\n .. Adaptive Gradient Methods with Dynamic Bound of Learning Rate:\n https://openreview.net/forum?id=Bkg3g2R9FX\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), final_lr=0.1, gamma=1e-3, eps=1e-8, weight_decay=0, amsbound=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n if not 0.0 <= final_lr:\n raise ValueError(\"Invalid final learning rate: {}\".format(final_lr))\n if not 0.0 <= gamma < 1.0:\n raise ValueError(\"Invalid gamma parameter: {}\".format(gamma))\n defaults = dict(lr=lr, betas=betas, final_lr=final_lr, gamma=gamma, eps=eps, weight_decay=weight_decay, amsbound=amsbound)\n super(AdaBound, self).__init__(params, defaults)\n self.base_lrs = 
list(map(lambda group: group['lr'], self.param_groups))\n\n def __setstate__(self, state):\n super(AdaBound, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('amsbound', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n for group, base_lr in zip(self.param_groups, self.base_lrs):\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')\n amsbound = group['amsbound']\n state = self.state[p]\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n if amsbound:\n # Maintains max of all exp. moving avg. of sq. grad. values\n state['max_exp_avg_sq'] = torch.zeros_like(p.data)\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n state['step'] += 1\n if group['weight_decay'] != 0:\n grad = grad.add(group['weight_decay'], p.data)\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n if amsbound:\n max_exp_avg_sq = state['max_exp_avg_sq']\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. 
of gradient\n denom = max_exp_avg_sq.sqrt().add_(group['eps'])\n else:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n # Applies bounds on actual learning rate\n # lr_scheduler cannot affect final_lr, this is a workaround to apply lr decay\n final_lr = group['final_lr'] * group['lr'] / base_lr\n lower_bound = final_lr * (1 - 1 / (group['gamma'] * state['step'] + 1))\n upper_bound = final_lr * (1 + 1 / (group['gamma'] * state['step']))\n step_size = torch.full_like(denom, step_size)\n step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(exp_avg)\n p.data.add_(-step_size)\n return loss\n\n\nclass AdaBoundW(optim.Optimizer):\n \"\"\"Implements AdaBound algorithm with Decoupled Weight Decay (arxiv.org/abs/1711.05101)\n It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of Learning Rate`_.\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining parameter groups\n lr (float, optional): Adam learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999))\n final_lr (float, optional): final (SGD) learning rate (default: 0.1)\n gamma (float, optional): convergence speed of the bound functions (default: 1e-3)\n eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsbound (boolean, optional): whether to use the AMSBound variant of this algorithm\n .. 
Adaptive Gradient Methods with Dynamic Bound of Learning Rate:\n https://openreview.net/forum?id=Bkg3g2R9FX\n \"\"\"\n\n def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), final_lr=0.1, gamma=1e-3,\n eps=1e-8, weight_decay=0, amsbound=False):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n if not 0.0 <= final_lr:\n raise ValueError(\"Invalid final learning rate: {}\".format(final_lr))\n if not 0.0 <= gamma < 1.0:\n raise ValueError(\"Invalid gamma parameter: {}\".format(gamma))\n defaults = dict(lr=lr, betas=betas, final_lr=final_lr, gamma=gamma, eps=eps, weight_decay=weight_decay, amsbound=amsbound)\n super(AdaBoundW, self).__init__(params, defaults)\n self.base_lrs = list(map(lambda group: group['lr'], self.param_groups))\n\n def __setstate__(self, state):\n super(AdaBoundW, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('amsbound', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n for group, base_lr in zip(self.param_groups, self.base_lrs):\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\n 'Adam does not support sparse gradients, please consider SparseAdam instead')\n amsbound = group['amsbound']\n state = self.state[p]\n # State initialization\n if len(state) == 0:\n state['step'] = 0\n # Exponential moving average of gradient values\n state['exp_avg'] = torch.zeros_like(p.data)\n # Exponential moving average 
of squared gradient values\n state['exp_avg_sq'] = torch.zeros_like(p.data)\n if amsbound:\n # Maintains max of all exp. moving avg. of sq. grad. values\n state['max_exp_avg_sq'] = torch.zeros_like(p.data)\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n beta1, beta2 = group['betas']\n state['step'] += 1\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n if amsbound:\n max_exp_avg_sq = state['max_exp_avg_sq']\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. of gradient\n denom = max_exp_avg_sq.sqrt().add_(group['eps'])\n else:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n bias_correction1 = 1 - beta1 ** state['step']\n bias_correction2 = 1 - beta2 ** state['step']\n step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1\n # Applies bounds on actual learning rate\n # lr_scheduler cannot affect final_lr, this is a workaround to apply lr decay\n final_lr = group['final_lr'] * group['lr'] / base_lr\n lower_bound = final_lr * (1 - 1 / (group['gamma'] * state['step'] + 1))\n upper_bound = final_lr * (1 + 1 / (group['gamma'] * state['step']))\n step_size = torch.full_like(denom, step_size)\n step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(exp_avg)\n if group['weight_decay'] != 0:\n decayed_weights = torch.mul(p.data, group['weight_decay'])\n p.data.add_(-step_size)\n p.data.sub_(decayed_weights)\n else:\n p.data.add_(-step_size)\n return loss\n", "#!/usr/bin/env python\n# _*_ coding: UTF-8 _*_\nimport random\nimport torch\nimport numpy as np\nimport torch.nn as nn\n\n\nclass MixUp(nn.Module):\n def __init__(self, mix_probability, alpha, mix_types):\n super(MixUp, self).__init__()\n self.mix_probability = mix_probability\n self.alpha = alpha\n self.mix_types = mix_types\n \n def 
forward(self, *x):\n assert len(x) == len(self.mix_types), '{} but {}'.format(len(x), len(self.mix_types))\n if self.training and random.uniform(0, 1) < self.mix_probability:\n y = []\n lam = np.random.beta(self.alpha, self.alpha)\n for sub_x, mix_type in zip(x, self.mix_types):\n if sub_x is None:\n y.append(sub_x)\n else:\n if sub_x.shape[0] % 2 != 0:\n padded_sub_x = torch.cat([sub_x, sub_x[-1:]], dim=0)\n else:\n padded_sub_x = sub_x\n padded_sub_x_a, padded_sub_x_b = padded_sub_x.chunk(chunks=2, dim=0)\n if mix_type == 'sum':\n y.append(lam * padded_sub_x_a + (1. - lam) * padded_sub_x_b)\n elif mix_type == 'max':\n y.append(torch.stack([padded_sub_x_a, padded_sub_x_b], dim=0).max(dim=0)[0])\n else:\n raise NotImplementedError('Unknown mix_type: {}'.format(mix_type))\n return y\n return [*x]\n" ]
[ [ "torch.zeros_like", "torch.max", "torch.mul", "torch.full_like" ], [ "numpy.random.beta", "torch.cat", "torch.stack" ] ]
silastittes/parv_local
[ "67d0a804af320bc50024fd1e6e2190b0c55fb388" ]
[ "src/subset_beagle.py" ]
[ "import argparse\nimport gzip\nimport numpy as np\n\n#from parse_read import parse_read\n\nprog = 'Simple helper script to subset a beagle file based on the Ind[0-9][0-9]* labels in the header',\n\nparser = argparse.ArgumentParser(description=\"Given a list of Ind IDs in a beagle file, subsets the beagle file for those samples. Silly but useful.\")\n\nparser.add_argument(\"-b\", \"--beagle_file\", type = str, help = \"Name of the beagle file to subset\", required = True)\n\nparser.add_argument(\"-i\", \"--id_file\", type = str, help = \"Name of the ID file with and Ind[0-9][0-9] on each line.\", required = True)\n\nargs = parser.parse_args()\n\ndef openfile(filename):\n if filename.endswith(\".gz\"):\n return gzip.open(filename, \"rt\")\n else:\n return open(filename, \"r\")\n\n#add ind ids to dictionary\nid_list = []\nwith openfile(args.id_file) as f:\n for line in f:\n ind_id = line.strip().split()[0]\n id_list.append(ind_id)\nid_list = np.asarray(id_list)\n\n#filter beagle file\nwith openfile(args.beagle_file) as f:\n line1 = np.array(f.readline().strip().split())\n id_match = np.where(np.in1d(line1, id_list))[0]\n beagle_idx = np.concatenate((np.array([0,1,2]), id_match))\n\n print('\\t'.join(line1[beagle_idx]))\n\n for line in f:\n ln = np.array(line.strip().split())\n print('\\t'.join(ln[beagle_idx]))\n \n \n" ]
[ [ "numpy.in1d", "numpy.array", "numpy.asarray" ] ]
AndrewKirby2/data_synthesis
[ "656858137a348fd5dcb57bcd04bdfece2b9eac1b" ]
[ "venv/lib/python3.8/site-packages/mogp_emulator/MCMC.py" ]
[ "import numpy as np\nfrom numpy.linalg import LinAlgError\nfrom inspect import signature\nimport warnings\n\ndef MH_proposal(current_params, step_sizes):\n \"\"\"\n Propose an MCMC step using a Metropolis-Hastings method\n \n Proposes the next point in an MCMC sampler using the Metropolis-Hastings method.\n Inputs are the current point and a covariance matrix. The next point is drawn from\n a multivariate normal distribution centered around the current point. The covariance\n matrix must be a 2D ``n`` by ``n`` array, where ``n`` is the number of parameters, and\n must be positive definite. Returns a 1D array of length ``n`` holding the new proposed\n parameter values.\n \n :param current_params: Current value of the parameters. Must be a 1D array.\n :type current_params: ndarray\n :param step_sizes: Covariance matrix from which steps are drawn. Must be a 2D array\n with both dimensions the same length as ``current_params``, and\n must be positive definite.\n :type step_sizes: ndarray\n :returns: New value of parameters, a 1D array with the same length as ``current_params``\n :rtype: ndarray\n \"\"\"\n \n current_params = np.array(current_params)\n step_sizes = np.array(step_sizes)\n \n assert len(current_params.shape) == 1, \"current parameters must be a 1D array\"\n assert len(step_sizes.shape) == 2, \"step sizes must be a 2D array\"\n assert step_sizes.shape[0] == step_sizes.shape[1], \"step sizes must be a square array\"\n assert len(current_params) == step_sizes.shape[0], \"length of current parameters must match length of step sizes\"\n assert np.all(np.diag(step_sizes) > 0.), \"step sizes must be a positive definite matrix\"\n \n return np.random.multivariate_normal(mean=current_params, cov = step_sizes)\n\n\ndef MCMC_step(loglikelihood, current_params, step_sizes, loglike_sign = 1.):\n \"\"\"\n Method to take a weak prior Metropolis-Hastings MCMC step\n \n Take a Metropolis-Hastings MCMC step with given log-likelihood function and step sizes.\n Method uses 
a multivariate normal distribution centered around the current parameter\n values from which the next point is drawn, and evaluates the log-likelihood for both\n points. If the next point has a larger log-likelihood, the step is always accepted,\n and if the log-likelihood is less for the proposed step it is accepted with a\n probability based on the difference between the two. If the method encounters an\n error when evalutaing the log-likelihood, the step is rejected. Returns the next point\n and a boolean indicating whether or not the step was accepted.\n \n An optional parameter ``loglike_sign`` can be passed that must be a float with the\n value +/- 1. This is multiplied by the log-likelihood and thus allows methods that\n compute the negative log-likelihood to be used in this routine. If values other than\n +/- 1 are passed, the method will raise an error.\n \n :param loglikelihood: Log-likelihood function to be used in the MCMC step. Must be\n callable and must accept a single argument, which is the array\n holding the parameters. If this function computes the negative\n log-likelihood, pass ``loglike_sign = -1.`` to the function as well.\n :type loglikelihood: function or other callable\n :param current_params: Current value of the parameters. Must be a 1D array.\n :type current_params: ndarray\n :param step_sizes: Covariance matrix from which steps are drawn. Must be a 2D array\n with both dimensions the same length as ``current_params``, and\n must be positive definite.\n :type step_sizes: ndarray\n :param loglike_sign: Sign for the log-likelihood function. If the provided\n ``loglikelihood`` function computes the negative log-likelihood,\n pass ``-1.`` for this parameter. 
Optional, default value is ``1.``\n :type loglike_sign: float\n :returns: Proposed next point and whether or not the point is accepted as a tuple.\n The first return item is the next point (as a 1D array with the same length\\\n as ``current_params``) and the second item is a boolean indicating whether\n or not the step was accepted.\n :rtype: tuple containing a ndarray and a bool\n \"\"\"\n \n assert callable(loglikelihood), \"loglikelihood must be a callable function\"\n assert len(signature(loglikelihood).parameters) == 1\n assert loglike_sign == 1. or loglike_sign == -1., \"loglikelihood sign must be +/- 1\"\n \n next_point = MH_proposal(current_params, step_sizes)\n\n try:\n H = loglike_sign*(-loglikelihood(current_params) + loglikelihood(next_point))\n except (FloatingPointError, AssertionError, LinAlgError):\n H = np.nan\n\n if H >= np.log(np.random.random()) and np.isfinite(H):\n accept = True\n else:\n accept = False\n\n return next_point, accept\n\ndef sample_MCMC(loglikelihood, start, step_sizes, n_samples = 1000, thin = 0, loglike_sign = 1.):\n \"\"\"\n Draw MCMC samples for a given log-likelihood function with weak priors\n \n Compute an MCMC chain for a given log-likelihood function with weak priors. Function\n requires the log-likelihood function, the starting point for the MCMC chain, and an\n array describing the step sizes. Optional parameters are the number of steps to take,\n how the thin the samples, and a sign for the log-likelihood function.\n \n The log-likelihood function must be a function or other callable that accepts a single\n argument, which is a 1D array holding the current parameter values. The starting point\n must be a 1D array holding the starting parameter values, which must match the length of\n the input to the log-likelihood function. The step sizes must be a 2D array with each\n dimension having the same length as the number of parameters, and must be positive\n definite. 
The step size array is used as the covariance matrix for a multivariate\n normal distribution, from which steps are drawn.\n \n Optional parameters are the number of steps to be taken (must be a positive integer).\n Note that if the chain is thinned, the number of points in the final MCMC chain will\n differ from the number of steps taken.\n \n Thinning may be specified with a non-negative integer. If a positive integer is\n given, the chain will be thinned by only keeping every ``thin`` steps. Note that\n ``thin = 1`` means that the chain will not be thinned. If ``thin = 0`` is given\n (the default value), the chain will automatically be thinned by computing the\n autocorrelation of the chain for each parameter separately and estimating the value\n needed to eliminate correlations in the chain. If the autothinning method fails\n (usually occurrs if the posterior is multimodal), the chain will not be thinned\n and a warning will be given. More details on the autothinning procedure are\n described in the corresponding function.\n \n An optional parameter ``loglike_sign`` can be passed that must be a float with the\n value +/- 1. This is multiplied by the log-likelihood and thus allows methods that\n compute the negative log-likelihood to be used in this routine.\n \n Returns the final thinned MCMC chain (a 2D array, where the first dimension indicates\n the different samples and the second dimension indicates the different parameters),\n an array holding all rejected steps (also a 2D array like the MCMC chain, useful for\n diagnosing problems with convergence), the fraction of steps that are accepted (also\n useful for diagnosing problems with convergence), and the first lag autocorrelation of\n the thinned MCMC chain.\n \n :param loglikelihood: Log-likelihood function to be used in the MCMC step. Must be\n callable and must accept a single argument, which is the array\n holding the parameters. 
If this function computes the negative\n log-likelihood, pass ``loglike_sign = -1.`` to the function as well.\n :type loglikelihood: function or other callable\n :param start: Starting value of the parameters. Must be a 1D array.\n :type start: ndarray\n :param step_sizes: Covariance matrix from which steps are drawn. Must be a 2D array\n with both dimensions the same length as ``current_params``, and\n must be positive definite.\n :type step_sizes: ndarray\n :param n_samples: Number of steps to be taken. Must be a positive integer. Optional,\n default value is 1000. Note that if the chain is thinned, the\n final MCMC chain will be shorter than this.\n :type n_samples: int\n :param thin: Integer describing how to thin the MCMC chain. A positive integer\n indicates manual thinning by keeping every ``thin`` steps (note\n that ``thin = 1`` means the chain will not be thinned). A value\n of ``0`` will attempt to autothin the chain.\n :type thin: int\n :param loglike_sign: Sign for the log-likelihood function. If the provided\n ``loglikelihood`` function computes the negative log-likelihood,\n pass ``-1.`` for this parameter. 
Optional, default value is ``1.``\n :type loglike_sign: float\n :returns: MCMC chain (2D array), array of rejected points (2D array), acceptance rate\n (float), and first lag autocorrelation of the thinned MCMC chain (float)\n :rtype: tuple containing (ndarray, ndarray, float, float)\n \"\"\"\n\n n_samples = int(n_samples)\n thin = int(thin)\n\n assert n_samples > 0, \"number of samples must be a positive integer\"\n assert thin >= 0, \"thin must be a non-negative integer\"\n \n start = np.array(start)\n \n assert start.ndim == 1, \"starting point must be a 1d array\"\n n_params = len(start)\n\n samples = np.zeros((n_samples, n_params))\n samples[0] = start\n rejected = []\n\n for i in range(n_samples - 1):\n next_point, accept = MCMC_step(loglikelihood, samples[i], step_sizes, loglike_sign)\n if accept:\n samples[i+1] = np.copy(next_point)\n else:\n samples[i+1] = np.copy(samples[i])\n rejected.append(np.array(next_point))\n \n acceptance = float(n_samples - len(rejected))/float(n_samples)\n\n if thin == 0:\n thin_freq = autothin_samples(samples)\n else:\n thin_freq = thin\n \n thinned = samples[::thin_freq]\n\n first_lag = np.zeros(n_params)\n\n if n_samples > 1:\n for i in range(n_params):\n autocorr = np.correlate(thinned[:,i]-np.mean(thinned[:,i]), thinned[:,i]-np.mean(thinned[:,i]), mode=\"full\")\n if np.max(autocorr) > 0.:\n first_lag[i] = autocorr[np.argmax(autocorr)+1]/np.max(autocorr)\n\n return thinned, np.array(rejected), acceptance, first_lag\n\ndef autothin_samples(signal):\n \"\"\"\n Automatically estimate thinning needed to obtain uncorrelated samples\n \n This function attempts to estimate the thinning needed to obtain uncorrelated samples in an\n MCMC chain. For each separate parameter, the function computes the autocorrelation and\n estimates the lag needed to obtain uncorrelated samples. 
This is done by recognizing that\n the standard deviation of the autocorrelation of a random signal scales inversely with the\n square root of the number of samples, so when the autocorrelation drops below three times\n this, we use this as a guess to when the signal is uncorrelated. The maximum lag across all\n parameters is returned. If the chain contains fewer than 10 points, or the autocorrelation\n never drops below the target value, the method gives a warning and\n returns 1.\n \n :param signal: MCMC chain to be thinned. Must be a 1D or 2D array. If 2D, the first dimension\n indicates the MCMC samples, and the second dimension indicates the different\n parameter values. If 1D, the array is assumed to hold samples of a single\n parameter, and the array is reshaped to have a singleton second dimension.\n :type signal: ndarray\n :returns: Estimated stride needed to thin the samples to obtain uncorrelated samples. If\n the method does not succeed, gives a warning and returns ``1``.\n :rtype: int\n \"\"\"\n \n signal = np.array(signal)\n if signal.ndim == 1:\n signal = np.reshape(signal, (len(signal), 1))\n\n assert signal.ndim == 2, \"input signal to be thinned must be a 1d or 2d array\"\n \n n_samples, n_params = signal.shape\n \n maxthin = 0\n \n if n_samples >= 10:\n for i in range(n_params):\n autocorr = np.correlate(signal[:,i]-np.mean(signal[:,i]), signal[:,i]-np.mean(signal[:,i]), mode=\"full\")\n start = np.argmax(autocorr)\n if np.max(autocorr) > 0.:\n autocorr = autocorr/np.max(autocorr)\n for j in range(1, len(autocorr) - start):\n if np.abs(autocorr[start + j]) < 3./np.sqrt(float(n_samples)):\n if j > maxthin:\n maxthin = j\n break\n \n if maxthin == 0:\n warnings.warn(\"automatic thinning failed, posterior distribution may be multimodal\")\n maxthin = 1\n \n return maxthin\n " ]
[ [ "numpy.max", "numpy.array", "numpy.zeros", "numpy.copy", "numpy.mean", "numpy.random.multivariate_normal", "numpy.argmax", "numpy.isfinite", "numpy.abs", "numpy.random.random", "numpy.diag" ] ]
yzhangswingman/ranking
[ "6c55d0bc87c761448f1937f46998039c8dbfe280" ]
[ "tensorflow_ranking/examples/keras/keras_m2e_tfrecord.py" ]
[ "# Copyright 2020 The TensorFlow Ranking Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Keras Model to Estimator example code for proto formats stored in TFRecord.\n\nThe supported proto formats are listed at ../python/data.py.\n--------------------------------------------------------------------------------\nSample command lines:\n\nMODEL_DIR=/tmp/output && \\\nTRAIN=tensorflow_ranking/examples/data/train_elwc.tfrecord && \\\nEVAL=tensorflow_ranking/examples/data/eval_elwc.tfrecord && \\\nVOCAB=tensorflow_ranking/examples/data/vocab.txt && \\\nWEIGHT_FEATURE_NAME=\"doc_weight\" && \\\nrm -rf $MODEL_DIR && \\\nbazel build -c opt \\\ntensorflow_ranking/examples/keras/keras_m2e_tfrecord_py_binary && \\\n./bazel-bin/tensorflow_ranking/examples/keras/keras_m2e_tfrecord_py_binary \\\n--train_path=$TRAIN \\\n--eval_path=$EVAL \\\n--vocab_path=$VOCAB \\\n--model_dir=$MODEL_DIR \\\n--data_format=example_list_with_context \\\n--weights_feature_name=$WEIGHT_FEATURE_NAME\n\nYou can use TensorBoard to display the training results stored in $MODEL_DIR.\n\nNotes:\n * Use --alsologtostderr if the output is not printed into screen.\n\"\"\"\n\nfrom absl import flags\n\nimport tensorflow as tf\nimport tensorflow_ranking as tfr\n\nflags.DEFINE_enum(\n \"data_format\", \"example_list_with_context\",\n [\"example_list_with_context\", \"example_in_example\", \"sequence_example\"],\n \"Data format defined in data.py.\")\nflags.DEFINE_string(\"train_path\", None, 
\"Input file path used for training.\")\nflags.DEFINE_string(\"eval_path\", None, \"Input file path used for eval.\")\nflags.DEFINE_string(\"vocab_path\", None,\n \"Vocabulary path for query and document tokens.\")\nflags.DEFINE_string(\"model_dir\", None, \"Output directory for models.\")\nflags.DEFINE_integer(\"batch_size\", 32, \"The batch size for train.\")\nflags.DEFINE_integer(\"num_train_steps\", 15000, \"Number of steps for train.\")\nflags.DEFINE_float(\"learning_rate\", 0.05, \"Learning rate for optimizer.\")\nflags.DEFINE_float(\"dropout_rate\", 0.8, \"The dropout rate before output layer.\")\nflags.DEFINE_list(\"hidden_layer_dims\", [\"64\", \"32\", \"16\"],\n \"Sizes for hidden layers.\")\nflags.DEFINE_integer(\n \"list_size\", None,\n \"List size used for training. Use None for dynamic list size.\")\nflags.DEFINE_integer(\"group_size\", 1, \"Group size used in score function.\")\nflags.DEFINE_string(\"loss\", \"approx_ndcg_loss\",\n \"The RankingLossKey for the loss function.\")\nflags.DEFINE_string(\"weights_feature_name\", \"\",\n \"The name of the feature where unbiased learning-to-rank \"\n \"weights are stored.\")\nflags.DEFINE_bool(\n \"use_document_interactions\", False,\n \"If true, uses cross-document interactions to generate scores.\")\n\nFLAGS = flags.FLAGS\n\n_LABEL_FEATURE = \"relevance\"\n_PADDING_LABEL = -1\n_EMBEDDING_DIMENSION = 20\n_SIZE = \"example_list_size\"\n\n\ndef _create_feature_columns(use_weight_feature=True):\n \"\"\"Returns context and example feature columns.\n\n Args:\n use_weight_feature: (bool) Whether to use weight feature.\n\n Returns:\n A tuple of dicts (context_feature_columns, example_feature_columns), where\n the dicts are a mapping from feature name to feature column.\n \"\"\"\n if FLAGS.vocab_path:\n sparse_column = tf.feature_column.categorical_column_with_vocabulary_file(\n key=\"query_tokens\", vocabulary_file=FLAGS.vocab_path)\n else:\n sparse_column = 
tf.feature_column.categorical_column_with_hash_bucket(\n key=\"query_tokens\", hash_bucket_size=100)\n query_embedding_column = tf.feature_column.embedding_column(\n sparse_column, _EMBEDDING_DIMENSION)\n context_feature_columns = {\"query_tokens\": query_embedding_column}\n\n if FLAGS.vocab_path:\n sparse_column = tf.feature_column.categorical_column_with_vocabulary_file(\n key=\"document_tokens\", vocabulary_file=FLAGS.vocab_path)\n else:\n sparse_column = tf.feature_column.categorical_column_with_hash_bucket(\n key=\"document_tokens\", hash_bucket_size=100)\n document_embedding_column = tf.feature_column.embedding_column(\n sparse_column, _EMBEDDING_DIMENSION)\n example_feature_columns = {\"document_tokens\": document_embedding_column}\n if use_weight_feature and FLAGS.weights_feature_name:\n example_feature_columns[FLAGS.weights_feature_name] = (\n tf.feature_column.numeric_column(\n FLAGS.weights_feature_name, default_value=1.))\n return context_feature_columns, example_feature_columns\n\n\ndef make_input_fn(file_pattern,\n batch_size,\n randomize_input=True,\n num_epochs=None):\n \"\"\"Returns `Estimator` `input_fn` for TRAIN and EVAL.\n\n Args:\n file_pattern: (string) file pattern for the TFRecord input data.\n batch_size: (int) number of input examples to process per batch.\n randomize_input: (bool) if true, randomize input example order. It should\n almost always be true except for unittest/debug purposes.\n num_epochs: (int) Number of times the input dataset must be repeated. 
None\n to repeat the data indefinitely.\n\n Returns:\n An `input_fn` for `Estimator`.\n \"\"\"\n tf.compat.v1.logging.info(\"FLAGS.data_format={}\".format(FLAGS.data_format))\n\n def _input_fn():\n \"\"\"Defines the input_fn.\"\"\"\n context_feature_columns, example_feature_columns = _create_feature_columns()\n context_feature_spec = tf.feature_column.make_parse_example_spec(\n context_feature_columns.values())\n label_column = tf.feature_column.numeric_column(\n _LABEL_FEATURE, dtype=tf.int64, default_value=_PADDING_LABEL)\n example_feature_spec = tf.feature_column.make_parse_example_spec(\n list(example_feature_columns.values()) + [label_column])\n dataset = tfr.data.build_ranking_dataset(\n file_pattern=file_pattern,\n data_format=FLAGS.data_format,\n batch_size=batch_size,\n list_size=FLAGS.list_size,\n context_feature_spec=context_feature_spec,\n example_feature_spec=example_feature_spec,\n reader=tf.data.TFRecordDataset,\n shuffle=randomize_input,\n num_epochs=num_epochs,\n size_feature_name=_SIZE)\n features = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()\n label = tf.squeeze(features.pop(_LABEL_FEATURE), axis=2)\n label = tf.cast(label, tf.float32)\n\n return features, label\n\n return _input_fn\n\n\ndef make_serving_input_fn():\n \"\"\"Returns serving input fn.\"\"\"\n context_feature_columns, example_feature_columns = _create_feature_columns()\n context_feature_spec = tf.feature_column.make_parse_example_spec(\n context_feature_columns.values())\n example_feature_spec = tf.feature_column.make_parse_example_spec(\n example_feature_columns.values())\n return tfr.data.build_ranking_serving_input_receiver_fn(\n data_format=FLAGS.data_format,\n context_feature_spec=context_feature_spec,\n example_feature_spec=example_feature_spec,\n size_feature_name=_SIZE)\n\n\ndef get_estimator():\n \"\"\"Create Keras ranking estimator.\"\"\"\n context_feature_columns, example_feature_columns = _create_feature_columns()\n # To build your own custom ranking 
network, look at how canned\n # DNNRankingNetwork is implemented. You can subclass\n # tfr.keras.network.UnivariateRankingNetwork, or the more generic\n # tfr.keras.network.RankingNetwork to build your own network.\n network = tfr.keras.canned.DNNRankingNetwork(\n context_feature_columns=context_feature_columns,\n example_feature_columns=example_feature_columns,\n hidden_layer_dims=[int(d) for d in FLAGS.hidden_layer_dims],\n activation=tf.nn.relu,\n dropout=FLAGS.dropout_rate,\n use_batch_norm=True,\n batch_norm_moment=0.99,\n name=\"dnn_ranking_model\")\n loss = tfr.keras.losses.get(\n FLAGS.loss, reduction=tf.compat.v2.losses.Reduction.SUM_OVER_BATCH_SIZE)\n metrics = tfr.keras.metrics.default_keras_metrics()\n optimizer = tf.keras.optimizers.Adagrad(learning_rate=FLAGS.learning_rate)\n config = tf.estimator.RunConfig(save_checkpoints_steps=1000)\n ranker = tfr.keras.model.create_keras_model(\n network=network,\n loss=loss,\n metrics=metrics,\n optimizer=optimizer,\n size_feature_name=_SIZE)\n estimator = tfr.keras.estimator.model_to_estimator(\n model=ranker, model_dir=FLAGS.model_dir, config=config)\n\n return estimator\n\n\ndef train_and_eval():\n \"\"\"Train and Evaluate.\"\"\"\n train_input_fn = make_input_fn(FLAGS.train_path, FLAGS.batch_size)\n eval_input_fn = make_input_fn(\n FLAGS.eval_path, FLAGS.batch_size, randomize_input=False, num_epochs=1)\n\n estimator = get_estimator()\n train_spec = tf.estimator.TrainSpec(\n input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)\n exporters = tf.estimator.LatestExporter(\n \"saved_model_exporter\", serving_input_receiver_fn=make_serving_input_fn())\n eval_spec = tf.estimator.EvalSpec(\n name=\"eval\",\n input_fn=eval_input_fn,\n steps=1,\n exporters=exporters,\n start_delay_secs=0,\n throttle_secs=15)\n\n # Train and validate.\n tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n\n\ndef main(_):\n tf.compat.v1.set_random_seed(1234)\n 
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)\n train_and_eval()\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"train_path\")\n flags.mark_flag_as_required(\"eval_path\")\n flags.mark_flag_as_required(\"model_dir\")\n\n tf.compat.v1.app.run()\n" ]
[ [ "tensorflow.compat.v1.data.make_one_shot_iterator", "tensorflow.estimator.TrainSpec", "tensorflow.feature_column.numeric_column", "tensorflow.estimator.EvalSpec", "tensorflow.feature_column.categorical_column_with_hash_bucket", "tensorflow.estimator.RunConfig", "tensorflow.compat.v1.logging.set_verbosity", "tensorflow.compat.v1.app.run", "tensorflow.estimator.train_and_evaluate", "tensorflow.keras.optimizers.Adagrad", "tensorflow.feature_column.categorical_column_with_vocabulary_file", "tensorflow.compat.v1.set_random_seed", "tensorflow.cast", "tensorflow.feature_column.embedding_column" ] ]
zhangjiulong/calamari
[ "ecd29d46f807a3ad406f0a65bdc3283e358c3585" ]
[ "calamari_ocr/scripts/dataset_viewer.py" ]
[ "import matplotlib.pyplot as plt\nimport argparse\n\nfrom calamari_ocr.ocr.augmentation.dataaugmentationparams import DataAugmentationAmount\nfrom tfaip.base.data.pipeline.datapipeline import SamplePipelineParams\nfrom tfaip.base.data.pipeline.definitions import DataProcessorFactoryParams, INPUT_PROCESSOR, \\\n TARGETS_PROCESSOR, PipelineMode\n\nfrom calamari_ocr.ocr.dataset import DataSetType\n\nfrom calamari_ocr import __version__\nfrom calamari_ocr.ocr.dataset.data import Data\nfrom calamari_ocr.ocr.dataset.datareader.base import DataReader\nfrom calamari_ocr.ocr.dataset.imageprocessors import AugmentationProcessor, PrepareSampleProcessor\nfrom calamari_ocr.ocr.dataset.imageprocessors.data_preprocessor import ImageProcessor\nfrom calamari_ocr.ocr.dataset.imageprocessors.default_image_processors import default_image_processors\nfrom calamari_ocr.ocr.dataset.params import FileDataReaderArgs, PipelineParams, DataParams\nfrom calamari_ocr.ocr.dataset.textprocessors import TextNormalizer, TextRegularizer, StripTextProcessor, \\\n BidiTextProcessor\nfrom calamari_ocr.ocr.dataset.textprocessors.text_regularizer import default_text_regularizer_replacements\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--version', action='version', version='%(prog)s v' + __version__)\n parser.add_argument(\"--files\", nargs=\"+\",\n help=\"List all image files that shall be processed. 
Ground truth fils with the same \"\n \"base name but with '.gt.txt' as extension are required at the same location\",\n required=True)\n parser.add_argument(\"--text_files\", nargs=\"+\", default=None,\n help=\"Optional list of GT files if they are in other directory\")\n parser.add_argument(\"--gt_extension\", default=None,\n help=\"Default extension of the gt files (expected to exist in same dir)\")\n parser.add_argument(\"--dataset\", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)\n parser.add_argument(\"--line_height\", type=int, default=48,\n help=\"The line height\")\n parser.add_argument(\"--pad\", type=int, default=16,\n help=\"Padding (left right) of the line\")\n parser.add_argument(\"--processes\", type=int, default=1,\n help=\"The number of threads to use for all operations\")\n\n parser.add_argument(\"--n_cols\", type=int, default=1)\n parser.add_argument(\"--n_rows\", type=int, default=5)\n parser.add_argument(\"--select\", type=int, nargs=\"+\", default=[])\n\n # text normalization/regularization\n parser.add_argument(\"--n_augmentations\", type=float, default=0,\n help=\"Amount of data augmentation per line (done before training). If this number is < 1 \"\n \"the amount is relative.\")\n parser.add_argument(\"--text_regularization\", type=str, nargs=\"+\", default=[\"extended\"],\n help=\"Text regularization to apply.\")\n parser.add_argument(\"--text_normalization\", type=str, default=\"NFC\",\n help=\"Unicode text normalization to apply. Defaults to NFC\")\n parser.add_argument(\"--data_preprocessing\", nargs=\"+\", type=str,\n choices=[k for k, p in Data.data_processor_factory().processors.items() if issubclass(p, ImageProcessor)],\n default=[p.name for p in default_image_processors()])\n parser.add_argument(\"--bidi_dir\", type=str, default=None, choices=[\"ltr\", \"rtl\", \"auto\"],\n help=\"The default text direction when preprocessing bidirectional text. 
Supported values \"\n \"are 'auto' to automatically detect the direction, 'ltr' and 'rtl' for left-to-right and \"\n \"right-to-left, respectively\")\n\n parser.add_argument(\"--preload\", action='store_true', help='Simulate preloading')\n parser.add_argument(\"--as_validation\", action='store_true', help=\"Access as validation instead of training data.\")\n\n args = parser.parse_args()\n\n if args.gt_extension is None:\n args.gt_extension = DataSetType.gt_extension(args.dataset)\n\n dataset_args = FileDataReaderArgs(\n pad=args.pad,\n )\n\n data_params: DataParams = Data.get_default_params()\n data_params.train = PipelineParams(\n type=args.dataset,\n remove_invalid=True,\n files=args.files,\n text_files=args.text_files,\n gt_extension=args.gt_extension,\n data_reader_args=dataset_args,\n batch_size=1,\n num_processes=args.processes,\n )\n data_params.val = data_params.train\n\n data_params.pre_processors_ = SamplePipelineParams(run_parallel=True)\n data_params.post_processors_ = SamplePipelineParams(run_parallel=True)\n for p in args.data_preprocessing:\n p_p = Data.data_processor_factory().processors[p].default_params()\n if 'pad' in p_p:\n p_p['pad'] = args.pad\n data_params.pre_processors_.sample_processors.append(DataProcessorFactoryParams(p, INPUT_PROCESSOR, p_p))\n\n # Text pre processing (reading)\n data_params.pre_processors_.sample_processors.extend(\n [\n DataProcessorFactoryParams(TextNormalizer.__name__, TARGETS_PROCESSOR, {'unicode_normalization': args.text_normalization}),\n DataProcessorFactoryParams(TextRegularizer.__name__, TARGETS_PROCESSOR, {'replacements': default_text_regularizer_replacements(args.text_regularization)}),\n DataProcessorFactoryParams(StripTextProcessor.__name__, TARGETS_PROCESSOR)\n ])\n\n # Text post processing (prediction)\n data_params.post_processors_.sample_processors.extend(\n [\n DataProcessorFactoryParams(TextNormalizer.__name__, TARGETS_PROCESSOR,\n {'unicode_normalization': args.text_normalization}),\n 
DataProcessorFactoryParams(TextRegularizer.__name__, TARGETS_PROCESSOR,\n {'replacements': default_text_regularizer_replacements(args.text_regularization)}),\n DataProcessorFactoryParams(StripTextProcessor.__name__, TARGETS_PROCESSOR)\n ])\n if args.bidi_dir:\n data_params.pre_processors_.sample_processors.append(\n DataProcessorFactoryParams(BidiTextProcessor.__name__, TARGETS_PROCESSOR, {'bidi_direction': args.bidi_dir})\n )\n data_params.post_processors_.sample_processors.append(\n DataProcessorFactoryParams(BidiTextProcessor.__name__, TARGETS_PROCESSOR, {'bidi_direction': args.bidi_dir})\n )\n\n data_params.pre_processors_.sample_processors.extend([\n DataProcessorFactoryParams(AugmentationProcessor.__name__, {PipelineMode.Training}, {'augmenter_type': 'simple'}),\n # DataProcessorFactoryParams(PrepareSampleProcessor.__name__), # NOT THIS, since, we want to access raw input\n ])\n\n data_params.data_aug_params = DataAugmentationAmount.from_factor(args.n_augmentations)\n data_params.line_height_ = args.line_height\n\n data = Data(data_params)\n data_pipeline = data.get_val_data() if args.as_validation else data.get_train_data()\n if not args.preload:\n reader: DataReader = data_pipeline.reader()\n if len(args.select) == 0:\n args.select = range(len(reader))\n else:\n reader._samples = [reader.samples()[i] for i in args.select]\n else:\n data.preload()\n data_pipeline = data.get_val_data() if args.as_validation else data.get_train_data()\n samples = data_pipeline.samples\n if len(args.select) == 0:\n args.select = range(len(samples))\n else:\n data_pipeline.samples = [samples[i] for i in args.select]\n\n f, ax = plt.subplots(args.n_rows, args.n_cols, sharey='all')\n row, col = 0, 0\n with data_pipeline as dataset:\n for i, (id, sample) in enumerate(zip(args.select, dataset.generate_input_samples(auto_repeat=False))):\n line, text, params = sample\n if args.n_cols == 1:\n ax[row].imshow(line.transpose())\n ax[row].set_title(\"ID: {}\\n{}\".format(id, text))\n 
else:\n ax[row, col].imshow(line.transpose())\n ax[row, col].set_title(\"ID: {}\\n{}\".format(id, text))\n\n row += 1\n if row == args.n_rows:\n row = 0\n col += 1\n\n if col == args.n_cols or i == len(dataset) - 1:\n plt.show()\n f, ax = plt.subplots(args.n_rows, args.n_cols, sharey='all')\n row, col = 0, 0\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
lidiaxp/newPyLayer
[ "65dd798c30370ae64edfceaf37bf6c47a92330d2", "65dd798c30370ae64edfceaf37bf6c47a92330d2" ]
[ "pylayers/antprop/diffRT.py", "pylayers/antprop/tests/test_diffraction.py" ]
[ "#!/usr/bin/python\n# -*- coding: latin1 -*-\nimport numpy as np\nimport scipy.special as sps\nimport matplotlib.pyplot as plt\nimport pdb\ndef diff(fGHz,phi0,phi,si,sd,N,mat0,matN,beta=np.pi/2,mode='tab',debug=False):\n \"\"\" Luebbers Diffration coefficient\n for Ray tracing\n\n\n\n Parameters\n ----------\n\n Nf : number of frequencies\n Nr : number of rays\n\n fGHz : np.array (Nf)\n phi0 : np.array (Nr)\n phi : np.array (Nr)\n si : np.array (Nr)\n sd : np.array (Nr)\n N: np.array (Nb)\n mat0 : Mat\n matN : Mat\n beta : np.array (Nb)\n skew incidence angle (rad)\n mode : str ( 'tab','exact')\n if 'tab': the Fresnel function is interpolated\n ( increase speed)\n if 'exact': the Fresnel function is computed for each values\n ( increase accuracy)\n (see FreF)\n\n Return\n ------\n\n Ds : numpy array\n Diffraction soft\n Dh : numpy array\n Diffraction hard\n\n Examples\n --------\n\n\n >>> import numpy as np\n >>> from pylayers.antprop.slab import *\n >>> Nf=3\n >>> Nr=10\n >>> Nb=5\n >>> fGHz = np.linspace(0,10,Nf)\n >>> N = np.linspace(1,10,Nb)#320/180.\n >>> phi0 = np.linspace(0.01,2*np.pi-0.01,Nr)#40*np.pi/180.\n >>> phi = np.linspace(0.01,2*np.pi-0.01,Nr)\n >>> dm = MatDB()\n >>> mat0 = dm['METAL']\n >>> matN = dm['METAL']\n >>> si = 10000.*np.ones(Nr)\n >>> sd = 1.*np.ones(Nr)\n >>> plt.ion()\n >>> Ds,Dh,D1,D2,D3,D4 = diff(fGHz,phi0,phi,si,sd,N,mat0,matN)\n\n \"\"\"\n\n if not isinstance(fGHz,np.ndarray):\n fGHz = np.array([fGHz])\n if not isinstance(phi0,np.ndarray):\n phi0 = np.array([phi0])\n if not isinstance(phi,np.ndarray):\n phi = np.array([phi])\n if not isinstance(si,np.ndarray):\n si = np.array([si])\n if not isinstance(sd,np.ndarray):\n sd = np.array([sd])\n if not isinstance(N,np.ndarray):\n N = np.array([N])\n if not isinstance(beta,np.ndarray):\n beta = np.array([beta])\n\n fGHz = fGHz[:,None]\n phi0 = phi0[None,:]\n phi = phi[None,:]\n si = si[None,:]\n sd = sd[None,:]\n N = N[None,:]\n beta = beta[None,:]\n\n L = si*sd/(si+sd)\n k = 
2*np.pi*fGHz/0.3\n\n#--------------------------------------------------\n# R on faces 'o' and 'n'\n#--------------------------------------------------\n\n\n tho = np.empty((fGHz.shape[0],phi.shape[1]))\n thn = np.empty((fGHz.shape[0],phi.shape[1]))\n # PHI0 = phi0 * np.ones(phi.shape)\n # PHI = np.ones(phi0.shape)*phi\n # BN = np.ones(phi0.shape)*N\n\n\n\n c1 = phi>phi0\n c2 = ~c1\n tho[:,c1[0,:]] = phi0[:,c1[0,:]]\n thn[:,c1[0,:]] = N[:,c1[0,:]]*np.pi-phi[:,c1[0,:]]\n tho[:,c2[0,:]] = phi[:,c2[0,:]]\n thn[:,c2[0,:]] = N[:,c2[0,:]]*np.pi-phi0[:,c2[0,:]]\n\n\n\n er0 = np.real(mat0['epr'])\n err0 = np.imag(mat0['epr'])\n ur0 = np.real(mat0['mur'])\n urr0 = np.imag(mat0['mur'])\n sigma0 = mat0['sigma']\n deltah0 = mat0['roughness']\n\n erN = np.real(matN['epr'])\n errN = np.imag(matN['epr'])\n urN = np.real(mat0['mur'])\n urrN = np.imag(mat0['mur'])\n sigmaN = matN['sigma']\n deltahN = matN['roughness']\n\n\n Rsofto,Rhardo = R(tho,k,er0,err0,sigma0,ur0,urr0,deltah0)\n Rsoftn,Rhardn = R(thn,k,erN,errN,sigmaN,urN,urrN,deltahN)\n\n#--------------------------------------------------\n# grazing angle Go et Gn\n#--------------------------------------------------\n\n Gsofto,Gsoftn = G(N,phi0,Rsofto,Rsoftn)\n\n Ghardo,Ghardn = G(N,phi0,Rhardo,Rhardn)\n\n#--------------------------------------------------\n#calcul des 4 termes du coeff diff\n#--------------------------------------------------\n #by construction\n #0 < KLA < 2*k*L\n klamax = 2*np.max(k)*np.max(L)\n if mode == 'tab':\n #xF0 = np.logspace(-6,-2,1000)\n #xF1 = np.logspace(-2,np.log10(klamax),1000)\n #xF = np.hstack((xF0,xF1))\n #pdb.set_trace()\n # xF = np.logspace(-6,np.log10(klamax),1000)\n xF = np.linspace(-8,np.log10(klamax),2000)\n pxF = 10**xF\n F = FreF(pxF)[0]\n else :\n xF = []\n F=[]\n\n sign = 1.0\n D1 = Dfunc(sign,k,N,phi-phi0,si,sd,xF,F,beta)\n\n sign = -1.0\n D2 = Dfunc(sign,k,N,phi-phi0,si,sd,xF,F,beta)\n\n sign = +1.0\n D3 = Dfunc(sign,k,N,phi+phi0,si,sd,xF,F,beta)\n \n sign = -1.0\n D4 = 
Dfunc(sign,k,N,phi+phi0,si,sd,xF,F,beta)\n\n#--------------------------------------\n#n>=1 : exterior wedge\n#--------------------------------------\n Dsoft =np.empty(np.shape(D1),dtype=complex)\n Dhard =np.empty(np.shape(D1),dtype=complex)\n\n #c1 = BN>=1.0\n\n Dsoft = D1+D2+Rsoftn*D3+Rsofto*D4\n Dhard = D1+D2+Rhardn*D3+Rhardo*D4\n# Dsoft = D2-D4\n# Dhard = D2+D4\n #Dsoft = D1+D2-D3-D4\n #Dhard = D1+D2+D3+D4\n# Dsoft = Gsoftn*(D1+Rsoftn*D3)+Gsofto*(D2+Rsofto*D4)\n# Dhard = Ghardn*(D1+Rhardn*D3)+Ghardo*(D2+Rhardo*D4)\n# c1 = abs(Gsoftn+1.0) < 1e-6\n# c2 = abs(Gsofto+1.0) < 1e-6\n# c3 = abs(Ghardn+1.0) < 1e-6\n# c4 = abs(Ghardo+1.0) < 1e-6\n#\n# Dsoft[c1]= 0.5*(D1[c1]+D3[c1])+Gsofto[c1]*(D2[c1]+Rsofto[c1]*D4[c1])\n# Dsoft[c2]= Gsoftn[c2]*(D1[c2]+Rsoftn[c2]*D3[c2])+0.5*(D2[c2]+D4[c2])\n# Dhard[c3]= 0.5*(D1[c3]+D3[c3])+Ghardo[c3]*(D2[c3]+Rhardo[c3]*D4[c3])\n# Dhard[c4]= Ghardn[c4]*(D1[c4]+Rhardn[c4]*D3[c4])+0.5*(D2[c4]+D4[c4])\n#--------------------------------------\n#traitement des cas ou Go (ou Gn) = -1\n#--------------------------------------\n\n# if (abs(Gsoftn+1.0) < 1e-6):\n# DTsoft = 0.5*(D1+D3)+Gsofto*(D2+Rsofto*D4)\n#\n# if (abs(Gsofto+1.0)<1e-6):\n# DTsoft = Gsoftn*(D1+Rsoftn*D3)+0.5*(D2+D4)\n#\n# if (abs(Ghardn+1.0) < 1.0e-6):\n# DThard = 0.5*(D1+D3)+Ghardo*(D2+Rhardo*D4)\n#\n# if (abs(Ghardo+1.0)<1e-6):\n# DThard = Ghardn*(D1+Rhardn*D3)+0.5*(D2+D4)\n#\n##--------------------------------------\n##cas ou n<1 : interior wedge\n##--------------------------------------\n# else:\n#\n# thoz = N*np.pi-tho\n# thnz = N*np.pi-thn\n#\n#\n# [Rsoftnz,Rhardnz] = R(thnz,k,ero,erro,condo,uro,deltaho)\n# [Rsoftoz,Rhardoz] = R(thoz,k,ern,errn,condn,urn,deltahn)\n#\n# DTsoft = Rsoftoz*Rsoftnz*D1+Rsoftn*D3+(Rsofto*Rsoftn*D2+Rsofto*D4)\n#\n# DThard = Rhardoz*Rhardnz*D1+Rhardn*D3+(Rhardo*Rhardn*D2+Rhardo*D4)\n if np.isnan(Dsoft).any():\n u = np.isnan(Dsoft)\n pdb.set_trace()\n if np.isnan(Dhard).any():\n v = np.where(Dhard==np.nan)\n pdb.set_trace()\n if debug:\n return 
Dsoft,Dhard,D1,D2,D3,D4\n else :\n return Dsoft,Dhard#,D1,D2,D3,D4\n\n\ndef G(N,phi0,Ro,Rn):\n \"\"\" grazing angle correction\n\n Parameters\n ----------\n\n N : wedge parameter\n phi0 : incidence angle (rad)\n Ro : R coefficient on face o\n Rn : R coefficient on face n\n\n Luebbers 89 \"a heuristique UTD slope diffraction coefficient for\n rough lossy wedges\"\n \"\"\"\n\n\n if not isinstance(phi0,np.ndarray):\n phi0 = np.array([phi0])\n if not isinstance(N,np.ndarray):\n N = np.array([N])\n\n PHI0 = phi0 * np.ones(Ro.shape)\n BN = N * np.ones(Ro.shape)\n\n# face o\n\n Go = np.ones(np.shape(Ro),dtype='complex')\n\n c1 = (abs(PHI0) < 1.0e-6) * (abs(Ro+1.0)>1.0e-6)\n c2 = (abs(PHI0) < 1.0e-6) * (abs(Ro+1.0)<1.0e-6)\n c3 = abs(PHI0-BN*np.pi) < 1.0e-6\n\n Go[c1] = 1.0/(1.0+Ro[c1])\n Go[c2] = -1.\n Go[c3] = 0.5\n\n# face n\n Gn = np.ones(np.shape(Rn),dtype='complex')\n\n c1 = (abs(PHI0-BN*np.pi) < 1.0e-6) * (abs(Rn+1.0)>1.0e-6)\n c2 = (abs(PHI0-BN*np.pi) < 1.0e-6) * (abs(Rn+1.0)<1.0e-6)\n c3 = abs(PHI0) < 1.0e-6\n Gn[c1] = 1.0/(1.0+Rn[c1])\n Gn[c2] = -1.\n Gn[c3] = 0.5\n\n return Go,Gn\n\ndef Dfunc(sign,k,N,dphi,si,sd,xF=[],F=[],beta=np.pi/2):\n \"\"\"\n\n Parameters\n ----------\n\n sign : int\n +1 | -1\n k : wave number\n N : wedge parameter\n dphi : phi-phi0 or phi+phi0\n si : distance source-D\n sd : distance D-observation\n beta : skew incidence angle\n xF : array\n support of Fresnel function. 
\n F : array\n Values of Fresnel function in regard of support\n if F =[], fresnel function is computed\n otherwise the passed interpolation F is used.\n Reference\n ---------\n\n [1] KOUYOUMJIAN-PATHAK a uniform geometrical theory of diffraction for an edge\n in a perfectly conducting surface\" IEEE AP nov 74 vol 62 N11\n\n Notes\n -----\n\n e-jnp.pi/4 1\n Di= ------------------ * ----------- * F(kla) ([1] eq 25)\n 2n*racine(2*np.pi*k) np.tan(dphi/n)sin(beta)\n\n \"\"\"\n\n\n cste = (1.0-1.0*1j)*(1.0/(4.0*N*np.sqrt(k*np.pi)*np.sin(beta)))\n rnn = (dphi+np.pi*sign)/(2.0*N*np.pi)\n nn = np.zeros(np.shape(rnn))\n\n nn[rnn>0.5] = 1\n nn[rnn>1.5] = 2\n nn[rnn<-0.5] = -1\n nn[rnn<-1.5] = -2\n\n# KLA ref[1] eq 27\n L = ((si*sd)*np.sin(beta)**2)/(1.*(si+sd))\n AC = np.cos( (2.0*N*nn*np.pi-dphi) / 2.0 )\n A = 2*AC**2\n KLA = k * L * A\n\n epsi = AC*2.0\n angle = (np.pi+sign*dphi)/(2.0*N)\n tan = np.tan(angle)\n\n Di = np.empty(KLA.shape)\n \n if F == []:\n Fkla,ys,yL = FreF(KLA)\n else :\n #pxF = 10**xF\n #uF = (np.abs(KLA[:,:]-pxF[:,None,None])).argmin(axis=0)\n val = np.maximum(np.log10(np.abs(KLA))-xF[0,None,None],0)\n uF2 = (len(F)-1)*(val)/(xF[-1,None,None]-xF[0,None,None])\n uF2_int = np.floor(uF2).astype('int')\n Fkla = F[uF2_int]\n #if np.max(Fkla) > 1:\n # Warning('diffRT : Fkla tab probably wrong')\n # 4.56 Mac Namara\n try:\n Di = -cste*Fkla/tan\n except:\n print('tan=0 : It can happen')\n pdb.set_trace()\n\n c5 = np.where(np.abs(tan)<1e-9)\n BL = np.ones(Di.shape)*L\n Di[:,c5] = 0.5*np.sqrt(BL[c5])\n # if np.isinf(Di).any():\n # pdb.set_trace()\n\n return(Di)\n\ndef FresnelI(x) :\n \"\"\" calculates Fresnel integral\n\n Parameters\n ----------\n\n x : array\n real argument\n\n \"\"\"\n\n v = np.empty(x.shape,dtype=complex)\n y = np.abs(x)\n z = .25*y\n\n u1 = np.where(z>1)\n u2 = np.where(z<=1)\n\n y1 = y[u1]\n y2 = y[u2]\n\n d1 = np.cos(y1)\n d2 = np.cos(y2)\n\n e1 = np.sin(y1)\n e2 = np.sin(y2)\n\n z1 = z[u1]\n z2 = z[u2]\n\n c1 = np.sqrt(z1)\n c2 = 
np.sqrt(z2)\n\n# ----------------------------------------\n# x>4, z>1\n# ----------------------------------------\n\n v1 = 0.5 - 0.5*1j\n\n c1 = (1.0)/c1\n z1 = c1*c1\n\n a1=((((((((((\n .23393900e-3*z1 -.12179300e-2)*z1 +.21029670e-2)*z1\n +.2464200e-3)*z1 -.67488730e-2)*z1 +.11948809e-1)*z1\n -.9497136e-2)*z1 +.68989200e-3)*z1 +.57709560e-2)*z1\n +.3936000e-5)*z1 -.24933975e-1)*z1*c1\n\n\n b1=(((((((((((\n .838386000e-3*z1 -.55985150e-2)*z1 +.16497308e-1)*z1\n -.27928955e-1)*z1 +.29064067e-1)*z1 -.17122914e-1)*z1\n +.19032180e-2)*z1 +.48514660e-2)*z1 +.23006000e-4)*z1\n -.93513410e-2)*z1 +.23000000e-7)*z1 +.19947114000)*c1\n\n# ----------------------------------------\n# x<4, z<1\n# ----------------------------------------\n\n\n a2=(((((((((((\n 0.34404779e-1 *z2 - 0.15023096)*z2 - 0.25639041e-1)*z2\n +0.850663781 )*z2 - 0.75752419e-1 )*z2 - 0.305048566e1)*z2\n -0.16898657e-1 )*z2 + 0.6920691902e1)*z2 - 0.576361e-3 )*z2\n -0.6808568854e1)*z2 - 0.1702e-5)*z2 + 0.159576914e1)*c2\n\n b2=(((((((((((\n .19547031e-1 *z2 -.216195929e0 )*z2 +.702222016e0)*z2\n -.4033492760e0)*z2 -.1363729124e1)*z2 -.138341947e0)*z2\n +.5075161298e1)*z2 -.952089500e-2)*z2 -.778002040e1)*z2\n -.928100000e-4)*z2 +.4255387524e1)*z2 -.33000000e-7)*c2\n\n\n w1 = a1*d1+b1*e1+ 1j*(b1*d1-a1*e1) + v1\n w2 = a2*d2+b2*e2+ 1j*(b2*d2-a2*e2)\n\n v[u1] = w1\n v[u2] = w2\n\n y = v*(np.sqrt(np.pi/2.0))\n\n return y\n\n\ndef FreF(x) :\n \"\"\" F function from Pathack\n\n Parameters\n ----------\n\n x : array\n real argument\n\n Examples\n --------\n\n >>> import matplotlib.pyplot as plt\n >>> import numpy as np\n >>> x = np.logspace(-4,2,400);\n >>> F = FreF(x)\n >>> plt.semilogx(x,,np.abs(F))\n >>> plt.grid()\n\n \"\"\"\n ejp4 = np.exp(1j*np.pi/4)\n emjp4 = np.exp(-1j*np.pi/4)\n y = np.empty(x.shape,dtype=complex)\n\n u1 = np.where(x>10)[0]\n u2 = np.where(x<=10)[0]\n xu1 = x[u1]\n xu2 = x[u2]\n\n\n x2 = xu1*xu1\n x3 = x2*xu1\n x4 = x3*xu1\n w1 = 1-0.75/x2+4.6875/x4 + 1j*( 0.5/xu1 -1.875/x3)\n\n cst = 
(1.0 - 1j )*0.5*np.sqrt(np.pi/2)\n carx = abs(xu2)\n racx = np.sqrt(carx)\n modx = np.mod(xu2,2*np.pi)\n expjx = np.exp(1j*modx)\n fr = FresnelI(carx)\n into = cst - fr\n w2 = 2.0*racx*1j*expjx*into\n\n y[u1] = w1\n y[u2] = w2\n\n # [1] eq 30\n ys = (np.sqrt(np.pi*x)-2*x*ejp4-(2/3.)*x**2*emjp4)*np.exp(1j*(np.pi/4+x))\n yl = 1-0.75/(x*x)+4.6875/(x*x*x*x) + 1j*( 0.5/x -1.875/(x*x*x))\n\n return y,ys,yl\n\ndef FreF2(x):\n \"\"\" F function using numpy fresnel function\n\n Parameters\n ----------\n Not working for large argument\n\n \"\"\"\n y = np.empty(x.shape,dtype=complex)\n u1 = np.where(x>5)[0]\n u2 = np.where(x<=5)[0]\n xu1 = x[u1]\n xu2 = x[u2]\n x2 = xu1*xu1\n x3 = x2*xu1\n x4 = x3*xu1\n w1 = 1-0.75/x2+4.6875/x4 + 1j*( 0.5/xu1 -1.875/x3)\n cst = np.sqrt(np.pi/2.)\n sF,cF = sps.fresnel(np.sqrt(xu2/cst))\n Fc = (0.5-cF)*cst\n Fs = (0.5-sF)*cst\n modx = np.mod(xu2,2*np.pi)\n expjx = np.exp(1j*modx)\n w2 = 2*1j*np.sqrt(xu2)*expjx*(Fc-1j*Fs)\n y[u1] = w1\n y[u2] = w2\n return(y)\n\n\ndef R(th,k,er,err,sigma,ur,urr,deltah):\n \"\"\" R coeff\n\n Parameters\n ----------\n\n th : np.array\n incidence angle (axe 0)\n k : np.array\n wave number (axe 1)\n er : real part of permittivity\n err : imaginary part of permittivity\n sigma : conductivity\n ur : real part of permeability\n urr : imaginary part of permeability\n deltah : height standard deviation\n\n Examples\n --------\n\n >>> import numpy as np\n >>> th = np.linspace(0,np.pi/2,180)[None,:]\n >>> fGHz = 0.3\n >>> lamda = 0.3/fGHz\n >>> k = np.array([2*np.pi/2])[:,None]\n >>> Rs,Rh = R(th,k,9,0,0.01,1,0,0)\n\n \"\"\"\n\n cel = 299792458\n #--------------------------------------------\n #cas des surfaces dielectriques (sinon er=-1)\n #--------------------------------------------\n\n if (er >= 0.0 ):\n if ( (( ur-1.0)<1e-16) & ((er-1.0)<1e-16) ):\n Rs = np.zeros(len(th),dtype=complex)\n Rh = np.zeros(len(th),dtype=complex)\n\n u1 = np.where(th >= 1.5*np.pi)\n u2 = np.where(th >= np.pi )\n u3 = np.where(th >= 
0.5*np.pi)\n\n th[u1] = 2.0*np.pi - th[u1]\n th[u2] = th[u2] - np.pi\n th[u3] = np.pi - th[u3]\n\n #if (th >= 1.5*np.pi ):\n # th = 2.0*np.pi - th\n #elif (th >= np.pi ):\n # th = th - np.pi\n #elif (th >= 0.5*np.pi):\n # th = np.pi - th\n\n uo = 4.0*np.pi*1e-7\n eo = 1.0/(uo*cel*cel)\n\n pulse = k*cel\n permi = (er-1j*err)-(1j*sigma)/(pulse*eo)\n\n perme = ur - 1j*urr\n\n yy = (permi/perme)\n\n st = np.sin(th)\n ct = np.cos(th)\n\n bb = np.sqrt(yy-ct**2)\n\n Rs = (st - bb) / (st + bb )\n Rh = (yy*st-bb)/(yy*st+bb)\n\n else: # metalic case\n Rs = -np.ones(th.shape,dtype=complex)\n Rh = np.ones(th.shape,dtype=complex)\n\n roughness = 1.0\n\n Rs = Rs* roughness\n Rh = Rh* roughness\n return Rs,Rh\n", "import numpy as np\nimport pdb\nfrom pylayers.antprop.slab import *\nfrom pylayers.antprop.diff import *\n#\n# Metalic case : MacNamara Page 202\n#\nfGHz = 3.\nN = 320/180.\nphi0 = 40*np.pi/180.\nphi = np.linspace(0.01,N*np.pi-0.01,800)\n#phi = np.linspace(0,3*np.pi/2,10)\ndm = MatDB()\ndm.load('matDB.ini')\nmat0 = dm['METAL']\nmatN = dm['METAL']\nsi = 10000.\nsd = 1.\nplt.ion()\nDs,Dh,D1,D2,D3,D4 = diff(fGHz,phi0,phi,si,sd,N,mat0,matN)\n#plt.plot(phi*180/np.pi,np.real(Ds[0,0,:,0,0,0,0]),'b',label='Soft -')\n#plt.plot(phi*180/np.pi,np.imag(Ds[0,0,:,0,0,0,0]),'b.',label='Soft -')\n#plt.plot(phi*180/np.pi,np.real(Dh[0,0,:,0,0,0,0]),'r',label='Hard +')\n#plt.plot(phi*180/np.pi,np.imag(Dh[0,0,:,0,0,0,0]),'r.',label='Hard +')\n#plt.plot(phi*180/np.pi,np.real(D1[0,0,:,0,0,0,0]),'k',label='ISBinf')\n#plt.plot(phi*180/np.pi,np.imag(D1[0,0,:,0,0,0,0]),'k',label='ISBinf')\n#plt.plot(phi*180/np.pi,np.real(D2[0,0,:,0,0,0,0]),'g',label='ISBsup')\n#plt.plot(phi*180/np.pi,np.imag(D2[0,0,:,0,0,0,0]),'g.',label='ISBsup')\n#plt.plot(phi*180/np.pi,20*np.log10(np.abs(D3[0,0,:,0,0,0,0])),'c',label='RSBn')\n#plt.plot(phi*180/np.pi,np.real(D4[0,0,:,0,0,0,0]),'m',label='RSBo')\n#plt.plot(phi*180/np.pi,np.imag(D4[0,0,:,0,0,0,0]),'m.',label='RSBo')\nplt.legend()\nplt.grid()\nUI = 
np.zeros(np.shape(Ds))\nUII = np.zeros(np.shape(Ds))\nUI[0,0,phi<(np.pi+phi0),0,0,0,0]=1\nUII[0,0,phi<(np.pi-phi0),0,0,0,0]=1.\ndi = np.sqrt(sd**2+si**2-2*si*sd*np.cos(phi[None,None,:,None,None,None,None]-phi0))\ndr = np.sqrt(sd**2+si**2-2*si*sd*np.cos(phi[None,None,:,None,None,None,None]+phi0))\nds = si + sd\n#di = np.cos(phi[None,None,:,None,None,None,None]-phi0)\n#dr = np.cos(phi[None,None,:,None,None,None,None]+phi0)\n#ds = sd\nEi = np.exp(-1j*2*np.pi*fGHz*di/0.3)*UI\nErs = -np.exp(-1j*2*np.pi*fGHz*dr/0.3)*UII\nErh = np.exp(-1j*2*np.pi*fGHz*dr/0.3)*UII\nEd2 = D2*np.exp(-1j*2*np.pi*fGHz*ds/0.3)\nEd4 = D4*np.exp(-1j*2*np.pi*fGHz*ds/0.3)\n#Ets = Ei+Ers+Ed1\nEts = Ei + Ers + (Ed2-Ed4)\nEth = Ei + Erh + (Ed2+Ed4)\nEthf = Ei + Erh + Dh*np.exp(-1j*2*np.pi*fGHz*ds/0.3)\nEtsf = Ei + Ers + Ds*np.exp(-1j*2*np.pi*fGHz*ds/0.3)\nEc2 = Ei + Ed2\nEc4 = Ers - Ed4\n#Ec24 = Ed2+Ei+Ed4+Ers\n#Eth = Ei+Erh\n#plt.figure()\nplt.plot(phi*180/np.pi,np.abs(Ed2[0,0,:,0,0,0,0]),'g',label='ISBinf')\nplt.plot(phi*180/np.pi,np.abs(Ei[0,0,:,0,0,0,0]),'g')\nplt.plot(phi*180/np.pi,np.abs(Ec2[0,0,:,0,0,0,0]),'g')\nplt.plot(phi*180/np.pi,np.abs(Ed4[0,0,:,0,0,0,0]),'r',label='ISBinf')\nplt.plot(phi*180/np.pi,np.abs(Ers[0,0,:,0,0,0,0]),'r')\nplt.plot(phi*180/np.pi,np.abs(Ec4[0,0,:,0,0,0,0]),'r')\n#plt.plot(phi*180/np.pi,np.abs(Ec24[0,0,:,0,0,0,0]),'r')\nplt.figure()\n#plt.plot(phi*180/np.pi,20*np.log10(np.abs(Ets[0,0,:,0,0,0,0])),'b',linewidth=2.5)\n#plt.plot(phi*180/np.pi,20*np.log10(np.abs(Eth[0,0,:,0,0,0,0])),'r',linewidth=2.5)\nplt.plot(phi*180/np.pi,20*np.log10(np.abs(Etsf[0,0,:,0,0,0,0])),'b',linewidth=1.5,label='soft')\nplt.plot(phi*180/np.pi,20*np.log10(np.abs(Ethf[0,0,:,0,0,0,0])),'r',linewidth=1.5,label='hard')\nplt.plot(phi*180/np.pi,20*np.log10(np.abs(Ds[0,0,:,0,0,0,0])),'b.')\nplt.plot(phi*180/np.pi,20*np.log10(np.abs(Dh[0,0,:,0,0,0,0])),'r.')\nplt.ylim([-40,20])\nplt.xlim([0,320])\nplt.xlabel(u'Angle $\\phi$')\nplt.ylabel(u'Magnitude 
(dB)')\nplt.title(u'$\\\\alpha=4^{\\circ},\\phi_0=55^{\\circ},f= 3 GHz, sd=1m$',fontsize=14)\nplt.grid()\n#plt.plot(phi*180/np.pi,np.real(Ei[0,0,:,0,0,0,0]),'g')\n#plt.plot(phi*180/np.pi,20*np.log10(np.abs(Ets[0,0,:,0,0,0,0])),'g.')\n#plt.plot(phi*180/np.pi,20*np.log10(np.abs(Eth[0,0,:,0,0,0,0])),'r')\n#plt.plot(phi*180/np.pi,np.real(Eth[0,0,:,0,0,0,0]),'r.')\nfGHz = 60.\nN = 320/180.\nphi0 = 40*np.pi/180.\nphi = np.linspace(0.01,N*np.pi-0.01,800)\n#phi = np.linspace(0,3*np.pi/2,10)\ndm = MatDB()\ndm.load('matDB.ini')\nmat0 = dm['METAL']\nmatN = dm['METAL']\nsi = 10000.\nsd = 1.\nplt.ion()\nDs,Dh,D1,D2,D3,D4 = diff(fGHz,phi0,phi,si,sd,N,mat0,matN)\n#plt.plot(phi*180/np.pi,np.real(Ds[0,0,:,0,0,0,0]),'b',label='Soft -')\n#plt.plot(phi*180/np.pi,np.imag(Ds[0,0,:,0,0,0,0]),'b.',label='Soft -')\n#plt.plot(phi*180/np.pi,np.real(Dh[0,0,:,0,0,0,0]),'r',label='Hard +')\n#plt.plot(phi*180/np.pi,np.imag(Dh[0,0,:,0,0,0,0]),'r.',label='Hard +')\n#plt.plot(phi*180/np.pi,np.real(D1[0,0,:,0,0,0,0]),'k',label='ISBinf')\n#plt.plot(phi*180/np.pi,np.imag(D1[0,0,:,0,0,0,0]),'k',label='ISBinf')\n#plt.plot(phi*180/np.pi,np.real(D2[0,0,:,0,0,0,0]),'g',label='ISBsup')\n#plt.plot(phi*180/np.pi,np.imag(D2[0,0,:,0,0,0,0]),'g.',label='ISBsup')\n#plt.plot(phi*180/np.pi,20*np.log10(np.abs(D3[0,0,:,0,0,0,0])),'c',label='RSBn')\n#plt.plot(phi*180/np.pi,np.real(D4[0,0,:,0,0,0,0]),'m',label='RSBo')\n#plt.plot(phi*180/np.pi,np.imag(D4[0,0,:,0,0,0,0]),'m.',label='RSBo')\nplt.legend()\nplt.grid()\nUI = np.zeros(np.shape(Ds))\nUII = np.zeros(np.shape(Ds))\nUI[0,0,phi<(np.pi+phi0),0,0,0,0]=1\nUII[0,0,phi<(np.pi-phi0),0,0,0,0]=1.\ndi = np.sqrt(sd**2+si**2-2*si*sd*np.cos(phi[None,None,:,None,None,None,None]-phi0))\ndr = np.sqrt(sd**2+si**2-2*si*sd*np.cos(phi[None,None,:,None,None,None,None]+phi0))\nds = si + sd\n#di = np.cos(phi[None,None,:,None,None,None,None]-phi0)\n#dr = np.cos(phi[None,None,:,None,None,None,None]+phi0)\n#ds = sd\nEi = np.exp(-1j*2*np.pi*fGHz*di/0.3)*UI\nErs = 
-np.exp(-1j*2*np.pi*fGHz*dr/0.3)*UII\nErh = np.exp(-1j*2*np.pi*fGHz*dr/0.3)*UII\nEd2 = D2*np.exp(-1j*2*np.pi*fGHz*ds/0.3)\nEd4 = D4*np.exp(-1j*2*np.pi*fGHz*ds/0.3)\n#Ets = Ei+Ers+Ed1\nEts = Ei + Ers + (Ed2-Ed4)\nEth = Ei + Erh + (Ed2+Ed4)\nEthf = Ei + Erh + Dh*np.exp(-1j*2*np.pi*fGHz*ds/0.3)\nEtsf = Ei + Ers + Ds*np.exp(-1j*2*np.pi*fGHz*ds/0.3)\nEc2 = Ei + Ed2\nEc4 = Ers - Ed4\n#Ec24 = Ed2+Ei+Ed4+Ers\n#Eth = Ei+Erh\nplt.figure()\nplt.plot(phi*180/np.pi,np.abs(Ed2[0,0,:,0,0,0,0]),'g',label='ISBinf')\nplt.plot(phi*180/np.pi,np.abs(Ei[0,0,:,0,0,0,0]),'g')\nplt.plot(phi*180/np.pi,np.abs(Ec2[0,0,:,0,0,0,0]),'g')\nplt.plot(phi*180/np.pi,np.abs(Ed4[0,0,:,0,0,0,0]),'r',label='ISBinf')\nplt.plot(phi*180/np.pi,np.abs(Ers[0,0,:,0,0,0,0]),'r')\nplt.plot(phi*180/np.pi,np.abs(Ec4[0,0,:,0,0,0,0]),'r')\n#plt.plot(phi*180/np.pi,np.abs(Ec24[0,0,:,0,0,0,0]),'r')\nplt.figure()\n#plt.plot(phi*180/np.pi,20*np.log10(np.abs(Ets[0,0,:,0,0,0,0])),'b',linewidth=2.5)\n#plt.plot(phi*180/np.pi,20*np.log10(np.abs(Eth[0,0,:,0,0,0,0])),'r',linewidth=2.5)\nplt.plot(phi*180/np.pi,20*np.log10(np.abs(Etsf[0,0,:,0,0,0,0])),'b',linewidth=1.5,label='soft')\nplt.plot(phi*180/np.pi,20*np.log10(np.abs(Ethf[0,0,:,0,0,0,0])),'r',linewidth=1.5,label='hard')\nplt.plot(phi*180/np.pi,20*np.log10(np.abs(Ds[0,0,:,0,0,0,0])),'b.')\nplt.plot(phi*180/np.pi,20*np.log10(np.abs(Dh[0,0,:,0,0,0,0])),'r.')\nplt.ylim([-40,20])\nplt.xlim([0,320])\nplt.xlabel(u'Angle $\\phi$')\nplt.ylabel(u'Magnitude (dB)')\nplt.title(u'$\\\\alpha=4^{\\circ},\\phi_0=55^{\\circ},f= 3 GHz, sd=1m$',fontsize=14)\nplt.grid()\n#plt.plot(phi*180/np.pi,np.real(Ei[0,0,:,0,0,0,0]),'g')\n#plt.plot(phi*180/np.pi,20*np.log10(np.abs(Ets[0,0,:,0,0,0,0])),'g.')\n#plt.plot(phi*180/np.pi,20*np.log10(np.abs(Eth[0,0,:,0,0,0,0])),'r')\n#plt.plot(phi*180/np.pi,np.real(Eth[0,0,:,0,0,0,0]),'r.')\n" ]
[ [ "numpy.max", "numpy.sin", "numpy.array", "numpy.empty", "numpy.isnan", "numpy.tan", "numpy.ones", "numpy.real", "numpy.exp", "numpy.shape", "numpy.where", "numpy.cos", "numpy.abs", "numpy.sqrt", "numpy.imag", "numpy.log10", "numpy.mod", "numpy.floor" ], [ "numpy.exp", "numpy.shape", "numpy.abs", "numpy.cos", "numpy.linspace" ] ]
HARSHAL-IITB/spa-design-tool
[ "84d250a02cc3f4af56770550c9f559feb524cb07" ]
[ "models/ogden9.py" ]
[ "#! /usr/bin/env python\r\n# The MIT License (MIT)\r\n#\r\n# Copyright (c) 2015, EPFL Reconfigurable Robotics Laboratory,\r\n# Philip Moseley, philip.moseley@gmail.com\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in\r\n# all copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r\n# THE SOFTWARE.\r\n\r\nimport numpy as np\r\n\r\n\r\n#--------------------------------------------------------------------------------\r\n# Material model name.\r\n#--------------------------------------------------------------------------------\r\ndef name(): return 'ogden9'\r\ndef pname(): return 'Ogden-9'\r\ndef params(): return 'u1 a1 u2 a2 u3 a3 u4 a4 u5 a5 u6 a6 u7 a7 u8 a8 u9 a9'\r\ndef descr(): return 'Ogden Model with order 9 (modified form).'\r\n\r\n# NOTE - this is the Abaqus form of the functions. 
Ogden2004 is similar, but they\r\n# show these functions as being multiplied by (a[i]/2.0)\r\n\r\n#--------------------------------------------------------------------------------\r\n# Function defining the uniaxial stress given strain.\r\n#--------------------------------------------------------------------------------\r\ndef stressU(x, u1, a1, u2, a2, u3, a3, u4, a4, u5, a5, u6, a6, u7, a7, u8, a8, u9, a9):\r\n L = 1.0+x\r\n S1 = 2.0 * u1 * (np.power(L,a1-1.0) - np.power(L,-0.5*a1-1.0)) / a1\r\n S2 = 2.0 * u2 * (np.power(L,a2-1.0) - np.power(L,-0.5*a2-1.0)) / a2\r\n S3 = 2.0 * u3 * (np.power(L,a3-1.0) - np.power(L,-0.5*a3-1.0)) / a3\r\n S4 = 2.0 * u4 * (np.power(L,a4-1.0) - np.power(L,-0.5*a4-1.0)) / a4\r\n S5 = 2.0 * u5 * (np.power(L,a5-1.0) - np.power(L,-0.5*a5-1.0)) / a5\r\n S6 = 2.0 * u6 * (np.power(L,a6-1.0) - np.power(L,-0.5*a6-1.0)) / a6\r\n S7 = 2.0 * u7 * (np.power(L,a7-1.0) - np.power(L,-0.5*a7-1.0)) / a7\r\n S8 = 2.0 * u8 * (np.power(L,a8-1.0) - np.power(L,-0.5*a8-1.0)) / a8\r\n S9 = 2.0 * u9 * (np.power(L,a9-1.0) - np.power(L,-0.5*a9-1.0)) / a9\r\n return S1+S2+S3+S4+S5+S6\r\n\r\n\r\n#--------------------------------------------------------------------------------\r\n# Function defining the biaxial stress given strain.\r\n#--------------------------------------------------------------------------------\r\ndef stressB(x, u1, a1, u2, a2, u3, a3, u4, a4, u5, a5, u6, a6, u7, a7, u8, a8, u9, a9):\r\n L = 1.0+x\r\n S1 = 2.0 * u1 * (np.power(L,a1-1.0) - np.power(L,-2.0*a1-1.0)) / a1\r\n S2 = 2.0 * u2 * (np.power(L,a2-1.0) - np.power(L,-2.0*a2-1.0)) / a2\r\n S3 = 2.0 * u3 * (np.power(L,a3-1.0) - np.power(L,-2.0*a3-1.0)) / a3\r\n S4 = 2.0 * u4 * (np.power(L,a4-1.0) - np.power(L,-2.0*a4-1.0)) / a4\r\n S5 = 2.0 * u5 * (np.power(L,a5-1.0) - np.power(L,-2.0*a5-1.0)) / a5\r\n S6 = 2.0 * u6 * (np.power(L,a6-1.0) - np.power(L,-2.0*a6-1.0)) / a6\r\n S7 = 2.0 * u7 * (np.power(L,a7-1.0) - np.power(L,-2.0*a7-1.0)) / a7\r\n S8 = 2.0 * u8 * (np.power(L,a8-1.0) - 
np.power(L,-2.0*a8-1.0)) / a8\r\n S9 = 2.0 * u9 * (np.power(L,a9-1.0) - np.power(L,-2.0*a9-1.0)) / a9\r\n return S1+S2+S3+S4+S5+S6\r\n\r\n\r\n#--------------------------------------------------------------------------------\r\n# Function defining the planar stress given strain.\r\n#--------------------------------------------------------------------------------\r\ndef stressP(x, u1, a1, u2, a2, u3, a3, u4, a4, u5, a5, u6, a6, u7, a7, u8, a8, u9, a9):\r\n L = 1.0+x\r\n S1 = 2.0 * u1 * (np.power(L,a1-1.0) - np.power(L,-a1-1.0)) / a1\r\n S2 = 2.0 * u2 * (np.power(L,a2-1.0) - np.power(L,-a2-1.0)) / a2\r\n S3 = 2.0 * u3 * (np.power(L,a3-1.0) - np.power(L,-a3-1.0)) / a3\r\n S4 = 2.0 * u4 * (np.power(L,a4-1.0) - np.power(L,-a4-1.0)) / a4\r\n S5 = 2.0 * u5 * (np.power(L,a5-1.0) - np.power(L,-a5-1.0)) / a5\r\n S6 = 2.0 * u6 * (np.power(L,a6-1.0) - np.power(L,-a6-1.0)) / a6\r\n S7 = 2.0 * u7 * (np.power(L,a7-1.0) - np.power(L,-a7-1.0)) / a7\r\n S8 = 2.0 * u8 * (np.power(L,a8-1.0) - np.power(L,-a8-1.0)) / a8\r\n S9 = 2.0 * u9 * (np.power(L,a9-1.0) - np.power(L,-a9-1.0)) / a9\r\n return S1+S2+S3+S4+S5+S6\r\n\r\n\r\n#--------------------------------------------------------------------------------\r\n# Calculate the Ds\r\n#--------------------------------------------------------------------------------\r\ndef compressibility(v, u1, u2, u3, u4, u5, u6, u7, u8, u9, a1, a2, a3, a4, a5, a6, a7, a8, a9):\r\n # This sum is what's in the ABQ manual (and what ABQ calculates with the data).\r\n # We get an error message which implies that u1 is what ABQ actually expects.\r\n # I believe the error message to be incorrect; setting u0=u1 typically results\r\n # in a much less compressible material, even though the error goes away.\r\n # u0 = u1\r\n u0 = u1+u2+u3+u4+u5+u6+u7+u8+u9\r\n D1 = 3.0*(1.0-2.0*v) / (u0*(1.0+v))\r\n return [D1,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]\r\n" ]
[ [ "numpy.power" ] ]
minjeekim00/stylegan2
[ "8b64fc348a99a1e517ee6dd5b80731b1c893d47f" ]
[ "custom/utils.py" ]
[ "# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, visit\n# https://nvlabs.github.io/stylegan2/license.html\n\n\"\"\"utility functions.\"\"\"\n\nimport os\nimport cv2\nimport numpy as np\nimport PIL.Image\nimport PIL.ImageFont\n\n#----------------------------------------------------------------------------\n\n# Image utils.\n\ndef windowing_brain(img_png, npy, channel=3): \n dcm = npy.copy()\n img_rows = 512\n img_cols = 512\n \n if channel == 1:\n npy = npy.squeeze()\n npy = cv2.resize(npy, (512,512), interpolation = cv2.INTER_LINEAR)\n npy = npy + 40\n npy = np.clip(npy, 0, 160)\n npy = npy / 160\n npy = 255 * npy\n npy = npy.astype(np.uint8)\n \n elif channel == 3:\n dcm1 = dcm[0] + 0\n dcm1 = np.clip(dcm1, 0, 80)\n dcm1 = dcm1 / 80.\n dcm1 *= (2**8-1)\n dcm1 = dcm1.astype(np.uint8)\n \n dcm2 = dcm[0] + 20\n dcm2 = np.clip(dcm2, 0, 200)\n dcm2 = dcm2 / 200.\n dcm2 *= (2**8-1)\n dcm2 = dcm2.astype(np.uint8)\n \n dcm3 = dcm[0] - 5\n dcm3 = np.clip(dcm3, 0, 50)\n dcm3 = dcm3 / 50.\n dcm3 *= (2**8-1)\n dcm3 = dcm3.astype(np.uint8)\n \n npy = np.zeros([img_rows,img_cols,3], dtype=int)\n npy[:,:,0] = dcm2\n npy[:,:,1] = dcm1\n npy[:,:,2] = dcm3\n \n return npy\n\ndef windowing_thorax(img_png, npy, channel=3):\n dcm = npy.copy()\n img_rows = 512\n img_cols = 512\n\n if channel == 1:\n npy = npy.squeeze()\n npy = cv2.resize(npy, (512,512), interpolation = cv2.INTER_LINEAR)\n npy = npy + 40 ## change to lung/med setting\n npy = np.clip(npy, 0, 160)\n npy = npy / 160\n npy = 255 * npy\n npy = npy.astype(np.uint8)\n\n elif channel == 3:\n dcm1 = dcm[0] + 150\n dcm1 = np.clip(dcm1, 0, 400)\n dcm1 = dcm1 / 400.\n dcm1 *= (2**8-1)\n dcm1 = dcm1.astype(np.uint8) \n \t\n dcm2 = dcm[0] - 250\n dcm2 = np.clip(dcm2, 0, 100)\n dcm2 = dcm2 / 100.\n dcm2 *= (2**8-1)\n dcm2 = dcm2.astype(np.uint8) \n \t\n dcm3 = dcm[0] + 950 \n dcm3 = np.clip(dcm3, 0, 1000)\n 
dcm3 = dcm3 / 1000.\n dcm3 *= (2**8-1)\n dcm3 = dcm3.astype(np.uint8) \n \t\n npy = np.zeros([img_rows,img_cols,3], dtype=int)\n npy[:,:,0] = dcm2\n npy[:,:,1] = dcm1\n npy[:,:,2] = dcm3\n\n return npy\n\n\ndef write_png_image(img_png, npy):\n if not os.path.exists(img_png):\n return cv2.imwrite(img_png, npy)\n else:\n return False\n \n\ndef adjust_dynamic_range(data, drange_in, drange_out):\n if drange_in != drange_out:\n scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0]))\n bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)\n data = data * scale + bias\n return data\n\n#----------------------------------------------------------------------------\n\n# Image utils to save figures\n\ndef smooth_image(image, threshold=7.5e4, kernel_x=6):\n kernel = np.ones((kernel_x,kernel_x),np.float32)/(kernel_x**2)\n return cv2.filter2D(image,-1,kernel)\n\ndef convert_to_numpy_array(image, drange=[0,1], rgbtogray=False):\n assert image.ndim == 2 or image.ndim == 3\n if image.ndim == 3:\n if image.shape[0] == 1:\n image = image[0] # grayscale CHW => HW\n else:\n image = image.transpose(1, 2, 0) # CHW -> HWC\n\n image = adjust_dynamic_range(image, drange, [0,255])\n image = np.rint(image).clip(0, 255).astype(np.uint8)\n \n if rgbtogray:\n return convert_rgb_to_gray(image)\n \n return image\n\ndef convert_to_pil_image(image, drange=[0,1]):\n import PIL.Image\n \n image = convert_to_numpy_array(image, drange)\n fmt = 'RGB' if image.ndim == 3 else 'L'\n return PIL.Image.fromarray(image, fmt)\n\ndef convert_rgb_to_gray(image):\n# return np.dot(img_np[...,:3], [0.299, 0.587, 0.114])\n return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\ndef create_summary_figure(img_in_np, img_out_np, mask, i, checkpoint=None):\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n\n img_in_np_tmp = adjust_dynamic_range(img_in_np, [0,255], [-1,1])\n img_out_np_tmp = adjust_dynamic_range(img_out_np, [0,255], 
[-1,1])\n img_diff = img_in_np_tmp - img_out_np_tmp\n img_diff_smooth = smooth_image(img_diff)\n img_diff_std = img_diff_smooth.std()\n\n fig=plt.figure(figsize=(16, 4))\n fig.add_subplot(1, 4, 1)\n plt.title('Input (real)')\n plt.axis('off')\n plt.tight_layout()\n plt.imshow(img_in_np_tmp, cmap=plt.cm.gray) #transpose(img_in_np))\n\n if (len(mask) > 0):\n mask_temp = np.flip(mask[i], 0)\n mask_temp = np.ma.masked_where(mask_temp == 0, mask_temp)\n plt.imshow(mask_temp, alpha=0.7, cmap=plt.cm.autumn)\n\n fig.add_subplot(1, 4, 2)\n plt.title('Output (fake)') if checkpoint else plt.title('Output (fake) iter: {}'.format(checkpoint))\n plt.axis('off')\n plt.imshow(img_out_np_tmp, cmap=plt.cm.gray)\n\n fig.add_subplot(1, 4, 3)\n plt.title('Difference (+)')\n plt.axis('off')\n plt.imshow(img_in_np_tmp, cmap=plt.cm.gray)\n\n norm = mpl.colors.Normalize(vmin=0, vmax=0.2)\n img_diff_smooth[(img_diff_smooth < img_diff_std*0.5) & (img_diff_smooth > img_diff_std*-0.5)] = 0.\n plt.imshow(img_diff_smooth, cmap='inferno', alpha=0.4, norm=norm)\n\n fig.add_subplot(1, 4, 4)\n plt.title('Difference (-)')\n plt.axis('off')\n plt.imshow(img_in_np_tmp, cmap=plt.cm.gray)\n\n vstd = img_diff_std\n\n# norm = mpl.colors.Normalize(vmin=0, vmax=vstd*5)\n img_diff_smooth[(img_diff_smooth < img_diff_std*0.5) & (img_diff_smooth > img_diff_std*-0.5)] = 0.\n plt.imshow(img_diff_smooth*-1, cmap='inferno', alpha=0.4, norm=norm)\n \n return fig\n \n\ndef apply_mirror_augment(minibatch):\n mask = np.random.rand(minibatch.shape[0]) < 0.5\n minibatch = np.array(minibatch)\n minibatch[mask] = minibatch[mask, :, :, ::-1]\n return minibatch\n\n#----------------------------------------------------------------------------\n" ]
[ [ "numpy.array", "numpy.random.rand", "numpy.zeros", "numpy.rint", "matplotlib.pyplot.title", "numpy.ones", "matplotlib.pyplot.figure", "numpy.float32", "matplotlib.pyplot.tight_layout", "matplotlib.colors.Normalize", "numpy.clip", "numpy.flip", "numpy.ma.masked_where", "matplotlib.pyplot.axis", "matplotlib.pyplot.imshow" ] ]
Huuush/learn_fashionai
[ "f5e63f37f69edfc8a681289208240fa05c8d4dc0" ]
[ "datasets/multi_length.py" ]
[ "import os\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport pandas as pd\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, Dataset\n\n\nclass FashionAIDataset(Dataset):\n AttrKey = {\n 'coat_length_labels': 8,\n 'pant_length_labels': 6,\n 'skirt_length_labels': 6,\n 'sleeve_length_labels': 9,\n }\n\n def __init__(self, data_root, attr_task, mode, transform=None):\n if attr_task not in ['coat_length_labels', 'collar_design_labels', 'lapel_design_labels', 'neck_design_labels',\n 'neckline_design_labels', 'pant_length_labels', 'skirt_length_labels',\n 'sleeve_length_labels']:\n print(\"{} attribute not exist!\".format(attr_task))\n raise RuntimeError\n self.mode = mode\n self.transform = transform\n self.data_folder = data_root\n self.attr_task = attr_task\n if self.mode == \"train\":\n self.label_file = os.path.join(data_root, \"Annotations\", f\"label_{self.attr_task}_train.csv\")\n if self.mode == \"test\":\n self.label_file = os.path.join(data_root, \"Annotations\", f\"label_{self.attr_task}_test.csv\")\n\n self.df = pd.read_csv(self.label_file, header=1, names=['img_path', 'task', 'label'])\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n # δΏθ―εŒδΈ€task\n img_info = self.df.iloc[idx]\n img_path = os.path.join(self.data_folder, img_info['img_path'])\n img_label = img_info['label']\n one_hot_target = np.zeros(FashionAIDataset.AttrKey[self.attr_task])\n # one_hot_target = F.one_hot(img_label, FashionAIDataset.AttrKey[self.attr_task])\n # todo: how to deal with label nnnym\n y_label_idx = img_label.find('y')\n one_hot_target[y_label_idx] = 1\n # m_count = img_label.count(\"m\")\n # if m_count != 0:\n # m_idx_list = []\n # m_idx=-1\n # for i in range(m_count):\n # m_idx = img_label.find(\"m\", m_idx+1)\n # m_idx_list.append(m_idx)\n # if m_count == 1:\n # one_hot_target[m_idx_list[0]] = 0.1\n # one_hot_target[y_label_idx] = 0.9\n # elif m_count == 2:\n # one_hot_target[m_idx_list[0]] = 0\n # 
one_hot_target[m_idx_list[1]] = 0.1\n # one_hot_target[y_label_idx] = 0.9\n # elif m_count == 3:\n # one_hot_target[m_idx_list[0]] = 0\n # one_hot_target[m_idx_list[1]] = 0.1\n # one_hot_target[m_idx_list[2]] = 0.1\n # one_hot_target[y_label_idx] = 0.9\n\n # use ablu toolkit\n # img_data = cv2.imread(img_path, cv2.IMREAD_COLOR)\n # img_data = cv2.cvtColor(img_data, cv2.COLOR_BGR2RGB)\n\n # use randargument\n img_data = Image.open(img_path).convert('RGB')\n if self.transform is not None:\n # albu toolkit\n # augmented = self.transform(image=img_data)\n # img_data = augmented['image']\n # torch\n img_data = self.transform(img_data)\n\n return img_data, one_hot_target\n\n # def get_filelist(self, file_dir):\n # import os\n # if not os.path.exists(file_dir):\n # return []\n # if os.path.isfile(file_dir):\n # return [file_dir]\n # result = []\n # for subdir in os.listdir(file_dir):\n # sub_path = os.path.join(file_dir, subdir)\n # result += self.get_filelist(sub_path)\n # return result\n\n\n# class FashionAIPercatDataset(Dataset):\n# def __int__(self, data_root, task, transform):\n# self.data_root = data_root\n# self.img_folder1 = os.path.join(self.data_root, \"Images\")\n# self.img_list =\n#\n# def __len__(self):\n# return len(self.df)\n#\n# def __getitem__(self, idx):\n#\n#\n# def get_filelist(self, file_dir):\n# import os\n# if not os.path.exists(file_dir):\n# return []\n# if os.path.isfile(file_dir):\n# return [file_dir]\n# result = []\n# for subdir in os.listdir(file_dir):\n# sub_path = os.path.join(file_dir, subdir)\n# result += self.get_filelist(sub_path)\n# return result\n\n\nif __name__ == '__main__':\n import albumentations as A\n from albumentations.pytorch import ToTensorV2\n\n\n def get_transforms(*, data):\n if data == 'train':\n return A.Compose([\n # A.Resize(CFG.size, CFG.size),\n A.RandomResizedCrop(512, 512),\n A.Transpose(p=0.5),\n A.HorizontalFlip(p=0.5),\n A.HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),\n 
A.RandomBrightnessContrast(brightness_limit=(-0.1, 0.1), contrast_limit=(-0.1, 0.1), p=0.5),\n A.VerticalFlip(p=0.5),\n A.ShiftScaleRotate(p=0.5),\n A.CoarseDropout(p=0.5),\n A.Cutout(p=0.5),\n A.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225],\n ),\n ToTensorV2(),\n ])\n\n elif data == 'test':\n return A.Compose([\n A.CenterCrop(512, 512),\n A.Resize(512, 512),\n # A.CenterCrop(CFG.size, CFG.size),\n A.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225],\n ),\n ToTensorV2(),\n ])\n\n\n AttrKey = {\n 'coat_length_labels': 8, # 上董\n 'collar_design_labels': 5, # 钆子\n 'lapel_design_labels': 5, # ηΏ»ι’†\n 'neck_design_labels': 5, # θ„–ι’ˆ\n 'neckline_design_labels': 10, # 针线\n 'pant_length_labels': 6, # 裀子\n 'skirt_length_labels': 6, # 裙子\n 'sleeve_length_labels': 9, # 蒖子\n }\n ###########################################################################################################\n data_fold = '/workspace/fashionai/datasets/fashionAI/train'\n trainset = FashionAIDataset(data_fold, 'collar_design_labels', mode='train', transform=get_transforms(data='train'))\n train_loader = DataLoader(trainset, batch_size=16, shuffle=True, num_workers=1, pin_memory=True, drop_last=True)\n\n for batch_idx, (data, label) in enumerate(train_loader):\n print(data.shape)\n print(label)\n #############################################################################################################\n\n###########################################################################################################\n# base_label = '/workspace/fashionai/datasets/fashionAI/train/Annotations/label_test.csv'\n# for task in [\"lapel_design_labels\", \"neck_design_labels\",\n# \"neckline_design_labels\", \"pant_length_labels\", \"skirt_length_labels\",\n# \"sleeve_length_labels\"]:\n# df = pd.read_csv(base_label, header=None, names=['img_path', 'task', 'label'])\n# # train_label = f'/workspace/fashionai/datasets/fashionAI/train/Annotations/label_{task}_train.csv'\n# 
test_label = f'/workspace/fashionai/datasets/fashionAI/train/Annotations/label_{task}_test.csv'\n#\n# df = df[df['task'].str.contains(task)]\n# df.to_csv(test_label, index=False)\n# print(\"{} done\".format(task))\n###########################################################################################################\n# base_label = '/workspace/fashionai/datasets/fashionAI/train/Annotations/label.csv'\n# train_label = '/workspace/fashionai/datasets/fashionAI/train/Annotations/label_train.csv'\n# test_label = '/workspace/fashionai/datasets/fashionAI/train/Annotations/label_test.csv'\n#\n# df = pd.read_csv(base_label)\n# shuffle_df = df.sample(frac=1, random_state=42)\n# cut_idx = int(round(0.8 * shuffle_df.shape[0]))\n# train_data, test_data = shuffle_df.iloc[:cut_idx], shuffle_df.iloc[cut_idx:]\n# train_data.to_csv(train_label, index=False)\n# test_data.to_csv(test_label, index=False)\n#############################################################################################################\n\n#############################################################################################################\n# base_label2 = '/workspace/fashionai/datasets/fashionAI/train/Annotations/label2.csv'\n# base_label1 = '/workspace/fashionai/datasets/fashionAI/train/Annotations/label1.csv'\n# outputcsv = '/workspace/fashionai/datasets/fashionAI/train/Annotations/label11111.csv'\n#\n# df1 = pd.read_csv(base_label1)\n# df2 = pd.read_csv(base_label2)\n#\n# all_csv = pd.concat([df1, df2], axis=0)\n# all_csv.to_csv(outputcsv, index=False)\n#\n# check = pd.read_csv(outputcsv)\n# print(check)\n#############################################################################################################\n\n#############################################################################################################\n# with open(base_label, 'r') as f:\n# lines = f.readlines()\n# count = 1\n# for l in lines:\n# img_name = l.rstrip().split(',')[0].split('/')[-1]\n# tokens[img_name] = 
count\n# count += 1\n#\n# with open(label2, 'r') as F:\n# lines = F.readlines()\n# for l in lines:\n# img_name2 = l.rstrip().split(',')[0].split('/')[-1]\n# if img_name2 in tokens.keys():\n# print(\"find!!!!!! {}\".format(img_name2))\n# print(\"data is clean...\")\n#############################################################################################################\n" ]
[ [ "pandas.read_csv", "torch.utils.data.DataLoader", "numpy.zeros" ] ]
EmilRyberg/bin_picking
[ "7616d066307c064c00a8de9ca6ec5d79d1620657" ]
[ "bin_picking_lib/move_robot/move_robot_moveit.py" ]
[ "import rospy\nfrom moveit_msgs.msg import ExecuteTrajectoryGoal\nimport numpy as np\nnp.set_printoptions(precision=3, suppress=True)\nimport math\nimport actionlib\nfrom scipy.spatial.transform.rotation import Rotation\nfrom bin_picking.msg import MoveRobotAction, MoveRobotGoal\nfrom bin_picking_lib.move_robot.ur_utils import Utils\n\n\ndef apply_transform_real_to_moveit(pose):\n pose_local = pose.copy()\n tvec = pose_local[:3]\n orientation = Rotation.from_rotvec(pose_local[3:])\n pose_tmat = Utils.trans_and_rot_to_tmat(tvec, orientation)\n rot_z_1 = Rotation.from_euler(\"xyz\", [0, 0, np.pi])\n rot_z_2 = Rotation.from_euler(\"xyz\", [0, 0, np.pi / 2])\n rot_y = Rotation.from_euler(\"xyz\", [0, -np.pi / 2, 0])\n tmat_z_1 = Utils.trans_and_rot_to_tmat([0, 0, 0], rot_z_1)\n tmat_z_2 = Utils.trans_and_rot_to_tmat([0, 0, 0], rot_z_2)\n tmat_y = Utils.trans_and_rot_to_tmat([0, 0, 0], rot_y)\n applied_tmat = tmat_z_1 @ pose_tmat @ tmat_z_2 @ tmat_y\n applied_trans, applied_rot = Utils.tmat_to_trans_and_rot(applied_tmat)\n applied_rvect = applied_rot.as_rotvec()\n return np.concatenate((applied_trans, applied_rvect))\n\n\ndef apply_transform_moveit_to_real(pose):\n pose_local = pose.copy()\n tvec = pose_local[:3]\n orientation = Rotation.from_rotvec(pose_local[3:])\n pose_tmat = Utils.trans_and_rot_to_tmat(tvec, orientation)\n rot_z_1 = Rotation.from_euler(\"xyz\", [0, 0, -np.pi])\n rot_z_2 = Rotation.from_euler(\"xyz\", [0, 0, -np.pi / 2])\n rot_y = Rotation.from_euler(\"xyz\", [0, np.pi / 2, 0])\n tmat_z_1 = Utils.trans_and_rot_to_tmat([0, 0, 0], rot_z_1)\n tmat_z_2 = Utils.trans_and_rot_to_tmat([0, 0, 0], rot_z_2)\n tmat_y = Utils.trans_and_rot_to_tmat([0, 0, 0], rot_y)\n applied_tmat = tmat_z_1 @ pose_tmat @ tmat_y @ tmat_z_2\n applied_trans, applied_rot = Utils.tmat_to_trans_and_rot(applied_tmat)\n applied_rvect = applied_rot.as_rotvec()\n return np.concatenate((applied_trans, applied_rvect))\n\nclass MoveRobotMoveIt:\n def __init__(self, create_node=False):\n if 
create_node:\n rospy.init_node(\"moveit_test\", anonymous=True)\n self.client = actionlib.SimpleActionClient(\"bin_picking_moveit_interface\", MoveRobotAction)\n rospy.loginfo(\"MoveRobotMoveIt waiting for action server to come online\")\n self.client.wait_for_server()\n #self.home_pose_l = [35, -300, 300, 0, 0, -0.8] # old\n self.home_pose_l = [0.035, -0.300, 0.300, 0, 0, -0.8]\n self.home_pose_gripper = [-60, -60, -110, -100, 90, -60]\n self.home_pose_suction = [-60, -60, -110, 170, -70, 100]\n self.move_out_of_view_pose = [-150, -60, -110, 170, -70, 100]\n self.default_orientation = [0, 0, 0]\n #self.gripper_tcp = [0, 0, 0.201, 0, 0, 0]\n #self.suction_tcp = [-0.193, 0, 0.08, 0, -np.pi/2, 0]\n\n self.camera_pose_gripper = [-60, -60, -110, -100, -90, -75]\n self.camera_pose_suction = [-5, -40, -100, -140, 0, -170]\n\n self.pcb_singularity_avoidance = [-70, -70, -107, -180, -147, 90]\n\n self.cover_closed = 20\n self.box_closed = 3\n\n #Part drop locations:\n self.white_cover_drop = [0.350, -0.400, 0.300, 2.89, 1.21, 0] # old\n #self.white_cover_drop = [-0.350, 0.400, 0.300, -0.61, 1.48, 0.62] #intermediate calc\n # New frames are R_z(pi) * current_transform * R_z(pi/2) * R_y(-pi/2)\n #self.white_cover_drop = [-0.350, 0.400, 0.300, 0.61, 1.48, -0.61] # this one is calibrated for new frames\n self.black_cover_drop = [200, -250, 100, 2.89, 1.21, 0] #old\n self.blue_cover_drop = [-50, -250, 100, 2.89, 1.21, 0] #old\n self.bottom_cover_drop = [-150, -350, 100, 2.89, 1.21, 0] #old\n self.pcb_drop = [-250, -450, 100, 2.89, 1.21, 0] #old\n\n rospy.loginfo(\"Move Robot interface ready\")\n\n def movej(self, pose, acceleration=1.0, velocity=0.1, degrees=True, max_retries=3):\n pose_local = pose.copy()\n if degrees:\n for i in range(6):\n pose_local[i] = math.radians(pose_local[i])\n goal = MoveRobotGoal()\n goal.action = \"joint\"\n goal.joint_goal.positions = pose_local\n goal.joint_goal.velocities = np.repeat(velocity, 6)\n retry_amount = 0\n success = False\n while 
retry_amount < max_retries:\n self.client.send_goal(goal)\n self.client.wait_for_result()\n result = self.client.get_result()\n success = result.success\n if success:\n break\n retry_amount += 1\n return success\n\n def movel(self, pose, acceleration=1.0, velocity=0.2, use_mm=False, max_retries=3):\n pose_local = pose.copy()\n if use_mm:\n pose_local[0] *= 0.001\n pose_local[1] *= 0.001\n pose_local[2] *= 0.001\n pose_local = self.apply_gripper_tcp_offset(pose_local)\n pose_local = apply_transform_real_to_moveit(pose_local)\n print(\"moving to \" + str(pose_local))\n quaternion = Rotation.from_rotvec(pose_local[3:]).as_quat()\n goal = MoveRobotGoal()\n goal.action = \"cartesian\"\n goal.cartesian_goal.position.x = pose_local[0]\n goal.cartesian_goal.position.y = pose_local[1]\n goal.cartesian_goal.position.z = pose_local[2]\n goal.cartesian_goal.orientation.x = quaternion[0]\n goal.cartesian_goal.orientation.y = quaternion[1]\n goal.cartesian_goal.orientation.z = quaternion[2]\n goal.cartesian_goal.orientation.w = quaternion[3]\n retry_amount = 0\n success = False\n while retry_amount < max_retries:\n self.client.send_goal(goal)\n self.client.wait_for_result()\n result = self.client.get_result()\n success = result.success\n if success:\n break\n retry_amount += 1\n return success\n\n def movel2(self, location, orientation, acceleration=1.0, velocity=0.2, use_mm=False, max_retries=3):\n return self.movel(np.concatenate((location, orientation)), use_mm=use_mm, max_retries=max_retries)\n\n def set_tcp(self, pose):\n # TODO: Implement this\n pass\n\n def move_to_home_suction(self, speed=1.0):\n return self.movej(self.home_pose_suction, acceleration=1.0, velocity=speed)\n\n def move_to_home_gripper(self, speed=1.0):\n return self.movej(self.home_pose_gripper, acceleration=1.0, velocity=speed)\n\n def move_to_home_l(self, speed=1.0):\n return self.movel(self.home_pose_l, acceleration=1.0, velocity=speed)\n\n def move_out_of_view(self, speed=2.0):\n return 
self.movej(self.move_out_of_view_pose, acceleration=1.0, velocity=speed)\n\n def open_gripper(self):\n goal = MoveRobotGoal()\n goal.action = \"open_gripper\"\n self.client.send_goal(goal)\n self.client.wait_for_result()\n result = self.client.get_result()\n return result.success\n\n def close_gripper(self, width=0, speed=5, lock=False, gripping_box=False):\n goal = MoveRobotGoal()\n goal.action = \"close_gripper\"\n goal.gripper_width = width\n goal.gripper_speed = speed\n goal.gripper_lock = lock\n goal.gripper_grip_box = gripping_box\n self.client.send_goal(goal)\n self.client.wait_for_result()\n result = self.client.get_result()\n return result.success\n\n def grasp_cover(self):\n return self.close_gripper(self.cover_closed)\n\n def grasp_box(self):\n return self.close_gripper(self.box_closed, gripping_box=True)\n\n def enable_suction(self):\n goal = MoveRobotGoal()\n goal.action = \"suction_on\"\n self.client.send_goal(goal)\n self.client.wait_for_result()\n result = self.client.get_result()\n return result.success\n\n def disable_suction(self):\n goal = MoveRobotGoal()\n goal.action = \"suction_off\"\n self.client.send_goal(goal)\n self.client.wait_for_result()\n result = self.client.get_result()\n return result.success\n\n def apply_gripper_tcp_offset(self, pose):\n pose_trans = pose[:3]\n pose_rot = Rotation.from_rotvec(pose[3:])\n pose_tmat = Utils.trans_and_rot_to_tmat(pose_trans, pose_rot)\n tcp_tmat = Utils.trans_and_rot_to_tmat([0, 0, -0.201], Rotation.from_rotvec([0, 0, 0]))\n new_pose_tmat = pose_tmat @ tcp_tmat\n new_pose_trans, new_pose_rot = Utils.tmat_to_trans_and_rot(new_pose_tmat)\n new_pose_rotvec = new_pose_rot.as_rotvec()\n return np.concatenate((new_pose_trans, new_pose_rotvec))\n\n\nif __name__ == \"__main__\":\n test_pose = np.array([-0.4379,0.33657,0.33055, 0.5, 0, 0.5])\n #moveit_pose = apply_transform_real_to_moveit(test_pose)\n world_pose = apply_transform_moveit_to_real(test_pose)\n print(\"start pose\", test_pose)\n 
#print(\"moveit_pose\", moveit_pose)\n print(\"world_pose\", world_pose)\n #mr = MoveRobotMoveIt(create_node=True)\n #mr.close_gripper(10)\n #mr.open_gripper()\n #mr.move_to_home_gripper()\n #mr.move_to_home_l()\n #mr.move_to_home_suction()\n #mr.move_out_of_view()\n #mr.movel(mr.white_cover_drop)\n\n #[0.350, -0.400, 0.300, 2.89, 1.21, 0] #old\n #[-0.350, 0.400, 0.300, 0.61, 1.48, -0.61] #new expected\n #a = mr.apply_gripper_tcp_offset([0.350, -0.400, 0.300, 2.89, 1.21, 0])\n #b = mr.apply_transform_real_to_moveit(a)\n #print(\"done\")" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.set_printoptions", "scipy.spatial.transform.rotation.Rotation.from_rotvec", "numpy.repeat", "scipy.spatial.transform.rotation.Rotation.from_euler" ] ]
awesome-archive/gobbli
[ "71aacbdc1184871b164185dc0c9f615f07b83173" ]
[ "gobbli/augment/bert/src/augment_text.py" ]
[ "import argparse\n\nimport torch\nimport torch.nn.functional as F\nfrom pytorch_transformers import BertConfig, BertForMaskedLM, BertTokenizer\n\n\ndef batch_list(l, batch_size):\n for i in range(0, len(l), batch_size):\n yield l[i : i + batch_size]\n\n\ndef encode_batch(batch, tokenizer, config):\n # Truncate input based on the sequence length\n # before stacking as a batch\n # Also return a boolean array indicating which tokens are masked\n encoded_texts = []\n for text in batch:\n encoded_text = torch.tensor(tokenizer.encode(text))[\n : config.max_position_embeddings\n ]\n encoded_texts.append(encoded_text)\n\n return torch.nn.utils.rnn.pad_sequence(encoded_texts, batch_first=True)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"input_file\", help=\"Input file containing line-delimited texts.\"\n )\n parser.add_argument(\n \"output_file\",\n help=\"Output file, where line-delimited generated texts will be written.\",\n )\n parser.add_argument(\n \"--bert-model\",\n help=\"BERT model to use. \"\n \"Can be one of the pretrained names supported by pytorch_transformers, in which case \"\n \"the pretrained weights will be downloaded. \"\n \"Anything else supported by pytorch_transformers should work as well. 
\",\n default=\"bert-base-uncased\",\n )\n parser.add_argument(\n \"--n-probable\",\n help=\"Number of probable tokens to consider for replacement.\",\n type=int,\n default=5,\n )\n parser.add_argument(\n \"--diversity\",\n help=\"Inverse dependence of selection likelihood on predicted probability.\",\n type=float,\n default=0.8,\n )\n parser.add_argument(\n \"--probability\",\n help=\"Probability of masking each token.\",\n type=float,\n default=0.1,\n )\n parser.add_argument(\n \"--times\",\n help=\"Number of new documents to generate for each existing document.\",\n type=int,\n default=5,\n )\n parser.add_argument(\n \"--batch-size\",\n help=\"Number of documents to run through the BERT model at once.\",\n type=int,\n default=32,\n )\n parser.add_argument(\n \"--cache-dir\",\n help=\"Directory to use as the cache for pytorch_transformer downloads.\",\n default=None,\n )\n parser.add_argument(\n \"--device\",\n default=\"cpu\",\n help=\"PyTorch name for the device to use when running the model. 
Default: %(default)s\",\n )\n\n args = parser.parse_args()\n\n if args.batch_size < 1:\n raise ValueError(\"batch_size must be >= 1\")\n\n if args.times < 0:\n raise ValueError(\"times must be >= 0\")\n\n if not 0 <= args.probability <= 1:\n raise ValueError(\"probability must be >= 0 and <= 1\")\n\n if not 0 < args.diversity <= 1:\n raise ValueError(\"diversity must be > 0 and <= 1\")\n\n tokenizer = BertTokenizer.from_pretrained(args.bert_model, cache_dir=args.cache_dir)\n if tokenizer is None:\n raise ValueError(\"Failed to acquire tokenizer\")\n config = BertConfig.from_pretrained(args.bert_model, cache_dir=args.cache_dir)\n if config is None:\n raise ValueError(\"Failed to acquire config\")\n model = BertForMaskedLM.from_pretrained(\n args.bert_model, config=config, cache_dir=args.cache_dir\n )\n if model is None:\n raise ValueError(\"Failed to acquire model\")\n\n model = model.to(args.device)\n model.eval()\n\n with open(args.input_file, \"r\", encoding=\"utf-8\") as f_in:\n with open(args.output_file, \"w\", encoding=\"utf-8\") as f_out:\n batches = batch_list(f_in.readlines(), args.batch_size)\n input_id_batches = [\n encode_batch(batch, tokenizer, config) for batch in batches\n ]\n\n for time in range(args.times):\n for batch_id, input_ids in enumerate(input_id_batches):\n # Generate a replacement mask for the batch. 
Do this each time we\n # run the batch to get different results\n # Don't mask any padding tokens, so we don't get predictions for them\n # and can easily remove them at the end\n should_replace = (\n torch.rand_like(input_ids, dtype=torch.float) < args.probability\n ) & (input_ids != tokenizer.vocab.get(tokenizer.pad_token))\n\n masked_ids = input_ids.clone().detach()\n masked_ids[should_replace] = tokenizer.vocab.get(\n tokenizer.mask_token\n )\n masked_ids = masked_ids.to(args.device)\n\n # These are created implicitly, but we need to explicitly\n # initialize them to their defaults because they won't\n # be on the chosen device otherwise\n attention_mask = torch.ones_like(masked_ids).to(args.device)\n token_type_ids = torch.zeros_like(masked_ids).to(args.device)\n\n with torch.no_grad():\n (output,) = model(\n masked_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n )\n\n # Bring model results back to the CPU for processing\n masked_ids = masked_ids.cpu()\n output = output.cpu()\n\n # Exponentiate according to inverse diversity to set level of\n # dependence on predicted probability\n prediction_scores = torch.pow(\n F.softmax(output, dim=2), 1.0 / args.diversity\n )\n\n max_seq_len = prediction_scores.size(1)\n\n output_ids = []\n for i, row in enumerate(prediction_scores):\n # size: (seq_len, vocab_len)\n # Generate probabilities for each token. 
Ultimately, we only\n # care about the ones that were masked\n # First generate the indices of all candidates, i.e.\n # tokens in the top n_probable when sorted by descending predicted\n # probability\n # size: (seq_len, n_probable)\n candidate_ndxs = torch.argsort(row, dim=1, descending=True)[\n :, : args.n_probable\n ]\n\n candidates = torch.stack(\n [tok[candidate_ndxs[i]] for i, tok in enumerate(row)]\n )\n\n # Determine a replacement among the candidates for each token\n # in the row\n # size: (seq_len)\n replacement_sorted_ndxs = torch.multinomial(\n candidates, 1\n ).squeeze()\n\n # Map the candidate indices back to original row indices\n # size: (seq_len)\n replacements = torch.tensor(\n [\n candidate_ndxs[i][ndx]\n for i, ndx in enumerate(replacement_sorted_ndxs)\n ]\n )\n\n # Perform the replacement\n output_ids.append(\n torch.where(\n should_replace[i], replacements, masked_ids[i]\n ).tolist()\n )\n\n # Convert the token IDs back to text; filter out padding tokens\n output_texts = [\n tokenizer.decode(\n [\n tok\n for tok in row\n if not tok == tokenizer.vocab.get(tokenizer.pad_token)\n ]\n )\n for row in output_ids\n ]\n\n # Write text to the output file\n # Escape any embedded newlines\n # Make sure not to write an extra newline after the last row\n for i, row in enumerate(output_texts):\n f_out.write(row.replace(\"\\n\", \" \"))\n\n # Write a newline except for the last row on the last\n # time through. We'd get an empty line at the end of the\n # file otherwise\n if not (\n i == len(output_texts) - 1\n and batch_id == len(input_id_batches) - 1\n and time == args.times - 1\n ):\n f_out.write(\"\\n\")\n" ]
[ [ "torch.rand_like", "torch.nn.utils.rnn.pad_sequence", "torch.no_grad", "torch.argsort", "torch.multinomial", "torch.ones_like", "torch.nn.functional.softmax", "torch.zeros_like", "torch.where" ] ]
kkleidal/kens_tf_utils
[ "b650475bceec4221044dabc9a2c219bf281672d0" ]
[ "torchapi.py" ]
[ "import tensorflow as tf\nimport tflearn as tfl\nfrom kentf.scoping import adapt_name\n\ndef pad(tensor, padW, padH, name=None):\n name = adapt_name(name, \"pad\")\n return tf.pad(tensor, [[0, 0], [padW, padW], [padH, padH], [0, 0]], name=name)\n\ndef SpatialConvolution(inp, _, nfilters, kW, kH, dW, dH, padW, padH, **kwargs):\n name = adapt_name(kwargs.get(\"name\", None), \"conv\")\n with tf.variable_scope(name):\n out = inp\n out = pad(out, padW, padH)\n config = dict(\n strides=(dW, dH),\n padding='valid',\n regularizer='L2',\n weights_init='xavier',\n bias_init='zeros',\n weight_decay=1.0,\n )\n config.update(kwargs)\n out = tfl.layers.conv.conv_2d(out, nfilters, (kW, kH), **config)\n return out\n\ndef SpatialMaxPooling(inp, kW, kH, dW=1, dH=1, padW=0, padH=0, **kwargs):\n name = adapt_name(kwargs.get(\"name\", None), \"pool\")\n with tf.name_scope(name):\n out = inp\n out = pad(out, padW, padH)\n config = dict(\n strides=(dW, dH),\n padding='valid',\n )\n config.update(kwargs)\n out = tfl.layers.conv.max_pool_2d(out, (kW, kH), **config)\n return out\n" ]
[ [ "tensorflow.pad", "tensorflow.variable_scope", "tensorflow.name_scope" ] ]
Hubblesphere/openpilot
[ "210da470fc78cce48f38691c2e355727f24e5bc2" ]
[ "selfdrive/controls/lib/lateral_planner.py" ]
[ "import math\nimport numpy as np\nfrom common.realtime import sec_since_boot, DT_MDL\nfrom common.numpy_fast import interp\nfrom selfdrive.swaglog import cloudlog\nfrom selfdrive.controls.lib.lateral_mpc_lib.lat_mpc import LateralMpc\nfrom selfdrive.controls.lib.drive_helpers import CONTROL_N, MPC_COST_LAT, LAT_MPC_N, CAR_ROTATION_RADIUS\nfrom selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE\nfrom selfdrive.config import Conversions as CV\nimport cereal.messaging as messaging\nfrom cereal import log\nfrom common.op_params import opParams\n\nLaneChangeState = log.LateralPlan.LaneChangeState\nLaneChangeDirection = log.LateralPlan.LaneChangeDirection\n\nLANE_CHANGE_SPEED_MIN = 5 * CV.MPH_TO_MS\nLANE_CHANGE_TIME_MAX = 10.\n\nDESIRES = {\n LaneChangeDirection.none: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.none,\n },\n LaneChangeDirection.left: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeLeft,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeLeft,\n },\n LaneChangeDirection.right: {\n LaneChangeState.off: log.LateralPlan.Desire.none,\n LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,\n LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeRight,\n LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeRight,\n },\n}\n\n\nclass LateralPlanner():\n def __init__(self, CP, use_lanelines=True, wide_camera=False):\n self.use_lanelines = use_lanelines\n self.LP = LanePlanner(wide_camera)\n self.op_params = opParams()\n\n self.last_cloudlog_t = 0\n self.steer_rate_cost = CP.steerRateCost\n\n self.solution_invalid_cnt = 0\n self.lane_change_state = 
LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n self.lane_change_timer = 0.0\n self.lane_change_ll_prob = 1.0\n self.keep_pulse_timer = 0.0\n self.prev_one_blinker = False\n self.desire = log.LateralPlan.Desire.none\n\n self.path_xyz = np.zeros((TRAJECTORY_SIZE,3))\n self.path_xyz_stds = np.ones((TRAJECTORY_SIZE,3))\n self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))\n self.t_idxs = np.arange(TRAJECTORY_SIZE)\n self.y_pts = np.zeros(TRAJECTORY_SIZE)\n\n self.lat_mpc = LateralMpc()\n self.reset_mpc(np.zeros(6))\n\n def reset_mpc(self, x0=np.zeros(6)):\n self.x0 = x0\n self.lat_mpc.reset(x0=self.x0)\n self.desired_curvature = 0.0\n self.safe_desired_curvature = 0.0\n self.desired_curvature_rate = 0.0\n self.safe_desired_curvature_rate = 0.0\n\n def update(self, sm, CP):\n v_ego = sm['carState'].vEgo\n active = sm['controlsState'].active\n measured_curvature = sm['controlsState'].curvature\n\n md = sm['modelV2']\n self.LP.parse_model(sm['modelV2'])\n if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:\n self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])\n self.t_idxs = np.array(md.position.t)\n self.plan_yaw = list(md.orientation.z)\n if len(md.orientation.xStd) == TRAJECTORY_SIZE:\n self.path_xyz_stds = np.column_stack([md.position.xStd, md.position.yStd, md.position.zStd])\n\n # Lane change logic\n one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker\n below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN\n\n if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX):\n self.lane_change_state = LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n else:\n # LaneChangeState.off\n if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:\n self.lane_change_state = LaneChangeState.preLaneChange\n self.lane_change_ll_prob = 1.0\n\n # 
LaneChangeState.preLaneChange\n elif self.lane_change_state == LaneChangeState.preLaneChange:\n # Set lane change direction\n if sm['carState'].leftBlinker:\n self.lane_change_direction = LaneChangeDirection.left\n elif sm['carState'].rightBlinker:\n self.lane_change_direction = LaneChangeDirection.right\n else: # If there are no blinkers we will go back to LaneChangeState.off\n self.lane_change_direction = LaneChangeDirection.none\n\n torque_applied = sm['carState'].steeringPressed and \\\n ((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or\n (sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right))\n if v_ego >= self.op_params.get('alca_no_nudge_speed') * CV.MPH_TO_MS:\n torque_applied = True\n\n blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or\n (sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))\n\n if not one_blinker or below_lane_change_speed:\n self.lane_change_state = LaneChangeState.off\n elif torque_applied and not blindspot_detected:\n self.lane_change_state = LaneChangeState.laneChangeStarting\n\n # LaneChangeState.laneChangeStarting\n elif self.lane_change_state == LaneChangeState.laneChangeStarting:\n # fade out over .5s\n self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2*DT_MDL, 0.0)\n\n # 98% certainty\n lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob\n if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:\n self.lane_change_state = LaneChangeState.laneChangeFinishing\n\n # LaneChangeState.laneChangeFinishing\n elif self.lane_change_state == LaneChangeState.laneChangeFinishing:\n # fade in laneline over 1s\n self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)\n if one_blinker and self.lane_change_ll_prob > 0.99:\n self.lane_change_state = LaneChangeState.preLaneChange\n elif 
self.lane_change_ll_prob > 0.99:\n self.lane_change_state = LaneChangeState.off\n\n if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:\n self.lane_change_timer = 0.0\n else:\n self.lane_change_timer += DT_MDL\n\n self.prev_one_blinker = one_blinker\n\n self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]\n\n # Send keep pulse once per second during LaneChangeStart.preLaneChange\n if self.lane_change_state in [LaneChangeState.off, LaneChangeState.laneChangeStarting]:\n self.keep_pulse_timer = 0.0\n elif self.lane_change_state == LaneChangeState.preLaneChange:\n self.keep_pulse_timer += DT_MDL\n if self.keep_pulse_timer > 1.0:\n self.keep_pulse_timer = 0.0\n elif self.desire in [log.LateralPlan.Desire.keepLeft, log.LateralPlan.Desire.keepRight]:\n self.desire = log.LateralPlan.Desire.none\n\n # Turn off lanes during lane change\n if self.desire == log.LateralPlan.Desire.laneChangeRight or self.desire == log.LateralPlan.Desire.laneChangeLeft:\n self.LP.lll_prob *= self.lane_change_ll_prob\n self.LP.rll_prob *= self.lane_change_ll_prob\n if self.use_lanelines:\n d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)\n self.lat_mpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)\n else:\n d_path_xyz = self.path_xyz\n path_cost = np.clip(abs(self.path_xyz[0,1]/self.path_xyz_stds[0,1]), 0.5, 5.0) * MPC_COST_LAT.PATH\n # Heading cost is useful at low speed, otherwise end of plan can be off-heading\n heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])\n self.lat_mpc.set_weights(path_cost, heading_cost, CP.steerRateCost)\n y_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:,1])\n heading_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)\n self.y_pts = y_pts\n\n assert len(y_pts) == LAT_MPC_N + 1\n assert len(heading_pts) == LAT_MPC_N + 1\n self.x0[4] = v_ego\n 
self.lat_mpc.run(self.x0,\n v_ego,\n CAR_ROTATION_RADIUS,\n y_pts,\n heading_pts)\n # init state for next\n self.x0[3] = interp(DT_MDL, self.t_idxs[:LAT_MPC_N + 1], self.lat_mpc.x_sol[:,3])\n\n\n # Check for infeasable MPC solution\n mpc_nans = any(math.isnan(x) for x in self.lat_mpc.x_sol[:,3])\n t = sec_since_boot()\n if mpc_nans or self.lat_mpc.solution_status != 0:\n self.reset_mpc()\n self.x0[3] = measured_curvature\n if t > self.last_cloudlog_t + 5.0:\n self.last_cloudlog_t = t\n cloudlog.warning(\"Lateral mpc - nan: True\")\n\n if self.lat_mpc.cost > 20000. or mpc_nans:\n self.solution_invalid_cnt += 1\n else:\n self.solution_invalid_cnt = 0\n\n def publish(self, sm, pm):\n plan_solution_valid = self.solution_invalid_cnt < 2\n plan_send = messaging.new_message('lateralPlan')\n plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'modelV2'])\n plan_send.lateralPlan.laneWidth = float(self.LP.lane_width)\n plan_send.lateralPlan.dPathPoints = [float(x) for x in self.y_pts]\n plan_send.lateralPlan.psis = [float(x) for x in self.lat_mpc.x_sol[0:CONTROL_N, 2]]\n plan_send.lateralPlan.curvatures = [float(x) for x in self.lat_mpc.x_sol[0:CONTROL_N,3]]\n plan_send.lateralPlan.curvatureRates = [float(x) for x in self.lat_mpc.u_sol[0:CONTROL_N-1]] +[0.0]\n plan_send.lateralPlan.lProb = float(self.LP.lll_prob)\n plan_send.lateralPlan.rProb = float(self.LP.rll_prob)\n plan_send.lateralPlan.dProb = float(self.LP.d_prob)\n plan_send.lateralPlan.cameraOffset = float(self.LP.camera_offset)\n\n plan_send.lateralPlan.mpcSolutionValid = bool(plan_solution_valid)\n\n plan_send.lateralPlan.desire = self.desire\n plan_send.lateralPlan.laneChangeState = self.lane_change_state\n plan_send.lateralPlan.laneChangeDirection = self.lane_change_direction\n\n pm.send('lateralPlan', plan_send)\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.zeros", "numpy.ones", "numpy.arange", "numpy.column_stack" ] ]