Dataset columns: repo_name (string, 6-130 chars), hexsha (list), file_path (list), code (list), apis (list)
drixselecta/homebytwo
[ "29d26ce9f5586943e3b64c95aa4ce9ea7263bd10" ]
[ "homebytwo/routes/models/activity.py" ]
[ "from abc import abstractmethod\nfrom typing import List, Optional\n\nfrom django.contrib.gis.db import models\nfrom django.contrib.gis.measure import D\nfrom django.core.exceptions import FieldError\nfrom django.db.models import Count\n\nfrom numpy import array\nfrom pandas import DataFrame\nfrom stravalib import unithelper\nfrom stravalib.exc import ObjectNotFound\n\nfrom ...core.models import TimeStampedModel\nfrom ..fields import DataFrameField, NumpyArrayField\nfrom ..prediction_model import PredictionModel\n\nSTREAM_TYPES = [\"time\", \"altitude\", \"distance\", \"moving\"]\nSTRAVA_ACTIVITY_URL = \"https://www.strava.com/activities/{}\"\n\n\ndef athlete_streams_directory_path(instance, filename):\n # streams will upload to MEDIA_ROOT/athlete_<id>/<filename>\n return f\"athlete_{instance.athlete.id}/streams/{filename}\"\n\n\ndef get_default_array():\n \"\"\"\n default array (mutable) for the `regression_coefficients` NumpyArrayField.\n \"\"\"\n return array([0.0, 0.0, 0.0, 0.075, 0.0004, 0.0001, 0.0001]).copy()\n\n\ndef get_default_category():\n \"\"\"\n default list (mutable) for the categories saved by the one-hot encoder ArrayField.\n \"\"\"\n return array([\"None\"]).copy()\n\n\ndef update_user_activities_from_strava(athlete, after=None, before=None, limit=1000):\n \"\"\"\n fetches an athlete's activities from Strava and saves them to the Database.\n It erases the ones that are no more available because they have been deleted\n or set to private and returns all of the athlete's current activities.\n\n Parameters:\n 'after': start date after specified value (UTC). datetime.datetime, str or None.\n 'before': start date before specified value (UTC). datetime.datetime or str or None\n 'limit': maximum activities retrieved. Integer\n\n See https://pythonhosted.org/stravalib/usage/activities.html#list-of-activities and\n https://developers.strava.com/playground/#/Activities/getLoggedInAthleteActivities\n \"\"\"\n\n # retrieve the athlete's activities on Strava\n strava_activities = athlete.strava_client.get_activities(\n before=before, after=after, limit=limit\n )\n\n current_activities = []\n for strava_activity in strava_activities:\n if is_activity_supported(strava_activity):\n activity = Activity.get_or_stub(strava_activity.id, athlete)\n activity.update_with_strava_data(strava_activity)\n current_activities.append(activity)\n\n # delete existing activities that are not in the Strava result\n existing_activities = Activity.objects.filter(athlete=athlete)\n existing_activities.exclude(\n id__in=[activity.id for activity in current_activities]\n ).delete()\n\n return current_activities\n\n\ndef is_activity_supported(strava_activity):\n \"\"\"\n check that the activity was not manually uploaded by the athlete\n and if the activity type is supported by homebytwo\n \"\"\"\n if strava_activity.manual:\n return False\n if strava_activity.type not in ActivityType.SUPPORTED_ACTIVITY_TYPES:\n return False\n return True\n\n\ndef are_streams_valid(strava_streams):\n \"\"\"\n check if all required stream types are present and\n if they all contain values.\n \"\"\"\n if not all(stream_type in strava_streams for stream_type in STREAM_TYPES):\n return False\n if not all(raw_stream.original_size > 0 for raw_stream in strava_streams.values()):\n return False\n return True\n\n\nclass ActivityQuerySet(models.QuerySet):\n def for_user(self, user):\n \"\"\"\n return all routes of a given user.\n this is convenient with the 'request.user' object in views.\n \"\"\"\n return 
self.filter(athlete=user.athlete)\n\n\nclass ActivityManager(models.Manager):\n def get_queryset(self):\n return ActivityQuerySet(self.model, using=self._db)\n\n def for_user(self, user):\n return self.get_queryset().for_user(user)\n\n\nclass Activity(TimeStampedModel):\n \"\"\"\n An athlete's Strava activity used to train their prediction models\n \"\"\"\n\n NONE = None\n DEFAULT_RUN = 0\n RACE_RUN = 1\n LONG_RUN = 2\n WORKOUT_RUN = 3\n DEFAULT_RIDE = 10\n RACE_RIDE = 11\n WORKOUT_RIDE = 12\n\n WORKOUT_TYPE_CHOICES = [\n (NONE, \"None\"),\n (DEFAULT_RUN, \"default run\"),\n (RACE_RUN, \"race run\"),\n (LONG_RUN, \"long run\"),\n (WORKOUT_RUN, \"workout run\"),\n (DEFAULT_RIDE, \"default ride\"),\n (RACE_RIDE, \"race ride\"),\n (WORKOUT_RIDE, \"workout ride\"),\n ]\n\n # name of the activity as imported from Strava\n name = models.CharField(max_length=255)\n\n # description of the activity as imported from Strava\n description = models.TextField(blank=True)\n\n # Activity ID on Strava\n strava_id = models.BigIntegerField(unique=True)\n\n # Starting date and time of the activity in UTC\n start_date = models.DateTimeField()\n\n # Athlete whose activities have been imported from Strava\n athlete = models.ForeignKey(\n \"Athlete\", on_delete=models.CASCADE, related_name=\"activities\"\n )\n\n # type of the activity as defined on Strava\n activity_type = models.ForeignKey(\n \"ActivityType\", on_delete=models.PROTECT, related_name=\"activities\"\n )\n\n # Total activity distance\n distance = models.FloatField(\"Activity distance in m\", blank=True, null=True)\n\n # elevation gain in m\n total_elevation_gain = models.FloatField(\n \"Total elevation gain in m\", blank=True, null=True\n )\n\n # total duration of the activity in seconds as opposed to moving time\n elapsed_time = models.DurationField(\n \"Total activity time as timedelta\", blank=True, null=True\n )\n\n # time in movement during the activity\n moving_time = models.DurationField(\n \"Movement time as timedelta\", blank=True, null=True\n )\n\n # streams retrieved from the Strava API\n streams = DataFrameField(\n null=True, upload_to=athlete_streams_directory_path, unique_fields=[\"strava_id\"]\n )\n\n # skip trying to import streams from Strava\n skip_streams_import = models.BooleanField(default=False)\n\n # Workout Type as defined in Strava\n workout_type = models.SmallIntegerField(\n choices=WORKOUT_TYPE_CHOICES, blank=True, null=True\n )\n\n # is the activity flagged as a commute?\n commute = models.BooleanField(default=False)\n\n # Gear used if any\n gear = models.ForeignKey(\n \"Gear\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name=\"activities\",\n )\n\n class Meta:\n ordering = [\"-start_date\"]\n verbose_name_plural = \"activities\"\n\n # Custom manager\n objects = ActivityManager()\n\n def __str__(self):\n return \"{0}: {1} - {2}\".format(self.activity_type, self.name, self.athlete)\n\n def get_strava_url(self):\n # return the absolute URL to the activity on Strava\n return STRAVA_ACTIVITY_URL.format(self.strava_id)\n\n def get_distance(self):\n # return the activity distance as a Distance object\n return D(m=self.distance)\n\n def get_total_elevation_gain(self):\n # return the total elevation gain as a Distance object\n return D(m=self.total_elevation_gain)\n\n @classmethod\n def get_or_stub(cls, strava_id, athlete):\n \"\"\"\n use Strava id to return an activity from the database or an activity stub\n \"\"\"\n try:\n activity = cls.objects.get(strava_id=strava_id)\n except 
cls.DoesNotExist:\n activity = cls(strava_id=strava_id, athlete=athlete)\n\n return activity\n\n def get_activity_from_strava(self):\n \"\"\"\n retrieve single activity information from Strava.\n \"\"\"\n try:\n strava_activity = self.athlete.strava_client.get_activity(self.strava_id)\n\n # Activity was deleted or made private on Strava\n except ObjectNotFound:\n if self.id:\n self.delete()\n\n # strava activity was found on Strava\n else:\n return strava_activity\n\n def update_with_strava_data(self, strava_activity, commit=True):\n \"\"\"\n update an activity based on information received from Strava.\n\n :param strava_activity: the activity object returned by the Strava API client.\n :param commit: save Strava activity to the database\n \"\"\"\n\n # fields from the Strava API object mapped to the Activity Model\n fields_map = {\n \"name\": strava_activity.name,\n \"activity_type\": strava_activity.type,\n \"start_date\": strava_activity.start_date,\n \"elapsed_time\": strava_activity.elapsed_time,\n \"moving_time\": strava_activity.moving_time,\n \"description\": strava_activity.description,\n \"workout_type\": strava_activity.workout_type,\n \"distance\": unithelper.meters(strava_activity.distance),\n \"total_elevation_gain\": unithelper.meters(\n strava_activity.total_elevation_gain\n ),\n \"gear\": strava_activity.gear_id,\n \"commute\": strava_activity.commute,\n }\n\n # find or create the activity type\n fields_map[\"activity_type\"], created = ActivityType.objects.get_or_create(\n name=strava_activity.type\n )\n\n if strava_activity.gear_id:\n # resolve foreign key relationship for gear and get gear info if new\n fields_map[\"gear\"], created = Gear.objects.get_or_create(\n strava_id=strava_activity.gear_id, athlete=self.athlete\n )\n if created:\n fields_map[\"gear\"].update_from_strava()\n\n # transform description text to empty if None\n if strava_activity.description is None:\n fields_map[\"description\"] = \"\"\n\n # update activity information\n for key, value in fields_map.items():\n setattr(self, key, value)\n\n if commit:\n self.save()\n\n def update_activity_streams_from_strava(self):\n \"\"\"\n save activity streams from Strava in a pandas DataFrame.\n returns True if streams could be imported.\n \"\"\"\n strava_streams = self.get_streams_from_strava()\n\n if strava_streams and are_streams_valid(strava_streams):\n self.streams = DataFrame(\n {key: stream.data for key, stream in strava_streams.items()}\n )\n self.save(update_fields=[\"streams\"])\n return True\n\n # otherwise, skip trying to get the streams next time\n self.skip_streams_import = True\n self.save(update_fields=[\"skip_streams_import\"])\n return False\n\n def get_streams_from_strava(self, resolution=\"low\"):\n \"\"\"\n Return activity streams from Strava: Time, Altitude, Distance and Moving.\n\n Only activities with all four required types of stream present will be returned.\n Setting a 'low' resolution provides free downsampling of the data\n for better accuracy in the prediction.\n \"\"\"\n\n strava_client = self.athlete.strava_client\n return strava_client.get_activity_streams(\n self.strava_id, types=STREAM_TYPES, resolution=resolution\n )\n\n def get_training_data(self):\n \"\"\"\n return activity data for training the linear regression model.\n \"\"\"\n\n # load activity streams as a DataFrame\n activity_data = self.streams\n\n # calculate gradient in percent, pace in seconds per meter and\n # cumulative elevation gain\n activity_data[\"step_distance\"] = activity_data.distance.diff()\n 
activity_data[\"gradient\"] = (\n activity_data.altitude.diff() / activity_data.step_distance * 100\n )\n activity_data[\"pace\"] = activity_data.time.diff() / activity_data.step_distance\n activity_data[\"cumulative_elevation_gain\"] = activity_data.altitude.diff()[\n activity_data.altitude.diff() >= 0\n ].cumsum()\n activity_data[\n \"cumulative_elevation_gain\"\n ] = activity_data.cumulative_elevation_gain.fillna(method=\"ffill\").fillna(\n value=0\n )\n\n # remove rows with empty gradient or empty pace\n columns = [\"gradient\", \"pace\"]\n activity_data = activity_data[activity_data[columns].notnull().all(1)].copy()\n\n # add activity information to every row\n activity_properties = {\n \"strava_id\": self.strava_id,\n \"start_date\": self.start_date,\n \"total_elevation_gain\": self.total_elevation_gain,\n \"total_distance\": self.distance,\n \"gear\": self.gear.strava_id if self.gear else \"None\",\n \"workout_type\": self.get_workout_type_display()\n if self.workout_type or self.workout_type == 0\n else \"None\",\n \"commute\": self.commute,\n }\n\n return activity_data.assign(\n **{key: value for key, value in activity_properties.items()}\n )\n\n\nclass PredictedModel(models.Model):\n \"\"\"\n base Model for training and persisting schedule prediction models\n\n Subclassed by ActivityType and ActivityPerformance.\n \"\"\"\n\n # list of regression coefficients as trained by the regression model\n regression_coefficients = NumpyArrayField(\n models.FloatField(), default=get_default_array\n )\n\n # flat pace in seconds per meter: the intercept of the regression\n flat_parameter = models.FloatField(default=0.36) # 6:00/km or 10km/h\n\n # workout_type categories found by the prediction model\n workout_type_categories = NumpyArrayField(\n models.CharField(max_length=50),\n default=get_default_category,\n )\n\n # reliability and cross_validation scores of the prediction model\n # between 0.0 and 1.0\n model_score = models.FloatField(default=0.0)\n cv_scores = NumpyArrayField(models.FloatField(), default=get_default_array)\n\n class Meta:\n abstract = True\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n set activity_type and adapt to the number of categorical columns.\n\n self._activity_type is required to remove outliers in the training data based\n on max and min speed and gradient.\n\n categorical columns determines the shape of the regression_coefficients array\n \"\"\"\n super().__init__(*args, **kwargs)\n\n # set _activity_type to self or related Model\n if hasattr(self, \"activity_type\"):\n self._activity_type = self.activity_type\n elif isinstance(self, ActivityType):\n self._activity_type = self\n else:\n raise FieldError(f\"Cannot find activity_type for {self}\")\n\n # set default value for regression_coefficients based on the number of\n # categorical columns present in the Model\n categorical_coefficients = [0.0] * len(self.get_categorical_columns())\n numerical_coefficients = [0.0, 0.075, 0.0004, 0.0001, 0.0001]\n coefficients = self._meta.get_field(\"regression_coefficients\")\n coefficients.default = array(categorical_coefficients + numerical_coefficients)\n\n @abstractmethod\n def get_training_activities(self, max_num_activities: Optional[int]):\n \"\"\"\n retrieve activities to train the prediction model\n\n must be implemented in the subclasses.\n \"\"\"\n raise NotImplementedError\n\n def get_training_data(self, limit_activities: Optional[int] = None) -> DataFrame:\n \"\"\"\n retrieve training data for the prediction model\n\n :param limit_activities: maximum 
number of Strava activities used to feed the\n prediction model, defaults to `None`, i.e. all available activities\n \"\"\"\n target_activities = self.get_training_activities(limit_activities)\n\n # collect activity_data into a pandas DataFrame\n observations = DataFrame()\n for activity in target_activities:\n observations = observations.append(\n activity.get_training_data(), sort=True, ignore_index=True\n )\n\n return observations\n\n def remove_outliers(self, observations):\n \"\"\"\n remove speed and gradient outliers from training data based on ActivityType\n \"\"\"\n return observations[\n (observations.pace > self._activity_type.min_pace)\n & (observations.pace < self._activity_type.max_pace)\n & (observations.gradient > self._activity_type.min_gradient)\n & (observations.gradient < self._activity_type.max_gradient)\n ]\n\n @classmethod\n def get_categorical_columns(cls) -> List[str]:\n \"\"\"\n determine columns to use for categorical data based on available Model fields\n\n ActivityPerformance has two fields: gear_categories, workout_type_categories\n ActivityType has one: workout_type_categories\n \"\"\"\n possible_columns = [\"gear\", \"workout_type\"]\n return list(filter(lambda c: hasattr(cls, f\"{c}_categories\"), possible_columns))\n\n def train_prediction_model(self, limit_activities: Optional[int] = None) -> str:\n \"\"\"\n train prediction model for ActivityType or ActivityPerformance\n\n :param limit_activities: max number of activities considered for training\n :return: description of the training result\n \"\"\"\n\n observations = self.get_training_data(limit_activities=limit_activities)\n if observations.empty:\n return (\n f\"No training data found for activity type: {self._activity_type.name}\"\n )\n\n # remove outliers\n data = self.remove_outliers(observations)\n\n # determine categorical columns for model training\n categorical_columns = self.get_categorical_columns()\n\n # train prediction model\n prediction_model = PredictionModel(categorical_columns=categorical_columns)\n feature_columns = (\n prediction_model.numerical_columns + prediction_model.categorical_columns\n )\n prediction_model.train(\n y=data[\"pace\"],\n x=data[feature_columns].fillna(value=\"None\"),\n )\n\n # save model score\n self.model_score = prediction_model.model_score\n self.cv_scores = prediction_model.cv_scores\n\n # save coefficients and intercept\n regression = prediction_model.pipeline.named_steps[\"linearregression\"]\n self.regression_coefficients = regression.coef_\n self.flat_parameter = regression.intercept_\n\n # save categories from categorical columns\n for index, column in enumerate(prediction_model.categorical_columns):\n setattr(\n self,\n f\"{column}_categories\",\n prediction_model.onehot_encoder_categories[index],\n )\n\n self.save()\n\n message = (\n f\"{self} successfully trained with {data.shape[0]} observations. \"\n f\"Model score: {self.model_score}, \"\n f\"cross-validation score: {self.cv_scores}. 
\"\n )\n return message\n\n def get_prediction_model(self) -> PredictionModel:\n \"\"\"\n restore the Prediction Model from the saved parameters\n \"\"\"\n\n # retrieve categorical columns and values\n categorical_columns = self.get_categorical_columns()\n onehot_encoder_categories = []\n for column in categorical_columns:\n onehot_encoder_categories.append(getattr(self, column + \"_categories\"))\n\n return PredictionModel(\n regression_intercept=self.flat_parameter,\n regression_coefficients=self.regression_coefficients,\n categorical_columns=categorical_columns,\n onehot_encoder_categories=onehot_encoder_categories,\n )\n\n\nclass ActivityTypeQuerySet(models.QuerySet):\n def predicted(self):\n \"\"\"\n retrieve athlete activity_type choices available for schedule prediction\n \"\"\"\n activity_types = self.filter(name__in=ActivityType.SUPPORTED_ACTIVITY_TYPES)\n activity_types = activity_types.exclude(activities=None)\n activity_types = activity_types.annotate(num_activities=Count(\"activities\"))\n return activity_types.order_by(\"-num_activities\")\n\n def for_athlete(self, athlete):\n \"\"\"\n retrieve activity_type choices available for schedule prediction\n \"\"\"\n return self.predicted().filter(activities__athlete=athlete)\n\n\nclass ActivityType(PredictedModel):\n \"\"\"\n ActivityType is used to define default performance values for each type of activity.\n The choice of available activities is limited to the ones available on Strava:\n http://developers.strava.com/docs/reference/#api-models-ActivityType\n \"\"\"\n\n # Strava activity types\n ALPINESKI = \"AlpineSki\"\n BACKCOUNTRYSKI = \"BackcountrySki\"\n CANOEING = \"Canoeing\"\n CROSSFIT = \"Crossfit\"\n EBIKERIDE = \"EBikeRide\"\n ELLIPTICAL = \"Elliptical\"\n GOLF = \"Golf\"\n HANDCYCLE = \"Handcycle\"\n HIKE = \"Hike\"\n ICESKATE = \"IceSkate\"\n INLINESKATE = \"InlineSkate\"\n KAYAKING = \"Kayaking\"\n KITESURF = \"Kitesurf\"\n NORDICSKI = \"NordicSki\"\n RIDE = \"Ride\"\n ROCKCLIMBING = \"RockClimbing\"\n ROLLERSKI = \"RollerSki\"\n ROWING = \"Rowing\"\n RUN = \"Run\"\n SAIL = \"Sail\"\n SKATEBOARD = \"Skateboard\"\n SNOWBOARD = \"Snowboard\"\n SNOWSHOE = \"Snowshoe\"\n SOCCER = \"Soccer\"\n STAIRSTEPPER = \"StairStepper\"\n STANDUPPADDLING = \"StandUpPaddling\"\n SURFING = \"Surfing\"\n SWIM = \"Swim\"\n VELOMOBILE = \"Velomobile\"\n VIRTUALRIDE = \"VirtualRide\"\n VIRTUALRUN = \"VirtualRun\"\n WALK = \"Walk\"\n WEIGHTTRAINING = \"WeightTraining\"\n WHEELCHAIR = \"Wheelchair\"\n WINDSURF = \"Windsurf\"\n WORKOUT = \"Workout\"\n YOGA = \"Yoga\"\n\n ACTIVITY_NAME_CHOICES = [\n (ALPINESKI, \"Alpine Ski\"),\n (BACKCOUNTRYSKI, \"Backcountry Ski\"),\n (CANOEING, \"Canoeing\"),\n (CROSSFIT, \"Crossfit\"),\n (EBIKERIDE, \"E-Bike Ride\"),\n (ELLIPTICAL, \"Elliptical\"),\n (GOLF, \"Golf\"),\n (HANDCYCLE, \"Handcycle\"),\n (HIKE, \"Hike\"),\n (ICESKATE, \"Ice Skate\"),\n (INLINESKATE, \"Inline Skate\"),\n (KAYAKING, \"Kayaking\"),\n (KITESURF, \"Kitesurf\"),\n (NORDICSKI, \"Nordic Ski\"),\n (RIDE, \"Ride\"),\n (ROCKCLIMBING, \"Rock Climbing\"),\n (ROLLERSKI, \"Roller Ski\"),\n (ROWING, \"Rowing\"),\n (RUN, \"Run\"),\n (SAIL, \"Sail\"),\n (SKATEBOARD, \"Skateboard\"),\n (SNOWBOARD, \"Snowboard\"),\n (SNOWSHOE, \"Snowshoe\"),\n (SOCCER, \"Soccer\"),\n (STAIRSTEPPER, \"Stair Stepper\"),\n (STANDUPPADDLING, \"Stand-Up Paddling\"),\n (SURFING, \"Surfing\"),\n (SWIM, \"Swim\"),\n (VELOMOBILE, \"Velomobile\"),\n (VIRTUALRIDE, \"Virtual Ride\"),\n (VIRTUALRUN, \"Virtual Run\"),\n (WALK, \"Walk\"),\n (WEIGHTTRAINING, \"Weight 
Training\"),\n (WHEELCHAIR, \"Wheelchair\"),\n (WINDSURF, \"Windsurf\"),\n (WORKOUT, \"Workout\"),\n (YOGA, \"Yoga\"),\n ]\n\n SUPPORTED_ACTIVITY_TYPES = {\n BACKCOUNTRYSKI,\n EBIKERIDE,\n HANDCYCLE,\n HIKE,\n INLINESKATE,\n NORDICSKI,\n RIDE,\n ROCKCLIMBING,\n ROLLERSKI,\n RUN,\n SNOWSHOE,\n VELOMOBILE,\n VIRTUALRIDE,\n VIRTUALRUN,\n WALK,\n WHEELCHAIR,\n }\n\n name = models.CharField(max_length=24, choices=ACTIVITY_NAME_CHOICES, unique=True)\n\n # min and max plausible gradient and speed to filter outliers in activity data.\n min_pace = models.FloatField(default=0.1) # 1:40/km or 36 km/h\n max_pace = models.FloatField(default=2.4) # 40:00/km or 1.5 km/h\n min_gradient = models.FloatField(default=-100.0) # 100% or -45°\n max_gradient = models.FloatField(default=100.0) # 100% or 45°\n\n objects = ActivityTypeQuerySet.as_manager()\n\n def __str__(self):\n return self.name\n\n def get_training_activities(self, limit=None):\n \"\"\"\n retrieve Strava activities to train the prediction model\n \"\"\"\n return self.activities.filter(streams__isnull=False)[:limit]\n\n\nclass ActivityPerformance(PredictedModel, TimeStampedModel):\n \"\"\"\n Athlete prediction model for an activity type calculated from his Strava history.\n\n Based on the athlete's past activities on strava, we train a linear regression model\n to predict the athlete's pace on a route. The pace of the athlete depends on the\n *slope* of the travelled segment.\n \"\"\"\n\n athlete = models.ForeignKey(\n \"Athlete\", on_delete=models.CASCADE, related_name=\"performances\"\n )\n activity_type = models.ForeignKey(\n \"ActivityType\", on_delete=models.PROTECT, related_name=\"performances\"\n )\n # gear categories returned by the prediction model\n gear_categories = NumpyArrayField(\n models.CharField(max_length=50),\n default=get_default_category,\n )\n\n def __str__(self):\n return \"{} - {} - {:.2%}\".format(\n self.athlete.user.username, self.activity_type.name, self.model_score\n )\n\n def get_training_activities(self, limit: int = None):\n \"\"\"\n return the activities that should feed the prediction model\n\n :param limit: maximum number of activities considered\n \"\"\"\n return self.activity_type.activities.filter(\n athlete=self.athlete, streams__isnull=False\n )[:limit]\n\n\nclass Gear(models.Model):\n \"\"\"\n Small helper model to save gear from Strava.\n \"\"\"\n\n strava_id = models.CharField(max_length=24, unique=True)\n name = models.CharField(max_length=100, blank=True)\n brand_name = models.CharField(max_length=100, blank=True)\n athlete = models.ForeignKey(\n \"Athlete\", on_delete=models.CASCADE, related_name=\"gears\"\n )\n\n def __str__(self):\n return \"{0} - {1}\".format(self.brand_name, self.name)\n\n def update_from_strava(self):\n # retrieve gear info from Strava\n strava_gear = self.athlete.strava_client.get_gear(self.strava_id)\n\n self.name = strava_gear.name\n if strava_gear.brand_name is not None:\n self.brand_name = strava_gear.brand_name\n\n # save\n self.save()\n" ]
[ [ "pandas.DataFrame", "numpy.array" ] ]
NSF-Swift/Spectrum-Access-System
[ "02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf", "02cf3490c9fd0cec38074d3bdb3bca63bb7d03bf" ]
[ "src/harness/testcases/WINNF_FT_S_QPR_testcase.py", "src/harness/reference_models/propagation/wf_itm.py" ]
[ "# Copyright 2018 SAS Project Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport logging\nimport os\nimport numpy as np\n\nimport sas\nimport sas_testcase\nfrom util import configurable_testcase, writeConfig, loadConfig, json_load\nfrom reference_models.antenna import antenna\nfrom reference_models.geo import vincenty\nfrom reference_models.geo import zones\nfrom reference_models.propagation import wf_itm\n\n\nclass QuietZoneProtectionTestcase(sas_testcase.SasTestCase):\n\n @classmethod\n def setUpClass(cls):\n super(QuietZoneProtectionTestcase, cls).setUpClass()\n cls.fcc_offices = zones.GetFccOfficeLocations()\n\n def setUp(self):\n self._sas, self._sas_admin = sas.GetTestingSas()\n self._sas_admin.Reset()\n\n def tearDown(self):\n pass\n\n def generate_QPR_2_default_config(self, filename):\n \"\"\"Generates the WinnForum configuration for QPR.2.\"\"\"\n\n # Load device info\n # CBSD 1: Category A CBSD located within the boundary of the NRAO / NRRO\n # Quiet Zone.\n device_1 = json_load(\n os.path.join('testcases', 'testdata', 'device_a.json'))\n device_1['installationParam']['latitude'] = 39.244586\n device_1['installationParam']['longitude'] = -78.505269\n\n # CBSD 2: Category B CBSD located within the boundary of the NRAO / NRRO\n # Quiet Zone.\n device_2 = json_load(\n os.path.join('testcases', 'testdata', 'device_b.json'))\n device_2['installationParam']['latitude'] = 39.247287\n device_2['installationParam']['longitude'] = -80.489236\n\n # device_1 is Category A.\n self.assertEqual(device_1['cbsdCategory'], 'A')\n\n # device_2 is Category B with conditionals pre-loaded.\n self.assertEqual(device_2['cbsdCategory'], 'B')\n conditionals_2 = {\n 'cbsdCategory': device_2['cbsdCategory'],\n 'fccId': device_2['fccId'],\n 'cbsdSerialNumber': device_2['cbsdSerialNumber'],\n 'airInterface': device_2['airInterface'],\n 'installationParam': device_2['installationParam'],\n 'measCapability': device_2['measCapability']\n }\n conditionals = [conditionals_2]\n del device_2['installationParam']\n del device_2['cbsdCategory']\n del device_2['airInterface']\n del device_2['measCapability']\n\n # Create the actual config.\n devices = [device_1, device_2]\n config = {\n 'registrationRequests': devices,\n 'conditionalRegistrationData': conditionals,\n }\n writeConfig(filename, config)\n\n @configurable_testcase(generate_QPR_2_default_config)\n def test_WINNF_FT_S_QPR_2(self, config_filename):\n \"\"\"[Configurable] Rejecting Registration of CBSD inside the NRAO/NRRO\n Quiet Zone.\n \"\"\"\n config = loadConfig(config_filename)\n # Very light checking of the config file.\n self.assertValidConfig(\n config, {\n 'registrationRequests': list,\n 'conditionalRegistrationData': list\n })\n\n # Whitelist FCC IDs and User IDs.\n for device in config['registrationRequests']:\n self._sas_admin.InjectFccId({'fccId': device['fccId']})\n 
self._sas_admin.InjectUserId({'userId': device['userId']})\n\n # Pre-load conditional registration data.\n if config['conditionalRegistrationData']:\n self._sas_admin.PreloadRegistrationData({\n 'registrationData': config['conditionalRegistrationData']\n })\n\n # Register CBSDs.\n request = {'registrationRequest': config['registrationRequests']}\n responses = self._sas.Registration(request)['registrationResponse']\n\n # Check registration responses.\n self.assertEqual(len(responses), len(config['registrationRequests']))\n for i, response in enumerate(responses):\n response = responses[i]\n logging.debug('Looking at response number %d', i)\n self.assertNotEqual(response['response']['responseCode'], 0)\n self.assertFalse('cbsdId' in response)\n\n def generate_QPR_5_default_config(self, filename):\n \"\"\"Generates the WinnForum configuration for QPR.5.\"\"\"\n\n # CBSD 1: Category A CBSD within 3.8 km of the Table Mountain QZ\n device_1 = json_load(\n os.path.join('testcases', 'testdata', 'device_a.json'))\n device_1['installationParam']['latitude'] = 40.121452\n device_1['installationParam']['longitude'] = -105.23381\n\n # CBSD 2: Category B CBSD within 38 km of the Table Mountain QZ\n device_2 = json_load(\n os.path.join('testcases', 'testdata', 'device_b.json'))\n device_2['installationParam']['latitude'] = 40.271668\n device_2['installationParam']['longitude'] = -105.101395\n\n # CBSD 3: Category B CBSD within 54 km of the Table Mountain QZ\n device_3 = json_load(\n os.path.join('testcases', 'testdata', 'device_d.json'))\n device_3['installationParam']['latitude'] = 40.175726\n device_3['installationParam']['longitude'] = -104.654388\n\n # CBSD 4: Category B CBSD within 64 km of the Table Mountain QZ\n device_4 = json_load(\n os.path.join('testcases', 'testdata', 'device_h.json'))\n device_4['installationParam']['latitude'] = 40.074919\n device_4['installationParam']['longitude'] = -104.523926\n\n # CBSD 5: Category B CBSD located within 80 km of the Table Mountain QZ\n device_5 = json_load(\n os.path.join('testcases', 'testdata', 'device_j.json'))\n device_5['installationParam']['latitude'] = 40.525283\n device_5['installationParam']['longitude'] = -104.547272\n\n # device_1 is Category A.\n self.assertEqual(device_1['cbsdCategory'], 'A')\n\n # device_2, device_3, device_4 and device_5 are Category B.\n self.assertEqual(device_2['cbsdCategory'], 'B')\n self.assertEqual(device_3['cbsdCategory'], 'B')\n self.assertEqual(device_4['cbsdCategory'], 'B')\n self.assertEqual(device_5['cbsdCategory'], 'B')\n\n # Conditionals to pre-load\n conditionals_2 = {\n 'cbsdCategory': device_2['cbsdCategory'],\n 'fccId': device_2['fccId'],\n 'cbsdSerialNumber': device_2['cbsdSerialNumber'],\n 'airInterface': device_2['airInterface'],\n 'installationParam': device_2['installationParam'],\n 'measCapability': device_2['measCapability']\n }\n conditionals_3 = {\n 'cbsdCategory': device_3['cbsdCategory'],\n 'fccId': device_3['fccId'],\n 'cbsdSerialNumber': device_3['cbsdSerialNumber'],\n 'airInterface': device_3['airInterface'],\n 'installationParam': device_3['installationParam'],\n 'measCapability': device_3['measCapability']\n }\n conditionals_4 = {\n 'cbsdCategory': device_4['cbsdCategory'],\n 'fccId': device_4['fccId'],\n 'cbsdSerialNumber': device_4['cbsdSerialNumber'],\n 'airInterface': device_4['airInterface'],\n 'installationParam': device_4['installationParam'],\n 'measCapability': device_4['measCapability']\n }\n conditionals_5 = {\n 'cbsdCategory': device_5['cbsdCategory'],\n 'fccId': 
device_5['fccId'],\n 'cbsdSerialNumber': device_5['cbsdSerialNumber'],\n 'airInterface': device_5['airInterface'],\n 'installationParam': device_5['installationParam'],\n 'measCapability': device_5['measCapability']\n }\n\n # Remove conditionals from registration.\n for device in [device_2, device_3, device_4, device_5]:\n del device['cbsdCategory']\n del device['airInterface']\n del device['installationParam']\n del device['measCapability']\n\n # Grant Requests - N1\n grant1N1 = {\n 'operationParam': {\n 'maxEirp': 19,\n 'operationFrequencyRange': {\n 'lowFrequency': 3650000000,\n 'highFrequency': 3660000000\n }\n }\n }\n grant2N1 = {\n 'operationParam': {\n 'maxEirp': 37,\n 'operationFrequencyRange': {\n 'lowFrequency': 3655000000,\n 'highFrequency': 3660000000\n }\n }\n }\n grant3N1 = {\n 'operationParam': {\n 'maxEirp': 35,\n 'operationFrequencyRange': {\n 'lowFrequency': 3650000000,\n 'highFrequency': 3660000000\n }\n }\n }\n grant4N1 = {\n 'operationParam': {\n 'maxEirp': 33,\n 'operationFrequencyRange': {\n 'lowFrequency': 3650000000,\n 'highFrequency': 3660000000\n }\n }\n }\n grant5N1 = {\n 'operationParam': {\n 'maxEirp': 30,\n 'operationFrequencyRange': {\n 'lowFrequency': 3650000000,\n 'highFrequency': 3670000000\n }\n }\n }\n # Grant Requests - N2\n grant1N2 = {\n 'operationParam': {\n 'maxEirp': 19,\n 'operationFrequencyRange': {\n 'lowFrequency': 3660000000,\n 'highFrequency': 3670000000\n }\n }\n }\n grant2N2 = {\n 'operationParam': {\n 'maxEirp': 37,\n 'operationFrequencyRange': {\n 'lowFrequency': 3660000000,\n 'highFrequency': 3665000000\n }\n }\n }\n grant3N2 = {\n 'operationParam': {\n 'maxEirp': 36,\n 'operationFrequencyRange': {\n 'lowFrequency': 3660000000,\n 'highFrequency': 3670000000\n }\n }\n }\n grant4N2 = {\n 'operationParam': {\n 'maxEirp': 37,\n 'operationFrequencyRange': {\n 'lowFrequency': 3660000000,\n 'highFrequency': 3680000000\n }\n }\n }\n grant5N2 = {\n 'operationParam': {\n 'maxEirp': 35,\n 'operationFrequencyRange': {\n 'lowFrequency': 3670000000,\n 'highFrequency': 3690000000\n }\n }\n }\n # Create the actual config.\n config = {\n 'registrationRequests': [\n device_1, device_2, device_3, device_4, device_5\n ],\n 'conditionalRegistrationData': [\n conditionals_2, conditionals_3, conditionals_4, conditionals_5\n ],\n 'grantRequestsN1': [grant1N1, grant2N1, grant3N1, grant4N1, grant5N1],\n 'grantRequestsN2': [grant1N2, grant2N2, grant3N2, grant4N2, grant5N2]\n }\n writeConfig(filename, config)\n\n @configurable_testcase(generate_QPR_5_default_config)\n def test_WINNF_FT_S_QPR_5(self, config_filename):\n \"\"\"[Configurable] Unsuccessful Grant Request from CBSDs within Coordination\n Area around Table Mountain Quiet Zone (QZ) with Multiple Grants.\n \"\"\"\n config = loadConfig(config_filename)\n self.assertValidConfig(\n config, {\n 'registrationRequests': list,\n 'conditionalRegistrationData': list,\n 'grantRequestsN1': list,\n 'grantRequestsN2': list\n })\n self.assertEqual(\n len(config['registrationRequests']), len(config['grantRequestsN1']))\n self.assertEqual(\n len(config['grantRequestsN1']), len(config['grantRequestsN2']))\n\n # Whitelist FCC ID and User ID.\n for device in config['registrationRequests']:\n self._sas_admin.InjectFccId({'fccId': device['fccId']})\n self._sas_admin.InjectUserId({'userId': device['userId']})\n\n # Pre-load conditional registration data.\n if config['conditionalRegistrationData']:\n self._sas_admin.PreloadRegistrationData({\n 'registrationData': config['conditionalRegistrationData']\n })\n\n # Step 1: Register 
CBSDs.\n request = {'registrationRequest': config['registrationRequests']}\n responses = self._sas.Registration(request)['registrationResponse']\n\n # Check registration response.\n self.assertEqual(len(responses), len(config['registrationRequests']))\n grant_request_n1 = []\n grant_request_n2 = []\n successful_reg_requests = []\n for i, response in enumerate(responses):\n if response['response']['responseCode'] == 0:\n self.assertTrue('cbsdId' in response)\n successful_reg_requests.append(config['registrationRequests'][i])\n config['grantRequestsN1'][i]['cbsdId'] = response['cbsdId']\n grant_request_n1.append(config['grantRequestsN1'][i])\n config['grantRequestsN2'][i]['cbsdId'] = response['cbsdId']\n grant_request_n2.append(config['grantRequestsN2'][i])\n del request, responses\n\n if not successful_reg_requests:\n return # SAS passes immediately, since none of the registration requests\n # succeeded in this case.\n\n # Step 2: For CBSDs successfully registered in Step 1, Send grant request 1\n request1 = {'grantRequest': grant_request_n1}\n grant_responses1 = self._sas.Grant(request1)['grantResponse']\n # Check grant response 1\n self.assertEqual(len(grant_responses1), len(grant_request_n1))\n\n # Step 3: For CBSDs successfully registered in Step 1, Send grant request 2\n request2 = {'grantRequest': grant_request_n2}\n grant_responses2 = self._sas.Grant(request2)['grantResponse']\n # Check grant response 2\n self.assertEqual(len(grant_responses2), len(grant_request_n2))\n\n for i, device in enumerate(successful_reg_requests):\n if 'installationParam' in device:\n cbsd_information = device\n else:\n for conditional in config['conditionalRegistrationData']:\n if (device['fccId'] == conditional['fccId']) and (\n device['cbsdSerialNumber'] == conditional['cbsdSerialNumber']):\n cbsd_information = conditional\n logging.info(\n 'Looking at device with FccID: %s / CbsdSerialNumber: %s',\n cbsd_information['fccId'], cbsd_information['cbsdSerialNumber'])\n grant_response1 = grant_responses1[i]\n grant_response2 = grant_responses2[i]\n if not (grant_response1['response']['responseCode'] == 0 or\n grant_response2['response']['responseCode'] == 0):\n logging.info('Both grant requests were rejected for this device.')\n continue # Skip further calculation for this device.\n\n # Calculate PL\n logging.info(\n 'Calculating PL for device with FccID: %s / CbsdSerialNumber: %s',\n cbsd_information['fccId'], cbsd_information['cbsdSerialNumber'])\n cbsd_lat = cbsd_information['installationParam']['latitude']\n cbsd_lon = cbsd_information['installationParam']['longitude']\n cbsd_ant_azi = cbsd_information['installationParam']['antennaAzimuth']\n cbsd_ant_beamwidth = cbsd_information['installationParam'][\n 'antennaBeamwidth']\n cbsd_height = cbsd_information['installationParam']['height']\n cbsd_height_type = cbsd_information['installationParam']['heightType']\n is_cbsd_indoor = cbsd_information['installationParam']['indoorDeployment']\n freq_mhz = 3625. 
# Always in all SAS\n table_mountain_quiet_zone_lat = 40.130660\n table_mountain_quiet_zone_long = -105.244596\n table_mountain_quiet_zone_height = 9 # 9 m: According to the spec\n propagation = wf_itm.CalcItmPropagationLoss(\n cbsd_lat,\n cbsd_lon,\n cbsd_height,\n table_mountain_quiet_zone_lat,\n table_mountain_quiet_zone_long,\n table_mountain_quiet_zone_height,\n cbsd_indoor=is_cbsd_indoor,\n reliability=0.5,\n freq_mhz=freq_mhz,\n is_height_cbsd_amsl=(cbsd_height_type == 'AMSL'))\n pl = propagation.db_loss\n logging.info('Propagation:db_loss: %f', pl)\n bearing = propagation.incidence_angles.hor_cbsd\n logging.info('Bearing: %f', bearing)\n # Calculate effective antenna gain\n max_ant_gain_dbi = cbsd_information['installationParam']['antennaGain']\n ant_gain_dbi = antenna.GetStandardAntennaGains(\n bearing, cbsd_ant_azi, cbsd_ant_beamwidth,\n max_ant_gain_dbi)\n logging.info('Effective Antenna Gain: %f', ant_gain_dbi)\n\n # Gather values required for calculating Total Interference\n grant1_eirp = 0\n if grant_response1['response']['responseCode'] == 0:\n p1 = grant_request_n1[i]['operationParam']['maxEirp']\n logging.info('Grant 1 Max Eirp: %f dBm/MHz', p1)\n bw1 = (grant_request_n1[i]['operationParam']\n ['operationFrequencyRange']['highFrequency'] -\n grant_request_n1[i]['operationParam']\n ['operationFrequencyRange']['lowFrequency']) / 1.e6\n logging.info('Grant 1 Bandwidth: %f', bw1)\n grant1_eirp = (10**(p1/10.0)) * bw1\n logging.info('Grant 1 EIRP is %f', grant1_eirp)\n\n grant2_eirp = 0\n if grant_response2['response']['responseCode'] == 0:\n p2 = grant_request_n2[i]['operationParam']['maxEirp']\n logging.info('Grant 2 Max Eirp: %f dBm/MHz', p2)\n bw2 = (grant_request_n2[i]['operationParam']\n ['operationFrequencyRange']['highFrequency'] -\n grant_request_n2[i]['operationParam']\n ['operationFrequencyRange']['lowFrequency']) / 1.e6\n logging.info('Grant 2 Bandwidth: %f', bw2)\n grant2_eirp = (10**(p2/10.0)) * bw2\n logging.info('Grant 2 EIRP is %f', grant2_eirp)\n\n # Step 4: Calculate Total Interference\n total_interference_dbm = ant_gain_dbi - max_ant_gain_dbi + (\n 10 * np.log10(grant1_eirp + grant2_eirp)) - pl\n logging.info('Total Interference is %f dBm', total_interference_dbm)\n\n # CHECK: Total Interference from all approved grants is <= -88.4 dBm\n self.assertLessEqual(total_interference_dbm, -88.4)\n\n def generate_QPR_6_default_config(self, filename):\n \"\"\"Generates the WinnForum configuration for QPR.6.\"\"\"\n\n # Load device info\n # CBSD 1: Category A within 2.4 km of Waipahu, Hawaii FCC Field Office.\n device_1 = json_load(\n os.path.join('testcases', 'testdata', 'device_a.json'))\n device_1['installationParam']['latitude'] = 21.377719\n device_1['installationParam']['longitude'] = -157.973411\n\n # CBSD 2: Category B within 2.4 km of Allegan, Michigan FCC Field Office.\n device_2 = json_load(\n os.path.join('testcases', 'testdata', 'device_b.json'))\n device_2['installationParam']['latitude'] = 42.586213\n device_2['installationParam']['longitude'] = -85.955594\n\n # device_1 is Category A.\n self.assertEqual(device_1['cbsdCategory'], 'A')\n\n # device_2 is Category B with conditionals pre-loaded.\n self.assertEqual(device_2['cbsdCategory'], 'B')\n conditionals_2 = {\n 'cbsdCategory': device_2['cbsdCategory'],\n 'fccId': device_2['fccId'],\n 'cbsdSerialNumber': device_2['cbsdSerialNumber'],\n 'airInterface': device_2['airInterface'],\n 'installationParam': device_2['installationParam'],\n 'measCapability': device_2['measCapability']\n }\n conditionals = 
[conditionals_2]\n del device_2['installationParam']\n del device_2['cbsdCategory']\n del device_2['airInterface']\n del device_2['measCapability']\n\n # Create the actual config.\n devices = [device_1, device_2]\n config = {\n 'registrationRequests': devices,\n 'conditionalRegistrationData': conditionals,\n }\n writeConfig(filename, config)\n\n @configurable_testcase(generate_QPR_6_default_config)\n def test_WINNF_FT_S_QPR_6(self, config_filename):\n \"\"\"[Configurable] Rejecting Registration of CBSDs inside the FCC Protected\n Field Offices Quiet Zone.\n \"\"\"\n config = loadConfig(config_filename)\n # Very light checking of the config file.\n self.assertValidConfig(\n config, {\n 'registrationRequests': list,\n 'conditionalRegistrationData': list\n })\n\n # Whitelist FCC IDs and User IDs.\n for device in config['registrationRequests']:\n self._sas_admin.InjectFccId({'fccId': device['fccId']})\n self._sas_admin.InjectUserId({'userId': device['userId']})\n\n # Pre-load conditional registration data.\n if config['conditionalRegistrationData']:\n self._sas_admin.PreloadRegistrationData({\n 'registrationData': config['conditionalRegistrationData']\n })\n\n # Register CBSDs.\n request = {'registrationRequest': config['registrationRequests']}\n responses = self._sas.Registration(request)['registrationResponse']\n\n # Check registration responses.\n self.assertEqual(len(responses), len(config['registrationRequests']))\n for i, response in enumerate(responses):\n response = responses[i]\n logging.debug('Looking at response number %d', i)\n self.assertNotEqual(response['response']['responseCode'], 0)\n self.assertFalse('cbsdId' in response)\n\n def generate_QPR_7_default_config(self, filename):\n \"\"\"Generates the WinnForum configuration for QPR.7.\"\"\"\n\n # Load device info\n # Cat B - between 2.4 km - 4.8 km of Waipahu, Hawaii Field Office.\n device_b = json_load(\n os.path.join('testcases', 'testdata', 'device_b.json'))\n device_b['installationParam']['latitude'] = 21.397934\n device_b['installationParam']['longitude'] = -158.034459\n\n # device_b is Category B with conditionals pre-loaded.\n self.assertEqual(device_b['cbsdCategory'], 'B')\n conditionals_b = {\n 'cbsdCategory': device_b['cbsdCategory'],\n 'fccId': device_b['fccId'],\n 'cbsdSerialNumber': device_b['cbsdSerialNumber'],\n 'airInterface': device_b['airInterface'],\n 'installationParam': device_b['installationParam'],\n 'measCapability': device_b['measCapability']\n }\n del device_b['installationParam']\n del device_b['cbsdCategory']\n del device_b['airInterface']\n del device_b['measCapability']\n\n # Grant Request\n grant_0 = {\n 'operationParam': {\n 'maxEirp': 35,\n 'operationFrequencyRange': {\n 'lowFrequency': 3650000000,\n 'highFrequency': 3660000000\n }\n }\n }\n\n # Create the actual config.\n config = {\n 'registrationRequest': device_b,\n 'conditionalRegistrationData': [conditionals_b],\n 'grantRequest': grant_0,\n }\n writeConfig(filename, config)\n\n @configurable_testcase(generate_QPR_7_default_config)\n def test_WINNF_FT_S_QPR_7(self, config_filename):\n \"\"\"[Configurable] Unsuccessful Grant Request from CBSDs within 4.8 km of\n the FCC Field Offices.\n \"\"\"\n config = loadConfig(config_filename)\n # Very light checking of the config file.\n self.assertValidConfig(\n config, {\n 'registrationRequest': dict,\n 'conditionalRegistrationData': list,\n 'grantRequest': dict\n })\n\n # Whitelist FCC ID and User ID.\n self._sas_admin.InjectFccId({\n 'fccId': config['registrationRequest']['fccId']\n })\n 
self._sas_admin.InjectUserId({\n 'userId': config['registrationRequest']['userId']\n })\n\n # Pre-load conditional registration data.\n if config['conditionalRegistrationData']:\n self._sas_admin.PreloadRegistrationData({\n 'registrationData': config['conditionalRegistrationData']\n })\n\n # Register CBSD.\n request = {'registrationRequest': [config['registrationRequest']]}\n response = self._sas.Registration(request)['registrationResponse']\n\n # Check registration response.\n self.assertEqual(len(response), 1)\n if response[0]['response']['responseCode'] != 0:\n return # SAS passes immediately in this case.\n cbsd_id = response[0]['cbsdId']\n del request, response\n\n # Calculate the closest FCC office\n lat_cbsd = config['conditionalRegistrationData'][0]['installationParam'][\n 'latitude']\n lon_cbsd = config['conditionalRegistrationData'][0]['installationParam'][\n 'longitude']\n distance_offices = [\n vincenty.GeodesicDistanceBearing(\n lat_cbsd, lon_cbsd, office['latitude'], office['longitude'])[0]\n for office in self.fcc_offices\n ]\n index_closest = np.argmin(distance_offices)\n closest_fcc_office = self.fcc_offices[index_closest]\n logging.info('Closest FCC office Lat: %f', closest_fcc_office['latitude'])\n logging.info('Closest FCC office Long: %f',\n closest_fcc_office['longitude'])\n # Calculate bearing and ant_gain\n _, bearing, _ = vincenty.GeodesicDistanceBearing(\n lat_cbsd, lon_cbsd, closest_fcc_office['latitude'],\n closest_fcc_office['longitude'])\n ant_gain = antenna.GetStandardAntennaGains(\n bearing,\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaAzimuth'],\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaBeamwidth'],\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaGain']\n )\n logging.info('ant_gain is %f dBi', ant_gain)\n # Gather values required for calculating EIRP\n p = config['grantRequest']['operationParam']['maxEirp']\n logging.info('Grant maxEirp is %f', p)\n max_ant_gain = (\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaGain'])\n logging.info('max_ant_gain is %f dBi', max_ant_gain)\n bw = (\n config['grantRequest']['operationParam']['operationFrequencyRange']\n ['highFrequency'] - config['grantRequest']['operationParam']\n ['operationFrequencyRange']['lowFrequency']) / 1.e6\n logging.info('bw is %f MHz', bw)\n # Calculate EIRP to verify grant response\n eirp = (p - max_ant_gain + ant_gain + (10 * np.log10(bw)))\n logging.info('EIRP is %f dBm', eirp)\n\n # If successfully registered, CBSD sends a grant request\n config['grantRequest']['cbsdId'] = cbsd_id\n grant_request = config['grantRequest']\n request = {'grantRequest': [grant_request]}\n response = self._sas.Grant(request)['grantResponse']\n # Check grant response\n self.assertEqual(len(response), 1)\n # If EIRP <= 49.15 dBm = SUCCESS\n if eirp <= 49.15:\n self.assertEqual(response[0]['response']['responseCode'], 0)\n # If EIRP > 49.15 dBm = INTERFERENCE\n else:\n self.assertEqual(response[0]['response']['responseCode'], 400)\n\n def generate_QPR_8_default_config(self, filename):\n \"\"\"Generates the WinnForum configuration for QPR.8.\"\"\"\n\n # Load device info\n # Cat B - between 2.4 km - 4.8 km of Waipahu, Hawaii Field Office.\n device_b = json_load(\n os.path.join('testcases', 'testdata', 'device_b.json'))\n device_b['installationParam']['latitude'] = 21.397934\n device_b['installationParam']['longitude'] = -158.034459\n\n # device_b is Category B with conditionals pre-loaded.\n 
self.assertEqual(device_b['cbsdCategory'], 'B')\n conditionals_b = {\n 'cbsdCategory': device_b['cbsdCategory'],\n 'fccId': device_b['fccId'],\n 'cbsdSerialNumber': device_b['cbsdSerialNumber'],\n 'airInterface': device_b['airInterface'],\n 'installationParam': device_b['installationParam'],\n 'measCapability': device_b['measCapability']\n }\n del device_b['installationParam']\n del device_b['cbsdCategory']\n del device_b['airInterface']\n del device_b['measCapability']\n\n # Grant Requests\n grant_0 = {\n 'operationParam': {\n 'maxEirp': 35,\n 'operationFrequencyRange': {\n 'lowFrequency': 3650000000,\n 'highFrequency': 3660000000\n }\n }\n }\n grant_1 = {\n 'operationParam': {\n 'maxEirp': 35,\n 'operationFrequencyRange': {\n 'lowFrequency': 3660000000,\n 'highFrequency': 3670000000\n }\n }\n }\n # Create the actual config.\n config = {\n 'registrationRequest': device_b,\n 'conditionalRegistrationData': [conditionals_b],\n 'grantRequest1': grant_0,\n 'grantRequest2': grant_1\n }\n writeConfig(filename, config)\n\n @configurable_testcase(generate_QPR_8_default_config)\n def test_WINNF_FT_S_QPR_8(self, config_filename):\n \"\"\"[Configurable] Unsuccessful Grant Request from CBSDs within 4.8 km of\n the FCC Field Offices with multiple grants.\n \"\"\"\n config = loadConfig(config_filename)\n self.assertValidConfig(\n config, {\n 'registrationRequest': dict,\n 'conditionalRegistrationData': list,\n 'grantRequest1': dict,\n 'grantRequest2': dict\n })\n\n # Whitelist FCC ID and User ID.\n self._sas_admin.InjectFccId({\n 'fccId': config['registrationRequest']['fccId']\n })\n self._sas_admin.InjectUserId({\n 'userId': config['registrationRequest']['userId']\n })\n\n # Pre-load conditional registration data.\n if config['conditionalRegistrationData']:\n self._sas_admin.PreloadRegistrationData({\n 'registrationData': config['conditionalRegistrationData']\n })\n\n # Step 1: Register CBSD.\n request = {'registrationRequest': [config['registrationRequest']]}\n response = self._sas.Registration(request)['registrationResponse']\n\n # Check registration response.\n self.assertEqual(len(response), 1)\n if response[0]['response']['responseCode'] != 0:\n return # SAS passes immediately in this case.\n cbsd_id = response[0]['cbsdId']\n del request, response\n\n # Step 2: Request first grant.\n config['grantRequest1']['cbsdId'] = cbsd_id\n grant_request = config['grantRequest1']\n request = {'grantRequest': [grant_request]}\n response = self._sas.Grant(request)['grantResponse'][0]\n # Check if the first grant response is successful.\n grant1_approved = response['response']['responseCode'] == 0\n del request, response\n\n # Step 3: Request second grant\n config['grantRequest2']['cbsdId'] = cbsd_id\n grant_request = config['grantRequest2']\n request = {'grantRequest': [grant_request]}\n response = self._sas.Grant(request)['grantResponse'][0]\n # Check if the second grant response is successful.\n grant2_approved = response['response']['responseCode'] == 0\n del request, response\n\n if not (grant1_approved or grant2_approved):\n logging.info('Both grant requests were rejected')\n return # SAS passes immediately in this case.\n\n # Calculate the closest FCC office\n lat_cbsd = config['conditionalRegistrationData'][0]['installationParam'][\n 'latitude']\n lon_cbsd = config['conditionalRegistrationData'][0]['installationParam'][\n 'longitude']\n distance_offices = [\n vincenty.GeodesicDistanceBearing(\n lat_cbsd, lon_cbsd, office['latitude'], office['longitude'])[0]\n for office in self.fcc_offices\n ]\n 
index_closest = np.argmin(distance_offices)\n closest_fcc_office = self.fcc_offices[index_closest]\n logging.info('Closest FCC Office Lat: %f', closest_fcc_office['latitude'])\n logging.info('Closest FCC Office Long: %f', closest_fcc_office['longitude'])\n # Calculate bearing and ant_gain\n _, bearing, _ = vincenty.GeodesicDistanceBearing(\n lat_cbsd, lon_cbsd, closest_fcc_office['latitude'],\n closest_fcc_office['longitude'])\n logging.info('Bearing: %f', bearing)\n ant_gain_dbi = antenna.GetStandardAntennaGains(\n bearing,\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaAzimuth'],\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaBeamwidth'],\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaGain']\n )\n logging.info('Ant Gain: %f dBi', ant_gain_dbi)\n max_ant_gain_dbi = (\n config['conditionalRegistrationData'][0]['installationParam'][\n 'antennaGain'])\n logging.info('Max Ant Gain: %f dBi', max_ant_gain_dbi)\n\n # Gather values required for calculating Total EIRP\n grant1_eirp = 0\n if grant1_approved:\n p1 = config['grantRequest1']['operationParam']['maxEirp']\n logging.info('Grant 1 Max Eirp: %f dBm/MHz', p1)\n bw1 = (config['grantRequest1']['operationParam']\n ['operationFrequencyRange']['highFrequency'] -\n config['grantRequest1']['operationParam']\n ['operationFrequencyRange']['lowFrequency']) / 1.e6\n logging.info('Grant 1 Bandwidth: %f', bw1)\n grant1_eirp = (10**(p1/10.0)) * bw1\n logging.info('Grant 1 nominal EIRP is %f', grant1_eirp)\n\n grant2_eirp = 0\n if grant2_approved:\n p2 = config['grantRequest2']['operationParam']['maxEirp']\n logging.info('Grant 2 Max Eirp: %f dBm/MHz', p2)\n bw2 = (config['grantRequest2']['operationParam']\n ['operationFrequencyRange']['highFrequency'] -\n config['grantRequest2']['operationParam']\n ['operationFrequencyRange']['lowFrequency']) / 1.e6\n logging.info('Grant 2 Bandwidth: %f', bw2)\n grant2_eirp = (10**(p2/10.0)) * bw2\n logging.info('Grant 2 nominal EIRP is %f', grant2_eirp)\n\n # Step 4: Calculate Total EIRP\n total_eirp_dbm = ant_gain_dbi - max_ant_gain_dbi + (\n 10 * np.log10(grant1_eirp + grant2_eirp))\n logging.info('Total EIRP is %f dBm', total_eirp_dbm)\n\n # CHECK: Total EIRP of all approved grants is <= 49.15 dBm\n self.assertLessEqual(total_eirp_dbm, 49.15)\n", "# Copyright 2017 SAS Project Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"WinnForum-specific version of ITM propagation model.\n\nTypical usage:\n # Configure the terrain driver (memory use is: cache_size * 50MB)\n from reference_models.geo import drive\n drive.ConfigureTerrainDriver(terrain_dir=my_ned_path, cache_size=16)\n\n # Get the path loss and incidence angles\n db_loss, incidence_angles, internals = CalcItmPropagationLoss(\n lat_cbsd, lon_cbsd, height_cbsd,\n lat_rx, lon_rx, height_rx,\n cbsd_indoor=False,\n reliability=0.5,\n freq_mhz=3625.)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple\nimport numpy as np\n\nfrom reference_models.geo import drive\nfrom reference_models.geo import vincenty\nfrom reference_models.propagation.itm import itm\n\n# TEMPORARY to avoid breaking code under PR\nterrainDriver = drive.terrain_driver\n\n\n# ITM warning codes\nclass ItmErrorCode:\n NONE = 0\n CAUTION = 1\n NOTE = 2\n WARNING = 3\n OTHER = 4\n\n_ITM_ERROR_MODES = {\n ItmErrorCode.NONE: 'No Error.',\n ItmErrorCode.CAUTION: ('Caution: Some parameters are nearly out of range.'\n ' Results should be used with caution.'),\n ItmErrorCode.NOTE: ('Note: Default parameters have been substituted for impossible ones.'),\n ItmErrorCode.WARNING: ('Warning: A combination of parameters is out of range.'\n ' Results are probably invalid.'),\n ItmErrorCode.OTHER: ('Warning: Some parameters are out of range.'\n ' Results are probably invalid.')\n}\n\n\ndef GetInfoOnItmCode(code):\n \"\"\"Get description of ITM error code.\"\"\"\n return _ITM_ERROR_MODES(code)\n\n# Defined namedtuple for nice output packing\n_PropagResult = namedtuple('_PropagResult',\n ['db_loss', 'incidence_angles', 'internals'])\n_IncidenceAngles = namedtuple('_IncidenceAngles',\n ['hor_cbsd', 'ver_cbsd', 'hor_rx', 'ver_rx'])\n\n\n# Main entry point for the Winnforum compliant ITM propagation model\ndef CalcItmPropagationLoss(lat_cbsd, lon_cbsd, height_cbsd,\n lat_rx, lon_rx, height_rx,\n cbsd_indoor=False,\n reliability=0.5,\n freq_mhz=3625.,\n its_elev=None,\n is_height_cbsd_amsl=False,\n return_internals=False):\n \"\"\"Implements the WinnForum-compliant ITM point-to-point propagation model.\n\n According to WinnForum spec R2-SGN-17, R2-SGN-22 and R2-SGN-5 to 10.\n\n One can use this routine in 3 ways:\n reliability = -1 : to get the average path loss\n reliability in [0,1] : to get a pathloss for given quantile\n sequence of reliabilities: to get an array of pathloss. Used to obtain\n inverse CDF of the pathloss.\n\n Inputs:\n lat_cbsd, lon_cbsd, height_cbsd: Lat/lon (deg) and height AGL (m) of CBSD\n lat_rx, lon_rx, height_rx: Lat/lon (deg) and height AGL (m) of Rx point\n cbsd_indoor: CBSD indoor status - Default=False.\n reliability: Reliability. 
Default is 0.5 (median value)\n Different options:\n value in [0,1]: returns the CDF quantile\n -1: returns the mean path loss\n iterable sequence: returns a list of path losses\n freq_mhz: Frequency (MHz). Default is mid-point of band.\n its_elev: Optional profile to use (in ITM format). Default=None\n If not specified, it is extracted from the terrain.\n is_height_cbsd_amsl: If True, the CBSD height shall be considered as AMSL (Average\n mean sea level).\n return_internals: If True, returns internal variables.\n\n Returns:\n A namedtuple of:\n db_loss Path Loss in dB, either a scalar if reliability is scalar\n or a list of path losses if reliability is an iterable.\n\n incidence_angles: A namedtuple of\n hor_cbsd: Horizontal departure angle (bearing) from CBSD to Rx\n ver_cbsd: Vertical departure angle at CBSD\n hor_rx: Horizontal incidence angle (bearing) from Rx to CBSD\n ver_rx: Vertical incidence angle at Rx\n\n internals: A dictionary of internal data for advanced analysis\n (only if return_internals=True):\n itm_err_num: ITM error code from ItmErrorCode (see GetInfoOnItmCode).\n itm_str_mode: String containing description of dominant prop mode.\n dist_km: Distance between end points (km).\n prof_d_km ndarray of distances (km) - x values to plot terrain.\n prof_elev ndarray of terrain heights (m) - y values to plot terrain.\n\n Raises:\n Exception if input parameters invalid or out of range.\n \"\"\"\n # Case of same points\n if (lat_cbsd == lat_rx and lon_cbsd == lon_rx):\n return _PropagResult(\n db_loss = 0 if np.isscalar(reliability) else [0] * len(reliability),\n incidence_angles = _IncidenceAngles(0,0,0,0),\n internals = None)\n\n # Sanity checks on input parameters\n if freq_mhz < 40.0 or freq_mhz > 10000:\n raise Exception('Frequency outside range [40MHz - 10GHz]')\n\n if is_height_cbsd_amsl:\n altitude_cbsd = drive.terrain_driver.GetTerrainElevation(lat_cbsd, lon_cbsd)\n height_cbsd = height_cbsd - altitude_cbsd\n\n # Ensure minimum height of 1 meter\n if height_cbsd < 1:\n height_cbsd = 1\n if height_rx < 1:\n height_rx = 1\n\n # Internal ITM parameters are always set to following values in WF version:\n confidence = 0.5 # Confidence (always 0.5)\n dielec = 25. # Dielectric constant (always 25.)\n conductivity = 0.02 # Conductivity (always 0.02)\n polarization = 1 # Polarization (always vertical = 1)\n mdvar = 13\n\n # Get the terrain profile, using Vincenty great circle route, and WF\n # standard (bilinear interp; 1500 pts for all distances over 45 km)\n if its_elev is None:\n its_elev = drive.terrain_driver.TerrainProfile(\n lat1=lat_cbsd, lon1=lon_cbsd,\n lat2=lat_rx, lon2=lon_rx,\n target_res_meter=30.,\n do_interp=True, max_points=1501)\n\n # Find the midpoint of the great circle path\n dist_km, bearing_cbsd, bearing_rx = vincenty.GeodesicDistanceBearing(\n lat_cbsd, lon_cbsd, lat_rx, lon_rx)\n latmid, lonmid, _ = vincenty.GeodesicPoint(\n lat_cbsd, lon_cbsd, dist_km/2., bearing_cbsd)\n\n # Determine climate value, based on ITU-R P.617 method:\n climate = drive.climate_driver.TropoClim(latmid, lonmid)\n # If the common volume lies over the sea, the climate value to use depends\n # on the climate values at either end. 
A simple min() function should\n # properly implement the logic, since water is the max.\n if climate == 7:\n climate = min(drive.climate_driver.TropoClim(lat_cbsd, lon_cbsd),\n drive.climate_driver.TropoClim(lat_rx, lon_rx))\n\n # Look up the refractivity at the path midpoint, if not explicitly provided\n refractivity = drive.refract_driver.Refractivity(latmid, lonmid)\n\n # Call ITM prop loss.\n reliabilities = reliability\n do_avg = False\n if np.isscalar(reliabilities) and reliability == -1:\n # Pathloss mean: average the value for 1% to 99% included\n reliabilities = np.arange(0.01, 1.0, 0.01)\n do_avg = True\n\n db_loss, ver_cbsd, ver_rx, str_mode, err_num = itm.point_to_point(\n its_elev, height_cbsd, height_rx,\n dielec, conductivity,\n refractivity, freq_mhz,\n climate, polarization,\n confidence, reliabilities,\n mdvar, False)\n if do_avg:\n db_loss = -10*np.log10(np.mean(10**(-np.array(db_loss)/10.)))\n\n # Add indoor losses\n if cbsd_indoor:\n if np.isscalar(db_loss):\n db_loss += 15\n else:\n db_loss = [loss+15 for loss in db_loss]\n\n # Create distance/terrain arrays for plotting if desired\n internals = None\n if return_internals:\n prof_d_km = (its_elev[1]/1000.) * np.arange(len(its_elev)-2)\n prof_elev = np.asarray(its_elev[2:])\n internals = {\n 'itm_err_num': err_num,\n 'itm_str_mode': str_mode,\n 'dist_km': dist_km,\n 'prof_d_km': prof_d_km,\n 'prof_elev': prof_elev\n }\n\n return _PropagResult(\n db_loss = db_loss,\n incidence_angles = _IncidenceAngles(\n hor_cbsd = bearing_cbsd,\n ver_cbsd = ver_cbsd,\n hor_rx = bearing_rx,\n ver_rx = ver_rx),\n internals = internals\n )\n\n\n# Utility function to compute the HAAT for a CBSD\ndef ComputeHaat(lat_cbsd, lon_cbsd, height_cbsd, height_is_agl=True):\n \"\"\"Computes a CBSD HAAT (Height above average terrain).\n\n Args:\n lat_cbsd, lon_cbsd: the CBSD location (degrees).\n height_cbsd: the CBSD antenna height (meters)\n height_is_agl: boolean specifying if height is AGL (Above Ground Level)\n or AMSL (Above Mean Sea Level).\n\n Returns:\n the CBSD HAAT (meters).\n \"\"\"\n norm_haat, alt_ground = drive.terrain_driver.ComputeNormalizedHaat(lat_cbsd, lon_cbsd)\n if height_is_agl:\n return height_cbsd + norm_haat\n else:\n return height_cbsd - alt_ground + norm_haat\n" ]
[ [ "numpy.log10", "numpy.argmin" ], [ "numpy.array", "numpy.arange", "numpy.isscalar", "numpy.asarray" ] ]
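The total-EIRP check at the end of the test record above is plain arithmetic. A minimal, self-contained sketch of that aggregation step, with made-up grant figures (the real test reads them from its config dict):

import numpy as np

def total_eirp_dbm(ant_gain_dbi, max_ant_gain_dbi, grants):
    # Each grant: (max EIRP in dBm/MHz, low frequency Hz, high frequency Hz).
    # Per-grant nominal EIRP is 10**(p/10) * bandwidth_MHz in linear mW;
    # the sum is converted back to dBm and offset by the antenna gain in
    # the direction of interest relative to the maximum antenna gain.
    linear_sum_mw = 0.0
    for p_dbm_mhz, low_hz, high_hz in grants:
        bw_mhz = (high_hz - low_hz) / 1.e6
        linear_sum_mw += (10 ** (p_dbm_mhz / 10.0)) * bw_mhz
    return ant_gain_dbi - max_ant_gain_dbi + 10 * np.log10(linear_sum_mw)

# Two 10 MHz grants at 20 dBm/MHz, antenna 3 dB below its maximum gain:
print(total_eirp_dbm(3.0, 6.0, [(20.0, 3.55e9, 3.56e9), (20.0, 3.56e9, 3.57e9)]))
# -> ~30.0 dBm, which would then be checked against the 49.15 dBm limit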
geexie/dpbench
[ "7d41409ded3c816f35003bc5aea071852bceb892" ]
[ "native_dpcpp/blackscholes/GPU/base_bs_erf.py" ]
[ "# Copyright (C) 2017-2018 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport os\nimport run_utils as utils\nimport numpy as np\nfrom dpbench_datagen.blackscholes import gen_data_to_file, gen_rand_data\nfrom dpbench_python.blackscholes.bs_python import black_scholes_python\n\n# make xrange available in python 3\ntry:\n xrange\nexcept NameError:\n xrange = range\n\n\ndef ip_data_to_file(nopt):\n gen_data_to_file(nopt)\n\n\ndef gen_data_np(nopt):\n price, strike, t = gen_rand_data(nopt)\n return (\n price,\n strike,\n t,\n np.zeros(nopt, dtype=np.float64),\n -np.ones(nopt, dtype=np.float64),\n )\n\n\nRISK_FREE = 0.1\nVOLATILITY = 0.2\n\n# create input data, call blackscholes computation function (alg)\ndef run(name, sizes=14, step=2, nopt=2 ** 15):\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--steps\", required=False, default=sizes, help=\"Number of steps\"\n )\n parser.add_argument(\n \"--step\", required=False, default=step, help=\"Factor for each step\"\n )\n parser.add_argument(\n \"--size\", required=False, default=nopt, help=\"Initial data size\"\n )\n parser.add_argument(\n \"--repeat\", required=False, default=1, help=\"Iterations inside measured region\"\n )\n parser.add_argument(\n \"--usm\",\n required=False,\n action=\"store_true\",\n help=\"Use USM Shared or pure numpy\",\n )\n parser.add_argument(\n \"--test\",\n required=False,\n action=\"store_true\",\n help=\"Check for correctness by comparing output with naive Python version\",\n )\n\n args = parser.parse_args()\n sizes = int(args.steps)\n step = int(args.step)\n nopt = int(args.size)\n repeat = int(args.repeat)\n\n clean_string = [\"make\", \"clean\"]\n utils.run_command(clean_string, verbose=True)\n\n if args.usm:\n build_string = [\"make\", \"comp\"]\n utils.run_command(build_string, verbose=True)\n exec_name = \"./black_scholes_comp\"\n else:\n build_string = [\"make\"]\n utils.run_command(build_string, verbose=True)\n exec_name = \"./black_scholes\"\n\n if args.test:\n # run sequential python\n price, strike, t, p_call, p_put = gen_data_np(nopt)\n black_scholes_python(\n nopt, price, strike, t, RISK_FREE, VOLATILITY, p_call, p_put\n )\n\n # run dpcpp\n ip_data_to_file(nopt)\n run_cmd = [exec_name, str(nopt), str(1), \"-t\"]\n utils.run_command(run_cmd, verbose=True)\n\n # read output of dpcpp into n_call\n n_call = np.fromfile(\"call.bin\", np.float64)\n\n # read output of dpcpp into n_put\n n_put = np.fromfile(\"put.bin\", np.float64)\n\n # compare outputs\n if np.allclose(n_call, p_call) and np.allclose(n_put, p_put):\n print(\"Test succeeded\\n\")\n else:\n print(\"Test failed\\n\")\n return\n\n if os.path.isfile(\"runtimes.csv\"):\n os.remove(\"runtimes.csv\")\n\n for i in xrange(sizes):\n # generate input data\n ip_data_to_file(nopt)\n\n # run the C program\n run_cmd = [exec_name, str(nopt), str(repeat)]\n utils.run_command(run_cmd, verbose=True)\n nopt *= step\n repeat -= step\n if repeat < 1:\n repeat = 1\n\n\nif __name__ == \"__main__\":\n run(\"Blackscholes dpcpp\")\n" ]
[ [ "numpy.allclose", "numpy.fromfile", "numpy.ones", "numpy.zeros" ] ]
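The --test path above compares the native output against black_scholes_python, which is imported but not shown in this record. A hedged numpy sketch of that closed-form computation (the erf-based formulation and in-place-array signature are assumptions based on the dpbench Python implementations):

import numpy as np
from scipy.special import erf

def black_scholes_ref(price, strike, t, rate, vol):
    # Vectorized closed-form call/put prices using the erf formulation.
    a = np.log(price / strike)
    b = t * -rate
    z = t * vol * vol * 2
    c = 0.25 * z
    y = 1.0 / np.sqrt(z)
    w1 = (a - b + c) * y
    w2 = (a - b - c) * y
    d1 = 0.5 + 0.5 * erf(w1)
    d2 = 0.5 + 0.5 * erf(w2)
    se = np.exp(b) * strike  # discounted strike
    call = price * d1 - se * d2
    put = call - price + se
    return call, put

call, put = black_scholes_ref(np.array([42.0]), np.array([40.0]), 0.5, 0.1, 0.2)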
fyabc/MSRAPaperProject
[ "2d7974acfe8065523d0c56da695807e94acd0b34" ]
[ "MyMLFramework/tests.py" ]
[ "#! /usr/bin/python3\n# -*- encoding: utf-8 -*-\n\nfrom __future__ import unicode_literals, print_function\n\nimport numpy as np\nimport theano.tensor as T\nfrom theano import shared, config\n\nfrom model import SimpleModel, Model\nfrom layers.layer import Dense\nfrom layers.activations import Activation\n\n__author__ = 'fyabc'\n\nfX = config.floatX\n\n\ndef testSimpleModel():\n # config.optimizer = 'None'\n # config.exception_verbosity = 'high'\n\n # def initNorm(*args):\n # return np.asarray(np.random.randn(*args) * 0.1, dtype=fX)\n\n model = SimpleModel()\n model.setInput(2)\n\n W1 = shared(value=np.asarray([\n [1, 2, 3],\n [4, 0, 1],\n ], dtype=fX), name='W1')\n b1 = shared(value=np.asarray([\n 1, 2, 4,\n ], dtype=fX), name='b1')\n\n def layer1(x):\n return T.nnet.relu(T.dot(x, W1) + b1)\n\n model.addRaw(layer1, [W1, b1])\n\n model.compile()\n\n result = model.objectiveFunction([[6, 1], [7, 2], [8, 3]], [[7, 6, 2], [1, 2, 3], [4, 0, 5]])\n\n print(result)\n\n\ndef testModel():\n model = Model()\n model.add(Dense(\n outputShape=(3, 3,),\n inputShape=(3, 2,)\n ))\n model.add(Activation('sigmoid'))\n\n model.compile()\n\n result = model.objectiveFunction([[6, 1], [7, 2], [8, 3]], [[7, 6, 2], [1, 2, 3], [4, 0, 5]])\n\n print(result)\n\n\ndef test():\n # testSimpleModel()\n testModel()\n\n\nif __name__ == '__main__':\n test()\n" ]
[ [ "numpy.asarray" ] ]
YuxinZou/volkscv
[ "67ac83f0c0ac85bd6606053732b454db17c53de0" ]
[ "volkscv/utils/parser/txt_parse.py" ]
[ "import os\n\nimport numpy as np\n\nfrom .base import BaseParser\nfrom .utils import read_imglist\n\n\nclass TXTParser(BaseParser):\n \"\"\"Class of parser for classification TXT annotation file.\n\n xxx.png dog\n xxx.png cat\n xxxx.png dog\n\n Args:\n anno_path (str): Path of annotation file.\n categories (list or tuple): All categories of data.\n \"\"\"\n\n def __init__(self,\n categories=None,\n **kwargs):\n super(TXTParser, self).__init__(**kwargs)\n\n self.categories = categories\n assert self.imgs_list is not None, \\\n \"For txt file parser, the imgs_list attribute shouldn't be None.\"\n\n def __call__(self, need_shape=True):\n fname_list, labels_list, shapes_list, scores_list = [], [], [], []\n fnames, annos = read_imglist(self.txt_file)\n for fname, anno in zip(fnames, annos):\n fname = os.path.join(self.imgs_folder, fname)\n height, width = self._get_shape(fname) if need_shape else (0, 0)\n shapes_list.append([width, height])\n fname_list.append(fname)\n assert anno[0] in self.categories, \\\n f'Label: {anno[0]} is not in categories.'\n labels_list.append(self.categories.index(anno[0]))\n if len(anno) > 1:\n scores_list.append(float(anno[1]))\n\n self.result = dict(\n img_names=np.array(fname_list),\n categories=np.array(self.categories),\n shapes=np.array(shapes_list),\n labels=np.array(labels_list),\n scores=np.array(scores_list) if len(scores_list) else None,\n )\n return self.result\n" ]
[ [ "numpy.array" ] ]
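A self-contained sketch of the parsing loop above, with a hypothetical two-line annotation input (the BaseParser wiring and the read_imglist helper are not shown in this record; read_imglist is assumed to split each line into a file name plus the remaining fields):

import numpy as np

categories = ['cat', 'dog']
lines = ['img_001.png dog 0.98', 'img_002.png cat']  # hypothetical annotations

fname_list, labels_list, scores_list = [], [], []
for line in lines:
    fname, *anno = line.split()
    assert anno[0] in categories, f'Label: {anno[0]} is not in categories.'
    fname_list.append(fname)
    labels_list.append(categories.index(anno[0]))
    if len(anno) > 1:
        scores_list.append(float(anno[1]))  # optional confidence score

result = dict(
    img_names=np.array(fname_list),
    labels=np.array(labels_list),
    scores=np.array(scores_list) if len(scores_list) else None,
)
print(result['labels'])  # -> [1 0]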
mabdulhussin/openpilot
[ "3988e14c95043c7f1d35bfe5edbd7701fa44f9bb", "3988e14c95043c7f1d35bfe5edbd7701fa44f9bb" ]
[ "selfdrive/mapd/test/test_NodesData.py", "selfdrive/mapd/lib/Route.py" ]
[ "import unittest\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom selfdrive.mapd.lib.mock_data import MockRoad\nfrom selfdrive.mapd.lib.NodesData import vectors\n\n\nclass TestNodesData(unittest.TestCase):\n def test_vectors(self):\n points = np.radians(MockRoad.road1_points_grad)\n expected = np.array([\n [-1.34011951e-05, 1.00776468e-05],\n [-5.83610920e-06, 4.41046897e-06],\n [-7.83348567e-06, 5.94114032e-06],\n [-7.08560788e-06, 5.30408795e-06],\n [-6.57632550e-06, 4.05791838e-06],\n [-1.16077872e-06, 6.91151252e-07],\n [-1.53178098e-05, 9.62215139e-06],\n [-5.76314175e-06, 3.55176643e-06],\n [-1.61124141e-05, 9.86127759e-06],\n [-1.48006628e-05, 8.58192512e-06],\n [-1.72237209e-06, 1.60570482e-06],\n [-8.68985228e-06, 9.22062311e-06],\n [-1.42922812e-06, 1.51494711e-06],\n [-3.39761486e-06, 2.57087743e-06],\n [-2.75467373e-06, 1.28631255e-06],\n [-1.57501989e-05, 5.72309451e-06],\n [-2.52143954e-06, 1.34565295e-06],\n [-1.65278643e-06, 1.28630942e-06],\n [-2.22196114e-05, 1.64360838e-05],\n [-5.88675934e-06, 4.08234746e-06],\n [-1.83673390e-06, 1.46782408e-06],\n [-1.55004206e-06, 1.51843800e-06],\n [-1.20451533e-06, 2.06298011e-06],\n [-1.91801338e-06, 4.64083285e-06],\n [-2.38653483e-06, 5.60076524e-06],\n [-1.65269781e-06, 5.78402290e-06],\n [-3.66908309e-07, 2.75412965e-06],\n [0.00000000e+00, 1.92858882e-06],\n [9.09242615e-08, 2.66162711e-06],\n [3.14490354e-07, 1.53065382e-06],\n [8.66452477e-08, 4.83456208e-07],\n [2.41750593e-07, 1.10828411e-06],\n [7.43745228e-06, 1.27618831e-05],\n [5.59968054e-06, 9.63947367e-06],\n [2.01951467e-06, 2.75413219e-06],\n [4.59952643e-07, 6.42281301e-07],\n [1.74353749e-06, 1.74533121e-06],\n [2.57144338e-06, 2.11185266e-06],\n [1.46893187e-05, 1.11999169e-05],\n [3.84659229e-05, 2.85527952e-05],\n [2.71627936e-05, 1.98727946e-05],\n [8.44632540e-06, 6.15058628e-06],\n [2.29420323e-06, 1.92859222e-06],\n [2.58083439e-06, 3.16952222e-06],\n [3.76373643e-06, 5.14174911e-06],\n [5.32416098e-06, 6.51707770e-06],\n [8.62890928e-06, 1.11998258e-05],\n [1.25762497e-05, 1.65231340e-05],\n [8.90452991e-06, 1.10148240e-05],\n [4.86505726e-06, 4.59023120e-06],\n [3.85545276e-06, 3.39642031e-06],\n [3.48753893e-06, 3.30566145e-06],\n [2.99557303e-06, 2.61276368e-06],\n [2.15496788e-06, 1.87797727e-06],\n [4.10564937e-06, 3.58142649e-06],\n [1.53680853e-06, 1.33866906e-06],\n [4.99540175e-06, 4.35635790e-06],\n [1.37744970e-06, 1.19380643e-06],\n [1.74319821e-06, 1.28456429e-06],\n [9.99931238e-07, 1.14493663e-06],\n [6.42735560e-07, 1.19380547e-06],\n [3.66818436e-07, 1.46782199e-06],\n [5.45413874e-08, 1.83783170e-06],\n [-1.35818548e-07, 1.14842666e-06],\n [-5.50758101e-07, 3.02989178e-06],\n [-4.58785270e-07, 2.66162724e-06],\n [-2.51315555e-07, 1.19031459e-06],\n [-3.91409773e-07, 1.65457223e-06],\n [-2.14525206e-06, 5.67755902e-06],\n [-4.24558096e-07, 1.39102753e-06],\n [-1.46936730e-06, 5.32325561e-06],\n [-1.37632061e-06, 4.59021715e-06],\n [-8.26642899e-07, 4.68097349e-06],\n [-6.42702724e-07, 4.95673534e-06],\n [-3.66796960e-07, 7.25009780e-06],\n [-1.82861669e-07, 8.99542699e-06],\n [4.09564134e-07, 6.11214315e-06],\n [7.80629912e-08, 1.45734993e-06],\n [4.81205526e-07, 7.56076647e-06],\n [2.01036346e-07, 2.42775302e-06]])\n\n v = vectors(points)\n assert_array_almost_equal(v, expected)\n", "from selfdrive.mapd.lib.NodesData import NodesData, NodeDataIdx\nfrom selfdrive.mapd.config import QUERY_RADIUS\nfrom selfdrive.mapd.lib.geo import ref_vectors, R, distance_to_points\nfrom itertools import compress\nimport 
numpy as np\n\n\n_ACCEPTABLE_BEARING_DELTA_COSINE = -0.7 # Continuation paths with a bearing of 180 +/- 45 degrees.\n_MAX_ALLOWED_BEARING_DELTA_COSINE_AT_EDGE = -0.3420 # bearing delta at route edge must be 180 +/- 70 degrees.\n_MAP_DATA_EDGE_DISTANCE = 50 # mts. Consider edge of map data from this distance to edge of query radius.\n\n\nclass Route():\n \"\"\"A set of consecutive way relations forming a default driving route.\n \"\"\"\n def __init__(self, current, wr_index, way_collection_id, query_center):\n \"\"\"Create a Route object from a given `wr_index` (Way relation index)\n\n Args:\n current (WayRelation): The Way Relation that is currently located. It must be active.\n wr_index (Dict(NodeId, [WayRelation])): The index of WayRelations by node id of an edge node.\n way_collection_id (UUID): The id of the Way Collection that created this Route.\n query_center (Numpy Array): [lat, lon] numpy array in radians indicating the center of the data query.\n \"\"\"\n self.way_collection_id = way_collection_id\n self._ordered_way_relations = []\n self._nodes_data = None\n self._reset()\n\n # An active current way is needed to be able to build a route\n if not current.active:\n return\n\n # Build the route by iteratively finding the best matching ways continuing after the end of the\n # current (last_wr) way. Use the index to find the continuation possibilities on each iteration.\n last_wr = current\n ordered_way_ids = []\n while True:\n # - Append current element to the route list of ordered way relations.\n self._ordered_way_relations.append(last_wr)\n ordered_way_ids.append(last_wr.id)\n\n # - Get the id of the node at the end of the way and then fetch the way relations that share the end node id.\n last_node_id = last_wr.last_node.id\n way_relations = wr_index[last_node_id]\n\n # - If no more way_relations than last_wr, we got to the end.\n if len(way_relations) == 1:\n break\n\n # - Get the coordinates for the edge node and build the array of coordinates for the nodes before the edge node\n # on each of the common way relations, then get the vectors in cartesian plane for the end sections of each way.\n ref_point = last_wr.last_node_coordinates\n points = np.array(list(map(lambda wr: wr.node_before_edge_coordinates(last_node_id), way_relations)))\n v = ref_vectors(ref_point, points) * R\n\n # - Calculate the bearing (from true north clockwise) for every end section of each way.\n b = np.arctan2(v[:, 0], v[:, 1])\n\n # - Find index of last_wr section and calculate deltas of bearings to the other sections.\n last_wr_idx = way_relations.index(last_wr)\n b_ref = b[last_wr_idx]\n delta = b - b_ref\n\n # - Update the direction of the possible route continuation ways as starting from last_node_id.\n # Make sure to exclude any ways already included in the ordered list so as not to modify direction when there\n # are looping roads (like roundabouts). 
A way will never be included twice in a route anyway.\n for wr in way_relations:\n if wr.id not in ordered_way_ids:\n wr.update_direction_from_starting_node(last_node_id)\n\n # - Filter the possible route continuation way relations:\n # - exclude any way already added to the ordered list.\n # - exclude all way relations that are prohibited due to traffic direction.\n mask = [wr.id not in ordered_way_ids and not wr.is_prohibited for wr in way_relations]\n way_relations = list(compress(way_relations, mask))\n delta = delta[mask]\n\n # if no options left, we got to the end.\n if len(way_relations) == 0:\n break\n\n # - The cosine of the bearing delta will aid us in choosing the way that continues. The cosine is\n # minimum (-1) for a perfect straight continuation as delta would be pi or -pi.\n cos_delta = np.cos(delta)\n\n def pick_best_idx(cos_delta):\n \"\"\"Selects the best index on `cos_delta` array for a way that continues the route.\n In principle we want to choose the way that continues as straight as possible.\n But we need to make sure that if there are 2 or more ways continuing relatively straight, then we\n need to disambiguate, either by matching the `ref` or `name` value of the continuing way with the\n last way selected.\n This can prevent cases where the chosen route could be for instance an exit ramp of a way due to the fact\n that the ramp has a better match on bearing to the previous way. We choose to stay on the road with the same `ref`\n or `name` value if available.\n If there is no ambiguity or there are no `name` or `ref` values to disambiguate, then we pick the one with\n the straightest following direction.\n \"\"\"\n # Find the indexes of the cosine of the deltas that are considered straight enough to continue.\n idxs = np.nonzero(cos_delta < _ACCEPTABLE_BEARING_DELTA_COSINE)[0]\n\n # If no ambiguity or no way to break it, just return the straightest line.\n if len(idxs) <= 1 or (last_wr.ref is None and last_wr.name is None):\n # The section with the best continuation is the one with a bearing delta closest to pi. This is equivalent\n # to taking the one with the smallest cosine of the bearing delta, as cosine is minimum (-1) on both pi\n # and -pi.\n return np.argmin(cos_delta)\n\n wrs = [way_relations[idx] for idx in idxs]\n\n # If we find a continuation way with the same reference we just choose it.\n refs = list(map(lambda wr: wr.ref, wrs))\n if last_wr.ref is not None:\n idx = next((idx for idx, ref in enumerate(refs) if ref == last_wr.ref), None)\n if idx is not None:\n return idxs[idx]\n\n # If we find a continuation way with the same name we just choose it.\n names = list(map(lambda wr: wr.name, wrs))\n if last_wr.name is not None:\n idx = next((idx for idx, name in enumerate(names) if name == last_wr.name), None)\n if idx is not None:\n return idxs[idx]\n\n # We did not manage to disambiguate, choose the straightest path.\n return np.argmin(cos_delta)\n\n # Get the index of the continuation way.\n best_idx = pick_best_idx(cos_delta)\n\n # - Make sure to not select as route continuation a way that turns too much if we are close to the border of\n # map data queried. 
This is to avoid building a route that takes a sharp turn just because we do not have the\n # data for the way that actually continues straight.\n if cos_delta[best_idx] > _MAX_ALLOWED_BEARING_DELTA_COSINE_AT_EDGE:\n dist_to_center = distance_to_points(query_center, np.array([ref_point]))[0]\n if dist_to_center > QUERY_RADIUS - _MAP_DATA_EDGE_DISTANCE:\n break\n\n # - Select next way.\n last_wr = way_relations[best_idx]\n\n # Build the node data from the ordered list of way relations\n self._nodes_data = NodesData(self._ordered_way_relations, wr_index)\n\n # Locate where we are in the route node list.\n self._locate()\n\n def __repr__(self):\n count = self._nodes_data.count if self._nodes_data is not None else None\n return f'Route: {self.way_collection_id}, idx ahead: {self._ahead_idx} of {count}'\n\n def _reset(self):\n self._limits_ahead = None\n self._curvature_limits_ahead = None\n self._curvatures_ahead = None\n self._ahead_idx = None\n self._distance_to_node_ahead = None\n\n @property\n def located(self):\n return self._ahead_idx is not None\n\n def _locate(self):\n \"\"\"Will resolve the index in the nodes_data list for the node ahead of the current location.\n It updates as well the distance from the current location to the node ahead.\n \"\"\"\n current = self.current_wr\n if current is None:\n return\n\n node_ahead_id = current.node_ahead.id\n self._distance_to_node_ahead = current.distance_to_node_ahead\n start_idx = self._ahead_idx if self._ahead_idx is not None else 1\n self._ahead_idx = None\n\n ids = self._nodes_data.get(NodeDataIdx.node_id)\n for idx in range(start_idx, len(ids)):\n if ids[idx] == node_ahead_id:\n self._ahead_idx = idx\n break\n\n @property\n def current_wr(self):\n return self._ordered_way_relations[0] if len(self._ordered_way_relations) else None\n\n def update(self, location_rad, bearing_rad, accuracy):\n \"\"\"Will update the route structure based on the given `location_rad` and `bearing_rad` assuming progress on the\n route on the original direction. If direction has changed or active point on the route can not be found, the route\n will become invalid.\n \"\"\"\n if len(self._ordered_way_relations) == 0 or location_rad is None or bearing_rad is None:\n return\n\n # Skip if no update on location or bearing.\n if np.array_equal(self.current_wr.location_rad, location_rad) and self.current_wr.bearing_rad == bearing_rad:\n return\n\n # Traverse the way relations in the actual order until we find an active one. From there, rebuild the route\n # with the way relations remaining ahead.\n for idx, wr in enumerate(self._ordered_way_relations):\n active_direction = wr.direction\n wr.update(location_rad, bearing_rad, accuracy)\n\n if not wr.active:\n continue\n\n if wr.direction != active_direction:\n # Driving direction on the route has changed. Stop.\n break\n\n # We have now the current wr. Repopulate from here till the end and locate\n self._ordered_way_relations = self._ordered_way_relations[idx:]\n self._reset()\n self._locate()\n\n # If the active way is diverting, check whether there are possibilities to divert from the route in the\n # vicinity of the current location. If there are possibilities, then stop here to lose the route as we are\n # most likely driving away. If there are no possibilities, then stick to the route as the diversion is probably\n # just a matter of GPS accuracy. 
(It can happen after driving under a bridge)\n if wr.diverting and len(self._nodes_data.possible_divertions(self._ahead_idx, self._distance_to_node_ahead)) > 0:\n break\n\n # The current location in route is valid, return.\n return\n\n # if we got here, there is no new active way relation or driving direction has changed. Reset.\n self._reset()\n\n @property\n def speed_limits_ahead(self):\n \"\"\"Returns an array of SpeedLimitSection objects for the actual route ahead of current location\n \"\"\"\n if self._limits_ahead is not None:\n return self._limits_ahead\n\n if self._nodes_data is None or self._ahead_idx is None:\n return []\n\n self._limits_ahead = self._nodes_data.speed_limits_ahead(self._ahead_idx, self._distance_to_node_ahead)\n return self._limits_ahead\n\n @property\n def curvature_speed_limits_ahead(self):\n \"\"\"Returns an array of TurnSpeedLimitSection objects for the actual route ahead of current location due\n to curvatures\n \"\"\"\n if self._curvature_limits_ahead is not None:\n return self._curvature_limits_ahead\n\n if self._nodes_data is None or self._ahead_idx is None:\n return []\n\n self._curvature_limits_ahead = self._nodes_data. \\\n curvatures_speed_limit_sections_ahead(self._ahead_idx, self._distance_to_node_ahead)\n\n return self._curvature_limits_ahead\n\n @property\n def current_speed_limit(self):\n if not self.located:\n return None\n\n limits_ahead = self.speed_limits_ahead\n if len(limits_ahead) == 0 or limits_ahead[0].start != 0:\n return None\n\n return limits_ahead[0].value\n\n @property\n def current_curvature_speed_limit_section(self):\n if not self.located:\n return None\n\n limits_ahead = self.curvature_speed_limits_ahead\n if len(limits_ahead) == 0 or limits_ahead[0].start != 0:\n return None\n\n return limits_ahead[0]\n\n @property\n def next_speed_limit_section(self):\n if not self.located:\n return None\n\n limits_ahead = self.speed_limits_ahead\n if len(limits_ahead) == 0:\n return None\n\n # Find the first section that does not start in 0. i.e. the next section\n for section in limits_ahead:\n if section.start > 0:\n return section\n\n return None\n\n def next_curvature_speed_limit_sections(self, horizon_mts):\n if not self.located:\n return []\n\n # Provide the curvature speed sections that start ahead (> 0) and up to horizon\n return list(filter(lambda la: la.start > 0 and la.start <= horizon_mts, self.curvature_speed_limits_ahead))\n\n @property\n def distance_to_end(self):\n if not self.located:\n return None\n\n return self._nodes_data.distance_to_end(self._ahead_idx, self._distance_to_node_ahead)\n" ]
[ [ "numpy.testing.assert_array_almost_equal", "numpy.radians", "numpy.array" ], [ "numpy.array", "numpy.array_equal", "numpy.argmin", "numpy.nonzero", "numpy.arctan2", "numpy.cos" ] ]
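The continuation choice in Route.__init__ above reduces to comparing cosines of bearing deltas: cos(delta) is -1 for a perfectly straight continuation (delta = +/- pi). A minimal sketch of that selection rule with toy bearings:

import numpy as np

ACCEPTABLE = -0.7  # same threshold as _ACCEPTABLE_BEARING_DELTA_COSINE

def straightest_continuation(b_ref, candidate_bearings):
    # Pick the candidate whose bearing differs from the reference by
    # an angle closest to 180 degrees (smallest cosine of the delta).
    cos_delta = np.cos(np.asarray(candidate_bearings) - b_ref)
    best = int(np.argmin(cos_delta))
    return best, bool(cos_delta[best] < ACCEPTABLE)

# Arriving heading north (0 rad); one candidate leaves roughly south,
# the other east: the southbound way is the straight continuation.
print(straightest_continuation(0.0, [0.95 * np.pi, 0.5 * np.pi]))
# -> (0, True)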
yongzx/Semi-supervised-Deep-Embedded-Clustering-with-Anomaly-Detection-for-Semantic-Frame-Induction
[ "7ead941bdf50093f1b8dd860cdeb5f04fb223165" ]
[ "evaluation/clustering.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Nov 04, 2019\n@author: yongzhengxin\n\"\"\"\n\nimport numpy as np\nfrom sklearn import metrics\nimport bcubed\n\ndef purity_score(y_true, y_pred, inv=False):\n \"\"\"\n :param y_true: true cluster ids\n :param y_pred: predicted cluster ids\n :param inv: boolean\n :return: purity (inv = False) or inverse-purity (inv = True)\n \"\"\"\n # compute contingency matrix (also called confusion matrix)\n contingency_matrix = metrics.cluster.contingency_matrix(y_true, y_pred)\n axis = 0 if not inv else 1\n\n # return purity\n return np.sum(np.amax(contingency_matrix, axis=axis)) / np.sum(contingency_matrix)\n\n\ndef f_purity_score(y_true, y_pred):\n \"\"\"\n :param y_true: true cluster ids\n :param y_pred: predicted cluster ids\n :return: F1 purity score\n\n Implementation details - harmonic mean of purity and inverse purity score - see https://arxiv.org/pdf/1401.4590.pdf\n \"\"\"\n return 2 * (purity_score(y_true, y_pred) * purity_score(y_true, y_pred, inv=True)) / (purity_score(y_true, y_pred) + purity_score(y_true, y_pred, inv=True))\n\n\ndef external_eval_clusters(y_true, y_pred):\n \"\"\"\n :param y_true: true cluster ids\n :param y_pred: predicted cluster ids\n :return: external evaluation metrics of clustering quality.\n The metrics are purity, inverse purity, harmonic mean, b-cubed precision, recall and their harmonic mean.\n \"\"\"\n purity = purity_score(y_true, y_pred)\n inverse_purity = purity_score(y_true, y_pred, inv=True)\n f_purity = f_purity_score(y_true, y_pred)\n\n ldict = {i: {cluster_idx} for i, cluster_idx in enumerate(y_true)}\n cdict = {i: {cluster_idx} for i, cluster_idx in enumerate(y_pred)}\n bcubed_precision = bcubed.precision(cdict, ldict)\n bcubed_recall = bcubed.recall(cdict, ldict)\n bcubed_fscore = bcubed.fscore(bcubed_precision, bcubed_recall)\n\n return purity, inverse_purity, f_purity, bcubed_precision, bcubed_recall, bcubed_fscore\n\n\ndef print_external_eval_clusters(purity, inverse_purity, f_purity, bcubed_precision, bcubed_recall, bcubed_fscore):\n \"\"\"\n Print out the external evaluation metrics of clustering quality.\n \"\"\"\n print(\"Purity:\", purity)\n print(\"Inverse Purity:\", inverse_purity)\n print(\"F-score (Purity and Inverse Purity):\", f_purity)\n print(\"BCubed Precision:\", bcubed_precision)\n print(\"BCubed Recall:\", bcubed_recall)\n print(\"BCubed F1:\", bcubed_fscore)\n return\n" ]
[ [ "sklearn.metrics.cluster.contingency_matrix", "numpy.amax", "numpy.sum" ] ]
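A tiny worked example of the purity computations defined above: with two true classes and two predicted clusters, the contingency matrix is [[2, 1], [0, 3]], so purity, inverse purity, and their harmonic mean all come out to 5/6:

import numpy as np
from sklearn import metrics

y_true = [0, 0, 0, 1, 1, 1]
y_pred = [0, 0, 1, 1, 1, 1]

cm = metrics.cluster.contingency_matrix(y_true, y_pred)
purity = np.sum(np.amax(cm, axis=0)) / np.sum(cm)     # 5/6
inverse = np.sum(np.amax(cm, axis=1)) / np.sum(cm)    # 5/6
f_purity = 2 * purity * inverse / (purity + inverse)  # 5/6
print(purity, inverse, f_purity)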
Steward3103/Tensorflow
[ "1039ff9ee8c8c7ed09f9bb106131a50285866dd4" ]
[ "tensorflow/contrib/lite/python/lite.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Lite tooling helper functionality.\n\nEXPERIMENTAL: APIs here are unstable and likely to change without notice.\n\n@@TocoConverter\n@@toco_convert\n@@toco_convert_protos\n@@Interpreter\n@@OpHint\n@@convert_op_hints_to_stubs\n\n@@FLOAT\n@@QUANTIZED_UINT8\n@@TFLITE\n@@GRAPHVIZ_DOT\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.lite.python import lite_constants as constants\nfrom tensorflow.contrib.lite.python.convert import tensor_name\nfrom tensorflow.contrib.lite.python.convert import toco_convert\nfrom tensorflow.contrib.lite.python.convert import toco_convert_protos # pylint: disable=unused-import\nfrom tensorflow.contrib.lite.python.convert_saved_model import freeze_saved_model\nfrom tensorflow.contrib.lite.python.interpreter import Interpreter # pylint: disable=unused-import\nfrom tensorflow.contrib.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import\nfrom tensorflow.contrib.lite.python.op_hint import OpHint # pylint: disable=unused-import\nfrom tensorflow.python.framework import graph_util as tf_graph_util\nfrom tensorflow.python.ops.variables import global_variables_initializer\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.saved_model import tag_constants\n\n\nclass TocoConverter(object):\n \"\"\"Convert a TensorFlow model into `output_format` using TOCO.\n\n This is used to convert from a TensorFlow GraphDef or SavedModel into either a\n TFLite FlatBuffer or graph visualization.\n\n Attributes:\n\n inference_type: Currently must be `{FLOAT, QUANTIZED_UINT8}`.\n (default FLOAT)\n output_format: Type of data to write (currently must be TFLITE or\n GRAPHVIZ_DOT). (default TFLITE)\n quantized_input_stats: The mean and std deviation of training data for each\n input tensor. Only needed if `inference_type` is `QUANTIZED_UINT8`.\n (default None)\n drop_control_dependency: Boolean indicating whether to drop control\n dependencies silently. This is due to TFLite not supporting control\n dependencies. (default True)\n allow_custom_ops: Boolean indicating whether to allow custom operations.\n (default False)\n\n Example usage:\n\n # Converting a frozen graph.\n converter = lite.TocoConverter.from_session(sess, in_tensors, out_tensors)\n tflite_model = converter.convert()\n open(\"converted_model.tflite\", \"wb\").write(tflite_model)\n\n # Converting a SavedModel.\n converter = lite.TocoConverter.from_saved_model(saved_model_dir)\n tflite_model = converter.convert()\n \"\"\"\n\n def __init__(self, graph_def, input_tensors, output_tensors):\n \"\"\"Constructor for TocoConverter.\n\n Args:\n\n graph_def: TensorFlow GraphDef.\n input_tensors: List of input tensors. 
Type and shape are computed using\n `foo.get_shape()` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n \"\"\"\n self._graph_def = graph_def\n self._input_tensors = input_tensors\n self._output_tensors = output_tensors\n self.inference_type = constants.FLOAT\n self.output_format = constants.TFLITE\n self.quantized_input_stats = None\n self.drop_control_dependency = True\n self.allow_custom_ops = False\n\n @classmethod\n def from_session(cls,\n sess,\n input_tensors,\n output_tensors,\n freeze_variables=False):\n \"\"\"Creates a TocoConverter class from a TensorFlow Session.\n\n Args:\n sess: TensorFlow Session.\n input_tensors: List of input tensors. Type and shape are computed using\n `foo.get_shape()` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n freeze_variables: Boolean indicating whether the variables need to be\n converted into constants via the freeze_graph.py script.\n (default False)\n\n Returns:\n TocoConverter class.\n \"\"\"\n\n # Get GraphDef.\n if freeze_variables:\n sess.run(global_variables_initializer())\n output_arrays = [tensor_name(tensor) for tensor in output_tensors]\n graph_def = tf_graph_util.convert_variables_to_constants(\n sess, sess.graph_def, output_arrays)\n else:\n graph_def = sess.graph_def\n\n # Create TocoConverter class.\n return cls(graph_def, input_tensors, output_tensors)\n\n @classmethod\n def from_saved_model(\n cls,\n saved_model_dir,\n input_arrays=None,\n input_shapes=None,\n output_arrays=None,\n tag_set=None,\n signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):\n \"\"\"Creates a TocoConverter class from a SavedModel.\n\n Args:\n saved_model_dir: SavedModel directory to convert.\n input_arrays: List of input tensors to freeze graph with. Uses input\n arrays from SignatureDef when none are provided. (default None)\n input_shapes: Map of strings representing input tensor names to list of\n integers representing input shapes (e.g., {\"foo\": [1, 16, 16, 3]}).\n Automatically determined when input shapes is None (e.g., {\"foo\" :\n None}). (default None)\n output_arrays: List of output tensors to freeze graph with. Uses output\n arrays from SignatureDef when none are provided. (default None)\n tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to\n analyze. All tags in the tag set must be present. (default \"serve\")\n signature_key: Key identifying SignatureDef containing inputs and outputs.\n\n Returns:\n TocoConverter class.\n \"\"\"\n if tag_set is None:\n tag_set = set([tag_constants.SERVING])\n\n result = freeze_saved_model(saved_model_dir, input_arrays, input_shapes,\n output_arrays, tag_set, signature_key)\n return cls(\n graph_def=result[0], input_tensors=result[1], output_tensors=result[2])\n\n def convert(self):\n \"\"\"Converts a TensorFlow GraphDef based on instance variables.\n\n Returns:\n The converted data in serialized format. Either a TFLite Flatbuffer or a\n Graphviz graph depending on value in `output_format`.\n\n Raises:\n ValueError:\n None value for dimension in input_tensor.\n \"\"\"\n # Checks dimensions in input tensor.\n for tensor in self._input_tensors:\n shape = tensor.get_shape().as_list()\n if None in shape[1:]:\n raise ValueError(\n \"None is only supported in the 1st dimension. 
Tensor '{0}' has \"\n \"invalid shape '{1}'.\".format(tensor.name, shape))\n elif shape[0] is None:\n self._set_batch_size(batch_size=1)\n\n # Converts model.\n result = toco_convert(\n input_data=self._graph_def,\n input_tensors=self._input_tensors,\n output_tensors=self._output_tensors,\n inference_type=self.inference_type,\n input_format=constants.TENSORFLOW_GRAPHDEF,\n output_format=self.output_format,\n quantized_input_stats=self.quantized_input_stats,\n drop_control_dependency=self.drop_control_dependency)\n return result\n\n def _set_batch_size(self, batch_size):\n \"\"\"Sets the first dimension of the input tensor to `batch_size`.\n\n Args:\n batch_size: Batch size for the model. Replaces the first dimension of an\n input size array if undefined. (default 1)\n \"\"\"\n for tensor in self._input_tensors:\n shape = tensor.get_shape().as_list()\n shape[0] = batch_size\n tensor.set_shape(shape)\n" ]
[ [ "tensorflow.contrib.lite.python.convert_saved_model.freeze_saved_model", "tensorflow.contrib.lite.python.convert.toco_convert", "tensorflow.contrib.lite.python.convert.tensor_name", "tensorflow.python.framework.graph_util.convert_variables_to_constants", "tensorflow.python.ops.variables.global_variables_initializer" ] ]
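The class docstring above already sketches the API; for completeness, a minimal end-to-end usage with a trivial y = 2x graph (TF 1.x contrib-era code, matching this record):

import tensorflow as tf
from tensorflow.contrib.lite.python import lite

# Build a tiny graph to convert.
img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 4))
out = tf.identity(img * 2.0, name="out")

with tf.Session() as sess:
    converter = lite.TocoConverter.from_session(sess, [img], [out])
    tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)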
jjakimoto/rl_traders.py
[ "d5411c96d49ba6a54751d12cdd11974e5cc1a8aa" ]
[ "rl_traders/models.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch_utils.models import FeedForward\n\n\nclass EIIEFeedForward(nn.Module):\n def __init__(self, model_params, cash_bias):\n super(EIIEFeedForward, self).__init__()\n self.lower_model = FeedForward(model_params['lower_params'])\n self.upper_model = FeedForward(model_params['upper_params'])\n self.cash_bias = nn.Parameter(cash_bias)\n\n def forward(self, states, prev_actions):\n n_batch = states.shape[0]\n outputs = self.lower_model(states)\n # We do not use cash actions as input, prev_actions[:, 0]\n prev_actions = prev_actions[:, None, None, 1:]\n # Concatenation with channel dimension\n outputs = torch.cat((outputs, prev_actions), dim=1)\n prev_softmax = self.upper_model(outputs)\n _cash_bias = self.cash_bias.repeat(n_batch, 1)\n prev_softmax = torch.cat((_cash_bias, prev_softmax), dim=-1)\n actions = F.softmax(prev_softmax, dim=-1)\n return actions\n\n def predict(self, state, prev_action):\n states = state[None, :]\n prev_actions = prev_action[None, :]\n return self.forward(states, prev_actions)[0].detach().numpy()\n" ]
[ [ "torch.cat", "torch.nn.functional.softmax", "torch.nn.Parameter" ] ]
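A shape-only sketch of the portfolio head above (the sizes are made up): the upper model scores each non-cash asset, a learnable cash bias is prepended, and the softmax makes the weights sum to 1 over cash plus assets:

import torch
import torch.nn.functional as F

n_batch, n_assets = 4, 11
prev_softmax = torch.randn(n_batch, n_assets)  # upper-model scores per asset
cash_bias = torch.zeros(1, 1)                  # an nn.Parameter in the model

scores = torch.cat((cash_bias.repeat(n_batch, 1), prev_softmax), dim=-1)
actions = F.softmax(scores, dim=-1)
print(actions.shape, actions.sum(dim=-1))      # torch.Size([4, 12]), all ones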
tubamuzzaffar/RACT
[ "6c0a44eb795c3b54a0d43e424cb80c400b306197" ]
[ "utils/data_loaders.py" ]
[ "from scipy import sparse\nimport pandas as pd\nimport numpy as np\nimport os\n\nimport tensorflow as tf\n\n\ndef load_train_data(csv_file, n_items):\n tp = pd.read_csv(csv_file)\n n_users = tp['uid'].max() + 1\n\n rows, cols = tp['uid'], tp['sid']\n data = sparse.csr_matrix((np.ones_like(rows), (rows, cols)),\n dtype='float64',\n shape=(n_users, n_items))\n return data\n\n\ndef load_tr_te_data(csv_file_tr, csv_file_te, n_items):\n tp_tr = pd.read_csv(csv_file_tr)\n tp_te = pd.read_csv(csv_file_te)\n\n start_idx = min(tp_tr['uid'].min(), tp_te['uid'].min())\n end_idx = max(tp_tr['uid'].max(), tp_te['uid'].max())\n\n rows_tr, cols_tr = tp_tr['uid'] - start_idx, tp_tr['sid']\n rows_te, cols_te = tp_te['uid'] - start_idx, tp_te['sid']\n\n data_tr = sparse.csr_matrix((np.ones_like(rows_tr), (rows_tr, cols_tr)),\n dtype='float64',\n shape=(end_idx - start_idx + 1, n_items))\n data_te = sparse.csr_matrix((np.ones_like(rows_te), (rows_te, cols_te)),\n dtype='float64',\n shape=(end_idx - start_idx + 1, n_items))\n return data_tr, data_te\n\n\ndef tr_te_dataset(data_tr, data_te, batch_size):\n # https://www.tensorflow.org/performance/performance_guide makes me think that I'm doing\n # something wrong, because my GPU usage hovers near 0 usually. That's v disappointing. I hope\n # I can speed it up hugely...\n # This is going to take in the output of data_tr and data_te, and turn them into\n # things we can sample from.\n\n # The only worry I have is, I don't know exactly how to do the whole \"masking\" part in here..\n\n # The way it works is, load_train_data just loads in training data, while load_tr_te_data\n # has goal-vectors as well. These are the ones that you drop-out. So, this really should be fine.\n\n data_tr = data_tr.astype(np.float32)\n data_tr_coo = data_tr.tocoo()\n\n n_items = data_tr_coo.shape[1]\n\n indices = np.mat([data_tr_coo.row, data_tr_coo.col]).transpose()\n sparse_data_tr = tf.SparseTensor(indices, data_tr_coo.data, data_tr_coo.shape)\n\n data_te = data_te.astype(np.float32)\n data_te_coo = data_te.tocoo()\n\n indices = np.mat([data_te_coo.row, data_te_coo.col]).transpose()\n sparse_data_te = tf.SparseTensor(indices, data_te_coo.data, data_te_coo.shape)\n\n samples_tr = tf.data.Dataset.from_tensor_slices(sparse_data_tr)\n samples_te = tf.data.Dataset.from_tensor_slices(sparse_data_te)\n\n # 10000 might be too big to sample from... 
Not sure how that's supposed to work with batch anyways.\n dataset = tf.data.Dataset.zip((samples_tr, samples_te)).shuffle(100).batch(\n batch_size, drop_remainder=True)\n\n dataset = dataset.map(lambda x, y: (tf.sparse_tensor_to_dense(x), tf.sparse_tensor_to_dense(y)))\n\n expected_shape = tf.TensorShape([batch_size, n_items])\n dataset = dataset.apply(tf.contrib.data.assert_element_shape((expected_shape, expected_shape)))\n\n # dataset = dataset.skip(15)\n\n return dataset\n # dataset = dataset.map()\n\n\ndef train_dataset(data_tr, batch_size):\n\n # Note: I'm going to do the most heinous of things: I'm going to add in a fake operation here,\n # so that it has the same form as the other guy.\n # That will let us swap them out.\n\n data_tr = data_tr.astype(np.float32)\n\n data_tr_coo = data_tr.tocoo()\n\n n_items = data_tr_coo.shape[1]\n\n indices = np.mat([data_tr_coo.row, data_tr_coo.col]).transpose()\n sparse_data = tf.SparseTensor(indices, data_tr_coo.data, data_tr_coo.shape)\n\n samples_tr = tf.data.Dataset.from_tensor_slices(sparse_data)\n\n\n dataset = samples_tr.shuffle(100).batch(batch_size, drop_remainder=True)#.map(tf.sparse_to_dense)\n dataset = dataset.map(tf.sparse_tensor_to_dense)\n\n expected_shape = tf.TensorShape([batch_size, n_items])\n dataset = dataset.apply(tf.contrib.data.assert_element_shape(expected_shape))\n\n dataset = dataset.zip((dataset, dataset))\n # dataset.apply(tf.contrib.data.assert_element_shape([expected_shape, expected_shape]))\n\n # dataset = dataset.skip(200)\n\n return dataset\n\n\ndef get_batch_from_list(idxlist, batch_size, batch_num, data):\n disc_training_indices = idxlist[(batch_size * batch_num):(batch_size * (batch_num + 1))]\n X_train = data[disc_training_indices]\n if sparse.isspmatrix(X_train):\n X_train = X_train.toarray()\n X_train = X_train.astype('float32')\n return X_train\n\n\ndef get_num_items(pro_dir):\n unique_sid = list()\n with open(os.path.join(pro_dir, 'unique_sid.txt'), 'r') as f:\n for line in f:\n unique_sid.append(line.strip())\n\n n_items = len(unique_sid)\n print(\"n_items: {}\".format(n_items))\n return n_items\n" ]
[ [ "scipy.sparse.isspmatrix", "tensorflow.SparseTensor", "numpy.ones_like", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.TensorShape", "tensorflow.sparse_tensor_to_dense", "tensorflow.contrib.data.assert_element_shape", "pandas.read_csv", "tensorflow.data.Dataset.zip", "numpy.mat" ] ]
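A toy, self-contained sketch of the sparse-matrix-to-tf.data path used by the loaders above (TF 1.x graph-mode API, as in this record):

import numpy as np
import tensorflow as tf
from scipy import sparse

# A small sparse matrix standing in for the user-item data.
coo = sparse.csr_matrix(np.eye(4, dtype=np.float32)).tocoo()
indices = np.mat([coo.row, coo.col]).transpose()
st = tf.SparseTensor(indices, coo.data, coo.shape)

ds = tf.data.Dataset.from_tensor_slices(st).batch(2, drop_remainder=True)
ds = ds.map(tf.sparse_tensor_to_dense)

batch = ds.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    print(sess.run(batch))  # dense (2, 4) slices of the identity matrix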
timcera/plottoolbox
[ "b5f4b634d366eb5ba244e2f1fd33a7ef0eba7298" ]
[ "src/plottoolbox/skill_metrics/index_agreement.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\n\nfrom . import utils\n\n\ndef index_agreement(simulated, observed):\n \"\"\"\n Calculate the index of agreement.\n\n Calculates the index of agreement between two variables\n simulated and observed. The index_agreement is calculated using the\n formula:\n\n index_agreement = 1.0 - sum((o - s)**2) /\n sum((abs(s - mean(o)) + abs(o - mean(o)))**2)\n\n where s is the simulated values, o is the observed values, and\n N is the total number of values in s & o. Note that s & o must\n have the same number of values.\n\n The index of agreement is between 0 and 1, where 1 is a perfect match.\n\n Input:\n simulated : simulated values\n observed : observed values\n\n Output:\n index_agreement : index of agreement\n \"\"\"\n # Check that dimensions of simulated and observed fields match\n utils.check_arrays(simulated, observed)\n\n # Calculate the index_agreement\n index_agreement = 1.0 - (\n np.sum((observed - simulated) ** 2)\n / (\n np.sum(\n (\n np.abs(simulated - np.mean(observed))\n + np.abs(observed - np.mean(observed))\n )\n ** 2\n )\n )\n )\n\n return index_agreement\n" ]
[ [ "numpy.sum", "numpy.mean" ] ]
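A short worked example of the formula above: small simulated/observed series that track each other closely yield an index of agreement near 1:

import numpy as np

observed = np.array([1.0, 2.0, 3.0, 4.0])
simulated = np.array([1.1, 1.9, 3.2, 3.8])

o_bar = np.mean(observed)
d = 1.0 - np.sum((observed - simulated) ** 2) / np.sum(
    (np.abs(simulated - o_bar) + np.abs(observed - o_bar)) ** 2
)
print(d)  # ~0.995, close to a perfect match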
dobraczka/pystow
[ "d7d66bf87dc3eeb266f6020621649ca8b68be6a4" ]
[ "tests/test_utils.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"Tests for utilities.\"\"\"\n\nimport hashlib\nimport os\nimport tempfile\nimport unittest\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\n\nfrom pystow.utils import (\n HexDigestError,\n download,\n getenv_path,\n mkdir,\n mock_envvar,\n n,\n name_from_url,\n read_tarfile_csv,\n read_zip_np,\n read_zipfile_csv,\n write_tarfile_csv,\n write_zipfile_csv,\n write_zipfile_np,\n)\n\nHERE = Path(__file__).resolve().parent\nTEST_TXT = HERE.joinpath(\"resources\", \"test.txt\")\n\n\nclass TestUtils(unittest.TestCase):\n \"\"\"Test utility functions.\"\"\"\n\n def test_name_from_url(self):\n \"\"\"Test :func:`name_from_url`.\"\"\"\n data = [\n (\"test.tsv\", \"https://example.com/test.tsv\"),\n (\"test.tsv\", \"https://example.com/deeper/test.tsv\"),\n (\"test.tsv.gz\", \"https://example.com/deeper/test.tsv.gz\"),\n ]\n for name, url in data:\n with self.subTest(name=name, url=url):\n self.assertEqual(name, name_from_url(url))\n\n def test_mkdir(self):\n \"\"\"Test for ensuring a directory.\"\"\"\n with tempfile.TemporaryDirectory() as directory:\n directory = Path(directory)\n subdirectory = directory / \"sd1\"\n self.assertFalse(subdirectory.exists())\n\n mkdir(subdirectory, ensure_exists=False)\n self.assertFalse(subdirectory.exists())\n\n mkdir(subdirectory, ensure_exists=True)\n self.assertTrue(subdirectory.exists())\n\n def test_mock_envvar(self):\n \"\"\"Test that environment variables can be mocked properly.\"\"\"\n name, value = n(), n()\n\n self.assertNotIn(name, os.environ)\n with mock_envvar(name, value):\n self.assertIn(name, os.environ)\n self.assertEqual(value, os.getenv(name))\n self.assertNotIn(name, os.environ)\n\n def test_getenv_path(self):\n \"\"\"Test that :func:`getenv_path` works properly.\"\"\"\n envvar = n()\n\n with tempfile.TemporaryDirectory() as directory:\n directory = Path(directory)\n value = directory / n()\n default = directory / n()\n\n self.assertEqual(default, getenv_path(envvar, default))\n with mock_envvar(envvar, value.as_posix()):\n self.assertEqual(value, getenv_path(envvar, default))\n # Check that it goes back\n self.assertEqual(default, getenv_path(envvar, default))\n\n def test_compressed_io(self):\n \"\"\"Test that the read/write to compressed folder functions work.\"\"\"\n rows = [[1, 2], [3, 4], [5, 6]]\n columns = [\"A\", \"B\"]\n df = pd.DataFrame(rows, columns=columns)\n inner_path = \"okay.tsv\"\n\n data = [\n (\"test.zip\", write_zipfile_csv, read_zipfile_csv),\n (\"test.tar.gz\", write_tarfile_csv, read_tarfile_csv),\n ]\n for name, writer, reader in data:\n with self.subTest(name=name), tempfile.TemporaryDirectory() as directory:\n directory = Path(directory)\n path = directory / name\n self.assertFalse(path.exists())\n writer(df, path=path, inner_path=inner_path)\n self.assertTrue(path.exists())\n new_df = reader(path=path, inner_path=inner_path)\n self.assertEqual(list(df.columns), list(new_df.columns))\n self.assertEqual(df.values.tolist(), new_df.values.tolist())\n\n def test_numpy_io(self):\n \"\"\"Test IO with numpy.\"\"\"\n arr = np.array([[0, 1], [2, 3]])\n inner_path = \"okay.npz\"\n with tempfile.TemporaryDirectory() as directory:\n directory = Path(directory)\n path = directory / \"test.zip\"\n write_zipfile_np(arr, inner_path=inner_path, path=path)\n reloaded_arr = read_zip_np(path=path, inner_path=inner_path)\n self.assertTrue(np.array_equal(arr, reloaded_arr))\n\n\nclass TestHashing(unittest.TestCase):\n \"\"\"Tests for hexdigest checking.\"\"\"\n\n def setUp(self) -> 
None:\n \"\"\"Set up a test.\"\"\"\n self.directory = tempfile.TemporaryDirectory()\n self.path = Path(self.directory.name).joinpath(\"test.tsv\")\n\n md5 = hashlib.md5() # noqa:S303\n with TEST_TXT.open(\"rb\") as file:\n md5.update(file.read())\n self.expected_md5 = md5.hexdigest()\n self.mismatching_md5_hexdigest = \"yolo\"\n self.assertNotEqual(self.mismatching_md5_hexdigest, self.expected_md5)\n\n def tearDown(self) -> None:\n \"\"\"Tear down a test.\"\"\"\n self.directory.cleanup()\n\n def test_hash_success(self):\n \"\"\"Test checking actually works.\"\"\"\n self.assertFalse(self.path.exists())\n download(\n url=TEST_TXT.as_uri(),\n path=self.path,\n hexdigests={\n \"md5\": self.expected_md5,\n },\n )\n\n def test_hash_error(self):\n \"\"\"Test hash error on download.\"\"\"\n self.assertFalse(self.path.exists())\n with self.assertRaises(HexDigestError):\n download(\n url=TEST_TXT.as_uri(),\n path=self.path,\n hexdigests={\n \"md5\": self.mismatching_md5_hexdigest,\n },\n )\n\n def test_override_hash_error(self):\n \"\"\"Test hash error on download.\"\"\"\n self.path.write_text(\"test file content\")\n\n self.assertTrue(self.path.exists())\n with self.assertRaises(HexDigestError):\n download(\n url=TEST_TXT.as_uri(),\n path=self.path,\n hexdigests={\n \"md5\": self.expected_md5,\n },\n force=False,\n )\n\n def test_force(self):\n \"\"\"Test overwriting wrong file.\"\"\"\n # now if force=True it should not bother with the hash check\n self.path.write_text(\"test file content\")\n\n self.assertTrue(self.path.exists())\n download(\n url=TEST_TXT.as_uri(),\n path=self.path,\n hexdigests={\n \"md5\": self.expected_md5,\n },\n force=True,\n )\n" ]
[ [ "pandas.DataFrame", "numpy.array", "numpy.array_equal" ] ]
exchhattu/MolPro-pyTorch
[ "8dfeb0be4b855741fbc66396a27d7ae0607e161d" ]
[ "Data.py" ]
[ "'''\nWritten by: Rojan Shrestha PhD\nMon Nov 18 17:35:38 2019\n'''\n\nimport sys, os, errno\n\nimport numpy as np\nimport sklearn.utils as sku # used by generate_subsets below\n\nimport flowio # pip install FlowIO\n\nclass FCdata:\n \n def __init__(self, path_to_dir, path_to_label_data, path_to_marker):\n \"\"\"\n \n Params:\n st_path_to_file: \n st_path_to_label: \n st_path_to_marker: \n \"\"\"\n self._ma_data = {}\n\n self._Xtrains = []\n self._Xvalids = []\n self._Xtest = []\n self._Ytrains = []\n self._Yvalids = []\n self._Ytest = []\n\n # get ...\n oj_idata = self.iData(path_to_dir, path_to_label_data, path_to_marker)\n print(\"coding: 23 \", oj_idata._ts_samples)\n print(\"coding: 12 \", oj_idata._ts_phenotypes) \n # for st_path, st_label in oj_idata._ma_labels.items():\n # print(\"Coding: \", st_path, st_label)\n # ar_events, ts_channels = oj_idata.read_flowdata(st_path, \n # markers = oj_idata._ts_markers, \n # transform=None, \n # auto_comp=False)\n # self._ma_data[st_path] = ar_events\n\n\n def load_data(self): \n \"\"\"\n \n \"\"\"\n in_num_sample = len(self._ma_labels)\n in_train_sample = int(0.70*in_num_sample)\n in_valid_sample = int(0.15*in_num_sample)\n in_test_sample = int(0.15*in_num_sample)\n\n ar_idx = np.random.permutation(in_num_sample)\n ar_keys = np.array(list(self._ma_labels.keys()))\n\n # Split the shuffled sample keys into train/valid/test partitions.\n train_keys = ar_keys[ar_idx[:in_train_sample]]\n valid_keys = ar_keys[ar_idx[in_train_sample:in_train_sample+in_valid_sample]]\n test_keys = ar_keys[ar_idx[-in_test_sample:]]\n self._Xtrains = []\n self._Xvalids = []\n self._Xtest = []\n self._Ytrains = []\n self._Yvalids = []\n self._Ytest = []\n\n # return ...\n\n def combine_samples(self, data_list, sample_id):\n \"\"\"\n Aims: merge multiple samples together, each identified by its sample id.\n indices of data_list and sample_id should be synchronized.\n Params: \n data_list - list of sample data\n sample_id - list of sample ids \n \"\"\"\n accum_x, accum_y = [], []\n for x, y in zip(data_list, sample_id):\n accum_x.append(x)\n accum_y.append(y * np.ones(x.shape[0], dtype=int))\n return np.vstack(accum_x), np.hstack(accum_y)\n\n def generate_subsets(self, X, pheno_map, ts_sample_ids, nsubsets=1000, \n ncell=200, per_sample=False, k_init=False):\n \"\"\"\n Aims: generate the data ready for the pytorch model. This data generation\n is very problem specific. Each patient has nsubsets data and \n each contains ncell.\n\n Params:\n\n \"\"\"\n S = dict()\n n_unique_sample = len(np.unique(ts_sample_ids))\n\n # create N subset samples for each patient. each subset contains \n # N randomly selected cells \n for n_sample_id in range(n_unique_sample):\n X_i = X[np.where(ts_sample_ids == n_sample_id)]\n S[n_sample_id] = self.per_sample_subsets(X_i, nsubsets, ncell, k_init)\n # contains 3D data\n\n # interesting going here - onward data will not keep track of patient\n # information instead there will be phenotype. 
Since S.values() is\n # three dimensional array, patient specific data is not mingled with\n # others\n data_list, y_list = [], []\n for y_i, x_i in S.items(): # y_i: patient ids and x_i: their corresponding cells \n data_list.append(x_i)\n y_list.append(pheno_map[y_i] * np.ones(x_i.shape[0], dtype=int))\n\n Xt = np.vstack(data_list)\n yt = np.hstack(y_list)\n Xt, yt = sku.shuffle(Xt, yt)\n return Xt, yt\n\n def per_sample_subsets(self, X, nsubsets, ncell_per_subset, k_init=False):\n \"\"\"\n Aims: prepare the dimensions ready for input to the deep learning model\n\n Params:\n \n \"\"\"\n nmark = X.shape[1]\n shape = (nsubsets, nmark, ncell_per_subset)\n Xres = np.zeros(shape)\n\n # NOTE: random_subsample and kmeans_subsample are sampling helpers that are\n # not defined in this file and are expected to be provided elsewhere.\n if not k_init:\n for i in range(nsubsets):\n X_i = random_subsample(X, ncell_per_subset)\n Xres[i] = X_i.T\n else:\n for i in range(nsubsets):\n X_i = random_subsample(X, 2000)\n X_i = kmeans_subsample(X_i, ncell_per_subset, random_state=i)\n Xres[i] = X_i.T\n return Xres\n\n\n class iData:\n \n def __init__(self, path_to_dir, path_to_label, path_to_marker, cofactor=5):\n\n self._ma_labels = dict() \n self._ts_markers = []\n\n self.read_labels(path_to_label) # label either positive or neutral\n self.read_markers(path_to_marker) # marker of each cell\n\n self._ts_samples = []\n self._ts_phenotypes = []\n\n # read all files with suffix .fcs from the given directory.\n for fname, flabel in self._ma_labels.items():\n full_path = os.path.join(path_to_dir, fname)\n ar_events, ts_channels = self.read_flowdata(full_path, transform=None, auto_comp=False)\n\n ts_marker_idx = [ts_channels.index(name) for name in self._ts_markers]\n x = ar_events[:, ts_marker_idx]\n x = np.arcsinh(1./cofactor * x)\n self._ts_samples.append(x)\n self._ts_phenotypes.append(flabel)\n\n def read_labels(self, path_to_label):\n \"\"\"\n Read the label of each mass cytometry file and store into dictionary\n\n Params:\n path_to_label: path to label file\n \"\"\"\n\n if os.path.exists(path_to_label):\n with open(path_to_label, \"r\") as oj_path:\n ts_fcm_files = oj_path.read().split(\"\\n\")\n for st_fcm_file in ts_fcm_files:\n if not st_fcm_file: continue\n ts_parts = st_fcm_file.split(\",\")\n if ts_parts[0] == 'fcs_filename' and ts_parts[1] == 'label': continue\n self._ma_labels[ts_parts[0]] = ts_parts[1]\n else: \n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path_to_label)\n\n\n def read_markers(self, path_to_marker):\n \"\"\"\n Read markers and store into list \n\n Params:\n path_to_marker: path to marker file\n \"\"\"\n if os.path.exists(path_to_marker):\n with open(path_to_marker, \"r\") as oj_path:\n ts_markers = oj_path.read().split(\"\\n\")[0].split(\",\")\n self._ts_markers = [st_marker for st_marker in ts_markers if st_marker]\n else:\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path_to_marker)\n\n \n def read_flowdata(self, path_to_file, *args, **kwargs):\n \"\"\"\n Aims:\n\n Params:\n path_to_file: path to file fcs \n markers: list of selected markers \n\n Returns: \n \"\"\"\n # st_fupath = os.path.join(path_to_dir, path_to_file) \n print(\"Coding:\", path_to_file)\n oj_f = flowio.FlowData(path_to_file)\n ar_events = np.reshape(oj_f.events, (-1, oj_f.channel_count))\n \n ts_channels = []\n for i in range(1, oj_f.channel_count+1):\n key = str(i)\n if 'PnS' in oj_f.channels[key] and oj_f.channels[key]['PnS'] != u' ':\n ts_channels.append(oj_f.channels[key]['PnS'])\n elif 'PnN' in oj_f.channels[key] and oj_f.channels[key]['PnN'] != u' ':\n ts_channels.append(oj_f.channels[key]['PnN'])\n else:\n 
ts_channels.append('None')\n\n return ar_events, ts_channels\n \n ### def load_data(path_to_dir)\n ### \"\"\"\n ### Aims: read the files from given directory\n \n ### Params:\n ### path_to_dir: path to directory where files are located\n ### \"\"\"\n \n ### ts_files = os.listdir(path_to_dir)\n ### if not ts_files:\n ### print(\"[FATAL]: no files in %s\" %path_to_dir)\n ### sys.exit(0)\n \n ### for st_file in ts_files:\n ### if st_file.endswith(\".fcs\"):\n\ndef test():\n path_to_dir = \"./data/gated_NK/\" \n path_to_label = \"./data/NK_fcs_samples_with_labels.csv\"\n path_to_marker = \"./data/nk_marker.csv\"\n o_fc_data = FCdata(path_to_dir, path_to_label, path_to_marker) \n\n\n\ntest()\n\n\n" ]
[ [ "numpy.reshape", "numpy.zeros", "numpy.random.permutation", "numpy.ones", "numpy.where", "numpy.unique", "numpy.hstack", "numpy.arcsinh", "numpy.vstack" ] ]
RasmusVestiH/RV_cds_language2
[ "61472e7cb385c6ca578dce2f4301fb27666e058b" ]
[ "as5env/lib/python3.6/site-packages/spacy/training/initialize.py" ]
[ "from typing import Union, Dict, Optional, Any, IO, TYPE_CHECKING\nfrom thinc.api import Config, fix_random_seed, set_gpu_allocator\nfrom thinc.api import ConfigValidationError\nfrom pathlib import Path\nimport srsly\nimport numpy\nimport tarfile\nimport gzip\nimport zipfile\nimport tqdm\n\nfrom .pretrain import get_tok2vec_ref\nfrom ..lookups import Lookups\nfrom ..vectors import Vectors\nfrom ..errors import Errors, Warnings\nfrom ..schemas import ConfigSchemaTraining\nfrom ..util import registry, load_model_from_config, resolve_dot_names, logger\nfrom ..util import load_model, ensure_path, get_sourced_components\nfrom ..util import OOV_RANK, DEFAULT_OOV_PROB\n\nif TYPE_CHECKING:\n from ..language import Language # noqa: F401\n\n\ndef init_nlp(config: Config, *, use_gpu: int = -1) -> \"Language\":\n raw_config = config\n config = raw_config.interpolate()\n if \"seed\" not in config[\"training\"]:\n raise ValueError(Errors.E1015.format(value=\"[training] seed\"))\n if \"gpu_allocator\" not in config[\"training\"]:\n raise ValueError(Errors.E1015.format(value=\"[training] gpu_allocator\"))\n if config[\"training\"][\"seed\"] is not None:\n fix_random_seed(config[\"training\"][\"seed\"])\n allocator = config[\"training\"][\"gpu_allocator\"]\n if use_gpu >= 0 and allocator:\n set_gpu_allocator(allocator)\n # Use original config here before it's resolved to functions\n sourced = get_sourced_components(config)\n nlp = load_model_from_config(raw_config, auto_fill=True)\n logger.info(\"Set up nlp object from config\")\n config = nlp.config.interpolate()\n # Resolve all training-relevant sections using the filled nlp config\n T = registry.resolve(config[\"training\"], schema=ConfigSchemaTraining)\n dot_names = [T[\"train_corpus\"], T[\"dev_corpus\"]]\n if not isinstance(T[\"train_corpus\"], str):\n raise ConfigValidationError(\n desc=Errors.E897.format(\n field=\"training.train_corpus\", type=type(T[\"train_corpus\"])\n )\n )\n if not isinstance(T[\"dev_corpus\"], str):\n raise ConfigValidationError(\n desc=Errors.E897.format(\n field=\"training.dev_corpus\", type=type(T[\"dev_corpus\"])\n )\n )\n train_corpus, dev_corpus = resolve_dot_names(config, dot_names)\n optimizer = T[\"optimizer\"]\n # Components that shouldn't be updated during training\n frozen_components = T[\"frozen_components\"]\n # Sourced components that require resume_training\n resume_components = [p for p in sourced if p not in frozen_components]\n logger.info(f\"Pipeline: {nlp.pipe_names}\")\n if resume_components:\n with nlp.select_pipes(enable=resume_components):\n logger.info(f\"Resuming training for: {resume_components}\")\n nlp.resume_training(sgd=optimizer)\n # Make sure that listeners are defined before initializing further\n nlp._link_components()\n with nlp.select_pipes(disable=[*frozen_components, *resume_components]):\n nlp.initialize(lambda: train_corpus(nlp), sgd=optimizer)\n logger.info(f\"Initialized pipeline components: {nlp.pipe_names}\")\n # Detect components with listeners that are not frozen consistently\n for name, proc in nlp.pipeline:\n if getattr(proc, \"listening_components\", None): # e.g. 
tok2vec/transformer\n for listener in proc.listening_components:\n if listener in frozen_components and name not in frozen_components:\n logger.warning(Warnings.W087.format(name=name, listener=listener))\n # We always check this regardless, in case user freezes tok2vec\n if listener not in frozen_components and name in frozen_components:\n logger.warning(Warnings.W086.format(name=name, listener=listener))\n return nlp\n\n\ndef init_vocab(\n nlp: \"Language\",\n *,\n data: Optional[Path] = None,\n lookups: Optional[Lookups] = None,\n vectors: Optional[str] = None,\n) -> \"Language\":\n if lookups:\n nlp.vocab.lookups = lookups\n logger.info(f\"Added vocab lookups: {', '.join(lookups.tables)}\")\n data_path = ensure_path(data)\n if data_path is not None:\n lex_attrs = srsly.read_jsonl(data_path)\n for lexeme in nlp.vocab:\n lexeme.rank = OOV_RANK\n for attrs in lex_attrs:\n if \"settings\" in attrs:\n continue\n lexeme = nlp.vocab[attrs[\"orth\"]]\n lexeme.set_attrs(**attrs)\n if len(nlp.vocab):\n oov_prob = min(lex.prob for lex in nlp.vocab) - 1\n else:\n oov_prob = DEFAULT_OOV_PROB\n nlp.vocab.cfg.update({\"oov_prob\": oov_prob})\n logger.info(f\"Added {len(nlp.vocab)} lexical entries to the vocab\")\n logger.info(\"Created vocabulary\")\n if vectors is not None:\n load_vectors_into_model(nlp, vectors)\n logger.info(f\"Added vectors: {vectors}\")\n logger.info(\"Finished initializing nlp object\")\n\n\ndef load_vectors_into_model(\n nlp: \"Language\", name: Union[str, Path], *, add_strings: bool = True\n) -> None:\n \"\"\"Load word vectors from an installed model or path into a model instance.\"\"\"\n try:\n vectors_nlp = load_model(name)\n except ConfigValidationError as e:\n title = f\"Config validation error for vectors {name}\"\n desc = (\n \"This typically means that there's a problem in the config.cfg included \"\n \"with the packaged vectors. Make sure that the vectors package you're \"\n \"loading is compatible with the current version of spaCy.\"\n )\n err = ConfigValidationError.from_error(e, title=title, desc=desc)\n raise err from None\n nlp.vocab.vectors = vectors_nlp.vocab.vectors\n if add_strings:\n # I guess we should add the strings from the vectors_nlp model?\n # E.g. if someone does a similarity query, they might expect the strings.\n for key in nlp.vocab.vectors.key2row:\n if key in vectors_nlp.vocab.strings:\n nlp.vocab.strings.add(vectors_nlp.vocab.strings[key])\n\n\ndef init_tok2vec(\n nlp: \"Language\", pretrain_config: Dict[str, Any], init_config: Dict[str, Any]\n) -> bool:\n # Load pretrained tok2vec weights - cf. 
CLI command 'pretrain'\n P = pretrain_config\n I = init_config\n weights_data = None\n init_tok2vec = ensure_path(I[\"init_tok2vec\"])\n if init_tok2vec is not None:\n if not init_tok2vec.exists():\n err = f\"can't find pretrained tok2vec: {init_tok2vec}\"\n errors = [{\"loc\": [\"initialize\", \"init_tok2vec\"], \"msg\": err}]\n raise ConfigValidationError(config=nlp.config, errors=errors)\n with init_tok2vec.open(\"rb\") as file_:\n weights_data = file_.read()\n if weights_data is not None:\n layer = get_tok2vec_ref(nlp, P)\n layer.from_bytes(weights_data)\n logger.info(f\"Loaded pretrained weights from {init_tok2vec}\")\n return True\n return False\n\n\ndef convert_vectors(\n nlp: \"Language\",\n vectors_loc: Optional[Path],\n *,\n truncate: int,\n prune: int,\n name: Optional[str] = None,\n) -> None:\n vectors_loc = ensure_path(vectors_loc)\n if vectors_loc and vectors_loc.parts[-1].endswith(\".npz\"):\n nlp.vocab.vectors = Vectors(data=numpy.load(vectors_loc.open(\"rb\")))\n for lex in nlp.vocab:\n if lex.rank and lex.rank != OOV_RANK:\n nlp.vocab.vectors.add(lex.orth, row=lex.rank)\n else:\n if vectors_loc:\n logger.info(f\"Reading vectors from {vectors_loc}\")\n vectors_data, vector_keys = read_vectors(vectors_loc, truncate)\n logger.info(f\"Loaded vectors from {vectors_loc}\")\n else:\n vectors_data, vector_keys = (None, None)\n if vector_keys is not None:\n for word in vector_keys:\n if word not in nlp.vocab:\n nlp.vocab[word]\n if vectors_data is not None:\n nlp.vocab.vectors = Vectors(data=vectors_data, keys=vector_keys)\n if name is None:\n # TODO: Is this correct? Does this matter?\n nlp.vocab.vectors.name = f\"{nlp.meta['lang']}_{nlp.meta['name']}.vectors\"\n else:\n nlp.vocab.vectors.name = name\n nlp.meta[\"vectors\"][\"name\"] = nlp.vocab.vectors.name\n if prune >= 1:\n nlp.vocab.prune_vectors(prune)\n\n\ndef read_vectors(vectors_loc: Path, truncate_vectors: int):\n f = ensure_shape(vectors_loc)\n shape = tuple(int(size) for size in next(f).split())\n if truncate_vectors >= 1:\n shape = (truncate_vectors, shape[1])\n vectors_data = numpy.zeros(shape=shape, dtype=\"f\")\n vectors_keys = []\n for i, line in enumerate(tqdm.tqdm(f)):\n line = line.rstrip()\n pieces = line.rsplit(\" \", vectors_data.shape[1])\n word = pieces.pop(0)\n if len(pieces) != vectors_data.shape[1]:\n raise ValueError(Errors.E094.format(line_num=i, loc=vectors_loc))\n vectors_data[i] = numpy.asarray(pieces, dtype=\"f\")\n vectors_keys.append(word)\n if i == truncate_vectors - 1:\n break\n return vectors_data, vectors_keys\n\n\ndef open_file(loc: Union[str, Path]) -> IO:\n \"\"\"Handle .gz, .tar.gz or unzipped files\"\"\"\n loc = ensure_path(loc)\n if tarfile.is_tarfile(str(loc)):\n return tarfile.open(str(loc), \"r:gz\")\n elif loc.parts[-1].endswith(\"gz\"):\n return (line.decode(\"utf8\") for line in gzip.open(str(loc), \"r\"))\n elif loc.parts[-1].endswith(\"zip\"):\n zip_file = zipfile.ZipFile(str(loc))\n names = zip_file.namelist()\n file_ = zip_file.open(names[0])\n return (line.decode(\"utf8\") for line in file_)\n else:\n return loc.open(\"r\", encoding=\"utf8\")\n\n\ndef ensure_shape(vectors_loc):\n \"\"\"Ensure that the first line of the data is the vectors shape.\n If it's not, we read in the data and output the shape as the first result,\n so that the reader doesn't have to deal with the problem.\n \"\"\"\n lines = open_file(vectors_loc)\n first_line = next(lines)\n try:\n shape = tuple(int(size) for size in first_line.split())\n except ValueError:\n shape = None\n if shape is not None:\n # 
All good, give the data\n yield first_line\n yield from lines\n else:\n # Figure out the shape, make it the first value, and then give the\n # rest of the data.\n width = len(first_line.split()) - 1\n length = 1\n for _ in lines:\n length += 1\n yield f\"{length} {width}\"\n # Reading the lines in again from file. This to avoid having to\n # store all the results in a list in memory\n lines2 = open_file(vectors_loc)\n yield from lines2\n" ]
[ [ "numpy.asarray", "numpy.zeros" ] ]
FabricioSMarin/XRFtomo
[ "f5c9f6411bea9a85082d943af873e83f81431e5a" ]
[ "xrftomo/widgets/reconstruction.py" ]
[ "# #########################################################################\n# Copyright © 2020, UChicago Argonne, LLC. All Rights Reserved. #\n# #\n# Software Name: XRFtomo #\n# #\n# By: Argonne National Laboratory #\n# #\n# OPEN SOURCE LICENSE #\n# #\n# Redistribution and use in source and binary forms, with or without #\n# modification, are permitted provided that the following conditions #\n# are met: #\n# #\n# 1. Redistributions of source code must retain the above copyright #\n# notice, this list of conditions and the following disclaimer. #\n# #\n# 2. Redistributions in binary form must reproduce the above copyright #\n# notice, this list of conditions and the following disclaimer in #\n# the documentation and/or other materials provided with the #\n# distribution. #\n# #\n# 3. Neither the name of the copyright holder nor the names of its #\n# contributors may be used to endorse or promote products derived #\n# from this software without specific prior written permission. #\n# #\n# DISCLAIMER #\n# #\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY #\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT #\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#\n###########################################################################\n\n\nfrom PyQt5 import QtWidgets, QtCore, QtGui\nfrom PyQt5.QtCore import pyqtSignal\nimport xrftomo\nimport pyqtgraph\nimport numpy as np\n\nclass ReconstructionWidget(QtWidgets.QWidget):\n elementChangedSig = pyqtSignal(int, name='elementChangedSig')\n sldRangeChanged = pyqtSignal(int, np.ndarray, np.ndarray, name='sldRangeChanged')\n reconChangedSig = pyqtSignal(np.ndarray, name='reconChangedSig')\n reconArrChangedSig = pyqtSignal(np.ndarray, name='reconArrChangedSig')\n\n def __init__(self):\n super(ReconstructionWidget, self).__init__()\n self.initUI()\n\n def initUI(self):\n self.ViewControl = xrftomo.ReconstructionControlsWidget()\n self.ReconView = xrftomo.ReconView(self)\n self.actions = xrftomo.ReconstructionActions()\n self.actions2 = xrftomo.ImageProcessActions()\n self.writer = xrftomo.SaveOptions()\n\n self.file_name_title = QtWidgets.QLabel(\"_\")\n lbl1 = QtWidgets.QLabel(\"x pos:\")\n self.lbl2 = QtWidgets.QLabel(\"\")\n lbl3 = QtWidgets.QLabel(\"y pos:\")\n self.lbl4 = QtWidgets.QLabel(\"\")\n lbl5 = QtWidgets.QLabel(\"Slice\")\n lbl6 = QtWidgets.QLabel(\"value:\")\n self.lbl7 = QtWidgets.QLabel(\"\")\n\n self.ReconView.mouseMoveSig.connect(self.updatePanel)\n #get pixel value from Histogram widget's projview \n\n self.sld = QtWidgets.QSlider(QtCore.Qt.Horizontal, self)\n self.lcd = QtWidgets.QLCDNumber(self)\n self.hist = pyqtgraph.HistogramLUTWidget()\n self.hist.setMinimumSize(120,120)\n self.hist.setMaximumWidth(120)\n self.hist.setImageItem(self.ReconView.projView)\n\n self.ViewControl.combo1.currentIndexChanged.connect(self.elementChanged)\n self.ViewControl.reconGroup.currentIndexChanged.connect(self.recon_combobox_changed)\n self.ViewControl.btn.clicked.connect(self.reconstruct_params)\n self.ViewControl.equalizeBtn.clicked.connect(self.equalize_params)\n self.ViewControl.rmHotspotBtn.clicked.connect(self.rm_hotspot_params)\n self.ViewControl.setThreshBtn.clicked.connect(self.set_thresh_params)\n\n\n self.ViewControl.btn2.clicked.connect(self.reconstruct_all_params)\n self.ViewControl.recon2npy.clicked.connect(self.reconstruct_all_npy_params)\n self.ViewControl.mulBtn.clicked.connect(self.call_reconMultiply)\n self.ViewControl.divBtn.clicked.connect(self.call_reconDivide)\n self.ViewControl.end_indx.editingFinished.connect(self.update_y_range)\n self.ViewControl.start_indx.editingFinished.connect(self.update_y_range)\n self.ViewControl.mid_indx.editingFinished.connect(self.update_middle_index)\n self.ViewControl.recon_stats.clicked.connect(self.toggle_middle_index)\n self.sld.valueChanged.connect(self.update_recon_image)\n\n self.x_shifts = None\n self.y_shifts = None\n self.centers = None\n self.recon = None\n self.recon_array = None\n self.data = None\n self.data_original = None\n\n hb0 = QtWidgets.QHBoxLayout()\n hb0.addWidget(lbl1)\n hb0.addWidget(self.lbl2)\n hb0.addWidget(lbl3)\n hb0.addWidget(self.lbl4)\n hb0.addWidget(lbl6)\n hb0.addWidget(self.lbl7)\n\n hb1 = QtWidgets.QHBoxLayout()\n hb1.addWidget(lbl5)\n hb1.addWidget(self.lcd)\n hb1.addWidget(self.sld)\n\n vb1 = QtWidgets.QVBoxLayout()\n vb1.addWidget(self.file_name_title)\n vb1.addLayout(hb0)\n vb1.addWidget(self.ReconView)\n vb1.addLayout(hb1)\n\n hb2 = QtWidgets.QHBoxLayout()\n hb2.addWidget(self.ViewControl)\n hb2.addLayout(vb1)\n hb2.addWidget(self.hist, 10)\n\n self.setLayout(hb2)\n\n def updatePanel(self,x,y):\n self.lbl2.setText(str(x))\n self.lbl4.setText(str(y))\n try:\n pixel_val = 
round(self.ReconView.projView.image[abs(y)-1,x],4)\n            self.lbl7.setText(str(pixel_val))\n        except:\n            self.lbl7.setText(\"\")\n\n    def showReconstruct(self):\n        '''\n        load window for reconstruction window\n        '''\n        self.write = xrftomo.SaveOptions()\n        self.actions.x_shifts = self.x_shifts\n        self.actions.y_shifts = self.y_shifts\n        self.actions.centers = self.centers\n        self.y_range = self.data.shape[2]\n\n        self.ViewControl.combo1.clear()\n        self.ViewControl.method.clear()\n        self.ViewControl.reconGroup.clear()\n        self.ViewControl.reconGroup.disconnect()\n        methodname = [\"mlem\", \"gridrec\", \"art\", \"pml_hybrid\", \"pml_quad\", \"fbp\", \"sirt\", \"tv\"]\n        for j in self.elements:\n            self.ViewControl.combo1.addItem(j)\n        for k in range(len(methodname)):\n            self.ViewControl.method.addItem(methodname[k])\n        for l in self.elements:\n            self.ViewControl.reconGroup.addItem(l)\n        self.recon_array = np.zeros((len(self.elements),self.y_range,self.data.shape[3],self.data.shape[3]))\n\n        self.ViewControl.reconGroup.currentIndexChanged.connect(self.recon_combobox_changed)\n        self.elementChanged()\n\n        #TODO: recon_array will need to update with any changes to data dimensions as well as re-initialization\n        # self.ViewControl.centerTextBox.setText(str(self.centers[2]))\n        self.ViewControl.mulBtn.setEnabled(False)\n        self.ViewControl.divBtn.setEnabled(False)\n        self.ViewControl.end_indx.setText(str(self.data.shape[2]))\n        self.ViewControl.mid_indx.setText(str(self.data.shape[2]//2))\n\n        self.sld.setRange(0, self.y_range - 1)\n        self.lcd.display(0)\n\n    def elementChanged(self):\n        element = self.ViewControl.combo1.currentIndex()\n        self.updateElementSlot(element)\n        self.elementChangedSig.emit(element)\n\n    def updateElementSlot(self, element):\n        self.ViewControl.combo1.setCurrentIndex(element)\n\n    def call_reconMultiply(self):\n        '''\n        multiply reconstruction by 10\n        '''\n        self.recon = self.actions.reconMultiply(self.recon)\n        self.update_recon_image()\n\n    def call_reconDivide(self):\n        '''\n        divide reconstruction by 10\n        '''\n        self.recon = self.actions.reconDivide(self.recon)\n        self.update_recon_image()\n\n    def reconstruct_params(self):\n        element = self.ViewControl.combo1.currentIndex()\n        center = np.array(float(self.data.shape[3]), dtype=np.float32)/2\n        method = self.ViewControl.method.currentIndex()\n        beta = float(self.ViewControl.beta.text())\n        delta = float(self.ViewControl.delta.text())\n        iters = int(self.ViewControl.iters.text())\n        thetas = self.thetas\n        end_indx = int(self.data.shape[2] - eval(self.ViewControl.start_indx.text()))\n        start_indx = int(self.data.shape[2] - eval(self.ViewControl.end_indx.text()))\n        mid_indx = int(self.data.shape[2] - eval(self.ViewControl.mid_indx.text())) - start_indx - 1\n\n        data = self.data[:,:,start_indx:end_indx,:]\n        show_stats = self.ViewControl.recon_stats.isChecked()\n        num_xsections = data.shape[2]\n\n        if self.ViewControl.recon_save.isChecked():\n            try:\n                # QFileDialog lives in QtWidgets, not QtGui, in PyQt5\n                savedir = QtWidgets.QFileDialog.getSaveFileName()[0]\n                # savedir = '/Users/fabriciomarin/Documents/scans/Lin_XRF_tomo/Lin_3D2/testing/ptycho'\n\n                if savedir == \"\":\n                    raise IOError\n                if savedir is None:\n                    return\n            except IOError:\n                print(\"type the header name\")\n            except:\n                print(\"Something went horribly wrong.\")\n\n            #reconstruct one cross section at a time and save after each loop/completion. 
\n recons = np.zeros((data.shape[2],data.shape[3], data.shape[3]))\n xsection = np.zeros((1,data.shape[1],1, data.shape[3]))\n start_idx = int(eval(self.ViewControl.start_indx.text()))\n for i in range(num_xsections):\n j = num_xsections-i-1\n xsection[0,:,0] = data[element,:,j]\n recon = self.actions.reconstruct(xsection, 0, center, method, beta, delta, iters, thetas, 0, False)\n recons[i] = recon\n self.writer.save_reconstruction(recon, savedir, start_idx+i)\n self.recon = np.array(recons)\n else:\n self.recon = self.actions.reconstruct(data, element, center, method, beta, delta, iters, thetas, mid_indx, show_stats)\n \n self.ViewControl.mulBtn.setEnabled(True)\n self.ViewControl.divBtn.setEnabled(True)\n self.update_recon_image()\n self.update_recon_array(self.recon)\n self.reconChangedSig.emit(self.recon)\n self.reconArrChangedSig.emit(self.recon_array)\n return\n\n def reconstruct_all_params(self):\n #figure out how to get a list of all selected elements\n num_elements = self.ViewControl.combo1.count()\n element_names = [self.ViewControl.combo1.itemText(i) for i in range(num_elements)]\n # box_checked = self.ViewControl.cbox.isChecked()\n center = np.array(float(self.data.shape[3]), dtype=np.float32)/2\n method = self.ViewControl.method.currentIndex()\n beta = float(self.ViewControl.beta.text())\n delta = float(self.ViewControl.delta.text())\n iters = int(self.ViewControl.iters.text())\n thetas = self.thetas\n end_indx = int(self.data.shape[2] - eval(self.ViewControl.start_indx.text()))\n start_indx = int(self.data.shape[2] - eval(self.ViewControl.end_indx.text()))\n mid_indx = int(self.data.shape[2] - eval(self.ViewControl.mid_indx.text()))\n data = self.data[:,:,start_indx:end_indx,:]\n\n self.recon = self.actions.reconstructAll(data, element_names, center, method, beta, delta, iters, thetas)\n self.ViewControl.mulBtn.setEnabled(True)\n self.ViewControl.divBtn.setEnabled(True)\n self.update_recon_image()\n self.reconChangedSig.emit(self.recon)\n return\n\n def reconstruct_all_npy_params(self):\n #figure out how to get a list of all selected elements\n num_elements = self.ViewControl.combo1.count()\n element_names = [self.ViewControl.combo1.itemText(i) for i in range(num_elements)]\n # box_checked = self.ViewControl.cbox.isChecked()\n center = np.array(float(self.data.shape[3]), dtype=np.float32)/2\n method = self.ViewControl.method.currentIndex()\n beta = float(self.ViewControl.beta.text())\n delta = float(self.ViewControl.delta.text())\n iters = int(self.ViewControl.iters.text())\n thetas = self.thetas\n end_indx = int(self.data.shape[2] - eval(self.ViewControl.start_indx.text()))\n start_indx = int(self.data.shape[2] - eval(self.ViewControl.end_indx.text()))\n mid_indx = int(self.data.shape[2] - eval(self.ViewControl.mid_indx.text()))\n data = self.data[:,:,start_indx:end_indx,:]\n\n #reconArray [element,stack,y,x]\n num_elements = data.shape[0]\n num_slices = data.shape[2]\n slice_dim = data.shape[3]\n\n for i in range(num_elements):\n self.recon = self.actions.reconstruct(data, i, center, method, beta, delta, iters, thetas, mid_indx, False)\n self.recon_array[i] = self.recon\n self.update_recon_image()\n self.reconChangedSig.emit(self.recon)\n self.writer.save_recon_array_2npy(self.recon_array, savedir=None, index=-1)\n return\n\n def ySizeChanged(self, ySize):\n self.ViewControl.start_indx.setText('0')\n self.ViewControl.end_indx.setText(str(ySize))\n self.ViewControl.mid_indx.setText(str(ySize//2))\n self.sld.setValue(0)\n self.sld.setMaximum(ySize)\n self.recon_array = 
np.zeros((len(self.elements),ySize,self.data.shape[3],self.data.shape[3]))\n #check for xSize too.\n return\n \n def xSizeChanged(self, xSize):\n self.recon_array = np.zeros((len(self.elements),self.data.shape[2],xSize,xSize))\n return\n\n def update_y_range(self):\n start_indx = int(self.ViewControl.start_indx.text())\n end_indx = int(self.ViewControl.end_indx.text())\n if end_indx >self.data.shape[2]:\n end_indx = self.data.shape[2]\n self.ViewControl.end_indx.setText(str(end_indx))\n if end_indx <= 0:\n end_indx = self.data.shape[2]\n self.ViewControl.end_indx.setText(str(end_indx))\n if start_indx >=end_indx:\n self.ViewControl.start_indx.setText(str(end_indx-1))\n if start_indx < 0:\n self.ViewControl.start_indx.setText(str(0))\n self.update_middle_index()\n\n self.sld.setRange(0, end_indx-start_indx - 1)\n self.sld.setValue(0)\n self.lcd.display(0)\n\n def update_middle_index(self):\n start_indx = int(self.ViewControl.start_indx.text())\n end_indx = int(self.ViewControl.end_indx.text())\n mid_indx = int(self.ViewControl.mid_indx.text())\n if mid_indx == -1:\n mid_indx = end_indx//2\n if mid_indx > end_indx:\n mid_indx = end_indx\n self.ViewControl.mid_indx.setText(str(mid_indx))\n if mid_indx < start_indx:\n mid_indx = start_indx\n self.ViewControl.mid_indx.setText(str(mid_indx))\n\n def toggle_middle_index(self):\n if self.ViewControl.recon_stats.isChecked():\n self.ViewControl.mid_indx.setEnabled(True)\n else:\n self.ViewControl.mid_indx.setEnabled(False)\n\n def equalize_params(self):\n recon = self.recon \n recon = self.actions.equalize_recon(recon)\n self.update_recon_image()\n \n def rm_hotspot_params(self):\n recon = self.recon \n recon = self.actions.remove_hotspots(recon)\n self.update_recon_image()\n\n def set_thresh_params(self):\n recon = self.recon \n threshold = float(self.ViewControl.lThresh.text())\n recon = self.actions.setThreshold(threshold,recon)\n self.update_recon_image()\n\n def update_recon_array(self, recon):\n indx = self.ViewControl.combo1.currentIndex() \n #recon could be a partial reconstruction, account for this by indexing the Y range as well \n ymin = int(eval(self.ViewControl.start_indx.text()))\n ymax = int(eval(self.ViewControl.end_indx.text()))\n\n self.recon_array[indx,ymin:ymax,:] = recon\n\n def recon_combobox_changed(self):\n indx = self.ViewControl.reconGroup.currentIndex()\n recon = self.recon_array[indx]\n self.recon = recon\n self.update_recon_image()\n\n def update_recon_image(self):\n index = self.sld.value()\n self.lcd.display(index)\n\n try:\n self.ViewControl.maxText.setText(str(self.recon[index, :, :].max()))\n self.ViewControl.minText.setText(str(self.recon[index, :, :].min()))\n self.ReconView.projView.setImage(self.recon[index, :, :])\n except:\n print(\"run reconstruction first\")\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
VasilisNtousis/Hilti_Project_Cyber_Aware
[ "175f9343462175de5d0f24eacf285bd27f12b399" ]
[ "src/create_es_df_to_csv.py" ]
[ "import pandas as pd \n\ndef awareness_dataset():\n sec_df = pd.read_excel('IT Security Awareness Global report.xlsx')\n\n sec_df = sec_df.drop(columns=['Username','FunctionalAreaName','CostCenterName',\n 'Time spent on test','Community name','Content name',\n 'Course specified approver','TradeAssignment(Attribute8)',\n 'SalesOrganizationName','SalesOrganizationCode','FunctionalAreaCode',\n 'FirstName','LastName',\"Creator approver\",\"Manager approver\",\"Specified Approver\",\n \"Approval date\",'Suspend data'])\n\n sec_df[[\"Last login\", \"Last activity\",\"Registration date\", \"Created\",\"Date course completed\"]].apply(\n pd.to_datetime,format=\"%Y-%m-%dT%H:%M:%S\",errors='coerce')\n \n sec_df[\"Last login\"]=sec_df[\"Last login\"].dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n sec_df[\"Last activity\"] = sec_df[\"Last activity\"].dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n sec_df[\"Registration date\"] = sec_df[\"Registration date\"].dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n sec_df[\"Date course completed\"] = sec_df[\"Date course completed\"].dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n\n\n sec_df.to_csv('hilti.csv',index=False)\n\ndef phising_dataset():\n xls = pd.ExcelFile('../DataPhishingReport.xlsx')\n df1 = pd.read_excel(xls,'Attachments')\n df2 = pd.read_excel(xls,'Blank')\n df3 = pd.read_excel(xls,'Spam')\n df4 = pd.read_excel(xls,\"RealPhishing\")\n df5 = pd.read_excel(xls,\"Internal Phishing\")\n\n df1.insert(3,'Filename','Attachments')\n df2.insert(3,'Filename','Blank')\n df3.insert(3,'Filename','Spam')\n df4.insert(3,'Filename','RealPhising')\n df5.insert(3,'Filename','Internal Phising')\n\n\n df4 = df4.drop(['Sender','TicketNr','More than 1 recipient'],axis=1)\n df4= df4.reindex(['Subject',\"Date\",'UserID','Filename'],axis=1)\n df4.columns = ['Subject','Received','Sender','Filename']\n\n df_list = [df1,df2,df3,df4,df5]\n\n dataset = pd.concat(df_list)\n dataset = dataset.reset_index()\n dataset['Received'] = pd.to_datetime(dataset['Received'], errors='coerce')\n dataset['Received']=dataset['Received'].dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n dataset = dataset.drop(['index'],axis=1)\n dataset.to_csv('dataPhising.csv',index=False)\n\nif __name__ == \"__main__\":\n awareness_dataset()\n phising_dataset()" ]
[ [ "pandas.ExcelFile", "pandas.read_excel", "pandas.to_datetime", "pandas.concat" ] ]
xinpl/Polaris
[ "2a9717980ab03393f7f5ba646bc58c8a3fbd1b9b" ]
[ "nnExplain/kerasUtils.py" ]
[ "from .NNModel import Layer, LayerKind\nfrom .utils import verbose\nfrom keras.models import load_model\nfrom keras.layers import Dense, Activation, Dropout, InputLayer\nimport tensorflow as tf\n\n\ndef createModelFromKerasModel(m, input_length):\n X = tf.placeholder(tf.float32, shape=[None, input_length])\n layers = m.layers\n layer_list = []\n last_output = X\n for l in layers:\n if isinstance(l,InputLayer):\n continue\n if isinstance(l,Dense):\n weights = l.get_weights()[0]\n biases = l.get_weights()[1]\n last_output = tf.matmul(last_output, tf.constant(weights))\n last_output = tf.add(last_output, tf.constant(biases))\n layer_list.append(Layer(LayerKind.dense, last_output, weights, biases))\n activation = l.get_config()['activation']\n if activation == 'relu':\n last_output = tf.nn.relu(last_output)\n layer_list.append(Layer(LayerKind.relu, last_output, None, None))\n elif activation == 'softmax':\n verbose(\"Warning: treating softmax as the output!\",0)\n elif isinstance(l, Dropout):\n continue\n else:\n raise ValueError(\"Cannot handle layer {}!\".format(l))\n return (m, layer_list, X, last_output)\n\n\ndef createModelFromKerasSave(path, input_length):\n m = load_model(path)\n return createModelFromKerasModel(m, input_length)\n" ]
[ [ "tensorflow.constant", "tensorflow.placeholder", "tensorflow.nn.relu" ] ]
ToWeRT1An/tensorpack
[ "f343e65b3c92fdf92cda7a90e8d7fd9df622b1b1" ]
[ "tensorpack/tfutils/summary.py" ]
[ "# -*- coding: utf-8 -*-\n# File: summary.py\n\n\nimport re\nfrom contextlib import contextmanager\nimport six\nfrom six.moves import range\nfrom tensorflow.python.training import moving_averages\n\nfrom ..compat import tfv1 as tf\nfrom ..utils import logger\nfrom ..utils.argtools import graph_memoized\nfrom ..utils.naming import MOVING_SUMMARY_OPS_KEY\nfrom .scope_utils import cached_name_scope\nfrom .symbolic_functions import rms\nfrom .tower import get_current_tower_context\n\n__all__ = ['add_tensor_summary', 'add_param_summary',\n 'add_activation_summary', 'add_moving_summary',\n ]\n\n\n# some scope stuff to use internally...\n@graph_memoized\ndef _get_cached_vs(name):\n with tf.variable_scope(name) as scope:\n return scope\n\n\n@contextmanager\ndef _enter_vs_reuse_ns(name):\n vs = _get_cached_vs(name)\n # XXX Not good to enter the cached vs directly, because this will clean-up custom getter\n # with tf.variable_scope(name, reuse=tf.AUTO_REUSE): # available in 1.4 only\n with tf.variable_scope(vs):\n with tf.name_scope(vs.original_name_scope):\n yield vs\n\n\ndef create_scalar_summary(name, v):\n \"\"\"\n Args:\n name (str):\n v (float): scalar value\n Returns:\n tf.Summary: a tf.Summary object with name and simple scalar value v.\n \"\"\"\n assert isinstance(name, six.string_types), type(name)\n v = float(v)\n s = tf.Summary()\n s.value.add(tag=name, simple_value=v)\n return s\n\n\ndef create_image_summary(name, val):\n \"\"\"\n Args:\n name(str):\n val(np.ndarray): 4D tensor of NHWC. assume RGB if C==3.\n Can be either float or uint8. Range has to be [0,255].\n\n Returns:\n tf.Summary:\n \"\"\"\n assert isinstance(name, six.string_types), type(name)\n n, h, w, c = val.shape\n val = val.astype('uint8')\n s = tf.Summary()\n imparams = [cv2.IMWRITE_PNG_COMPRESSION, 9]\n for k in range(n):\n arr = val[k]\n # CV2 will only write correctly in BGR chanel order\n if c == 3:\n arr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)\n elif c == 4:\n arr = cv2.cvtColor(arr, cv2.COLOR_RGBA2BGRA)\n tag = name if n == 1 else '{}/{}'.format(name, k)\n retval, img_str = cv2.imencode('.png', arr, imparams)\n if not retval:\n # Encoding has failed.\n continue\n img_str = img_str.tostring()\n\n img = tf.Summary.Image()\n img.height = h\n img.width = w\n # 1 - grayscale 3 - RGB 4 - RGBA\n img.colorspace = c\n img.encoded_image_string = img_str\n s.value.add(tag=tag, image=img)\n return s\n\n\ndef add_tensor_summary(x, types, name=None, collections=None,\n main_tower_only=True):\n \"\"\"\n Summarize a tensor by different methods.\n\n Args:\n x (tf.Tensor): a tensor to summarize\n types (list[str]): summary types, can be scalar/histogram/sparsity/mean/rms\n name (str): summary name. Defaults to be the op name.\n collections (list[str]): collections of the summary ops.\n main_tower_only (bool): Only run under main training tower. If\n set to True, calling this function under other TowerContext\n has no effect.\n\n Example:\n\n .. 
code-block:: python\n\n with tf.name_scope('mysummaries'): # to not mess up tensorboard\n add_tensor_summary(\n tensor, ['histogram', 'rms', 'sparsity'], name='mytensor')\n \"\"\"\n types = set(types)\n if name is None:\n name = x.op.name\n ctx = get_current_tower_context()\n if main_tower_only and ctx is not None and not ctx.is_main_training_tower:\n return\n\n SUMMARY_TYPES_DIC = {\n 'scalar': lambda: tf.summary.scalar(name + '-summary', x, collections=collections),\n 'histogram': lambda: tf.summary.histogram(name + '-histogram', x, collections=collections),\n 'sparsity': lambda: tf.summary.scalar(\n name + '-sparsity', tf.nn.zero_fraction(x),\n collections=collections),\n 'mean': lambda: tf.summary.scalar(\n name + '-mean', tf.reduce_mean(x),\n collections=collections),\n 'rms': lambda: tf.summary.scalar(\n name + '-rms', rms(x), collections=collections)\n }\n for typ in types:\n SUMMARY_TYPES_DIC[typ]()\n\n\ndef add_activation_summary(x, types=None, name=None, collections=None):\n \"\"\"\n Call :func:`add_tensor_summary` under a reused 'activation-summary' name scope.\n This function is a no-op if not calling from main training tower.\n\n Args:\n x (tf.Tensor): the tensor to summary.\n types (list[str]): summary types, defaults to ``['sparsity', 'rms', 'histogram']``.\n name (str): if is None, use x.name.\n collections (list[str]): collections of the summary ops.\n \"\"\"\n ndim = x.get_shape().ndims\n if ndim < 2:\n logger.warn(\"Cannot summarize scalar activation {}\".format(x.name))\n return\n if types is None:\n types = ['sparsity', 'rms', 'histogram']\n with cached_name_scope('activation-summary'):\n add_tensor_summary(x, types, name=name, collections=collections)\n\n\ndef add_param_summary(*summary_lists, **kwargs):\n \"\"\"\n Add summary ops for all trainable variables matching the regex, under a\n reused 'param-summary' name scope.\n This function is a no-op if not calling from main training tower.\n\n Args:\n summary_lists (list): each is (regex, [list of summary type]).\n Summary type is defined in :func:`add_tensor_summary`.\n collections (list[str]): collections of the summary ops.\n\n Example:\n\n .. code-block:: python\n\n add_param_summary(\n ('.*/W', ['histogram', 'rms']),\n ('.*/gamma', ['scalar']),\n )\n \"\"\"\n collections = kwargs.pop('collections', None)\n assert len(kwargs) == 0, \"Unknown kwargs: \" + str(kwargs)\n ctx = get_current_tower_context()\n if ctx is not None and not ctx.is_main_training_tower:\n return\n\n params = tf.all_variables()\n\n\n with cached_name_scope('param-summary'):\n for p in params:\n name = p.op.name\n for rgx, actions in summary_lists:\n if not rgx.endswith('$'):\n rgx = rgx + '$'\n if re.match(rgx, name):\n add_tensor_summary(p, actions, name=name, collections=collections)\n\n\ndef add_moving_summary(*args, **kwargs):\n \"\"\"\n Summarize the moving average for scalar tensors.\n This function is a no-op if not calling from main training tower.\n See tutorial at https://tensorpack.readthedocs.io/tutorial/summary.html\n\n Args:\n args: scalar tensors to summarize\n decay (float): the decay rate. Defaults to 0.95.\n collection (str or None): the name of the collection to add EMA-maintaining ops.\n The default will work together with the default\n :class:`MovingAverageSummary` callback.\n summary_collections ([str]): the names of collections to add the\n summary op. 
Default is TF's default (`tf.GraphKeys.SUMMARIES`).\n\n Returns:\n [tf.Tensor]: list of tensors returned by assign_moving_average,\n which can be used to maintain the EMA.\n \"\"\"\n decay = kwargs.pop('decay', 0.95)\n coll = kwargs.pop('collection', MOVING_SUMMARY_OPS_KEY)\n summ_coll = kwargs.pop('summary_collections', None)\n assert len(kwargs) == 0, \"Unknown arguments: \" + str(kwargs)\n\n ctx = get_current_tower_context()\n # allow ctx to be none\n if ctx is not None and not ctx.is_main_training_tower:\n return []\n\n graph = tf.get_default_graph()\n try:\n control_flow_ctx = graph._get_control_flow_context()\n # XLA does not support summaries anyway\n # However, this function will generate unnecessary dependency edges,\n # which makes the tower function harder to compile under XLA, so we skip it\n if control_flow_ctx is not None and control_flow_ctx.IsXLAContext():\n return\n except Exception:\n pass\n\n if tf.get_variable_scope().reuse is True:\n logger.warn(\"add_moving_summary() called under reuse=True scope, ignored.\")\n return []\n\n for x in args:\n assert isinstance(x, (tf.Tensor, tf.Variable)), x\n assert x.get_shape().ndims == 0, \\\n \"add_moving_summary() only accepts scalar tensor! Got one with {}\".format(x.get_shape())\n\n ema_ops = []\n for c in args:\n name = re.sub('tower[0-9]+/', '', c.op.name)\n with tf.name_scope(None):\n if not c.dtype.is_floating:\n c = tf.cast(c, tf.float32)\n # assign_moving_average creates variables with op names, therefore clear ns first.\n with _enter_vs_reuse_ns('EMA') as vs:\n ema_var = tf.get_variable(name, shape=c.shape, dtype=c.dtype,\n initializer=tf.constant_initializer(),\n trainable=False)\n ns = vs.original_name_scope\n with tf.name_scope(ns): # reuse VS&NS so that EMA_1 won't appear\n ema_op = moving_averages.assign_moving_average(\n ema_var, c, decay,\n zero_debias=True, name=name + '_EMA_apply')\n ema_ops.append(ema_op)\n with tf.name_scope(None):\n tf.summary.scalar(\n name + '-summary', ema_op,\n collections=summ_coll) # write the EMA value as a summary\n if coll is not None:\n for op in ema_ops:\n tf.add_to_collection(coll, op)\n return ema_ops\n\n\ntry:\n import cv2\nexcept ImportError:\n from ..utils.develop import create_dummy_func\n create_image_summary = create_dummy_func('create_image_summary', 'cv2') # noqa\n" ]
[ [ "tensorflow.python.training.moving_averages.assign_moving_average" ] ]
gaoyuanhezhihao/pyLib
[ "ecc517d12c4125deafa7b7fe09c63afa3349aaac" ]
[ "OneLib/ImageViewer/evaluate_detection.py" ]
[ "import argparse\nimport os\nfrom os.path import split,join,splitext\nimport xml.etree.ElementTree as ET\nimport re\nimport numpy as np\nfrom shapely.geometry import Polygon\nfrom debug_tool import paint_polygons\nfrom matplotlib import pyplot as plt\n\n\n\ndef parse_point(s):\n s.split(',')\n _, p1, p2, _ = re.split(',|\\\\(|\\\\)', s)\n # pt = re.findall('\\d+\\.*\\d*', s)\n return (float(p1), float(p2))\n\ndef parse_line(s):\n split = s.find('--')\n start = parse_point(s[:split])\n end = parse_point(s[split+2:])\n return (start, end)\n\ndef parse_pole(line):\n split = line.find('|')\n left = line[:split]\n right = line[split+1:]\n return (parse_line(left), parse_line(right))\n\ndef parse_gt_pole(s):\n # print(s)\n floats = list(map(float, re.split(',|;', s)))\n points = [(x, y) for x, y in zip(floats[0::2], floats[1::2])]\n points = sorted(points, key=lambda p:p[1])\n top = points[:2]\n bottom = points[2:]\n top = sorted(top)\n bottom = sorted(bottom)\n return ((top[0], bottom[0]), (top[1], bottom[1]))\n\ndef parse_gt(fp):\n tree = ET.parse(fp)\n gt_map = {}\n for c in tree.getroot().getchildren():\n if 'image' == c.tag:\n poles = [parse_gt_pole(p.get('points')) for p in c.getchildren() if 'points' in p.keys()]\n name = split(c.get('name'))[-1]\n name = splitext(name)[0]\n gt_map[name] = poles\n return gt_map\n\ndef area_of_bbox(bbox):\n a = (bbox[1][0] - bbox[0][0]) * (bbox[1][1] - bbox[0][1])\n assert a >= 0\n return a\n\n\ndef bbox_of_pole(pole):\n pts = (pole[0][0], pole[0][1], pole[1][0], pole[1][1])\n x_min = min(pole[0][0][0], pole[0][1][0])\n x_max = max(pole[1][0][0], pole[1][1][0])\n\n y_min = min(pole[0][0][1], pole[0][1][1])\n y_max = max(pole[1][0][1], pole[1][1][1])\n return((x_min, y_min), (x_max, y_max))\n\ndef polygon_of_pole(pole):\n assert pole[0][0][1] < pole[0][1][1], pole\n assert pole[1][0][1] < pole[1][1][1], pole\n points = [pole[0][0], pole[0][1], pole[1][1], pole[1][0]]\n return Polygon(points)\n\n\ndef calculate_iou_of_poles(pole_a, pole_b):\n polygon_a = polygon_of_pole(pole_a)\n polygon_b = polygon_of_pole(pole_b)\n # print(polygon_a)\n # print(polygon_b)\n try:\n intersection = polygon_a.intersection(polygon_b)\n except Exception as e:\n print(e)\n # paint_polygons(polygon_a, polygon_b)\n # plt.show()\n return 0.0\n else:\n # print(intersection)\n return intersection.area/ (polygon_a.area + polygon_b.area - intersection.area)\n\ndef calculate_iou_of_bbox(boxA, boxB):\n # determine the (x, y)-coordinates of the intersection rectangle\n xA = max(boxA[0][0], boxB[0][0])\n yA = max(boxA[0][1], boxB[0][1])\n xB = min(boxA[1][0], boxB[1][0])\n yB = min(boxA[1][1], boxB[1][1])\n\n # compute the area of intersection rectangle\n interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))\n # print(\"intersection area=\", interArea)\n if interArea == 0:\n return 0\n # compute the area of both the prediction and ground-truth\n # rectangles\n # boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))\n # boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))\n\n # compute the intersection over union by taking the intersection\n # area and dividing it by the sum of prediction + ground-truth\n # areas - the interesection area\n iou = interArea / float(area_of_bbox(boxA)+ area_of_bbox(boxB)- interArea)\n\n # return the intersection over union value\n return iou\n\n\nIOU_THRESHOLD = 0.5\nEPS = 1e-9\n\n\n\ndef compare_with_groundtruth(detected_poles, ground_truth):\n true_detection = []\n not_detected = []\n matched = [False] * len(detected_poles)\n for g in 
ground_truth:\n iou_list = [calculate_iou_of_poles(g, p) for p in detected_poles]\n max_idx = np.argmax(iou_list)\n if iou_list[max_idx] > IOU_THRESHOLD:\n true_detection.append((g, detected_poles[max_idx]))\n matched[max_idx] = True\n else:\n not_detected.append(g)\n false_detection = [p for m, p in zip(matched, detected_poles) if not m]\n return true_detection, false_detection, not_detected\n\nclass DetectionEvaluator:\n\n def __init__(self, gt_fp, detection_directory):\n self.gt_map = parse_gt(gt_fp)\n self.detection_map = {}\n for file_name in os.listdir(detection_directory):\n if not file_name.endswith('.txt'):\n continue\n self.evaluate(join(detection_directory, file_name))\n\n def __getitem__(self, key):\n return self.detection_map[key]\n\n def evaluate(self, detection_file_path):\n sample_name = splitext(split(detection_file_path)[-1])[0]\n with open(detection_file_path, 'r') as f:\n detected_poles = [parse_pole(l) for l in f.readlines()]\n # print(\"detected %d poles in %s\" % (len(detected_poles), file_name))\n true_detection = []\n false_detection = []\n ground_truth = self.gt_map[sample_name]\n not_detected = ground_truth\n if len(detected_poles) != 0:\n true_detection, false_detection, not_detected = compare_with_groundtruth(detected_poles, ground_truth)\n self.detection_map[sample_name] = {'true_detection': true_detection,\n 'false_detection': false_detection,\n 'not_detected': not_detected,\n 'true_positive': len(true_detection),\n 'positive': len(detected_poles),\n 'groundtruth_count':len(ground_truth),\n 'precision': len(true_detection) / (len(detected_poles) + EPS),\n 'recall': len(true_detection) / (len(ground_truth) + EPS)}\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('groundtruth_path')\n parser.add_argument('detection_result_directory')\n args = parser.parse_args()\n eva = DetectionEvaluator(args.groundtruth_path, args.detection_result_directory)\n true_positive = 0\n positive = 0\n groundtruth_count = 0\n for e in eva.detection_map.values():\n true_positive += e['true_positive']\n positive += e['positive']\n groundtruth_count += e['groundtruth_count']\n print('precision=%f, recall=%f' % (true_positive/positive, true_positive/groundtruth_count))\n" ]
[ [ "numpy.argmax" ] ]
Hemant2801/Big-mart-sales-prediction
[ "cac2748ed6a446095d568e005601d941f586e104" ]
[ "Sales prediction model.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Importing all the dependencies\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom xgboost import XGBRegressor\nfrom sklearn.preprocessing import LabelEncoder\n\n\n# # Data collection and analysis\n\n# In[2]:\n\n\ndf = pd.read_csv('C:/Users/Hemant/jupyter_codes/ML Project 1/Big mart sales prediction/train.csv')\n\n\n# In[3]:\n\n\n#print the fisrt 5 rows of the dataset\n'''\nFD = food \nDR = drink\nNC = non consumable\n\n'''\ndf.head()\n\n\n# In[4]:\n\n\n# print the last 5 rows of the dataset\ndf.tail()\n\n\n# In[5]:\n\n\n# shape of the dataset\ndf.shape\n\n\n# In[6]:\n\n\n# getting some info about the dataset\ndf.info()\n\n\n# In[7]:\n\n\n#checking for any missing values\ndf.isnull().sum()\n\n\n# In[8]:\n\n\n# stastical measure of the dataset\ndf.describe()\n\n\n# In[9]:\n\n\n#checking for categorical data in diff object type columns\nobjlist = df.select_dtypes('object').columns\nfor i in objlist:\n print(f'\\n{i}')\n print(df[i].value_counts(), end = '\\n') \n\n\n# Handling the missing values\n# \n# Mean ---> Average value\n# Mode ---> Most repeated value\n\n# In[10]:\n\n\n# mean value of 'Item weight' collumn\nmean_value = df['Item_Weight'].mean()\n\n\n# In[11]:\n\n\n# filling the missing value with mean in 'item weight' column\ndf['Item_Weight'].fillna(mean_value, inplace = True)\n\n\n# In[12]:\n\n\n#checking for missing values\ndf.isnull().sum()\n\n\n# In[13]:\n\n\n# replacing the missing value with mode in 'Outlet Size' column\nmode_value = df.pivot_table(values = 'Outlet_Size', columns = 'Outlet_Type', aggfunc = (lambda x : x.mode()[0]))\n\n\n# In[14]:\n\n\nprint(mode_value)\n\n\n# In[15]:\n\n\nmissing_values = df['Outlet_Size'].isnull()\n\n\n# In[16]:\n\n\ndf.loc[missing_values, 'Outlet_Size'] = df.loc[missing_values, 'Outlet_Type'].apply(lambda x : mode_value[x])\n\n\n# In[17]:\n\n\n#checking for missing values\ndf.isnull().sum()\n\n\n# Data analysis\n\n# In[18]:\n\n\n# stastical measure of the data\ndf.describe()\n\n\n# Numerical features\n\n# In[19]:\n\n\nsns.set_style(style = 'darkgrid')\n\n\n# In[20]:\n\n\n#item weight distribution\nplt.figure(figsize = (6,6))\nsns.displot(df['Item_Weight'], kde= True)\nplt.show()\n\n\n# In[21]:\n\n\n#item visibility distribution\nplt.figure(figsize = (6,6))\nsns.displot(df['Item_Visibility'], kde= True)\nplt.show()\n\n\n# In[22]:\n\n\n#item MRP distribution\nplt.figure(figsize = (6,6))\nsns.displot(df['Item_MRP'], kde= True)\nplt.show()\n\n\n# In[23]:\n\n\n#Item_Outlet_Sales distribution\nplt.figure(figsize = (6,6))\nsns.displot(df['Item_Outlet_Sales'], kde= True)\nplt.show()\n\n\n# In[24]:\n\n\n#Outlet_Establishment_Year distribution\nplt.figure(figsize = (6,6))\nsns.countplot(x= 'Outlet_Establishment_Year', data = df)\nplt.show()\n\n\n# Categoruical features\n\n# In[25]:\n\n\n#Item_Fat_Content distribution\nplt.figure(figsize = (6,6))\nsns.countplot(x= 'Item_Fat_Content', data = df)\nplt.show()\n\n\n# In[26]:\n\n\n# Item_Type\t distribution\nplt.figure(figsize = (30,6))\nsns.countplot(x= 'Item_Type', data = df)\nplt.show()\n\n\n# In[27]:\n\n\n# Outlet location type distribution\nplt.figure(figsize = (6,6))\nsns.countplot(x = 'Outlet_Location_Type', data = df)\nplt.show()\n\n\n# # Data preprocessing\n\n# In[28]:\n\n\ndf.head()\n\n\n# In[29]:\n\n\ndf['Item_Fat_Content'].value_counts()\n\n\n# In[30]:\n\n\ndf.replace({'Item_Fat_Content' : {'low fat' 
: 'Low Fat', 'LF' : 'Low Fat', 'reg' : 'Regular'}}, inplace = True)\n\n\n# In[31]:\n\n\ndf['Item_Fat_Content'].value_counts()\n\n\n# Label Encoding\n\n# In[32]:\n\n\nencoder = LabelEncoder()\n\nobjlist = df.select_dtypes('object').columns\nfor i in objlist:\n df[i] = encoder.fit_transform(df[i])\n\n\n# In[33]:\n\n\ndf.head()\n\n\n# In[34]:\n\n\ncorrelation = df.corr()\n\n\n# In[43]:\n\n\nplt.figure(figsize = (20,20))\nsns.heatmap(correlation , cbar = True, cmap = 'Blues',square = True, annot = True, fmt = '.1f', annot_kws = {'size' : 8})\n\n\n# Splitting features and targets\n\n# In[36]:\n\n\nX = df.drop(columns = 'Item_Outlet_Sales' ,axis = 1)\nY = df['Item_Outlet_Sales']\n\n\n# # Splitting the data into training and testing data\n\n# In[37]:\n\n\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = .2, random_state = 6)\n\n\n# In[38]:\n\n\nprint(x_train.shape, x_test.shape)\nprint(y_train.shape, y_test.shape)\n\n\n# # Machine learning model\n\n# In[39]:\n\n\nmodel = XGBRegressor()\n\n\n# In[40]:\n\n\nmodel.fit(x_train, y_train)\n\n\n# Model evaluatuion on training data\n\n# In[41]:\n\n\ntrain_prediction = model.predict(x_train)\n\naccuracy_training = metrics.r2_score(y_train, train_prediction)\nprint('R SQUARED ERROR OF TRAINING DATA :', accuracy_training)\n\n\n# Model evaluatuion on testing data\n\n# In[42]:\n\n\ntest_prediction = model.predict(x_test)\n\naccuracy_testing = metrics.r2_score(y_test, test_prediction)\nprint('R SQUARED ERROR OF TESTING DATA :', accuracy_testing)\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "sklearn.preprocessing.LabelEncoder", "sklearn.metrics.r2_score", "matplotlib.pyplot.figure", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
peekxc/tallem
[ "949af20c1f50f9b6784ee32463e59123cd64294b" ]
[ "notebooks/numba_mds.py" ]
[ "import os\nimport numpy as np\nimport numpy.typing as npt\nfrom numpy.typing import ArrayLike\nfrom typing import *\nfrom .distance import *\nfrom .utility import *\nfrom scipy.sparse.linalg import eigs as truncated_eig\nfrom scipy.linalg import eigh, eig as dense_eig\nfrom scipy.spatial import KDTree\nfrom scipy.sparse import csc_matrix, csr_matrix\nfrom scipy.sparse.csgraph import minimum_spanning_tree, connected_components\n\nimport numpy as np\nimport numba as nb\nfrom numba import njit, types, float32, float64, int32, int64, prange\nfrom numba.extending import overload\n\n\n@njit('float64[:,:](float64[:,:])', fastmath=True,parallel=False)\ndef average_rows(x):\n\tassert x.ndim == 2\n\tres = np.zeros((1, x.shape[0]),dtype=np.float64)\n\tfor i in prange(x.shape[0]):\n\t\tres += x[i,:]\n\treturn res / x.shape[0]\n\n@njit('float64[:,:](float64[:,:])', fastmath=True,parallel=False)\ndef average_cols(x):\n\tassert x.ndim == 2\n\tres = np.zeros((1, x.shape[1]),dtype=np.float64)\n\tfor i in prange(x.shape[1]):\n\t\tres += x[:,i]\n\treturn res / x.shape[1]\n\n#test.parallel_diagnostics(level=4)\n\n\n@njit('float64[:,:](float64[:,:], int32)', fastmath=False)\ndef cmds_numba_naive(D, d):\n\tn = D.shape[0]\n\tH = np.eye(n) - (1.0/n)*np.ones(shape=(n,n)) # centering matrix\n\tB = -0.5 * H @ D @ H\n\tevals, evecs = np.linalg.eigh(B)\n\tevals, evecs = np.flip(evals)[np.arange(d)], np.fliplr(evecs)[:,np.arange(d)] \n\tw = np.flatnonzero(evals > 0)\n\tY = np.zeros(shape=(n, d))\n\tY[:,w] = evecs[:,w] @ np.diag(np.sqrt(evals[w]))\n\treturn(Y)\n\n## Classical MDS with Numba\n@njit(nb.types.Tuple((float64[:], float64[:,:]))(float64[:,:], int32), fastmath=False)\ndef cmds_numba_E(D, d):\n\t''' Given distance matrix 'D' and dimension 'd', computes the classical MDS '''\n\tD = -0.5*(D - average_rows(D) - average_cols(D).T + np.mean(D))\n\tevals, evecs = np.linalg.eigh(D)\n\tevals, evecs = np.flip(evals)[:d] , np.fliplr(evecs)[:,:d] \n\treturn((evals, evecs))\n\n@njit('float64[:,:](float64[:,:], int32)', fastmath=False)\ndef cmds_numba(D, d):\n\tn = D.shape[0]\n\tevals, evecs = cmds_numba_E(D, d)\n\tw = np.flatnonzero(evals > 0)\n\tY = np.zeros(shape=(n, d))\n\tY[:,w] = np.dot(evecs[:,w], np.diag(np.sqrt(evals[w])))\n\treturn(Y)\n\nfrom tallem.syevr import numba_dsyevr\n\n@njit('float64[:,:](float64[:,:], int32)', fastmath=False)\ndef cmds_numba_fortran(D, d):\n\tn = D.shape[0]\n\tD = -0.5*(D - average_rows(D) - average_cols(D).T + np.mean(D))\n\tevals, evecs, i, e = numba_dsyevr(D, n-d+1, n, 1e-8)\n\tw = np.flatnonzero(evals > 0)\n\tY = np.zeros(shape=(n, d))\n\tY[:,w] = np.dot(evecs[:,w], np.diag(np.sqrt(evals[w])))\n\treturn(Y)\n\n@njit('float64[:,:](float64[:,:], float64[:,:], int32)', fastmath=False)\ndef landmark_cmds_numba(LD, S, d):\n\t''' \n\tBarbones landmark MDS with Numba \n\t\n\tLD := (k x k) landmark distance matrix \n\tS := (k x n) matrix of distances from the n points to the k landmark points, where n > k\n\td := dimension of output coordinitization\n\t'''\n\tn = S.shape[1]\n\tevals, evecs = cmds_numba_E(LD, d)\n\tmean_landmark = average_cols(LD).T\n\tw = np.flatnonzero(evals > 0)\n\tL_pseudo = evecs/np.sqrt(evals[w])\n\tY = np.zeros(shape=(n, d))\n\tY[:,w] = (-0.5*(L_pseudo.T @ (S.T - mean_landmark.T).T)).T \n\treturn(Y)\n\n# lapack.dsyevr(jobz, rng, uplo, N, D, N, vl, vu, il, iu, tol, m, w, Z, ldz, isuppz, work, lwork, iwork, liwork, info)\n@njit('float64[:,:](float64[:,:])', parallel=True)\ndef dist_matrix(X):\n\tn = X.shape[0]\n\tD = np.zeros((n,n))\n\tfor i in np.arange(n):\n\t\tfor j 
in np.arange(n):\n\t\t\tD[i,j] = np.sum((X[i,:]-X[j,:])**2)\n\treturn(D)\n\n# @njit('float64[:,:](float64[:,:], int32[:], int32[:], int32)', parallel=True)\n# def bench_parallel(X, subsets_vec, subsets_len, d):\n# \tresults = []\n# \tfor i in prange(len(subsets_vec)-1):\n# \t\tind = np.arange(np.subsets_vec[i], subsets_vec[i+1])\n# \t\tD = dist_matrix(X[ind,:])\n# \t\tresults.append(cmds_numba(D, d))\n# \treturn(results)\n\n#from numba import njit, prange\n# @njit(parallel=True)\n# def fit_local_models(f, X, cover):\n# \tindex_set = list(cover.keys())\n# \tsubsets = list(cover.values())\n# \tresult = {}\n# \tfor j in prange(len(cover)):\n# \t\tindex, subset = index_set[j], subsets[j]\n# \t\tresult[index] = f(X[np.array(subset),:])\n# \treturn(result)\n" ]
[ [ "numpy.zeros", "numpy.sum", "numpy.linalg.eigh", "numpy.ones", "numpy.mean", "numpy.eye", "numpy.fliplr", "numpy.arange", "numpy.sqrt", "numpy.flip", "numpy.flatnonzero" ] ]
jessierliu/ecogVIS
[ "c97e79a20b3af1074a3a5e1f1ad864a580c97e04" ]
[ "ecogvis/functions/htk_to_nwb/chang2nwb.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nConvert ECoG to NWB.\n\n:Author: Ben Dichter, Jessie R. Liu\nModified by Luiz Tauffer on May 30, 2020\n\"\"\"\nfrom __future__ import print_function\nimport os\nfrom datetime import datetime\nfrom os import path\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom hdmf.backends.hdf5 import H5DataIO\nfrom ndx_ecog import ECoGSubject\nfrom ndx_bipolar_scheme import BipolarSchemeTable, EcephysExt\nfrom pynwb.file import DynamicTableRegion\nfrom pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\nfrom pynwb.ecephys import ElectricalSeries, LFP\nimport scipy.io as sio\nfrom scipy.io.wavfile import read as wavread\nfrom tqdm import tqdm\n\nfrom ecogvis.functions.htk_to_nwb.HTK import readHTK\n\n\n# get_manager must come after dynamic imports\nmanager = get_manager()\n\n\ndef get_analog(anin_path, num=1):\n \"\"\"\n Load analog data. Try:\n 1) analog[num].wav\n 2) ANIN[num].htk\n\n Parameters\n ----------\n blockpath: str\n num: int\n\n Returns\n -------\n fs, data\n \"\"\"\n wav_path = path.join(anin_path, 'analog' + str(num) + '.wav')\n if os.path.isfile(wav_path):\n rate, data = wavread(wav_path)\n return float(rate), np.array(data, dtype=float)\n htk_path = path.join(anin_path, 'ANIN' + str(num) + '.htk')\n if os.path.isfile(htk_path):\n htk_out = readHTK(htk_path, scale_s_rate=True)\n return htk_out['sampling_rate'], htk_out['data'].ravel()\n print('no analog path found for ' + str(num))\n return None, None\n\n\ndef readhtks(htk_path, elecs=None, use_tqdm=True):\n # First fix the order of htk files\n all_files = np.array([f for f in Path(htk_path).glob('*.htk')])\n numbers = [f.name.split('.')[0].split('Wav')[1] for f in Path(htk_path).glob('*.htk') if '._' not in str(f)]\n new_numbers = [n[0] + '0' + n[1] if len(n) == 2 else n for n in numbers]\n sorted_index = np.argsort(new_numbers)\n sorted_files = all_files[sorted_index]\n # Load data from files in correct order\n data = []\n if use_tqdm:\n this_iter = tqdm(sorted_files, desc='reading electrodes')\n else:\n this_iter = sorted_files\n for i in this_iter:\n htk = readHTK(i, scale_s_rate=True)\n data.append(htk['data'])\n data = np.stack(data)\n if len(data.shape) == 3:\n data = data.transpose([2, 0, 1])\n\n rate = htk['sampling_rate']\n\n return rate, data.squeeze()\n\n\ndef get_bad_elecs(blockpath):\n bad_channels_file = os.path.join(blockpath, 'Artifacts', 'badChannels.txt')\n\n # I think bad channels is 1-indexed but I'm not sure\n if os.path.isfile(bad_channels_file) and os.stat(\n bad_channels_file).st_size:\n dat = pd.read_csv(bad_channels_file, header=None, delimiter=' ',\n engine='python')\n bad_elecs_inds = dat.values.ravel() - 1\n bad_elecs_inds = bad_elecs_inds[np.isfinite(bad_elecs_inds)]\n else:\n bad_elecs_inds = []\n\n return bad_elecs_inds\n\n\ndef elecs_to_electrode_table(nwbfile, elecspath):\n \"\"\"\n Takes an NWB file and the elecs .mat file path, loads the anatomical and\n location information for each electrode,\n and writes this information to the NWB file.\n\n Parameters:\n -----------\n nwbfile : object\n An NWB file object.\n elecspath : str\n Path to the TDT_elecs_all.mat file for this subject. First, second,\n and third columns of the key 'elecmatrix'\n should be x, y, and z coordinates, respectively. 
For the 'anatomy'\n field, second column should be the full electrode label and the\n fourth column should be the anatomical location name.\n\n Returns:\n --------\n nwb_file : object\n The edited NWB file with the added electrode information.\n \"\"\"\n\n # Get anatomical and location information for electrodes.\n elec_mat = sio.loadmat(elecspath)\n labels = elec_mat['anatomy'][:, 1]\n location = elec_mat['anatomy'][:, 3]\n x = elec_mat['elecmatrix'][:, 0]\n y = elec_mat['elecmatrix'][:, 1]\n z = elec_mat['elecmatrix'][:, 2]\n\n # Get MNI warped electrode coordinates.\n if Path(elecspath.as_posix().split('.')[0] + '_warped.mat').is_file():\n elec_mat_warped = sio.loadmat(str(elecspath).split('.')[0] + '_warped.mat')\n x_warped = elec_mat_warped['elecmatrix'][:, 0]\n y_warped = elec_mat_warped['elecmatrix'][:, 1]\n z_warped = elec_mat_warped['elecmatrix'][:, 2]\n else:\n print('No warped electrode information found...filling with zeros.')\n x_warped = np.zeros_like(x)\n y_warped = np.zeros_like(y)\n z_warped = np.zeros_like(z)\n\n # Define electrode device label names.\n group_labels = []\n for current_group in labels:\n name = current_group[0].rstrip('0123456789')\n # Replace 'NaN' for 'null'\n if name == 'NaN':\n name = 'null'\n group_labels.append(name)\n\n # Get the list of unique electrode device label names\n unique_group_indexes = np.unique(group_labels, return_index=True)[1]\n unique_group_labels = [group_labels[f] for f in sorted(unique_group_indexes)]\n\n # Add additional columns to the electodes table.\n nwbfile.add_electrode_column('label', 'label of electrode')\n nwbfile.add_electrode_column('bad', 'electrode identified as too noisy')\n nwbfile.add_electrode_column('x_warped', 'x warped onto cvs_avg35_inMNI152')\n nwbfile.add_electrode_column('y_warped', 'y warped onto cvs_avg35_inMNI152')\n nwbfile.add_electrode_column('z_warped', 'z warped onto cvs_avg35_inMNI152')\n nwbfile.add_electrode_column('null', 'if not connected to real electrode')\n\n for group_label in unique_group_labels:\n # Get region name and device label for the group.\n if 'Depth' in group_label:\n brain_area = group_label.split('Depth')[0]\n elif 'Strip' in group_label:\n brain_area = group_label.split('Strip')[0]\n elif 'Grid' in group_label:\n brain_area = group_label.split('Grid')[0]\n elif 'Pole' in group_label:\n brain_area = group_label.split('Pole')[0]\n elif 'HeschlsGyrus' in group_label:\n brain_area = 'HeschlsGyrus'\n elif 'null' in group_label:\n brain_area = 'null'\n else:\n brain_area = 'other'\n\n # Create electrode device (same as the group).\n device = nwbfile.create_device(group_label)\n\n # Create electrode group with name, description, device object,\n # and general location.\n electrode_group = nwbfile.create_electrode_group(\n name='{} electrodes'.format(group_label),\n description='{}'.format(group_label),\n device=device,\n location=str(brain_area)\n )\n\n # Loop through the number of electrodes in this electrode group\n elec_nums = np.where(np.array(group_labels) == group_label)[0]\n for elec_num in elec_nums:\n # Add the electrode information to the table.\n elec_location = location[elec_num]\n if len(elec_location) == 0:\n # If no label is recorded for this electrode, set it to null\n elec_location = 'null'\n is_null = True\n else:\n elec_location = elec_location[0]\n is_null = False\n\n nwbfile.add_electrode(\n id=elec_num,\n x=x[elec_num],\n y=y[elec_num],\n z=z[elec_num],\n imp=np.nan,\n x_warped=x_warped[elec_num],\n y_warped=y_warped[elec_num],\n z_warped=z_warped[elec_num],\n 
location=str(elec_location),\n filtering='filtering',\n group=electrode_group,\n label=str(labels[elec_num][0]),\n bad=False,\n null=is_null,\n )\n\n return nwbfile\n\n\ndef chang2nwb(blockpath, out_file_path=None, save_to_file=False, htk_config=None):\n \"\"\"\n Parameters\n ----------\n blockpath: str\n out_file_path: None | str\n if None, output = [blockpath]/[blockname].nwb\n save_to_file : bool\n If True, saves to file. If False, just returns nwbfile object\n htk_config : dict\n Dictionary cotaining HTK conversion paths and options. Example:\n {\n ecephys_path: 'path_to/ecephys_htk_files',\n ecephys_type: 'raw', 'preprocessed' or 'high_gamma',\n analog_path: 'path_to/analog_htk_files',\n anin1: {present: True, name: 'microphone', type: 'acquisition'},\n anin2: {present: True, name: 'speaker1', type: 'stimulus'},\n anin3: {present: False, name: 'speaker2', type: 'stimulus'},\n anin4: {present: False, name: 'custom', type: 'acquisition'},\n metadata: metadata,\n electrodes_file: electrodes_file,\n bipolar_file: bipolar_file\n }\n\n Returns\n -------\n \"\"\"\n\n metadata = {}\n\n if htk_config is None:\n blockpath = Path(blockpath)\n else:\n blockpath = Path(htk_config['ecephys_path'])\n metadata = htk_config['metadata']\n blockname = blockpath.parent.name\n subject_id = blockpath.parent.parent.name[2:]\n\n if out_file_path is None:\n out_file_path = blockpath.resolve().parent / ''.join([blockname, '.nwb'])\n\n # file paths\n ecog_path = blockpath\n anin_path = htk_config['analog_path']\n bad_time_file = path.join(blockpath, 'Artifacts', 'badTimeSegments.mat')\n\n # Create the NWB file object\n nwbfile_dict = {\n 'session_description': blockname,\n 'identifier': blockname,\n 'session_start_time': datetime.now().astimezone(),\n 'institution': 'University of California, San Francisco',\n 'lab': 'Chang Lab'\n }\n if 'NWBFile' in metadata:\n nwbfile_dict.update(metadata['NWBFile'])\n nwbfile = NWBFile(**nwbfile_dict)\n\n # Read electrophysiology data from HTK files\n print('reading htk acquisition...', flush=True)\n ecog_rate, data = readhtks(ecog_path)\n data = data.squeeze()\n print('done', flush=True)\n\n # Get electrodes info from mat file\n if htk_config['electrodes_file'] is not None:\n nwbfile = elecs_to_electrode_table(\n nwbfile=nwbfile,\n elecspath=htk_config['electrodes_file'],\n )\n n_electrodes = nwbfile.electrodes[:].shape[0]\n all_elecs = list(range(n_electrodes))\n elecs_region = nwbfile.create_electrode_table_region(\n region=all_elecs,\n description='ECoG electrodes on brain'\n )\n else:\n ecephys_dict = {\n 'Device': [{'name': 'auto_device'}],\n 'ElectricalSeries': [{'name': 'ECoG', 'description': 'description'}],\n 'ElectrodeGroup': [{'name': 'auto_group', 'description': 'auto_group',\n 'location': 'location', 'device': 'auto_device'}]\n }\n if 'Ecephys' in metadata:\n ecephys_dict.update(metadata['Ecephys'])\n\n # Create devices\n for dev in ecephys_dict['Device']:\n device = nwbfile.create_device(dev['name'])\n\n # Electrode groups\n for el_grp in ecephys_dict['ElectrodeGroup']:\n device = nwbfile.devices[el_grp['device']]\n electrode_group = nwbfile.create_electrode_group(\n name=el_grp['name'],\n description=el_grp['description'],\n location=el_grp['location'],\n device=device\n )\n\n # Electrodes table\n n_electrodes = data.shape[1]\n nwbfile.add_electrode_column('label', 'label of electrode')\n nwbfile.add_electrode_column('bad', 'electrode identified as too noisy')\n nwbfile.add_electrode_column('x_warped', 'x warped onto cvs_avg35_inMNI152')\n 
nwbfile.add_electrode_column('y_warped', 'y warped onto cvs_avg35_inMNI152')\n nwbfile.add_electrode_column('z_warped', 'z warped onto cvs_avg35_inMNI152')\n nwbfile.add_electrode_column('null', 'if not connected to real electrode')\n bad_elecs_inds = get_bad_elecs(blockpath)\n for elec_counter in range(n_electrodes):\n bad = elec_counter in bad_elecs_inds\n nwbfile.add_electrode(\n id=elec_counter,\n x=np.nan,\n y=np.nan,\n z=np.nan,\n imp=np.nan,\n x_warped=np.nan,\n y_warped=np.nan,\n z_warped=np.nan,\n location='',\n filtering='none',\n group=electrode_group,\n label='',\n bad=bad,\n null=False,\n )\n\n all_elecs = list(range(n_electrodes))\n elecs_region = nwbfile.create_electrode_table_region(\n region=all_elecs,\n description='ECoG electrodes on brain'\n )\n\n # Get Bipolar table from file\n if htk_config['bipolar_file'] is not None:\n df = pd.read_csv(htk_config['bipolar_file'], index_col='id', sep='\\t')\n\n # Create bipolar scheme table\n bipolar_scheme_table = BipolarSchemeTable(\n name='bipolar_scheme_table',\n description='desc'\n )\n\n # Columns for bipolar scheme - all anodes and cathodes within the same\n # bipolar row are considered to have the same group and location\n bipolar_scheme_table.add_column(\n name='group_name',\n description='electrode group name'\n )\n bipolar_scheme_table.add_column(\n name='location',\n description='electrode location'\n )\n\n # Iterate over anode / cathode rows\n for i, r in df.iterrows():\n if isinstance(r['anodes'], str):\n anodes = [int(a) for a in r['anodes'].split(',')]\n else:\n anodes = [int(r['anodes'])]\n if isinstance(r['cathodes'], str):\n cathodes = [int(a) for a in r['cathodes'].split(',')]\n else:\n cathodes = [int(r['cathodes'])]\n bipolar_scheme_table.add_row(\n anodes=anodes,\n cathodes=cathodes,\n group_name=nwbfile.electrodes['group_name'][anodes[0]],\n location=nwbfile.electrodes['location'][anodes[0]]\n )\n\n bipolar_scheme_table.anodes.table = nwbfile.electrodes\n bipolar_scheme_table.cathodes.table = nwbfile.electrodes\n\n # Creates bipolar table region\n elecs_region = DynamicTableRegion(\n name='electrodes',\n data=np.arange(0, df.shape[0]),\n description='desc',\n table=bipolar_scheme_table\n )\n\n ecephys_ext = EcephysExt(name='ecephys_ext')\n ecephys_ext.bipolar_scheme_table = bipolar_scheme_table\n nwbfile.add_lab_meta_data(ecephys_ext)\n\n # Stores HTK electrophysiology data as raw, preprocessed or high gamma\n if htk_config['ecephys_type'] == 'raw':\n ecog_es = ElectricalSeries(name='ECoG',\n data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),\n electrodes=elecs_region,\n rate=ecog_rate,\n description='all Wav data')\n nwbfile.add_acquisition(ecog_es)\n elif htk_config['ecephys_type'] == 'preprocessed':\n lfp = LFP()\n ecog_es = ElectricalSeries(name='preprocessed',\n data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),\n electrodes=elecs_region,\n rate=ecog_rate,\n description='all Wav data')\n lfp.add_electrical_series(ecog_es)\n # Creates the ecephys processing module\n ecephys_module = nwbfile.create_processing_module(\n name='ecephys',\n description='preprocessed electrophysiology data'\n )\n ecephys_module.add_data_interface(lfp)\n elif htk_config['ecephys_type'] == 'high_gamma':\n ecog_es = ElectricalSeries(name='high_gamma',\n data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),\n electrodes=elecs_region,\n rate=ecog_rate,\n description='all Wav data')\n # Creates the ecephys processing module\n ecephys_module = nwbfile.create_processing_module(\n name='ecephys',\n 
description='preprocessed electrophysiology data'\n )\n ecephys_module.add_data_interface(ecog_es)\n\n # Add ANIN 1\n if htk_config['anin1']['present']:\n fs, data = get_analog(anin_path, 1)\n ts = TimeSeries(\n name=htk_config['anin1']['name'],\n data=data,\n unit='NA',\n rate=fs,\n )\n if htk_config['anin1']['type'] == 'acquisition':\n nwbfile.add_acquisition(ts)\n else:\n nwbfile.add_stimulus(ts)\n print('ANIN1 saved with name \"', htk_config['anin1']['name'], '\" in ',\n htk_config['anin1']['type'])\n\n # Add ANIN 2\n if htk_config['anin2']['present']:\n fs, data = get_analog(anin_path, 2)\n ts = TimeSeries(\n name=htk_config['anin2']['name'],\n data=data,\n unit='NA',\n rate=fs,\n )\n if htk_config['anin2']['type'] == 'acquisition':\n nwbfile.add_acquisition(ts)\n else:\n nwbfile.add_stimulus(ts)\n print('ANIN2 saved with name \"', htk_config['anin2']['name'], '\" in ',\n htk_config['anin2']['type'])\n\n # Add ANIN 3\n if htk_config['anin3']['present']:\n fs, data = get_analog(anin_path, 3)\n ts = TimeSeries(\n name=htk_config['anin3']['name'],\n data=data,\n unit='NA',\n rate=fs,\n )\n if htk_config['anin3']['type'] == 'acquisition':\n nwbfile.add_acquisition(ts)\n else:\n nwbfile.add_stimulus(ts)\n print('ANIN3 saved with name \"', htk_config['anin3']['name'], '\" in ',\n htk_config['anin3']['type'])\n\n # Add ANIN 4\n if htk_config['anin4']['present']:\n fs, data = get_analog(anin_path, 4)\n ts = TimeSeries(\n name=htk_config['anin4']['name'],\n data=data,\n unit='NA',\n rate=fs,\n )\n if htk_config['anin4']['type'] == 'acquisition':\n nwbfile.add_acquisition(ts)\n else:\n nwbfile.add_stimulus(ts)\n print('ANIN4 saved with name \"', htk_config['anin4']['name'], '\" in ',\n htk_config['anin4']['type'])\n\n # Add bad time segments\n if os.path.exists(bad_time_file) and os.stat(bad_time_file).st_size:\n bad_time = sio.loadmat(bad_time_file)['badTimeSegments']\n for row in bad_time:\n nwbfile.add_invalid_time_interval(start_time=row[0],\n stop_time=row[1],\n tags=('ECoG artifact',),\n timeseries=ecog_es)\n\n # Subject\n subject_dict = {'subject_id': subject_id}\n if 'Subject' in metadata:\n subject_dict.update(metadata['Subject'])\n subject = ECoGSubject(**subject_dict)\n nwbfile.subject = subject\n\n if save_to_file:\n print('Saving HTK content to NWB file...')\n # Export the NWB file\n with NWBHDF5IO(str(out_file_path), manager=manager, mode='w') as io:\n io.write(nwbfile)\n\n # read check\n with NWBHDF5IO(str(out_file_path), manager=manager, mode='r') as io:\n io.read()\n print('NWB file saved: ', str(out_file_path))\n\n return nwbfile, out_file_path, subject_id, blockname\n" ]
[ [ "numpy.zeros_like", "scipy.io.wavfile.read", "numpy.array", "scipy.io.loadmat", "numpy.stack", "numpy.arange", "numpy.argsort", "numpy.isfinite", "pandas.read_csv", "numpy.unique" ] ]
tmddusgood/NUGU_movie_recommendation-1
[ "0c87638963d4681583f94def038dcd980270cb14" ]
[ "nugu/movie_comment_scrapper/build_model/load_model.py" ]
[ "from gensim.models.word2vec import Word2Vec\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nimport matplotlib\n\nfont_name = matplotlib.font_manager.FontProperties(\n fname=\"C:/Windows/Fonts/gulim.ttc\" # 한글 폰트 위치를 넣어주세요\n ).get_name()\nmatplotlib.rc('font', family=font_name)\n\ndef plot_2d_graph(vocabs, xs, ys): \n plt.figure(figsize=(8,6))\n plt.scatter(xs, ys, marker ='o')\n for i, v in enumerate(vocabs):\n plt.annotate(v, xy=(xs[i], ys[i]))\n plt.show()\n\nmodel = Word2Vec.load('../20191124_0200')\n\n\n\nword_vectors = model.wv\n\nprint(model.wv['김혜수'])\nprint(model.wv.most_similar('욕정'))\nprint(model.wv.most_similar('재미'))\nprint(model.wv.most_similar('재밌'))\nprint(model.most_similar(positive=['김혜수', '레이첼'], negative=['여자', '여배우'], topn=10))\nvocabs = word_vectors.vocab.keys()\nword_vectors_list = [word_vectors[v] for v in vocabs]\n\n# pca = PCA(n_components=2)\n# xys = pca.fit_transform(word_vectors_list)\n# xs = xys[:, 0]\n# ys = xys[:, 1]\n#\n#plot_2d_graph(vocabs, xs, ys)\n\n\n\n\n\n# from sklearn.decomposition import IncrementalPCA # inital reduction\n# from sklearn.manifold import TSNE # final reduction\n# import numpy as np # array handling\n# from gensim.models.word2vec import Word2Vec\n# import matplotlib.pyplot as plt\n# from sklearn.decomposition import PCA\n# import matplotlib\n# from plotly.offline import init_notebook_mode, iplot, plot\n# import plotly.graph_objs as go\n# import random\n#\n# font_name = matplotlib.font_manager.FontProperties(\n# fname=\"C:/Windows/Fonts/gulim.ttc\" # 한글 폰트 위치를 넣어주세요\n# ).get_name()\n# matplotlib.rc('font', family=font_name)\n# model = Word2Vec.load('20191123_2300')\n#\n# def reduce_dimensions(model):\n# num_dimensions = 2 # final num dimensions (2D, 3D, etc)\n#\n# vectors = [] # positions in vector space\n# labels = [] # keep track of words to label our data again later\n# for word in model.wv.vocab:\n# vectors.append(model.wv[word])\n# labels.append(word)\n#\n# # convert both lists into numpy vectors for reduction\n# vectors = np.asarray(vectors)\n# labels = np.asarray(labels)\n#\n# # reduce using t-SNE\n# vectors = np.asarray(vectors)\n# tsne = TSNE(n_components=num_dimensions, random_state=0)\n# vectors = tsne.fit_transform(vectors)\n#\n# x_vals = [v[0] for v in vectors]\n# y_vals = [v[1] for v in vectors]\n# return x_vals, y_vals, labels\n#\n#\n# x_vals, y_vals, labels = reduce_dimensions(model)\n#\n# def plot_with_plotly(x_vals, y_vals, labels, plot_in_notebook=True):\n#\n# trace = go.Scatter(x=x_vals, y=y_vals, mode='text', text=labels)\n# data = [trace]\n#\n# if plot_in_notebook:\n# init_notebook_mode(connected=True)\n# iplot(data, filename='word-embedding-plot')\n# else:\n# plot(data, filename='word-embedding-plot.html')\n#\n#\n# def plot_with_matplotlib(x_vals, y_vals, labels):\n#\n# random.seed(0)\n#\n# plt.figure(figsize=(12, 12))\n# plt.scatter(x_vals, y_vals)\n#\n# #\n# # Label randomly subsampled 25 data points\n# #\n# indices = list(range(len(labels)))\n# selected_indices = random.sample(indices, 25)\n# for i in selected_indices:\n# plt.annotate(labels[i], (x_vals[i], y_vals[i]))\n# plt.show()\n#\n# try:\n# get_ipython()\n# except Exception:\n# plot_function = plot_with_matplotlib\n# else:\n# plot_function = plot_with_plotly\n#\n# plot_function(x_vals, y_vals, labels)" ]
[ [ "matplotlib.font_manager.FontProperties", "matplotlib.pyplot.annotate", "matplotlib.pyplot.figure", "matplotlib.rc", "matplotlib.pyplot.show", "matplotlib.pyplot.scatter" ] ]
Repiphany/AoC
[ "d59badb62b82434bccd757e37d6d5c4d0bbf2838" ]
[ "2018/day_10/main.py" ]
[ "#!/usr/bin/env python3\n\nimport re\nimport numpy as np\nimport scipy.optimize\nimport sys\n\ndef moment(positions):\n center_of_mass = np.average(positions, axis = 0)\n return np.sum((positions - center_of_mass)**2)\n\ndef part_1(positions, velocities):\n f = lambda i : moment(positions + i*velocities)\n res = scipy.optimize.minimize(f, x0 = 0)\n pos_final = positions + int(res.x)*velocities\n x_min, y_min = np.min(pos_final, axis = 0).astype(int)\n x_max, y_max = np.max(pos_final, axis = 0).astype(int)\n for y in range(y_min, y_max + 1):\n for x in range(x_min, x_max + 1):\n if np.any(np.all((x, y) == pos_final, axis = 1)):\n sys.stdout.write('#')\n else:\n sys.stdout.write('.')\n sys.stdout.write('\\n')\n print(int(res.x))\n\nif __name__ == '__main__':\n positions, velocities = [], []\n with open('input', 'r') as f:\n for line in f:\n x, y, vx, vy = [int(i) for i in re.findall(r'-?\\d+', line)]\n positions.append((x, y))\n velocities.append((vx, vy))\n positions = np.asarray(positions, dtype = float)\n velocities = np.asarray(velocities, dtype = float)\n\n part_1(positions, velocities)\n\n" ]
[ [ "numpy.max", "numpy.asarray", "numpy.sum", "numpy.min", "numpy.all", "numpy.average" ] ]
lunarnautics/GamestonkTerminal
[ "a1564289c1f4071861240407a069ee57ecad8b84" ]
[ "gamestonk_terminal/common/residuals_analysis/residuals_api.py" ]
[ "\"\"\" Residuals API \"\"\"\n__docformat__ = \"numpy\"\n\nimport argparse\nfrom typing import List\nfrom collections import OrderedDict\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nfrom pandas.plotting import register_matplotlib_converters\nimport seaborn as sns\nfrom scipy import stats\nimport statsmodels.api as sm\n\n# pylint: disable=R0402\nimport matplotlib.gridspec as gridspec\nfrom statsmodels.graphics.gofplots import qqplot\nfrom statsmodels.tsa.stattools import adfuller, kpss, bds\nfrom statsmodels.stats.diagnostic import het_arch\nfrom gamestonk_terminal.helper_funcs import (\n parse_known_args_and_warn,\n plot_autoscale,\n check_positive,\n)\nfrom gamestonk_terminal.config_plot import PLOT_DPI\nfrom gamestonk_terminal import feature_flags as gtff\n\n\nregister_matplotlib_converters()\n\n\ndef fit(\n other_args: List[str],\n ticker: str,\n stock: pd.Series,\n model_name: str,\n model: pd.Series,\n):\n \"\"\"Plot model fitting\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n ticker : str\n Ticker of the stock\n stock : pd.Series\n Stock data\n model_name : str\n Model fitting name in use\n model : pd.Series\n Model fit data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"fit\",\n description=\"\"\"\n Plot model fitting\n \"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)\n\n plt.plot(stock)\n plt.plot(model)\n plt.title(f\"{model_name} model fit on {ticker}\")\n plt.xlim(stock.index[0], stock.index[-1])\n plt.xlabel(\"Time\")\n plt.ylabel(\"Share Price ($)\")\n plt.legend([ticker, model_name])\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef res(\n other_args: List[str],\n ticker: str,\n stock: pd.Series,\n model_name: str,\n residuals: List[float],\n):\n \"\"\"Plot residuals\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n ticker : str\n Ticker of the stock\n stock : pd.Series\n Stock data\n model_name : str\n Model fitting name in use\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"res\",\n description=\"\"\"\n Plot residuals\n \"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)\n\n plt.plot(stock[1:].index, residuals)\n plt.title(f\"Residuals from {model_name} model fit on {ticker}\")\n plt.xlim(stock[1:].index[0], stock.index[-1])\n plt.xlabel(\"Time\")\n plt.ylabel(\"Share Price ($)\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef hist(\n other_args: List[str],\n ticker: str,\n model_name: str,\n residuals: List[float],\n):\n \"\"\"Histogram and density curve\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n ticker : str\n Ticker of the stock\n model_name : str\n Model fitting name 
in use\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"hist\",\n description=\"\"\"\n Histogram and density curve\n \"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI, constrained_layout=True)\n\n sns.distplot(\n residuals,\n bins=35,\n color=\"blue\",\n hist=True,\n hist_kws={\"edgecolor\": \"black\"},\n kde=True,\n kde_kws={\"color\": \"black\", \"lw\": 3, \"label\": \"KDE\"},\n rug=True,\n rug_kws={\"edgecolor\": \"orange\"},\n )\n plt.title(f\"Histogram with Density from {model_name} fit on {ticker}\")\n plt.ylabel(\"Density\")\n plt.xlabel(\"Share Price\")\n plt.grid(True)\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef plot_qqplot(\n other_args: List[str],\n ticker: str,\n model_name: str,\n residuals: List[float],\n):\n \"\"\"Qqplot time series against a standard normal curve\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n ticker : str\n Ticker of the stock\n model_name : str\n Model fitting name in use\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"qqplot\",\n description=\"\"\"\n Qqplot time series against a standard normal curve\n \"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI, constrained_layout=True)\n\n qqplot(residuals, stats.distributions.norm, fit=True, line=\"45\", ax=plt.gca())\n plt.title(f\"Q-Q plot residuals from {model_name} on {ticker}\")\n plt.ylabel(\"Sample quantiles\")\n plt.xlabel(\"Theoretical quantiles\")\n plt.grid(True)\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef acf(\n other_args: List[str],\n ticker: str,\n model_name: str,\n residuals: List[float],\n):\n \"\"\"Plot (partial) auto-correlation function\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n ticker : str\n Ticker of the stock\n model_name : str\n Model fitting name in use\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"acf\",\n description=\"\"\"\n Plot (partial) auto-correlation function\n \"\"\",\n )\n parser.add_argument(\n \"-l\",\n \"--lags\",\n dest=\"lags\",\n type=check_positive,\n default=40,\n help=\"maximum lags to display in plots\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n fig = plt.figure(\n figsize=plot_autoscale(), dpi=PLOT_DPI, constrained_layout=True\n )\n spec = gridspec.GridSpec(ncols=1, nrows=2, figure=fig)\n\n # Auto-correlation function for original time series\n ax_acf = fig.add_subplot(spec[0, 0])\n sm.graphics.tsa.plot_acf(residuals, lags=ns_parser.lags, ax=ax_acf)\n plt.title(\n f\"Auto-Correlation function applied to Residuals from {model_name} on {ticker}\"\n )\n # Partial auto-correlation function for original time series\n ax_pacf = fig.add_subplot(spec[1, 0])\n sm.graphics.tsa.plot_pacf(residuals, lags=ns_parser.lags, ax=ax_pacf)\n plt.title(\n f\"Partial Auto-Correlation function applied to Residuals from {model_name} on {ticker}\"\n )\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n print(\"\")\n\n except 
Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef normality(\n other_args: List[str],\n residuals: List[float],\n):\n \"\"\"Normality tests\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"normality\",\n description=\"\"\"\n Normality tests\n \"\"\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n # Kurtosis\n # Measures height and sharpness of the central peak relative to that of a standard bell curve\n k, kpval = stats.kurtosistest(residuals)\n\n # Skewness\n # Measure of the asymmetry of the probability distribution of a random variable about its mean\n s, spval = stats.skewtest(residuals)\n\n # Jarque-Bera goodness of fit test on sample data\n # Tests if the sample data has the skewness and kurtosis matching a normal distribution\n jb, jbpval = stats.jarque_bera(residuals)\n\n # Shapiro\n # The Shapiro-Wilk test tests the null hypothesis that the data was drawn from a normal distribution.\n sh, shpval = stats.shapiro(residuals)\n\n l_statistic = [k, s, jb, sh]\n l_pvalue = [kpval, spval, jbpval, shpval]\n\n print(\n pd.DataFrame(\n [l_statistic, l_pvalue],\n columns=[\"Kurtosis\", \"Skewness\", \"Jarque-Bera\", \"Shapiro-Wilk\"],\n index=[\"Statistic\", \"p-value\"],\n )\n .round(5)\n .to_string()\n )\n\n print(\"\")\n kurtosis_val = stats.kurtosis(residuals, fisher=True)\n print(\"Kurtosis value: %.4f\" % kurtosis_val)\n skew_val = stats.skew(residuals)\n print(\"Skewness value: %.4f\" % skew_val)\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef goodness(\n other_args: List[str],\n residuals: List[float],\n):\n \"\"\"Goodness of fit tests\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"goodness\",\n description=\"\"\"\n Goodness of fit tests\n \"\"\",\n )\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n # Kolmogorov-Smirnov Test goodness of fit test on sample data\n ks, kspval = stats.kstest(residuals, \"norm\")\n print(\"Kolmogorov-Smirnov Test\")\n print(\"Statistic: %.4f\" % ks)\n print(\"p-value: %.4f\" % kspval)\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef arch(\n other_args: List[str],\n residuals: List[float],\n):\n \"\"\"Autoregressive conditional heteroscedasticity with Engle's test\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"arch\",\n description=\"\"\"\n Autoregressive conditional heteroscedasticity with Engle's test\n \"\"\",\n )\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n # Engle's Test for Autoregressive Conditional Heteroscedasticity (ARCH)\n lm, lmpval, fval, fpval = het_arch(residuals)\n print(\"Lagrange multiplier test statistic\")\n print(\"Statistic: %.4f\" % lm)\n print(\"p-value: %.4f\" % lmpval)\n print(\"\")\n print(\"fstatistic for F test\")\n print(\"Statistic: %.4f\" % fval)\n print(\"p-value: %.4f\" % fpval)\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef unitroot(\n other_args: List[str],\n 
residuals: List[float],\n):\n \"\"\"Unit root test / stationarity (ADF, KPSS)\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"arch\",\n description=\"\"\"\n Unit root test / stationarity (ADF, KPSS)\n \"\"\",\n )\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n # The Augmented Dickey-Fuller test\n # Used to test for a unit root in a univariate process in the presence of serial correlation.\n # regression{‘c’,’ct’,’ctt’,’nc’} 'c' - Constant and 't'-trend order to include in regression\n # Note: 'ct' - The data is stationary around a trend\n result = adfuller(residuals, regression=\"c\")\n print(\"Augmented Dickey Fuller Test\")\n print(\"ADF Statistic: %.4f\" % result[0])\n print(\"p-value: %.4f\" % result[1])\n print(\"Used lags: %d\" % result[2])\n print(\"Num obs: %d\" % result[3])\n print(\"Critical Values:\")\n d = OrderedDict(sorted(result[4].items(), key=lambda t: t[1]))\n for key, value in d.items():\n print(f\"\\t{key}: {value:.3f}\")\n print(\"\")\n\n # Kwiatkowski-Phillips-Schmidt-Shin test\n # Test for level or trend stationarity\n # Note: regressionstr{‘c’, ‘ct’}\n # regressionstr{‘c’, ‘ct’} where:\n # ‘c’ : The data is stationary around a constant (default).\n # ‘ct’ : The data is stationary around a trend.\n # lags{None, ‘auto’, ‘legacy’}\n # see: https://www.statsmodels.org/dev/generated/statsmodels.tsa.stattools.kpss.html\n print(\"Kwiatkowski-Phillips-Schmidt-Shin Test\")\n result = kpss(residuals, regression=\"c\", nlags=\"auto\")\n print(\"KPSS Statistic: %.4f\" % result[0])\n print(\"Critical Values:\")\n d = OrderedDict(sorted(result[3].items(), key=lambda t: t[1], reverse=True))\n for key, value in d.items():\n print(f\"\\t{key}: {value:.3f}\")\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n\n\ndef independence(\n other_args: List[str],\n residuals: List[float],\n):\n \"\"\"Tests independent and identically distributed (i.i.d.) time series (BDS)\n\n Parameters\n ----------\n other_args : str\n Command line arguments to be processed with argparse\n residuals : List[float]\n Residuals data\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"independence\",\n description=\"\"\"\n Tests independent and identically distributed (i.i.d.) time series (BDS)\n \"\"\",\n )\n try:\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n result = bds(residuals, max_dim=6)\n print(\"BDS Test\")\n print(f\"Dim 2: z-static {result[0][0]:.4f} Prob {result[1][0]:.4f}\")\n print(f\"Dim 3: z-static {result[0][1]:.4f} Prob {result[1][1]:.4f}\")\n print(f\"Dim 4: z-static {result[0][2]:.4f} Prob {result[1][2]:.4f}\")\n print(f\"Dim 5: z-static {result[0][3]:.4f} Prob {result[1][3]:.4f}\")\n print(f\"Dim 6: z-static {result[0][4]:.4f} Prob {result[1][4]:.4f}\")\n\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n return\n" ]
[ [ "matplotlib.pyplot.xlim", "scipy.stats.skew", "matplotlib.pyplot.minorticks_on", "pandas.DataFrame", "scipy.stats.kurtosistest", "matplotlib.pyplot.gca", "scipy.stats.kurtosis", "pandas.plotting.register_matplotlib_converters", "scipy.stats.skewtest", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.ion", "scipy.stats.jarque_bera", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.ylabel", "scipy.stats.kstest", "scipy.stats.shapiro" ] ]
py1sl/neutronicstool
[ "da5e459e47bb67afbef9a9387e054023b6c0f048" ]
[ "format_from_EXCEL.py" ]
[ "\"\"\"\r\nThis tool reads in excel data, formats appropriately and plots graph of beam\r\ncurrent cycles over time.\r\nneeds xlrd package\r\n\"\"\"\r\n\r\nimport re\r\nimport pandas as pd\r\nimport logging\r\nimport utilities\r\n\r\n\r\ndef getdate():\r\n \"\"\"function to select appropriate start and end date for range of\r\n cycles we are interested in\r\n \"\"\"\r\n p = re.compile('[^0-9\\s]+')\r\n while True:\r\n date = input(\"Please input date in the format YYYY M D \\n\")\r\n m = p.search(date) # checks for any non numbers\r\n\r\n if m:\r\n logging.debug('Looks like you have a typo.')\r\n\r\n else:\r\n datelst = date.split() # splits along whitespace into list of strings\r\n datelst = list(map(int, datelst)) # converts list of strings into list of integers\r\n if datelst[1] > 12 or datelst[1] <= 0:\r\n logging.debug('Your month looks a little funny.')\r\n if datelst[2] > 31 or datelst[2] <= 0:\r\n logging.debug('Your day value looks strange.')\r\n else:\r\n logging.debug('I work!')\r\n return(datelst)\r\n # going to have to convert this string of integers into datetime data type\r\n\r\n\r\ndef findrng(date1, date2):\r\n \"\"\"\r\n Takes start and end date, finds the number of days between\r\n them.\r\n \"\"\"\r\n days = pd.date_range(date1, date2, freq='D')\r\n return days\r\n\r\n\r\ndef formatExcel(file):\r\n \"\"\"\r\n Takes data of interest in from excel file and formats to create a pandas\r\n dataframe. Currently acts on whole set of data.\r\n\r\n \"\"\"\r\n cols = \"B,C,I\"\r\n df = pd.read_excel(file, header=None, sheet_name='Data', skiprows=[0,1,2,3,4,5],na_values=['NA'], usecols = cols)\r\n df.columns = [\"Start\", \"Finish\", \"Average µA\"]\r\n df = df.drop(df.index[86:95])\r\n\r\n # Take start and end time for whole dataset\r\n # Date selectivity goes here, enter manually or select from excel file\r\n # check if we are in the correct range\r\n\r\n print(\"Please choose your start date\")\r\n start_date = getdate()\r\n print(start_date)\r\n\r\n print(\"Please choose your end date\")\r\n end_date = getdate()\r\n print(end_date)\r\n\r\n start_plot = pd.Timestamp(start_date[0], start_date[1], start_date[2], 0, 0, 0)\r\n end_plot = pd.Timestamp(end_date[0], end_date[1], end_date[2], 0, 0, 0)\r\n\r\n # Find range in days between start and end points\r\n rng = pd.date_range(start_plot, end_plot, freq='D')\r\n\r\n # Make empty dataset\r\n df0 = pd.DataFrame(index=rng, columns=[\"Average µA\"])\r\n df0 = df0.fillna(0)\r\n \r\n df['Dates'] = df.apply(lambda x: findrng(x['Start'], x['Finish']), axis=1)\r\n \"\"\"Uses findrng function on 'Start' and 'Finish' columns, creates a dataframe\r\n 'Dates' containing a set of days spanning each cycle run.\r\n \"\"\"\r\n\r\n df2 = pd.DataFrame()\r\n\r\n \"\"\"\"This loop takes each of the days in df['Dates'], matches it to its\r\n correct current value and appends that to our final dataframe df2.\r\n \"\"\"\r\n n = 0\r\n for j in df.iloc[:, 3]:\r\n n += 1\r\n for i in df.iloc[n-1][3]:\r\n df2 = df2.append({'Average µA': df.iloc[n-1][2], 'Dates': i}, ignore_index=True)\r\n\r\n df2 = df2.set_index('Dates')\r\n \"\"\"Uses dates column as index. 
\"\"\"\r\n\r\n df2 = df2.combine_first(df0)\r\n \"\"\"Ensures that empty values are set to zero through combining with an\r\n empty dataframe\"\"\"\r\n\r\n # chop data frame and only keep relevant data\r\n df2 = df2[start_plot:end_plot]\r\n\r\n return df2\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \r\n utilities.setup_logging()\r\n df2 = formatExcel('cyclemainoperationalparameters.xlsx')\r\n # select from menu which file to load\r\n utilities.plot_irrad(df2)\r\n" ]
[ [ "pandas.Timestamp", "pandas.date_range", "pandas.DataFrame", "pandas.read_excel" ] ]
cogitoergoread/muszi-macrohard.hu
[ "e9bbd36b789e670f96622a3a2ba8327f0d897561" ]
[ "rlcard3/agents/dqn_agent_pytorch.py" ]
[ "''' DQN agent\n\nThe code is derived from https://github.com/dennybritz/reinforcement-learning/blob/master/DQN/dqn.py\n\nCopyright (c) 2019 Matthew Judell\nCopyright (c) 2019 DATA Lab at Texas A&M University\nCopyright (c) 2016 Denny Britz\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom collections import namedtuple\nfrom copy import deepcopy\n\nfrom rlcard3.agents.dqn_agent import Memory\nfrom rlcard3.utils.utils import remove_illegal\n\nTransition = namedtuple('Transition', ['state', 'action', 'reward', 'next_state', 'done'])\n\n\nclass DQNAgent(object):\n '''\n Approximate clone of rlcard3.agents.dqn_agent.DQNAgent\n that depends on PyTorch instead of Tensorflow\n '''\n def __init__(self,\n scope,\n replay_memory_size=20000,\n replay_memory_init_size=100,\n update_target_estimator_every=1000,\n discount_factor=0.99,\n epsilon_start=1.0,\n epsilon_end=0.1,\n epsilon_decay_steps=20000,\n batch_size=32,\n action_num=2,\n state_shape=None,\n train_every=1,\n mlp_layers=None,\n learning_rate=0.00005,\n device=None):\n\n '''\n Q-Learning algorithm for off-policy TD control using Function Approximation.\n Finds the optimal greedy policy while following an epsilon-greedy policy.\n\n Args:\n scope (str): The name of the DQN agent\n replay_memory_size (int): Size of the replay memory\n replay_memory_init_size (int): Number of random experiences to sampel when initializing\n the reply memory.\n update_target_estimator_every (int): Copy parameters from the Q estimator to the\n target estimator every N steps\n discount_factor (float): Gamma discount factor\n epsilon_start (int): Chance to sample a random action when taking an action.\n Epsilon is decayed over time and this is the start value\n epsilon_end (int): The final minimum value of epsilon after decaying is done\n epsilon_decay_steps (int): Number of steps to decay epsilon over\n batch_size (int): Size of batches to sample from the replay memory\n evaluate_every (int): Evaluate every N steps\n action_num (int): The number of the actions\n state_space (list): The space of the state vector\n train_every (int): Train the network every X steps.\n mlp_layers (list): The layer number and the dimension of each layer in MLP\n learning_rate (float): The learning rate of the DQN agent.\n device (torch.device): whether to use the cpu or gpu\n '''\n self.use_raw = False\n self.scope = scope\n self.replay_memory_init_size = replay_memory_init_size\n self.update_target_estimator_every = 
update_target_estimator_every\n self.discount_factor = discount_factor\n self.epsilon_decay_steps = epsilon_decay_steps\n self.batch_size = batch_size\n self.action_num = action_num\n self.train_every = train_every\n\n # Torch device\n if device is None:\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n else:\n self.device = device\n\n # Total timesteps\n self.total_t = 0\n\n # Total training step\n self.train_t = 0\n\n # The epsilon decay scheduler\n self.epsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)\n\n # Create estimators\n self.q_estimator = Estimator(action_num=action_num, learning_rate=learning_rate, state_shape=state_shape, \\\n mlp_layers=mlp_layers, device=self.device)\n self.target_estimator = Estimator(action_num=action_num, learning_rate=learning_rate, state_shape=state_shape, \\\n mlp_layers=mlp_layers, device=self.device)\n\n # Create replay memory\n self.memory = Memory(replay_memory_size, batch_size)\n\n def feed(self, ts):\n ''' Store data in to replay buffer and train the agent. There are two stages.\n In stage 1, populate the memory without training\n In stage 2, train the agent every several timesteps\n\n Args:\n ts (list): a list of 5 elements that represent the transition\n '''\n (state, action, reward, next_state, done) = tuple(ts)\n self.feed_memory(state['obs'], action, reward, next_state['obs'], done)\n self.total_t += 1\n tmp = self.total_t - self.replay_memory_init_size\n if tmp>=0 and tmp%self.train_every == 0:\n self.train()\n\n def step(self, state):\n ''' Predict the action for genrating training data but\n have the predictions disconnected from the computation graph\n\n Args:\n state (numpy.array): current state\n\n Returns:\n action (int): an action id\n '''\n A = self.predict(state['obs'])\n A = remove_illegal(A, state['legal_actions'])\n action = np.random.choice(np.arange(len(A)), p=A)\n return action\n\n def eval_step(self, state):\n ''' Predict the action for evaluation purpose.\n\n Args:\n state (numpy.array): current state\n\n Returns:\n action (int): an action id\n '''\n q_values = self.q_estimator.predict_nograd(np.expand_dims(state['obs'], 0))[0]\n probs = remove_illegal(np.exp(q_values), state['legal_actions'])\n best_action = np.argmax(probs)\n return best_action, probs\n\n def predict(self, state):\n ''' Predict the action probabilities but have them\n disconnected from the computation graph\n\n Args:\n state (numpy.array): current state\n\n Returns:\n q_values (numpy.array): a 1-d array where each entry represents a Q value\n '''\n epsilon = self.epsilons[min(self.total_t, self.epsilon_decay_steps-1)]\n A = np.ones(self.action_num, dtype=float) * epsilon / self.action_num\n q_values = self.q_estimator.predict_nograd(np.expand_dims(state, 0))[0]\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n\n def train(self):\n ''' Train the network\n\n Returns:\n loss (float): The loss of the current batch.\n '''\n state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample()\n\n # Calculate best next actions using Q-network (Double DQN)\n q_values_next = self.q_estimator.predict_nograd(next_state_batch)\n best_actions = np.argmax(q_values_next, axis=1)\n\n # Evaluate best next actions using Target-network (Double DQN)\n q_values_next_target = self.target_estimator.predict_nograd(next_state_batch)\n target_batch = reward_batch + np.invert(done_batch).astype(np.float32) * \\\n self.discount_factor * 
q_values_next_target[np.arange(self.batch_size), best_actions]\n\n # Perform gradient descent update\n state_batch = np.array(state_batch)\n\n loss = self.q_estimator.update(state_batch, action_batch, target_batch)\n print('\\rINFO - Agent {}, step {}, rl-loss: {}'.format(self.scope, self.total_t, loss), end='')\n\n # Update the target estimator\n if self.train_t % self.update_target_estimator_every == 0:\n self.target_estimator = deepcopy(self.q_estimator)\n print(\"\\nINFO - Copied model parameters to target network.\")\n\n self.train_t += 1\n\n def feed_memory(self, state, action, reward, next_state, done):\n ''' Feed transition to memory\n\n Args:\n state (numpy.array): the current state\n action (int): the performed action ID\n reward (float): the reward received\n next_state (numpy.array): the next state after performing the action\n done (boolean): whether the episode is finished\n '''\n self.memory.save(state, action, reward, next_state, done)\n\n def get_state_dict(self):\n ''' Get the state dict to save models\n\n Returns:\n (dict): A dict of model states\n '''\n q_key = self.scope + '_q_estimator'\n q_value = self.q_estimator.qnet.state_dict()\n target_key = self.scope + '_target_estimator'\n target_value = self.target_estimator.qnet.state_dict()\n return {q_key: q_value, target_key: target_value}\n\n def load(self, checkpoint):\n ''' Load model\n\n Args:\n checkpoint (dict): the loaded state\n '''\n q_key = self.scope + '_q_estimator'\n self.q_estimator.qnet.load_state_dict(checkpoint[q_key])\n target_key = self.scope + '_target_estimator'\n self.target_estimator.qnet.load_state_dict(checkpoint[target_key])\n\nclass Estimator(object):\n '''\n Approximate clone of rlcard3.agents.dqn_agent.Estimator that\n uses PyTorch instead of Tensorflow. All methods input/output np.ndarray.\n\n Q-Value Estimator neural network.\n This network is used for both the Q-Network and the Target Network.\n '''\n\n def __init__(self, action_num=2, learning_rate=0.001, state_shape=None, mlp_layers=None, device=None):\n ''' Initilalize an Estimator object.\n\n Args:\n action_num (int): the number output actions\n state_shape (list): the shape of the state space\n mlp_layers (list): size of outputs of mlp layers\n device (torch.device): whether to use cpu or gpu\n '''\n self.action_num = action_num\n self.learning_rate=learning_rate\n self.state_shape = state_shape\n self.mlp_layers = mlp_layers\n self.device = device\n\n # set up Q model and place it in eval mode\n qnet = EstimatorNetwork(action_num, state_shape, mlp_layers)\n qnet = qnet.to(self.device)\n self.qnet = qnet\n self.qnet.eval()\n\n # initialize the weights using Xavier init\n for p in self.qnet.parameters():\n if len(p.data.shape) > 1:\n nn.init.xavier_uniform_(p.data)\n\n # set up loss function\n self.mse_loss = nn.MSELoss(reduction='mean')\n\n # set up optimizer\n self.optimizer = torch.optim.Adam(self.qnet.parameters(), lr=self.learning_rate)\n\n def predict_nograd(self, s):\n ''' Predicts action values, but prediction is not included\n in the computation graph. 
It is used to predict optimal next\n actions in the Double-DQN algorithm.\n\n Args:\n s (np.ndarray): (batch, state_len)\n\n Returns:\n np.ndarray of shape (batch_size, NUM_VALID_ACTIONS) containing the estimated\n action values.\n '''\n with torch.no_grad():\n s = torch.from_numpy(s).float().to(self.device)\n q_as = self.qnet(s).cpu().numpy()\n return q_as\n\n def update(self, s, a, y):\n ''' Updates the estimator towards the given targets.\n In this case y is the target-network estimated\n value of the Q-network optimal actions, which\n is labeled y in Algorithm 1 of Minh et al. (2015)\n\n Args:\n s (np.ndarray): (batch, state_shape) state representation\n a (np.ndarray): (batch,) integer sampled actions\n y (np.ndarray): (batch,) value of optimal actions according to Q-target\n\n Returns:\n The calculated loss on the batch.\n '''\n self.optimizer.zero_grad()\n\n self.qnet.train()\n\n s = torch.from_numpy(s).float().to(self.device)\n a = torch.from_numpy(a).long().to(self.device)\n y = torch.from_numpy(y).float().to(self.device)\n\n # (batch, state_shape) -> (batch, action_num)\n q_as = self.qnet(s)\n\n # (batch, action_num) -> (batch, )\n Q = torch.gather(q_as, dim=-1, index=a.unsqueeze(-1)).squeeze(-1)\n\n # update model\n batch_loss = self.mse_loss(Q, y)\n batch_loss.backward()\n self.optimizer.step()\n batch_loss = batch_loss.item()\n\n self.qnet.eval()\n\n return batch_loss\n\n\nclass EstimatorNetwork(nn.Module):\n ''' The function approximation network for Estimator\n It is just a series of tanh layers. All in/out are torch.tensor\n '''\n\n def __init__(self, action_num=2, state_shape=None, mlp_layers=None):\n ''' Initialize the Q network\n\n Args:\n action_num (int): number of legal actions\n state_shape (list): shape of state tensor\n mlp_layers (list): output size of each fc layer\n '''\n super(EstimatorNetwork, self).__init__()\n\n self.action_num = action_num\n self.state_shape = state_shape\n self.mlp_layers = mlp_layers\n\n # build the Q network\n layer_dims = [np.prod(self.state_shape)] + self.mlp_layers\n fc = [nn.Flatten()]\n fc.append(nn.BatchNorm1d(layer_dims[0]))\n for i in range(len(layer_dims)-1):\n fc.append(nn.Linear(layer_dims[i], layer_dims[i+1], bias=True))\n fc.append(nn.Tanh())\n fc.append(nn.Linear(layer_dims[-1], self.action_num, bias=True))\n self.fc_layers = nn.Sequential(*fc)\n\n def forward(self, s):\n ''' Predict action values\n\n Args:\n s (Tensor): (batch, state_shape)\n '''\n return self.fc_layers(s)\n" ]
[ [ "torch.nn.Linear", "numpy.exp", "numpy.invert", "torch.cuda.is_available", "numpy.prod", "numpy.argmax", "numpy.arange", "numpy.expand_dims", "torch.nn.Flatten", "numpy.array", "torch.nn.Sequential", "torch.nn.Tanh", "torch.nn.MSELoss", "torch.no_grad", "numpy.ones", "torch.nn.init.xavier_uniform_", "torch.from_numpy", "torch.nn.BatchNorm1d", "numpy.linspace" ] ]
LorenFrankLab/rec_to_nwb
[ "d0630f414662963ebbe23aedf8f3ce07628636bc" ]
[ "rec_to_nwb/processing/nwb/components/iterator/multi_thread_data_iterator.py" ]
[ "import concurrent.futures\n\nimport numpy as np\nfrom hdmf.data_utils import DataChunk\nfrom rec_to_nwb.processing.nwb.components.iterator.data_iterator import \\\n DataIterator\n\n\nclass MultiThreadDataIterator(DataIterator):\n def __init__(self, data, number_of_threads=6):\n DataIterator.__init__(self, data)\n self.number_of_threads = number_of_threads\n\n def __next__(self):\n if self._current_index < self.number_of_steps:\n number_of_threads_in_current_step = min(\n self.number_of_threads,\n self.number_of_files_in_single_dataset - self.current_file)\n with concurrent.futures.ThreadPoolExecutor() as executor:\n threads = [executor.submit(\n MultiThreadDataIterator.get_data_from_file,\n self.data, self.current_dataset, self.current_file + i)\n for i in range(number_of_threads_in_current_step)]\n data_from_multiple_files = ()\n for thread in threads:\n data_from_multiple_files += (thread.result(),)\n stacked_data_from_multiple_files = np.hstack(\n data_from_multiple_files)\n selection = self.get_selection(\n number_of_threads=number_of_threads_in_current_step,\n current_dataset=self.current_dataset,\n dataset_file_length=self.dataset_file_length,\n current_file=self.current_file,\n number_of_rows=self.number_of_rows)\n data_chunk = DataChunk(\n data=stacked_data_from_multiple_files, selection=selection)\n\n self._current_index += number_of_threads_in_current_step\n self.current_file += number_of_threads_in_current_step\n\n if self.current_file >= self.number_of_files_in_single_dataset:\n self.current_dataset += 1\n self.current_file = 0\n\n del stacked_data_from_multiple_files\n return data_chunk\n\n raise StopIteration\n\n next = __next__\n\n @staticmethod\n def get_data_from_file(data, current_dataset, current_file):\n return np.transpose(data.read_data(current_dataset, current_file))\n\n\n# TODO: finish this code and move to new file when data are extracted in a single file.\nclass ChunkedDataIterator(DataIterator):\n def __init__(self, data, number_of_threads=6, read_chunk_mb=100):\n DataIterator.__init__(self, data)\n self.number_of_threads = number_of_threads\n self.read_chunk_mb = read_chunk_mb\n # Figure out the size of each datafile in each dataset where one dataset is an epoch\n self.dataset_file_dims = ()\n for dataset in range(self.number_of_datasets):\n self.dataset_file_dims.append(data.get_data_dims(dataset, 0))\n\n def __next__(self):\n if self._current_index < self.number_of_steps:\n number_of_threads_in_current_step = min(\n self.number_of_threads,\n self.number_of_files_in_single_dataset - self.current_file)\n with concurrent.futures.ThreadPoolExecutor() as executor:\n threads = [executor.submit(\n MultiThreadDataIterator.get_data_from_file,\n self.data, self.current_dataset, self.current_file + i)\n for i in range(number_of_threads_in_current_step)]\n data_from_multiple_files = ()\n for thread in threads:\n data_from_multiple_files += (thread.result(),)\n stacked_data_from_multiple_files = np.hstack(\n data_from_multiple_files)\n selection = self.get_selection(\n number_of_threads=number_of_threads_in_current_step,\n current_dataset=self.current_dataset,\n dataset_file_length=self.dataset_file_length,\n current_file=self.current_file,\n number_of_rows=self.number_of_rows)\n data_chunk = DataChunk(\n data=stacked_data_from_multiple_files, selection=selection)\n\n self._current_index += number_of_threads_in_current_step\n self.current_file += number_of_threads_in_current_step\n\n if self.current_file >= self.number_of_files_in_single_dataset:\n self.current_dataset 
+= 1\n self.current_file = 0\n\n del stacked_data_from_multiple_files\n return data_chunk\n\n raise StopIteration\n\n next = __next__\n\n @staticmethod\n def get_data_from_file(data, current_dataset, current_file):\n return np.transpose(data.read_data(current_dataset, current_file))\n" ]
[ [ "numpy.hstack" ] ]
RodrigoSanMartin/deploy_API_sagemaker_pytorch_webapp_sentiment_analysis
[ "d7df00b91c05a96ccf0975c621704cacb9bb0a37" ]
[ "train/train.py" ]
[ "\nimport argparse\nimport json\nimport os\nimport pickle\nimport sys\nimport sagemaker_containers\nimport pandas as pd\nimport torch\nimport torch.optim as optim\nimport torch.utils.data\n\nfrom model import LSTMClassifier\n\n\ndef model_fn(model_dir):\n \"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the stored model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved word_dict.\n word_dict_path = os.path.join(model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'rb') as f:\n model.word_dict = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model\n\ndef _get_train_data_loader(batch_size, training_dir):\n print(\"Get train data loader.\")\n\n train_data = pd.read_csv(os.path.join(training_dir, \"train.csv\"), header=None, names=None)\n\n train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()\n train_X = torch.from_numpy(train_data.drop([0], axis=1).values).long()\n\n train_ds = torch.utils.data.TensorDataset(train_X, train_y)\n\n return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)\n\n\ndef train(model, train_loader, epochs, optimizer, loss_fn, device):\n \"\"\"\n This is the training method that is called by the PyTorch training script. The parameters\n passed are as follows:\n model - The PyTorch model that we wish to train.\n train_loader - The PyTorch DataLoader that should be used during training.\n epochs - The total number of epochs to train for.\n optimizer - The optimizer to use during training.\n loss_fn - The loss function used for training.\n device - Where the model and data should be loaded (gpu or cpu).\n \"\"\"\n \n # TODO: Paste the train() method developed in the notebook here.\n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0\n for batch in train_loader: \n batch_X, batch_y = batch\n \n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n # TODO: Complete this train method to train the model provided.\n \n model.zero_grad() # Reseting gradients for each batch \n forward_pass = model.forward(batch_X) #Perform a forward pass of our model on batch_X input. \n loss = loss_fn(forward_pass, batch_y) #Get the loss for this batch \n loss.backward() # Get Gradients \n optimizer.step() # New Parameters\n \n total_loss += loss.data.item()\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))\n \n\nif __name__ == '__main__':\n # All of the model parameters and training parameters are sent as arguments when the script\n # is executed. 
Here we set up an argument parser to easily access the parameters.\n\n parser = argparse.ArgumentParser()\n\n # Training Parameters\n parser.add_argument('--batch-size', type=int, default=512, metavar='N',\n help='input batch size for training (default: 512)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n\n # Model Parameters\n parser.add_argument('--embedding_dim', type=int, default=32, metavar='N',\n help='size of the word embeddings (default: 32)')\n parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',\n help='size of the hidden dimension (default: 100)')\n parser.add_argument('--vocab_size', type=int, default=5000, metavar='N',\n help='size of the vocabulary (default: 5000)')\n\n # SageMaker Parameters\n parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))\n parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])\n parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])\n\n args = parser.parse_args()\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"Using device {}.\".format(device))\n\n torch.manual_seed(args.seed)\n\n # Load the training data.\n train_loader = _get_train_data_loader(args.batch_size, args.data_dir)\n\n # Build the model.\n model = LSTMClassifier(args.embedding_dim, args.hidden_dim, args.vocab_size).to(device)\n\n with open(os.path.join(args.data_dir, \"word_dict.pkl\"), \"rb\") as f:\n model.word_dict = pickle.load(f)\n\n print(\"Model loaded with embedding_dim {}, hidden_dim {}, vocab_size {}.\".format(\n args.embedding_dim, args.hidden_dim, args.vocab_size\n ))\n\n # Train the model.\n optimizer = optim.Adam(model.parameters())\n loss_fn = torch.nn.BCELoss()\n\n train(model, train_loader, args.epochs, optimizer, loss_fn, device)\n\n # Save the parameters used to construct the model\n model_info_path = os.path.join(args.model_dir, 'model_info.pth')\n with open(model_info_path, 'wb') as f:\n model_info = {\n 'embedding_dim': args.embedding_dim,\n 'hidden_dim': args.hidden_dim,\n 'vocab_size': args.vocab_size,\n }\n torch.save(model_info, f)\n\n # Save the word_dict\n word_dict_path = os.path.join(args.model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'wb') as f:\n pickle.dump(model.word_dict, f)\n\n # Save the model parameters\n model_path = os.path.join(args.model_dir, 'model.pth')\n with open(model_path, 'wb') as f:\n torch.save(model.cpu().state_dict(), f)\n" ]
[ [ "torch.save", "torch.from_numpy", "torch.manual_seed", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.load", "torch.nn.BCELoss", "torch.utils.data.TensorDataset" ] ]
farr/RVChallenge
[ "1925682f1a19442560401a7fd2a5b2dde9472f11" ]
[ "setup.py" ]
[ "from setuptools import setup, Extension\nfrom Cython.Build import cythonize\nimport numpy as np\n\nsetup(\n name='rvchallenge',\n version=\"0.0.1\",\n description='An attempt at competing in the RVChallenge.',\n long_description='See https://rv-challenge.wikispaces.com',\n\n author='Will M. Farr',\n author_email='w.farr@bham.ac.uk',\n\n license='MIT',\n\n packages=['rvchallenge'],\n\n ext_modules = cythonize([Extension('kepler', ['rvchallenge/kepler.pyx'])]),\n include_dirs = [np.get_include()]\n)\n" ]
[ [ "numpy.get_include" ] ]
vicgalle/ARAMARL
[ "1f0e3d3f76b345e12ec58029dc62d92f33738084" ]
[ "engine.py" ]
[ "\"\"\"\nThis module implements several environments, i.e., the simulators in which agents will interact and learn.\nAny environment is characterized by the following two methods:\n * step : receives the actions taken by the agents, and returns the new state of the simulator and the rewards\n perceived by each agent, amongst other things.\n * reset : sets the simulator at the initial state.\n\"\"\"\n\nimport numpy as np\n\n\nclass RMG():\n \"\"\"\n A two-agent environment for a repeated matrix (symmetric) game.\n Possible actions for each agent are (C)ooperate (0) and (D)efect (1).\n The state is s_t = (a_{t-1}, b_{t-1}) with a_{t-1} and b_{t-1} the actions of the two players in the last turn,\n plus an initial state s_0.\n \"\"\"\n # Possible actions\n NUM_AGENTS = 2\n NUM_ACTIONS = 2\n NUM_STATES = NUM_AGENTS*NUM_ACTIONS + 1 # we add the initial state.\n\n def __init__(self, max_steps, payouts, batch_size=1):\n self.max_steps = max_steps\n self.batch_size = batch_size\n self.payout_mat = payouts\n self.available_actions = [\n np.ones((batch_size, self.NUM_ACTIONS), dtype=int)\n for _ in range(self.NUM_AGENTS)\n ]\n\n self.step_count = None\n\n def reset(self):\n self.step_count = 0\n init_state = np.zeros((self.batch_size, self.NUM_STATES))\n init_state[:, -1] = 1\n observations = [init_state, init_state]\n info = [{'available_actions': aa} for aa in self.available_actions]\n return observations, info\n\n def step(self, action):\n ac0, ac1 = action\n\n self.step_count += 1\n\n rewards = []\n\n # The state is a OHE vector indicating [CC, CD, DC, DD, initial], (iff NUM_STATES = 5)\n state0 = np.zeros((self.batch_size, self.NUM_STATES))\n state1 = np.zeros((self.batch_size, self.NUM_STATES))\n for i, (a0, a1) in enumerate(zip(ac0, ac1)): # iterates over batch dimension\n rewards.append([self.payout_mat[a1][a0], self.payout_mat[a0][a1]])\n state0[i, a0 * 2 + a1] = 1\n state1[i, a1 * 2 + a0] = 1\n rewards = list(map(np.asarray, zip(*rewards)))\n observations = [state0, state1]\n\n done = (self.step_count == self.max_steps)\n info = [{'available_actions': aa} for aa in self.available_actions]\n\n return observations, rewards, done, info\n\n\nclass AdvRw():\n \"\"\"\n A two-action stateless environment in which an adversary controls the reward\n \"\"\"\n\n def __init__(self, mode='friend', p=0.5):\n self._mode = mode\n # adversary estimation of our action\n self._policy = np.asarray([0.5, 0.5])\n self._learning_rate = 0.25\n self._p = p # probability for the neutral environment\n\n def reset(self):\n # self._policy = np.asarray([0.5, 0.5])\n return\n\n def step(self, action):\n\n if self._mode == 'friend':\n if np.argmax(self._policy) == action:\n reward = +50\n else:\n reward = -50\n elif self._mode == 'adversary':\n if np.argmax(self._policy) == action:\n reward = -50\n else:\n reward = +50\n elif self._mode == 'neutral':\n box = np.random.rand() < self._p\n if int(box) == action:\n reward = +50\n else:\n reward = -50\n\n self._policy = (self._learning_rate * np.array([1.0-action, action])\n + (1.0-self._learning_rate) * self._policy)\n self._policy /= np.sum(self._policy)\n\n # print('---')\n #print('r', reward)\n #print('p', self._policy)\n # print('---')\n\n return None, (reward, -reward), True, None\n\n\nclass AdvRw2():\n \"\"\"\n Friend or Foe modified to model adversary separately..\n \"\"\"\n\n def __init__(self, max_steps, payout=50, batch_size=1):\n self.max_steps = max_steps\n self.batch_size = batch_size\n self.payout = payout\n self.available_actions = np.array([0, 1])\n 
self.step_count = 0\n\n def reset(self):\n self.step_count = 0\n return\n\n def step(self, action):\n ac0, ac1 = action\n\n self.step_count += 1\n\n dm_reward = self.payout if ac0 == ac1 else -self.payout\n\n rewards = [dm_reward, -dm_reward] # Assuming zero-sum...\n observations = None\n\n done = (self.step_count == self.max_steps)\n\n return observations, rewards, done\n#\n\n\nclass AdvRwGridworld():\n \"\"\"\n Friend or Foe modified to model adversary separately, with gridworld\n \"\"\"\n\n def __init__(self, max_steps, batch_size=1):\n self.H = 4\n self.W = 3\n self.world = np.array([self.H, self.W]) # The gridworld\n\n self.targets = np.array([[0, 0], [0, 2]]) # Position of the targets\n self.DM = np.array([3, 1]) # Initial position of the DM\n\n self.max_steps = max_steps\n self.batch_size = batch_size\n self.available_actions_DM = np.array(\n [0, 1, 2, 3]) # Up, right, down, left\n self.available_actions_Adv = np.array([0, 1]) # Select target 1 or 2.\n self.step_count = 0\n\n def reset(self):\n self.step_count = 0\n self.DM = np.array([3, 1])\n return\n\n def _coord2int(self, pos):\n return pos[0] + self.H*pos[1]\n\n def step(self, action):\n ac_DM, ac_Adv = action\n\n self.step_count += 1\n\n if ac_DM == 0: # Up\n self.DM[0] = np.maximum(0, self.DM[0] - 1)\n elif ac_DM == 1: # Right\n self.DM[1] = np.minimum(self.W - 1, self.DM[1] + 1)\n elif ac_DM == 2: # Down\n self.DM[0] = np.minimum(self.H - 1, self.DM[0] + 1)\n elif ac_DM == 3: # Left\n self.DM[1] = np.maximum(0, self.DM[1] - 1)\n\n done = False\n dm_reward = -1 # One step more\n adv_reward = 0\n\n # Check if DM is @ targets, then finish\n\n if np.all(self.DM == self.targets[0, :]):\n if ac_Adv == 0:\n dm_reward += 50\n adv_reward -= 50\n else:\n dm_reward -= 50\n adv_reward += 50\n done = True\n\n if np.all(self.DM == self.targets[1, :]):\n if ac_Adv == 1:\n dm_reward += 50\n adv_reward -= 50\n else:\n dm_reward -= 50\n adv_reward += 50\n done = True\n\n # Check if step limit, then finish\n\n if self.step_count == self.max_steps:\n done = True\n\n #dm_reward = self.payout if ac0 == ac1 else -self.payout\n\n # rewards = [dm_reward, -dm_reward] #Assuming zero-sum...\n #observations = None\n\n #done = (self.step_count == self.max_steps)\n\n return self._coord2int(self.DM), (dm_reward, adv_reward), done\n\n\nclass Blotto():\n \"\"\"\n Blotto game with multiple adversaries\n \"\"\"\n\n def __init__(self, max_steps, payout=50, batch_size=1, deterministic=True):\n self.max_steps = max_steps\n self.batch_size = batch_size\n #self.payout = payout\n self.available_actions = np.array([0, 1])\n self.step_count = 0\n self.deterministic = deterministic\n\n def reset(self):\n self.step_count = 0\n return\n\n def step(self, actions):\n \"\"\" action[0] is that of the defender \"\"\"\n self.step_count += 1\n\n num_attackers = len(actions) - 1\n\n actions = np.asarray(actions)\n\n att_rew = np.sum(actions[1:, ], axis=0)\n tmp = actions[0, ] - att_rew\n\n draw_pos = tmp == 0\n if self.deterministic != True:\n tmp[tmp == 0] = np.random.choice(\n [-1, 1], size=len(tmp[tmp == 0]))*(actions[0, draw_pos] > 0)\n\n\n ind = np.sum(actions, axis=0) > 0 ## to see in which position there was at least one resource\n\n tmp = tmp*ind\n\n tmp[tmp < 0] = -1 # Defender looses corresponding position\n tmp[tmp > 0] = 1 # Defender wins corresponding position\n\n # print('tmp', tmp)\n\n reward_dm = tmp.sum()\n\n tmp2 = actions[1:, ] - actions[0, ]\n tmp2[tmp2 > 0] = 1\n tmp2[tmp2 < 0] = -1\n\n # print('tmp2', tmp2)\n\n # s = np.sum(actions[1:, draw_pos], axis=0)\n 
z = draw_pos & actions[1:, ]\n\n z_new = z/z.sum(axis=0)\n z_new = np.nan_to_num(z_new)\n z_new = z_new*ind\n\n # print('z_new', z_new)\n\n #z_new = np.zeros_like(z_new)\n z_new[:, draw_pos] = z_new[:, draw_pos]*np.sign(-tmp[draw_pos])\n\n tmp2[z == 1.] = 0\n\n # print('tmp2', tmp2)\n\n z_new = tmp2 + z_new\n\n # print('z-new', z_new)\n # print('tmp2', tmp2)\n\n rewards_atts = np.sum(z_new*(actions[1:, ] > 0), axis=1)\n\n rewards = [reward_dm]\n\n for r in rewards_atts:\n rewards.append(r)\n\n observations = None\n\n done = (self.step_count == self.max_steps)\n\n return observations, rewards, done\n\n\nclass modified_Blotto():\n \"\"\"\n Modified Blotto game with multiple adversaries (we just care about positions\n where there has been some attack)\n \"\"\"\n\n def __init__(self, max_steps, payout=50, batch_size=1, deterministic=True):\n self.max_steps = max_steps\n self.batch_size = batch_size\n #self.payout = payout\n self.available_actions = np.array([0, 1])\n self.step_count = 0\n self.deterministic = deterministic\n\n def reset(self):\n self.step_count = 0\n return\n\n def step(self, actions):\n \"\"\" action[0] is that of the defender \"\"\"\n self.step_count += 1\n\n actions = np.asarray(actions)\n\n ## Defender's Reward\n att_rew = np.sum(actions[1:, ], axis=0)\n attacked_pos = att_rew > 0 ## indicates in which position attacks where performed\n\n tmp = actions[0, ] - att_rew\n tmp[np.logical_not(attacked_pos)] = 0.0\n\n # Code non-deterministic case ??\n\n tmp[tmp < 0] = -1 # Defender looses corresponding position\n tmp[tmp > 0] = 1 # Defender wins corresponding position\n reward_dm = tmp.sum()\n\n ## Attacker's Reward\n tmp_att = -tmp\n\n h = actions[1:] > 0\n units = tmp_att / np.sum(h, axis=0)\n units = np.nan_to_num(units)\n\n rewards_att = h*units\n rewards_atts = np.sum(rewards_att, axis=1)\n\n rewards = [reward_dm]\n\n for r in rewards_atts:\n rewards.append(r)\n\n observations = None\n\n done = (self.step_count == self.max_steps)\n\n return observations, rewards, done\n\n\nclass Urban():\n \"\"\"\n A two-agent environment for a urban resource allocation problem.\n \"\"\"\n\n def __init__(self):\n # The state is designated by s = (s_0, s_1, s_2, s_3)\n # s_0 represents wheter we are in the initial state or not\n # s_i, i>0 represent whether the attack was successful on the site i.\n self.state = np.array([1, 0, 0, 0])\n self.step_count = 0\n self.max_steps = 2 # as in the ARA for Urban alloc. 
paper\n self.payoffs = np.array([1., 0.75, 2.]) # v_i from the paper\n\n # Transition dynamics\n\n # p(s_1_i = 1 | d1_i, a_i) for site i\n self.p_s1_d1_a = np.array([[0, 0.85, 0.95],\n [0, 0.6, 0.75],\n [0, 0.3, 0.5],\n [0, 0.05, 0.1],\n [0, 0, 0.05]])\n\n # p(s_2_i = 1 | s_1_i, d2_i) for site i\n self.p_s2_s1_d2 = np.array([[0, 0, 0, 0, 0],\n [1., 0.95, 0.8, 0.6, 0.4]])\n\n self.n_sites = 3\n self.k = 0.005\n self.rho = 0.1\n self.c_A = 10.\n self.c_D = 10.\n\n self.available_actions_DM = [i for i in range(5**self.n_sites)] # up to four units in each site\n self.n_states = 2 ** (self.n_sites + 1)\n\n def state2idx(self, state):\n \"\"\"\n In [19]: state = np.array([1, 0, 0, 1])\n In [20]: state2idx(state)\n Out[20]: 9\n \"\"\"\n pows = np.array([1 << i for i in range(len(state))[::-1]])\n return np.dot(pows, state)\n\n def idx2state(self, idx):\n \"\"\"\n In [28]: idx = 9\n In [30]: idx2state(idx)\n Out[30]: array([1, 0, 0, 1])\n \"\"\"\n return (idx & (1 << np.arange(len(self.state))) > 0).astype(int)\n\n def actionDM2idx(self, a):\n \"\"\" Now we have 3 sites, in which we can defend with up to 5 units. \"\"\"\n pows = np.array([5**i for i in range(self.n_sites)[::-1]])\n return np.dot(pows, a)\n\n def idx2actionDM(self, idx):\n return list(map(int, (list(np.base_repr(idx, 5, padding=3))[-self.n_sites:])))\n\n def valid_actionDM(self, state_idx, action_idx, prev_action_idx):\n\n action = self.idx2actionDM(action_idx)\n prev_action = self.idx2actionDM(prev_action_idx)\n state = self.idx2state(state_idx)\n\n if state[0] == 1: #initial state\n #print('a', action)\n return np.sum(action) == 4\n else: # second move\n #print('b', action, prev_action)\n c1 = np.sum(action) == 4\n c2 = action[0] <= prev_action[0] + prev_action[1]\n c3 = action[1] <= prev_action[0] + prev_action[1] + prev_action[2]\n c4 = action[2] <= prev_action[1] + prev_action[2]\n return c1 & c2 & c3 & c4\n\n def reset(self):\n self.step_count = 0\n self.state = np.array([1, 0, 0, 0])\n return\n\n def step(self, action):\n\n # first action is that from the DM\n ac0, ac1 = action\n\n self.step_count += 1\n\n if self.step_count == 1:\n\n self.state = np.array([0, 0, 0, 0])\n for i in range(self.n_sites):\n p = self.p_s1_d1_a[ac0[i], ac1[i]]\n u = np.random.rand()\n if u <= p:\n self.state[i + 1] = 1 # success\n\n rewards = [0., 0.] 
# no rewards until end of episode\n observations = self.state\n\n done = False\n\n return observations, rewards, done\n\n elif self.step_count == 2: # end of episode\n\n for i in range(self.n_sites):\n p = self.p_s2_s1_d2[self.state[i+1], ac0[i]]\n u = np.random.rand()\n if u <= p:\n self.state[i + 1] = 1 # success\n\n done = True\n observations = self.state\n #print(np.dot(self.payoffs, self.state[1:]))\n rewards = [- np.exp(self.c_D * self.rho * np.dot(self.payoffs, self.state[1:])),\n np.exp(self.c_A * np.dot(self.payoffs,self.state[1:]) - np.sum(ac1 * self.k))] \n\n return observations, rewards, done\n\n\nclass SimpleCoin():\n \"\"\"\n Simple Coin Game from LOLA paper, where state is just the color of the coin.\n \"\"\"\n\n def __init__(self, max_steps, batch_size=1):\n self.max_steps = max_steps\n self.batch_size = batch_size\n self.available_actions = np.array([0, 1]) # 1 pick coin.\n self.step_count = 0\n self.state = 0 # initially, coin is red (for first player)\n\n def reset(self):\n self.step_count = 0\n return\n\n def step(self, action):\n ac0, ac1 = action\n\n self.step_count += 1\n\n rewards = np.asarray([ac0, ac1]) # +1 point if thw agent picks coin.\n \n # conflict\n if ac0 and self.state == 1:\n rewards[1] -= 2\n \n if ac1 and self.state == 0:\n rewards[0] -= 2\n\n if np.random.rand() < 0.5:\n self.state = 0\n else:\n self.state = 1\n\n done = (self.step_count == self.max_steps)\n\n return self.state, rewards, done\n#\n\nclass CoinGame():\n \"\"\"\n Coin Game from LOLA paper, played over a NxN grid\n \"\"\"\n\n def __init__(self, max_steps=5, batch_size=1, tabular=True):\n self.max_steps = max_steps\n self.batch_size = batch_size\n self.available_actions = np.array([0, 1, 2, 3]) # four directions to move. Agents pick up coins by moving onto the position where the coin is located\n self.step_count = 0\n self.N = 3\n self.available_actions = np.array(\n [0, 1])\n self.available_actions_DM = np.array(\n [0, 1, 2, 3])\n self.available_actions_Adv = np.array(\n [0, 1, 2, 3])\n #self.state = np.zeros([4, self.N, self.N]) # blue player, red player, blue coin, red coin positions as OHE over grid.\n\n self.blue_player = [1, 0]\n self.red_player = [1, 2]\n if (np.random.rand() < 0.0):\n self.blue_coin = [0, 1]\n self.red_coin = [2, 1]\n else:\n self.blue_coin = [2, 1]\n self.red_coin = [0, 1]\n\n self.tabular = tabular\n\n def get_state(self):\n o = np.zeros([4, self.N, self.N])\n o[0,self.blue_player[0], self.blue_player[1]] = 1\n o[1,self.red_player[0], self.red_player[1]] = 1\n o[2,self.blue_coin[0], self.blue_coin[1]] = 1\n o[3,self.red_coin[0], self.red_coin[1]] = 1\n\n if self.tabular:\n p1 = self.blue_player[0] + self.N*self.blue_player[1]\n p2 = self.red_player[0] + self.N*self.red_player[1]\n p3 = self.blue_coin[0] + self.N*self.blue_coin[1]\n p4 = self.red_coin[0] + self.N*self.red_coin[1]\n return int(p1 + (self.N)**2 * p2 + ((self.N)**2)**2 * p3 + ((self.N)**2)**3 * p4)\n\n return o\n\n def reset(self):\n self.step_count = 0\n\n # initial positions\n self.blue_player = [1, 0]\n self.red_player = [1, 2]\n\n if (np.random.rand() < 0.0):\n self.blue_coin = [0, 1]\n self.red_coin = [2, 1]\n else:\n self.blue_coin = [2, 1]\n self.red_coin = [0, 1]\n\n return\n\n def step(self, action):\n ac0, ac1 = action\n\n self.step_count += 1\n\n reward_blue, reward_red = 0, 0\n\n # agents move\n if ac0 == 0: # up\n self.blue_player[0] = np.maximum(self.blue_player[0] - 1, 0)\n elif ac0 == 1: # right\n self.blue_player[1] = np.minimum(self.blue_player[1] + 1, self.N-1)\n elif ac0 == 2: # 
down\n self.blue_player[0] = np.minimum(self.blue_player[0] + 1, self.N-1)\n else:\n self.blue_player[1] = np.maximum(self.blue_player[1] - 1, 0)\n\n if ac1 == 0: # up\n self.red_player[0] = np.maximum(self.red_player[0] - 1, 0)\n elif ac1 == 1: # right\n self.red_player[1] = np.minimum(self.red_player[1] + 1, self.N-1)\n elif ac1 == 2: # down\n self.red_player[0] = np.minimum(self.red_player[0] + 1, self.N-1)\n else:\n self.red_player[1] = np.maximum(self.red_player[1] - 1, 0)\n\n # check coins\n # if either agent picks coin, +1 for him\n if self.blue_player == self.blue_coin:\n if self.red_player == self.blue_coin:\n reward_blue += 0.5\n else:\n reward_blue += 1\n self.blue_coin = [-1, -1]\n\n if self.red_player == self.red_coin:\n if self.blue_player == self.red_coin:\n reward_red += 0.5\n else:\n reward_red += 1\n self.red_coin = [-1, -1]\n\n if self.blue_player == self.red_coin:\n if self.red_player == self.red_coin:\n reward_blue += 0.5\n else:\n reward_blue += 1\n self.red_coin = [-1, -1]\n \n if self.red_player == self.blue_coin:\n if self.blue_player == self.blue_coin:\n reward_red += 0.5\n else:\n reward_red += 1\n self.blue_coin = [-1, -1]\n \n \n \n \n done = self.step_count == self.max_steps\n \n return self.get_state(), np.array([reward_blue, reward_red]), done" ]
[ [ "numpy.logical_not", "numpy.array", "numpy.dot", "numpy.nan_to_num", "numpy.asarray", "numpy.zeros", "numpy.random.rand", "numpy.minimum", "numpy.sum", "numpy.ones", "numpy.sign", "numpy.argmax", "numpy.base_repr", "numpy.all", "numpy.maximum" ] ]
zxsted/meta-critic-networks
[ "1768751f84845bd6fe98a13d5b57dfaca154c1f8" ]
[ "multi_arm_bandit/mvn_arm4/mvn_test_arm4_sample10_new.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport random\nimport os\n\nimport json\n\ndef save_to_json(fname, data):\n with open(fname, 'w') as outfile:\n json.dump(data, outfile)\n\n# Hyper Parameters\nTASK_NUMS = 100\nTEST_NUMS_PER_TASK = 10\nARM_NUMS = 4\nSTEP = 300\nSAMPLE_NUMS = 10\n\n\nclass MultiArmBandit():\n \"\"\"docstring for MultiArmBandit\"\"\"\n def __init__(self,arm_nums,probs):\n self.arm_nums = arm_nums\n self.probs = probs#np.random.dirichlet(np.ones(arm_nums),size=1)[0]\n\n def step(self,action): # one hot action\n prob = np.sum(self.probs * action)\n if random.random() < prob:\n return 1\n else:\n return 0\n\n\n\nclass ActorNetwork(nn.Module):\n\n def __init__(self,input_size,hidden_size,action_size):\n super(ActorNetwork, self).__init__()\n self.fc1 = nn.Linear(input_size,hidden_size)\n self.fc2 = nn.Linear(hidden_size,hidden_size)\n self.fc3 = nn.Linear(hidden_size,action_size)\n\n def forward(self,x):\n out = F.relu(self.fc1(x))\n out = F.relu(self.fc2(out))\n out = F.log_softmax(self.fc3(out))\n return out\n\nclass MetaValueNetwork(nn.Module):\n\n def __init__(self,input_size,hidden_size,output_size):\n super(MetaValueNetwork, self).__init__()\n self.fc1 = nn.Linear(input_size,hidden_size)\n self.fc2 = nn.Linear(hidden_size,hidden_size)\n self.fc3 = nn.Linear(hidden_size,output_size)\n\n def forward(self,x):\n out = F.relu(self.fc1(x))\n out = F.relu(self.fc2(out))\n out = self.fc3(out)\n return out\n\nclass TaskConfigNetwork(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_layers, output_size):\n super(TaskConfigNetwork, self).__init__()\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)\n self.fc = nn.Linear(hidden_size, output_size)\n\n def forward(self, x):\n # Set initial states\n h0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size))\n c0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size))\n # Forward propagate RNN\n out, _ = self.lstm(x, (h0, c0))\n # Decode hidden state of last time step\n out = self.fc(out[:, -1, :])\n return out\n\ndef roll_out(actor_network,task,sample_nums):\n actions = []\n rewards = []\n softmax_action = torch.exp(actor_network(Variable(torch.Tensor([[1]]))))\n for step in range(sample_nums):\n action = np.random.choice(ARM_NUMS,p=softmax_action.data.numpy()[0])\n one_hot_action = [int(i == action) for i in range(ARM_NUMS)]\n reward = task.step(one_hot_action)\n actions.append(one_hot_action)\n rewards.append([reward])\n\n return torch.Tensor([actions]),torch.Tensor([rewards])\n\ndef roll_out_actions(actor_network,sample_nums):\n actions = []\n rewards = []\n softmax_action = torch.exp(actor_network(Variable(torch.Tensor([[1]]))))\n for step in range(sample_nums):\n action = np.random.choice(ARM_NUMS,p=softmax_action.data.numpy()[0])\n one_hot_action = [int(i == action) for i in range(ARM_NUMS)]\n actions.append(one_hot_action)\n\n return torch.Tensor([actions])\n\ndef main():\n\n mvn_input_dim = ARM_NUMS + 3\n task_config_input_dim = ARM_NUMS + 1\n # init meta value network with a task config network\n meta_value_network = MetaValueNetwork(input_size = mvn_input_dim,hidden_size = 80,output_size = 1)\n task_config_network = TaskConfigNetwork(input_size = task_config_input_dim,hidden_size = 30,num_layers = 1,output_size = 3)\n\n if 
os.path.exists(\"meta_value_network_arm4.pkl\"):\n meta_value_network.load_state_dict(torch.load(\"meta_value_network_arm4.pkl\"))\n print(\"load meta value network success\")\n if os.path.exists(\"task_config_network_arm4.pkl\"):\n task_config_network.load_state_dict(torch.load(\"task_config_network_arm4.pkl\"))\n print(\"load task config network success\")\n\n\n # init a task generator for data fetching\n results = []\n\n total_rewards = 0\n\n task_probs = json.load(open(\"tasks_arm4.json\"))\n\n for episode in range(TASK_NUMS):\n res_i = {}\n task_prob = task_probs[episode][\"task_probs\"]\n task = MultiArmBandit(ARM_NUMS,np.array(task_prob))\n res_i[\"arm_nums\"] = ARM_NUMS\n res_i[\"task_probs\"] = task.probs.tolist()\n res_i[\"sample_nums\"] = SAMPLE_NUMS\n\n aver_rewards = []\n correct_probs = []\n for test_nums in range(TEST_NUMS_PER_TASK):\n actor_network = ActorNetwork(1,40,ARM_NUMS)\n actor_network_optim = torch.optim.Adam(actor_network.parameters(),lr=0.001)\n\n pre_actions,pre_rewards = roll_out(actor_network,task,SAMPLE_NUMS)\n pre_data_samples = torch.cat((pre_actions,pre_rewards),2)\n\n task_configs = task_config_network(Variable(pre_data_samples)).repeat(1,SAMPLE_NUMS).view(-1,3)\n\n for step in range(STEP):\n\n inputs = Variable(torch.Tensor([[1]])) #[1,1]\n #actions = roll_out_actions(actor_network,SAMPLE_NUMS)\n actions_var = Variable(pre_actions.view(-1,ARM_NUMS))\n actor_data_samples = torch.cat((actions_var,task_configs.detach()),1) #[task_nums,5]\n log_softmax_actions = actor_network(inputs) # [1,2]\n log_softmax_actions = log_softmax_actions.repeat(1,SAMPLE_NUMS).view(-1,ARM_NUMS)\n # train actor network\n\n actor_network_optim.zero_grad()\n qs = meta_value_network(actor_data_samples)\n actor_network_loss = - torch.mean(torch.sum(log_softmax_actions*actions_var,1)* qs) #+ actor_criterion(actor_y_samples,target_y)\n actor_network_loss.backward()\n\n actor_network_optim.step()\n\n choice = torch.exp(actor_network(inputs)).data[0].numpy()\n aver_reward = np.sum(choice * task.probs)\n optimal_action = np.argmax(task.probs)\n optimal_choice = [int(i == optimal_action) for i in range(ARM_NUMS)]\n correct_prob = np.sum(choice*optimal_choice)\n\n aver_rewards.append(float(aver_reward))\n correct_probs.append(float(correct_prob))\n total_rewards += aver_reward\n\n\n res_i[\"aver_rewards\"] = aver_rewards\n res_i[\"correct_probs\"] = correct_probs\n\n results.append(res_i)\n\n print(\"aver_reward\",np.mean(aver_rewards),\"correct prob:\",np.mean(correct_probs),\"task:\",task.probs)\n\n save_to_json('mvn_arm_4_sample_10.json', results)\n print(\"total aver reward:\",total_rewards/TASK_NUMS/TEST_NUMS_PER_TASK)\n\n\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.Linear", "numpy.array", "torch.cat", "torch.nn.LSTM", "torch.autograd.Variable", "numpy.sum", "numpy.mean", "numpy.argmax", "torch.load", "torch.Tensor", "torch.sum" ] ]
baustin13/two-stg-alma
[ "6400fbf1435fc4ef78331f8c730ce09dc5665cd5" ]
[ "Code/sage+gat+diffpool/cross_val.py" ]
[ "import networkx as nx\nimport numpy as np\nimport torch\n\nimport pickle\nimport random\n\nfrom graph_sampler import GraphSampler\n\ndef prepare_val_data(graphs, args, val_idx, max_nodes=0):\n\n random.shuffle(graphs)\n val_size = len(graphs) // 10\n train_graphs = graphs[:val_idx * val_size]\n if val_idx < 9:\n train_graphs = train_graphs + graphs[(val_idx+1) * val_size :]\n val_graphs = graphs[val_idx*val_size: (val_idx+1)*val_size]\n print('Num training graphs: ', len(train_graphs), \n '; Num validation graphs: ', len(val_graphs))\n\n print('Number of graphs: ', len(graphs))\n print('Number of edges: ', sum([G.number_of_edges() for G in graphs]))\n print('Max, avg, std of graph size: ', \n max([G.number_of_nodes() for G in graphs]), ', '\n \"{0:.2f}\".format(np.mean([G.number_of_nodes() for G in graphs])), ', '\n \"{0:.2f}\".format(np.std([G.number_of_nodes() for G in graphs])))\n\n # minibatch\n \n dataset_sampler = GraphSampler(train_graphs, normalize=False, max_num_nodes=max_nodes,\n features=args.feature_type)\n train_dataset_loader = torch.utils.data.DataLoader(\n dataset_sampler, \n batch_size=args.batch_size, \n shuffle=True,\n num_workers=args.num_workers)\n\n dataset_sampler = GraphSampler(val_graphs, normalize=False, max_num_nodes=max_nodes,\n features=args.feature_type)\n val_dataset_loader = torch.utils.data.DataLoader(\n dataset_sampler, \n batch_size=args.batch_size, \n shuffle=False,\n num_workers=args.num_workers)\n print(\"feat dim\")\n print(dataset_sampler.feat_dim)\n return train_dataset_loader, val_dataset_loader, \\\n dataset_sampler.max_num_nodes, dataset_sampler.feat_dim, dataset_sampler.assign_feat_dim\n\n# split train, val, test sets: for original differential pooling setting (each train, val, test is a data loader)\ndef split_train_val_normal(graphs, args, val_test_idx, max_nodes, feat):\n\n # split train, val, test\n\n ## if there is a validation set: 80% train, 10% val, 10% test\n \n if args.val == True:\n val_test_size = len(graphs) // 5\n train_graphs = graphs[:val_test_idx * val_test_size]\n if val_test_idx < 4:\n train_graphs = train_graphs + graphs[(val_test_idx+1) * val_test_size :]\n val_test_graphs = graphs[val_test_idx*val_test_size: (val_test_idx+1)*val_test_size]\n val_size = len(val_test_graphs) // 2\n val_graphs = val_test_graphs[:val_size]\n test_graphs = val_test_graphs[val_size:]\n \n\n ## if there is no validation set: 90% train, 10% test\n else:\n test_idx = val_test_idx\n test_size = len(graphs) // 10\n train_graphs = graphs[:test_idx * test_size]\n if test_idx < 9:\n train_graphs = train_graphs + graphs[(test_idx+1) * test_size :]\n test_graphs = graphs[test_idx*test_size: (test_idx+1)*test_size]\n\n # train set loader\n print(len(train_graphs))\n dataset_sampler = GraphSampler(train_graphs, normalize=False, max_num_nodes=max_nodes, features=args.feature_type)\n train_dataset_loader = torch.utils.data.DataLoader(\n dataset_sampler, \n batch_size=args.batch_size, \n shuffle=True,\n num_workers=args.num_workers)\n\n # test set loader\n testset_sampler = GraphSampler(test_graphs, normalize=False, max_num_nodes=max_nodes, features=args.feature_type)\n test_dataset_loader = torch.utils.data.DataLoader(\n testset_sampler, \n batch_size=args.batch_size, \n shuffle=True,\n num_workers=args.num_workers)\n\n if args.val:\n valset_sampler = GraphSampler(val_graphs, normalize=False, max_num_nodes=max_nodes, features=args.feature_type)\n val_dataset_loader = torch.utils.data.DataLoader(\n valset_sampler, \n batch_size=args.batch_size, \n 
shuffle=False,\n num_workers=args.num_workers)\n else:\n val_dataset_loader = test_dataset_loader\n\n #print(\"feat dim\")\n #print(dataset_sampler.feat_dim)\n return train_dataset_loader, test_dataset_loader, val_dataset_loader, \\\n dataset_sampler.max_num_nodes, dataset_sampler.feat_dim, dataset_sampler.assign_feat_dim\n\n\n\n\n# split train, val, test sets: for triplet train setting (each train, val, test is a dictionary, keys are the classes, values are arrays of graphs)\ndef split_train_val(graphs, args, val_test_idx, max_nodes, feat):\n\n num_classes = args.num_classes \n \n # shuffle the dataset\n random.shuffle(graphs)\n\n # split train, val, test\n\n ## if there is a validation set: 80% train, 10% val, 10% test\n if args.val == True:\n val_test_size = len(graphs) // 5\n train_graphs = graphs[:val_test_idx * val_test_size]\n if val_test_idx < 4:\n train_graphs = train_graphs + graphs[(val_test_idx+1) * val_test_size :]\n val_test_graphs = graphs[val_test_idx*val_test_size: (val_test_idx+1)*val_test_size]\n val_size = len(val_test_graphs) // 2\n val_graphs = val_test_graphs[:val_size]\n test_graphs = val_test_graphs[val_size:]\n \n\n ## if there is no validation set: 90% train, 10% test\n else:\n test_idx = val_test_idx\n test_size = len(graphs) // 10\n train_graphs = graphs[:test_idx * test_size]\n if test_idx < 9:\n train_graphs = train_graphs + graphs[(test_idx+1) * test_size :]\n test_graphs = graphs[test_idx*test_size: (test_idx+1)*test_size]\n\n train_graphs_dict = dict()\n test_graphs_dict = dict()\n val_graphs_dict = dict()\n\n for i in range(num_classes):\n train_graphs_dict[i] = []\n test_graphs_dict[i] = []\n val_graphs_dict[i] = []\n\n node_list = list(train_graphs[0].nodes)\n representative_node = node_list[0]\n\n feat_dim = train_graphs[0].nodes[representative_node]['feat'].shape[0]\n assign_feat_dim = feat_dim\n\n for train_graph in train_graphs:\n num_nodes = train_graph.number_of_nodes()\n # label\n label = int(train_graph.graph['label'])\n\n # adj\n adj = np.array(nx.to_numpy_matrix(train_graph))\n adj_padded = np.zeros((max_nodes, max_nodes))\n adj_padded[:num_nodes, :num_nodes] = adj\n train_graph.graph['adj'] = adj_padded\n\n # feats\n f = np.zeros((max_nodes, feat_dim), dtype=float)\n for i,u in enumerate(train_graph.nodes()):\n if args.feature_type == 'node-label':\n f[i,:] = train_graph.nodes[u]['feat']\n else:\n f[i,:] = (train_graph.nodes[u]['feat'].data).cpu().numpy()\n train_graph.graph['feats'] = f\n\n # num_nodes\n train_graph.graph['num_nodes'] = num_nodes\n\n # assign feats\n train_graph.graph['assign_feats'] = f\n \n train_graphs_dict[label].append(train_graph)\n\n\n for test_graph in test_graphs:\n\n num_nodes = test_graph.number_of_nodes()\n # label\n label = int(test_graph.graph['label'])\n\n # adj\n adj = np.array(nx.to_numpy_matrix(test_graph))\n adj_padded = np.zeros((max_nodes, max_nodes))\n adj_padded[:num_nodes, :num_nodes] = adj\n test_graph.graph['adj'] = adj_padded\n\n # feats\n f = np.zeros((max_nodes, feat_dim), dtype=float)\n for i,u in enumerate(test_graph.nodes()):\n if args.feature_type == 'node-label':\n f[i,:] = test_graph.nodes[u]['feat']\n else:\n f[i,:] = (test_graph.nodes[u]['feat'].data).cpu().numpy()\n\n test_graph.graph['feats'] = f\n\n # num_nodes\n test_graph.graph['num_nodes'] = num_nodes\n\n # assign feats\n test_graph.graph['assign_feats'] = f\n \n \n test_graphs_dict[label].append(test_graph)\n\n \n if args.val == True:\n for val_graph in val_graphs:\n\n num_nodes = val_graph.number_of_nodes()\n # label\n label = 
int(val_graph.graph['label'])\n\n # adj\n adj = np.array(nx.to_numpy_matrix(val_graph))\n adj_padded = np.zeros((max_nodes, max_nodes))\n adj_padded[:num_nodes, :num_nodes] = adj\n val_graph.graph['adj'] = adj_padded\n\n # feats\n f = np.zeros((max_nodes, feat_dim), dtype=float)\n for i,u in enumerate(val_graph.nodes()):\n if args.feature_type == 'node-label':\n f[i,:] = val_graph.nodes[u]['feat']\n else:\n f[i,:] = (val_graph.nodes[u]['feat'].data).cpu().numpy()\n\n val_graph.graph['feats'] = f\n\n # num_nodes\n val_graph.graph['num_nodes'] = num_nodes\n\n # assign feats\n val_graph.graph['assign_feats'] = f\n \n \n val_graphs_dict[label].append(val_graph)\n\n \n\n return train_graphs_dict, test_graphs_dict, val_graphs_dict, \\\n max_nodes, feat_dim, assign_feat_dim\n \n \n\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.zeros" ] ]
GarrettNicolai/OpenNMT-py
[ "9491d900ac1b50fe39da417bacc0b9d610331888" ]
[ "onmt/translate/translator.py" ]
[ "#!/usr/bin/env python\n\"\"\" Translator Class and builder \"\"\"\nfrom __future__ import print_function\nimport codecs\nimport os\nimport time\nimport numpy as np\nfrom itertools import count, zip_longest\n\nimport torch\n\nimport onmt.model_builder\nimport onmt.inputters as inputters\nimport onmt.decoders.ensemble\nfrom onmt.translate.beam_search import BeamSearch\nfrom onmt.translate.greedy_search import GreedySearch\nfrom onmt.utils.misc import tile, set_random_seed, report_matrix\nfrom onmt.utils.alignment import extract_alignment, build_align_pharaoh\nfrom onmt.modules.copy_generator import collapse_copy_scores\n\n\ndef build_translator(opt, report_score=True, logger=None, out_file=None):\n if out_file is None:\n out_file = codecs.open(opt.output, 'w+', 'utf-8')\n\n load_test_model = onmt.decoders.ensemble.load_test_model \\\n if len(opt.models) > 1 else onmt.model_builder.load_test_model\n fields, model, model_opt = load_test_model(opt)\n\n scorer = onmt.translate.GNMTGlobalScorer.from_opt(opt)\n translator = Translator.from_opt(\n model,\n fields,\n opt,\n model_opt,\n global_scorer=scorer,\n out_file=out_file,\n report_align=opt.report_align,\n report_score=report_score,\n logger=logger\n )\n model.decoder.set_eval_status(True)\n\n return translator\n\n\ndef max_tok_len(new, count, sofar):\n \"\"\"\n In token batching scheme, the number of sequences is limited\n such that the total number of src/tgt tokens (including padding)\n in a batch <= batch_size\n \"\"\"\n # Maintains the longest src and tgt length in the current batch\n global max_src_in_batch # this is a hack\n # Reset current longest length at a new batch (count=1)\n if count == 1:\n max_src_in_batch = 0\n # max_tgt_in_batch = 0\n # Src: [<bos> w1 ... wN <eos>]\n max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)\n # Tgt: [w1 ... wM <eos>]\n src_elements = count * max_src_in_batch\n return src_elements\n\n\nclass Translator(object):\n \"\"\"Translate a batch of sentences with a saved model.\n\n Args:\n model (onmt.modules.NMTModel): NMT model to use for translation\n fields (dict[str, torchtext.data.Field]): A dict\n mapping each side to its list of name-Field pairs.\n src_reader (onmt.inputters.DataReaderBase): Source reader.\n tgt_reader (onmt.inputters.TextDataReader): Target reader.\n gpu (int): GPU device. 
Set to negative for no GPU.\n n_best (int): How many beams to wait for.\n min_length (int): See\n :class:`onmt.translate.decode_strategy.DecodeStrategy`.\n max_length (int): See\n :class:`onmt.translate.decode_strategy.DecodeStrategy`.\n beam_size (int): Number of beams.\n random_sampling_topk (int): See\n :class:`onmt.translate.greedy_search.GreedySearch`.\n random_sampling_temp (int): See\n :class:`onmt.translate.greedy_search.GreedySearch`.\n stepwise_penalty (bool): Whether coverage penalty is applied every step\n or not.\n dump_beam (bool): Debugging option.\n block_ngram_repeat (int): See\n :class:`onmt.translate.decode_strategy.DecodeStrategy`.\n ignore_when_blocking (set or frozenset): See\n :class:`onmt.translate.decode_strategy.DecodeStrategy`.\n replace_unk (bool): Replace unknown token.\n data_type (str): Source data type.\n verbose (bool): Print/log every translation.\n report_time (bool): Print/log total time/frequency.\n copy_attn (bool): Use copy attention.\n global_scorer (onmt.translate.GNMTGlobalScorer): Translation\n scoring/reranking object.\n out_file (TextIO or codecs.StreamReaderWriter): Output file.\n report_score (bool) : Whether to report scores\n logger (logging.Logger or NoneType): Logger.\n \"\"\"\n\n def __init__(\n self,\n model,\n fields,\n src_reader,\n tgt_reader,\n gpu=-1,\n n_best=1,\n min_length=0,\n max_length=100,\n ratio=0.,\n beam_size=30,\n random_sampling_topk=1,\n random_sampling_temp=1,\n stepwise_penalty=None,\n dump_beam=False,\n block_ngram_repeat=0,\n ignore_when_blocking=frozenset(),\n replace_unk=False,\n phrase_table=\"\",\n data_type=\"text\",\n verbose=False,\n report_time=False,\n copy_attn=False,\n global_scorer=None,\n out_file=None,\n report_align=False,\n report_score=True,\n logger=None,\n seed=-1):\n self.model = model\n self.fields = fields\n tgt_field = dict(self.fields)[\"tgt\"].base_field\n self._tgt_vocab = tgt_field.vocab\n self._tgt_eos_idx = self._tgt_vocab.stoi[tgt_field.eos_token]\n self._tgt_pad_idx = self._tgt_vocab.stoi[tgt_field.pad_token]\n self._tgt_bos_idx = self._tgt_vocab.stoi[tgt_field.init_token]\n self._tgt_unk_idx = self._tgt_vocab.stoi[tgt_field.unk_token]\n self._tgt_vocab_len = len(self._tgt_vocab)\n\n self._gpu = gpu\n self._use_cuda = gpu > -1\n self._dev = torch.device(\"cuda\", self._gpu) \\\n if self._use_cuda else torch.device(\"cpu\")\n\n self.n_best = n_best\n self.max_length = max_length\n\n self.beam_size = beam_size\n self.random_sampling_temp = random_sampling_temp\n self.sample_from_topk = random_sampling_topk\n\n self.min_length = min_length\n self.ratio = ratio\n self.stepwise_penalty = stepwise_penalty\n self.dump_beam = dump_beam\n self.block_ngram_repeat = block_ngram_repeat\n self.ignore_when_blocking = ignore_when_blocking\n self._exclusion_idxs = {\n self._tgt_vocab.stoi[t] for t in self.ignore_when_blocking}\n self.src_reader = src_reader\n self.tgt_reader = tgt_reader\n self.replace_unk = replace_unk\n if self.replace_unk and not self.model.decoder.attentional:\n raise ValueError(\n \"replace_unk requires an attentional decoder.\")\n self.phrase_table = phrase_table\n self.data_type = data_type\n self.verbose = verbose\n self.report_time = report_time\n\n self.copy_attn = copy_attn\n\n self.global_scorer = global_scorer\n if self.global_scorer.has_cov_pen and \\\n not self.model.decoder.attentional:\n raise ValueError(\n \"Coverage penalty requires an attentional decoder.\")\n self.out_file = out_file\n self.report_align = report_align\n self.report_score = report_score\n 
self.logger = logger\n\n self.use_filter_pred = False\n self._filter_pred = None\n\n # for debugging\n self.beam_trace = self.dump_beam != \"\"\n self.beam_accum = None\n if self.beam_trace:\n self.beam_accum = {\n \"predicted_ids\": [],\n \"beam_parent_ids\": [],\n \"scores\": [],\n \"log_probs\": []}\n\n set_random_seed(seed, self._use_cuda)\n\n @classmethod\n def from_opt(\n cls,\n model,\n fields,\n opt,\n model_opt,\n global_scorer=None,\n out_file=None,\n report_align=False,\n report_score=True,\n logger=None):\n \"\"\"Alternate constructor.\n\n Args:\n model (onmt.modules.NMTModel): See :func:`__init__()`.\n fields (dict[str, torchtext.data.Field]): See\n :func:`__init__()`.\n opt (argparse.Namespace): Command line options\n model_opt (argparse.Namespace): Command line options saved with\n the model checkpoint.\n global_scorer (onmt.translate.GNMTGlobalScorer): See\n :func:`__init__()`..\n out_file (TextIO or codecs.StreamReaderWriter): See\n :func:`__init__()`.\n report_align (bool) : See :func:`__init__()`.\n report_score (bool) : See :func:`__init__()`.\n logger (logging.Logger or NoneType): See :func:`__init__()`.\n \"\"\"\n\n src_reader = inputters.str2reader[opt.data_type].from_opt(opt)\n tgt_reader = inputters.str2reader[\"text\"].from_opt(opt)\n return cls(\n model,\n fields,\n src_reader,\n tgt_reader,\n gpu=opt.gpu,\n n_best=opt.n_best,\n min_length=opt.min_length,\n max_length=opt.max_length,\n ratio=opt.ratio,\n beam_size=opt.beam_size,\n random_sampling_topk=opt.random_sampling_topk,\n random_sampling_temp=opt.random_sampling_temp,\n stepwise_penalty=opt.stepwise_penalty,\n dump_beam=opt.dump_beam,\n block_ngram_repeat=opt.block_ngram_repeat,\n ignore_when_blocking=set(opt.ignore_when_blocking),\n replace_unk=opt.replace_unk,\n phrase_table=opt.phrase_table,\n data_type=opt.data_type,\n verbose=opt.verbose,\n report_time=opt.report_time,\n copy_attn=model_opt.copy_attn,\n global_scorer=global_scorer,\n out_file=out_file,\n report_align=report_align,\n report_score=report_score,\n logger=logger,\n seed=opt.seed)\n\n def _log(self, msg):\n if self.logger:\n self.logger.info(msg)\n else:\n print(msg)\n\n def _gold_score(self, batch, memory_bank, src_lengths, src_vocabs,\n use_src_map, enc_states, batch_size, src):\n if \"tgt\" in batch.__dict__:\n gs = self._score_target(\n batch, memory_bank, src_lengths, src_vocabs,\n batch.src_map if use_src_map else None)\n self.model.decoder.init_state(src, memory_bank, enc_states)\n else:\n gs = [0] * batch_size\n return gs\n\n def translate(\n self,\n src,\n tgt=None,\n src_dir=None,\n batch_size=None,\n batch_type=\"sents\",\n attn_debug=False,\n align_debug=False,\n phrase_table=\"\"):\n \"\"\"Translate content of ``src`` and get gold scores from ``tgt``.\n\n Args:\n src: See :func:`self.src_reader.read()`.\n tgt: See :func:`self.tgt_reader.read()`.\n src_dir: See :func:`self.src_reader.read()` (only relevant\n for certain types of data).\n batch_size (int): size of examples per mini-batch\n attn_debug (bool): enables the attention logging\n align_debug (bool): enables the word alignment logging\n\n Returns:\n (`list`, `list`)\n\n * all_scores is a list of `batch_size` lists of `n_best` scores\n * all_predictions is a list of `batch_size` lists\n of `n_best` predictions\n \"\"\"\n\n if batch_size is None:\n raise ValueError(\"batch_size must be set\")\n\n src_data = {\"reader\": self.src_reader, \"data\": src, \"dir\": src_dir}\n tgt_data = {\"reader\": self.tgt_reader, \"data\": tgt, \"dir\": None}\n _readers, _data, _dir = 
inputters.Dataset.config(\n [('src', src_data), ('tgt', tgt_data)])\n\n data = inputters.Dataset(\n self.fields, readers=_readers, data=_data, dirs=_dir,\n sort_key=inputters.str2sortkey[self.data_type],\n filter_pred=self._filter_pred\n )\n\n data_iter = inputters.OrderedIterator(\n dataset=data,\n device=self._dev,\n batch_size=batch_size,\n batch_size_fn=max_tok_len if batch_type == \"tokens\" else None,\n train=False,\n sort=False,\n sort_within_batch=True,\n shuffle=False\n )\n\n xlation_builder = onmt.translate.TranslationBuilder(\n data, self.fields, self.n_best, self.replace_unk, tgt,\n self.phrase_table\n )\n\n # Statistics\n counter = count(1)\n pred_score_total, pred_words_total = 0, 0\n gold_score_total, gold_words_total = 0, 0\n\n all_scores = []\n all_predictions = []\n\n start_time = time.time()\n\n for batch in data_iter:\n batch_data = self.translate_batch(\n batch, data.src_vocabs, attn_debug\n )\n translations = xlation_builder.from_batch(batch_data)\n for trans in translations:\n all_scores += [trans.pred_scores[:self.n_best]]\n pred_score_total += trans.pred_scores[0]\n pred_words_total += len(trans.pred_sents[0])\n if tgt is not None:\n gold_score_total += trans.gold_score\n gold_words_total += len(trans.gold_sent) + 1\n\n n_best_preds = [\" \".join(pred)\n for pred in trans.pred_sents[:self.n_best]]\n if self.report_align:\n align_pharaohs = [build_align_pharaoh(align) for align\n in trans.word_aligns[:self.n_best]]\n n_best_preds_align = [\" \".join(align) for align\n in align_pharaohs]\n n_best_preds = [pred + \" ||| \" + align\n for pred, align in zip(\n n_best_preds, n_best_preds_align)]\n all_predictions += [n_best_preds]\n self.out_file.write('\\n'.join(n_best_preds) + '\\n')\n self.out_file.flush()\n\n if self.verbose:\n sent_number = next(counter)\n output = trans.log(sent_number)\n if self.logger:\n self.logger.info(output)\n else:\n os.write(1, output.encode('utf-8'))\n\n if attn_debug:\n preds = trans.pred_sents[0]\n preds.append('</s>')\n attns = trans.attns[0].tolist()\n if self.data_type == 'text':\n srcs = trans.src_raw\n else:\n srcs = [str(item) for item in range(len(attns[0]))]\n output = report_matrix(srcs, preds, attns)\n if self.logger:\n self.logger.info(output)\n else:\n os.write(1, output.encode('utf-8'))\n\n if align_debug:\n if trans.gold_sent is not None:\n tgts = trans.gold_sent\n else:\n tgts = trans.pred_sents[0]\n align = trans.word_aligns[0].tolist()\n if self.data_type == 'text':\n srcs = trans.src_raw\n else:\n srcs = [str(item) for item in range(len(align[0]))]\n output = report_matrix(srcs, tgts, align)\n if self.logger:\n self.logger.info(output)\n else:\n os.write(1, output.encode('utf-8'))\n\n end_time = time.time()\n\n if self.report_score:\n msg = self._report_score('PRED', pred_score_total,\n pred_words_total)\n self._log(msg)\n if tgt is not None:\n msg = self._report_score('GOLD', gold_score_total,\n gold_words_total)\n self._log(msg)\n\n if self.report_time:\n total_time = end_time - start_time\n self._log(\"Total translation time (s): %f\" % total_time)\n self._log(\"Average translation time (s): %f\" % (\n total_time / len(all_predictions)))\n self._log(\"Tokens per second: %f\" % (\n pred_words_total / total_time))\n\n if self.dump_beam:\n import json\n json.dump(self.translator.beam_accum,\n codecs.open(self.dump_beam, 'w', 'utf-8'))\n return all_scores, all_predictions\n\n def _align_pad_prediction(self, predictions, bos, pad):\n \"\"\"\n Padding predictions in batch and add BOS.\n\n Args:\n predictions 
(List[List[Tensor]]): `(batch, n_best,)`, for each src\n sequence contain n_best tgt predictions all of which ended with\n eos id.\n bos (int): bos index to be used.\n pad (int): pad index to be used.\n\n Return:\n batched_nbest_predict (torch.LongTensor): `(batch, n_best, tgt_l)`\n \"\"\"\n dtype, device = predictions[0][0].dtype, predictions[0][0].device\n flatten_tgt = [best.tolist() for bests in predictions\n for best in bests]\n paded_tgt = torch.tensor(\n list(zip_longest(*flatten_tgt, fillvalue=pad)),\n dtype=dtype, device=device).T\n bos_tensor = torch.full([paded_tgt.size(0), 1], bos,\n dtype=dtype, device=device)\n full_tgt = torch.cat((bos_tensor, paded_tgt), dim=-1)\n batched_nbest_predict = full_tgt.view(\n len(predictions), -1, full_tgt.size(-1)) # (batch, n_best, tgt_l)\n return batched_nbest_predict\n\n def _align_forward(self, batch, predictions):\n \"\"\"\n For a batch of input and its prediction, return a list of batch predict\n alignment src indice Tensor in size ``(batch, n_best,)``.\n \"\"\"\n # (0) add BOS and padding to tgt prediction\n if hasattr(batch, 'tgt'):\n batch_tgt_idxs = batch.tgt.transpose(1, 2).transpose(0, 2)\n else:\n batch_tgt_idxs = self._align_pad_prediction(\n predictions, bos=self._tgt_bos_idx, pad=self._tgt_pad_idx)\n tgt_mask = (batch_tgt_idxs.eq(self._tgt_pad_idx) |\n batch_tgt_idxs.eq(self._tgt_eos_idx) |\n batch_tgt_idxs.eq(self._tgt_bos_idx))\n\n n_best = batch_tgt_idxs.size(1)\n # (1) Encoder forward.\n src, enc_states, memory_bank, src_lengths = self._run_encoder(batch)\n\n # (2) Repeat src objects `n_best` times.\n # We use batch_size x n_best, get ``(src_len, batch * n_best, nfeat)``\n src = tile(src, n_best, dim=1)\n enc_states = tile(enc_states, n_best, dim=1)\n if isinstance(memory_bank, tuple):\n memory_bank = tuple(tile(x, n_best, dim=1) for x in memory_bank)\n else:\n memory_bank = tile(memory_bank, n_best, dim=1)\n src_lengths = tile(src_lengths, n_best) # ``(batch * n_best,)``\n\n # (3) Init decoder with n_best src,\n self.model.decoder.init_state(src, memory_bank, enc_states)\n # reshape tgt to ``(len, batch * n_best, nfeat)``\n tgt = batch_tgt_idxs.view(-1, batch_tgt_idxs.size(-1)).T.unsqueeze(-1)\n dec_in = tgt[:-1] # exclude last target from inputs\n _, attns = self.model.decoder(\n dec_in, memory_bank, memory_lengths=src_lengths, with_align=True)\n\n alignment_attn = attns[\"align\"] # ``(B, tgt_len-1, src_len)``\n # masked_select\n align_tgt_mask = tgt_mask.view(-1, tgt_mask.size(-1))\n prediction_mask = align_tgt_mask[:, 1:] # exclude bos to match pred\n # get aligned src id for each prediction's valid tgt tokens\n alignement = extract_alignment(\n alignment_attn, prediction_mask, src_lengths, n_best)\n return alignement\n\n def translate_batch(self, batch, src_vocabs, attn_debug):\n #self.model.decoder.set_eval_status(True)\n \"\"\"Translate a batch of sentences.\"\"\"\n with torch.no_grad():\n if self.beam_size == 1:\n decode_strategy = GreedySearch(\n pad=self._tgt_pad_idx,\n bos=self._tgt_bos_idx,\n eos=self._tgt_eos_idx,\n batch_size=batch.batch_size,\n min_length=self.min_length, max_length=self.max_length,\n block_ngram_repeat=self.block_ngram_repeat,\n exclusion_tokens=self._exclusion_idxs,\n return_attention=attn_debug or self.replace_unk,\n sampling_temp=self.random_sampling_temp,\n keep_topk=self.sample_from_topk)\n else:\n # TODO: support these blacklisted features\n assert not self.dump_beam\n decode_strategy = BeamSearch(\n self.beam_size,\n batch_size=batch.batch_size,\n pad=self._tgt_pad_idx,\n 
bos=self._tgt_bos_idx,\n eos=self._tgt_eos_idx,\n n_best=self.n_best,\n global_scorer=self.global_scorer,\n min_length=self.min_length, max_length=self.max_length,\n return_attention=attn_debug or self.replace_unk,\n block_ngram_repeat=self.block_ngram_repeat,\n exclusion_tokens=self._exclusion_idxs,\n stepwise_penalty=self.stepwise_penalty,\n ratio=self.ratio)\n \n #self.model.decoder.set_eval_status(False)\n\n return self._translate_batch_with_strategy(batch, src_vocabs,\n decode_strategy)\n\n def _run_encoder(self, batch):\n src, src_lengths = batch.src if isinstance(batch.src, tuple) \\\n else (batch.src, None)\n\n enc_states, memory_bank, src_lengths = self.model.encoder(\n src, src_lengths)\n if src_lengths is None:\n assert not isinstance(memory_bank, tuple), \\\n 'Ensemble decoding only supported for text data'\n src_lengths = torch.Tensor(batch.batch_size) \\\n .type_as(memory_bank) \\\n .long() \\\n .fill_(memory_bank.size(0))\n return src, enc_states, memory_bank, src_lengths\n\n def _decode_and_generate(\n self,\n decoder_in,\n memory_bank,\n batch,\n src_vocabs,\n memory_lengths,\n src_map=None,\n step=None,\n batch_offset=None):\n if self.copy_attn:\n # Turn any copied words into UNKs.\n decoder_in = decoder_in.masked_fill(\n decoder_in.gt(self._tgt_vocab_len - 1), self._tgt_unk_idx\n )\n\n # Decoder forward, takes [tgt_len, batch, nfeats] as input\n # and [src_len, batch, hidden] as memory_bank\n # in case of inference tgt_len = 1, batch = beam times batch_size\n # in case of Gold Scoring tgt_len = actual length, batch = 1 batch\n self.model.decoder.set_copy_info(batch, self._tgt_vocab)\n dec_out, dec_attn = self.model.decoder(\n decoder_in, memory_bank, memory_lengths=memory_lengths, step=step\n )\n\n # Generator forward.\n if not self.copy_attn:\n if \"std\" in dec_attn:\n attn = dec_attn[\"std\"]\n else:\n attn = None\n log_probs = self.model.generator(dec_out.squeeze(0))\n # returns [(batch_size x beam_size) , vocab ] when 1 step\n # or [ tgt_len, batch_size, vocab ] when full sentence\n else:\n attn = dec_attn[\"copy\"]\n #print(\"DEC_OUT: \", dec_out.size())\n #print(\"ATTN: \", attn.size())\n scores = self.model.generator(dec_out.view(-1, dec_out.size(2)),\n attn.view(-1, attn.size(2)),\n src_map)\n # here we have scores [tgt_lenxbatch, vocab] or [beamxbatch, vocab]\n if batch_offset is None:\n scores = scores.view(-1, batch.batch_size, scores.size(-1))\n scores = scores.transpose(0, 1).contiguous()\n else:\n scores = scores.view(-1, self.beam_size, scores.size(-1))\n\n\n #print(\"TGT_VOCAB: \", self._tgt_vocab)\n scores = collapse_copy_scores(\n scores,\n batch,\n self._tgt_vocab,\n src_vocabs,\n batch_dim=0,\n batch_offset=batch_offset\n )\n scores = scores.view(decoder_in.size(0), -1, scores.size(-1))\n\n log_probs = scores.squeeze(0).log()\n #print(log_probs.size())\n # returns [(batch_size x beam_size) , vocab ] when 1 step\n # or [ tgt_len, batch_size, vocab ] when full sentence\n return log_probs, attn\n\n def _translate_batch_with_strategy(\n self,\n batch,\n src_vocabs,\n decode_strategy):\n \"\"\"Translate a batch of sentences step by step using cache.\n\n Args:\n batch: a batch of sentences, yield by data iterator.\n src_vocabs (list): list of torchtext.data.Vocab if can_copy.\n decode_strategy (DecodeStrategy): A decode strategy to use for\n generate translation step by step.\n\n Returns:\n results (dict): The translation results.\n \"\"\"\n # (0) Prep the components of the search.\n use_src_map = self.copy_attn\n parallel_paths = 
decode_strategy.parallel_paths # beam_size\n batch_size = batch.batch_size\n\n # (1) Run the encoder on the src.\n src, enc_states, memory_bank, src_lengths = self._run_encoder(batch)\n self.model.decoder.init_state(src, memory_bank, enc_states)\n\n results = {\n \"predictions\": None,\n \"scores\": None,\n \"attention\": None,\n \"batch\": batch,\n \"gold_score\": self._gold_score(\n batch, memory_bank, src_lengths, src_vocabs, use_src_map,\n enc_states, batch_size, src)}\n\n # (2) prep decode_strategy. Possibly repeat src objects.\n src_map = batch.src_map if use_src_map else None\n fn_map_state, memory_bank, memory_lengths, src_map = \\\n decode_strategy.initialize(memory_bank, src_lengths, src_map)\n if fn_map_state is not None:\n self.model.decoder.map_state(fn_map_state)\n\n # (3) Begin decoding step by step:\n for step in range(decode_strategy.max_length):\n decoder_input = decode_strategy.current_predictions.view(1, -1, 1)\n log_probs, attn = self._decode_and_generate(\n decoder_input,\n memory_bank,\n batch,\n src_vocabs,\n memory_lengths=memory_lengths,\n src_map=src_map,\n step=step,\n batch_offset=decode_strategy.batch_offset)\n\n decode_strategy.advance(log_probs, attn)\n any_finished = decode_strategy.is_finished.any()\n if any_finished:\n decode_strategy.update_finished()\n if decode_strategy.done:\n break\n\n select_indices = decode_strategy.select_indices\n\n if any_finished:\n # Reorder states.\n if isinstance(memory_bank, tuple):\n memory_bank = tuple(x.index_select(1, select_indices)\n for x in memory_bank)\n else:\n memory_bank = memory_bank.index_select(1, select_indices)\n\n memory_lengths = memory_lengths.index_select(0, select_indices)\n\n if src_map is not None:\n src_map = src_map.index_select(1, select_indices)\n\n if parallel_paths > 1 or any_finished:\n self.model.decoder.map_state(\n lambda state, dim: state.index_select(dim, select_indices))\n\n results[\"scores\"] = decode_strategy.scores\n results[\"predictions\"] = decode_strategy.predictions\n results[\"attention\"] = decode_strategy.attention\n if self.report_align:\n results[\"alignment\"] = self._align_forward(\n batch, decode_strategy.predictions)\n else:\n results[\"alignment\"] = [[] for _ in range(batch_size)]\n return results\n\n def _score_target(self, batch, memory_bank, src_lengths,\n src_vocabs, src_map):\n tgt = batch.tgt\n tgt_in = tgt[:-1]\n\n log_probs, attn = self._decode_and_generate(\n tgt_in, memory_bank, batch, src_vocabs,\n memory_lengths=src_lengths, src_map=src_map)\n\n log_probs[:, :, self._tgt_pad_idx] = 0\n gold = tgt[1:]\n gold_scores = log_probs.gather(2, gold)\n gold_scores = gold_scores.sum(dim=0).view(-1)\n\n return gold_scores\n\n def _report_score(self, name, score_total, words_total):\n if words_total == 0:\n msg = \"%s No words predicted\" % (name,)\n else:\n avg_score = score_total / words_total\n ppl = np.exp(-score_total.item() / words_total)\n msg = (\"%s AVG SCORE: %.4f, %s PPL: %.4f\" % (\n name, avg_score,\n name, ppl))\n return msg\n" ]
[ [ "torch.device", "torch.cat", "torch.Tensor", "torch.no_grad" ] ]
maxpark/hailo_model_zoo
[ "94beb7d80ef56e5dfa9978c90486e45a73306c79" ]
[ "hailo_model_zoo/core/postprocessing/detection/nanodet.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom tensorflow.image import combined_non_max_suppression\n\nfrom .centernet import COCO_2017_TO_2014_TRANSLATION\n\n\nclass NanoDetPostProc:\n def __init__(self, img_dims=(416, 416), nms_iou_thresh=0.6, labels_offset=0,\n score_threshold=0.3, anchors=None, classes=80, **kwargs):\n self._num_classes = classes\n self._image_dims = img_dims\n self._nms_iou_thresh = nms_iou_thresh\n self._score_threshold = score_threshold\n self._strides = anchors.strides\n self.reg_max = anchors.regression_length\n self._labels_offset = labels_offset\n\n def _get_scores_boxes(self, endnodes):\n scores, boxes = [], []\n for node in endnodes:\n fm_size_h, fm_size_w = node.shape[1:3]\n scores.append(tf.reshape(node[:, :, :, :self._num_classes],\n [-1, fm_size_h * fm_size_w, self._num_classes]))\n boxes.append(tf.reshape(node[:, :, :, self._num_classes:],\n [-1, fm_size_h * fm_size_w, 4, (self.reg_max + 1)]))\n return tf.concat(scores, axis=1), boxes\n\n def _box_decoding(self, raw_boxes):\n boxes = None\n for box_distribute, stride in zip(raw_boxes, self._strides):\n\n # create grid\n shape = [int(x / stride) for x in self._image_dims]\n grid_x = np.arange(shape[1])\n grid_y = np.arange(shape[0])\n grid_x, grid_y = np.meshgrid(grid_x, grid_y)\n ct_row = (grid_y.flatten() + 0.5) * stride\n ct_col = (grid_x.flatten() + 0.5) * stride\n center = np.stack((ct_col, ct_row, ct_col, ct_row), axis=1)\n\n # box distribution to distance\n reg_range = np.arange(self.reg_max + 1)\n box_distance = tf.nn.softmax(box_distribute, axis=-1)\n box_distance = box_distance * np.reshape(reg_range, (1, 1, 1, -1))\n box_distance = tf.reduce_sum(box_distance, axis=-1)\n box_distance = box_distance * stride\n\n # decode box\n box_distance = tf.concat([box_distance[:, :, :2] * (-1), box_distance[:, :, 2:]], axis=-1)\n decode_box = np.expand_dims(center, axis=0) + box_distance\n\n # clipping\n xmin = tf.maximum(0.0, decode_box[:, :, 0]) / self._image_dims[1]\n ymin = tf.maximum(0.0, decode_box[:, :, 1]) / self._image_dims[0]\n xmax = tf.minimum(tf.cast(self._image_dims[1], tf.float32), decode_box[:, :, 2]) / self._image_dims[1]\n ymax = tf.minimum(tf.cast(self._image_dims[0], tf.float32), decode_box[:, :, 3]) / self._image_dims[0]\n decode_box = tf.transpose([ymin, xmin, ymax, xmax], [1, 2, 0])\n\n boxes = decode_box if boxes is None else tf.concat([boxes, decode_box], axis=1)\n return tf.expand_dims(boxes, axis=2)\n\n def postprocessing(self, endnodes, **kwargs):\n\n scores, raw_boxes = self._get_scores_boxes(endnodes)\n\n # decode score/class\n scores = tf.sigmoid(scores)\n\n # decode boxes\n boxes = self._box_decoding(raw_boxes)\n\n # nms\n (nmsed_boxes, nmsed_scores, nmsed_classes, num_detections) = \\\n combined_non_max_suppression(boxes=boxes,\n scores=scores,\n score_threshold=self._score_threshold,\n iou_threshold=self._nms_iou_thresh,\n max_output_size_per_class=100,\n max_total_size=100)\n\n # adding offset to the class prediction and cast to integer\n def translate_coco_2017_to_2014(nmsed_classes):\n return np.vectorize(COCO_2017_TO_2014_TRANSLATION.get)(nmsed_classes).astype(np.int32)\n\n nmsed_classes = tf.cast(tf.add(nmsed_classes, self._labels_offset), tf.int16)\n [nmsed_classes] = tf.py_function(translate_coco_2017_to_2014, [nmsed_classes], ['int32'])\n nmsed_classes.set_shape((1, 100))\n\n return {'detection_boxes': nmsed_boxes,\n 'detection_scores': nmsed_scores,\n 'detection_classes': nmsed_classes,\n 'num_detections': num_detections}\n" ]
[ [ "tensorflow.concat", "numpy.reshape", "tensorflow.sigmoid", "tensorflow.expand_dims", "numpy.vectorize", "tensorflow.transpose", "tensorflow.py_function", "tensorflow.reshape", "tensorflow.cast", "numpy.stack", "numpy.arange", "tensorflow.image.combined_non_max_suppression", "tensorflow.reduce_sum", "tensorflow.nn.softmax", "tensorflow.maximum", "tensorflow.add", "numpy.meshgrid", "numpy.expand_dims" ] ]
sunnyln/birdnet2
[ "d1a2b703475345d887c325c135013ed9f72d3a57" ]
[ "detectron2/modeling/meta_arch/rcnn.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport logging\nimport torch\nfrom torch import nn\n\nfrom detectron2.structures import ImageList\nfrom detectron2.utils.logger import log_first_n\n\nfrom ..backbone import build_backbone\nfrom ..postprocessing import detector_postprocess\nfrom ..proposal_generator import build_proposal_generator\nfrom ..roi_heads import build_roi_heads\nfrom .build import META_ARCH_REGISTRY\n\n__all__ = [\"GeneralizedRCNN\", \"ProposalNetwork\"]\n\n\n@META_ARCH_REGISTRY.register()\nclass GeneralizedRCNN(nn.Module):\n \"\"\"\n Generalized R-CNN. Any models that contains the following three components:\n 1. Per-image feature extraction (aka backbone)\n 2. Region proposal generation\n 3. Per-region feature extraction and prediction\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__()\n\n self.device = torch.device(cfg.MODEL.DEVICE)\n self.backbone = build_backbone(cfg)\n self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())\n self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape())\n\n assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)\n num_channels = len(cfg.MODEL.PIXEL_MEAN)\n pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(num_channels, 1, 1)\n pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(num_channels, 1, 1)\n self.normalizer = lambda x: (x - pixel_mean) / pixel_std\n self.to(self.device)\n self.rotated_box_training = cfg.ROTATED_BOX_TRAINING\n\n def forward(self, batched_inputs):\n \"\"\"\n Args:\n batched_inputs: a list, batched outputs of :class:`DatasetMapper` .\n Each item in the list contains the inputs for one image.\n For now, each item in the list is a dict that contains:\n\n * image: Tensor, image in (C, H, W) format.\n * instances (optional): groundtruth :class:`Instances`\n * proposals (optional): :class:`Instances`, precomputed proposals.\n\n Other information that's included in the original dicts, such as:\n\n * \"height\", \"width\" (int): the output resolution of the model, used in inference.\n See :meth:`postprocess` for details.\n\n Returns:\n list[dict]:\n Each dict is the output for one input image.\n The dict contains one key \"instances\" whose value is a :class:`Instances`.\n The :class:`Instances` object has the following keys:\n \"pred_boxes\", \"pred_classes\", \"scores\", \"pred_masks\", \"pred_keypoints\"\n \"\"\"\n if not self.training:\n return self.inference(batched_inputs)\n\n images = self.preprocess_image(batched_inputs)\n if \"instances\" in batched_inputs[0]:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n elif \"targets\" in batched_inputs[0]:\n log_first_n(\n logging.WARN, \"'targets' in the model inputs is now renamed to 'instances'!\", n=10\n )\n gt_instances = [x[\"targets\"].to(self.device) for x in batched_inputs]\n else:\n gt_instances = None\n\n features = self.backbone(images.tensor)\n\n if self.proposal_generator:\n proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)\n else:\n assert \"proposals\" in batched_inputs[0]\n proposals = [x[\"proposals\"].to(self.device) for x in batched_inputs]\n proposal_losses = {}\n\n _, detector_losses = self.roi_heads(images, features, proposals, gt_instances)\n\n losses = {}\n losses.update(detector_losses)\n losses.update(proposal_losses)\n return losses\n\n def inference(self, batched_inputs, detected_instances=None, do_postprocess=True):\n \"\"\"\n Run inference on the given 
inputs.\n\n Args:\n batched_inputs (list[dict]): same as in :meth:`forward`\n detected_instances (None or list[Instances]): if not None, it\n contains an `Instances` object per image. The `Instances`\n object contains \"pred_boxes\" and \"pred_classes\" which are\n known boxes in the image.\n The inference will then skip the detection of bounding boxes,\n and only predict other per-ROI outputs.\n do_postprocess (bool): whether to apply post-processing on the outputs.\n\n Returns:\n same as in :meth:`forward`.\n \"\"\"\n assert not self.training\n\n images = self.preprocess_image(batched_inputs)\n features = self.backbone(images.tensor)\n\n if detected_instances is None:\n if self.proposal_generator:\n proposals, _ = self.proposal_generator(images, features, None)\n else:\n assert \"proposals\" in batched_inputs[0]\n proposals = [x[\"proposals\"].to(self.device) for x in batched_inputs]\n\n results, _ = self.roi_heads(images, features, proposals, None)\n else:\n detected_instances = [x.to(self.device) for x in detected_instances]\n results = self.roi_heads.forward_with_given_boxes(features, detected_instances)\n\n if do_postprocess:\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n results, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = detector_postprocess(results_per_image, height, width, rotated_box_training=self.rotated_box_training)\n processed_results.append({\"instances\": r})\n return processed_results\n else:\n return results\n\n def preprocess_image(self, batched_inputs):\n \"\"\"\n Normalize, pad and batch the input images.\n \"\"\"\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images = [self.normalizer(x) for x in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n return images\n\n\n@META_ARCH_REGISTRY.register()\nclass ProposalNetwork(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n self.device = torch.device(cfg.MODEL.DEVICE)\n\n self.backbone = build_backbone(cfg)\n self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())\n\n pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(-1, 1, 1)\n pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(-1, 1, 1)\n self.normalizer = lambda x: (x - pixel_mean) / pixel_std\n self.to(self.device)\n\n def forward(self, batched_inputs):\n \"\"\"\n Args:\n Same as in :class:`GeneralizedRCNN.forward`\n\n Returns:\n list[dict]: Each dict is the output for one input image.\n The dict contains one key \"proposals\" whose value is a\n :class:`Instances` with keys \"proposal_boxes\" and \"objectness_logits\".\n \"\"\"\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images = [self.normalizer(x) for x in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n features = self.backbone(images.tensor)\n\n if \"instances\" in batched_inputs[0]:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n elif \"targets\" in batched_inputs[0]:\n log_first_n(\n logging.WARN, \"'targets' in the model inputs is now renamed to 'instances'!\", n=10\n )\n gt_instances = [x[\"targets\"].to(self.device) for x in batched_inputs]\n else:\n gt_instances = None\n proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)\n # In training, the proposals are not useful at all but we generate them anyway.\n 
# This makes RPN-only models about 5% slower.\n if self.training:\n return proposal_losses\n\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n proposals, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({\"proposals\": r})\n return processed_results\n" ]
[ [ "torch.device", "torch.Tensor" ] ]
iglpdc/nilearn
[ "a4cc998b7a34fa48a77ce46f9f0b6b4e75d8a2d1" ]
[ "nilearn/regions/region_extractor.py" ]
[ "\"\"\"\nBetter brain parcellations for Region of Interest analysis\n\"\"\"\n\nimport numbers\nimport numpy as np\n\nfrom scipy.ndimage import label\nfrom scipy.stats import scoreatpercentile\n\nfrom sklearn.externals.joblib import Memory\n\nfrom .. import masking\nfrom ..input_data import NiftiMapsMasker\nfrom .._utils import check_niimg, check_niimg_4d\nfrom ..image import new_img_like, resample_img\nfrom ..image.image import _smooth_array, threshold_img\nfrom .._utils.niimg_conversions import concat_niimgs, _check_same_fov\nfrom .._utils.niimg import _safe_get_data\nfrom .._utils.compat import _basestring\nfrom .._utils.ndimage import _peak_local_max\nfrom .._utils.segmentation import _random_walker\n\n\ndef _threshold_maps_ratio(maps_img, threshold):\n \"\"\" Automatic thresholding of atlas maps image.\n\n Considers the given threshold as a ratio to the total number of voxels\n in the brain volume. This gives a certain number within the data\n voxel size which means that nonzero voxels which fall above than this\n size will be kept across all the maps.\n\n Parameters\n ----------\n maps_img: Niimg-like object\n an image of brain atlas maps.\n threshold: float\n If float, value is used as a ratio to n_voxels to get a certain threshold\n size in number to threshold the image. The value should be positive and\n within the range of number of maps (i.e. n_maps in 4th dimension).\n\n Returns\n -------\n threshold_maps_img: Nifti1Image\n gives us thresholded image.\n \"\"\"\n maps = check_niimg(maps_img)\n n_maps = maps.shape[-1]\n if not isinstance(threshold, numbers.Real) or threshold <= 0 or threshold > n_maps:\n raise ValueError(\"threshold given as ratio to the number of voxels must \"\n \"be Real number and should be positive and between 0 and \"\n \"total number of maps i.e. n_maps={0}. \"\n \"You provided {1}\".format(n_maps, threshold))\n else:\n ratio = threshold\n\n maps_data = np.nan_to_num(maps.get_data())\n\n abs_maps = np.abs(maps_data)\n # thresholding\n cutoff_threshold = scoreatpercentile(\n abs_maps, 100. - (100. / n_maps) * ratio)\n maps_data[abs_maps < cutoff_threshold] = 0.\n\n threshold_maps_img = new_img_like(maps, maps_data)\n\n return threshold_maps_img\n\n\ndef connected_regions(maps_img, min_region_size=1350,\n extract_type='local_regions', smoothing_fwhm=6,\n mask_img=None):\n \"\"\" Extraction of brain connected regions into separate regions.\n\n Note: the region size should be defined in mm^3. See the documentation for\n more details.\n\n .. versionadded:: 0.2\n\n Parameters\n ----------\n maps_img: Niimg-like object\n an image of brain activation or atlas maps to be extracted into set of\n separate brain regions.\n\n min_region_size: int, default 1350 mm^3, optional\n Minimum volume in mm3 for a region to be kept. For example, if the voxel\n size is 3x3x3 mm then the volume of the voxel is 27mm^3. 
By default, it\n is 1350mm^3 which means we take a minimum size of 1350 / 27 = 50 voxels.\n\n extract_type: str {'connected_components', 'local_regions'} \\\n default local_regions, optional\n If 'connected_components', each component/region in the image is extracted\n automatically by labelling each region based upon the presence of unique\n features in their respective regions.\n If 'local_regions', each component/region is extracted based on its\n maximum peak value, which defines a seed marker, and then by running the\n random walker segmentation algorithm on these markers for region separation.\n\n smoothing_fwhm: scalar, default 6mm, optional\n Smoothing applied to the image to better extract sparse regions. This parameter\n is passed to `_smooth_array` and exists only for extract_type 'local_regions'.\n\n mask_img: Niimg-like object, default None\n If given, mask image is applied to input data.\n If None, no masking is applied.\n\n Returns\n -------\n regions_extracted_img: Nifti1Image\n a 4D image of the extracted brain regions. Each 3D image consists\n of only one separated region.\n\n index_of_each_map: numpy array\n an array of indices where each index denotes the identity\n of each extracted region within its family of brain maps.\n \"\"\"\n all_regions_imgs = []\n index_of_each_map = []\n maps_img = check_niimg(maps_img, atleast_4d=True)\n maps = _safe_get_data(maps_img).copy()\n affine = maps_img.get_affine()\n min_region_size = min_region_size / np.prod(np.diag(abs(affine[:3])))\n\n allowed_extract_types = ['connected_components', 'local_regions']\n if extract_type not in allowed_extract_types:\n message = (\"'extract_type' should be given as one of these {0}. \"\n \"You provided extract_type='{1}'\").format(allowed_extract_types, extract_type)\n raise ValueError(message)\n\n if mask_img is not None:\n if not _check_same_fov(maps_img, mask_img):\n mask_img = resample_img(mask_img,\n target_affine=maps_img.get_affine(),\n target_shape=maps_img.shape[:3],\n interpolation=\"nearest\")\n mask_data, _ = masking._load_mask_img(mask_img)\n # Set to 0 the values that fall outside of the mask\n maps[mask_data == 0.] = 0.\n\n for index in range(maps.shape[-1]):\n regions = []\n map_3d = maps[..., index]\n # Mark the seeds using random walker\n if extract_type == 'local_regions':\n smooth_map = _smooth_array(map_3d, affine=affine, fwhm=smoothing_fwhm)\n seeds = _peak_local_max(smooth_map)\n seeds_label, seeds_id = label(seeds)\n # Assign -1 to zero-valued voxels so that they are ignored\n seeds_label[map_3d == 0.] 
= -1\n rw_maps = _random_walker(map_3d, seeds_label)\n # Now simply replace \"-1\" with \"0\" for region separation\n rw_maps[rw_maps == -1] = 0.\n label_maps = rw_maps\n else:\n # Connected component extraction\n label_maps, n_labels = label(map_3d)\n\n # Take the size of each labelled region\n labels_size = np.bincount(label_maps.ravel())\n # set the background label, sitting at index zero, to zero\n labels_size[0] = 0.\n for label_id, label_size in enumerate(labels_size):\n if label_size > min_region_size:\n region_data = (label_maps == label_id) * map_3d\n region_img = new_img_like(maps_img, region_data)\n regions.append(region_img)\n\n index_of_each_map.extend([index] * len(regions))\n all_regions_imgs.extend(regions)\n\n regions_extracted_img = concat_niimgs(all_regions_imgs)\n\n return regions_extracted_img, index_of_each_map\n\n\nclass RegionExtractor(NiftiMapsMasker):\n \"\"\"Class for brain region extraction.\n\n Region extraction is a post-processing technique implemented to\n automatically segment each brain atlas map\n into a set of separated brain activation regions.\n In particular, it shows that each decomposed brain map can be\n used to focus on a target-specific Regions of Interest analysis.\n\n .. versionadded:: 0.2\n\n Parameters\n ----------\n maps_img: 4D Niimg-like object\n Image containing a set of whole brain atlas maps or statistically\n decomposed brain maps.\n\n mask_img: Niimg-like object or None, default None, optional\n Mask to be applied to input data, passed to NiftiMapsMasker.\n If None, no masking is applied.\n\n min_region_size: int, default 1350 mm^3, optional\n Minimum volume in mm3 for a region to be kept. For example, if\n the voxel size is 3x3x3 mm then the volume of the voxel is\n 27mm^3. By default, it is 1350mm^3 which means we take a minimum\n size of 1350 / 27 = 50 voxels.\n\n threshold: number, default 1., optional\n A value used by the 'ratio_n_voxels', 'img_value' or 'percentile'\n `thresholding_strategy`, depending on which is selected.\n\n thresholding_strategy: str {'ratio_n_voxels', 'img_value', 'percentile'}, optional\n If default 'ratio_n_voxels', we apply thresholding that will keep\n the most intense nonzero brain voxels (denoted as n_voxels)\n across all maps (n_voxels being the number of voxels in the brain\n volume). A float value given in the `threshold` parameter indicates\n the ratio of voxels to keep, meaning that if float=2. then the maps\n will together have 2. x n_voxels non-zero voxels. If set to\n 'percentile', images are thresholded based on the score obtained\n with the given percentile on the data, and only the voxel\n intensities above this score are kept. If set\n to 'img_value', we apply thresholding based on the non-zero voxel\n intensities across all maps. A value given in the `threshold`\n parameter indicates that we keep only those voxels whose\n intensities are greater than this value.\n\n extractor: str {'connected_components', 'local_regions'} default 'local_regions', optional\n If 'connected_components', each component/region in the image is\n extracted automatically by labelling each region based upon the\n presence of unique features in their respective regions. 
If\n 'local_regions', each component/region is extracted based on\n its maximum peak value, which defines a seed marker, and then by\n running the random walker segmentation algorithm on these markers\n for region separation.\n\n standardize: bool, True or False, default False, optional\n If True, the time series signals are centered and normalized by\n putting their mean to 0 and variance to 1. Recommended to\n set to True if signals are not already standardized.\n passed to class NiftiMapsMasker.\n\n detrend: bool, True or False, default False, optional\n This parameter is passed to nilearn.signal.clean and\n indicates whether to detrend the time series signals or not.\n passed to class NiftiMapsMasker.\n\n low_pass: float, default None, optional\n This value is applied to the signals via signal.clean.\n Please see the related signal.clean documentation for more details.\n passed to class NiftiMapsMasker.\n\n high_pass: float, default None, optional\n This value is applied to the signals via signal.clean.\n Please see the related signal.clean documentation for more details.\n passed to NiftiMapsMasker.\n\n t_r: float, default None, optional\n Repetition time in sec. This value is given to signal.clean.\n Please see the related documentation for details.\n passed to NiftiMapsMasker.\n\n memory: instance of joblib.Memory, string, default None, optional\n Used to cache the masking process. If a string is given, the path\n is set with this string as a folder name in the directory.\n passed to NiftiMapsMasker.\n\n memory_level: int, default 0, optional\n Aggressiveness of memory caching. The higher the number, the higher\n the number of functions that will be cached. Zero means no caching.\n passed to NiftiMapsMasker.\n\n verbose: int, default 0, optional\n Indicates the level of verbosity of printed messages. Zero\n indicates nothing is printed.\n\n Attributes\n ----------\n `index_` : numpy array\n array of indices where each index value assigns each\n separate region to its corresponding family of brain maps.\n\n `regions_img_` : Nifti1Image\n List of separated regions, with each region lying on an\n original volume, concatenated into a 4D image.\n\n References\n ----------\n * Abraham et al. \"Region segmentation for sparse decompositions:\n better brain parcellations from rest fMRI\", Sparsity Techniques in\n Medical Imaging, Sep 2014, Boston, United States. 
pp.8\n\n \"\"\"\n def __init__(self, maps_img, mask_img=None, min_region_size=1350,\n threshold=1., thresholding_strategy='ratio_n_voxels',\n extractor='local_regions', standardize=False, detrend=False,\n low_pass=None, high_pass=None, t_r=None,\n memory=Memory(cachedir=None), memory_level=0, verbose=0):\n super(RegionExtractor, self).__init__(\n maps_img=maps_img, mask_img=mask_img,\n standardize=standardize, detrend=detrend, low_pass=low_pass,\n high_pass=high_pass, t_r=t_r, memory=memory,\n memory_level=memory_level, verbose=verbose)\n self.maps_img = maps_img\n self.min_region_size = min_region_size\n self.thresholding_strategy = thresholding_strategy\n self.threshold = threshold\n self.extractor = extractor\n\n def fit(self, X=None, y=None):\n \"\"\" Prepare the data and set up the region extraction\n \"\"\"\n maps_img = check_niimg_4d(self.maps_img)\n\n list_of_strategies = ['ratio_n_voxels', 'img_value', 'percentile']\n if self.thresholding_strategy not in list_of_strategies:\n message = (\"'thresholding_strategy' should be \"\n \"one of {0}\").format(list_of_strategies)\n raise ValueError(message)\n\n if self.threshold is None or isinstance(self.threshold, _basestring):\n raise ValueError(\"The given input to threshold is not valid. \"\n \"Please submit a valid number specific to one of \"\n \"the strategies in {0}\".format(list_of_strategies))\n elif isinstance(self.threshold, numbers.Number):\n # foreground extraction\n if self.thresholding_strategy == 'ratio_n_voxels':\n threshold_maps = _threshold_maps_ratio(maps_img, self.threshold)\n else:\n if self.thresholding_strategy == 'percentile':\n self.threshold = \"{0}%\".format(self.threshold)\n threshold_maps = threshold_img(maps_img, mask_img=self.mask_img,\n threshold=self.threshold)\n\n # connected component extraction\n self.regions_img_, self.index_ = connected_regions(threshold_maps,\n self.min_region_size,\n self.extractor)\n\n self.maps_img = self.regions_img_\n super(RegionExtractor, self).fit()\n\n return self\n" ]
[ [ "scipy.stats.scoreatpercentile", "sklearn.externals.joblib.Memory", "scipy.ndimage.label", "numpy.abs" ] ]
hodamr/biu-advenced-ai-ex2
[ "2df6eb7ed389378326bd5c24fae43a65f190d221" ]
[ "deep_rl/utils/torch_utils.py" ]
[ "#######################################################################\n# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\nfrom .config import *\nimport torch\nimport torch.autograd as autograd\nimport os\n\ndef select_device(gpu_id):\n # if torch.cuda.is_available() and gpu_id >= 0:\n if gpu_id >= 0:\n Config.DEVICE = torch.device('cuda:%d' % (gpu_id))\n else:\n Config.DEVICE = torch.device('cpu')\n\ndef tensor(x):\n if isinstance(x, torch.Tensor):\n return x\n x = torch.tensor(x, device=Config.DEVICE, dtype=torch.float32)\n return x\n\ndef range_tensor(end):\n return torch.arange(end).long().to(Config.DEVICE)\n\ndef to_np(t):\n return t.cpu().detach().numpy()\n\ndef random_seed(seed=None):\n np.random.seed(seed)\n torch.manual_seed(np.random.randint(int(1e6)))\n\ndef set_one_thread():\n os.environ['OMP_NUM_THREADS'] = '1'\n os.environ['MKL_NUM_THREADS'] = '1'\n torch.set_num_threads(1)\n\ndef huber(x, k=1.0):\n return torch.where(x.abs() < k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k))\n\ndef epsilon_greedy(epsilon, x):\n if len(x.shape) == 1:\n return np.random.randint(len(x)) if np.random.rand() < epsilon else np.argmax(x)\n elif len(x.shape) == 2:\n random_actions = np.random.randint(x.shape[1], size=x.shape[0])\n greedy_actions = np.argmax(x, axis=-1)\n dice = np.random.rand(x.shape[0])\n return np.where(dice < epsilon, random_actions, greedy_actions)\n\ndef sync_grad(target_network, src_network):\n for param, src_param in zip(target_network.parameters(), src_network.parameters()):\n param._grad = src_param.grad.clone()\n\n# adapted from https://github.com/pytorch/pytorch/issues/12160\ndef batch_diagonal(input):\n # idea from here: https://discuss.pytorch.org/t/batch-of-diagonal-matrix/13560\n # batches a stack of vectors (batch x N) -> a stack of diagonal matrices (batch x N x N)\n # works in 2D -> 3D, should also work in higher dimensions\n # make a zero matrix, which duplicates the last dim of input\n dims = input.size()\n dims = dims + dims[-1:]\n output = torch.zeros(dims, device=input.device)\n # stride across the first dimensions, add one to get the diagonal of the last dimension\n strides = [output.stride(i) for i in range(input.dim() - 1 )]\n strides.append(output.size(-1) + 1)\n # stride and copy the input to the diagonal\n output.as_strided(input.size(), strides).copy_(input)\n return output\n\ndef batch_trace(input):\n i = range_tensor(input.size(-1))\n t = input[:, i, i].sum(-1).unsqueeze(-1).unsqueeze(-1)\n return t\n\n\nclass DiagonalNormal:\n def __init__(self, mean, std):\n self.dist = torch.distributions.Normal(mean, std)\n self.sample = self.dist.sample\n\n def log_prob(self, action):\n return self.dist.log_prob(action).sum(-1).unsqueeze(-1)\n\n def entropy(self):\n return self.dist.entropy().sum(-1).unsqueeze(-1)\n\n def cdf(self, action):\n return self.dist.cdf(action).prod(-1).unsqueeze(-1)\n\nclass BatchCategorical:\n def __init__(self, logits):\n self.pre_shape = logits.size()[:-1]\n logits = logits.view(-1, logits.size(-1))\n self.dist = torch.distributions.Categorical(logits=logits)\n\n def log_prob(self, action):\n log_pi = self.dist.log_prob(action.view(-1))\n log_pi = log_pi.view(action.size()[:-1] + (-1, ))\n return log_pi\n\n def entropy(self):\n ent = self.dist.entropy()\n ent = ent.view(self.pre_shape + (-1, ))\n return ent\n\n def sample(self, 
sample_shape=torch.Size([])):\n ret = self.dist.sample(sample_shape)\n ret = ret.view(sample_shape + self.pre_shape + (-1, ))\n return ret\n\n" ]
[ [ "torch.zeros", "torch.device", "torch.Size", "torch.distributions.Categorical", "torch.arange", "torch.distributions.Normal", "torch.tensor", "torch.set_num_threads" ] ]
KidChou/yolov5_prune
[ "126054962197a51c79140384c591b9190d146019" ]
[ "models/common.py" ]
[ "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nCommon modules\n\"\"\"\n\nimport logging\nimport math\nimport warnings\nfrom copy import copy\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport requests\nimport torch\nimport torch.nn as nn\nfrom PIL import Image\nfrom torch.cuda import amp\n\nfrom utils.datasets import exif_transpose, letterbox\nfrom utils.general import colorstr, increment_path, make_divisible, non_max_suppression, save_one_box, \\\n scale_coords, xyxy2xywh\nfrom utils.plots import Annotator, colors\nfrom utils.torch_utils import time_sync\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef autopad(k, p=None): # kernel, padding\n # Pad to 'same'\n if p is None:\n p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad\n return p\n\n\nclass Conv(nn.Module):\n # Standard convolution\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super().__init__()\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)\n self.bn = nn.BatchNorm2d(c2)\n self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())\n\n def forward(self, x):\n return self.act(self.bn(self.conv(x)))\n\n def forward_fuse(self, x):\n return self.act(self.conv(x))\n\n\nclass DWConv(Conv):\n # Depth-wise convolution class\n def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act)\n\n\nclass TransformerLayer(nn.Module):\n # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)\n def __init__(self, c, num_heads):\n super().__init__()\n self.q = nn.Linear(c, c, bias=False)\n self.k = nn.Linear(c, c, bias=False)\n self.v = nn.Linear(c, c, bias=False)\n self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)\n self.fc1 = nn.Linear(c, c, bias=False)\n self.fc2 = nn.Linear(c, c, bias=False)\n\n def forward(self, x):\n x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x\n x = self.fc2(self.fc1(x)) + x\n return x\n\n\nclass TransformerBlock(nn.Module):\n # Vision Transformer https://arxiv.org/abs/2010.11929\n def __init__(self, c1, c2, num_heads, num_layers):\n super().__init__()\n self.conv = None\n if c1 != c2:\n self.conv = Conv(c1, c2)\n self.linear = nn.Linear(c2, c2) # learnable position embedding\n self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])\n self.c2 = c2\n\n def forward(self, x):\n if self.conv is not None:\n x = self.conv(x)\n b, _, w, h = x.shape\n p = x.flatten(2).unsqueeze(0).transpose(0, 3).squeeze(3)\n return self.tr(p + self.linear(p)).unsqueeze(3).transpose(0, 3).reshape(b, self.c2, w, h)\n\n\nclass Bottleneck(nn.Module):\n # Standard bottleneck\n def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_, c2, 3, 1, g=g)\n self.add = shortcut and c1 == c2\n\n def forward(self, x):\n return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\n\n\nclass BottleneckCSP(nn.Module):\n # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = nn.Conv2d(c1, c_, 1, 1, 
bias=False)\n self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)\n self.cv4 = Conv(2 * c_, c2, 1, 1)\n self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)\n self.act = nn.LeakyReLU(0.1, inplace=True)\n self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])\n\n def forward(self, x):\n y1 = self.cv3(self.m(self.cv1(x)))\n y2 = self.cv2(x)\n return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))\n\n\nclass C3(nn.Module):\n # CSP Bottleneck with 3 convolutions\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c1, c_, 1, 1)\n self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)\n self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])\n # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])\n\n def forward(self, x):\n return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))\n\n\nclass C3TR(C3):\n # C3 module with TransformerBlock()\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n super().__init__(c1, c2, n, shortcut, g, e)\n c_ = int(c2 * e)\n self.m = TransformerBlock(c_, c_, 4, n)\n\n\nclass C3SPP(C3):\n # C3 module with SPP()\n def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):\n super().__init__(c1, c2, n, shortcut, g, e)\n c_ = int(c2 * e)\n self.m = SPP(c_, c_, k)\n\n\nclass C3Ghost(C3):\n # C3 module with GhostBottleneck()\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n super().__init__(c1, c2, n, shortcut, g, e)\n c_ = int(c2 * e) # hidden channels\n self.m = nn.Sequential(*[GhostBottleneck(c_, c_) for _ in range(n)])\n\n\nclass SPP(nn.Module):\n # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729\n def __init__(self, c1, c2, k=(5, 9, 13)):\n super().__init__()\n c_ = c1 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)\n self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])\n\n def forward(self, x):\n x = self.cv1(x)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning\n return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))\n\n\nclass SPPF(nn.Module):\n # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher\n def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))\n super().__init__()\n c_ = c1 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_ * 4, c2, 1, 1)\n self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)\n\n def forward(self, x):\n x = self.cv1(x)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning\n y1 = self.m(x)\n y2 = self.m(y1)\n return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1))\n\n\nclass Focus(nn.Module):\n # Focus wh information into c-space\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super().__init__()\n self.conv = Conv(c1 * 4, c2, k, s, p, g, act)\n # self.contract = Contract(gain=2)\n\n def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)\n return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))\n # return self.conv(self.contract(x))\n\n\nclass GhostConv(nn.Module):\n # Ghost Convolution 
https://github.com/huawei-noah/ghostnet\n def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups\n super().__init__()\n c_ = c2 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, k, s, None, g, act)\n self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)\n\n def forward(self, x):\n y = self.cv1(x)\n return torch.cat([y, self.cv2(y)], 1)\n\n\nclass GhostBottleneck(nn.Module):\n # Ghost Bottleneck https://github.com/huawei-noah/ghostnet\n def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride\n super().__init__()\n c_ = c2 // 2\n self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw\n DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw\n GhostConv(c_, c2, 1, 1, act=False)) # pw-linear\n self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),\n Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()\n\n def forward(self, x):\n return self.conv(x) + self.shortcut(x)\n\n\nclass Contract(nn.Module):\n # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)\n def __init__(self, gain=2):\n super().__init__()\n self.gain = gain\n\n def forward(self, x):\n b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain'\n s = self.gain\n x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)\n x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)\n return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)\n\n\nclass Expand(nn.Module):\n # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)\n def __init__(self, gain=2):\n super().__init__()\n self.gain = gain\n\n def forward(self, x):\n b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'\n s = self.gain\n x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80)\n x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)\n return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160)\n\n\nclass Concat(nn.Module):\n # Concatenate a list of tensors along dimension\n def __init__(self, dimension=1):\n super().__init__()\n self.d = dimension\n\n def forward(self, x):\n return torch.cat(x, self.d)\n\n\nclass AutoShape(nn.Module):\n # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS\n conf = 0.25 # NMS confidence threshold\n iou = 0.45 # NMS IoU threshold\n classes = None # (optional list) filter by class\n multi_label = False # NMS multiple labels per box\n max_det = 1000 # maximum number of detections per image\n\n def __init__(self, model):\n super().__init__()\n self.model = model.eval()\n\n def autoshape(self):\n LOGGER.info('AutoShape already enabled, skipping... ') # model already converted to model.autoshape()\n return self\n\n def _apply(self, fn):\n # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers\n self = super()._apply(fn)\n m = self.model.model[-1] # Detect()\n m.stride = fn(m.stride)\n m.grid = list(map(fn, m.grid))\n if isinstance(m.anchor_grid, list):\n m.anchor_grid = list(map(fn, m.anchor_grid))\n return self\n\n @torch.no_grad()\n def forward(self, imgs, size=640, augment=False, profile=False):\n # Inference from various sources. 
For height=640, width=1280, RGB images example inputs are:\n # file: imgs = 'data/images/zidane.jpg' # str or PosixPath\n # URI: = 'https://ultralytics.com/images/zidane.jpg'\n # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)\n # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)\n # numpy: = np.zeros((640,1280,3)) # HWC\n # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)\n # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images\n\n t = [time_sync()]\n p = next(self.model.parameters()) # for device and type\n if isinstance(imgs, torch.Tensor): # torch\n with amp.autocast(enabled=p.device.type != 'cpu'):\n return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference\n\n # Pre-process\n n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images\n shape0, shape1, files = [], [], [] # image and inference shapes, filenames\n for i, im in enumerate(imgs):\n f = f'image{i}' # filename\n if isinstance(im, (str, Path)): # filename or uri\n im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im\n im = np.asarray(exif_transpose(im))\n elif isinstance(im, Image.Image): # PIL Image\n im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f\n files.append(Path(f).with_suffix('.jpg').name)\n if im.shape[0] < 5: # image in CHW\n im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)\n im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input\n s = im.shape[:2] # HWC\n shape0.append(s) # image shape\n g = (size / max(s)) # gain\n shape1.append([y * g for y in s])\n imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update\n shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape\n x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad\n x = np.stack(x, 0) if n > 1 else x[0][None] # stack\n x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW\n x = torch.from_numpy(x).to(p.device).type_as(p) / 255. 
# uint8 to fp16/32\n t.append(time_sync())\n\n with amp.autocast(enabled=p.device.type != 'cpu'):\n # Inference\n y = self.model(x, augment, profile)[0] # forward\n t.append(time_sync())\n\n # Post-process\n y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes,\n multi_label=self.multi_label, max_det=self.max_det) # NMS\n for i in range(n):\n scale_coords(shape1, y[i][:, :4], shape0[i])\n\n t.append(time_sync())\n return Detections(imgs, y, files, t, self.names, x.shape)\n\n\nclass Detections:\n # YOLOv5 detections class for inference results\n def __init__(self, imgs, pred, files, times=None, names=None, shape=None):\n super().__init__()\n d = pred[0].device # device\n gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations\n self.imgs = imgs # list of images as numpy arrays\n self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)\n self.names = names # class names\n self.files = files # image filenames\n self.xyxy = pred # xyxy pixels\n self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels\n self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized\n self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized\n self.n = len(self.pred) # number of images (batch size)\n self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms)\n self.s = shape # inference BCHW shape\n\n def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):\n crops = []\n for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):\n s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string\n if pred.shape[0]:\n for c in pred[:, -1].unique():\n n = (pred[:, -1] == c).sum() # detections per class\n s += f\"{n} {self.names[int(c)]}{'s' * (n > 1)}, \" # add to string\n if show or save or render or crop:\n annotator = Annotator(im, example=str(self.names))\n for *box, conf, cls in reversed(pred): # xyxy, confidence, class\n label = f'{self.names[int(cls)]} {conf:.2f}'\n if crop:\n file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None\n crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label,\n 'im': save_one_box(box, im, file=file, save=save)})\n else: # all others\n annotator.box_label(box, label, color=colors(cls))\n im = annotator.im\n else:\n s += '(no detections)'\n\n im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np\n if pprint:\n LOGGER.info(s.rstrip(', '))\n if show:\n im.show(self.files[i]) # show\n if save:\n f = self.files[i]\n im.save(save_dir / f) # save\n if i == self.n - 1:\n LOGGER.info(f\"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}\")\n if render:\n self.imgs[i] = np.asarray(im)\n if crop:\n if save:\n LOGGER.info(f'Saved results to {save_dir}\\n')\n return crops\n\n def print(self):\n self.display(pprint=True) # print results\n LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' %\n self.t)\n\n def show(self):\n self.display(show=True) # show results\n\n def save(self, save_dir='runs/detect/exp'):\n save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir\n self.display(save=True, save_dir=save_dir) # save results\n\n def crop(self, save=True, save_dir='runs/detect/exp'):\n save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None\n return 
self.display(crop=True, save=save, save_dir=save_dir) # crop results\n\n def render(self):\n self.display(render=True) # render results\n return self.imgs\n\n def pandas(self):\n # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])\n new = copy(self) # return copy\n ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns\n cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns\n for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):\n a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update\n setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])\n return new\n\n def tolist(self):\n # return a list of Detections objects, i.e. 'for result in results.tolist():'\n x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)]\n for d in x:\n for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:\n setattr(d, k, getattr(d, k)[0]) # pop out of list\n return x\n\n def __len__(self):\n return self.n\n\n\nclass Classify(nn.Module):\n # Classification head, i.e. x(b,c1,20,20) to x(b,c2)\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups\n super().__init__()\n self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)\n self.flat = nn.Flatten()\n\n def forward(self, x):\n z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list\n return self.flat(self.conv(z)) # flatten to x(b,c2)\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.cuda.amp.autocast", "torch.nn.BatchNorm2d", "torch.nn.LeakyReLU", "numpy.tile", "torch.nn.MultiheadAttention", "torch.nn.MaxPool2d", "pandas.DataFrame", "torch.tensor", "torch.nn.Flatten", "torch.nn.Identity", "torch.nn.Conv2d", "numpy.stack", "numpy.asarray", "numpy.ascontiguousarray", "torch.nn.SiLU", "torch.no_grad", "torch.from_numpy", "torch.nn.AdaptiveAvgPool2d" ] ]
cx201910/first_ml
[ "b4ece4f275911707dda5ca461989f1dfdbf25021" ]
[ "backend/ml_service/apps/endpoints/views.py" ]
[ "from django.shortcuts import render\nfrom rest_framework import viewsets\nfrom rest_framework import mixins\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.decorators import action\n\nfrom .models import Endpoint\nfrom .serializers import EndpointSerializer\n\nfrom .models import MLAlgorithm\nfrom .serializers import MLAlgorithmSerializer\n\nfrom .models import MLAlgorithmStatus\nfrom .serializers import MLAlgorithmStatusSerializer\n\nfrom .models import MLRequest\nfrom .serializers import MLRequestSerializer\n\nimport json\nfrom numpy.random import rand\nfrom rest_framework import views, status\nfrom rest_framework.response import Response\nfrom apps.ml.registry import MLRegistry\nfrom ml_service.wsgi import registry\n\nfrom django.db import transaction\nfrom apps.endpoints.models import ABTest\nfrom apps.endpoints.serializers import ABTestSerializer\nfrom apps.endpoints.models import PredictStore\nfrom apps.endpoints.serializers import PredictStoreSerializer\n\nfrom django.db.models import F\nimport datetime\n\n# Create your views here.\nclass EndpointViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):\n serializer_class = EndpointSerializer\n queryset = Endpoint.objects.all()\n\n\nclass MLAlgorithmViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):\n serializer_class = MLAlgorithmSerializer\n queryset = MLAlgorithm.objects.all()\n\n\ndef deactivate_other_statuses(instance):\n old_statuses = MLAlgorithmStatus.objects.filter(parent_mlalgorithm = instance.parent_mlalgorithm, created_at__lt=instance.created_at, active=True)\n for i in range(len(old_statuses)):\n old_statuses[i].active = False\n MLAlgorithmStatus.objects.bulk_update(old_statuses, ['active'])\n\nclass MLAlgorithmStatusViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, mixins.CreateModelMixin, viewsets.GenericViewSet):\n serializer_class = MLAlgorithmStatusSerializer\n queryset = MLAlgorithmStatus.objects.all()\n def perform_create(self, serializer):\n try:\n with transaction.atomic():\n instance = serializer.save(active=True)\n # set active=False for other statuses\n deactivate_other_statuses(instance)\n\n except Exception as e:\n raise APIException(str(e))\n\n\nclass MLRequestViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, mixins.UpdateModelMixin, viewsets.GenericViewSet):\n serializer_class = MLRequestSerializer\n queryset = MLRequest.objects.all()\n\n\nclass PredictView(views.APIView):\n def post(self, request, endpoint_name, format=None):\n\n algorithm_status = self.request.query_params.get('status', 'production')\n algorithm_version = self.request.query_params.get('version')\n\n algs = MLAlgorithm.objects.filter(parent_endpoint__name=endpoint_name, status__status=algorithm_status, status__active=True)\n\n if algorithm_version is not None:\n algs = algs.filter(version = algorithm_version)\n\n if len(algs) == 0:\n return Response(\n {'status': 'Error', 'message': 'ML algorithm is not available'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n if len(algs) != 1 and algorithm_status != 'ab_testing':\n return Response(\n {'status': f'Error of {len(algs)} algorithms', 'message': 'ML algorithm selection is ambiguous. 
Please specify algorithm version.'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n alg_index = 0\n if algorithm_status == 'ab_testing':\n alg_index = 0 if rand() < 0.5 else 1\n\n algorithm_object = registry.endpoints[algs[alg_index].id]\n prediction = algorithm_object.compute_prediction(request.data)\n\n\n label = prediction['label'] if 'label' in prediction else 'error'\n ml_request = MLRequest(\n input_data=json.dumps(request.data),\n full_response=prediction,\n response=label,\n feedback='',\n parent_mlalgorithm=algs[alg_index],\n )\n ml_request.save()\n\n prediction['request_id'] = ml_request.id \n\n return Response(prediction)\n\n\nclass ABTestViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin,\n viewsets.GenericViewSet, mixins.CreateModelMixin,\n mixins.UpdateModelMixin):\n serializer_class = ABTestSerializer\n queryset = ABTest.objects.all()\n\n def perform_create(self, serializer):\n try:\n with transaction.atomic():\n instance = serializer.save()\n # update status for first algorithm\n\n status_1 = MLAlgorithmStatus(status = 'ab_testing',\n created_by=instance.created_by,\n parent_mlalgorithm = instance.parent_mlalgorithm_1,\n active=True)\n status_1.save()\n deactivate_other_statuses(status_1)\n # update status for second algorithm\n status_2 = MLAlgorithmStatus(status = 'ab_testing',\n created_by=instance.created_by,\n parent_mlalgorithm = instance.parent_mlalgorithm_2,\n active=True)\n status_2.save()\n deactivate_other_statuses(status_2)\n\n except Exception as e:\n raise APIException(str(e))\n\n\nclass StopABTestView(views.APIView):\n def post(self, request, ab_test_id, format=None):\n\n try:\n ab_test = ABTest.objects.get(pk=ab_test_id)\n\n if ab_test.ended_at is not None:\n return Response({'message': 'AB Test already finished.'})\n\n date_now = datetime.datetime.now()\n # alg #1 accuracy\n all_responses_1 = MLRequest.objects.filter(parent_mlalgorithm=ab_test.parent_mlalgorithm_1, created_at__gt = ab_test.created_at, created_at__lt = date_now).count()\n correct_responses_1 = MLRequest.objects.filter(parent_mlalgorithm=ab_test.parent_mlalgorithm_1, created_at__gt = ab_test.created_at, created_at__lt = date_now, response=F('feedback')).count()\n accuracy_1 = correct_responses_1 / float(all_responses_1)\n print(all_responses_1, correct_responses_1, accuracy_1)\n\n # alg #2 accuracy\n all_responses_2 = MLRequest.objects.filter(parent_mlalgorithm=ab_test.parent_mlalgorithm_2, created_at__gt = ab_test.created_at, created_at__lt = date_now).count()\n correct_responses_2 = MLRequest.objects.filter(parent_mlalgorithm=ab_test.parent_mlalgorithm_2, created_at__gt = ab_test.created_at, created_at__lt = date_now, response=F('feedback')).count()\n accuracy_2 = correct_responses_2 / float(all_responses_2)\n print(all_responses_2, correct_responses_2, accuracy_2)\n\n # select algorithm with higher accuracy\n alg_id_1, alg_id_2 = ab_test.parent_mlalgorithm_1, ab_test.parent_mlalgorithm_2\n # swap\n if accuracy_1 < accuracy_2:\n alg_id_1, alg_id_2 = alg_id_2, alg_id_1\n\n status_1 = MLAlgorithmStatus(status = 'production',\n created_by=ab_test.created_by,\n parent_mlalgorithm = alg_id_1,\n active=True)\n status_1.save()\n deactivate_other_statuses(status_1)\n # update status for second algorithm\n status_2 = MLAlgorithmStatus(status = 'testing',\n created_by=ab_test.created_by,\n parent_mlalgorithm = alg_id_2,\n active=True)\n status_2.save()\n deactivate_other_statuses(status_2)\n\n\n summary = 'Algorithm #1 accuracy: {}, Algorithm #2 accuracy: {}'.format(accuracy_1, accuracy_2)\n 
ab_test.ended_at = date_now\n ab_test.summary = summary\n ab_test.save()\n\n except Exception as e:\n return Response({'status': 'Error', 'message': str(e)},\n status=status.HTTP_400_BAD_REQUEST\n )\n return Response({'message': 'AB Test finished.', 'summary': summary})\n\n\nclass PredictStoreViewSet(mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):\n\n serializer_class = PredictStoreSerializer\n queryset = PredictStore.objects.all()\n \n @action(detail=True, methods=['post'])\n def predict(self, request, pk=None, format=None): \n serializer = PredictStoreSerializer(data=request.data) \n \n if serializer.is_valid(): \n ml_algorithm_s = serializer.validated_data['ml_algorithm'] \n created_by_s = serializer.validated_data['created_by'] \n target = serializer.validated_data['target']\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n alg_status = MLAlgorithmStatus(status='production',\n created_by=created_by_s,\n parent_mlalgorithm=ml_algorithm_s, active=True)\n alg_status.save()\n deactivate_other_statuses(alg_status) \n \n data = json.loads(request.data['input_data'])\n algs = MLAlgorithm.objects.filter(status__parent_mlalgorithm=ml_algorithm_s, status__active=True)\n\n algorithm_object = registry.endpoints[algs[0].id]\n prediction = algorithm_object.compute_prediction(data)\n label = prediction['label'] if 'label' in prediction else 'error'\n ml_request = MLRequest(\n input_data=json.dumps(data),\n full_response=prediction,\n response=label,\n feedback=target,\n parent_mlalgorithm=algs[0], )\n ml_request.save()\n\n prediction[\"request_id\"] = ml_request.id\n \n if serializer.is_valid(): \n serializer.validated_data['prediction'] = prediction \n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n if PredictStore.objects.filter(id=pk).exists():\n instance = PredictStore.objects.get(id=pk) \n instance.prediction = prediction\n instance.target = target\n instance.save() \n else: \n serializer.save() \n return Response(serializer.data) \n\n" ]
[ [ "numpy.random.rand" ] ]
rajahaseeb147/3dFacialPartSegmentation
[ "aedfed75558761295e9bf602b18c2c3b631080e5", "aedfed75558761295e9bf602b18c2c3b631080e5" ]
[ "Deep Learning/Implementation_3/models/pointnet_cls.py", "Deep Learning/Implementation_3/data_utils/ShapeNetDataLoader.py" ]
[ "import torch.nn as nn\r\nimport torch.utils.data\r\nimport torch.nn.functional as F\r\nfrom pointnet_utils import PointNetEncoder, feature_transform_reguliarzer\r\n\r\nclass get_model(nn.Module):\r\n def __init__(self, k=40, normal_channel=True):\r\n super(get_model, self).__init__()\r\n if normal_channel:\r\n channel = 6\r\n else:\r\n channel = 3\r\n self.feat = PointNetEncoder(global_feat=True, feature_transform=True, channel=channel)\r\n self.fc1 = nn.Linear(1024, 512)\r\n self.fc2 = nn.Linear(512, 256)\r\n self.fc3 = nn.Linear(256, k)\r\n self.dropout = nn.Dropout(p=0.4)\r\n self.bn1 = nn.BatchNorm1d(512)\r\n self.bn2 = nn.BatchNorm1d(256)\r\n self.relu = nn.ReLU()\r\n\r\n def forward(self, x):\r\n x, trans, trans_feat = self.feat(x)\r\n x = F.relu(self.bn1(self.fc1(x)))\r\n x = F.relu(self.bn2(self.dropout(self.fc2(x))))\r\n x = self.fc3(x)\r\n x = F.log_softmax(x, dim=1)\r\n return x, trans_feat\r\n\r\nclass get_loss(torch.nn.Module):\r\n def __init__(self, mat_diff_loss_scale=0.001):\r\n super(get_loss, self).__init__()\r\n self.mat_diff_loss_scale = mat_diff_loss_scale\r\n\r\n def forward(self, pred, target, trans_feat):\r\n loss = F.nll_loss(pred, target)\r\n mat_diff_loss = feature_transform_reguliarzer(trans_feat)\r\n\r\n total_loss = loss + mat_diff_loss * self.mat_diff_loss_scale\r\n return total_loss\r\n", "# *_*coding:utf-8 *_*\r\nimport os\r\nimport json\r\nimport warnings\r\nimport numpy as np\r\nfrom torch.utils.data import Dataset\r\nwarnings.filterwarnings('ignore')\r\n\r\ndef pc_normalize(pc):\r\n centroid = np.mean(pc, axis=0)\r\n pc = pc - centroid\r\n m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))\r\n pc = pc / m\r\n return pc\r\n\r\nclass PartNormalDataset(Dataset):\r\n def __init__(self,root = './data/shapenetcore_partanno_segmentation_benchmark_v0_normal', npoints=2500, split='train', class_choice=None, normal_channel=False):\r\n self.npoints = npoints\r\n self.root = root\r\n self.catfile = os.path.join(self.root, 'synsetoffset2category.txt')\r\n self.cat = {}\r\n self.normal_channel = normal_channel\r\n\r\n\r\n with open(self.catfile, 'r') as f:\r\n for line in f:\r\n ls = line.strip().split()\r\n self.cat[ls[0]] = ls[1]\r\n self.cat = {k: v for k, v in self.cat.items()}\r\n self.classes_original = dict(zip(self.cat, range(len(self.cat))))\r\n\r\n if not class_choice is None:\r\n self.cat = {k:v for k,v in self.cat.items() if k in class_choice}\r\n # print(self.cat)\r\n\r\n self.meta = {}\r\n with open(os.path.join(self.root, 'train_test_split', 'shuffled_train_file_list.json'), 'r') as f:\r\n train_ids = set([str(d.split('/')[2]) for d in json.load(f)])\r\n with open(os.path.join(self.root, 'train_test_split', 'shuffled_val_file_list.json'), 'r') as f:\r\n val_ids = set([str(d.split('/')[2]) for d in json.load(f)])\r\n with open(os.path.join(self.root, 'train_test_split', 'shuffled_test_file_list.json'), 'r') as f:\r\n test_ids = set([str(d.split('/')[2]) for d in json.load(f)])\r\n for item in self.cat:\r\n # print('category', item)\r\n self.meta[item] = []\r\n dir_point = os.path.join(self.root, self.cat[item])\r\n fns = sorted(os.listdir(dir_point))\r\n # print(fns[0][0:-4])\r\n if split == 'trainval':\r\n fns = [fn for fn in fns if ((fn[0:-4] in train_ids) or (fn[0:-4] in val_ids))]\r\n elif split == 'train':\r\n fns = [fn for fn in fns if fn[0:-4] in train_ids]\r\n elif split == 'val':\r\n fns = [fn for fn in fns if fn[0:-4] in val_ids]\r\n elif split == 'test':\r\n fns = [fn for fn in fns if fn[0:-4] in test_ids]\r\n else:\r\n print('Unknown split: %s. 
Exiting..' % (split))\r\n exit(-1)\r\n\r\n # print(os.path.basename(fns))\r\n for fn in fns:\r\n token = (os.path.splitext(os.path.basename(fn))[0])\r\n self.meta[item].append(os.path.join(dir_point, token + '.txt'))\r\n\r\n self.datapath = []\r\n for item in self.cat:\r\n for fn in self.meta[item]:\r\n self.datapath.append((item, fn))\r\n\r\n self.classes = {}\r\n for i in self.cat.keys():\r\n self.classes[i] = self.classes_original[i]\r\n\r\n # Mapping from category ('Chair') to a list of int [10,11,12,13] as segmentation labels\r\n self.seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],\r\n 'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46],\r\n 'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27],\r\n 'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40],\r\n 'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}\r\n\r\n # for cat in sorted(self.seg_classes.keys()):\r\n # print(cat, self.seg_classes[cat])\r\n\r\n self.cache = {} # from index to (point_set, cls, seg) tuple\r\n self.cache_size = 20000\r\n\r\n\r\n def __getitem__(self, index):\r\n if index in self.cache:\r\n point_set, cls, seg = self.cache[index]\r\n else:\r\n fn = self.datapath[index]\r\n cat = self.datapath[index][0]\r\n cls = self.classes[cat]\r\n cls = np.array([cls]).astype(np.int32)\r\n data = np.loadtxt(fn[1]).astype(np.float32)\r\n if not self.normal_channel:\r\n point_set = data[:, 0:3]\r\n else:\r\n point_set = data[:, 0:6]\r\n seg = data[:, -1].astype(np.int32)\r\n if len(self.cache) < self.cache_size:\r\n self.cache[index] = (point_set, cls, seg)\r\n point_set[:, 0:3] = pc_normalize(point_set[:, 0:3])\r\n\r\n choice = np.random.choice(len(seg), self.npoints, replace=True)\r\n # resample\r\n point_set = point_set[choice, :]\r\n seg = seg[choice]\r\n\r\n return point_set, cls, seg\r\n\r\n def __len__(self):\r\n return len(self.datapath)\r\n\r\n\r\n\r\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.functional.log_softmax", "torch.nn.ReLU", "torch.nn.BatchNorm1d", "torch.nn.functional.nll_loss" ], [ "numpy.sum", "numpy.array", "numpy.loadtxt", "numpy.mean" ] ]
JinGyeSetBirdsFree/FudanOCR
[ "fd79b679044ea23fd9eb30691453ed0805d2e98b", "fd79b679044ea23fd9eb30691453ed0805d2e98b", "fd79b679044ea23fd9eb30691453ed0805d2e98b" ]
[ "model/super_resolution_model/DocumentSRModel/models/srunitnet_2x_2x.py", "model/detection_model/AdvancedEAST/nms/setup.py", "model/detection_model/LSN/lib/model/test_roi_align.py" ]
[ "import numpy as np\nfrom scipy.misc import imsave\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.init as init\nimport torch.nn.functional as F\nimport torchvision\nfrom torchvision import models\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as Transforms\n\nfrom dataloader import TrainDataset, DevDataset, TestDataset\nfrom networks.unet import UNet, unet_weight_init\nfrom networks.hed import HED, HED_1L, hed_weight_init\nfrom networks.resnet import ResnetGenerator, Upscale4xResnetGenerator, Upscale2xResnetGenerator\nfrom networks.resnet_wdsr import WDSRResnetGenerator\nfrom networks.discriminators import NLayerDiscriminator\nfrom networks.vggfeature import VGGFeatureMap\nfrom utils.visualizer import Visualizer\nfrom utils.loss import BCE2d\nfrom utils.normalize import norm, denorm, weights_init_normal\nfrom utils.target import PSNR, SSIM, batch_compare_filter, batch_SSIM\n\n\nUSE_GPU = torch.cuda.is_available()\nNORM = 'batch'\n\nfrom scipy.misc import imsave\ndef save_img(img, save_fn=''):\n if not os.path.exists(os.path.split(save_fn)[0]):\n os.makedirs(os.path.split(save_fn)[0])\n if list(img.shape)[0] == 3:\n # save_image = img * 125.0\n save_image = img\n save_image = save_image.clamp(0, 1).numpy().transpose(1, 2, 0)\n else:\n save_image = img.squeeze().clamp(0, 1).numpy().transpose(1, 2, 0)\n\n imsave(save_fn, save_image)\n\n\nclass Model(object):\n def __init__(self, cfg):\n # parameter init\n self.env = cfg.env\n self.train_dataset = cfg.train_dataset\n self.valid_dataset = cfg.valid_dataset\n self.test_dataset = cfg.test_dataset\n self.data_dir = cfg.data_dir\n self.save_dir = cfg.save_dir\n\n self.num_threads = int(cfg.num_threads)\n self.num_epochs = int(cfg.num_epochs)\n self.save_epochs = int(cfg.save_epochs)\n self.pretrain_epochs = int(cfg.pretrain_epochs)\n self.batch_size = int(cfg.batch_size)\n self.valid_batch_size = int(cfg.valid_batch_size)\n self.test_batch_size = int(cfg.test_batch_size)\n self.plot_iter = int(cfg.plot_iter)\n self.crop_size = int(cfg.crop_size)\n self.scale_factor = int(cfg.scale_factor)\n self.lr = float(cfg.lr)\n\n def load_dataset(self, mode='train', random_scale=True, rotate=True, fliplr=True, fliptb=True):\n if mode == 'train':\n train_set = TrainDataset(os.path.join(self.data_dir, self.train_dataset),\n crop_size=self.crop_size, scale_factor=self.scale_factor,\n random_scale=random_scale, rotate=rotate, fliplr=fliplr, fliptb=fliptb)\n return DataLoader(dataset=train_set, num_workers=self.num_threads,\n batch_size=self.batch_size, shuffle=True)\n elif mode == 'valid':\n valid_set = DevDataset(os.path.join(\n self.data_dir, self.valid_dataset))\n return DataLoader(dataset=valid_set, num_workers=self.num_threads,\n batch_size=self.valid_batch_size, shuffle=True)\n elif mode == 'test':\n test_set = TestDataset(os.path.join(\n self.data_dir, self.test_dataset))\n return DataLoader(dataset=test_set, num_workers=self.num_threads,\n batch_size=self.test_batch_size, shuffle=False)\n\n def train(self, edgenetpath=None, sr2x1_path=None, sr2x2_path=None, srcnn_path=None, srresnet_path=None,\n is_fine_tune=False, random_scale=True, rotate=True, fliplr=True, fliptb=True):\n vis = Visualizer(self.env)\n\n print('================ Loading datasets =================')\n # load training dataset\n print('## Current Mode: Train')\n # train_data_loader = self.load_dataset(mode='valid')\n train_data_loader = self.load_dataset(\n mode='train', 
random_scale=random_scale, rotate=rotate, fliplr=fliplr, fliptb=fliptb)\n\n ##########################################################\n ##################### build network ######################\n ##########################################################\n print('Building Networks and initialize parameters\\' weights....')\n # init sr resnet\n # srresnet2x1 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n # norm=NORM, activation='prelu', learn_residual=True)\n # srresnet2x2 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n # norm=NORM, activation='prelu',learn_residual=True)\n srresnet2x1 = WDSRResnetGenerator(input_nc=3, output_nc=3, n_blocks=5)\n srresnet2x2 = WDSRResnetGenerator(input_nc=3, output_nc=3, n_blocks=5)\n srresnet2x1.apply(weights_init_normal)\n srresnet2x2.apply(weights_init_normal)\n\n # init discriminator\n discnet = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=5)\n\n # init edgenet\n edgenet = HED_1L()\n if edgenetpath is None or not os.path.exists(edgenetpath):\n raise Exception('Invalid edgenet model')\n else:\n pretrained_dict = torch.load(edgenetpath)\n model_dict = edgenet.state_dict()\n pretrained_dict = {k: v for k,\n v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n edgenet.load_state_dict(model_dict)\n\n # init vgg feature\n featuremapping = VGGFeatureMap(models.vgg19(pretrained=True))\n\n # load pretrained srresnet or just initialize\n if sr2x1_path is None or not os.path.exists(sr2x1_path):\n print('===> initialize the srresnet2x1')\n print('======> No pretrained model')\n else:\n print('======> loading the weight from pretrained model')\n pretrained_dict = torch.load(sr2x1_path)\n model_dict = srresnet2x1.state_dict()\n\n pretrained_dict = {k: v for k,\n v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n srresnet2x1.load_state_dict(model_dict)\n\n if sr2x2_path is None or not os.path.exists(sr2x2_path):\n print('===> initialize the srresnet2x2')\n print('======> No pretrained model')\n else:\n print('======> loading the weight from pretrained model')\n pretrained_dict = torch.load(sr2x2_path)\n model_dict = srresnet2x2.state_dict()\n\n pretrained_dict = {k: v for k,\n v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n srresnet2x2.load_state_dict(model_dict)\n\n # optimizer init\n # different learning rate\n lr = self.lr\n\n srresnet2x1_optimizer = optim.Adam(\n srresnet2x1.parameters(), lr=lr, betas=(0.9, 0.999))\n srresnet2x2_optimizer = optim.Adam(\n srresnet2x2.parameters(), lr=lr, betas=(0.9, 0.999))\n disc_optimizer = optim.Adam(\n discnet.parameters(), lr=lr/10, betas=(0.9, 0.999))\n\n # loss function init\n MSE_loss = nn.MSELoss()\n BCE_loss = nn.BCELoss()\n\n # cuda accelerate\n if USE_GPU:\n edgenet.cuda()\n srresnet2x1.cuda()\n srresnet2x2.cuda()\n discnet.cuda()\n featuremapping.cuda()\n MSE_loss.cuda()\n BCE_loss.cuda()\n print('\\tCUDA acceleration is available.')\n\n ##########################################################\n ##################### train network ######################\n ##########################################################\n import torchnet as tnt\n from tqdm import tqdm\n from PIL import Image\n\n # batchnorm = nn.BatchNorm2d(1).cuda()\n\n edge_avg_loss = tnt.meter.AverageValueMeter()\n total_avg_loss = tnt.meter.AverageValueMeter()\n disc_avg_loss = tnt.meter.AverageValueMeter()\n # psnr_2x_avg = tnt.meter.AverageValueMeter()\n # ssim_2x_avg = 
tnt.meter.AverageValueMeter()\n # psnr_4x_avg = tnt.meter.AverageValueMeter()\n # ssim_4x_avg = tnt.meter.AverageValueMeter()\n\n srresnet2x1.train()\n srresnet2x2.train()\n discnet.train()\n\n itcnt = 0\n for epoch in range(self.num_epochs):\n edge_avg_loss.reset()\n total_avg_loss.reset()\n disc_avg_loss.reset()\n # psnr_2x_avg.reset()\n # ssim_2x_avg.reset()\n # psnr_4x_avg.reset()\n # ssim_4x_avg.reset()\n\n # learning rate is decayed by a factor every 20 epoch\n if (epoch + 1) % 5 == 0:\n for param_group in srresnet2x1_optimizer.param_groups:\n param_group[\"lr\"] *= 0.5\n print(\"Learning rate decay for srresnet2x1: lr={}\".format(\n srresnet2x1_optimizer.param_groups[0][\"lr\"]))\n for param_group in srresnet2x2_optimizer.param_groups:\n param_group[\"lr\"] *= 0.5\n print(\"Learning rate decay for srresnet2x2: lr={}\".format(\n srresnet2x2_optimizer.param_groups[0][\"lr\"]))\n for param_group in disc_optimizer.param_groups:\n param_group[\"lr\"] *= 0.5\n print(\"Learning rate decay for discnet: lr={}\".format(\n disc_optimizer.param_groups[0][\"lr\"]))\n\n itbar = tqdm(enumerate(train_data_loader))\n for ii, (hr, lr2x, lr4x, bc2x, bc4x) in itbar:\n\n mini_batch = hr.size()[0]\n\n hr_ = Variable(hr)\n lr2x_ = Variable(lr2x)\n lr4x_ = Variable(lr4x)\n bc2x_ = Variable(bc2x)\n bc4x_ = Variable(bc4x)\n real_label = Variable(torch.ones(mini_batch))\n fake_label = Variable(torch.zeros(mini_batch))\n\n # cuda mode setting\n if USE_GPU:\n hr_ = hr_.cuda()\n lr2x_ = lr2x_.cuda()\n lr4x_ = lr4x_.cuda()\n bc2x_ = bc2x_.cuda()\n bc4x_ = bc4x_.cuda()\n real_label = real_label.cuda()\n fake_label = fake_label.cuda()\n\n # =============================================================== #\n # ================ Edge-based srresnet training ================= #\n # =============================================================== #\n sr2x_ = srresnet2x1(lr4x_)\n sr4x_ = srresnet2x2(lr2x_)\n\n '''===================== Train Discriminator ====================='''\n if epoch + 1 > self.pretrain_epochs:\n disc_optimizer.zero_grad()\n\n #===== 2x disc loss =====#\n real_decision_2x = discnet(lr2x_)\n real_loss_2x = BCE_loss(\n real_decision_2x, real_label.detach())\n\n fake_decision_2x = discnet(sr2x_.detach())\n fake_loss_2x = BCE_loss(\n fake_decision_2x, fake_label.detach())\n\n disc_loss_2x = real_loss_2x + fake_loss_2x\n\n disc_loss_2x.backward()\n disc_optimizer.step()\n\n #===== 4x disc loss =====#\n real_decision_4x = discnet(hr_)\n real_loss_4x = BCE_loss(\n real_decision_4x, real_label.detach())\n\n fake_decision_4x = discnet(sr4x_.detach())\n fake_loss_4x = BCE_loss(\n fake_decision_4x, fake_label.detach())\n\n disc_loss_4x = real_loss_4x + fake_loss_4x\n\n disc_loss_4x.backward()\n disc_optimizer.step()\n\n disc_avg_loss.add(\n (disc_loss_2x + disc_loss_4x).data.item())\n\n '''=================== Train srresnet Generator ==================='''\n edge_trade_off = [0.7, 0.2, 0.1, 0.05, 0.01, 0.3]\n if epoch + 1 > self.pretrain_epochs:\n a1, a2, a3 = 0.75, 0.1, 0.65\n else:\n a1, a2, a3 = 0.75, 0.0, 0.7\n\n if not is_fine_tune:\n #============ calculate 2x loss ==============#\n srresnet2x1_optimizer.zero_grad()\n\n #### Edgenet Loss ####\n pred = edgenet(sr2x_)\n real = edgenet(lr2x_)\n\n edge_loss_2x = BCE_loss(pred.detach(), real.detach())\n # for i in range(6):\n # edge_loss_2x += edge_trade_off[i] * \\\n # BCE_loss(pred[i].detach(), real[i].detach())\n # edge_loss = 0.7 * BCE2d(pred[0], real[i]) + 0.3 * BCE2d(pred[5], real[i])\n\n #### Content Loss ####\n content_loss_2x = MSE_loss(sr2x_, lr2x_) 
#+ 0.1*BCE_loss(1-sr2x_, 1-lr2x_)\n\n #### Perceptual Loss ####\n real_feature = featuremapping(lr2x_)\n fake_feature = featuremapping(sr2x_)\n vgg_loss_2x = MSE_loss(fake_feature, real_feature.detach())\n\n #### Adversarial Loss ####\n advs_loss_2x = BCE_loss(discnet(sr2x_), real_label) if epoch + 1 > self.pretrain_epochs else 0\n # advs_loss_2x = 0\n\n #============== loss backward ===============#\n total_loss_2x = a1 * edge_loss_2x + a2 * advs_loss_2x + \\\n a3 * content_loss_2x + (1.0 - a3) * vgg_loss_2x\n\n # total_loss_2x = 1.0 * content_loss_2x + 0.25 * vgg_loss_2x\n\n total_loss_2x.backward()\n srresnet2x1_optimizer.step()\n\n #============ calculate scores ==============#\n # psnr_2x_score_process = batch_compare_filter(\n # sr2x_.cpu().data, lr2x, PSNR)\n # psnr_2x_avg.add(psnr_2x_score_process)\n\n # ssim_2x_score_process = batch_compare_filter(\n # sr2x_.cpu().data, lr2x, SSIM)\n # ssim_2x_avg.add(ssim_2x_score_process)\n\n #============ calculate 4x loss ==============#\n if is_fine_tune:\n sr4x_ = srresnet2x2(srresnet2x1(lr4x_))\n\n srresnet2x2_optimizer.zero_grad()\n #### Edgenet Loss ####\n pred = edgenet(sr4x_)\n real = edgenet(hr_)\n\n # edge_loss_4x = 0\n edge_loss_4x = BCE_loss(pred.detach(), real.detach())\n # for i in range(6):\n # edge_loss_4x += edge_trade_off[i] * \\\n # BCE_loss(pred[i].detach(), real[i].detach())\n # edge_loss = 0.7 * BCE2d(pred[0], real[i]) + 0.3 * BCE2d(pred[5], real[i])\n\n #### Content Loss ####\n content_loss_4x = MSE_loss(sr4x_, hr_) #+ 0.1*BCE_loss(1-sr4x_, 1-hr_)\n\n #### Perceptual Loss ####\n real_feature = featuremapping(hr_)\n fake_feature = featuremapping(sr4x_)\n vgg_loss_4x = MSE_loss(fake_feature, real_feature.detach())\n\n #### Adversarial Loss ####\n advs_loss_4x = BCE_loss(discnet(sr4x_), real_label) if epoch + 1 > self.pretrain_epochs else 0\n # advs_loss_4x = 0\n\n #============== loss backward ===============#\n total_loss_4x = a1 * edge_loss_4x + a2 * advs_loss_4x + \\\n a3 * content_loss_4x + (1.0 - a3) * vgg_loss_4x\n\n # total_loss_4x = 1.0 * content_loss_4x + 0.25 * vgg_loss_4x\n\n total_loss_4x.backward()\n srresnet2x2_optimizer.step()\n\n #============ calculate scores ==============#\n # psnr_4x_score_process = batch_compare_filter(\n # sr4x_.cpu().data, hr, PSNR)\n # psnr_4x_avg.add(psnr_4x_score_process)\n\n # ssim_4x_score_process = batch_compare_filter(\n # sr4x_.cpu().data, hr, SSIM)\n # ssim_4x_avg.add(ssim_4x_score_process)\n\n if is_fine_tune:\n total_avg_loss.add(total_loss_4x.data.item())\n edge_avg_loss.add(edge_loss_4x.data.item())\n else:\n total_avg_loss.add((total_loss_2x+total_loss_4x).data.item())\n edge_avg_loss.add((edge_loss_2x+edge_loss_4x).data.item())\n if epoch + 1 > self.pretrain_epochs:\n disc_avg_loss.add((advs_loss_2x+advs_loss_4x).data.item())\n\n if (ii+1) % self.plot_iter == self.plot_iter-1:\n res = {'edge loss': edge_avg_loss.value()[0],\n 'generate loss': total_avg_loss.value()[0],\n 'discriminate loss': disc_avg_loss.value()[0]}\n vis.plot_many(res, 'Deblur net Loss')\n\n # psnr_2x_score_origin = batch_compare_filter(\n # bc2x, lr2x, PSNR)\n # psnr_4x_score_origin = batch_compare_filter(bc4x, hr, PSNR)\n # res_psnr = {'2x_origin_psnr': psnr_2x_score_origin,\n # '2x_sr_psnr': psnr_2x_score_process,\n # '4x_origin_psnr': psnr_4x_score_origin,\n # '4x_sr_psnr': psnr_4x_score_process}\n # vis.plot_many(res_psnr, 'PSNR Score')\n\n # ssim_2x_score_origin = batch_compare_filter(\n # bc2x, lr2x, SSIM)\n # ssim_4x_score_origin = batch_compare_filter(bc4x, hr, SSIM)\n # res_ssim = 
{'2x_origin_ssim': ssim_2x_score_origin,\n # '2x_sr_ssim': ssim_2x_score_process,\n # '4x_origin_ssim': ssim_4x_score_origin,\n # '4x_sr_ssim': ssim_4x_score_process}\n # vis.plot_many(res_ssim, 'SSIM Score')\n\n #======================= Output result of total training processing =======================#\n itcnt += 1\n # itbar.set_description(\"Epoch: [%2d] [%d/%d] PSNR_2x_Avg: %.6f, SSIM_2x_Avg: %.6f, PSNR_4x_Avg: %.6f, SSIM_4x_Avg: %.6f\"\n # % ((epoch + 1), (ii + 1), len(train_data_loader),\n # psnr_2x_avg.value()[0], ssim_2x_avg.value()[\n # 0],\n # psnr_4x_avg.value()[0], ssim_4x_avg.value()[0]))\n itbar.set_description(\"Epoch: [%2d] [%d/%d]\"\n % ((epoch + 1), (ii + 1), len(train_data_loader)))\n\n if (ii+1) % self.plot_iter == self.plot_iter-1:\n # test_ = deblurnet(torch.cat([y_.detach(), x_edge], 1))\n hr_edge = edgenet(hr_)\n sr2x_edge = edgenet(sr2x_)\n sr4x_edge = edgenet(sr4x_)\n\n vis.images(hr_edge.cpu().data, win='HR edge predict', opts=dict(\n title='HR edge predict'))\n vis.images(sr2x_edge.cpu().data, win='SR2X edge predict', opts=dict(\n title='SR2X edge predict'))\n vis.images(sr4x_edge.cpu().data, win='SR4X edge predict', opts=dict(\n title='SR4X edge predict'))\n\n vis.images(lr2x, win='LR2X image',\n opts=dict(title='LR2X image'))\n vis.images(lr4x, win='LR4X image',\n opts=dict(title='LR4X image'))\n vis.images(bc2x, win='BC2X image',\n opts=dict(title='BC2X image'))\n vis.images(bc4x, win='BC4X image',\n opts=dict(title='BC4X image'))\n vis.images(sr2x_.cpu().data, win='SR2X image',\n opts=dict(title='SR2X image'))\n vis.images(sr4x_.cpu().data, win='SR4X image',\n opts=dict(title='SR4X image'))\n\n vis.images(hr, win='HR image',\n opts=dict(title='HR image'))\n\n t_save_dir = 'results/train_result/'+self.train_dataset\n if not os.path.exists(t_save_dir):\n os.makedirs(t_save_dir)\n\n if (epoch + 1) % self.save_epochs == 0 and (ii+1) % 200 == 0:\n self.save_model(srresnet2x1, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x1_param_batch{}_lr{}_epoch{}'.\n format(self.batch_size, self.lr, epoch+1))\n self.save_model(srresnet2x2, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x2_param_batch{}_lr{}_epoch{}'.\n format(self.batch_size, self.lr, epoch+1))\n\n if (epoch + 1) % self.save_epochs == 0:\n self.save_model(srresnet2x1, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x1_param_batch{}_lr{}_epoch{}'.\n format(self.batch_size, self.lr, epoch+1))\n self.save_model(srresnet2x2, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x2_param_batch{}_lr{}_epoch{}'.\n format(self.batch_size, self.lr, epoch+1))\n\n # Save final trained model and results\n vis.save([self.env])\n self.save_model(srresnet2x1, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x1_param_batch{}_lr{}_epoch{}'.\n format(self.batch_size, self.lr, self.num_epochs))\n self.save_model(srresnet2x2, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x2_param_batch{}_lr{}_epoch{}'.\n format(self.batch_size, self.lr, self.num_epochs))\n\n def test(self, sr2x1_path=None, sr2x2_path=None):\n test_data_dir = os.path.join(self.data_dir, self.test_dataset)\n result_data_dir = os.path.join(self.save_dir, \"test_results\", \"2x2UnitNet_SR_\"+self.test_dataset)\n if not os.path.exists(result_data_dir):\n os.makedirs(result_data_dir)\n\n # judge whether model exists\n if not os.path.exists(sr2x1_path):\n raise Exception('sr2x1 resnet model not exists')\n if not os.path.exists(sr2x2_path):\n raise Exception('sr2x2 resnet model 
not exists')\n\n # load network params\n # srresnet2x1 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n # norm=NORM, activation='prelu', learn_residual=True)\n # srresnet2x2 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n # norm=NORM, activation='prelu', learn_residual=True)\n srresnet2x1 = WDSRResnetGenerator(input_nc=3, output_nc=3, n_blocks=5)\n srresnet2x2 = WDSRResnetGenerator(input_nc=3, output_nc=3, n_blocks=5)\n srresnet2x1.load_state_dict(torch.load(sr2x1_path))\n srresnet2x2.load_state_dict(torch.load(sr2x2_path))\n\n if USE_GPU:\n srresnet2x1.cuda()\n srresnet2x2.cuda()\n\n import torchnet as tnt\n from tqdm import tqdm\n from PIL import Image\n import time\n\n psnr_4x_avg = tnt.meter.AverageValueMeter()\n ssim_4x_avg = tnt.meter.AverageValueMeter()\n\n time_avg = tnt.meter.AverageValueMeter()\n\n srresnet2x1.eval()\n srresnet2x2.eval()\n\n # processing test data\n iterbar = tqdm(os.listdir(test_data_dir))\n import cv2\n import numpy as np\n for img_name in iterbar:\n try:\n img = cv2.imread(os.path.join(test_data_dir, img_name), cv2.IMREAD_COLOR)\n img = cv2.resize(img, None, None, 0.5, 0.5, interpolation=cv2.INTER_AREA)\n\n h, w, c = img.shape[0], img.shape[1], img.shape[2]\n w_lr4x, h_lr4x = int(\n w // self.scale_factor), int(h // self.scale_factor)\n w_lr2x, h_lr2x = w_lr4x * 2, h_lr4x * 2\n w_hr, h_hr = w_lr4x * self.scale_factor, h_lr4x * self.scale_factor\n\n w_num, h_num = w // self.crop_size, h // self.crop_size\n w_num += 1 if w % self.crop_size != 0 else 0\n h_num += 1 if h % self.crop_size != 0 else 0\n\n res = np.zeros((h*2, w*2, c), dtype=np.uint8)\n for i in range(w_num):\n l = i * self.crop_size\n l_new = l * 2\n r = min(l+self.crop_size, w)\n r_new = w * 2 if r == w else l_new + self.crop_size * 2\n for j in range(h_num):\n t = j * self.crop_size\n t_new = t * 2\n b = min(t+self.crop_size, h)\n b_new = h * 2 if b == h else t_new + self.crop_size * 2\n \n lr = img[t:b, l:r]\n\n lr = Transforms.ToTensor()(lr).unsqueeze(0)\n if USE_GPU:\n lr = lr.cuda()\n\n sr = srresnet2x1(lr).squeeze()\n\n res_sr = sr.cpu().data.clamp(0, 1).numpy().transpose(1, 2, 0)*255\n\n res[t_new:b_new, l_new:r_new] = res_sr\n \n cv2.imwrite(os.path.join(result_data_dir, img_name), res)\n except IOError:\n pass\n finally:\n pass\n\n\n # for img_name in iterbar:\n # try:\n # img = Image.open(os.path.join(test_data_dir, img_name)).convert(\"RGB\")\n # transform = Transforms.RandomCrop(self.crop_size)\n # img = transform(img)\n\n # w, h = img.size[0], img.size[1]\n # w_lr4x, h_lr4x = int(\n # w // self.scale_factor), int(h // self.scale_factor)\n # w_lr2x, h_lr2x = w_lr4x * 2, h_lr4x * 2\n # # w_hr, h_hr = w_lr4x * self.scale_factor, h_lr4x * self.scale_factor\n\n # # transform tensor\n # # hr = img.resize((w_hr, h_hr), Image.ANTIALIAS)\n # # lr2x = img.resize((w_lr2x, h_lr2x), Image.ANTIALIAS)\n # lr4x = img.resize((w_lr4x, h_lr4x), Image.ANTIALIAS)\n # lr4x = img.resize((w_lr2x, h_lr2x), Image.ANTIALIAS)\n\n # # hr_ = Transforms.ToTensor()(hr).unsqueeze(0)\n # # lr2x_ = Transforms.ToTensor()(lr2x).unsqueeze(0)\n # lr4x_ = Transforms.ToTensor()(lr4x).unsqueeze(0)\n\n # if USE_GPU:\n # # hr_ = hr_.cuda()\n # # lr2x_ = lr2x_.cuda()\n # lr4x_ = lr4x_.cuda()\n\n # torch.cuda.synchronize()\n # start = time.time()\n\n # sr4x_ = srresnet2x2(srresnet2x1(lr4x_))\n # # sr4x_ = srresnet2x1(lr4x_)\n\n # torch.cuda.synchronize()\n # end = time.time()\n\n # time_avg.add(end-start)\n # except IOError:\n # pass\n # finally:\n # pass\n\n # # calculate PSNR & SSIM\n # 
psnr_4x_score = batch_compare_filter(\n # sr4x_.cpu().data, hr_, PSNR)\n # ssim_4x_score = batch_compare_filter(\n # sr4x_.cpu().data, hr_, SSIM)\n # psnr_4x_avg.add(psnr_4x_score)\n # ssim_4x_avg.add(ssim_4x_score)\n\n # # save image\n # save_img(sr4x_.cpu().data, os.path.join(result_data_dir, img_name))\n\n print(time_avg.value()[0])\n print(\"final PSNR score: {}\".format(psnr_4x_avg.value()[0]))\n print(\"final SSIM score: {}\".format(ssim_4x_avg.value()[0]))\n\n def test_t(self, sr2x1_1_path=None, sr2x2_1_path=None, sr2x1_2_path=None, sr2x2_2_path=None):\n test_data_dir = os.path.join(self.data_dir, self.test_dataset)\n \n sr_edge_dir = os.path.join(self.save_dir, \"show_results\", \"2x2UnitNet_Edge_SR_\"+self.test_dataset)\n if not os.path.exists(sr_edge_dir):\n os.makedirs(sr_edge_dir)\n\n sr_none_dir = os.path.join(self.save_dir, \"show_results\", \"2x2UnitNet_none_SR_\"+self.test_dataset)\n if not os.path.exists(sr_none_dir):\n os.makedirs(sr_none_dir)\n \n bc_dir = os.path.join(self.save_dir, \"show_results\", \"Bicubic_SR_\"+self.test_dataset)\n if not os.path.exists(bc_dir):\n os.makedirs(bc_dir)\n \n hr_dir = os.path.join(self.save_dir, \"show_results\", \"HR_\"+self.test_dataset)\n if not os.path.exists(hr_dir):\n os.makedirs(hr_dir)\n\n lr_dir = os.path.join(self.save_dir, \"show_results\", \"LR_\"+self.test_dataset)\n if not os.path.exists(lr_dir):\n os.makedirs(lr_dir)\n\n # judge whether model exists\n if not os.path.exists(sr2x1_1_path):\n raise Exception('sr2x1 resnet model not exists')\n if not os.path.exists(sr2x2_1_path):\n raise Exception('sr2x2 resnet model not exists')\n if not os.path.exists(sr2x1_2_path):\n raise Exception('sr2x1 resnet model not exists')\n if not os.path.exists(sr2x2_2_path):\n raise Exception('sr2x2 resnet model not exists')\n\n # load network params\n srresnet2x1_edge = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n norm=NORM, activation='prelu', learn_residual=True)\n srresnet2x2_edge = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n norm=NORM, activation='prelu', learn_residual=True)\n srresnet2x1_none = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n norm=NORM, activation='prelu', learn_residual=True)\n srresnet2x2_none = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,\n norm=NORM, activation='prelu', learn_residual=True)\n srresnet2x1_edge.load_state_dict(torch.load(sr2x1_1_path))\n srresnet2x2_edge.load_state_dict(torch.load(sr2x2_1_path))\n srresnet2x1_none.load_state_dict(torch.load(sr2x1_2_path))\n srresnet2x2_none.load_state_dict(torch.load(sr2x2_2_path))\n\n if USE_GPU:\n srresnet2x1_edge.cuda()\n srresnet2x2_edge.cuda()\n srresnet2x1_none.cuda()\n srresnet2x2_none.cuda()\n\n import torchnet as tnt\n from tqdm import tqdm\n from PIL import Image\n\n psnr_edge_4x_avg = tnt.meter.AverageValueMeter()\n ssim_edge_4x_avg = tnt.meter.AverageValueMeter()\n psnr_none_4x_avg = tnt.meter.AverageValueMeter()\n ssim_none_4x_avg = tnt.meter.AverageValueMeter()\n\n # srresnet2x1_edge.eval()\n # srresnet2x2_edge.eval()\n # srresnet2x1_none.eval()\n # srresnet2x2_none.eval()\n\n # processing test data\n iterbar = tqdm(os.listdir(test_data_dir))\n for img_name in iterbar:\n img = Image.open(os.path.join(test_data_dir, img_name)).convert(\"RGB\")\n transform = Transforms.RandomCrop(self.crop_size)\n img = transform(img)\n\n w, h = img.size[0], img.size[1]\n w_lr4x, h_lr4x = int(\n w // self.scale_factor), int(h // self.scale_factor)\n w_hr, h_hr = w_lr4x * self.scale_factor, h_lr4x * 
self.scale_factor\n\n # transform tensor\n hr = img.resize((w_hr, h_hr), Image.ANTIALIAS)\n lr4x = img.resize((w_lr4x, h_lr4x), Image.ANTIALIAS)\n bc4x = lr4x.resize((w_hr, h_hr), Image.BICUBIC)\n\n hr_ = Transforms.ToTensor()(hr).unsqueeze(0)\n bc4x_ = Transforms.ToTensor()(bc4x).unsqueeze(0)\n lr4x_ = Transforms.ToTensor()(lr4x).unsqueeze(0)\n\n if USE_GPU:\n hr_ = hr_.cuda()\n lr4x_ = lr4x_.cuda()\n\n sr4x_edge_ = srresnet2x2_edge(srresnet2x1_edge(lr4x_))\n sr4x_none_ = srresnet2x2_none(srresnet2x1_none(lr4x_))\n\n # calculate PSNR & SSIM\n psnr_edge_4x_score = batch_compare_filter(\n sr4x_edge_.cpu().data, hr_, PSNR)\n ssim_edge_4x_score = batch_compare_filter(\n sr4x_edge_.cpu().data, hr_, SSIM)\n psnr_edge_4x_avg.add(psnr_edge_4x_score)\n ssim_edge_4x_avg.add(ssim_edge_4x_score)\n\n psnr_none_4x_score = batch_compare_filter(\n sr4x_none_.cpu().data, hr_, PSNR)\n ssim_none_4x_score = batch_compare_filter(\n sr4x_none_.cpu().data, hr_, SSIM)\n psnr_none_4x_avg.add(psnr_none_4x_score)\n ssim_none_4x_avg.add(ssim_none_4x_score)\n\n # save image\n save_img(sr4x_edge_.cpu().data, os.path.join(sr_edge_dir, img_name))\n save_img(sr4x_none_.cpu().data, os.path.join(sr_none_dir, img_name))\n save_img(bc4x_.cpu().data, os.path.join(bc_dir, img_name))\n save_img(hr_.cpu().data, os.path.join(hr_dir, img_name))\n save_img(lr4x_.cpu().data, os.path.join(lr_dir, img_name))\n\n print(\"final edge PSNR score: {}\".format(psnr_edge_4x_avg.value()[0]))\n print(\"final edge SSIM score: {}\".format(ssim_edge_4x_avg.value()[0]))\n\n print(\"final none PSNR score: {}\".format(psnr_none_4x_avg.value()[0]))\n print(\"final none SSIM score: {}\".format(ssim_none_4x_avg.value()[0]))\n\n def save_model(self, model, save_dir, model_name, mtype='pkl'):\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n if mtype == 'pkl':\n save_path = os.path.join(save_dir, model_name+'.pkl')\n torch.save(model.state_dict(), save_path)\n elif mtype == 'pth':\n save_path = os.path.join(save_dir, model_name+'.pth')\n torch.save(model.state_dict(), save_path)\n", "from distutils.core import setup, Extension\nfrom Cython.Build import cythonize\nimport numpy\n\next_module = Extension(\n \"nms\",\n sources=[\"nms.pyx\"],\n extra_compile_args=[\"-std=c++11\"],\n language=\"c++\",\n include_dirs=[numpy.get_include()]\n)\n\nsetup(ext_modules=cythonize(ext_module,\n language_level=3,\n annotate=True))\n", "from roi_align.roi_align import RoIAlign # RoIAlign module\nfrom roi_align.roi_align import CropAndResize # crop_and_resize module\nfrom torchvision import transforms\nimport torch\nimport cv2\nimport numpy as np\nfrom torch.autograd import Variable\n\ndef to_varabile(data,requires_grad,is_cuda):\n if is_cuda:\n data = data.cuda()\n data = Variable(data,requires_grad=requires_grad)\n return data\n\n# input data\nis_cuda = torch.cuda.is_available()\n# image_data = cv2.imread('/data/2019AAAI/data/ctw15/test/text_image/1002.jpg')\nimage_data = np.ones((100,100,3))\nimage_data = image_data.transpose((2, 0, 1)).astype(np.float32)\nimage_data = torch.from_numpy((image_data))\nboxes_data = torch.Tensor([[0,0,200,200],[0,0,200,200]])\nbox_index_data = torch.IntTensor([0, 0]) # one batch index per box in boxes_data\nimage = to_varabile(image_data, requires_grad=True, is_cuda=is_cuda)\nimage = image.unsqueeze(0)\nprint(image.size())\nboxes = to_varabile(boxes_data, requires_grad=False, is_cuda=is_cuda)\nbox_index = to_varabile(box_index_data, requires_grad=False, is_cuda=is_cuda)\nprint(image,boxes,box_index)\n# RoIAlign layer\nroi_align = RoIAlign(7,
7,extrapolation_value=0)\ncrops = roi_align(image, boxes, box_index)\nprint(crops)\n\n" ]
[ [ "torch.zeros", "torch.nn.MSELoss", "numpy.zeros", "torch.autograd.Variable", "torch.ones", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.nn.BCELoss", "scipy.misc.imsave", "torch.load" ], [ "numpy.get_include" ], [ "torch.IntTensor", "torch.autograd.Variable", "numpy.ones", "torch.from_numpy", "torch.cuda.is_available", "torch.Tensor" ] ]
ahesford/habis-tools
[ "82f82b99fa18452697404100edcf83bd03d35abc" ]
[ "habis/formats.py" ]
[ "'''\nRoutines for manipulating HABIS data file formats.\n'''\n\n# Copyright (c) 2015 Andrew J. Hesford. All rights reserved.\n# Restrictions are listed in the LICENSE file distributed with this package.\n\nimport mmap\nimport numpy as np\nimport os\nimport struct\n\nfrom itertools import repeat\n\nfrom collections import OrderedDict\n\nfrom functools import reduce, partial\n\nimport warnings\n\nclass ArgparseLoader(object):\n\t'''\n\tA factory to load arguments provided to argparse.ArgumentParser using a\n\tprovided lodaer function with a defined set of options.\n\t'''\n\tdef __init__(self, loader, *args, **kwargs):\n\t\t'''\n\t\tCreate a callable that accepts a single string argument and,\n\t\twhen called, invokes the provided loader function with the\n\t\tstring as the first argument. All other positional and keyword\n\t\targuments are stored and passed to the loader following the\n\t\tstring.\n\t\t'''\n\t\tif not callable(loader):\n\t\t\traise TypeError('Argument \"loader\" must be callable')\n\n\t\t# Retain a reference to the loader\n\t\tself._loader = loader\n\n\t\t# Retain the mode and a copy of the arguments\n\t\tself._args = tuple(args)\n\t\tself._kwargs = kwargs\n\n\n\tdef __call__(self, string):\n\t\t'''\n\t\tInvoke the loader associated with this instance, passing string\n\t\tas the first argument and all associated positional and keyword\n\t\targuments thereafter.\n\n\t\tAny error encountered, will be converted to an\n\t\targparse.ArgumentTypeError.\n\t\t'''\n\t\tfrom argparse import ArgumentTypeError\n\n\t\ttry:\n\t\t\treturn self._loader(string, *self._args, **self._kwargs)\n\t\texcept Exception as err:\n\t\t\tmessage = f'failed to load {string}: {err}'\n\t\t\traise ArgumentTypeError(f'failed to load {string}: {err}')\n\n\n# Warnings and errors related to WaveformSet I/O\nclass WaveformSetIOWarning(UserWarning): pass\nclass WaveformSetIOError(Exception): pass\n\ndef strict_int(x):\n\tix = int(x)\n\tif ix != x:\n\t\traise ValueError('Argument must be integer-compatible')\n\treturn ix\n\n\ndef strict_nonnegative_int(x, positive=False):\n\tx = strict_int(x)\n\tif positive and x <= 0:\n\t\traise ValueError('Argument must be positive')\n\telif x < 0:\n\t\traise ValueError('Argument must be nonnegative')\n\treturn x\n\n\ndef renderAndLoadYaml(data, **kwargs):\n\t'''\n\tAttempt to render the string data as a Mako template with kwargs passed\n\tto the Mako renderer with string_undefined=True. Parse the rendered\n\tresult as YAML using yaml.safe_load.\n\n\tIf the Mako template engine cannot be imported, the data is parsed as\n\tpure YAML. Specifying kwargs when Mako cannot be imported raises a\n\tTypeError.\n\t'''\n\tfrom yaml import safe_load\n\n\ttry:\n\t\tfrom mako.template import Template\n\texcept ImportError:\n\t\tif kwargs:\n\t\t\traise TypeError('Extra keyword arguments '\n\t\t\t\t\t'require Mako template engine')\n\t\treturn safe_load(data)\n\telse:\n\t\ttmpl = Template(text=data, strict_undefined=True)\n\t\treturn safe_load(tmpl.render(**kwargs))\n\n\ndef loadmatlist(files, *a, **k):\n\t'''\n\tA conveience function to produce the ordered dictionary\n\n\t\tOrderedDict(sorted(kv for f in files\n\t\t\t\tfor kv in loadkeymat(f, *a, **k).iteritems()))\n\n\tIf files is a string instead of any other iterable, it will be replaced\n\twith glob.glob(files) before being inserted into the above constructor.\n\n\tWhen files is a string, a special keyword argument, forcematch, may be\n\tprovided. 
This argument will be stripped from the kwargs dictionary k\n\tand, when True, will cause an IOError to be raised if the glob matches\n\tno files. Otherwise, if forcematch is omitted or False, a glob that\n\tmatches no files will cause an empty map to be returned.\n\t'''\n\tif isinstance(files, str):\n\t\tfrom glob import glob\n\t\tfiles = glob(files)\n\t\tforcematch = k.pop('forcematch', False)\n\t\tif forcematch and not files: raise IOError('No matches for glob \"files\"')\n\n\treturn OrderedDict(sorted(kv for f in files\n\t\tfor kv in loadkeymat(f, *a, **k).items()))\n\n\ndef loadkeymat(f, scalar=None, dtype=None, nkeys=None):\n\t'''\n\tA convenience function that will attempt to load a mapping from f using\n\tloadz_keymat or (if loadz_keymat fails) loadtxt_keymat. The optional\n\targuments scalar and dtype, if not None, are passed as kwargs to either\n\tload function.\n\n\tIf nkeys is not None, it will be used to verify the cardinality of keys\n\tin a mapping returned by a successful call to loadz_keymat or passed as\n\tan argument to loadtxt_keymat.\n\t'''\n\t# Build optional kwargs\n\tkwargs = { }\n\tif scalar is not None: kwargs['scalar'] = scalar\n\tif dtype is not None: kwargs['dtype'] = dtype\n\n\ttry:\n\t\tmapping = loadz_keymat(f, **kwargs)\n\texcept (ValueError, IOError):\n\t\tif nkeys is not None: kwargs['nkeys'] = nkeys\n\t\treturn loadtxt_keymat(f, **kwargs)\n\n\tif nkeys is not None and len(mapping):\n\t\tkey = next(iter(mapping.keys()))\n\n\t\ttry: nk = len(key)\n\t\texcept TypeError: nk = 1\n\n\t\tif nkeys != nk:\n\t\t\traise ValueError('Cardinality of keys in mapping does not match nkeys parameter')\n\n\treturn mapping\n\n\ndef savez_keymat(f, mapping, sortrows=True, compressed=False, comment=None):\n\t'''\n\tStores mapping, which maps one or more integers to one or more\n\tnumerical values, into f (which may be a string providing a file name,\n\tor an open file-like object) using numpy.savez (if compressed is\n\tFalse) or numpy.savez_compressed (if compressed is True).\n\n\tAll keys must contain the same number of integers. Each value in the\n\tmapping may consist of an arbitrary number of numeric values.\n\n\tIf sortrows is True, the data will be stored in an order determined by\n\tsorted(mapping.keys()). Otherwise, the row order is either arbitrary or\n\tenforced by the input map (e.g., an OrderedDict).\n\n\tThe saved npz file contains three arrays: 'keys', an N-by-M integer\n\tarray such that each row specifies an M-integer key in the input\n\tmapping; 'values', which stores the values of the mapping flattened\n\taccording to the order of 'keys', and 'lengths', which specifies the\n\tlength of the value array for each associated key. That is,\n\n\t\tmapping[keys[i]] = values[start:start+lengths[i]],\n\n\twhere start = sum(lengths[j] for 0 <= j < i).\n\n\tIf the lengths of the value lists for all keys are the same, the\n\t'lengths' array may be just a scalar value, in which case 'lengths[i]'\n\tshould be interpreted as '([lengths] * len(keys))[i]'.\n\n\tIf comment is not None, it should be a string that will be stored as an\n\textra array, called 'comment', in the output file.
The comment will be\n\tignored when loading the file.\n\t'''\n\t# Make sure any comment is a string\n\tif comment is not None: exargs = { 'comment': str(comment) }\n\telse: exargs = { }\n\n\tkeys = sorted(mapping.keys()) if sortrows else list(mapping.keys())\n\n\t# Build the length array and flattened value array\n\tlengths, values = [ ], [ ]\n\tfor k in keys:\n\t\tv = mapping[k]\n\n\t\ttry:\n\t\t\tlengths.append(len(v))\n\t\t\tvalues.extend(v)\n\t\texcept TypeError:\n\t\t\tlengths.append(1)\n\t\t\tvalues.append(v)\n\n\tlengths = np.array(lengths)\n\tvalues = np.array(values)\n\n\t# Collapse lengths to scalar if possible\n\ttry: lv = lengths[0]\n\texcept IndexError: lv = 0\n\tif np.all(lengths == lv):\n\t\tlengths = np.array(lv)\n\n\t# Verify the value array\n\tif not np.issubdtype(values.dtype, np.number):\n\t\traise TypeError('Values in mapping must be numeric')\n\n\t# Verify the key array\n\tkeys = np.array(keys)\n\tif not np.issubdtype(keys.dtype, np.integer) or keys.ndim > 2:\n\t\traise TypeError('Keys in mapping must consist of one or more integers and have consistent cardinality')\n\n\tsavez = np.savez_compressed if compressed else np.savez\n\tsavez(f, keys=keys, values=values, lengths=lengths, **exargs)\n\n\ndef loadz_keymat(*args, **kwargs):\n\t'''\n\tLoad and return, using numpy.load(*args, **kwargs), a mapping (created\n\twith savez_keymat) from one or more integers to one or more numerical\n\tvalues.\n\n\tIf the number of elements in every value array is 1, setting an\n\toptional keyword argument scalar (True by default) to False will\n\tpreserve the values as 1-element Numpy arrays. Otherwise, 1-element\n\tNumpy arrays will be collapsed to scalars. The scalar keyword argument\n\tis stripped from the kwargs and is not passed to numpy.load.\n\n\tThe data types of the value arrays can be forced by specifying an\n\toptional keyword argument dtype.
The dtype argument will be stripped\n\tfrom the kwargs and is not passed to numpy.load.\n\n\tThe returned mapping is an OrderedDict that preserves the ordering of\n\tkeys in the input file.\n\n\tIf the loaded file does not contain a valid mapping in the style\n\tprepared by savez_keymat, a ValueError will be raised.\n\n\tIf the file contains a \"comment\" key, it will be silently ignored.\n\t'''\n\t# Pull specialty kwargs\n\tscalar = kwargs.pop('scalar', True)\n\tdtype = kwargs.pop('dtype', None)\n\n\ttry:\n\t\t# Load the file\n\t\twith np.load(*args, **kwargs) as data:\n\t\t\ttry:\n\t\t\t\tfiles = set(data.keys())\n\n\t\t\t\t# Ignore a comment in the file\n\t\t\t\ttry: files.remove('comment')\n\t\t\t\texcept KeyError: pass\n\n\t\t\t\t# Make sure all other fields are recognized\n\t\t\t\tif files != { 'keys', 'values', 'lengths' }: raise ValueError\n\t\t\texcept (AttributeError, ValueError):\n\t\t\t\traise ValueError('Unrecognized data structure in input')\n\n\t\t\tkeys = data['keys']\n\t\t\tvalues = data['values']\n\t\t\tlengths = data['lengths']\n\texcept AttributeError:\n\t\traise ValueError('Invalid file format')\n\n\t# Convert the data type if desired\n\tif dtype is not None:\n\t\tvalues = values.astype(dtype)\n\n\tif not np.issubdtype(keys.dtype, np.integer) or not 0 < keys.ndim < 3:\n\t\traise ValueError('Invalid mapping key structure')\n\n\tif not np.issubdtype(lengths.dtype, np.integer) or lengths.ndim > 1:\n\t\traise ValueError('Invalid mapping length structure')\n\n\tif not np.issubdtype(values.dtype, np.number) or values.ndim != 1:\n\t\traise ValueError('Invalid mapping value structure')\n\n\tif lengths.ndim == 1 and len(lengths) != len(keys):\n\t\traise ValueError('Mapping lengths and keys do not have equal lengths')\n\n\tnvals = np.sum(lengths) if lengths.ndim == 1 else (lengths * len(keys))\n\tif len(values) != nvals:\n\t\traise ValueError('Mapping values do not have appropriate lengths')\n\n\tif scalar:\n\t\t# Determine whether the mapped values can be collapsed to scalars\n\t\tif lengths.ndim == 0:\n\t\t\tscalar = lengths == 1\n\t\telse:\n\t\t\tscalar = (lengths.shape[0] > 0 and\n\t\t\t\t\tall(lv == 1 for lv in lengths))\n\n\t# Collapse 1-element keys to scalars\n\ttry: keys = keys.squeeze(axis=1)\n\texcept ValueError: pass\n\n\tif keys.ndim == 2:\n\t\t# Convert a list of key values to a tuple of Python scalars\n\t\tkeys = [ tuple(k.tolist()) for k in keys ]\n\telse:\n\t\t# Collapse a single key value to a single Python scalar\n\t\tkeys = [ k.tolist() for k in keys ]\n\n\tmapping = OrderedDict()\n\tstart = 0\n\n\tfor key, lv in zip(keys, lengths if lengths.ndim == 1 else repeat(lengths)):\n\t\tmapping[key] = values[start] if scalar else values[start:start+lv]\n\t\tstart += lv\n\n\treturn mapping\n\n\ndef loadtxt_keymat(*args, **kwargs):\n\t'''\n\tLoads a textual Numpy matrix by calling numpy.loadtxt(*args, **kwargs),\n\tthen converts the output to an OrderedDict mapping integers in some\n\tpositive number of leading columns to Numpy arrays composed of the\n\tremaining columns. The output dictionary preserves the ordering of rows\n\tin the input file.\n\n\tIf the number of remaining columns is 1, setting an optional keyword\n\targument scalar (default: True) to False will preserve 1-element Numpy\n\tarrays as the values of the dictionary. Otherwise, 1-element Numpy\n\tarrays in the dictionary values will be collapsed to scalars.
The\n\tscalar keyword argument is stripped from kwargs and is not passed to\n\tnumpy.loadtxt.\n\n\tThe dimensionality of the text matrix will be forced to 2 by adding\n\tndmin=2 to the kwargs. Therefore, this value should not be specified in\n\targs or kwargs.\n\n\tAn optional keyword argument, nkeys (default: 1), will be stripped from\n\tkwargs to determine the number of leading columns to use as keys. If\n\tnkeys is 1, the keys will be single integers. For nkeys > 1, the keys\n\twill be tuples of integers.\n\t'''\n\t# Pull speciality kwargs\n\tnkeys = strict_nonnegative_int(kwargs.pop('nkeys', 1), positive=True)\n\tscalar = kwargs.pop('scalar', True)\n\n\t# Ensure the dimensionality is correctly specified\n\tkwargs['ndmin'] = 2\n\tmat = np.loadtxt(*args, **kwargs)\n\n\t_, ncol = mat.shape\n\n\tif nkeys >= ncol:\n\t\traise ValueError('Number of key columns must be less than number of columns in matrix')\n\n\tdef kvmaker(g):\n\t\tk = tuple(strict_int(gv) for gv in g[:nkeys])\n\t\tv = g[nkeys:]\n\t\tif len(k) < 2: k = k[0]\n\t\tif scalar and len(v) < 2: v = v[0]\n\t\treturn k, v\n\n\treturn OrderedDict(kvmaker(g) for g in mat)\n\n\ndef savetxt_keymat(*args, **kwargs):\n\t'''\n\tStores a dictionary mapping integers to sequences as a textual Numpy\n\tmatrix using numpy.savetxt(*args, **kwargs), where the keys become the\n\tleading columns of the matrix and the remaining columns are populated\n\tby the corresponding values.\n\n\tIf a format is specified as the 'fmt' argument to savetxt, it must\n\taccount for the extra columns populated by the keys.\n\n\tIf kwargs contains a 'sortrows' argument, the Boolean value (defaulting\n\tto True) for the argument determines whether the mapping is sorted by\n\tkeys prior to output. Without sorting, the row order is either\n\tarbitrary or enforced by the input map (e.g., an OrderedDict). 
This\n\targument is not forwarded to savetxt.\n\t'''\n\t# Pull the map\n\tif len(args) > 1:\n\t\tx = args[1]\n\telse:\n\t\tx = kwargs.pop('X')\n\n\tsortrows = kwargs.pop('sortrows', True)\n\n\tdef aslist(x):\n\t\ttry: return list(x)\n\t\texcept TypeError: return list([x])\n\n\trows = iter(x.items()) if not sortrows else sorted(x.items())\n\n\t# Convert the dictionary to a list of lists\n\tmat = [ aslist(k) + aslist(v) for k, v in rows ]\n\n\t# Overwrite the input argument for the matrix\n\tif len(args) > 1:\n\t\targs = tuple(a if i != 1 else mat for i, a in enumerate(args))\n\telse:\n\t\tkwargs['X'] = mat\n\n\tnp.savetxt(*args, **kwargs)\n\n\ndef findenumfiles(dir, prefix='.*?', suffix='', ngroups=1):\n\t'''\n\tFind all files in the directory dir with a name matching the regexp\n\tr'^<PREFIX>(-([0-9]+)){ngroups}<SUFFIX>$', where <PREFIX> is replaced\n\twith an optional prefix and <SUFFIX> is replaced with an optional\n\tsuffix to restrict the search, and return a list of tuples in which the\n\tfirst item is the name and subsequent entries are the matched integers\n\t(which will number ngroups) in left-to-right order.\n\t'''\n\tfrom os.path import join\n\tfrom re import compile as recomp\n\n\tif ngroups < 1:\n\t\traise ValueError('At least one number group must be specified')\n\n\t# Build the number-matching portion\n\tnumstr = '-([0-9]+)' * ngroups\n\t# Enumerate the matching groups (0 is the whole matching string)\n\tgrpidx = tuple(range(ngroups + 1))\n\t# Build the regexp and filter the list of files in the directory\n\tregexp = recomp(r'^%s%s%s$' % (prefix, numstr, suffix))\n\t# When converting matched groups to integers, discard the whole-string group\n\treturn [tuple([join(dir, f)] + [int(g) for g in m.group(*grpidx)[1:]])\n\t\t\tfor f in os.listdir(dir) for m in [regexp.match(f)] if m]\n\n\ndef specreptype():\n\t'''\n\tReturns a numpy data type consisting of a 64-bit complex component,\n\tlabeled 'val', which stores the magnitude of a spectral component and a\n\t64-bit integer, labeled 'idx', which stores the component's FFT index.\n\t'''\n\treturn np.dtype([('val', np.complex64), ('idx', np.int64)])\n\n\ndef splitspecreps(a):\n\t'''\n\tBreak a record array a of concatenated spectral representations, with\n\tdtype habis.formats.specreptype(), into a list of record arrays\n\tcorresponding to each group of spectral representations in the original\n\tarray. The number of records in the first group (output[0]) is\n\tspecified by n[0] = (a[0]['idx'] + 1), with output[0] = a[:n[0]].\n\n\tThe number of records in a subsequent group (output[i]) is given by\n\n\t\tn[i] = (a[sum(n[:i-1])]['idx'] + 1),\n\n\twith output[i] = a[sum(n[:i-1]):sum(n[:i])].\n\t'''\n\tstart = 0\n\toutput = []\n\twhile start < len(a):\n\t\tnvals = a[start]['idx'] + 1\n\t\tif nvals < 1: raise ValueError('Spectral representation counts must be positive')\n\t\tgrp = a[start:start+nvals]\n\t\tif len(grp) < nvals: raise ValueError('Could not read specified number of records')\n\t\toutput.append(a[start:start+nvals])\n\t\tstart += nvals\n\treturn output\n\n\ndef countspecreps(f):\n\t'''\n\tFor a file f that contains sequence of spectral representations, return\n\tthe number of components in each group within the sequence. 
Thus, if A\n\trepresents the array of habis.formats.specreptype() records listed in the\n\tfile f, the output array n will have\n\n\t\tn[0] = (A[0]['idx'] + 1), and\n\t\tn[i] = (A[sum(n[:i-1])]['idx'] + 1).\n\t'''\n\tdtype = specreptype()\n\t# Open the file and determine its size\n\tinfile = open(f, 'rb')\n\tinfile.seek(0, os.SEEK_END)\n\tfend = infile.tell()\n\tinfile.seek(0, os.SEEK_SET)\n\t# Scan through the file to pick up all of the counts\n\tn = []\n\twhile (infile.tell() < fend):\n\t\t# Read the header record and add it to the list\n\t\tnrec = np.fromfile(infile, dtype=dtype, count=1)[0]['idx']\n\t\tn.append(nrec + 1)\n\t\t# Skip over the records for this group\n\t\tinfile.seek(nrec * dtype.itemsize, os.SEEK_CUR)\n\n\treturn n\n\n\ndef repreducer(n):\n\t'''\n\tThis is a factory function that returns a reducer function, suitable\n\tfor use in readfiresequence and readfirecapture, which selects only\n\trows whose repetition index matches the specified integer n.\n\t'''\n\tdef reducefunc(mat): return mat[mat[:,1].astype(int) == n]\n\treturn reducefunc\n\n\ndef readfirecapture(f, reducer=None):\n\t'''\n\tRead the capture of a single HABIS fire sequence (with any number of\n\ttransmit repetitions) in CSV format. The file has 4 header lines and is\n\tcomma-delimited. The format of each line is a sequence of integers\n\n\t\tchannel, repetition, samples...\n\n\twhere samples are in the range [-8192,8192). Channel values are indexed\n\tfrom zero.\n\n\tThe data is sorted first by channel and then by repetition index before\n\tprocessing.\n\n\tThe return value is a tuple (output, channels, repetitions), where\n\toutput is 3-D array of the form output[i,j,k], where i is the receive\n\tchannel index, j is the repetition, and k is the sample index. Every\n\treceive channel must contain the same number of repetitions or a\n\tValueError will be raised. 
The list channels contains elements that\n\tindicate the channel indices identified in the file, such that\n\tchannels[i] is the listed channel index for slice output[i,:,:].\n\tThe list repetitions is similarly defined such that repetitions[j] is\n\tthe listed repetition index for slice output[:,j,:].\n\n\tIf reducer is not None, it should be a callable that takes as input the\n\traw array data read from f and returns a filtered version of the data\n\tthat will be processed as if it were the raw data read from the file.\n\t'''\n\tfrom pandas import read_csv\n\t# Read the data and use the reducer filter if appropriate\n\tdata = read_csv(f, skiprows=4, header=None).values\n\t# If reducer is None, a TypeError is raised; just ignore it\n\ttry: data = reducer(data)\n\texcept TypeError: pass\n\n\t# Sort the data according to channel and repetition\n\tidx = sorted((d[0], d[1], i) for i, d in enumerate(data[:,:2]))\n\tdata = data[[v[-1] for v in idx]]\n\t# Count the channels and repetitions\n\tdef counter(x, y):\n\t\t\"Count the channel and repetition in a result dictionary tuple\"\n\t\ttry: x[0][y[0]] += 1\n\t\texcept KeyError: x[0][y[0]] = 1\n\t\ttry: x[1][y[1]] += 1\n\t\texcept KeyError: x[1][y[1]] = 1\n\t\treturn x\n\tchannels, repetitions = reduce(counter, idx, ({}, {}))\n\t# Ensure that all channels have the same repetition count\n\tif len(set(channels.values())) != 1:\n\t\traise ValueError('All channels must have the same number of repetitions')\n\tif len(set(repetitions.values())) != 1:\n\t\traise ValueError('Each channel must have the same set of repetition indices')\n\n\t# Strip out the channel and repetition indices\n\tchannels = sorted(channels.keys())\n\trepetitions = sorted(repetitions.keys())\n\n\tnchan = len(channels)\n\tnreps = len(repetitions)\n\tnsamps = data.shape[-1] - 2\n\n\treturn data[:,2:].reshape((nchan, nreps, nsamps)), channels, repetitions\n\n\ndef readfiresequence(fmt, findx, reducer=None):\n\t'''\n\tRead a series of HABIS fire captures whose names are given by the\n\tPython format string fmt. The string fmt is passed to the format\n\tfunction with each value in the sequence findx to produce a unique\n\tfilename. The output arrays of readfirecapture() are collected, in\n\tsequence, and concatenated along a new first axis.\n\n\tThe channel and repetition indices returned by readfirecapture() are\n\tignored.
However, because np.concatenate() is used to produce the\n\tconcatenated output, every readfirecapture() array must have the same\n\tshape.\n\n\tThe reducer is passed to readfirecapture for processing per-fire data.\n\t'''\n\tdata = [readfirecapture(fmt.format(f), reducer=reducer)[0][np.newaxis,:,:,:]\n\t\t\tfor f in findx]\n\treturn np.concatenate(data, axis=0)\n\n\nclass TxGroupIndex(tuple):\n\t'''\n\tA class to encapsulate and type-check transmit-index pairs.\n\t'''\n\tdef __new__(cls, lidx, gidx):\n\t\t'''\n\t\tCreate a new TxGroupIndex with local index lidx and\n\t\tgroup index gidx.\n\t\t'''\n\t\tlidx = strict_nonnegative_int(lidx)\n\t\tgidx = strict_nonnegative_int(gidx)\n\t\treturn tuple.__new__(cls, (lidx, gidx))\n\t@property\n\tdef idx(self): return self[0]\n\t@property\n\tdef grp(self): return self[1]\n\n\tdef signForTx(self, transmission, group):\n\t\t'''\n\t\tReturn the sign (-1, 0, 1) of the given transmission\n\t\tnumber and group for this transmit and group index.\n\t\t'''\n\t\t# If the groups don't match, the sign is zero\n\t\tif group != self.grp: return 0\n\n\t\t# Count number of common bits in transmission and idx\n\t\ttxcom = strict_nonnegative_int(transmission) & self.idx\n\t\tcount = 0\n\t\twhile txcom:\n\t\t\ttxcom &= txcom - 1\n\t\t\tcount += 1\n\n\t\t# Sign is +1 for even number of common bits\n\t\treturn 1 - 2 * (count % 2)\n\n\nclass TxGroupConfiguration(tuple):\n\t'''\n\tA class to encapsulate and type-check transmit-group configurations.\n\t'''\n\tdef __new__(cls, count, size):\n\t\t'''\n\t\tCreate a new TxGroupConfiguration.\n\t\t'''\n\t\tcount = strict_nonnegative_int(count)\n\t\tsize = strict_nonnegative_int(size)\n\t\treturn tuple.__new__(cls, (count, size))\n\n\t@property\n\tdef count(self): return self[0]\n\t@property\n\tdef size(self): return self[1]\n\t@property\n\tdef maxtx(self): return self[0] * self[1]\n\n\nclass RxChannelHeader(tuple):\n\t'''\n\tA class to encapsulate and type-check receive-channel headers\n\tin WaveformSet files.\n\t'''\n\tdef __new__(cls, idx, pos, win, txgrp=None):\n\t\t'''\n\t\tCreate a new header for receive channel idx,\n\t\telement location pos = (px, py, pz), and data window\n\t\twin = (start, length). 
The transmit group txgrp may\n\t\teither be None or (index, group).\n\t\t'''\n\t\tfrom .sigtools import Window\n\t\tidx = strict_nonnegative_int(idx)\n\t\tpx, py, pz = pos\n\t\tpos = tuple(float(p) for p in (px, py, pz))\n\t\t# Force the window start to be nonnegative\n\t\twin = Window(win, nonneg=True)\n\t\tif txgrp is not None: txgrp = TxGroupIndex(*txgrp)\n\t\treturn tuple.__new__(cls, (idx, pos, win, txgrp))\n\t@property\n\tdef idx(self): return self[0]\n\t@property\n\tdef pos(self): return self[1]\n\t@property\n\tdef win(self): return self[2]\n\t@property\n\tdef txgrp(self): return self[3]\n\n\tdef copy(self, **kwargs):\n\t\t\"Copy the header, optionally replacing certain properties.\"\n\t\tkeys = ['idx', 'pos', 'win', 'txgrp']\n\t\tprops = dict((key, kwargs.pop(key, getattr(self, key))) for key in keys)\n\t\tif len(kwargs):\n\t\t\traise TypeError(\"Unrecognized keyword '%s'\" % (next(iter(kwargs.keys())),))\n\t\treturn type(self)(**props)\n\n\nclass WaveformSet(object):\n\t'''\n\tA class to encapsulate a (possibly multi-facet) set of pulse-echo\n\tmeasurements from a single target.\n\t'''\n\t# A bidirectional mapping between typecodes and Numpy dtype names\n\tfrom pycwp.util import bidict\n\ttypecodes = bidict({b'I2': 'int16', b'I4': 'int32', b'I8': 'int64', b'F2': 'float16',\n\t\t\tb'F4': 'float32', b'F8': 'float64', b'C4': 'complex64', b'C8': 'complex128'})\n\n\t@staticmethod\n\tdef _get_open(f=None, compression=None):\n\t\t'''\n\t\tReturn the appropriate open function to handle optionally\n\t\tcompressed files and a Boolean that is True iff compression was\n\t\tdetected or requested.\n\n\t\tIf f is not None, it should be the name of an existing file.\n\t\tThe python-magic module will be used to determine whether\n\t\tgzip.open, bz2.open or the regular open should be used to read\n\t\tthe file. The \"compression\" argument in this case is ignored.\n\n\t\tIf f is None, then compression should be one of None, 'gzip' or\n\t\t'bz2'.\n\t\t'''\n\t\timport bz2, gzip\n\t\topeners = { 'bz2': bz2.open, 'gzip': gzip.open, '': open }\n\n\t\tif not f:\n\t\t\tcompression = (compression or '').strip().lower()\n\t\t\terrmsg = 'Value of compression must be None, \"gzip\" or \"bz2\"'\n\t\telse:\n\t\t\ttry: import magic\n\t\t\texcept ImportError: mime = ''\n\t\t\telse: mime = magic.Magic(mime=True).from_file(f).lower()\n\n\t\t\tcompression = { 'application/x-gzip': 'gzip',\n\t\t\t\t\t'application/x-bzip2': 'bz2' }.get(mime, '')\n\t\t\terrmsg = 'Unable to determine file compression scheme'\n\n\t\ttry: return (openers[compression], compression != '')\n\t\texcept KeyError: raise ValueError(errmsg)\n\n\n\t@classmethod\n\tdef fromwaveform(cls, wave, copy=False, hdr=None, rid=0, tid=0, f2c=0):\n\t\t'''\n\t\tCreate a new WaveformSet object with a single transmit index\n\t\tand a single receive index with a sample count and data type\n\t\tdefined by the provided Waveform wave. The sole waveform record\n\t\twill be populated with wave.\n\n\t\tIf copy is False, the record in the WaveformSet will, whenever\n\t\tpossible, capture a reference to the waveform data instead of\n\t\tmaking a copy. If copy is True, a copy will always be made.\n\n\t\tIf hdr is not None, it should be a receive-channel header that\n\t\twill be used for the single receive-channel record in the\n\t\toutput WaveformSet. 
The value of hdr.win will be overwritten\n\t\twith wave.datawin, and the value of rid will be ignored.\n\n\t\tIf hdr is None, a default header\n\n\t\t\t(rid, [0., 0., 0.], wave.datawin)\n\n\t\twill be used.\n\n\t\tThe parameter tid should be a single nonnegative integer that\n\t\tspecifies the transmit index to assign to the Waveform.\n\n\t\tThe parameter f2c should be a single nonnegative integer that\n\t\tspecifies the fire-to-capture delay to encode in the set.\n\t\t'''\n\t\t# Create the set\n\t\twset = cls(1, tid, wave.nsamp, f2c, wave.dtype)\n\n\t\tif hdr is None:\n\t\t\t# Create a default header\n\t\t\thdr = RxChannelHeader(rid, [0.]*3, wave.datawin)\n\t\telse:\n\t\t\t# Ensure hdr is RxChannelHeader, then set datawin\n\t\t\thdr = RxChannelHeader(*hdr).copy(win=wave.datawin)\n\n\t\twset.setrecord(hdr, wave.getsignal(wave.datawin), copy)\n\t\treturn wset\n\n\n\t@classmethod\n\tdef empty_like(cls, wset, with_context=True):\n\t\t'''\n\t\tCreate a new instance of WaveformSet configured exactly as\n\t\twset, except without any waveform records.\n\n\t\tIf with_context is True, the dictionary wset.context will be\n\t\tcopied (shallowly) into the created WaveformSet. Otherwise, the\n\t\tcontext of the created WaveformSet will be empty\n\t\t'''\n\t\tnwset = cls(wset.ntx, wset.txstart, wset.nsamp, wset.f2c, wset.dtype, wset.txgrps)\n\t\tif with_context: nwset.context = wset.context.copy()\n\t\telse: nwset.context = { }\n\t\treturn nwset\n\n\n\tdef __init__(self, ntx=0, txstart=0, nsamp=4096, f2c=0,\n\t\t\tdtype=np.dtype('int16'), txgrps=None):\n\t\t'''\n\t\tCreate an empty WaveformSet object that embodies acquisitions\n\t\tof a set of waveforms from a total of ntx transmission indices (0-based)\n\t\tstarting from index txstart. Each acquisition starts after a\n\t\tfire-to-capture delay of f2c samples and persists for nsamp\n\t\tsamples. Waveform arrays are stored with the specified Numpy\n\t\tdtype.\n\n\t\tIf txgrps is specified, it should be a TxGroupConfiguration\n\t\tobject or a tuple of the form (count, size) that specifies the\n\t\tnumber of transmit groups into which transmissions are\n\t\tsubdivided, and the number of elements in each group.\n\t\t'''\n\t\t# Record the waveform dtype\n\t\tself._dtype = np.dtype(dtype)\n\n\t\t# Prepopulate properties that will be validated later\n\t\tself._f2c = 0\n\t\tself._nsamp = 0\n\t\tself._ntx = 0\n\t\tself._txstart = 0\n\t\tself._txgrps = None\n\n\t\t# Create an empty, ordered record dictionary\n\t\t# Needed for validation of other properties\n\t\tself._records = OrderedDict()\n\n\t\t# Create an empty group map\n\t\tself._groupmap = { }\n\n\t\t# Assign validated properties\n\t\tself.nsamp = nsamp\n\t\tself.f2c = f2c\n\n\t\t# Build and validate the transmit-channel mapping\n\t\tself.ntx = ntx\n\t\tself.txstart = txstart\n\n\t\t# Initialize the group configuration as specified\n\t\tself.txgrps = txgrps\n\n\t\t# Extra scan context can be read from a file header and is\n\t\t# passed on when writing compatible versions, but is never\n\t\t# inherently interpreted\n\t\tself.context = { }\n\n\n\t@classmethod\n\tdef _verify_file_version(cls, version, write=False):\n\t\t'''\n\t\tEnsure that the provided version matches one supported by the\n\t\tWaveformSet class. If version is unsupported, a ValueError is\n\t\traised. 
Otherwise, just return the version tuple.\n\t\t'''\n\t\ttry:\n\t\t\tmajor, minor = version\n\t\t\tmajor = strict_nonnegative_int(major)\n\t\t\tminor = strict_nonnegative_int(minor)\n\t\texcept (TypeError, ValueError):\n\t\t\traise ValueError('Version format is not recognized')\n\n\t\tif major != 1: raise ValueError('Unsupported major version')\n\n\t\tif not write:\n\t\t\t# Support all currently defined formats for reading\n\t\t\tif not (0 <= minor < 7):\n\t\t\t\traise ValueError('Unsupported minor version for reading')\n\t\t\treturn (major, minor)\n\n\t\t# Only version-6 writes are supported\n\t\tif minor != 6:\n\t\t\traise ValueError('Unsupported minor version for writing')\n\n\t\treturn major, minor\n\n\n\tdef store(self, f, append=False, ver=(1,6), compression=None):\n\t\t'''\n\t\tWrite the WaveformSet object to the data file in f (either a\n\t\tname or a file-like object that allows writing).\n\n\t\tIf append is True, the file-level header is not written. An\n\t\tunopened file is opened for appends instead of truncating an\n\t\texisting file. It is the caller's responsibility to assure that\n\t\tan existing file header is consistent with records written by\n\t\tthis method in append mode.\n\n\t\tThe compression argument should be None, 'gzip' or 'bz2'. If\n\t\tcompression is not None, f is a string and append is False, the\n\t\tfile will be opened as a gzip.GzipFile (for 'gzip') or\n\t\tbz2.BZ2File (for 'bz2'). It is a ValueError to specify a\n\t\tnon-None value for compression and a string for f when append\n\t\tmode is True. When f is not a string, the value of compression\n\t\tis ignored.\n\n\t\t** NOTE **\n\t\tBecause the WaveformSet may map some input file for waveform\n\t\tarrays after calling load(), calling store() with the same file\n\t\tused to load() may cause unexpected behavior.\n\t\t'''\n\t\t# Open the file if it is not open\n\t\tif isinstance(f, str):\n\t\t\topener, compressed = self._get_open(None, compression)\n\t\t\tif compressed and append:\n\t\t\t\traise ValueError('Append mode with compression is not supported')\n\t\t\tf = opener(f, ('ab' if append else 'wb'))\n\n\t\t# Verify that the output version is supported\n\t\tmajor, minor = self._verify_file_version(ver, write=True)\n\n\t\t# A missing transmit-group configuration takes the special value (0,0)\n\t\ttry: gcount, gsize = self.txgrps\n\t\texcept (TypeError, ValueError): gcount, gsize = 0, 0\n\n\t\tif not append:\n\t\t\t# Encode the magic number and file version\n\t\t\thbytes = struct.pack('<4s2I', b'WAVE', major, minor)\n\n\t\t\t# Encode temperature values\n\t\t\ttemps = self.context.get('temps', [float('nan')]*2)\n\t\t\thbytes += np.asarray(temps, dtype=np.float32).tobytes()\n\n\t\t\t# Encode the datatype\n\t\t\ttypecode = self.typecodes.inverse[np.dtype(self.dtype).name][0]\n\t\t\thbytes += struct.pack('<2s', typecode)\n\n\t\t\t# Encode transmission parameters\n\t\t\thbytes += struct.pack('<4I2HI', self.f2c, self.nsamp,\n\t\t\t\t\tself.nrx, self.ntx, gcount, gsize, self.txstart)\n\n\t\t\ttry:\n\t\t\t\t# Make sure TGC is a 1-D array\n\t\t\t\ttgc = np.asarray(self.context['tgc'], dtype=np.float32).squeeze()\n\t\t\texcept KeyError:\n\t\t\t\t# Header contains no TGC records\n\t\t\t\thbytes += struct.pack('<I', 0)\n\t\t\telse:\n\t\t\t\tif tgc.ndim != 1:\n\t\t\t\t\traise ValueError('TGC must be a 1-D array of floats')\n\t\t\t\thbytes += struct.pack('<I', len(tgc))\n\t\t\t\thbytes += tgc.tobytes()\n\n\t\t\tf.write(hbytes)\n\n\t\t# Write each record in turn\n\t\tfor idx in sorted(self.rxidx):\n\t\t\thdr, waveforms = 
self._get_record_raw(idx)\n\n\t\t\tif idx != hdr.idx:\n\t\t\t\traise ValueError('Record index does not match receive-channel index')\n\n\t\t\tpx, py, pz = hdr.pos\n\t\t\tws, wl = hdr.win\n\n\t\t\t# Without a transmit-group configuration, use (0,0)\n\t\t\ttry: li, gi = hdr.txgrp\n\t\t\texcept (TypeError, ValueError): li, gi = 0, 0\n\n\t\t\t# Encode the receive-channel header\n\t\t\thbytes = struct.pack('<3I3f2I', idx, li, gi, px, py, pz, ws, wl)\n\n\t\t\tf.write(hbytes)\n\t\t\t# Encode the waveform data\n\t\t\twbytes = waveforms.tobytes()\n\t\t\tf.write(wbytes)\n\t\t\tf.flush()\n\n\n\t@staticmethod\n\tdef _funpack(f, fmt):\n\t\t'''\n\t\tRead from the file pointer f (using f.read) the appropriate\n\t\tnumber of bytes to unpack the struct described by the format\n\t\tstring fmt.\n\n\t\tThe file must already be open. Any exception is caught and\n\t\tconverted into a WaveformSetIOError.\n\t\t'''\n\t\ttry:\n\t\t\tsz = struct.calcsize(fmt)\n\t\t\treturn struct.unpack(fmt, f.read(sz))\n\t\texcept Exception as err:\n\t\t\traise WaveformSetIOError(f'Failure to unpack bytes: {err}')\n\n\n\t@staticmethod\n\tdef _npunpack(f, dtype, count):\n\t\t'''\n\t\tRead from the file pointer f (using f.read) the appropriate number\n\t\tof bytes to build a 1-D Numpy array of the specified type and\n\t\tcount. The count must be nonnegative. If count is 0, the\n\t\treturned array will be empty.\n\n\t\tThe file must already be open. Any exception raised by the I/O\n\t\tand Numpy bytes-to-array conversion is caught and converted\n\t\tinto a WaveformSetIOError.\n\t\t'''\n\t\tif count < 0:\n\t\t\traise ValueError(f'Cannot read {count} bytes into Numpy array')\n\t\telif count < 1:\n\t\t\treturn np.array([], dtype=dtype)\n\n\t\tdtype = np.dtype(dtype)\n\n\t\ttry:\n\t\t\trbytes = f.read(dtype.itemsize * count)\n\t\t\treturn np.frombuffer(rbytes, dtype, count)\n\t\texcept Exception as err:\n\t\t\traise WaveformSetIOError(f'Failure to read array: {err}')\n\n\n\t@classmethod\n\tdef load(cls, f, force_dtype=None, allow_duplicates=False,\n\t\t\tskip_zero_length=True, warn_on_error=True,\n\t\t\theader_only=False, stream_mode=False):\n\t\t'''\n\t\tCreate a WaveformSet object with the data in f, a file-like\n\t\tobject or string specifying a file name. If f is a file-like\n\t\tobject, parsing starts from the current file position.\n\n\t\tIn general, any error will cause a WaveformSetIOError exception\n\t\tto be raised.\n\n\t\tEach block of waveform data is memory-mapped (except when\n\t\tstream_mode is True; see below) from the source file. This\n\t\tmapping is copy-on-write; changes do not persist.\n\n\t\tIf force_dtype is not None, and the data type of records stored\n\t\tin the file is not equal to force_dtype, each record block will\n\t\tbe converted to the data type given by force_dtype.\n\n\t\tIf allow_duplicates is False, file parsing will halt the first\n\t\ttime a header is encountered for a receive-channel index\n\t\tpreviously encountered in the file. 
If allow_duplicates is\n\t\tTrue, each receive-channel record will replace any previously\n\t\tencountered records for the same channel index.\n\n\t\tRecords for which the data block has zero length will be read\n\t\tbut not stored in the WaveformSet object if skip_zero_length is\n\t\tTrue; if it is False, the empty record will be stored.\n\n\t\t** NOTE: If allow_duplicates is False, encountering multiple\n\t\trecords for the same receive-channel index will terminate\n\t\tparsing even if one or more of the duplicate records has zero\n\t\tlength and skip_zero_length is True.\n\n\t\tIt is an error if the number of parsed receive-channel records\n\t\tdoes not equal the number of records encoded in the file\n\t\theader. If warn_on_error is True, this error will cause a\n\t\twarning to be issued. Otherwise, a WaveformSetIOError will be\n\t\traised in case this error is encountered.\n\n\t\tIf header_only is True, the contents of the WaveformSet header\n\t\twill be read from the file, but processing will stop\n\t\tbefore records are read and stored in the WaveformSet instance.\n\t\tNo file-length checks are performed to determine whether the\n\t\tfile contents are valid (beyond the ability to parse the\n\t\theader), and no indication of the receive channels encoded in\n\t\tthe file will be available.\n\n\t\tWhen header_only is False, this method returns the WaveformSet\n\t\tinstance. When header_only is True, this method returns the\n\t\tWaveformSet and the value of the \"nrx\" property encoded in the\n\t\tfile.\n\n\t\tIf stream_mode is True, the waveform data will not be\n\t\tmemory-mapped, but will be copied into locally controlled\n\t\tmemory. Furthermore, seeks will not be performed on the input,\n\t\tmaking this mode suitable for compressed input. 
(This method\n\t\twill not attempt to open compressed files, so the argument f\n\t\tshould be a GzipFile, BZ2File or similar instance if inline\n\t\tdecompression is desired.)\n\t\t'''\n\t\t# Open the file if it is not open\n\t\tif isinstance(f, str):\n\t\t\topener, compressed = cls._get_open(f)\n\t\t\tf = opener(f, mode='rb')\n\t\t\t# Force stream mode for compressed input\n\t\t\tif compressed: stream_mode = True\n\n\t\t# Convenience: attach the file to funpack and npunpack\n\t\tfunpack = partial(cls._funpack, f)\n\t\tnpunpack = partial(cls._npunpack, f)\n\n\t\t# Read the magic number and file version\n\t\ttry:\n\t\t\tmagic, major, minor = funpack('<4s2I')\n\t\t\tif magic != b'WAVE': raise WaveformSetIOError\n\t\texcept WaveformSetIOError:\n\t\t\traise WaveformSetIOError('Unable to identify WAVE header')\n\n\t\ttry: major, minor = cls._verify_file_version((major, minor))\n\t\texcept ValueError as err:\n\t\t\traise WaveformSetIOError(f'Unsupported WAVE format: {err}')\n\n\t\t# Create some empty context\n\t\tcontext = { }\n\n\t\tif minor > 4:\n\t\t\t# Read temperature context\n\t\t\ttry: context['temps'] = npunpack('float32', 2)\n\t\t\texcept WaveformSetIOError as err:\n\t\t\t\traise WaveformSetIOError(f'Invalid temperature: {err}')\n\n\t\t# Read the type code for this file\n\t\ttry:\n\t\t\ttypecode = funpack('<2s')[0]\n\t\t\tdtype = np.dtype(cls.typecodes[typecode])\n\t\texcept (WaveformSetIOError, KeyError) as err:\n\t\t\traise WaveformSetIOError(f'Invalid typecode: {err}')\n\n\t\tif force_dtype is not None:\n\t\t\t# Force a dtype conversion, if necessary\n\t\t\tforce_dtype = np.dtype(force_dtype)\n\t\t\tif force_dtype == dtype: force_dtype = None\n\n\t\t# Parse common transmission parameters\n\t\tf2c, nsamp, nrx, ntx = funpack('<4I')\n\n\t\t# By default, start the transmission indexing at 0\n\t\ttxstart = 0\n\t\t# Clear any group configuration for now\n\t\ttxgrps = None\n\n\t\tif minor > 1:\n\t\t\t# Read the group configuration\n\t\t\tcount, size = funpack('<2H')\n\t\t\t# Make sure both values are sensible integers\n\t\t\tcount = strict_nonnegative_int(count)\n\t\t\tsize = strict_nonnegative_int(size)\n\n\t\t\t# Only configure transmit groups if the count is positive\n\t\t\tif count > 0:\n\t\t\t\t# Default group size, if unspecified, is 10240 / count\n\t\t\t\tif size == 0:\n\t\t\t\t\tsize = 10240 // count\n\t\t\t\t\tif size * count != 10240:\n\t\t\t\t\t\tmsg = f'Unable to infer size for {count} groups'\n\t\t\t\t\t\traise WaveformSetIOError(msg)\n\n\t\t\t\ttxgrps = count, size\n\n\t\t\t# For version (1,4) and above, read an explicit txstart\n\t\t\tif minor >= 4: txstart = funpack('<I')[0]\n\n\t\t\t# Minor versions below 6 used fixed 256-value TGC records\n\t\t\tif minor < 6: rcount = 256\n\t\t\telse: rcount = funpack('<I')[0]\n\n\t\t\tif rcount:\n\t\t\t\ttry: tgc = npunpack('float32', rcount)\n\t\t\t\texcept WaveformSetIOError as err:\n\t\t\t\t\tmsg = f'Unable to read {rcount} TGC values: {err}'\n\t\t\t\t\traise WaveformSetIOError(msg)\n\t\t\t\t# For minor versions < 6, don't keep all-zero TGC\n\t\t\t\tif minor > 5 or np.count_nonzero(tgc):\n\t\t\t\t\tcontext['tgc'] = tgc\n\t\telif minor == 0:\n\t\t\t# Version 0 uses an explicit 1-based transmit-index list\n\t\t\ttry: txidx = npunpack('uint32', ntx) - 1\n\t\t\texcept WaveformSetIOError as err:\n\t\t\t\tmsg = f'Tx list must contain {ntx} values: {err}'\n\t\t\t\traise WaveformSetIOError(msg)\n\n\t\t# Now create the empty object and associate context\n\t\twset = cls(ntx=ntx, txstart=txstart, nsamp=nsamp, f2c=f2c,\n\t\t\t\tdtype=(force_dtype or dtype), 
txgrps=txgrps)\n\t\twset.context = context\n\n\t\t# Skip processing of records in header_only mode\n\t\tif header_only: return wset, nrx\n\n\t\tif not stream_mode:\n\t\t\t# Use a single Python mmap buffer for backing data\n\t\t\t# (Map starts at file start; remember current location)\n\t\t\tfsrec = f.tell()\n\t\t\tbuf = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_COPY)\n\t\t\tf.seek(fsrec)\n\n\t\t# For (1, 2) files, keep a running index tally\n\t\tidx = -1\n\n\t\t# If the set isn't configured for transmit groups,\n\t\t# ignore any group spec in the receive-channel headers\n\t\tusegrps = (wset.txgrps is not None)\n\n\t\t# Keep track of duplicate records, if necessary\n\t\tif not allow_duplicates:\n\t\t\tencountered = set()\n\n\t\t# Parse through the specified number of receive records\n\t\t# As a special case, when nrx is zero, read all possible records\n\t\twhile nrx == 0 or wset.nrx < nrx:\n\t\t\tif minor == 2:\n\t\t\t\t# Update running index\n\t\t\t\tidx += 1\n\t\t\telse:\n\t\t\t\t# Read a global channel index\n\t\t\t\t# Correct 1-based indexing in early versions\n\t\t\t\ttry: idx = funpack('<I')[0] - int(minor < 2)\n\t\t\t\texcept WaveformSetIOError: break\n\n\t\t\t# Read element position and data window parameters\n\t\t\tif minor > 1:\n\t\t\t\t# Also read transmission group configuration\n\t\t\t\ttry: i, g, px, py, pz, ws, wl = funpack('<2I3f2I')\n\t\t\t\texcept WaveformSetIOError: break\n\n\t\t\t\ttxgrp = (i, g) if usegrps else None\n\t\t\t\tif minor == 2:\n\t\t\t\t\t# Correct an off-by-one window specification bug\n\t\t\t\t\tif wl == nsamp and ws == 1: ws = 0\n\t\t\telse:\n\t\t\t\ttry: px, py, pz, ws, wl = funpack('<3f2I')\n\t\t\t\texcept WaveformSetIOError: break\n\t\t\t\ttxgrp = None\n\n\t\t\t# Build the channel header\n\t\t\thdr = (idx, (px, py, pz), (ws, wl), txgrp)\n\n\t\t\tif not allow_duplicates:\n\t\t\t\tif idx in encountered:\n\t\t\t\t\tmsg = f'Parsing terminated at duplicate record {idx}'\n\t\t\t\t\twarnings.warn(WaveformSetIOWarning(msg))\n\t\t\t\t\t# Avoid detecting junk after duplicate header\n\t\t\t\t\tif not stream_mode: fsrec = f.tell()\n\t\t\t\t\tbreak\n\t\t\t\tencountered.add(idx)\n\n\t\t\t# Determine the shape of the waveform\n\t\t\twaveshape = (ntx, wl)\n\n\t\t\tif not stream_mode:\n\t\t\t\t# Return a view into the map\n\t\t\t\tfsmap = f.tell()\n\n\t\t\t\ttry:\n\t\t\t\t\twavemap = np.ndarray(waveshape,\n\t\t\t\t\t\t\tdtype=dtype, buffer=buf,\n\t\t\t\t\t\t\torder='C', offset=fsmap)\n\t\t\t\texcept TypeError: break\n\n\t\t\t\t# Skip to next header and update next record offset\n\t\t\t\tf.seek(fsmap + wavemap.nbytes)\n\t\t\t\tfsrec = f.tell()\n\t\t\telse:\n\t\t\t\t# Read into a new array\n\t\t\t\tnvals = waveshape[0] * waveshape[1]\n\n\t\t\t\ttry: wavemap = npunpack(dtype, nvals).reshape(waveshape, order='C')\n\t\t\t\texcept WaveformSetIOError: break\n\n\t\t\tif not skip_zero_length or wavemap.nbytes != 0:\n\t\t\t\tif force_dtype is not None:\n\t\t\t\t\twmap = wavemap.astype(force_dtype)\n\t\t\t\telse: wmap = wavemap\n\t\t\t\t# Add the record to the set\n\t\t\t\twset.setrecord(hdr, wmap, copy=False)\n\n\t\tif not stream_mode and f.tell() != fsrec:\n\t\t\twarnings.warn(WaveformSetIOWarning('Junk at end of file'))\n\n\t\tif nrx and wset.nrx != nrx:\n\t\t\terr = f'Header specifies {nrx} records, but read {wset.nrx}'\n\t\t\tif warn_on_error: warnings.warn(WaveformSetIOWarning(err))\n\t\t\telse: raise WaveformSetIOError(err)\n\n\t\treturn wset\n\n\n\t@property\n\tdef rxidx(self):\n\t\t'''\n\t\tReturn a list of receive-channel indices in file order.\n\t\t'''\n\t\treturn 
list(self._records.keys())\n\n\n\t@property\n\tdef txgrps(self):\n\t\t'''\n\t\tReturn the (count, size) of transmit groups, or None for no grouping.\n\t\t'''\n\t\treturn self._txgrps\n\n\n\t@txgrps.setter\n\tdef txgrps(self, grps):\n\t\t'''\n\t\tSet the group count and length. Removes any existing groupmap\n\t\tproperty.\n\t\t'''\n\t\tif grps == self._txgrps: return\n\n\t\tif self.nrx > 0:\n\t\t\traise ValueError('Cannot change transmit-group configuration with existing records')\n\n\t\tif grps is None:\n\t\t\tself._txgrps = None\n\t\t\tself.groupmap = None\n\t\t\treturn\n\n\t\ttry:\n\t\t\tgrps = TxGroupConfiguration(*grps)\n\t\texcept (TypeError, ValueError):\n\t\t\traise ValueError('Parameter must be None or (count, size) tuple')\n\n\t\tif grps.maxtx < self.ntx:\n\t\t\traise ValueError('Implied maximum transmission count is less than number of recorded transmissions')\n\t\tif grps.maxtx <= self.txstart:\n\t\t\traise ValueError('Implied maximum transmission count is less than starting transmission index')\n\n\t\tself._txgrps = grps\n\t\tself.groupmap = None\n\n\n\t@property\n\tdef txstart(self):\n\t\t'''\n\t\tReturn the first transmission index in the records.\n\t\t'''\n\t\treturn self._txstart\n\n\n\t@txstart.setter\n\tdef txstart(self, txstart):\n\t\t'''\n\t\tSet the first transmission index in the records, which must be\n\t\ta nonnegative integer within the transmission range implied by\n\t\tthe group configuration in self.txgrps.\n\t\t'''\n\t\tif txstart == self._txstart: return\n\n\t\ttxstart = strict_nonnegative_int(txstart)\n\n\t\ttry:\n\t\t\tmaxtx = self.txgrps.maxtx\n\t\texcept AttributeError:\n\t\t\tpass\n\t\telse:\n\t\t\tif txstart >= maxtx:\n\t\t\t\traise ValueError('Parameter txstart exceeds maxtx of transmit-group configuration')\n\n\t\tself._txstart = txstart\n\n\n\t@property\n\tdef txidx(self):\n\t\t'''\n\t\tReturn a generator of transmit-channel indices in file order.\n\t\t'''\n\t\ttxstart = self.txstart\n\n\t\ttry:\n\t\t\tmaxtx = self.txgrps.maxtx\n\t\texcept AttributeError:\n\t\t\tfor i in range(txstart, txstart + self.ntx):\n\t\t\t\tyield i\n\t\telse:\n\t\t\tfor i in range(txstart, txstart + self.ntx):\n\t\t\t\tyield i % maxtx\n\n\n\t@txidx.setter\n\tdef txidx(self, txidx):\n\t\t'''\n\t\tChecks the provided input sequence txidx for sequential\n\t\tordering and, if the check is satisfied, assigns self.txstart\n\t\tand self.ntx accordingly.\n\n\t\tIf the indices are not sequential, but self.txgrps is None, the\n\t\ttxgrp configuration and self.groupmap will be set to map\n\t\ttransmit indices 0 through len(txidx) - 1 to the elements of\n\t\ttxidx.\n\t\t'''\n\t\ttxidx = list(txidx)\n\n\t\ttry: txstart = txidx[0]\n\t\texcept IndexError:\n\t\t\tself.ntx = 0\n\t\t\tself.txstart = 0\n\t\t\treturn\n\n\t\ttry:\n\t\t\tmaxtx = self.txgrps.maxtx\n\t\texcept AttributeError:\n\t\t\tdef nextval(x): return (x + 1)\n\t\telse:\n\t\t\tdef nextval(x): return (x + 1) % maxtx\n\n\t\tlast = txstart\n\t\tsequential = True\n\n\t\tfor nv in txidx[1:]:\n\t\t\tlast = nextval(last)\n\t\t\tif nv != last:\n\t\t\t\tsequential = False\n\t\t\t\tbreak\n\n\t\tdef atomic_set(txstart, ntx):\n\t\t\t# Record the old txstart to ensure atomicity\n\t\t\totxstart = self.txstart\n\t\t\tself.txstart = txstart\n\n\t\t\ttry: self.ntx = ntx\n\t\t\texcept:\n\t\t\t\t# Restore the old txstart before failing\n\t\t\t\tself.txstart = otxstart\n\t\t\t\traise\n\n\t\tif not sequential:\n\t\t\tif self.txgrps is not None:\n\t\t\t\traise ValueError('Indices must be sequential or wrap when 
txgrps is defined')\n\t\t\t# Set txgrp configuration to remap out-of-sequence indices\n\t\t\tatomic_set(0, len(txidx))\n\t\t\tself.txgrps = (self.ntx, 1)\n\t\t\tself.groupmap = { txi: (0, i) for i, txi in enumerate(txidx) }\n\t\telse:\n\t\t\tatomic_set(txstart, len(txidx))\n\n\n\t@property\n\tdef ntx(self):\n\t\t'''\n\t\tReturn the number of transmissions per receive channel.\n\t\t'''\n\t\treturn self._ntx\n\n\n\t@ntx.setter\n\tdef ntx(self, ntx):\n\t\t'''\n\t\tSet the number of transmissions per receive channel.\n\t\t'''\n\t\t# Take no action if the count hasn't changed\n\t\tif ntx == self._ntx: return\n\n\t\t# Don't attempt to change the transmit count with existing records\n\t\tif self.nrx > 0:\n\t\t\traise ValueError('Cannot change number of transmissions with existing records')\n\n\t\ttry:\n\t\t\tif ntx > self.txgrps.maxtx:\n\t\t\t\traise ValueError('Number of transmissions must not exceed maxtx implied by transmit-group configuration')\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\tself._ntx = strict_nonnegative_int(ntx)\n\n\n\t@property\n\tdef nrx(self):\n\t\t'''\n\t\tReturn the number of receive channels in this waveform set.\n\t\t'''\n\t\treturn len(self._records)\n\n\n\t@property\n\tdef dtype(self):\n\t\t'''\n\t\tReturn the datatype used to store waveforms.\n\t\t'''\n\t\treturn self._dtype\n\n\n\t@dtype.setter\n\tdef dtype(self, value):\n\t\t'''\n\t\tSet the datatype used to store waveforms.\n\t\t'''\n\t\tif self._dtype == value: return\n\n\t\tif self.nrx > 0:\n\t\t\traise ValueError('Cannot change datatype with existing records')\n\t\tself._dtype = np.dtype(value)\n\n\n\t@property\n\tdef nsamp(self):\n\t\t'''\n\t\tReturn the total number of samples collected in the acquisitions.\n\t\t'''\n\t\treturn self._nsamp\n\n\n\t@nsamp.setter\n\tdef nsamp(self, nsamp):\n\t\t'''\n\t\tSet the total number of samples in the acquisition window.\n\t\tEnsure existing records don't fall outside of the window.\n\t\t'''\n\t\tif self._nsamp == nsamp: return\n\n\t\t# Force the new value to be a nonnegative integer\n\t\tnsamp = strict_nonnegative_int(nsamp)\n\n\t\t# Check all existing records to ensure their windows don't\n\t\t# extend past the new acquisition window\n\t\tfor hdr, wforms in self.allrecords():\n\t\t\tstart, length = hdr.win\n\t\t\tif start + length > nsamp:\n\t\t\t\traise ValueError('Acquisition window fails to contain stored waveforms')\n\n\t\t# Set the new value\n\t\tself._nsamp = nsamp\n\n\n\t@property\n\tdef f2c(self):\n\t\t'''\n\t\tReturn the fire-to-capture delay in 20-MHz samples.\n\t\t'''\n\t\treturn self._f2c\n\n\n\t@f2c.setter\n\tdef f2c(self, val):\n\t\t'''\n\t\tSet the fire-to-capture delay in 20-MHz samples.\n\t\t'''\n\t\tif self._f2c == val: return\n\t\tself._f2c = strict_nonnegative_int(val)\n\n\n\t@property\n\tdef groupmap(self):\n\t\t'''\n\t\tAccess a copy of the map from global element indices to\n\t\ttuples (local index, group index) that govern firing order.\n\t\t'''\n\t\treturn dict(self._groupmap)\n\n\n\t@groupmap.setter\n\tdef groupmap(self, grpmap):\n\t\t'''\n\t\tCheck the provided mapping from global element indices to\n\t\t(local index, group index) for consistency and assign the map\n\t\tto this instance.\n\n\t\tSet grpmap to None or an object with 0 len() to clear the map.\n\t\t'''\n\t\tif grpmap is None or len(grpmap) < 1:\n\t\t\tself._groupmap = { }\n\t\t\treturn\n\n\t\tif self.txgrps is None:\n\t\t\traise ValueError('Cannot set a group map without a txgrps configuration for the WaveformSet')\n\n\t\t# Make sure the map is valid and consistent with 
txgrp configuration\n\t\tngrpmap = { }\n\t\tfor k, v in grpmap.items():\n\t\t\tki = strict_nonnegative_int(k)\n\t\t\tvi, vg = [strict_nonnegative_int(vl) for vl in v]\n\t\t\tif vi >= self.txgrps.size:\n\t\t\t\traise ValueError('Local index in group map exceeds txgrp size')\n\t\t\tif vg >= self.txgrps.count:\n\t\t\t\traise ValueError('Group index in group map exceeds txgrp count')\n\t\t\tngrpmap[ki] = (vi, vg)\n\n\t\t# Check any local receive-channel records for consistency\n\t\tfor hdr in self.allheaders():\n\t\t\tif ngrpmap.get(hdr.idx, hdr.txgrp) != hdr.txgrp:\n\t\t\t\traise ValueError('Group map does not match receive-channel record at index %d' % hdr.idx)\n\n\t\tself._groupmap = ngrpmap\n\n\n\tdef element2tx(self, elt, unfold=True):\n\t\t'''\n\t\tConvert an element index elt into a transmission index. If no\n\t\ttransmit-group configuration exists, this is *ALWAYS* the\n\t\tidentity map.\n\n\t\tWhen a transmit-group configuration exists, self.groupmap is\n\t\tfirst checked for a transmit index for elt. If the groupmap\n\t\tdoes not exist or fails to specify the necessary index, the\n\t\ttxgrp configuration for a receive-channel record for index elt\n\t\t(if one exists) is used.\n\n\t\tIf unfold is True, the transmission index is a scalar value\n\t\tthat directly indexes rows in record arrays. If unfold is\n\t\tFalse, the transmission index is a pair (locidx, grpnum) that\n\t\tmaps to the unfolded index, t, by\n\n\t\t\tt = locidx + grpnum * self.txgrps.size.\n\t\t'''\n\t\telt = strict_nonnegative_int(elt)\n\n\t\ttry: gcount, gsize = self.txgrps\n\t\texcept TypeError: return elt\n\n\t\ttry:\n\t\t\ttxgrp = self._groupmap[elt]\n\t\texcept KeyError:\n\t\t\ttry: txgrp = self.getheader(elt).txgrp\n\t\t\texcept KeyError:\n\t\t\t\traise KeyError('Could not find map record for receive channel %d' % elt)\n\n\t\ttry:\n\t\t\tidx, grp = txgrp\n\t\texcept (TypeError, ValueError):\n\t\t\traise ValueError('Unable to unpack invalid txgrp for channel %d' % elt)\n\n\t\treturn (grp * gsize + idx) if unfold else (idx, grp)\n\n\n\tdef tx2row(self, tid):\n\t\t'''\n\t\tConvert a transmit-channel index into a waveform-array row index.\n\t\t'''\n\t\t# Ensure that the argument is properly bounded\n\t\ttid = strict_nonnegative_int(tid)\n\n\t\ttxstart = self.txstart\n\n\t\ttry: maxtx = self.txgrps.maxtx\n\t\texcept AttributeError: maxtx = None\n\n\t\tif maxtx is not None:\n\t\t\tif tid >= maxtx:\n\t\t\t\traise ValueError('Argument tid exceeds self.txgrps.maxtx')\n\t\t\t# Shift low values to account for wraparound\n\t\t\tif tid < txstart: tid += maxtx\n\n\t\t# Shift relative to start\n\t\ttid -= self.txstart\n\n\t\t# Ensure the bounds are sensible\n\t\tif not 0 <= tid < self.ntx:\n\t\t\traise ValueError('Transmit index is not contained in this file')\n\t\treturn tid\n\n\n\tdef _get_record_raw(self, rid):\n\t\t'''\n\t\tReturn the raw (header, data) record for a given receive\n\t\tchannel rid, with only sanity checks on rid.\n\t\t'''\n\t\treturn self._records[strict_nonnegative_int(rid)]\n\n\n\tdef getheader(self, rid):\n\t\t'''\n\t\tReturn the channel header for receive channel rid.\n\t\t'''\n\t\treturn self._get_record_raw(rid)[0]\n\n\n\tdef getrecord(self, rid, tid=None, window=None, dtype=None, maptids=False):\n\t\t'''\n\t\tReturn a (header, waveforms) record for the receive channel\n\t\twith channel index rid. 
If window is None and dtype is None,\n\t\tthe waveforms data array is a view of the internal\n\t\tcopy-on-write memory map.\n\n\t\tIf tid is not None, it should be a scalar integer or an\n\t\titerable of integers that represent transmit channel indices to\n\t\tpull from the waveform array. When tid is a scalar, a 1-D array\n\t\tis returned to represent the samples for the specified\n\t\ttransmission. When tid is an iterable (even of length 1), a 2-D\n\t\tarray is returned with transmit indices along the rows (in the\n\t\torder specified by tid) and waveform samples along the columns.\n\t\tWhen tid is None, self.txidx is assumed.\n\n\t\tIf window is not None, it should be a tuple (start, length)\n\t\tthat specifies the first sample and length of the temporal\n\t\twindow over which the waveforms are interpreted. Even if window\n\t\tmatches the internal window in the header, a copy of the\n\t\twaveform array will be made.\n\n\t\tIf dtype is not None, the output copy of the waveforms in the\n\t\trecord will be cast to this datatype.\n\n\t\tIf exactly one of window or dtype is None, the corresponding\n\t\tvalue from the record will be used.\n\n\t\tTo force a copy without knowing or changing the window and\n\t\tdtype, pass dtype=0.\n\n\t\tIf maptids is True, any indices specified in tid will be\n\t\tconverted from an element index to a transmission index using\n\t\tself.element2tx().\n\t\t'''\n\t\t# Grab the raw receive record; the header tuple is immutable\n\t\thdr, waveforms = self._get_record_raw(rid)\n\n\t\tif maptids and tid is not None:\n\t\t\t# Map element indices to transmission indices\n\t\t\ttry:\n\t\t\t\ttid = self.element2tx(tid)\n\t\t\texcept TypeError:\n\t\t\t\ttid = [self.element2tx(t) for t in tid]\n\n\t\ttry:\n\t\t\ttcidx = self.tx2row(tid)\n\t\t\tsingletx = True\n\t\texcept TypeError:\n\t\t\tsingletx = False\n\t\t\tif tid is None:\n\t\t\t\ttcidx = list(range(self.ntx))\n\t\t\telse:\n\t\t\t\ttcidx = [self.tx2row(t) for t in tid]\n\n\t\tif window is None:\n\t\t\tif dtype is None:\n\t\t\t\t# With no type override, just return a view\n\t\t\t\treturn hdr, waveforms[tcidx,:]\n\t\t\telse:\n\t\t\t\t# Force a type conversion and copy\n\t\t\t\tif dtype == 0:\n\t\t\t\t\tdtype = waveforms.dtype\n\t\t\t\treturn hdr, waveforms[tcidx,:].astype(dtype, copy=True)\n\n\t\t# Handle a specific data window\n\t\tfrom .sigtools import Window\n\t\twindow = Window(window)\n\n\t\t# Handle unspecified data types\n\t\tif dtype is None or dtype == 0:\n\t\t\tdtype = waveforms.dtype\n\n\t\t# Create an output array to store the results\n\t\toshape = (1 if singletx else len(tcidx), window.length)\n\t\toutput = np.zeros(oshape, dtype=dtype)\n\n\t\ttry:\n\t\t\t# Figure out the overlapping sample window\n\t\t\t# Raises TypeError if overlap() returns None\n\t\t\tfrom pycwp.cutil import overlap\n\t\t\tostart, istart, wlen = overlap(window, hdr.win)\n\t\t\toend, iend = ostart + wlen, istart + wlen\n\n\t\t\t# Copy portion of waveforms overlapping the window\n\t\t\toutput[:,ostart:oend] = waveforms[tcidx,istart:iend]\n\t\texcept TypeError: pass\n\n\t\t# For a scalar tid, collapse the 2-D array\n\t\tif singletx: output = output[0]\n\n\t\t# Override the window in the header copy\n\t\treturn hdr.copy(win=window), output\n\n\n\tdef getwaveform(self, rid, tid, *args, cyclic=False, **kwargs):\n\t\t'''\n\t\tReturn, as one or more habis.sigtools.Waveform objects, the\n\t\twaveform(s) recorded at receive-channel index rid from the\n\t\t(scalar or iterable of) transmission(s) tid.\n\n\t\tIf tid is a scalar, a single Waveform object is 
returned.\n\t\tOtherwise, if tid is an iterable or None (which pulls all\n\t\ttransmissions), a list of Waveform objects is returned.\n\n\t\tThe Waveform time reference is the global time reference. In\n\t\tother words, the Waveform is created from the raw record, then\n\t\tshifted by self.f2c. If the shift moves the data window past\n\t\tthe end of the window (0, self.nsamp), some of the data will be\n\t\tclipped. To instead cyclically wrap any samples that would be\n\t\tclipped, pass cyclic=True to this method.\n\n\t\tExtra args and kwargs are passed through to getrecord().\n\t\t'''\n\t\tfrom .sigtools import Waveform\n\t\t# Grab the relevant row of the record\n\t\thdr, wform = self.getrecord(rid, tid, *args, **kwargs)\n\n\t\t# Wrap a single desired signal in a Waveform object\n\t\tif np.ndim(wform) == 1:\n\t\t\twave = Waveform(self.nsamp, wform, hdr.win.start)\n\t\t\twave = wave.shift(self.f2c, cyclic=cyclic)\n\t\t\treturn wave\n\t\telse:\n\t\t\twarr = [ ]\n\t\t\tfor w in wform:\n\t\t\t\twave = Waveform(self.nsamp, w, hdr.win.start)\n\t\t\t\twave = wave.shift(self.f2c, cyclic=cyclic)\n\t\t\t\twarr.append(wave)\n\t\t\treturn warr\n\n\n\tdef delrecord(self, rid):\n\t\t'''\n\t\tDelete the waveform record for the receive-channel index rid.\n\t\t'''\n\t\tdel self._records[strict_nonnegative_int(rid)]\n\n\n\tdef clearall(self):\n\t\t'''\n\t\tDelete all waveform records in the set.\n\t\t'''\n\t\t# Just create a new record dictionary\n\t\tself._records = OrderedDict()\n\n\n\tdef setrecord(self, hdr, waveforms=None, copy=True):\n\t\t'''\n\t\tSave a waveform record consisting of the provided header and\n\t\twaveform array. If a record for the receive channel specified\n\t\tin the header already exists, it will be overwritten.\n\t\tOtherwise, the record will be created.\n\n\t\tIf the header specifies None for txgrp, but the WaveformSet\n\t\ttransmit-group configuration is not None, any groupmap\n\t\tassociated with the WaveformSet will be searched for a matching\n\t\treceive-channel index to create a matching txgrp. No other\n\t\tautomatic txgrp manipulation is attempted.\n\n\t\tThe waveform array must either be a Numpy ndarray or None. When\n\t\twaveforms takes the special value None, a new, all-zero\n\t\twaveform array is created (regardless of the value of copy).\n\n\t\tIf copy is False, the record will store a reference to the\n\t\twaveform array if the types are compatible. 
If copy is True, a\n\t\tlocal copy of the waveform array, cast to this set's dtype,\n\t\twill always be made.\n\t\t'''\n\t\thdr = RxChannelHeader(*hdr)\n\n\t\tif self.txgrps is not None:\n\t\t\t# Ensure consistency with the group configuration\n\t\t\tif hdr.txgrp is None:\n\t\t\t\t# Check the group map for a matching record\n\t\t\t\ttry:\n\t\t\t\t\ttxgrp = self.element2tx(hdr.idx, unfold=False)\n\t\t\t\texcept (KeyError, TypeError):\n\t\t\t\t\traise ValueError('Record is missing required txgrp configuration')\n\t\t\t\telse:\n\t\t\t\t\thdr = hdr.copy(txgrp=txgrp)\n\t\t\telif hdr.txgrp.grp >= self.txgrps.count:\n\t\t\t\traise ValueError('Record group number too large')\n\t\t\telif hdr.txgrp.idx >= self.txgrps.size:\n\t\t\t\traise ValueError('Record local index too large')\n\t\t\telse:\n\t\t\t\t# Ensure consistency with the groupmap\n\t\t\t\ttry:\n\t\t\t\t\trgrp = self.groupmap[hdr.idx]\n\t\t\t\texcept (TypeError, KeyError):\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tif rgrp != hdr.txgrp:\n\t\t\t\t\t\traise ValueError('Record txgrp does not match groupmap')\n\t\telif hdr.txgrp is not None:\n\t\t\traise ValueError('Record contains inappropriate txgrp configuration')\n\n\t\t# Check that the header bounds make sense\n\t\tif hdr.win.end > self.nsamp:\n\t\t\traise ValueError('Waveform sample window exceeds acquisition window duration')\n\n\t\tif waveforms is None:\n\t\t\t# Create an all-zero waveform array\n\t\t\twshape = (self.ntx, hdr.win.length)\n\t\t\twaveforms = np.zeros(wshape, dtype=self.dtype)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tif copy or waveforms.dtype != self.dtype:\n\t\t\t\t\t# Make a copy of the waveform in proper format\n\t\t\t\t\traise TypeError('Conversion of dtypes required')\n\t\t\texcept (AttributeError, TypeError):\n\t\t\t\twaveforms = np.array(waveforms, dtype=self.dtype)\n\n\t\t\t# Pad 0-d and 1-d waveforms to 2-d\n\t\t\tif waveforms.ndim < 2:\n\t\t\t\twaveforms = waveforms.reshape((1,) * (2 - waveforms.ndim) + waveforms.shape)\n\n\t\t\t# Check the proper shape of the provided array\n\t\t\tntx, nsamp = waveforms.shape\n\t\t\tif ntx != self.ntx:\n\t\t\t\traise ValueError('Waveform array does not match transmission count for set')\n\t\t\tif nsamp != hdr.win.length:\n\t\t\t\traise ValueError('Waveform array does not match sample count specified in header')\n\n\t\t# Add or replace the record\n\t\tself._records[hdr.idx] = (hdr, waveforms)\n\n\n\tdef allrecords(self, *args, **kwargs):\n\t\t'''\n\t\tReturn a generator that fetches each record, in channel-index\n\t\torder, using self.getrecord(rid, *args, **kwargs).\n\t\t'''\n\t\tfor rid in sorted(self.rxidx):\n\t\t\tyield self.getrecord(rid, *args, **kwargs)\n\n\n\tdef allheaders(self):\n\t\t'''\n\t\tReturn a generator that fetches, in channel-index order, only\n\t\tthe receive-channel record headers.\n\t\t'''\n\t\tfor rid in sorted(self.rxidx):\n\t\t\tyield self.getheader(rid)\n" ]
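For reference, the version-6 receive-channel header that store() and load() exchange in the file above is plain fixed-layout struct packing. A minimal standalone sketch of the round trip, using hypothetical field values that are not part of the WaveformSet API:

import struct

# Version-6 receive-channel header: channel index, local tx index and
# group index (3 uint32), element position (3 float32), window start
# and length (2 uint32), all little-endian -- 32 bytes in total
HDR_FMT = '<3I3f2I'

idx, li, gi = 42, 3, 1          # hypothetical indices
px, py, pz = 0.1, -0.25, 1.5    # hypothetical element position
ws, wl = 100, 2048              # hypothetical data window

packed = struct.pack(HDR_FMT, idx, li, gi, px, py, pz, ws, wl)
assert len(packed) == struct.calcsize(HDR_FMT) == 32

# Unpacking recovers the fields; floats round-trip at float32 precision
fields = struct.unpack(HDR_FMT, packed)
assert fields[:3] == (idx, li, gi) and fields[-2:] == (ws, wl)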
[ [ "numpy.concatenate", "numpy.array", "numpy.count_nonzero", "numpy.savetxt", "numpy.asarray", "numpy.zeros", "numpy.sum", "numpy.load", "numpy.ndarray", "numpy.loadtxt", "numpy.ndim", "numpy.frombuffer", "numpy.all", "numpy.fromfile", "numpy.issubdtype", "numpy.dtype", "pandas.read_csv" ] ]
jgerardin/covid-chicago
[ "c2b91fdb42eece413e6fb0f6cee019357b96e00d" ]
[ "data_processing/exceeding_capacity_1.py" ]
[ "print('Importing packages...')\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport datetime as dt\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.dates as mdates\nimport datetime\n#sns.set(color_codes=True)\nimport matplotlib as mpl\nmpl.rcParams['pdf.fonttype'] = 42\nimport statistics as st\nsns.set_style('whitegrid', {'axes.linewidth' : 0.5})\nfrom statsmodels.distributions.empirical_distribution import ECDF\nimport scipy\nimport gc\n\ncolumn_list = ['scen_num', 'reopening_multiplier_4']\nfor ems_region in range(1,12):\n column_list.append('hosp_det_EMS-' + str(ems_region))\n column_list.append('hosp_det_cumul_EMS-' + str(ems_region))\n column_list.append('detected_cumul_EMS-' + str(ems_region))\n\n#Specify paths to trajectories. For this run, all trajectories were temporarily stored in the same folder.\n\nprint('Reading trajectories...')\nsub1 = pd.read_csv('trajectoriesDat_1.csv', usecols=column_list) #0.08 - 0.09\nprint('Trajectory 1 read.')\nsub2 = pd.read_csv('trajectoriesDat_2.csv', usecols=column_list) #0.10 - 0.115\nprint('Trajectory 2 read.')\nsub3 = pd.read_csv('trajectoriesDat_3.csv', usecols=column_list) #0.087 - 0.10\nprint('Trajectory 3 read.')\nsub4 = pd.read_csv('trajectoriesDat_08.csv', usecols=column_list) # 0.08 - 0.10\nsub4['scen_num'] = sub4['scen_num'].values + 1000\nprint('Trajectory 4 read.')\nsub5 = pd.read_csv('trajectoriesDat_300.csv', usecols=column_list) #0.1 - 0.11\nsub5['scen_num'] = sub5['scen_num'].values + 2000\nprint('Trajectory 5 read.')\nsub6 = pd.read_csv('trajectoriesDat_600.csv', usecols=column_list) #0.115 - 0.13\nsub6['scen_num'] = sub6['scen_num'].values + 2000\nprint('Trajectory 6 read.')\nsub7 = pd.read_csv('trajectoriesDat_1000.csv', usecols=column_list) #0.13 - 0.15\nsub7['scen_num'] = sub7['scen_num'].values + 2000\nprint('Trajectory 7 read.')\nsub8 = pd.read_csv('trajectoriesDat_15.csv', usecols=column_list) #0.13 - 0.15\nsub8['scen_num'] = sub8['scen_num'].values + 3000\nprint('Trajectory 8 read.')\n\n###loop here\nfor region in ['NE', 'NC', 'CE', 'SO']:\n for capacity in ['high', 'low']:\n for metric in ['det', 'hosp']: #current implementation only allows tracking new_detected and new_hosp.\n boink = []\n\n ### Region\n\n #hospital_capacity = 1907\n #NE 4919 8609 12299\n #NC 1089 1907 2724\n #CE 856 1498 2140\n #SO 640 1121 1601\n\n ### Metric to assess:\n if metric == 'det':\n notif = 'new_det_' + region\n if metric == 'hosp':\n notif = 'new_hosp_det_' + region\n\n ### Simulation Dates to Examine\n lower_limit = 145\n upper_limit = 225\n grain = 1\n\n prob_over_array = []\n range_1 = np.arange(0, 25, 0.01)\n\n ### Capacity\n ### Which trajectories to use for each capacity were determined by hand.\n if region == 'NE':\n if capacity == 'low':\n hospital_capacity = 4919\n trajectories = pd.concat([sub1, sub3, sub4]).reset_index()\n elif capacity == 'high':\n hospital_capacity = 8609\n trajectories = pd.concat([sub1, sub2, sub3]).reset_index()\n elif region == 'NC':\n if capacity == 'low':\n hospital_capacity = 1089\n trajectories = pd.concat([sub4, sub5, sub6, sub7]).reset_index()\n elif capacity == 'high':\n hospital_capacity = 1907\n trajectories = pd.concat([sub5, sub6, sub7]).reset_index()\n elif region == 'CE':\n if capacity == 'low':\n hospital_capacity = 856\n trajectories = pd.concat([sub5, sub6, sub7]).reset_index()\n elif capacity == 'high':\n hospital_capacity = 1498\n trajectories = sub8 #pd.concat([sub5, sub6, sub7, sub8]).reset_index() ##need new\n elif region == 'SO':\n if capacity == 'low':\n 
hospital_capacity = 640\n trajectories = pd.concat([sub1, sub2, sub3]).reset_index()\n elif capacity == 'high':\n hospital_capacity = 1121\n trajectories = pd.concat([sub5, sub6, sub7]).reset_index()\n\n #NE Region\n\n trajectories['hosp_det_NE'] = trajectories['hosp_det_EMS-11'] + \\\n trajectories['hosp_det_EMS-10'] + \\\n trajectories['hosp_det_EMS-9'] + \\\n trajectories['hosp_det_EMS-8'] + \\\n trajectories['hosp_det_EMS-7']\n\n trajectories['hosp_det_cumul_NE'] = trajectories['hosp_det_cumul_EMS-11'] + \\\n trajectories['hosp_det_cumul_EMS-10'] + \\\n trajectories['hosp_det_cumul_EMS-9'] + \\\n trajectories['hosp_det_cumul_EMS-8'] + \\\n trajectories['hosp_det_cumul_EMS-7']\n\n trajectories['detected_cumul_NE'] = trajectories['detected_cumul_EMS-11'] + \\\n trajectories['detected_cumul_EMS-10'] + \\\n trajectories['detected_cumul_EMS-9'] + \\\n trajectories['detected_cumul_EMS-8'] + \\\n trajectories['detected_cumul_EMS-7']\n\n #NC Region\n\n trajectories['hosp_det_NC'] = trajectories['hosp_det_EMS-1'] + trajectories['hosp_det_EMS-2'] \n trajectories['hosp_det_cumul_NC'] = trajectories['hosp_det_cumul_EMS-1'] + trajectories['hosp_det_cumul_EMS-2'] \n trajectories['detected_cumul_NC'] = trajectories['detected_cumul_EMS-1'] + trajectories['detected_cumul_EMS-2']\n\n #CE Region\n\n trajectories['hosp_det_CE'] = trajectories['hosp_det_EMS-3'] + trajectories['hosp_det_EMS-6'] \n trajectories['hosp_det_cumul_CE'] = trajectories['hosp_det_cumul_EMS-3'] + trajectories['hosp_det_cumul_EMS-6'] \n trajectories['detected_cumul_CE'] = trajectories['detected_cumul_EMS-3'] + trajectories['detected_cumul_EMS-6']\n\n #SO Region\n\n trajectories['hosp_det_SO'] = trajectories['hosp_det_EMS-4'] + trajectories['hosp_det_EMS-5'] \n trajectories['hosp_det_cumul_SO'] = trajectories['hosp_det_cumul_EMS-4'] + trajectories['hosp_det_cumul_EMS-5'] \n trajectories['detected_cumul_SO'] = trajectories['detected_cumul_EMS-4'] + trajectories['detected_cumul_EMS-5']\n\n print('Region: ' + region)\n print('Capacity: ' + str(capacity))\n print('Metric: ' + str(notif))\n thresh = []\n p_array = []\n dates_array = []\n over_array = []\n no_array = []\n days_array = np.arange(lower_limit,upper_limit, grain)\n for notif_period in days_array:\n trajectories_new = trajectories\n unique_scen = np.array(list(set(trajectories_new['scen_num'].values)))\n overflow_date = []\n max_date = []\n #notif = 'new_detected'\n overflow_traj = []\n traj = []\n non_overflow_traj = []\n overflow_scens = []\n non_overflow_scens = []\n non_overflow_crit_day = []\n overflow_crit_day = []\n overflow_week = []\n overflow_prior_week = []\n non_overflow_week = []\n non_overflow_prior_week = []\n crit_day = []\n week = []\n week_prior = []\n crit = notif_period\n for scen in unique_scen:\n new = trajectories_new[(trajectories_new['scen_num'] == scen)].reset_index()\n new['new_hosp_det_NE'] = np.append(np.array([0.0]), np.diff(new['hosp_det_cumul_NE'].values))\n new['new_det_NE'] = np.append(np.array([0.0]), np.diff(new['detected_cumul_NE'].values))\n new['new_hosp_det_NC'] = np.append(np.array([0.0]), np.diff(new['hosp_det_cumul_NC'].values))\n new['new_det_NC'] = np.append(np.array([0.0]), np.diff(new['detected_cumul_NC'].values))\n new['new_hosp_det_CE'] = np.append(np.array([0.0]), np.diff(new['hosp_det_cumul_CE'].values))\n new['new_det_CE'] = np.append(np.array([0.0]), np.diff(new['detected_cumul_CE'].values))\n new['new_hosp_det_SO'] = np.append(np.array([0.0]), np.diff(new['hosp_det_cumul_SO'].values))\n new['new_det_SO'] = 
np.append(np.array([0.0]), np.diff(new['detected_cumul_SO'].values))\n hosp = new['hosp_det_' + region].values #new['hosp_det'].values\n i = 0\n traj.append(hosp)\n while (hosp[i] < hospital_capacity) & (i < len(hosp)-1):\n i += 1\n crit_day.append(i)\n if i == len(hosp) - 1:\n non_overflow_traj.append(hosp)\n non_overflow_scens.append(scen)\n\n #crit_day.append(i)\n non_overflow_week.append(np.mean(new[notif].values[crit-7:crit]))\n non_overflow_prior_week.append(np.mean(new[notif].values[crit-14:crit-7]))\n else:\n overflow_traj.append(hosp)\n overflow_scens.append(scen)\n\n #crit_day.append(i)\n overflow_week.append(np.mean(new[notif].values[crit-7:crit]))\n overflow_prior_week.append(np.mean(new[notif].values[crit-14:crit-7]))\n overflow_week = np.array(overflow_week)\n overflow_prior_week = np.array(overflow_prior_week)\n non_overflow_week = np.array(non_overflow_week)\n non_overflow_prior_week = np.array(non_overflow_prior_week) \n overflow_date = np.array(overflow_date)\n max_date = np.array(max_date)\n week = np.array(week)\n crit_day = np.array(crit_day)\n week_prior = np.array(week_prior)\n boink.append(np.mean(week/week_prior))\n over = overflow_week/overflow_prior_week\n no = non_overflow_week/non_overflow_prior_week\n #ecdf_over = ECDF(over)\n #ecdf_no = ECDF(no)\n #prob_over = np.cumsum(ecdf_no(range_1)-ecdf_over(range_1))/np.sum(ecdf_no(range_1)-ecdf_over(range_1))\n #print('Mean Over: ' + str(np.mean(over)))\n #print('Mean No: ' + str(np.mean(no)))\n if np.mean(over) > np.mean(no):\n p_over = scipy.stats.norm.pdf(range_1, np.mean(over), np.std(np.append(over,no, axis=0)))\n p_no = scipy.stats.norm.pdf(range_1, np.mean(no), np.std(np.append(over,no, axis=0)))\n prob_over = p_over/(p_over+p_no)\n prob_over_array.append(prob_over)\n over_array.append(np.median(over))\n no_array.append(np.median(no))\n #thresh.append((np.median(over) + np.median(no))/2)\n stat, p = scipy.stats.ttest_ind(over,no)\n p_array.append(p)\n dates_array.append(dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(crit)))\n print(crit)\n over_array = np.array(over_array)\n no_array = np.array(no_array)\n print('done')\n\n #trace fig\n full_dates_array = []\n for ni in np.arange(0,370,1):\n full_dates_array.append(dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(ni)))\n plt.figure(figsize=(10,6))\n for traject in overflow_traj:\n if (len(traject) == len(full_dates_array)):\n plt.plot(full_dates_array, traject, color='r', alpha=0.1)\n for traject in non_overflow_traj:\n if (len(traject) == len(full_dates_array)):\n plt.plot(full_dates_array, traject, color='b', alpha=0.1)\n #plt.yscale('log')\n plt.hlines(hospital_capacity, xmin=dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(0)), xmax=dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(ni)))\n plt.xlim([dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(0)), dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(ni))])\n #plt.vlines(np.median(crit_day[crit_day != 369]),ymin=1,ymax=30000, linestyle='dashed', alpha=0.4)\n plt.ylabel(region + ' Hospitalized', fontsize=14)\n formatter = mdates.DateFormatter(\"%m-%y\")\n ax = plt.gca()\n ax.xaxis.set_major_formatter(formatter)\n #ax.xaxis.set_major_locator(mdates.DayLocator(interval=7))\n ax.xaxis.set_major_locator(mdates.MonthLocator())\n #plt.xlabel('Simulation Day', fontsize=14)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n #plt.savefig('sims_2.png', dpi=200)\n #plt.savefig('sims_2.pdf')\n print('Proportion of sims that do 
not exceed: ' + str(np.sum(crit_day == 369)/(len(trajectories)/370)))\n print('Number of trajectories: ' + str(len(trajectories)/370))\n\n\n #p-value fig\n plt.figure(figsize=(10,6))\n plt.plot(dates_array, p_array)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n ax = plt.gca()\n formatter = mdates.DateFormatter(\"%m-%d\")\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_major_locator(mdates.DayLocator(interval=7))\n #ax.xaxis.set_major_locator(mdates.MonthLocator())\n plt.yscale('log')\n plt.ylabel('Significance of Difference Between\\nOverflow Scenarios and Non-Overflow Scenarios\\n(p-value of t-test)', fontsize=14)\n plt.savefig('p_val_' + str(notif) + '_' + region + str(hospital_capacity) + '_1.png', dpi=200)\n plt.savefig('p_val_' + str(notif) + '_' + region + str(hospital_capacity) + '_1.pdf')\n pd.DataFrame({'date':dates_array, 'p_val':p_array}).to_csv('p_val_' + str(notif) + '_' + region + str(hospital_capacity) + '_1.csv')\n\n\n #Threshold fig\n thresh_0 = .05\n thresh_1 = .20\n thresh_2 = .50\n thresh_3 = .80\n thresh_4 = .95\n thresh_0_array = []\n thresh_1_array = []\n thresh_2_array = []\n thresh_3_array = []\n thresh_4_array = []\n count = 0\n for prob_array in prob_over_array:\n i = 0\n while prob_array[i] < thresh_0:\n i += 1\n thresh_0_array.append(i)\n i = 0\n while prob_array[i] < thresh_1:\n i += 1\n thresh_1_array.append(i)\n i = 0\n while prob_array[i] < thresh_2:\n i += 1\n thresh_2_array.append(i)\n i = 0\n while prob_array[i] < thresh_3:\n i += 1\n thresh_3_array.append(i)\n i = 0\n while prob_array[i] < thresh_4:\n i += 1\n thresh_4_array.append(i)\n count += 1\n print(count)\n thresh_0_array = np.array(thresh_0_array)\n thresh_1_array = np.array(thresh_1_array)\n thresh_2_array = np.array(thresh_2_array)\n thresh_3_array = np.array(thresh_3_array)\n thresh_4_array = np.array(thresh_4_array)\n\n plt.figure(figsize=(10,6))\n\n plt.plot(dates_array, 100*(range_1[thresh_4_array]-1), alpha=1.0, color='r', label='95% chance of exceeding capacity')\n plt.plot(dates_array, 100*(range_1[thresh_3_array]-1), alpha=0.75, color='r', label='80% chance of exceeding capacity')\n plt.plot(dates_array, 100*(range_1[thresh_2_array]-1), alpha=1.0, color='k', linestyle='dashed', label='50% chance of exceeding capacity')\n plt.plot(dates_array, 100*(range_1[thresh_1_array]-1), alpha=0.50, color='r', label='20% chance of exceeding capacity')\n plt.plot(dates_array, 100*(range_1[thresh_0_array]-1), alpha=0.25, color='r', label='5% chance of exceeding capacity')\n #plt.axvline(dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(193)))\n ax = plt.gca()\n formatter = mdates.DateFormatter(\"%m-%d\")\n ax.xaxis.set_major_formatter(formatter)\n ax.xaxis.set_major_locator(mdates.DayLocator(interval=7))\n overflows_occur = 175\n alpha = 0.02\n for ele in np.sort(crit_day[crit_day != 369].copy()):\n plt.fill_between(x=[dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(ele)), dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(upper_limit+5))], y1=-30, y2=120, color='k', alpha=alpha, hatch='/', linewidth=0) #label='scenarios begin to exceed capacity'\n #plt.fill_between(x=[dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(overflows_occur)), dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(205))], y1=-30, y2=120, color='k', alpha=0.05, hatch='/', linewidth=0) #label='scenarios begin to exceed capacity'\n #plt.fill_between(x=[dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(overflows_occur+2)), 
dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(205))], y1=-30, y2=120, color='k', alpha=0.05, hatch='/', linewidth=0) #label='scenarios begin to exceed capacity'\n plt.xlim([dt.datetime(month=2, day=13, year=2020) + dt.timedelta(days=int(145)),dt.datetime(month=10, day=1, year=2020)])\n plt.ylim([-30,100])\n plt.ylabel('Threshold % change in\\n' + notif + '\\nfrom previous week', fontsize=14)\n plt.xlabel('Date of Assessment', fontsize=14)\n plt.legend(fontsize=12)\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n #plt.savefig('overflow_prob_draft_2.png', dpi=200)\n #plt.savefig('overflow_prob_draft_2.pdf')\n plt.savefig('overflow_prob_draft_' + str(notif) + '_' + region + str(hospital_capacity) + '_1.png', dpi=200)\n plt.savefig('overflow_prob_draft_' + str(notif) + '_' + region + str(hospital_capacity) + '_1.pdf')" ]
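The script above repeatedly derives daily new counts from cumulative columns by first-differencing and prepending a zero so the series keeps its original length. A minimal sketch of that pattern with made-up numbers:

import numpy as np

# Hypothetical cumulative detections for one scenario
cumul = np.array([0.0, 3.0, 7.0, 7.0, 12.0])

# Daily new counts: diff() shortens the series by one, so a leading
# zero is prepended to keep alignment with the original time axis
new = np.append(np.array([0.0]), np.diff(cumul))
assert new.tolist() == [0.0, 3.0, 4.0, 0.0, 5.0]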
[ [ "numpy.median", "matplotlib.dates.DateFormatter", "numpy.mean", "pandas.concat", "pandas.read_csv", "matplotlib.dates.DayLocator", "matplotlib.pyplot.xticks", "pandas.DataFrame", "numpy.arange", "numpy.append", "matplotlib.pyplot.gca", "matplotlib.pyplot.yscale", "numpy.array", "scipy.stats.ttest_ind", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "numpy.diff", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.dates.MonthLocator", "numpy.sum", "matplotlib.pyplot.ylabel" ] ]
conradjones/ngraph-bridge
[ "042011e6653b3ac0983511cf6604f9881cc6ee4b", "042011e6653b3ac0983511cf6604f9881cc6ee4b" ]
[ "test/python/test_tanhgrad.py", "examples/mnist/mnist_deep_simplified_distributed.py" ]
[ "# ==============================================================================\n# Copyright 2018-2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"nGraph TensorFlow bridge AvgPoolBackprop operation test\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pytest\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.ops.gen_math_ops import tanh_grad\nfrom common import NgraphTest\n\n\nclass TestTanhGradOp(NgraphTest):\n\n def test_tanhgrad_2d(self):\n y = constant_op.constant(\n self.generate_random_numbers(30, 1.0, 10.0), shape=[10, 3])\n y_delta = constant_op.constant(\n self.generate_random_numbers(30, 0.0, 10.0), shape=[10, 3])\n\n out = tanh_grad(y, y_delta)\n\n def run_test(sess):\n return sess.run(out)\n\n assert np.allclose(\n self.with_ngraph(run_test), self.without_ngraph(run_test))\n\n def test_tanhgrad_3d(self):\n y = constant_op.constant(\n self.generate_random_numbers(60, 5.0, 30.0), shape=[10, 3, 2])\n y_delta = constant_op.constant(\n self.generate_random_numbers(60, 10.0, 40.0), shape=[10, 3, 2])\n\n out = tanh_grad(y, y_delta)\n\n def run_test(sess):\n return sess.run(out)\n\n assert np.allclose(\n self.with_ngraph(run_test), self.without_ngraph(run_test))\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# This file is derived from\n# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/mnist/mnist_deep.py\n# with changes by Intel using Horovod.\n#\n# Copyright 2017-2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A simplified deep MNIST classifier using convolutional layers.\nThis script has the following changes when compared to mnist_deep.py:\n1. no dropout layer (which disables the rng op)\n2. no truncated normal initialization (which disables the while op)\n\nSee extensive documentation at\nhttps://www.tensorflow.org/get_started/mnist/pros\n\"\"\"\n# Disable linter warnings to maintain consistency with tutorial.\n# pylint: disable=invalid-name\n# pylint: disable=g-bad-import-order\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\nimport tempfile\nimport getpass\nimport time\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nimport tensorflow as tf\nimport ngraph_bridge\nimport horovod.tensorflow as hvd\n\nFLAGS = None\n\n\ndef deepnn(x):\n    \"\"\"deepnn builds the graph for a deep net for classifying digits.\n\n    Args:\n    x: an input tensor with the dimensions (N_examples, 784), where 784 is the\n    number of pixels in a standard MNIST image.\n\n    Returns:\n    A tuple (y, a scalar placeholder). y is a tensor of shape (N_examples, 10), with values\n    equal to the logits of classifying the digit into one of 10 classes (the\n    digits 0-9). The scalar placeholder is meant for the probability of dropout. 
Since we don't\n use a dropout layer in this script, this placeholder is of no relevance and acts as a dummy.\n \"\"\"\n # Reshape to use within a convolutional neural net.\n # Last dimension is for \"features\" - there is only one here, since images are\n # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.\n with tf.name_scope('reshape'):\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n\n # First convolutional layer - maps one grayscale image to 32 feature maps.\n with tf.name_scope('conv1'):\n W_conv1 = weight_variable([5, 5, 1, 32], \"W_conv1\")\n b_conv1 = bias_variable([32])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\n # Pooling layer - downsamples by 2X.\n with tf.name_scope('pool1'):\n h_pool1 = max_pool_2x2(h_conv1)\n\n # Second convolutional layer -- maps 32 feature maps to 64.\n with tf.name_scope('conv2'):\n W_conv2 = weight_variable([5, 5, 32, 64], \"W_conv2\")\n b_conv2 = bias_variable([64])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n\n # Second pooling layer.\n with tf.name_scope('pool2'):\n h_pool2 = max_pool_2x2(h_conv2)\n\n # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image\n # is down to 7x7x64 feature maps -- maps this to 1024 features.\n with tf.name_scope('fc1'):\n W_fc1 = weight_variable([7 * 7 * 64, 1024], \"W_fc1\")\n b_fc1 = bias_variable([1024])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # Map the 1024 features to 10 classes, one for each digit\n with tf.name_scope('fc2'):\n W_fc2 = weight_variable([1024, 10], \"W_fc2\")\n b_fc2 = bias_variable([10])\n\n # y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n y_conv = tf.matmul(h_fc1, W_fc2) + b_fc2\n return y_conv, tf.placeholder(tf.float32)\n\n\ndef conv2d(x, W):\n \"\"\"conv2d returns a 2d convolution layer with full stride.\"\"\"\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n \"\"\"max_pool_2x2 downsamples a feature map by 2X.\"\"\"\n return tf.nn.max_pool(\n x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef weight_variable(shape, name):\n \"\"\"weight_variable generates a weight variable of a given shape.\"\"\"\n weight_var = tf.get_variable(name, shape)\n return weight_var\n\n\ndef bias_variable(shape):\n \"\"\"bias_variable generates a bias variable of a given shape.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\ndef train_mnist_cnn(FLAGS):\n # Config\n config = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False,\n inter_op_parallelism_threads=1)\n config_ngraph_enabled = ngraph_bridge.update_config(config)\n\n # Note: Additional configuration option to boost performance is to set the\n # following environment for the run:\n # OMP_NUM_THREADS=44 KMP_AFFINITY=granularity=fine,scatter\n # The OMP_NUM_THREADS number should correspond to the number of\n # cores in the system\n\n # Import data\n mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n\n # Create the model\n x = tf.placeholder(tf.float32, [None, 784])\n\n # Define loss and optimizer\n y_ = tf.placeholder(tf.float32, [None, 10])\n\n # Build the graph for the deep net\n y_conv, keep_prob = deepnn(x)\n\n with tf.name_scope('loss'):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n labels=y_, logits=y_conv)\n cross_entropy = tf.reduce_mean(cross_entropy)\n\n # add distributed wrapper to \"adam_optimizer\"\n opt = hvd.DistributedOptimizer(tf.train.AdamOptimizer(1e-4))\n 
global_step = tf.contrib.framework.get_or_create_global_step()\n with tf.name_scope('distributed_optimizer'):\n train_step = opt.minimize(cross_entropy, global_step=global_step)\n\n with tf.name_scope('accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n correct_prediction = tf.cast(correct_prediction, tf.float32)\n accuracy = tf.reduce_mean(correct_prediction)\n tf.summary.scalar('Training accuracy', accuracy)\n tf.summary.scalar('Loss function', cross_entropy)\n\n graph_location = \"/tmp/\" + getpass.getuser(\n ) + \"/tensorboard-logs/mnist-convnet\"\n print('Saving graph to: %s' % graph_location)\n\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(graph_location)\n train_writer.add_graph(tf.get_default_graph())\n\n saver = tf.train.Saver()\n train_loops = FLAGS.train_loop_count\n num_test_images = FLAGS.test_image_count\n hooks = [\n # Horovod: BroadcastGlobalVariablesHook broadcasts initial variable states\n # from rank 0 to all other processes. This is necessary to ensure consistent\n # initialization of all workers when training is started with random weights\n # or restored from a checkpoint.\n hvd.BroadcastGlobalVariablesHook(0),\n # Horovod: adjust number of steps based on number of ranks.\n #tf.train.StopAtStepHook(train_loops // hvd.size())\n tf.train.StopAtStepHook(train_loops)\n ]\n\n with tf.train.MonitoredTrainingSession(\n hooks=hooks, config=config_ngraph_enabled) as sess:\n\n step = 0\n start = time.time()\n\n loss_values = []\n test_accuracy = []\n while not sess.should_stop():\n batch = mnist.train.next_batch(FLAGS.batch_size)\n sess.run(train_step, feed_dict={x: batch[0], y_: batch[1]})\n step += 1\n if step % 10 == 0:\n t = time.time()\n if hvd.rank() == 0:\n print('step %d training accuracy %g %g sec to evaluate' %\n (step,\n sess.run(\n accuracy, feed_dict={\n x: batch[0],\n y_: batch[1]\n }), time.time() - t))\n t = time.time()\n _, summary, loss = sess.run([train_step, merged, cross_entropy],\n feed_dict={\n x: batch[0],\n y_: batch[1],\n keep_prob: 0.5\n })\n loss_values.append(loss)\n if hvd.rank() == 0:\n print('step %d, loss %g, %g sec for training step' %\n (step, loss, time.time() - t))\n train_writer.add_summary(summary, step)\n\n if step == (train_loops // hvd.size() - 1) and hvd.rank() == 0:\n x_test = mnist.test.images[:num_test_images]\n y_test = mnist.test.labels[:num_test_images]\n test_acc = sess.run(accuracy, feed_dict={\n x: x_test,\n y_: y_test\n })\n print('test accuracy: ', test_acc)\n test_accuracy.append(test_acc)\n\n print(\"Training finished. Running test\")\n saver.save(sess, FLAGS.model_dir)\n return loss_values, test_accuracy\n\n\ndef main(_):\n train_mnist_cnn(FLAGS)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--data_dir',\n type=str,\n default='/tmp/tensorflow/mnist/input_data',\n help='Directory where input data is stored')\n\n parser.add_argument(\n '--train_loop_count',\n type=int,\n default=1000,\n help='Number of training iterations')\n\n parser.add_argument('--batch_size', type=int, default=50, help='Batch Size')\n\n parser.add_argument(\n '--test_image_count',\n type=int,\n default=None,\n help=\"Number of test images to evaluate on\")\n\n parser.add_argument(\n '--model_dir',\n type=str,\n default='./mnist_trained/',\n help='enter model dir')\n\n FLAGS, unparsed = parser.parse_known_args()\n hvd.init()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n" ]
[ [ "tensorflow.python.ops.gen_math_ops.tanh_grad" ], [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.conv2d", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.train.MonitoredTrainingSession", "tensorflow.cast", "tensorflow.get_default_graph", "tensorflow.argmax", "tensorflow.Variable", "tensorflow.train.Saver", "tensorflow.constant", "tensorflow.ConfigProto", "tensorflow.app.run", "tensorflow.nn.max_pool", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.contrib.framework.get_or_create_global_step", "tensorflow.placeholder", "tensorflow.get_variable", "tensorflow.name_scope", "tensorflow.summary.merge_all", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.train.StopAtStepHook", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean" ] ]
apoorvanand/Deep-Virtual-Try-On
[ "56d536d46913afb8504ad3336697f2adf7dc965c" ]
[ "lib/geometric_matching_multi_gpu.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn import init\nfrom torchvision import models\nimport os\nimport torch.nn.functional as F\nimport numpy as np\nimport sys\n\nsys.path.append('..')\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('Linear') != -1:\n init.normal(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm2d') != -1:\n init.normal_(m.weight.data, 1.0, 0.02)\n init.constant_(m.bias.data, 0.0)\n\n\ndef weights_init_xavier(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n init.xavier_normal_(m.weight.data, gain=0.02)\n elif classname.find('Linear') != -1:\n init.xavier_normal_(m.weight.data, gain=0.02)\n elif classname.find('BatchNorm2d') != -1:\n init.normal_(m.weight.data, 1.0, 0.02)\n init.constant_(m.bias.data, 0.0)\n\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif classname.find('Linear') != -1:\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif classname.find('BatchNorm2d') != -1:\n init.normal_(m.weight.data, 1.0, 0.02)\n init.constant_(m.bias.data, 0.0)\n\n\ndef init_weights(net, init_type='normal'):\n print('initialization method [%s]' % init_type)\n if init_type == 'normal':\n net.apply(weights_init_normal)\n elif init_type == 'xavier':\n net.apply(weights_init_xavier)\n elif init_type == 'kaiming':\n net.apply(weights_init_kaiming)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n\nclass FeatureExtraction(nn.Module):\n def __init__(self, input_nc, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(FeatureExtraction, self).__init__()\n downconv = nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1)\n model = [downconv, nn.ReLU(True), norm_layer(ngf)]\n for i in range(n_layers):\n in_ngf = 2**i * ngf if 2**i * ngf < 512 else 512\n out_ngf = 2**(i+1) * ngf if 2**i * ngf < 512 else 512\n downconv = nn.Conv2d(in_ngf, out_ngf, kernel_size=4, stride=2, padding=1)\n model += [downconv, nn.ReLU(True)]\n model += [norm_layer(out_ngf)]\n model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]\n model += [norm_layer(512)]\n model += [nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nn.ReLU(True)]\n \n self.model = nn.Sequential(*model)\n init_weights(self.model, init_type='normal')\n\n def forward(self, x):\n return self.model(x)\n\nclass FeatureL2Norm(torch.nn.Module):\n def __init__(self):\n super(FeatureL2Norm, self).__init__()\n\n def forward(self, feature):\n epsilon = 1e-6\n norm = torch.pow(torch.sum(torch.pow(feature,2),1)+epsilon,0.5).unsqueeze(1).expand_as(feature)\n return torch.div(feature,norm)\n \nclass FeatureCorrelation(nn.Module):\n def __init__(self):\n super(FeatureCorrelation, self).__init__()\n \n def forward(self, feature_A, feature_B):\n b,c,h,w = feature_A.size()\n # reshape features for matrix multiplication\n feature_A = feature_A.transpose(2,3).contiguous().view(b,c,h*w)\n feature_B = feature_B.view(b,c,h*w).transpose(1,2)\n # perform matrix mult.\n feature_mul = torch.bmm(feature_B,feature_A)\n correlation_tensor = feature_mul.view(b,h,w,h*w).transpose(2,3).transpose(1,2)\n return correlation_tensor\n \nclass FeatureRegression(nn.Module):\n def __init__(self, input_nc=512,output_dim=6, use_cuda=True):\n super(FeatureRegression, self).__init__()\n 
self.conv = nn.Sequential(\n nn.Conv2d(input_nc, 512, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True),\n nn.Conv2d(512, 256, kernel_size=4, stride=2, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 128, kernel_size=3, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n )\n self.linear = nn.Linear(64 * 4 * 3, output_dim)\n self.tanh = nn.Tanh()\n # if use_cuda:\n # self.conv.cuda()\n # self.linear.cuda()\n # self.tanh.cuda()\n\n def forward(self, x):\n x = self.conv(x)\n x = x.reshape(x.size(0), -1)\n x = self.linear(x)\n x = self.tanh(x)\n return x\n\nclass AffineGridGen(nn.Module):\n def __init__(self, out_h=256, out_w=192, out_ch = 3):\n super(AffineGridGen, self).__init__() \n self.out_h = out_h\n self.out_w = out_w\n self.out_ch = out_ch\n \n def forward(self, theta):\n theta = theta.contiguous()\n batch_size = theta.size()[0]\n out_size = torch.Size((batch_size,self.out_ch,self.out_h,self.out_w))\n return F.affine_grid(theta, out_size)\n \nclass TpsGridGen(nn.Module):\n def __init__(self, out_h=256, out_w=192, use_regular_grid=True, grid_size=3, reg_factor=0, use_cuda=True):\n super(TpsGridGen, self).__init__()\n self.out_h, self.out_w = out_h, out_w\n self.reg_factor = reg_factor\n self.use_cuda = use_cuda\n\n # create grid in numpy\n self.grid = np.zeros([self.out_h, self.out_w, 3], dtype=np.float32)\n # sampling grid with dim-0 coords (Y)\n self.grid_X,self.grid_Y = np.meshgrid(np.linspace(-1,1,out_w),np.linspace(-1,1,out_h))\n # grid_X,grid_Y: size [1,H,W,1,1]\n self.grid_X = torch.FloatTensor(self.grid_X).unsqueeze(0).unsqueeze(3)\n self.grid_Y = torch.FloatTensor(self.grid_Y).unsqueeze(0).unsqueeze(3)\n if use_cuda:\n self.grid_X = self.grid_X.cuda()\n self.grid_Y = self.grid_Y.cuda()\n\n # initialize regular grid for control points P_i\n if use_regular_grid:\n axis_coords = np.linspace(-1,1,grid_size)\n self.N = grid_size*grid_size\n P_Y,P_X = np.meshgrid(axis_coords,axis_coords)\n P_X = np.reshape(P_X,(-1,1)) # size (N,1)\n P_Y = np.reshape(P_Y,(-1,1)) # size (N,1)\n P_X = torch.FloatTensor(P_X)\n P_Y = torch.FloatTensor(P_Y)\n self.P_X_base = P_X.clone()\n self.P_Y_base = P_Y.clone()\n self.Li = self.compute_L_inverse(P_X,P_Y).unsqueeze(0)\n self.P_X = P_X.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4)\n self.P_Y = P_Y.unsqueeze(2).unsqueeze(3).unsqueeze(4).transpose(0,4)\n if use_cuda:\n self.P_X = self.P_X.cuda()\n self.P_Y = self.P_Y.cuda()\n self.P_X_base = self.P_X_base.cuda()\n self.P_Y_base = self.P_Y_base.cuda()\n\n def forward(self, theta):\n gpu_id = theta.get_device()\n self.grid_X = self.grid_X.to(gpu_id)\n self.grid_Y = self.grid_Y.to(gpu_id)\n self.P_X = self.P_X.to(gpu_id)\n self.P_Y = self.P_Y.to(gpu_id)\n self.P_X_base = self.P_X_base.to(gpu_id)\n self.P_Y_base = self.P_Y_base.to(gpu_id)\n self.Li = self.Li.to(gpu_id) \n warped_grid = self.apply_transformation(theta,torch.cat((self.grid_X,self.grid_Y),3))\n return warped_grid\n \n def compute_L_inverse(self,X,Y):\n N = X.size()[0] # num of points (along dim 0)\n # construct matrix K\n Xmat = X.expand(N,N)\n Ymat = Y.expand(N,N)\n P_dist_squared = torch.pow(Xmat-Xmat.transpose(0,1),2)+torch.pow(Ymat-Ymat.transpose(0,1),2)\n P_dist_squared[P_dist_squared==0]=1 # make diagonal 1 to avoid NaN in log computation\n K = torch.mul(P_dist_squared,torch.log(P_dist_squared))\n # construct matrix L\n O = torch.FloatTensor(N,1).fill_(1)\n Z = 
torch.FloatTensor(3,3).fill_(0) \n P = torch.cat((O,X,Y),1)\n L = torch.cat((torch.cat((K,P),1),torch.cat((P.transpose(0,1),Z),1)),0)\n self.Li = torch.inverse(L)\n if self.use_cuda:\n self.Li = self.Li.cuda()\n return self.Li\n \n def apply_transformation(self,theta,points):\n if theta.dim()==2:\n theta = theta.unsqueeze(2).unsqueeze(3)\n # points should be in the [B,H,W,2] format,\n # where points[:,:,:,0] are the X coords \n # and points[:,:,:,1] are the Y coords \n \n # input are the corresponding control points P_i\n batch_size = theta.size()[0]\n # split theta into point coordinates\n Q_X=theta[:,:self.N,:,:].squeeze(3)\n Q_Y=theta[:,self.N:,:,:].squeeze(3)\n Q_X = Q_X + self.P_X_base.expand_as(Q_X)\n Q_Y = Q_Y + self.P_Y_base.expand_as(Q_Y)\n \n # get spatial dimensions of points\n points_b = points.size()[0]\n points_h = points.size()[1]\n points_w = points.size()[2]\n \n # repeat pre-defined control points along spatial dimensions of points to be transformed\n P_X = self.P_X.expand((1,points_h,points_w,1,self.N))\n P_Y = self.P_Y.expand((1,points_h,points_w,1,self.N))\n \n # compute weights for non-linear part\n W_X = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_X)\n W_Y = torch.bmm(self.Li[:,:self.N,:self.N].expand((batch_size,self.N,self.N)),Q_Y)\n # reshape\n # W_X,W_Y: size [B,H,W,1,N]\n W_X = W_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)\n W_Y = W_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)\n # compute weights for affine part\n A_X = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_X)\n A_Y = torch.bmm(self.Li[:,self.N:,:self.N].expand((batch_size,3,self.N)),Q_Y)\n # reshape\n # A_X,A_Y: size [B,H,W,1,3]\n A_X = A_X.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)\n A_Y = A_Y.unsqueeze(3).unsqueeze(4).transpose(1,4).repeat(1,points_h,points_w,1,1)\n \n # compute distance P_i - (grid_X,grid_Y)\n # grid is expanded in point dim 4, but not in batch dim 0, as points P_X,P_Y are fixed for all batch\n points_X_for_summation = points[:,:,:,0].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,0].size()+(1,self.N))\n points_Y_for_summation = points[:,:,:,1].unsqueeze(3).unsqueeze(4).expand(points[:,:,:,1].size()+(1,self.N))\n \n if points_b==1:\n delta_X = points_X_for_summation-P_X\n delta_Y = points_Y_for_summation-P_Y\n else:\n # use expanded P_X,P_Y in batch dimension\n delta_X = points_X_for_summation-P_X.expand_as(points_X_for_summation)\n delta_Y = points_Y_for_summation-P_Y.expand_as(points_Y_for_summation)\n \n dist_squared = torch.pow(delta_X,2)+torch.pow(delta_Y,2)\n # U: size [1,H,W,1,N]\n dist_squared[dist_squared==0]=1 # avoid NaN in log computation\n U = torch.mul(dist_squared,torch.log(dist_squared)) \n \n # expand grid in batch dimension if necessary\n points_X_batch = points[:,:,:,0].unsqueeze(3)\n points_Y_batch = points[:,:,:,1].unsqueeze(3)\n if points_b==1:\n points_X_batch = points_X_batch.expand((batch_size,)+points_X_batch.size()[1:])\n points_Y_batch = points_Y_batch.expand((batch_size,)+points_Y_batch.size()[1:])\n \n points_X_prime = A_X[:,:,:,:,0]+ \\\n torch.mul(A_X[:,:,:,:,1],points_X_batch) + \\\n torch.mul(A_X[:,:,:,:,2],points_Y_batch) + \\\n torch.sum(torch.mul(W_X,U.expand_as(W_X)),4)\n \n points_Y_prime = A_Y[:,:,:,:,0]+ \\\n torch.mul(A_Y[:,:,:,:,1],points_X_batch) + \\\n torch.mul(A_Y[:,:,:,:,2],points_Y_batch) + \\\n torch.sum(torch.mul(W_Y,U.expand_as(W_Y)),4)\n \n return 
torch.cat((points_X_prime,points_Y_prime),3)\n \n# Defines the Unet generator.\n# |num_downs|: number of downsamplings in UNet. For example,\n# if |num_downs| == 7, image of size 128x128 will become of size 1x1\n# at the bottleneck\nclass UnetGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, num_downs, ngf=64,\n norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UnetGenerator, self).__init__()\n # construct unet structure\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)\n for i in range(num_downs - 5):\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)\n unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)\n\n self.model = unet_block\n\n def forward(self, input):\n return self.model(input)\n\n\n# Defines the submodule with skip connection.\n# X -------------------identity---------------------- X\n# |-- downsampling -- |submodule| -- upsampling --|\nclass UnetSkipConnectionBlock(nn.Module):\n def __init__(self, outer_nc, inner_nc, input_nc=None,\n submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UnetSkipConnectionBlock, self).__init__()\n self.outermost = outermost\n use_bias = norm_layer == nn.InstanceNorm2d\n\n if input_nc is None:\n input_nc = outer_nc\n downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if outermost:\n upsample = nn.Upsample(scale_factor=2, mode='bilinear')\n upconv = nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)\n down = [downconv]\n up = [uprelu, upsample, upconv, upnorm]\n model = down + [submodule] + up\n elif innermost:\n upsample = nn.Upsample(scale_factor=2, mode='bilinear')\n upconv = nn.Conv2d(inner_nc, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)\n down = [downrelu, downconv]\n up = [uprelu, upsample, upconv, upnorm]\n model = down + up\n else:\n upsample = nn.Upsample(scale_factor=2, mode='bilinear')\n upconv = nn.Conv2d(inner_nc*2, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upsample, upconv, upnorm]\n\n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] + up\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n if self.outermost:\n return self.model(x)\n else:\n return torch.cat([x, self.model(x)], 1)\n\nclass Vgg19(nn.Module):\n def __init__(self, requires_grad=False):\n super(Vgg19, self).__init__()\n vgg_pretrained_features = models.vgg19(pretrained=True).features\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n self.slice5 = torch.nn.Sequential()\n for x in range(2):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n 
for x in range(2, 7):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(7, 12):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(12, 21):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n for x in range(21, 30):\n self.slice5.add_module(str(x), vgg_pretrained_features[x])\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n \n def forward(self, X):\n h_relu1 = self.slice1(X)\n h_relu2 = self.slice2(h_relu1)\n h_relu3 = self.slice3(h_relu2)\n h_relu4 = self.slice4(h_relu3)\n h_relu5 = self.slice5(h_relu4)\n out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]\n return out\n\nclass VGGLoss(nn.Module):\n def __init__(self, layids = None):\n super(VGGLoss, self).__init__()\n self.vgg = Vgg19()\n self.vgg.cuda()\n self.criterion = nn.L1Loss()\n self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]\n self.layids = layids\n\n def forward(self, x, y):\n x_vgg, y_vgg = self.vgg(x), self.vgg(y)\n loss = 0\n if self.layids is None:\n self.layids = list(range(len(x_vgg)))\n for i in self.layids:\n loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())\n return loss\n\nclass GMM(nn.Module):\n \"\"\" Geometric Matching Module\n \"\"\"\n def __init__(self, opt):\n super(GMM, self).__init__()\n self.extractionA = FeatureExtraction(22, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d) \n self.extractionB = FeatureExtraction(3, ngf=64, n_layers=3, norm_layer=nn.BatchNorm2d)\n self.l2norm = FeatureL2Norm()\n self.correlation = FeatureCorrelation()\n self.regression = FeatureRegression(input_nc=192, output_dim=2*opt.grid_size**2, use_cuda=True)\n self.gridGen = TpsGridGen(opt.fine_height, opt.fine_width, use_cuda=True, grid_size=opt.grid_size)\n \n def forward(self, inputA, inputB):\n featureA = self.extractionA(inputA)\n featureB = self.extractionB(inputB)\n featureA = self.l2norm(featureA)\n featureB = self.l2norm(featureB)\n correlation = self.correlation(featureA, featureB)\n\n theta = self.regression(correlation)\n grid = self.gridGen(theta)\n return grid, theta\n\ndef save_checkpoint(model, save_path):\n if not os.path.exists(os.path.dirname(save_path)):\n os.makedirs(os.path.dirname(save_path))\n\n torch.save(model.cpu().state_dict(), save_path)\n model.cuda()\n\ndef load_checkpoint(model, checkpoint_path):\n if not os.path.exists(checkpoint_path):\n return\n model.load_state_dict(torch.load(checkpoint_path))\n model.cuda()\n\nif __name__ == '__main__':\n import config\n # in1 = torch.rand(4,3,256,192).cuda()\n # in2 = torch.rand(4,3,256,192).cuda()\n # cfg = config.Config().parse()\n # gmm = GMM(cfg)\n # gmm.cuda()\n # out = gmm(in1, in2)\n\n tps = TpsGridGen(256,192,True)\n # TPS theta needs 2 * grid_size**2 = 18 values for the default grid_size=3,\n # and forward() moves the grid buffers to theta's device, so theta must be on GPU\n theta = torch.randn(1,18).cuda()\n grid = tps(theta)\n print(grid.shape)\n\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.nn.LeakyReLU", "torch.nn.init.kaiming_normal_", "torch.inverse", "torch.nn.BatchNorm2d", "torch.bmm", "torch.load", "torch.nn.functional.affine_grid", "torch.Size", "torch.mul", "torch.nn.init.constant_", "torch.FloatTensor", "torch.nn.init.normal_", "torch.div", "torch.nn.init.xavier_normal_", "numpy.reshape", "numpy.zeros", "torch.nn.Sequential", "torch.nn.Tanh", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.init.normal", "torch.log", "torch.pow", "torch.nn.Dropout", "torch.nn.L1Loss", "torch.nn.Upsample", "numpy.linspace", "numpy.meshgrid", "torch.randn" ] ]
JJUNGYUN/tensorflow-fast-style-transfer
[ "faf8608399b14de008edf533169b2cf25c811dbc" ]
[ "utils.py" ]
[ "import numpy as np\nimport PIL.Image\nimport os\nimport scipy\nfrom matplotlib.pyplot import imread, imsave\nfrom skimage.transform import resize\n\n\n\"\"\"Helper-functions to load MSCOCO DB\"\"\"\n# borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/utils.py\ndef get_img(src, img_size=False):\n img = imread(src)\n if not (len(img.shape) == 3 and img.shape[2] == 3):\n img = np.dstack((img,img,img))\n if img_size != False:\n img = resize(img, img_size)\n return img\n\ndef get_files(img_dir):\n files = list_files(img_dir)\n return list(map(lambda x: os.path.join(img_dir,x), files))\n\ndef list_files(in_path):\n files = []\n for (dirpath, dirnames, filenames) in os.walk(in_path):\n files.extend(filenames)\n break\n return files\n\n\"\"\"Helper-functions for image manipulation\"\"\"\n# borrowed from https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/15_Style_Transfer.ipynb\n\n# This function loads an image and returns it as a numpy array of floating-points.\n# The image can be automatically resized so the largest of the height or width equals max_size.\n# or resized to the given shape\ndef load_image(filename, shape=None, max_size=None):\n image = PIL.Image.open(filename)\n\n if max_size is not None:\n # Calculate the appropriate rescale-factor for\n # ensuring a max height and width, while keeping\n # the proportion between them.\n factor = float(max_size) / np.max(image.size)\n\n # Scale the image's height and width.\n size = np.array(image.size) * factor\n\n # The size is now floating-point because it was scaled.\n # But PIL requires the size to be integers.\n size = size.astype(int)\n\n # Resize the image.\n image = resize(size, PIL.Image.LANCZOS) # PIL.Image.LANCZOS is one of resampling filter\n\n if shape is not None:\n image = resize(shape, PIL.Image.LANCZOS) # PIL.Image.LANCZOS is one of resampling filter\n\n # Convert to numpy floating-point array.\n return np.float32(image)\n\n# Save an image as a jpeg-file.\n# The image is given as a numpy array with pixel-values between 0 and 255.\ndef save_image(image, filename):\n # Ensure the pixel-values are between 0 and 255.\n image = np.clip(image, 0.0, 255.0)\n\n # Convert to bytes.\n image = image.astype(np.uint8)\n\n # Write the image-file in jpeg-format.\n with open(filename, 'wb') as file:\n PIL.Image.fromarray(image).save(file, 'jpeg')" ]
[ [ "numpy.max", "numpy.array", "numpy.float32", "numpy.clip", "numpy.dstack", "matplotlib.pyplot.imread" ] ]
vfdev-5/POT
[ "e757b75976ece1e6e53e655852b9f8863e7b6f5a" ]
[ "test/test_da.py" ]
[ "\"\"\"Tests for module da on Domain Adaptation \"\"\"\n\n# Author: Remi Flamary <remi.flamary@unice.fr>\n#\n# License: MIT License\n\nimport numpy as np\nfrom numpy.testing.utils import assert_allclose, assert_equal\n\nimport ot\nfrom ot.datasets import make_data_classif\nfrom ot.utils import unif\n\n\ndef test_sinkhorn_lpl1_transport_class():\n \"\"\"test_sinkhorn_transport\n \"\"\"\n\n ns = 150\n nt = 200\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n\n otda = ot.da.SinkhornLpl1Transport()\n\n # test its computed\n otda.fit(Xs=Xs, ys=ys, Xt=Xt)\n assert hasattr(otda, \"cost_\")\n assert hasattr(otda, \"coupling_\")\n\n # test dimensions of coupling\n assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n Xs_new, _ = make_data_classif('3gauss', ns + 1)\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # test inverse transform\n transp_Xt = otda.inverse_transform(Xt=Xt)\n assert_equal(transp_Xt.shape, Xt.shape)\n\n Xt_new, _ = make_data_classif('3gauss2', nt + 1)\n transp_Xt_new = otda.inverse_transform(Xt=Xt_new)\n\n # check that the oos method is working\n assert_equal(transp_Xt_new.shape, Xt_new.shape)\n\n # test fit_transform\n transp_Xs = otda.fit_transform(Xs=Xs, ys=ys, Xt=Xt)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n # test unsupervised vs semi-supervised mode\n otda_unsup = ot.da.SinkhornLpl1Transport()\n otda_unsup.fit(Xs=Xs, ys=ys, Xt=Xt)\n n_unsup = np.sum(otda_unsup.cost_)\n\n otda_semi = ot.da.SinkhornLpl1Transport()\n otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)\n assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n n_semisup = np.sum(otda_semi.cost_)\n\n # check that the cost matrix norms are indeed different\n assert n_unsup != n_semisup, \"semisupervised mode not working\"\n\n # check that the coupling forbids mass transport between labeled source\n # and labeled target samples\n mass_semi = np.sum(\n otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max])\n assert mass_semi == 0, \"semisupervised mode not working\"\n\n\ndef test_sinkhorn_l1l2_transport_class():\n \"\"\"test_sinkhorn_transport\n \"\"\"\n\n ns = 150\n nt = 200\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n\n otda = ot.da.SinkhornL1l2Transport()\n\n # test its computed\n otda.fit(Xs=Xs, ys=ys, Xt=Xt)\n assert hasattr(otda, \"cost_\")\n assert hasattr(otda, \"coupling_\")\n assert hasattr(otda, \"log_\")\n\n # test dimensions of coupling\n assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n Xs_new, _ = make_data_classif('3gauss', ns + 1)\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is 
working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # test inverse transform\n transp_Xt = otda.inverse_transform(Xt=Xt)\n assert_equal(transp_Xt.shape, Xt.shape)\n\n Xt_new, _ = make_data_classif('3gauss2', nt + 1)\n transp_Xt_new = otda.inverse_transform(Xt=Xt_new)\n\n # check that the oos method is working\n assert_equal(transp_Xt_new.shape, Xt_new.shape)\n\n # test fit_transform\n transp_Xs = otda.fit_transform(Xs=Xs, ys=ys, Xt=Xt)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n # test unsupervised vs semi-supervised mode\n otda_unsup = ot.da.SinkhornL1l2Transport()\n otda_unsup.fit(Xs=Xs, ys=ys, Xt=Xt)\n n_unsup = np.sum(otda_unsup.cost_)\n\n otda_semi = ot.da.SinkhornL1l2Transport()\n otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)\n assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n n_semisup = np.sum(otda_semi.cost_)\n\n # check that the cost matrix norms are indeed different\n assert n_unsup != n_semisup, \"semisupervised mode not working\"\n\n # check that the coupling forbids mass transport between labeled source\n # and labeled target samples\n mass_semi = np.sum(\n otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max])\n mass_semi = otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max]\n assert_allclose(mass_semi, np.zeros_like(mass_semi),\n rtol=1e-9, atol=1e-9)\n\n # check everything runs well with log=True\n otda = ot.da.SinkhornL1l2Transport(log=True)\n otda.fit(Xs=Xs, ys=ys, Xt=Xt)\n assert len(otda.log_.keys()) != 0\n\n\ndef test_sinkhorn_transport_class():\n \"\"\"test_sinkhorn_transport\n \"\"\"\n\n ns = 150\n nt = 200\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n\n otda = ot.da.SinkhornTransport()\n\n # test its computed\n otda.fit(Xs=Xs, Xt=Xt)\n assert hasattr(otda, \"cost_\")\n assert hasattr(otda, \"coupling_\")\n assert hasattr(otda, \"log_\")\n\n # test dimensions of coupling\n assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n Xs_new, _ = make_data_classif('3gauss', ns + 1)\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # test inverse transform\n transp_Xt = otda.inverse_transform(Xt=Xt)\n assert_equal(transp_Xt.shape, Xt.shape)\n\n Xt_new, _ = make_data_classif('3gauss2', nt + 1)\n transp_Xt_new = otda.inverse_transform(Xt=Xt_new)\n\n # check that the oos method is working\n assert_equal(transp_Xt_new.shape, Xt_new.shape)\n\n # test fit_transform\n transp_Xs = otda.fit_transform(Xs=Xs, Xt=Xt)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n # test unsupervised vs semi-supervised mode\n otda_unsup = ot.da.SinkhornTransport()\n otda_unsup.fit(Xs=Xs, Xt=Xt)\n n_unsup = np.sum(otda_unsup.cost_)\n\n otda_semi = ot.da.SinkhornTransport()\n otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)\n assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n n_semisup = np.sum(otda_semi.cost_)\n\n # check that the cost matrix norms are indeed different\n assert n_unsup != n_semisup, \"semisupervised mode not working\"\n\n # check that the coupling forbids mass transport between labeled source\n # and labeled 
target samples\n mass_semi = np.sum(\n otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max])\n assert mass_semi == 0, \"semisupervised mode not working\"\n\n # check everything runs well with log=True\n otda = ot.da.SinkhornTransport(log=True)\n otda.fit(Xs=Xs, ys=ys, Xt=Xt)\n assert len(otda.log_.keys()) != 0\n\n\ndef test_emd_transport_class():\n \"\"\"test_emd_transport\n \"\"\"\n\n ns = 150\n nt = 200\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n\n otda = ot.da.EMDTransport()\n\n # test its computed\n otda.fit(Xs=Xs, Xt=Xt)\n assert hasattr(otda, \"cost_\")\n assert hasattr(otda, \"coupling_\")\n\n # test dimensions of coupling\n assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n Xs_new, _ = make_data_classif('3gauss', ns + 1)\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # test inverse transform\n transp_Xt = otda.inverse_transform(Xt=Xt)\n assert_equal(transp_Xt.shape, Xt.shape)\n\n Xt_new, _ = make_data_classif('3gauss2', nt + 1)\n transp_Xt_new = otda.inverse_transform(Xt=Xt_new)\n\n # check that the oos method is working\n assert_equal(transp_Xt_new.shape, Xt_new.shape)\n\n # test fit_transform\n transp_Xs = otda.fit_transform(Xs=Xs, Xt=Xt)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n # test unsupervised vs semi-supervised mode\n otda_unsup = ot.da.EMDTransport()\n otda_unsup.fit(Xs=Xs, ys=ys, Xt=Xt)\n n_unsup = np.sum(otda_unsup.cost_)\n\n otda_semi = ot.da.EMDTransport()\n otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)\n assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))\n n_semisup = np.sum(otda_semi.cost_)\n\n # check that the cost matrix norms are indeed different\n assert n_unsup != n_semisup, \"semisupervised mode not working\"\n\n # check that the coupling forbids mass transport between labeled source\n # and labeled target samples\n mass_semi = np.sum(\n otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max])\n mass_semi = otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max]\n\n # we need to use a small tolerance here, otherwise the test breaks\n assert_allclose(mass_semi, np.zeros_like(mass_semi),\n rtol=1e-2, atol=1e-2)\n\n\ndef test_mapping_transport_class():\n \"\"\"test_mapping_transport\n \"\"\"\n\n ns = 60\n nt = 120\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n Xs_new, _ = make_data_classif('3gauss', ns + 1)\n\n ##########################################################################\n # kernel == linear mapping tests\n ##########################################################################\n\n # check computation and dimensions if bias == False\n otda = ot.da.MappingTransport(kernel=\"linear\", bias=False)\n otda.fit(Xs=Xs, Xt=Xt)\n assert hasattr(otda, \"coupling_\")\n assert hasattr(otda, \"mapping_\")\n assert hasattr(otda, \"log_\")\n\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.mapping_.shape, ((Xs.shape[1], Xt.shape[1])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n 
assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # check computation and dimensions if bias == True\n otda = ot.da.MappingTransport(kernel=\"linear\", bias=True)\n otda.fit(Xs=Xs, Xt=Xt)\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.mapping_.shape, ((Xs.shape[1] + 1, Xt.shape[1])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n ##########################################################################\n # kernel == gaussian mapping tests\n ##########################################################################\n\n # check computation and dimensions if bias == False\n otda = ot.da.MappingTransport(kernel=\"gaussian\", bias=False)\n otda.fit(Xs=Xs, Xt=Xt)\n\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.mapping_.shape, ((Xs.shape[0], Xt.shape[1])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # check computation and dimensions if bias == True\n otda = ot.da.MappingTransport(kernel=\"gaussian\", bias=True)\n otda.fit(Xs=Xs, Xt=Xt)\n assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))\n assert_equal(otda.mapping_.shape, ((Xs.shape[0] + 1, Xt.shape[1])))\n\n # test margin constraints\n mu_s = unif(ns)\n mu_t = unif(nt)\n assert_allclose(\n np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)\n assert_allclose(\n np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)\n\n # test transform\n transp_Xs = otda.transform(Xs=Xs)\n assert_equal(transp_Xs.shape, Xs.shape)\n\n transp_Xs_new = otda.transform(Xs_new)\n\n # check that the oos method is working\n assert_equal(transp_Xs_new.shape, Xs_new.shape)\n\n # check everything runs well with log=True\n otda = ot.da.MappingTransport(kernel=\"gaussian\", log=True)\n otda.fit(Xs=Xs, Xt=Xt)\n assert len(otda.log_.keys()) != 0\n\n\ndef test_linear_mapping():\n\n ns = 150\n nt = 200\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n\n A, b = ot.da.OT_mapping_linear(Xs, Xt)\n\n Xst = Xs.dot(A) + b\n\n Ct = np.cov(Xt.T)\n Cst = np.cov(Xst.T)\n\n np.testing.assert_allclose(Ct, Cst, rtol=1e-2, atol=1e-2)\n\n\ndef test_linear_mapping_class():\n\n ns = 150\n nt = 200\n\n Xs, ys = make_data_classif('3gauss', ns)\n Xt, yt = make_data_classif('3gauss2', nt)\n\n otmap = ot.da.LinearTransport()\n\n otmap.fit(Xs=Xs, Xt=Xt)\n assert hasattr(otmap, 
\"A_\")\n assert hasattr(otmap, \"B_\")\n assert hasattr(otmap, \"A1_\")\n assert hasattr(otmap, \"B1_\")\n\n Xst = otmap.transform(Xs=Xs)\n\n Ct = np.cov(Xt.T)\n Cst = np.cov(Xst.T)\n\n np.testing.assert_allclose(Ct, Cst, rtol=1e-2, atol=1e-2)\n" ]
[ [ "numpy.testing.utils.assert_equal", "numpy.testing.assert_allclose", "numpy.zeros_like", "numpy.cov", "numpy.sum" ] ]
TropComplique/bicycle-gan
[ "4bc8f4cdbe138e23c8a02c408cfb8e2ff7dfe6ab" ]
[ "networks/encoder.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass ResNetEncoder(nn.Module):\n\n def __init__(self, in_channels, out_dimension, depth=48, num_blocks=5):\n \"\"\"\n Arguments:\n in_channels: an integer.\n out_channels: an integer.\n depth: an integer.\n num_blocks: an integer, number of resnet blocks.\n \"\"\"\n super(ResNetEncoder, self).__init__()\n\n layers = [\n nn.Conv2d(in_channels, depth, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True),\n ]\n\n for n in range(1, num_blocks + 1):\n in_depth = depth * min(4, n)\n out_depth = depth * min(4, n + 1)\n layers.append(BasicBlock(in_depth, out_depth))\n\n # so, after all these layers the\n # input is downsampled by 2**(1 + num_blocks)\n\n layers.extend([\n nn.LeakyReLU(0.2, inplace=True),\n nn.AdaptiveAvgPool2d(1)\n ])\n\n self.layers = nn.Sequential(*layers)\n self.fc1 = nn.Linear(out_depth, out_dimension)\n self.fc2 = nn.Linear(out_depth, out_dimension)\n\n def forward(self, x):\n \"\"\"\n I assume that h and w are\n divisible by 2**(1 + num_blocks).\n\n The input tensor represents\n images with pixel values in [0, 1] range.\n\n Arguments:\n x: a float tensor with shape [b, in_channels, h, w].\n Returns:\n two float tensors with shape [b, out_dimension].\n \"\"\"\n x = 2.0 * x - 1.0\n x = self.layers(x) # shape [b, out_channels, 1, 1]\n x = x.view(x.size(0), -1)\n\n mean = self.fc1(x)\n logvar = self.fc2(x)\n return mean, logvar\n\n\nclass BasicBlock(nn.Module):\n\n def __init__(self, in_channels, out_channels):\n super(BasicBlock, self).__init__()\n\n self.layers = nn.Sequential(\n nn.InstanceNorm2d(in_channels, affine=True),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, bias=False),\n nn.InstanceNorm2d(in_channels, affine=True),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False),\n nn.AvgPool2d(kernel_size=2, stride=2)\n )\n\n self.shortcut = nn.Sequential(\n nn.AvgPool2d(kernel_size=2, stride=2),\n nn.Conv2d(in_channels, out_channels, kernel_size=1)\n )\n\n def forward(self, x):\n return self.layers(x) + self.shortcut(x)\n" ]
[ [ "torch.nn.Linear", "torch.nn.Sequential", "torch.nn.AvgPool2d", "torch.nn.LeakyReLU", "torch.nn.Conv2d", "torch.nn.InstanceNorm2d", "torch.nn.AdaptiveAvgPool2d" ] ]
Rintarooo/MDVRP_MHA
[ "f196f1c99c3e4efa1ab6d75f4af77685afe4d191" ]
[ "Torch/Nets/decoder_utils.py" ]
[ "import torch\nimport torch.nn as nn\n\nclass Env():\n\tdef __init__(self, x, node_embeddings):\n\t\tsuper().__init__()\n\t\t\"\"\"depot_xy: (batch, n_depot, 2)\n\t\t\tcustomer_xy: (batch, n_customer, 2)\n\t\t\t--> xy: (batch, n_node, 2); Coordinates of depot + customer nodes\n\t\t\tn_node= n_depot + n_customer\n\t\t\tdemand: (batch, n_customer)\n\t\t\t??? --> demand: (batch, n_car, n_customer)\n\t\t\tD(remaining car capacity): (batch, n_car)\n\t\t\tnode_embeddings: (batch, n_node, embed_dim)\n\t\t\t--> node_embeddings: (batch, n_car, n_node, embed_dim)\n\n\t\t\tcar_start_node: (batch, n_car); start node index of each car\n\t\t\tcar_cur_node: (batch, n_car); current node index of each car\n\t\t\tcar_run: (batch, car); distance each car has run \n\t\t\tpi: (batch, n_car, decoder_step); which index node each car has moved \n\t\t\tdist_mat: (batch, n_node, n_node); distance matrix\n\t\t\ttraversed_nodes: (batch, n_node)\n\t\t\ttraversed_customer: (batch, n_customer)\n\t\t\"\"\"\n\t\tself.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\t\tself.demand = x['demand']\n\t\tself.xy = torch.cat([x['depot_xy'], x['customer_xy']], 1)\n\t\tself.car_start_node, self.D = x['car_start_node'], x['car_capacity']\n\t\tself.car_cur_node = self.car_start_node\n\t\tself.pi = self.car_start_node.unsqueeze(-1)\n\n\t\tself.n_depot = x['depot_xy'].size(1)\n\t\tself.n_customer = x['customer_xy'].size(1)\n\t\tself.n_car = self.car_start_node.size(1)\n\t\tself.batch, self.n_node, self.embed_dim = node_embeddings.size()\n\t\tself.node_embeddings = node_embeddings[:,None,:,:].repeat(1,self.n_car,1,1)\n\t\t\n\t\tself.demand_include_depot = torch.cat([torch.zeros((self.batch, self.n_depot), dtype = torch.float, device = self.device), self.demand], dim = 1)\n\t\tassert self.demand_include_depot.size(1) == self.n_node, 'demand_include_depot'\n\t\t\n\t\t# self.demand = demand[:,None,:].repeat(1,self.n_car,1)\t\t\n\t\tself.car_run = torch.zeros((self.batch, self.n_car), dtype = torch.float, device = self.device)\n\n\t\tself.dist_mat = self.build_dist_mat()\n\t\tself.mask_depot, self.mask_depot_unused = self.build_depot_mask()\n\t\tself.traversed_customer = torch.zeros((self.batch, self.n_customer), dtype = torch.bool, device = self.device)\n\t\t\n\tdef build_dist_mat(self):\n\t\txy = self.xy.unsqueeze(1).repeat(1, self.n_node, 1, 1)\n\t\tconst_xy = self.xy.unsqueeze(2).repeat(1, 1, self.n_node, 1)\n\t\tdist_mat = torch.sqrt(((xy - const_xy) ** 2).sum(dim = 3))\n\t\treturn dist_mat\n\n\tdef build_depot_mask(self):\n\t\ta = torch.arange(self.n_depot, device = self.device).reshape(1, 1, -1).repeat(self.batch, self.n_car, 1)\n\t\tb = self.car_start_node[:,:,None].repeat(1, 1, self.n_depot)\n\t\tdepot_one_hot = (a==b).bool()#.long()\n\t\treturn depot_one_hot, torch.logical_not(depot_one_hot)\n\n\tdef get_mask(self, next_node, next_car):\n\t\t\"\"\"self.demand **excludes depot**: (batch, n_nodes-1)\n\t\t\tselected_demand: (batch, 1)\n\t\t\tif next node is depot, do not select demand\n\t\t\tself.D: (batch, n_car, 1), D denotes \"remaining vehicle capacity\"\n\t\t\tself.capacity_over_customer **excludes depot**: (batch, n_car, n_customer)\n\t\t\tvisited_customer **excludes depot**: (batch, n_customer, 1)\n\t\t\tis_next_depot: (batch, 1), e.g. [[True], [True], ...]\n\n\t\t\"\"\"\n\t\tis_next_depot = (self.car_cur_node == self.car_start_node).bool()#.long().sum(-1)\n\t\t# e.g., is_next_depot = next_node == 0 or next_node == 1\n\t\t# is_next_depot: (batch, n_car), e.g. 
[[True], [True], ...]\n\n\t\t\n\t\tnew_traversed_node = torch.eye(self.n_node, device = self.device)[next_node.squeeze(1)]\n\t\t# new_traversed_node: (batch, node)\n\t\tnew_traversed_customer = new_traversed_node[:,self.n_depot:]\n\t\t# new_traversed_customer: (batch, n_customer)\n\t\tself.traversed_customer = self.traversed_customer | new_traversed_customer.bool()\n\t\t# traversed_customer: (batch, n_customer)\n\n\t\tselected_demand = torch.gather(input = self.demand_include_depot, dim = 1, index = next_node)\n\t\t# selected_demand: (batch, 1)\n\t\tselected_car = torch.eye(self.n_car, device = self.device)[next_car.squeeze(1)]\n\t\t# selected_car: (batch, n_car)\n\t\tcar_used_demand = selected_car * selected_demand\n\t\t# car_used_demand: (batch, n_car) \t\t\n\t\tself.D -= car_used_demand\n\t\t# D: (batch, n_car)\n\t\t# self.D = torch.clamp(self.D, min = 0.)\n\t\t\n\t\tD_over_customer = self.demand[:,None,:].repeat(1,self.n_car,1) > self.D[:,:,None].repeat(1,1,self.n_customer)\n\t\tmask_customer = D_over_customer | self.traversed_customer[:,None,:].repeat(1,self.n_car,1)\n\t\t# mask_customer: (batch, n_car, n_customer)\n\n\t\tmask_depot = is_next_depot & ((mask_customer == False).long().sum(dim = 2).sum(dim = 1)[:,None].repeat(1,self.n_car) > 0)\n\t\t# mask_depot: (batch, n_car)\n\t\t\"\"\"mask_depot = True --> We cannot choose depot in the next step \n\t\t\tif 1) the vehicle is at the depot in the next step\n\t\t\tor 2) there is a customer node which has not been visited yet\n\t\t\"\"\"\n\n\t\tmask_depot = self.mask_depot & mask_depot.bool()[:,:,None].repeat(1,1,self.n_depot)\n\t\t# mask_depot: (batch, n_car, n_depot)\n\n\t\tmask_depot = self.mask_depot_unused | mask_depot\n\t\t\"\"\"mask_depot: (batch, n_car, n_depot) \n\t\t\tmask_customer: (batch, n_car, n_customer) \n\t\t\t--> return mask: (batch, n_car, n_node ,1)\n\t\t\"\"\"\n\t\treturn torch.cat([mask_depot, mask_customer], dim = -1).unsqueeze(-1)\n\t\t\n\tdef generate_step_context(self):\n\t\t\"\"\"D: (batch, n_car)\n\t\t\t--> D: (batch, n_car, 1, 1)\n\t\t\t\n\t\t\teach_car_idx: (batch, n_car, 1, embed_dim)\n\t\t\tnode_embeddings: (batch, n_car, n_node, embed_dim)\n\t\t\t--> prev_embeddings(initially, depot_embeddings): (batch, n_car, 1, embed)\n\t\t\tnode embeddings where car is located\n\t\t\t\n\t\t\treturn step_context: (batch, n_car, 1, embed+1)\n\t\t\"\"\"\n\t\teach_car_idx = self.car_cur_node[:,:,None,None].repeat(1,1,1,self.embed_dim)\t\t\n\t\tprev_embeddings = torch.gather(input = self.node_embeddings, dim = 2, index = each_car_idx)\n\t\tstep_context = torch.cat([prev_embeddings, self.D[:,:,None,None]], dim = -1)\n\t\treturn step_context\n\n\tdef _get_step(self, next_node, next_car):\n\t\t\"\"\"next_node **includes depot** : (batch, 1) int(=long), range[0, n_node-1]\n\t\t\t\n\t\t\treturn\n\t\t\tmask: (batch, n_car, n_node ,1)\n\t\t\tstep_context: (batch, n_car, 1, embed+1)\n\t\t\"\"\"\n\t\tself.update_node_path(next_node, next_car)\n\t\tself.update_car_distance()\n\t\tmask = self.get_mask(next_node, next_car)\n\t\tstep_context = self.generate_step_context()\n\t\treturn mask, step_context\n\n\tdef _get_step_t1(self):\n\t\t\"\"\"return\n\t\t\tmask: (batch, n_car, n_node ,1)\n\t\t\tstep_context: (batch, n_car, 1, embed+1)\n\t\t\"\"\"\n\t\tmask_t1 = self.get_mask_t1()\n\t\tstep_context_t1 = self.generate_step_context()\t\t\n\t\treturn mask_t1, step_context_t1\n\n\tdef get_mask_t1(self):\n\t\t\"\"\"mask_depot: (batch, n_car, n_depot) \n\t\t\tmask_customer: (batch, n_car, n_customer) \n\t\t\t--> return mask: (batch, n_car, 
n_node ,1)\n\t\t\"\"\"\n\t\tmask_depot_t1 = self.mask_depot | self.mask_depot_unused\n\t\tmask_customer_t1 = self.traversed_customer[:,None,:].repeat(1,self.n_car,1)\n\t\treturn torch.cat([mask_depot_t1, mask_customer_t1], dim = -1).unsqueeze(-1)\n\t\t\n\tdef update_node_path(self, next_node, next_car):\n\t\t# car_node: (batch, n_car)\n\t\t# pi: (batch, n_car, decoder_step)\n\t\tself.car_prev_node = self.car_cur_node\n\t\ta = torch.arange(self.n_car, device = self.device).reshape(1, -1).repeat(self.batch, 1)\n\t\tb = next_car.reshape(self.batch, 1).repeat(1, self.n_car)\n\t\tmask_car = (a == b).long()\n\t\tnew_node = next_node.reshape(self.batch, 1).repeat(1, self.n_car)\n\t\tself.car_cur_node = mask_car * new_node + (1 - mask_car) * self.car_cur_node\n\t\t# (1-mask_car) keeps the same node for the unused car, mask_car updates new node for the used car\n\t\tself.pi = torch.cat([self.pi, self.car_cur_node.unsqueeze(-1)], dim = -1)\n\n\tdef update_car_distance(self):\n\t\tprev_node_dist_vec = torch.gather(input = self.dist_mat, dim = 1, index = self.car_prev_node[:,:,None].repeat(1,1,self.n_node))\n\t\t# dist = torch.gather(input = prev_node_dist_vec, dim = 2, index = self.car_cur_node[:,None,:].repeat(1,self.n_car,1))\n\t\tdist = torch.gather(input = prev_node_dist_vec, dim = 2, index = self.car_cur_node[:,:,None])\n\t\tself.car_run += dist.squeeze(-1)\n\t\t# print(self.car_run[0])\n\n\tdef return_depot_all_car(self):\n\t\tself.pi = torch.cat([self.pi, self.car_start_node.unsqueeze(-1)], dim = -1)\n\t\tself.car_prev_node = self.car_cur_node\n\t\tself.car_cur_node = self.car_start_node\n\t\tself.update_car_distance()\n\n\tdef get_log_likelihood(self, _log_p, _idx):\n\t\t\"\"\"_log_p: (batch, decode_step, n_car * n_node)\n\t\t\t_idx: (batch, decode_step, 1), selected index\n\t\t\"\"\"\n\t\tlog_p = torch.gather(input = _log_p, dim = 2, index = _idx)\n\t\treturn log_p.squeeze(-1).sum(dim = 1)\n\nclass Sampler(nn.Module):\n\t\"\"\"args; logits: (batch, n_car * n_nodes)\n\t\treturn; next_node: (batch, 1)\n\t\tTopKSampler --> greedy; sample one with biggest probability\n\t\tCategoricalSampler --> sampling; randomly sample one from possible distribution based on probability\n\t\"\"\"\n\tdef __init__(self, n_samples = 1, **kwargs):\n\t\tsuper().__init__(**kwargs)\n\t\tself.n_samples = n_samples\n\t\t\nclass TopKSampler(Sampler):\n\tdef forward(self, logits):\n\t\treturn torch.topk(logits, self.n_samples, dim = 1)[1]\n\t\t# torch.argmax(logits, dim = 1).unsqueeze(-1)\n\nclass CategoricalSampler(Sampler):\n\tdef forward(self, logits):\n\t\treturn torch.multinomial(logits.exp(), self.n_samples)" ]
[ [ "torch.zeros", "torch.cat", "torch.arange", "torch.gather", "torch.logical_not", "torch.cuda.is_available", "torch.eye", "torch.topk" ] ]
piyueh/SEM-Exercises
[ "d25e6c1bc609022189952d97488828113cfb2206" ]
[ "utils/misc/misc.py" ]
[ "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2016 Pi-Yueh Chuang <pychuang@gwu.edu>\n#\n# Distributed under terms of the MIT license.\n\n\"\"\"Some misc functions\"\"\"\n\nimport numpy\nimport numbers\nimport functools\n\n# TODO: replace assertion with if ... raise\n\n\ndef factorial(n):\n \"\"\"Naive implementation of factorial\n\n For serious use, please consider scipy.special.factorial\n\n Args:\n n: an integer\n Returns:\n n!\n \"\"\"\n\n if not isinstance(n, (int, numpy.int_)):\n raise ValueError(\n \"n is not an integer: {0}, {1}\".format(n, type(n)))\n\n if n == 0:\n return 1\n else:\n return functools.reduce(lambda x, y: x * y, range(1, n+1))\n\n\ndef factorial_division(bg, end):\n \"\"\"Naive implementation of factorial division: end! / bg!\n\n This function is to avoid integer overflow. If end and bg are big, it is\n dangerous to use fractional(end) / fractional(bg) due to the potential of\n integer overflow.\n\n For serious use, please consider scipy.special.factorial\n\n Args:\n bg: the beginning integer\n end: the endding integer\n Returns:\n end! / bg!\n \"\"\"\n\n if not isinstance(bg, (int, numpy.int_)):\n raise ValueError(\n \"bg is not an integer: {0}, {1}\".format(bg, type(bg)))\n if not isinstance(end, (int, numpy.int_)):\n raise ValueError(\n \"end is not an integer: {0}, {1}\".format(end, type(end)))\n if bg < 0:\n raise ValueError(\"bg can not be smaller than zero!\")\n if end < bg:\n raise ValueError(\n \"end should larger than or equal to bg: \" +\n \"bg={0}, end={1}\".format(bg, end))\n\n if end == bg:\n return 1\n else:\n return functools.reduce(lambda x, y: x * y, range(bg+1, end+1))\n\n\ndef gamma(n):\n \"\"\"Naive implementation of gamma function (integer input)\n\n For serious use, please consider scipy.special.gamma\n\n Args:\n n: the integer\n Returns:\n (n-1)!\n \"\"\"\n return factorial(n-1)\n\n\ndef strip_trivial(z, tol=1e-8):\n \"\"\"if any element in array z is smaller than tol, we set it to zero\n\n Args:\n z: the array to be cleaned\n tol: the tolerance\n\n Returns:\n \"\"\"\n # TODO implement different way to lower the dependence of numpy\n z = z.astype(numpy.complex128)\n z = numpy.where(numpy.abs(z.real) < tol, z.imag*1j, z)\n z = numpy.where(numpy.abs(z.imag) < tol, z.real, z)\n z = numpy.real(z) if (z.imag == 0).all() else z\n\n return z\n\n\ndef check_array(arry, msg=\"Can't convert input to numpy.ndarray\"):\n \"\"\"check whether the input is a numpy array, and try to convert it\n\n Args:\n arry: the data to be checked\n msg: the message to be passed to error instance\n\n Returns:\n arry as a numpy.ndarray\n\n Raise:\n TypeError, if it fail to convert the input to a numpy array\n \"\"\"\n\n if isinstance(arry, (numbers.Number, numpy.number)):\n return numpy.array([arry])\n elif isinstance(arry, list):\n return numpy.array(arry)\n elif isinstance(arry, numpy.ndarray):\n return arry\n else:\n raise TypeError(msg)\n" ]
[ [ "numpy.array", "numpy.real", "numpy.abs" ] ]
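As a quick cross-check of factorial_division in the entry above (bg exclusive, end inclusive, per its docstring), here is a loop form mirroring the functools.reduce, compared against math.factorial:

import math

def factorial_division(bg, end):
    # end! / bg! as the running product (bg+1) * (bg+2) * ... * end
    result = 1
    for k in range(bg + 1, end + 1):
        result *= k
    return result

assert factorial_division(3, 6) == 4 * 5 * 6 == math.factorial(6) // math.factorial(3)  # 120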
ztultrebor/Kaggle-Santander_Challenge
[ "af5132f986089553a2192183f53ed3b0ec2bcf1b" ]
[ "XGB.py" ]
[ "#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\nfrom GridSearch import GridSearch\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom xgboost import XGBClassifier\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import roc_auc_score\n\n\n#===================================prep data==================================\n\nnp.random.seed(42)\n\ntarget_col = 'TARGET'\nid_col = 'ID'\n\nX_train = pd.read_csv('./Level1Data/Xtrain.csv')\nX_train['GBpred'] = pd.read_csv('./Level1Data/GBPredtrain.csv')\nX_train['ADApred'] = pd.read_csv('./Level1Data/ADAPredtrain.csv')\ny_train = pd.read_csv('./Level1Data/ytrain.csv')[target_col]\n\n\n#==========================Gradient Boost Classifier===========================\n\nparams = {\n 'n_estimators' : scipy.stats.geom(1/150.),\n 'max_depth' : scipy.stats.randint(2,7),\n 'learning_rate' : scipy.stats.expon(0, 0.01),\n 'min_samples_leaf' : scipy.stats.geom(1/10.),\n 'subsample' : scipy.stats.beta(2,1),\n 'colsample_bytree' : scipy.stats.beta(2,1)\n }\n\nclf = XGBClassifier()\n\nGridSearch(\n classifier = clf,\n paramdict = params,\n iters = 729,\n X = X_train,\n y = y_train,\n X_reserve = None,\n y_reserve = None\n)\n" ]
[ [ "scipy.stats.expon", "numpy.random.seed", "scipy.stats.geom", "scipy.stats.randint", "pandas.read_csv", "scipy.stats.beta" ] ]
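The GridSearch helper imported in the entry above is not part of this excerpt, so as a hedged illustration of the same idea — drawing hyperparameters from frozen scipy.stats distributions — an analogous run with scikit-learn's RandomizedSearchCV on synthetic data might look like this; the estimator, dataset, and n_iter are placeholders, not the repo's setup:

import scipy.stats
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import RandomizedSearchCV

X, y = make_classification(n_samples=500, random_state=42)
params = {
    'n_estimators': scipy.stats.geom(1/150.),   # geometric prior over the tree count
    'max_depth': scipy.stats.randint(2, 7),
    'learning_rate': scipy.stats.expon(0, 0.01),
    'subsample': scipy.stats.beta(2, 1),        # skewed toward 1.0
}
search = RandomizedSearchCV(GradientBoostingClassifier(), params, n_iter=10,
                            scoring='roc_auc', random_state=42)
search.fit(X, y)
print(search.best_params_)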
CatherineH/python-sewing
[ "01873f6341c7ce8e26d4e61aab9d52a586d667f6" ]
[ "merge_pieces.py" ]
[ "from svgpathtools import svg2paths, Path, Line\nfrom svgwrite import Drawing, rgb\nimport argparse\nfrom math import atan, asin, sin, cos, pi\nfrom numpy import argmin\nfrom utils import calc_overall_bbox\n\nparser = argparse.ArgumentParser(\n description='Generate a merged piece from two pieces by stretching the pattern piece along an edge')\nparser.add_argument('--filename', type=str,\n help='The filename of the svg with at least two pattern pieces.')\n\n\nclass Intersection(object):\n def __init__(self, point=1.0+1.0*1j, diff=0.0):\n self.point = point\n self.diff = diff\n\n\nclass PathClip(object):\n def __init__(self, index=0, t=0.0, target=1.0+1.0*1j):\n self.index = index\n self.t = t\n self.target = target\n\n\ndef flatten_shape(i, all_paths, merge_paths):\n dwg = Drawing(\"merge_output%s.svg\" % i, profile='tiny')\n\n def draw_line(start, end, offset=0.0):\n start += offset\n end += offset\n dwg.add(dwg.line(start=(start.real, start.imag), end=(end.real, end.imag),\n stroke_width=4, stroke=rgb(255, 0, 0)))\n\n dwg.add(dwg.path(**{'d': all_paths[i].d(), 'fill': \"none\", 'stroke-width': 4,\n 'stroke': rgb(0, 0, 0)}))\n dwg.add(dwg.path(**{'d': merge_paths[i].d(), 'fill': \"none\", 'stroke-width': 4,\n 'stroke': rgb(255, 0, 0)}))\n bbox = calc_overall_bbox(all_paths[i])\n width, height = abs(bbox[1] - bbox[0]), abs(bbox[3] - bbox[2])\n margin = 40\n lower = min(bbox[2], bbox[3]) + height+margin\n left = min(bbox[0], bbox[1]) + margin\n\n def draw_marker(loc, col=rgb(255, 0, 0), offset=(left, lower)):\n dwg.add(dwg.circle(center=(loc.real + offset[0], loc.imag + offset[1]), r=4,\n fill=col))\n\n max_axis = max(width, height)\n num_lines = 10\n points = [merge_paths[i].point(j / num_lines) for j in range(num_lines)] + [\n merge_paths[i].point(1.0)]\n angles = [\n asin((points[j + 1].imag - points[j].imag) / abs(points[j + 1] - points[j]))\n for j in range(num_lines)]\n\n ends = [max_axis * (sin(angle) + cos(angle) * 1j) for angle in\n angles]\n intersection_clips = []\n for j, end in enumerate(ends):\n end_point = end + points[j]\n intersections = other_paths[i].intersect(Line(start=points[j], end=end_point))\n\n for intersection in intersections[0]:\n intersection_point = intersection[1].point(intersection[2])\n target = merge_paths[i].length()*(1-j/num_lines) + abs(intersection_point - points[j])*1j\n intersection_clips.append(PathClip(index=other_paths[i].index(intersection[1]),\n t=intersection[2],\n target=target))\n if j % 10 == 0:\n draw_line(points[j], intersection_point)\n draw_marker(intersection_point, rgb(0, 255, 0), (0, 0))\n break\n\n # make the flexed points by chopping the chunks of the other paths out, then\n # translating and rotating them such that their end points line up with the diff lines\n def transform_side(sides, targets, angle_offset=0):\n def angle(point1, point2):\n diff = point1-point2\n if diff.real == 0:\n return 90.0\n return atan(diff.imag / diff.real)*180.0/pi\n # change this so that it has two targets\n transformed_side = Path(*sides)\n source_angle = angle(transformed_side.end, transformed_side.start) - \\\n angle(targets[0], targets[1])\n transformed_side = transformed_side.rotated(-source_angle+angle_offset)\n source = transformed_side.end if angle_offset == 0 else transformed_side.start\n diff = targets[1] - source\n transformed_side = transformed_side.translated(diff)\n draw_marker(targets[0], rgb(0, 200, 200))\n draw_marker(targets[1], rgb(0, 255, 255))\n transformed_diff = abs(transformed_side.start - transformed_side.end)\n targets_diff = 
abs(targets[0]-targets[1])\n if transformed_diff < targets_diff :\n transformed_side.insert(0, Line(start=targets[0],\n end=transformed_side.start))\n elif transformed_diff > targets_diff:\n # pop elements off until the transformed diff is smaller\n while transformed_diff > targets_diff:\n transformed_side.pop(0)\n transformed_diff = abs(transformed_side.start - transformed_side.end)\n print(\"path\", transformed_side)\n print(\"path is longer\", transformed_diff-targets_diff)\n return transformed_side\n\n start_index = 0\n curr_t = 0\n flexed_path = []\n t_resolution = 0.01\n if intersection_clips[0].index > intersection_clips[-1].index or \\\n (intersection_clips[0].index == intersection_clips[-1].index and\n intersection_clips[0].t > intersection_clips[-1].t):\n intersection_clips.reverse()\n # add the end of the shape to the intersection clips\n intersection_clips.append(PathClip(index=len(other_paths[i])-1, t=1.0,\n target=merge_paths[i].length()))\n last_target = 0\n for clip in intersection_clips:\n sides = []\n print(\"boundaries\", start_index, clip.index, curr_t, clip.t)\n upper_t = clip.t if start_index == clip.index else 1.0\n while start_index <= clip.index and curr_t < upper_t:\n curr_seg = other_paths[i][start_index]\n while curr_t < upper_t:\n max_t = curr_t + t_resolution if curr_t+t_resolution < clip.t else clip.t\n sides.append(Line(start=curr_seg.point(curr_t),\n end=curr_seg.point(max_t)))\n curr_t += t_resolution\n curr_t = upper_t\n if start_index != clip.index:\n curr_t = 0.0\n if upper_t == 1.0:\n start_index += 1\n upper_t = clip.t if start_index == clip.index else 1.0\n if len(sides) != 0:\n flexed_path.append(transform_side(sides, [last_target, clip.target]))\n last_target = clip.target\n\n straight_path = [Line(start=0, end=merge_paths[i].length())]\n for p in flexed_path:\n p = p.translated(left+lower*1j)\n dwg.add(dwg.path(d=p.d(), fill=\"none\", stroke_width=4,\n stroke=rgb(255, 0, 0)))\n\n transformed_path = flexed_path + straight_path\n transformed_path = Path(*transformed_path).translated(left + lower*1j)\n dwg.add(dwg.path(d=transformed_path.d(), fill=\"none\", stroke_width=4,\n stroke=rgb(0, 0, 0)))\n bbox = calc_overall_bbox(list(all_paths[i]) + list(transformed_path))\n\n width, height = abs(bbox[1] - bbox[0]), abs(bbox[3] - bbox[2])\n dwg.viewbox(min(bbox[0], bbox[1]), min(bbox[2], bbox[3]), width, height)\n dwg.save()\n return flexed_path\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n all_paths, attributes = svg2paths(args.filename)\n # how do we figure out what sections of the path are linked?\n diffs = [[abs(i.start - j.start) for j in all_paths[0]] for i in\n all_paths[1]]\n # get the location of the lowest value of the diffs - this will tell us the offset\n diff_min = [argmin(diff) for diff in diffs]\n offset_diffs = [diff_min[i + 1] - diff_min[i] for i in range(len(diff_min) - 1)]\n # pull out the longest contiguous section of 1s\n start_one = offset_diffs.index(1)\n end_one = offset_diffs[::-1].index(1)\n # for each of the shapes, construct a new shape where the section in the merge paths\n # is straight\n merge_paths = [Path(*list(all_paths[i])[start_one:end_one]) for i in range(0, 2)]\n other_paths = [Path(*list(all_paths[i])[end_one:]+list(all_paths[i])[0:start_one])\n for i in range(0, 2)]\n flexed_paths = [flatten_shape(i, all_paths, merge_paths) for i in range(0, 2)]\n dwg = Drawing(\"flexed_sides.svg\", profile=\"tiny\")\n upper_sizes = [0, 0]\n for i, path_list in enumerate(flexed_paths):\n bbox = 
calc_overall_bbox(path_list)\n if i == 0:\n upper_sizes = [max(bbox[0], bbox[1]), abs(bbox[3] - bbox[2])]\n transform = \"scale(1, {})\".format(-1 if i == 0 else 1)\n group = dwg.add(dwg.g(transform=transform))\n for path in path_list:\n path = path.translated(-min(bbox[2], bbox[3])*1j)\n group.add(dwg.path(**{'d': path.d(), 'fill': \"none\", 'stroke-width': 4,\n 'stroke': rgb(0, 0, 0)}))\n bbox = calc_overall_bbox(flexed_paths[1])\n dwg.viewbox(min(bbox[0], bbox[1]), -upper_sizes[1],\n abs(min(bbox[0], bbox[1]) -max(bbox[0], bbox[1], upper_sizes[0])),\n abs(bbox[3] - bbox[2])+upper_sizes[1])\n dwg.save()\n # render the shapes selected\n dwg = Drawing(\"merge_output.svg\", profile='tiny')\n for path in all_paths:\n dwg.add(dwg.path(\n **{'d': path.d(), 'fill': \"none\", 'stroke-width': 4, 'stroke': rgb(0, 0, 0)}))\n dwg.add(dwg.path(**{'d': merge_paths[0].d(), 'fill': \"none\", 'stroke-width': 4,\n 'stroke': rgb(255, 0, 0)}))\n dwg.add(dwg.path(**{'d': merge_paths[1].d(), 'fill': \"none\", 'stroke-width': 4,\n 'stroke': rgb(0, 255, 0)}))\n bbox = calc_overall_bbox([x for x in all_paths[0]] + [x for x in all_paths[1]])\n dwg.viewbox(min(bbox[0], bbox[1]), min(bbox[2], bbox[3]), abs(bbox[1] - bbox[0]),\n abs(bbox[3] - bbox[2]))\n dwg.save()\n" ]
[ [ "numpy.argmin" ] ]
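Since merge_pieces.py leans on a handful of svgpathtools primitives — Path/Line built from complex points, point(t) evaluation on [0, 1], and Path.intersect yielding ((T1, seg1, t1), (T2, seg2, t2)) tuples — here is a small self-contained sketch with a synthetic square and a crossing line; the geometry is illustrative only:

from svgpathtools import Path, Line

# closed square from four Line segments; points are complex numbers x + y*1j
square = Path(Line(0 + 0j, 100 + 0j), Line(100 + 0j, 100 + 100j),
              Line(100 + 100j, 0 + 100j), Line(0 + 100j, 0 + 0j))
ray = Line(50 - 10j, 50 + 110j)  # vertical segment crossing the square

for (T1, seg1, t1), (T2, seg2, t2) in square.intersect(ray):
    # seg1.point(t1) evaluates the hit segment at its local parameter t1
    print('intersection at', seg1.point(t1))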
furminator/Furminator-MCPE-Tool
[ "4fe247351503781db2012815c1e40e881d9e1bba" ]
[ "viewports/camera.py" ]
[ "# -*- coding: utf_8 -*-\n# The above line is necessary, unless we want problems with encodings...\nimport sys\nfrom compass import CompassOverlay\nfrom raycaster import TooFarException\nimport raycaster\nimport keys\nimport pygame\n\nimport math\nimport copy\nimport numpy\nfrom config import config\nimport frustum\nimport logging\nimport glutils\nimport mceutils\nimport itertools\nimport pymclevel\n\nfrom math import isnan\nfrom datetime import datetime, timedelta\n\nfrom OpenGL import GL\nfrom OpenGL import GLU\n\nfrom albow import alert, AttrRef, Button, Column, input_text, Row, TableColumn, TableView, Widget, CheckBox, \\\n TextFieldWrapped, MenuButton, ChoiceButton, IntInputRow, TextInputRow, showProgress, IntField, ask\nfrom albow.controls import Label, ValueDisplay\nfrom albow.dialogs import Dialog, wrapped_label\nfrom albow.openglwidgets import GLViewport\nfrom albow.extended_widgets import BasicTextInputRow, CheckBoxLabel\nfrom albow.translate import _\nfrom albow.root import get_top_widget\nfrom pygame import mouse\nfrom depths import DepthOffset\nfrom editortools.operation import Operation\nfrom glutils import gl\nfrom editortools.nbtexplorer import SlotEditor\n\nclass SignEditOperation(Operation):\n def __init__(self, tool, level, tileEntity, backupTileEntity):\n self.tool = tool\n self.level = level\n self.tileEntity = tileEntity\n self.undoBackupEntityTag = backupTileEntity\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(self.tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(self.tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(self.tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(self.tileEntity), (1, 1, 1))\n\nclass CameraViewport(GLViewport):\n anchor = \"tlbr\"\n\n oldMousePosition = None\n dontShowMessageAgain = False\n\n def __init__(self, editor, def_enc=None):\n self.editor = editor\n global DEF_ENC\n DEF_ENC = def_enc or editor.mcedit.def_enc\n rect = editor.mcedit.rect\n GLViewport.__init__(self, rect)\n\n # Declare a pseudo showCommands function, since it is called by other objects before its creation in mouse_move.\n self.showCommands = lambda:None\n\n near = 0.5\n far = 4000.0\n\n self.near = near\n self.far = far\n\n self.brake = False\n self.lastTick = datetime.now()\n # self.nearheight = near * tang\n\n self.cameraPosition = (16., 45., 16.)\n self.velocity = [0., 0., 0.]\n\n self.yaw = -45. 
# degrees\n self._pitch = 0.1\n\n self.cameraVector = self._cameraVector()\n\n # A state machine to dodge an apparent bug in pygame that generates erroneous mouse move events\n # 0 = bad event already happened\n # 1 = app just started or regained focus since last bad event\n # 2 = mouse cursor was hidden after state 1, next event will be bad\n self.avoidMouseJumpBug = 1\n\n config.settings.drawSky.addObserver(self)\n config.settings.drawFog.addObserver(self)\n config.settings.superSecretSettings.addObserver(self)\n config.settings.showCeiling.addObserver(self)\n config.controls.cameraAccel.addObserver(self, \"accelFactor\")\n config.controls.cameraMaxSpeed.addObserver(self, \"maxSpeed\")\n config.controls.cameraBrakingSpeed.addObserver(self, \"brakeMaxSpeed\")\n config.controls.invertMousePitch.addObserver(self)\n config.controls.autobrake.addObserver(self)\n config.controls.swapAxes.addObserver(self)\n config.settings.compassToggle.addObserver(self)\n\n config.settings.fov.addObserver(self, \"fovSetting\", callback=self.updateFov)\n\n self.mouseVector = (0, 0, 0)\n\n self.root = self.get_root()\n self.hoveringCommandBlock = [False, \"\"]\n self.block_info_parsers = None\n # self.add(DebugDisplay(self, \"cameraPosition\", \"blockFaceUnderCursor\", \"mouseVector\", \"mouse3dPoint\"))\n\n @property\n def pitch(self):\n return self._pitch\n\n @pitch.setter\n def pitch(self, val):\n self._pitch = min(89.999, max(-89.999, val))\n\n def updateFov(self, val=None):\n hfov = self.fovSetting\n fov = numpy.degrees(2.0 * numpy.arctan(self.size[0] / self.size[1] * numpy.tan(numpy.radians(hfov) * 0.5)))\n\n self.fov = fov\n self.tang = numpy.tan(numpy.radians(fov))\n\n def stopMoving(self):\n self.velocity = [0, 0, 0]\n\n def brakeOn(self):\n self.brake = True\n\n def brakeOff(self):\n self.brake = False\n\n tickInterval = 1000 / config.settings.targetFPS.get()\n\n oldPosition = (0, 0, 0)\n\n flyMode = config.settings.flyMode.property()\n\n def tickCamera(self, frameStartTime, inputs, inSpace):\n timePassed = (frameStartTime - self.lastTick).microseconds\n if timePassed <= self.tickInterval * 1000 or not pygame.key.get_focused():\n return\n\n self.lastTick = frameStartTime\n timeDelta = float(timePassed) / 1000000.\n timeDelta = min(timeDelta, 0.125) # 8fps lower limit!\n drag = config.controls.cameraDrag.get()\n accel_factor = drag + config.controls.cameraAccel.get()\n\n # if we're in space, move faster\n\n drag_epsilon = 10.0 * timeDelta\n\n if self.brake:\n max_speed = self.brakeMaxSpeed\n else:\n max_speed = self.maxSpeed\n\n if inSpace or self.root.sprint:\n accel_factor *= 3.0\n max_speed *= 3.0\n self.root.sprint = False\n elif config.settings.viewMode.get() == \"Chunk\":\n accel_factor *= 2.0\n max_speed *= 2.0\n\n pi = self.editor.cameraPanKeys\n mouseSpeed = config.controls.mouseSpeed.get()\n self.yaw += pi[0] * mouseSpeed\n self.pitch += pi[1] * mouseSpeed\n\n if config.settings.viewMode.get() == \"Chunk\":\n (dx, dy, dz) = (0, -0.25, -1)\n self.yaw = -180\n self.pitch = 10\n elif self.flyMode:\n (dx, dy, dz) = self._anglesToVector(self.yaw, 0)\n elif self.swapAxes:\n p = self.pitch\n if p > 80:\n p = 0\n\n (dx, dy, dz) = self._anglesToVector(self.yaw, p)\n\n else:\n (dx, dy, dz) = self._cameraVector()\n\n velocity = self.velocity # xxx learn to use matrix/vector libs\n i = inputs\n yaw = numpy.radians(self.yaw)\n cosyaw = -numpy.cos(yaw)\n sinyaw = numpy.sin(yaw)\n\n directedInputs = mceutils.normalize((\n i[0] * cosyaw + i[2] * dx,\n i[1] + i[2] * dy,\n i[2] * dz - i[0] * sinyaw,\n ))\n\n # 
give the camera an impulse according to the state of the inputs and in the direction of the camera\n cameraAccel = map(lambda x: x * accel_factor * timeDelta, directedInputs)\n # cameraImpulse = map(lambda x: x*impulse_factor, directedInputs)\n\n newVelocity = map(lambda a, b: a + b, velocity, cameraAccel)\n velocityDir, speed = mceutils.normalize_size(newVelocity)\n\n # apply drag\n if speed:\n if self.autobrake and not any(inputs):\n speed *= 0.15\n else:\n\n sign = speed / abs(speed)\n speed = abs(speed)\n speed = speed - (drag * timeDelta)\n if speed < 0.0:\n speed = 0.0\n speed *= sign\n\n speed = max(-max_speed, min(max_speed, speed))\n\n if abs(speed) < drag_epsilon:\n speed = 0\n\n velocity = map(lambda a: a * speed, velocityDir)\n\n # velocity = map(lambda p,d: p + d, velocity, cameraImpulse)\n d = map(lambda a, b: abs(a - b), self.cameraPosition, self.oldPosition)\n if d[0] + d[2] > 32.0:\n self.oldPosition = self.cameraPosition\n self.updateFloorQuad()\n\n self.cameraPosition = map(lambda p, d: p + d * timeDelta, self.cameraPosition, velocity)\n if self.cameraPosition[1] > 3800.:\n self.cameraPosition[1] = 3800.\n elif self.cameraPosition[1] < -1000.:\n self.cameraPosition[1] = -1000.\n\n self.velocity = velocity\n self.cameraVector = self._cameraVector()\n\n self.editor.renderer.position = self.cameraPosition\n if self.editor.currentTool.previewRenderer:\n self.editor.currentTool.previewRenderer.position = self.cameraPosition\n\n def setModelview(self):\n pos = self.cameraPosition\n look = numpy.array(self.cameraPosition)\n look = look.astype(float) + self.cameraVector\n up = (0, 1, 0)\n GLU.gluLookAt(pos[0], pos[1], pos[2],\n look[0], look[1], look[2],\n up[0], up[1], up[2])\n\n def _cameraVector(self):\n return self._anglesToVector(self.yaw, self.pitch)\n\n @staticmethod\n def _anglesToVector(yaw, pitch):\n def nanzero(x):\n if isnan(x):\n return 0\n else:\n return x\n\n dx = -math.sin(math.radians(yaw)) * math.cos(math.radians(pitch))\n dy = -math.sin(math.radians(pitch))\n dz = math.cos(math.radians(yaw)) * math.cos(math.radians(pitch))\n return map(nanzero, [dx, dy, dz])\n\n def updateMouseVector(self):\n self.mouseVector = self._mouseVector()\n\n def _mouseVector(self):\n \"\"\"\n returns a vector reflecting a ray cast from the camera\n position to the mouse position on the near plane\n \"\"\"\n x, y = mouse.get_pos()\n # if (x, y) not in self.rect:\n # return (0, 0, 0); # xxx\n\n y = self.root.height - y\n point1 = unproject(x, y, 0.0)\n point2 = unproject(x, y, 1.0)\n v = numpy.array(point2) - point1\n v = mceutils.normalize(v)\n return v\n\n def _blockUnderCursor(self, center=False):\n \"\"\"\n returns a point in 3d space that was determined by\n reading the depth buffer value\n \"\"\"\n try:\n GL.glReadBuffer(GL.GL_BACK)\n except Exception:\n logging.exception('Exception during glReadBuffer')\n ws = self.root.size\n if center:\n x, y = ws\n x //= 2\n y //= 2\n else:\n x, y = mouse.get_pos()\n if (x < 0 or y < 0 or x >= ws[0] or\n y >= ws[1]):\n return 0, 0, 0\n\n y = ws[1] - y\n\n try:\n pixel = GL.glReadPixels(x, y, 1, 1, GL.GL_DEPTH_COMPONENT, GL.GL_FLOAT)\n newpoint = unproject(x, y, pixel[0])\n except Exception:\n return 0, 0, 0\n\n return newpoint\n\n def updateBlockFaceUnderCursor(self):\n focusPair = None\n if not self.enableMouseLag or self.editor.frames & 1:\n self.updateMouseVector()\n if self.editor.mouseEntered:\n if not self.mouseMovesCamera:\n try:\n focusPair = raycaster.firstBlock(self.cameraPosition, self._mouseVector(), self.editor.level,\n 100, 
config.settings.viewMode.get())\n except TooFarException:\n mouse3dPoint = self._blockUnderCursor()\n focusPair = self._findBlockFaceUnderCursor(mouse3dPoint)\n elif self.editor.longDistanceMode:\n mouse3dPoint = self._blockUnderCursor(True)\n focusPair = self._findBlockFaceUnderCursor(mouse3dPoint)\n\n # otherwise, find the block at a controllable distance in front of the camera\n if focusPair is None:\n if self.blockFaceUnderCursor is None or self.mouseMovesCamera:\n focusPair = (self.getCameraPoint(), (0, 0, 0))\n else:\n focusPair = self.blockFaceUnderCursor\n\n try:\n if focusPair[0] is not None and self.editor.level.tileEntityAt(*focusPair[0]):\n changed = False\n te = self.editor.level.tileEntityAt(*focusPair[0])\n backupTE = copy.deepcopy(te)\n if te[\"id\"].value == \"Sign\" or self.editor.level.defsIds.mcedit_ids.get(te[\"id\"].value) in (\"DEF_BLOCKS_STANDING_SIGN\", \"DEFS_BLOCKS_WALL_SIGN\"):\n if \"Text1\" in te and \"Text2\" in te and \"Text3\" in te and \"Text4\" in te:\n for i in xrange(1,5):\n if len(te[\"Text\"+str(i)].value) > 32767:\n te[\"Text\"+str(i)] = pymclevel.TAG_String(str(te[\"Text\"+str(i)].value)[:32767])\n changed = True\n if changed:\n response = None\n if not self.dontShowMessageAgain:\n response = ask(\"Found a sign that exceeded the maximum character limit. Automatically trimmed the sign to prevent crashes.\", responses=[\"Ok\", \"Don't show this again\"])\n if response is not None and response == \"Don't show this again\":\n self.dontShowMessageAgain = True\n op = SignEditOperation(self.editor, self.editor.level, te, backupTE)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n except:\n pass\n\n self.blockFaceUnderCursor = focusPair\n\n def _findBlockFaceUnderCursor(self, projectedPoint):\n \"\"\"Returns a (pos, Face) pair or None if one couldn't be found\"\"\"\n d = [0, 0, 0]\n\n try:\n intProjectedPoint = map(int, map(numpy.floor, projectedPoint))\n except ValueError:\n return None # catch NaNs\n intProjectedPoint[1] = max(-1, intProjectedPoint[1])\n\n # find out which face is under the cursor. 
xxx do it more precisely\n faceVector = ((projectedPoint[0] - (intProjectedPoint[0] + 0.5)),\n (projectedPoint[1] - (intProjectedPoint[1] + 0.5)),\n (projectedPoint[2] - (intProjectedPoint[2] + 0.5))\n )\n\n av = map(abs, faceVector)\n\n i = av.index(max(av))\n delta = faceVector[i]\n if delta < 0:\n d[i] = -1\n else:\n d[i] = 1\n\n potentialOffsets = []\n\n try:\n block = self.editor.level.blockAt(*intProjectedPoint)\n except (EnvironmentError, pymclevel.ChunkNotPresent):\n return intProjectedPoint, d\n\n if block == pymclevel.alphaMaterials.SnowLayer.ID:\n potentialOffsets.append((0, 1, 0))\n else:\n # discard any faces that aren't likely to be exposed\n for face, offsets in pymclevel.faceDirections:\n point = map(lambda a, b: a + b, intProjectedPoint, offsets)\n try:\n neighborBlock = self.editor.level.blockAt(*point)\n if block != neighborBlock:\n potentialOffsets.append(offsets)\n except (EnvironmentError, pymclevel.ChunkNotPresent):\n pass\n\n # check each component of the face vector to see if that face is exposed\n if tuple(d) not in potentialOffsets:\n av[i] = 0\n i = av.index(max(av))\n d = [0, 0, 0]\n delta = faceVector[i]\n if delta < 0:\n d[i] = -1\n else:\n d[i] = 1\n if tuple(d) not in potentialOffsets:\n av[i] = 0\n i = av.index(max(av))\n d = [0, 0, 0]\n delta = faceVector[i]\n if delta < 0:\n d[i] = -1\n else:\n d[i] = 1\n\n if tuple(d) not in potentialOffsets:\n if len(potentialOffsets):\n d = potentialOffsets[0]\n else:\n # use the top face as a fallback\n d = [0, 1, 0]\n\n return intProjectedPoint, d\n\n @property\n def ratio(self):\n return self.width / float(self.height)\n\n startingMousePosition = None\n\n def mouseLookOn(self):\n self.root.capture_mouse(self)\n self.focus_switch = None\n self.startingMousePosition = mouse.get_pos()\n\n if self.avoidMouseJumpBug == 1:\n self.avoidMouseJumpBug = 2\n\n def mouseLookOff(self):\n self.root.capture_mouse(None)\n if self.startingMousePosition:\n mouse.set_pos(*self.startingMousePosition)\n self.startingMousePosition = None\n\n @property\n def mouseMovesCamera(self):\n return self.root.captured_widget is not None\n\n def toggleMouseLook(self):\n if not self.mouseMovesCamera:\n self.mouseLookOn()\n else:\n self.mouseLookOff()\n\n # mobs is overridden in __init__\n mobs = pymclevel.Entity.monsters + [\"[Custom]\"]\n\n @mceutils.alertException\n def editMonsterSpawner(self, point):\n mobs = self.mobs\n _mobs = {}\n # Get the mobs from the versioned data\n defsIds = self.editor.level.defsIds\n mcedit_defs = defsIds.mcedit_defs\n mcedit_ids = defsIds.mcedit_ids\n if mcedit_defs.get('spawner_monsters'):\n mobs = []\n for a in mcedit_defs['spawner_monsters']:\n _id = mcedit_ids[a]\n name = _(mcedit_defs[_id]['name'])\n _mobs[name] = a\n _mobs[a] = name\n mobs.append(name)\n\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n tileEntity[\"id\"] = pymclevel.TAG_String(mcedit_defs.get(\"MobSpawner\", \"MobSpawner\"))\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n tileEntity[\"Delay\"] = pymclevel.TAG_Short(120)\n tileEntity[\"EntityId\"] = pymclevel.TAG_String(mcedit_defs.get(mobs[0], mobs[0]))\n self.editor.level.addTileEntity(tileEntity)\n\n panel = Dialog()\n\n def addMob(id):\n if id not in mobs:\n mobs.insert(0, id)\n mobTable.selectedIndex = 0\n\n def selectTableRow(i, evt):\n if mobs[i] == \"[Custom]\":\n id 
= input_text(\"Type in an EntityID for this spawner. Invalid IDs may crash Minecraft.\", 150)\n if id:\n addMob(id)\n else:\n return\n mobTable.selectedIndex = mobs.index(id)\n else:\n mobTable.selectedIndex = i\n\n if evt.num_clicks == 2:\n panel.dismiss()\n\n mobTable = TableView(columns=(\n TableColumn(\"\", 200),\n )\n )\n mobTable.num_rows = lambda: len(mobs)\n mobTable.row_data = lambda i: (mobs[i],)\n mobTable.row_is_selected = lambda x: x == mobTable.selectedIndex\n mobTable.click_row = selectTableRow\n mobTable.selectedIndex = 0\n\n def selectedMob():\n val = mobs[mobTable.selectedIndex]\n return _mobs.get(val, val)\n\n def cancel():\n mobs[mobTable.selectedIndex] = id\n panel.dismiss()\n\n if \"EntityId\" in tileEntity:\n _id = tileEntity[\"EntityId\"].value\n elif \"SpawnData\" in tileEntity:\n _id = tileEntity[\"SpawnData\"][\"id\"].value\n else:\n _id = \"[Custom]\"\n\n # Something weird here since the first implementation of the versioned definition.\n # It may happen that 'mcedit_defs.get(mcedit_ids.get(_id, _id), {}).get(\"name\", _id)'\n # does not return the wanted data (dict).\n # Could not yet debug that, but I guess it is related to the versioned data loading...\n # -- D.C.-G.\n # print mcedit_ids.get(_id, _id)\n # print mcedit_defs.get(mcedit_ids.get(_id, _id), {})\n _id2 = mcedit_defs.get(mcedit_ids.get(_id, _id), {})\n if isinstance(_id2, (str, unicode)):\n _id = _id2\n id = mcedit_defs.get(mcedit_ids.get(_id, _id), {}).get(\"name\", _id)\n\n addMob(id)\n\n mobTable.selectedIndex = mobs.index(id)\n oldChoiceCol = Column((Label(_(\"Current: \") + _mobs.get(id, id), align='l', width=200), ))\n newChoiceCol = Column((ValueDisplay(width=200, get_value=lambda: _(\"Change to: \") + selectedMob()), mobTable))\n\n lastRow = Row((Button(\"OK\", action=panel.dismiss), Button(\"Cancel\", action=cancel)))\n panel.add(Column((oldChoiceCol, newChoiceCol, lastRow)))\n panel.shrink_wrap()\n panel.present()\n\n class MonsterSpawnerEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n if id != selectedMob():\n # If the level has a 'setSpawnerData', call it instead of using the code here\n if hasattr(self.editor.level, \"setSpawnerData\"):\n tileEntity = self.editor.level.setSpawnerData(tileEntity, selectedMob())\n else:\n if \"EntityId\" in tileEntity:\n tileEntity[\"EntityId\"] = pymclevel.TAG_String(selectedMob())\n if \"SpawnData\" in tileEntity:\n # Try to not clear the spawn data, but only update the mob id\n # tileEntity[\"SpawnData\"] = pymclevel.TAG_Compound()\n tag_id = pymclevel.TAG_String(selectedMob())\n if \"id\" in tileEntity[\"SpawnData\"]:\n tag_id.name = \"id\"\n tileEntity[\"SpawnData\"][\"id\"] = tag_id\n if \"EntityId\" in tileEntity[\"SpawnData\"]:\n tileEntity[\"SpawnData\"][\"EntityId\"] = tag_id\n if \"SpawnPotentials\" in tileEntity:\n for potential in tileEntity[\"SpawnPotentials\"]:\n if 
\"Entity\" in potential:\n # MC 1.9+\n if potential[\"Entity\"][\"id\"].value == id or (\"EntityId\" in potential[\"Entity\"] and potential[\"Entity\"][\"EntityId\"].value == id):\n potential[\"Entity\"] = pymclevel.TAG_Compound()\n potential[\"Entity\"][\"id\"] = pymclevel.TAG_String(selectedMob())\n elif \"Properties\" in potential:\n # MC before 1.9\n if \"Type\" in potential and potential[\"Type\"].value == id:\n potential[\"Type\"] = pymclevel.TAG_String(selectedMob())\n # We also can change some other values in the Properties tag, but it is useless in MC 1.8+.\n # The fact is this data will not be updated by the game after the mob type is changed, but the old mob will not spawn.\n # put_entityid = False\n # put_id = False\n # if \"EntityId\" in potential[\"Properties\"] and potential[\"Properties\"][\"EntityId\"].value == id:\n # put_entityid = True\n # if \"id\" in potential[\"Properties\"] and potential[\"Properties\"][\"id\"].value == id:\n # put_id = True\n # new_props = pymclevel.TAG_Compound()\n # if put_entityid:\n # new_props[\"EntityId\"] = pymclevel.TAG_String(selectedMob())\n # if put_id:\n # new_props[\"id\"] = pymclevel.TAG_String(selectedMob())\n # potential[\"Properties\"] = new_props\n op = MonsterSpawnerEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n @mceutils.alertException\n def editJukebox(self, point):\n discs = {\n \"[No Record]\": None,\n \"13\": 2256,\n \"cat\": 2257,\n \"blocks\": 2258,\n \"chirp\": 2259,\n \"far\": 2260,\n \"mall\": 2261,\n \"mellohi\": 2262,\n \"stal\": 2263,\n \"strad\": 2264,\n \"ward\": 2265,\n \"11\": 2266,\n \"wait\": 2267\n }\n\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n tileEntity[\"id\"] = pymclevel.TAG_String(\"RecordPlayer\")\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n self.editor.level.addTileEntity(tileEntity)\n\n panel = Dialog()\n\n def selectTableRow(i, evt):\n discTable.selectedIndex = i\n\n if evt.num_clicks == 2:\n panel.dismiss()\n\n discTable = TableView(columns=(\n TableColumn(\"\", 200),\n )\n )\n discTable.num_rows = lambda: len(discs)\n discTable.row_data = lambda i: (selectedDisc(i),)\n discTable.row_is_selected = lambda x: x == discTable.selectedIndex\n discTable.click_row = selectTableRow\n discTable.selectedIndex = 0\n\n def selectedDisc(id):\n if id == 0:\n return \"[No Record]\"\n return discs.keys()[discs.values().index(id + 2255)]\n\n def cancel():\n if id == \"[No Record]\":\n discTable.selectedIndex = 0\n else:\n discTable.selectedIndex = discs[id] - 2255\n panel.dismiss()\n\n if \"RecordItem\" in tileEntity:\n if tileEntity[\"RecordItem\"][\"id\"].value == \"minecraft:air\":\n id = \"[No Record]\"\n else:\n id = tileEntity[\"RecordItem\"][\"id\"].value[17:]\n elif \"Record\" in tileEntity:\n if tileEntity[\"Record\"].value == 0:\n id = \"[No Record]\"\n else:\n id = selectedDisc(tileEntity[\"Record\"].value - 2255)\n else:\n id = \"[No Record]\"\n\n if id == \"[No Record]\":\n discTable.selectedIndex = 0\n else:\n discTable.selectedIndex = discs[id] - 2255\n\n oldChoiceCol = Column((Label(_(\"Current: \") + id, align='l', width=200), ))\n newChoiceCol = Column((ValueDisplay(width=200, get_value=lambda: _(\"Change to: \") + selectedDisc(discTable.selectedIndex)), discTable))\n\n lastRow = 
Row((Button(\"OK\", action=panel.dismiss), Button(\"Cancel\", action=cancel)))\n panel.add(Column((oldChoiceCol, newChoiceCol, lastRow)))\n panel.shrink_wrap()\n panel.present()\n\n class JukeboxEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n if id != selectedDisc(discTable.selectedIndex):\n if \"RecordItem\" in tileEntity:\n del tileEntity[\"RecordItem\"]\n if discTable.selectedIndex == 0:\n tileEntity[\"Record\"] = pymclevel.TAG_Int(0)\n self.editor.level.setBlockDataAt(tileEntity[\"x\"].value, tileEntity[\"y\"].value, tileEntity[\"z\"].value, 0)\n else:\n tileEntity[\"Record\"] = pymclevel.TAG_Int(discTable.selectedIndex + 2255)\n self.editor.level.setBlockDataAt(tileEntity[\"x\"].value, tileEntity[\"y\"].value, tileEntity[\"z\"].value, 1)\n op = JukeboxEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n @mceutils.alertException\n def editNoteBlock(self, point):\n notes = [\n \"F# (0.5)\", \"G (0.53)\", \"G# (0.56)\",\n \"A (0.6)\", \"A# (0.63)\", \"B (0.67)\",\n \"C (0.7)\", \"C# (0.75)\", \"D (0.8)\",\n \"D# (0.85)\", \"E (0.9)\", \"F (0.95)\",\n \"F# (1.0)\", \"G (1.05)\", \"G# (1.1)\",\n \"A (1.2)\", \"A# (1.25)\", \"B (1.32)\",\n \"C (1.4)\", \"C# (1.5)\", \"D (1.6)\",\n \"D# (1.7)\", \"E (1.8)\", \"F (1.9)\",\n \"F# (2.0)\"\n ]\n\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n tileEntity[\"id\"] = pymclevel.TAG_String(self.editor.level.defsIds.mcedit_defs.get(\"MobSpawner\", \"MobSpawner\"))\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n tileEntity[\"note\"] = pymclevel.TAG_Byte(0)\n self.editor.level.addTileEntity(tileEntity)\n\n panel = Dialog()\n\n def selectTableRow(i, evt):\n noteTable.selectedIndex = i\n\n if evt.num_clicks == 2:\n panel.dismiss()\n\n noteTable = TableView(columns=(\n TableColumn(\"\", 200),\n )\n )\n noteTable.num_rows = lambda: len(notes)\n noteTable.row_data = lambda i: (notes[i],)\n noteTable.row_is_selected = lambda x: x == noteTable.selectedIndex\n noteTable.click_row = selectTableRow\n noteTable.selectedIndex = 0\n\n def selectedNote():\n return notes[noteTable.selectedIndex]\n\n def cancel():\n noteTable.selectedIndex = id\n panel.dismiss()\n\n id = tileEntity[\"note\"].value\n\n noteTable.selectedIndex = id\n\n oldChoiceCol = Column((Label(_(\"Current: \") + notes[id], align='l', width=200), ))\n newChoiceCol = Column((ValueDisplay(width=200, get_value=lambda: _(\"Change to: \") + selectedNote()), noteTable))\n\n lastRow = Row((Button(\"OK\", action=panel.dismiss), Button(\"Cancel\", action=cancel)))\n panel.add(Column((oldChoiceCol, newChoiceCol, lastRow)))\n 
panel.shrink_wrap()\n panel.present()\n\n class NoteBlockEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n if id != noteTable.selectedIndex:\n tileEntity[\"note\"] = pymclevel.TAG_Byte(noteTable.selectedIndex)\n op = NoteBlockEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n @mceutils.alertException\n def editSign(self, point):\n\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n\n linekeys = [\"Text\" + str(i) for i in xrange(1, 5)]\n\n # From version 1.8, signs accept JSON format.\n # 1.9 no longer supports the old raw string format.\n splitVersion = self.editor.level.gameVersion.split('.')\n newFmtVersion = ['1','9']\n fmt = \"\"\n json_fmt = False\n\n f = lambda a,b: (a + (['0'] * max(len(b) - len(a), 0)), b + (['0'] * max(len(a) - len(b), 0)))\n if False not in map(lambda x,y: (int(x) if x.isdigit() else x) >= (int(y) if y.isdigit() else y),*f(splitVersion, newFmtVersion))[:2]:\n json_fmt = True\n fmt = '{\"text\":\"\"}'\n\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n # Don't know how to handle the difference between wall and standing signs for now...\n # Just leave this as it is until we find a way!\n tileEntity[\"id\"] = pymclevel.TAG_String(\"Sign\")\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n for l in linekeys:\n tileEntity[l] = pymclevel.TAG_String(fmt)\n self.editor.level.addTileEntity(tileEntity)\n\n panel = Dialog()\n\n lineFields = [TextFieldWrapped(width=400) for l in linekeys]\n for l, f in zip(linekeys, lineFields):\n\n f.value = tileEntity[l].value\n\n # Double-quote handling for the old sign text format.\n if f.value == 'null':\n f.value = fmt\n elif json_fmt and f.value == '':\n f.value = fmt\n else:\n if f.value.startswith('\"') and f.value.endswith('\"'):\n f.value = f.value[1:-1]\n if '\\\\\"' in f.value:\n f.value = f.value.replace('\\\\\"', '\"')\n\n colors = [\n u\"§0 Black\",\n u\"§1 Dark Blue\",\n u\"§2 Dark Green\",\n u\"§3 Dark Aqua\",\n u\"§4 Dark Red\",\n u\"§5 Dark Purple\",\n u\"§6 Gold\",\n u\"§7 Gray\",\n u\"§8 Dark Gray\",\n u\"§9 Blue\",\n u\"§a Green\",\n u\"§b Aqua\",\n u\"§c Red\",\n u\"§d Light Purple\",\n u\"§e Yellow\",\n u\"§f White\",\n ]\n\n def menu_picked(index):\n c = u\"§%d\"%index\n currentField = panel.focus_switch.focus_switch\n currentField.text += c # xxx view hierarchy\n currentField.insertion_point = len(currentField.text)\n\n def changeSign():\n unsavedChanges = False\n fmt = '\"{}\"'\n u_fmt = u'\"%s\"'\n if json_fmt:\n fmt = '{}'\n u_fmt = u'%s'\n for l, f in zip(linekeys, lineFields):\n oldText = fmt.format(tileEntity[l])\n tileEntity[l] = pymclevel.TAG_String(u_fmt%f.value[:255])\n if 
fmt.format(tileEntity[l]) != oldText and not unsavedChanges:\n unsavedChanges = True\n if unsavedChanges:\n op = SignEditOperation(self.editor, self.editor.level, tileEntity, undoBackupEntityTag)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n panel.dismiss()\n\n colorMenu = MenuButton(\"Add Color Code...\", colors, menu_picked=menu_picked)\n\n row = Row((Button(\"OK\", action=changeSign), Button(\"Cancel\", action=panel.dismiss)))\n\n column = [Label(\"Edit Sign\")] + lineFields + [colorMenu, row]\n\n panel.add(Column(column))\n panel.shrink_wrap()\n panel.present()\n\n @mceutils.alertException\n def editSkull(self, point):\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n skullTypes = {\n \"Skeleton\": 0,\n \"Wither Skeleton\": 1,\n \"Zombie\": 2,\n \"Player\": 3,\n \"Creeper\": 4,\n }\n\n inverseSkullType = {\n 0: \"Skeleton\",\n 1: \"Wither Skeleton\",\n 2: \"Zombie\",\n 3: \"Player\",\n 4: \"Creeper\",\n }\n\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n # Don't know how to handle the difference between skull types in this context for now...\n # Tests needed!\n tileEntity[\"id\"] = pymclevel.TAG_String(\"Skull\")\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n tileEntity[\"SkullType\"] = pymclevel.TAG_Byte(3)\n self.editor.level.addTileEntity(tileEntity)\n\n titleLabel = Label(\"Edit Skull Data\")\n usernameField = TextFieldWrapped(width=150)\n panel = Dialog()\n skullMenu = ChoiceButton(map(str, skullTypes))\n\n if \"Owner\" in tileEntity:\n usernameField.value = str(tileEntity[\"Owner\"][\"Name\"].value)\n elif \"ExtraType\" in tileEntity:\n usernameField.value = str(tileEntity[\"ExtraType\"].value)\n else:\n usernameField.value = \"\"\n\n oldUserName = usernameField.value\n skullMenu.selectedChoice = inverseSkullType[tileEntity[\"SkullType\"].value]\n oldSelectedSkull = skullMenu.selectedChoice\n\n class SkullEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def updateSkull():\n if usernameField.value != oldUserName or oldSelectedSkull != skullMenu.selectedChoice:\n tileEntity[\"ExtraType\"] = pymclevel.TAG_String(usernameField.value)\n tileEntity[\"SkullType\"] = pymclevel.TAG_Byte(skullTypes[skullMenu.selectedChoice])\n if \"Owner\" in tileEntity:\n del tileEntity[\"Owner\"]\n op = SkullEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n chunk = self.editor.level.getChunk(int(int(point[0]) / 16), int(int(point[2]) / 16))\n chunk.dirty = True\n panel.dismiss()\n\n okBTN = Button(\"OK\", action=updateSkull)\n cancel = Button(\"Cancel\", action=panel.dismiss)\n\n column = [titleLabel, usernameField, skullMenu, okBTN, cancel]\n 
panel.add(Column(column))\n panel.shrink_wrap()\n panel.present()\n\n @mceutils.alertException\n def editCommandBlock(self, point):\n panel = Dialog()\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n tileEntity[\"id\"] = pymclevel.TAG_String(self.editor.level.defsIds.mcedit_defs.get(\"Control\", \"Control\"))\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n tileEntity[\"Command\"] = pymclevel.TAG_String()\n tileEntity[\"CustomName\"] = pymclevel.TAG_String(\"@\")\n tileEntity[\"TrackOutput\"] = pymclevel.TAG_Byte(0)\n tileEntity[\"SuccessCount\"] = pymclevel.TAG_Int(0)\n self.editor.level.addTileEntity(tileEntity)\n\n titleLabel = Label(\"Edit Command Block\")\n commandField = TextFieldWrapped(width=650)\n nameField = TextFieldWrapped(width=200)\n successField = IntInputRow(\"SuccessCount\", min=0, max=15)\n trackOutput = CheckBox()\n\n # Fix for the '§ is ħ' issue\n# try:\n# commandField.value = tileEntity[\"Command\"].value.decode(\"unicode-escape\")\n# except:\n# commandField.value = tileEntity[\"Command\"].value\n commandField.value = tileEntity[\"Command\"].value\n\n oldCommand = commandField.value\n trackOutput.value = tileEntity.get(\"TrackOutput\", pymclevel.TAG_Byte(0)).value\n oldTrackOutput = trackOutput.value\n nameField.value = tileEntity.get(\"CustomName\", pymclevel.TAG_String(\"@\")).value\n oldNameField = nameField.value\n successField.subwidgets[1].value = tileEntity.get(\"SuccessCount\", pymclevel.TAG_Int(0)).value\n oldSuccess = successField.subwidgets[1].value\n\n class CommandBlockEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def updateCommandBlock():\n if oldCommand != commandField.value or oldTrackOutput != trackOutput.value or oldNameField != nameField.value or oldSuccess != successField.subwidgets[1].value:\n tileEntity[\"Command\"] = pymclevel.TAG_String(commandField.value)\n tileEntity[\"TrackOutput\"] = pymclevel.TAG_Byte(trackOutput.value)\n tileEntity[\"CustomName\"] = pymclevel.TAG_String(nameField.value)\n tileEntity[\"SuccessCount\"] = pymclevel.TAG_Int(successField.subwidgets[1].value)\n\n op = CommandBlockEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n chunk = self.editor.level.getChunk(int(int(point[0]) / 16), int(int(point[2]) / 16))\n chunk.dirty = True\n panel.dismiss()\n\n okBTN = Button(\"OK\", action=updateCommandBlock)\n cancel = Button(\"Cancel\", action=panel.dismiss)\n column = [titleLabel, Label(\"Command:\"), commandField, Row((Label(\"Custom Name:\"), nameField)), successField,\n Row((Label(\"Track Output\"), trackOutput)), okBTN, cancel]\n 
panel.add(Column(column))\n panel.shrink_wrap()\n panel.present()\n\n return\n\n @mceutils.alertException\n def editContainer(self, point, containerID):\n tileEntityTag = self.editor.level.tileEntityAt(*point)\n if tileEntityTag is None:\n tileEntityTag = pymclevel.TileEntity.Create(containerID)\n pymclevel.TileEntity.setpos(tileEntityTag, point)\n self.editor.level.addTileEntity(tileEntityTag)\n\n if tileEntityTag[\"id\"].value != containerID:\n return\n\n undoBackupEntityTag = copy.deepcopy(tileEntityTag)\n\n def itemProp(key):\n # xxx do validation here\n def getter(self):\n if 0 == len(tileEntityTag[\"Items\"]):\n return 0\n return tileEntityTag[\"Items\"][self.selectedItemIndex][key].value\n\n def setter(self, val):\n if 0 == len(tileEntityTag[\"Items\"]):\n return\n self.dirty = True\n tileEntityTag[\"Items\"][self.selectedItemIndex][key].value = val\n\n return property(getter, setter)\n\n class ChestWidget(Widget):\n dirty = False\n Slot = itemProp(\"Slot\")\n id = itemProp(\"id\")\n Damage = itemProp(\"Damage\")\n Count = itemProp(\"Count\")\n itemLimit = pymclevel.TileEntity.maxItems.get(containerID, 26)\n\n def slotFormat(slot):\n slotNames = pymclevel.TileEntity.slotNames.get(containerID)\n if slotNames:\n return slotNames.get(slot, slot)\n return slot\n\n chestWidget = ChestWidget()\n chestItemTable = TableView(columns=[\n TableColumn(\"Slot\", 60, \"l\", fmt=slotFormat),\n TableColumn(\"ID / ID Name\", 345, \"l\"),\n TableColumn(\"DMG\", 50, \"l\"),\n TableColumn(\"Count\", 65, \"l\"),\n\n TableColumn(\"Name\", 260, \"l\"),\n ])\n\n def itemName(id, damage):\n try:\n return pymclevel.items.items.findItem(id, damage).name\n except pymclevel.items.ItemNotFound:\n return \"Unknown Item\"\n\n def getRowData(i):\n item = tileEntityTag[\"Items\"][i]\n slot, id, damage, count = item[\"Slot\"].value, item[\"id\"].value, item[\"Damage\"].value, item[\"Count\"].value\n return slot, id, damage, count, itemName(id, damage)\n\n chestWidget.selectedItemIndex = 0\n\n def selectTableRow(i, evt):\n chestWidget.selectedItemIndex = i\n # Disabling the item selector for now, since we need PE items resources.\n# if evt.num_clicks > 1:\n# selectButtonAction()\n\n def changeValue(data):\n s, i, c, d = data\n s = int(s)\n chestWidget.Slot = s\n chestWidget.id = i\n chestWidget.Count = int(c)\n chestWidget.Damage = int(d)\n\n\n chestItemTable.num_rows = lambda: len(tileEntityTag[\"Items\"])\n chestItemTable.row_data = getRowData\n chestItemTable.row_is_selected = lambda x: x == chestWidget.selectedItemIndex\n chestItemTable.click_row = selectTableRow\n chestItemTable.change_value = changeValue\n\n def selectButtonAction():\n SlotEditor(chestItemTable,\n (chestWidget.Slot, chestWidget.id or u\"\", chestWidget.Count, chestWidget.Damage)\n ).present()\n\n maxSlot = pymclevel.TileEntity.maxItems.get(tileEntityTag[\"id\"].value, 27) - 1\n fieldRow = (\n IntInputRow(\"Slot: \", ref=AttrRef(chestWidget, 'Slot'), min=0, max=maxSlot),\n BasicTextInputRow(\"ID / ID Name: \", ref=AttrRef(chestWidget, 'id'), width=300),\n # Text to allow the input of internal item names\n IntInputRow(\"DMG: \", ref=AttrRef(chestWidget, 'Damage'), min=0, max=32767),\n IntInputRow(\"Count: \", ref=AttrRef(chestWidget, 'Count'), min=-1, max=64),\n # This button is inactive for now, because we need to work with different IDs types:\n # * The 'human' IDs: Stone, Glass, Swords...\n # * The MC ones: minecraft:stone, minecraft:air...\n # * The PE ones: 0:0, 1:0...\n# Button(\"Select\", action=selectButtonAction)\n )\n\n def 
deleteFromWorld():\n i = chestWidget.selectedItemIndex\n item = tileEntityTag[\"Items\"][i]\n id = item[\"id\"].value\n Damage = item[\"Damage\"].value\n\n deleteSameDamage = CheckBoxLabel(\"Only delete items with the same damage value\")\n deleteBlocksToo = CheckBoxLabel(\"Also delete blocks placed in the world\")\n if id not in (8, 9, 10, 11): # fluid blocks\n deleteBlocksToo.value = True\n\n w = wrapped_label(\n \"WARNING: You are about to modify the entire world. This cannot be undone. Really delete all copies of this item from all land, chests, furnaces, dispensers, dropped items, item-containing tiles, and player inventories in this world?\",\n 60)\n col = (w, deleteSameDamage)\n if id < 256:\n col += (deleteBlocksToo,)\n\n d = Dialog(Column(col), [\"OK\", \"Cancel\"])\n\n if d.present() == \"OK\":\n def deleteItemsIter():\n i = 0\n if deleteSameDamage.value:\n def matches(t):\n return t[\"id\"].value == id and t[\"Damage\"].value == Damage\n else:\n def matches(t):\n return t[\"id\"].value == id\n\n def matches_itementity(e):\n if e[\"id\"].value != \"Item\":\n return False\n if \"Item\" not in e:\n return False\n t = e[\"Item\"]\n return matches(t)\n\n for player in self.editor.level.players:\n tag = self.editor.level.getPlayerTag(player)\n tag[\"Inventory\"].value = [t for t in tag[\"Inventory\"].value if not matches(t)]\n\n for chunk in self.editor.level.getChunks():\n if id < 256 and deleteBlocksToo.value:\n matchingBlocks = chunk.Blocks == id\n if deleteSameDamage.value:\n matchingBlocks &= chunk.Data == Damage\n if any(matchingBlocks):\n chunk.Blocks[matchingBlocks] = 0\n chunk.Data[matchingBlocks] = 0\n chunk.chunkChanged()\n self.editor.invalidateChunks([chunk.chunkPosition])\n\n for te in chunk.TileEntities:\n if \"Items\" in te:\n l = len(te[\"Items\"])\n\n te[\"Items\"].value = [t for t in te[\"Items\"].value if not matches(t)]\n if l != len(te[\"Items\"]):\n chunk.dirty = True\n entities = [e for e in chunk.Entities if matches_itementity(e)]\n if len(entities) != len(chunk.Entities):\n chunk.Entities.value = entities\n chunk.dirty = True\n\n yield (i, self.editor.level.chunkCount)\n i += 1\n\n progressInfo = _(\"Deleting the item {0} from the entire world ({1} chunks)\").format(\n itemName(chestWidget.id, 0), self.editor.level.chunkCount)\n\n showProgress(progressInfo, deleteItemsIter(), cancel=True)\n\n self.editor.addUnsavedEdit()\n chestWidget.selectedItemIndex = min(chestWidget.selectedItemIndex, len(tileEntityTag[\"Items\"]) - 1)\n\n def deleteItem():\n i = chestWidget.selectedItemIndex\n item = tileEntityTag[\"Items\"][i]\n tileEntityTag[\"Items\"].value = [t for t in tileEntityTag[\"Items\"].value if t is not item]\n chestWidget.selectedItemIndex = min(chestWidget.selectedItemIndex, len(tileEntityTag[\"Items\"]) - 1)\n\n def deleteEnable():\n return len(tileEntityTag[\"Items\"]) and chestWidget.selectedItemIndex != -1\n\n def addEnable():\n return len(tileEntityTag[\"Items\"]) < chestWidget.itemLimit\n\n def addItem():\n slot = 0\n for item in tileEntityTag[\"Items\"]:\n if slot == item[\"Slot\"].value:\n slot += 1\n if slot >= chestWidget.itemLimit:\n return\n item = pymclevel.TAG_Compound()\n item[\"id\"] = pymclevel.TAG_String(\"minecraft:\")\n item[\"Damage\"] = pymclevel.TAG_Short(0)\n item[\"Slot\"] = pymclevel.TAG_Byte(slot)\n item[\"Count\"] = pymclevel.TAG_Byte(1)\n tileEntityTag[\"Items\"].append(item)\n\n addItemButton = Button(\"New Item (1.7+)\", action=addItem, enable=addEnable)\n deleteItemButton = Button(\"Delete This Item\", action=deleteItem, 
enable=deleteEnable)\n deleteFromWorldButton = Button(\"Delete All Instances Of This Item From World\", action=deleteFromWorld,\n enable=deleteEnable)\n deleteCol = Column((addItemButton, deleteItemButton, deleteFromWorldButton))\n\n fieldRow = Row(fieldRow)\n col = Column((chestItemTable, fieldRow, deleteCol))\n\n chestWidget.add(col)\n chestWidget.shrink_wrap()\n\n Dialog(client=chestWidget, responses=[\"Done\"]).present()\n level = self.editor.level\n\n class ChestEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n level.addTileEntity(tileEntityTag)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntityTag)\n level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntityTag), (1, 1, 1))\n\n def redo(self):\n level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntityTag), (1, 1, 1))\n\n if chestWidget.dirty:\n op = ChestEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n @mceutils.alertException\n def editFlowerPot(self, point):\n panel = Dialog()\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n tileEntity[\"id\"] = pymclevel.TAG_String(self.editor.level.mcedit_defs.get(\"FlowerPot\", \"FlowerPot\"))\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n tileEntity[\"Item\"] = pymclevel.TAG_String(\"\")\n tileEntity[\"Data\"] = pymclevel.TAG_Int(0)\n self.editor.level.addTileEntity(tileEntity)\n\n titleLabel = Label(\"Edit Flower Pot\")\n Item = TextFieldWrapped(width=300, text=tileEntity[\"Item\"].value)\n oldItem = Item.value\n Data = IntField(width=300,text=str(tileEntity[\"Data\"].value))\n oldData = Data.value\n\n class FlowerPotEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def updateFlowerPot():\n if oldData != Data.value or oldItem != Item.value:\n tileEntity[\"Item\"] = pymclevel.TAG_String(Item.value)\n tileEntity[\"Data\"] = pymclevel.TAG_Int(Data.value)\n\n op = FlowerPotEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n chunk = self.editor.level.getChunk(int(int(point[0]) / 16), int(int(point[2]) / 16))\n chunk.dirty = True\n panel.dismiss()\n\n okBtn = Button(\"OK\", action=updateFlowerPot)\n cancel = Button(\"Cancel\", 
action=panel.dismiss)\n panel.add(Column((titleLabel, Row((Label(\"Item\"), Item)), Row((Label(\"Data\"), Data)), okBtn, cancel)))\n panel.shrink_wrap()\n panel.present()\n\n @mceutils.alertException\n def editEnchantmentTable(self, point):\n panel = Dialog()\n tileEntity = self.editor.level.tileEntityAt(*point)\n undoBackupEntityTag = copy.deepcopy(tileEntity)\n if not tileEntity:\n tileEntity = pymclevel.TAG_Compound()\n tileEntity[\"id\"] = pymclevel.TAG_String(self.editor.level.defsIds.mcedit_defs.get(\"EnchantTable\", \"EnchantTable\"))\n tileEntity[\"x\"] = pymclevel.TAG_Int(point[0])\n tileEntity[\"y\"] = pymclevel.TAG_Int(point[1])\n tileEntity[\"z\"] = pymclevel.TAG_Int(point[2])\n tileEntity[\"CustomName\"] = pymclevel.TAG_String(\"\")\n self.editor.level.addTileEntity(tileEntity)\n\n titleLabel = Label(\"Edit Enchantment Table\")\n try:\n name = tileEntity[\"CustomName\"].value\n except:\n name = \"\"\n name = TextFieldWrapped(width=300, text=name)\n oldName = name.value\n\n class EnchantmentTableEditOperation(Operation):\n def __init__(self, tool, level):\n self.tool = tool\n self.level = level\n self.undoBackupEntityTag = undoBackupEntityTag\n self.canUndo = False\n\n def perform(self, recordUndo=True):\n if self.level.saving:\n alert(\"Cannot perform action while saving is taking place\")\n return\n self.level.addTileEntity(tileEntity)\n self.canUndo = True\n\n def undo(self):\n self.redoBackupEntityTag = copy.deepcopy(tileEntity)\n self.level.addTileEntity(self.undoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def redo(self):\n self.level.addTileEntity(self.redoBackupEntityTag)\n return pymclevel.BoundingBox(pymclevel.TileEntity.pos(tileEntity), (1, 1, 1))\n\n def updateEnchantmentTable():\n if oldName != name.value:\n tileEntity[\"CustomName\"] = pymclevel.TAG_String(name.value)\n\n op = EnchantmentTableEditOperation(self.editor, self.editor.level)\n self.editor.addOperation(op)\n if op.canUndo:\n self.editor.addUnsavedEdit()\n\n chunk = self.editor.level.getChunk(int(int(point[0]) / 16), int(int(point[2]) / 16))\n chunk.dirty = True\n panel.dismiss()\n\n okBtn = Button(\"OK\", action=updateEnchantmentTable)\n cancel = Button(\"Cancel\", action=panel.dismiss)\n panel.add(Column((titleLabel, Row((Label(\"Custom Name\"), name)), okBtn, cancel)))\n panel.shrink_wrap()\n panel.present()\n\n should_lock = False\n\n def rightClickDown(self, evt):\n # self.rightMouseDragStart = datetime.now()\n self.should_lock = True\n self.toggleMouseLook()\n\n def rightClickUp(self, evt):\n if not get_top_widget().is_modal:\n return\n if not self.should_lock and self.editor.level:\n self.should_lock = False\n self.toggleMouseLook()\n # if self.rightMouseDragStart is None:\n # return\n\n # td = datetime.now() - self.rightMouseDragStart\n # # except AttributeError:\n # # return\n # # print \"RightClickUp: \", td\n # if td.microseconds > 180000:\n # self.mouseLookOff()\n\n def leftClickDown(self, evt):\n self.editor.toolMouseDown(evt, self.blockFaceUnderCursor)\n\n if evt.num_clicks == 2:\n def distance2(p1, p2):\n return numpy.sum(map(lambda a, b: (a - b) ** 2, p1, p2))\n\n point, face = self.blockFaceUnderCursor\n if point:\n point = map(lambda x: int(numpy.floor(x)), point)\n if self.editor.currentTool is self.editor.selectionTool:\n try:\n block = self.editor.level.blockAt(*point)\n materials = self.editor.level.materials\n if distance2(point, self.cameraPosition) > 4:\n blockEditors = {\n materials.MonsterSpawner.ID: 
self.editMonsterSpawner,\n materials.Sign.ID: self.editSign,\n materials.WallSign.ID: self.editSign,\n materials.MobHead.ID: self.editSkull,\n materials.CommandBlock.ID: self.editCommandBlock,\n materials.CommandBlockRepeating.ID: self.editCommandBlock,\n materials.CommandBlockChain.ID: self.editCommandBlock,\n pymclevel.alphaMaterials.Jukebox.ID: self.editJukebox,\n materials.NoteBlock.ID: self.editNoteBlock,\n materials.FlowerPot.ID: self.editFlowerPot,\n materials.EnchantmentTable.ID: self.editEnchantmentTable\n }\n edit = blockEditors.get(block)\n if edit:\n self.editor.endSelection()\n edit(point)\n else:\n # detect \"container\" tiles\n te = self.editor.level.tileEntityAt(*point)\n if te and \"Items\" in te and \"id\" in te:\n self.editor.endSelection()\n self.editContainer(point, te[\"id\"].value)\n except (EnvironmentError, pymclevel.ChunkNotPresent):\n pass\n\n def leftClickUp(self, evt):\n self.editor.toolMouseUp(evt, self.blockFaceUnderCursor)\n\n # --- Event handlers ---\n\n def mouse_down(self, evt):\n button = keys.remapMouseButton(evt.button)\n logging.debug(\"Mouse down %d @ %s\", button, evt.pos)\n\n if button == 1:\n if sys.platform == \"darwin\" and evt.ctrl:\n self.rightClickDown(evt)\n else:\n self.leftClickDown(evt)\n elif button == 2:\n self.rightClickDown(evt)\n elif button == 3 and sys.platform == \"darwin\" and evt.alt:\n self.leftClickDown(evt)\n else:\n evt.dict['keyname'] = \"mouse{}\".format(button)\n self.editor.key_down(evt)\n\n self.editor.focus_on(None)\n # self.focus_switch = None\n\n def mouse_up(self, evt):\n button = keys.remapMouseButton(evt.button)\n logging.debug(\"Mouse up %d @ %s\", button, evt.pos)\n if button == 1:\n if sys.platform == \"darwin\" and evt.ctrl:\n self.rightClickUp(evt)\n else:\n self.leftClickUp(evt)\n elif button == 2:\n self.rightClickUp(evt)\n elif button == 3 and sys.platform == \"darwin\" and evt.alt:\n self.leftClickUp(evt)\n else:\n evt.dict['keyname'] = \"mouse{}\".format(button)\n self.editor.key_up(evt)\n\n def mouse_drag(self, evt):\n self.mouse_move(evt)\n self.editor.mouse_drag(evt)\n\n lastRendererUpdate = datetime.now()\n\n def mouse_move(self, evt):\n if self.avoidMouseJumpBug == 2:\n self.avoidMouseJumpBug = 0\n return\n\n def sensitivityAdjust(d):\n return d * config.controls.mouseSpeed.get() / 10.0\n\n self.editor.mouseEntered = True\n if self.mouseMovesCamera:\n self.should_lock = False\n pitchAdjust = sensitivityAdjust(evt.rel[1])\n if self.invertMousePitch:\n pitchAdjust = -pitchAdjust\n self.yaw += sensitivityAdjust(evt.rel[0])\n self.pitch += pitchAdjust\n if datetime.now() - self.lastRendererUpdate > timedelta(0, 0, 500000):\n self.editor.renderer.loadNearbyChunks()\n self.lastRendererUpdate = datetime.now()\n\n # adjustLimit = 2\n\n # self.oldMousePosition = (x, y)\n # if (self.startingMousePosition[0] - x > adjustLimit or self.startingMousePosition[1] - y > adjustLimit or\n # self.startingMousePosition[0] - x < -adjustLimit or self.startingMousePosition[1] - y < -adjustLimit):\n # mouse.set_pos(*self.startingMousePosition)\n # event.get(MOUSEMOTION)\n # self.oldMousePosition = (self.startingMousePosition)\n\n #if config.settings.showCommands.get():\n\n def activeevent(self, evt):\n if evt.state & 0x2 and evt.gain != 0:\n self.avoidMouseJumpBug = 1\n\n @property\n def tooltipText(self):\n #if self.hoveringCommandBlock[0] and (self.editor.currentTool is self.editor.selectionTool and self.editor.selectionTool.infoKey == 0):\n # return self.hoveringCommandBlock[1] or \"[Empty]\"\n if self.editor.currentTool 
is self.editor.selectionTool and self.editor.selectionTool.infoKey == 0 and config.settings.showQuickBlockInfo.get():\n point, face = self.blockFaceUnderCursor\n if point:\n if not self.block_info_parsers or (BlockInfoParser.last_level != self.editor.level):\n self.block_info_parsers = BlockInfoParser.get_parsers(self.editor)\n block = self.editor.level.blockAt(*point)\n if block:\n if block in self.block_info_parsers:\n return self.block_info_parsers[block](point)\n return self.editor.currentTool.worldTooltipText\n\n floorQuad = numpy.array(((-4000.0, 0.0, -4000.0),\n (-4000.0, 0.0, 4000.0),\n (4000.0, 0.0, 4000.0),\n (4000.0, 0.0, -4000.0),\n ), dtype='float32')\n\n def updateFloorQuad(self):\n floorQuad = ((-4000.0, 0.0, -4000.0),\n (-4000.0, 0.0, 4000.0),\n (4000.0, 0.0, 4000.0),\n (4000.0, 0.0, -4000.0),\n )\n\n floorQuad = numpy.array(floorQuad, dtype='float32')\n if self.editor.renderer.inSpace():\n floorQuad *= 8.0\n floorQuad += (self.cameraPosition[0], 0.0, self.cameraPosition[2])\n self.floorQuad = floorQuad\n self.floorQuadList.invalidate()\n\n def drawFloorQuad(self):\n self.floorQuadList.call(self._drawFloorQuad)\n\n @staticmethod\n def _drawCeiling():\n lines = []\n minz = minx = -256\n maxz = maxx = 256\n append = lines.append\n for x in xrange(minx, maxx + 1, 16):\n append((x, 0, minz))\n append((x, 0, maxz))\n for z in xrange(minz, maxz + 1, 16):\n append((minx, 0, z))\n append((maxx, 0, z))\n\n GL.glColor(0.3, 0.7, 0.9)\n GL.glVertexPointer(3, GL.GL_FLOAT, 0, numpy.array(lines, dtype='float32'))\n\n GL.glEnable(GL.GL_DEPTH_TEST)\n GL.glDepthMask(False)\n GL.glDrawArrays(GL.GL_LINES, 0, len(lines))\n GL.glDisable(GL.GL_DEPTH_TEST)\n GL.glDepthMask(True)\n\n def drawCeiling(self):\n GL.glMatrixMode(GL.GL_MODELVIEW)\n # GL.glPushMatrix()\n x, y, z = self.cameraPosition\n x -= x % 16\n z -= z % 16\n y = self.editor.level.Height\n GL.glTranslate(x, y, z)\n self.ceilingList.call(self._drawCeiling)\n GL.glTranslate(-x, -y, -z)\n\n _floorQuadList = None\n\n @property\n def floorQuadList(self):\n if not self._floorQuadList:\n self._floorQuadList = glutils.DisplayList()\n return self._floorQuadList\n\n _ceilingList = None\n\n @property\n def ceilingList(self):\n if not self._ceilingList:\n self._ceilingList = glutils.DisplayList()\n return self._ceilingList\n\n @property\n def floorColor(self):\n if self.drawSky:\n return 0.0, 0.0, 1.0, 0.3\n else:\n return 0.0, 1.0, 0.0, 0.15\n\n # floorColor = (0.0, 0.0, 1.0, 0.1)\n\n def _drawFloorQuad(self):\n GL.glDepthMask(True)\n GL.glPolygonOffset(DepthOffset.ChunkMarkers + 2, DepthOffset.ChunkMarkers + 2)\n GL.glVertexPointer(3, GL.GL_FLOAT, 0, self.floorQuad)\n GL.glColor(*self.floorColor)\n with gl.glEnable(GL.GL_BLEND, GL.GL_DEPTH_TEST, GL.GL_POLYGON_OFFSET_FILL):\n GL.glDrawArrays(GL.GL_QUADS, 0, 4)\n\n @property\n def drawSky(self):\n return self._drawSky\n\n @drawSky.setter\n def drawSky(self, val):\n self._drawSky = val\n if self.skyList:\n self.skyList.invalidate()\n if self._floorQuadList:\n self._floorQuadList.invalidate()\n\n skyList = None\n\n def drawSkyBackground(self):\n if self.skyList is None:\n self.skyList = glutils.DisplayList()\n self.skyList.call(self._drawSkyBackground)\n\n def _drawSkyBackground(self):\n GL.glMatrixMode(GL.GL_MODELVIEW)\n GL.glPushMatrix()\n GL.glLoadIdentity()\n GL.glMatrixMode(GL.GL_PROJECTION)\n GL.glPushMatrix()\n GL.glLoadIdentity()\n GL.glEnableClientState(GL.GL_COLOR_ARRAY)\n\n quad = numpy.array([-1, -1, -1, 1, 1, 1, 1, -1], dtype='float32')\n if self.editor.level.dimNo == -1:\n colors = 
numpy.array([0x90, 0x00, 0x00, 0xff,\n 0x90, 0x00, 0x00, 0xff,\n 0x90, 0x00, 0x00, 0xff,\n 0x90, 0x00, 0x00, 0xff, ], dtype='uint8')\n elif self.editor.level.dimNo == 1:\n colors = numpy.array([0x22, 0x27, 0x28, 0xff,\n 0x22, 0x27, 0x28, 0xff,\n 0x22, 0x27, 0x28, 0xff,\n 0x22, 0x27, 0x28, 0xff, ], dtype='uint8')\n else:\n colors = numpy.array([0x48, 0x49, 0xBA, 0xff,\n 0x8a, 0xaf, 0xff, 0xff,\n 0x8a, 0xaf, 0xff, 0xff,\n 0x48, 0x49, 0xBA, 0xff, ], dtype='uint8')\n\n alpha = 1.0\n\n if alpha > 0.0:\n if alpha < 1.0:\n GL.glEnable(GL.GL_BLEND)\n\n GL.glVertexPointer(2, GL.GL_FLOAT, 0, quad)\n GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, 0, colors)\n GL.glDrawArrays(GL.GL_QUADS, 0, 4)\n\n if alpha < 1.0:\n GL.glDisable(GL.GL_BLEND)\n\n GL.glDisableClientState(GL.GL_COLOR_ARRAY)\n GL.glMatrixMode(GL.GL_PROJECTION)\n GL.glPopMatrix()\n GL.glMatrixMode(GL.GL_MODELVIEW)\n GL.glPopMatrix()\n\n enableMouseLag = config.settings.enableMouseLag.property()\n\n @property\n def drawFog(self):\n return self._drawFog and not self.editor.renderer.inSpace()\n\n @drawFog.setter\n def drawFog(self, val):\n self._drawFog = val\n\n fogColor = numpy.array([0.6, 0.8, 1.0, 1.0], dtype='float32')\n fogColorBlack = numpy.array([0.0, 0.0, 0.0, 1.0], dtype='float32')\n\n def enableFog(self):\n GL.glEnable(GL.GL_FOG)\n if self.drawSky:\n GL.glFogfv(GL.GL_FOG_COLOR, self.fogColor)\n else:\n GL.glFogfv(GL.GL_FOG_COLOR, self.fogColorBlack)\n\n GL.glFogf(GL.GL_FOG_DENSITY, 0.0001 * config.settings.fogIntensity.get())\n\n @staticmethod\n def disableFog():\n GL.glDisable(GL.GL_FOG)\n\n def getCameraPoint(self):\n distance = self.editor.currentTool.cameraDistance\n return [i for i in itertools.imap(lambda p, d: int(numpy.floor(p + d * distance)),\n self.cameraPosition,\n self.cameraVector)]\n\n blockFaceUnderCursor = (0, 0, 0), (0, 0, 0)\n\n viewingFrustum = None\n\n def setup_projection(self):\n distance = 1.0\n if self.editor.renderer.inSpace():\n distance = 8.0\n GLU.gluPerspective(max(self.fov, 25.0), self.ratio, self.near * distance, self.far * distance)\n\n def setup_modelview(self):\n self.setModelview()\n\n def gl_draw(self):\n self.tickCamera(self.editor.frameStartTime, self.editor.cameraInputs, self.editor.renderer.inSpace())\n self.render()\n\n def render(self):\n self.viewingFrustum = frustum.Frustum.fromViewingMatrix()\n\n if self.superSecretSettings:\n self.editor.drawStars()\n if self.drawSky:\n self.drawSkyBackground()\n if self.drawFog:\n self.enableFog()\n\n self.drawFloorQuad()\n\n self.editor.renderer.viewingFrustum = self.viewingFrustum\n self.editor.renderer.draw()\n\n if self.showCeiling and not self.editor.renderer.inSpace():\n self.drawCeiling()\n\n if self.editor.level:\n try:\n self.updateBlockFaceUnderCursor()\n except (EnvironmentError, pymclevel.ChunkNotPresent) as e:\n logging.debug(\"Updating cursor block: %s\", e)\n self.blockFaceUnderCursor = (None, None)\n\n self.root.update_tooltip()\n\n (blockPosition, faceDirection) = self.blockFaceUnderCursor\n if blockPosition:\n self.editor.updateInspectionString(blockPosition)\n\n if self.find_widget(mouse.get_pos()) == self:\n ct = self.editor.currentTool\n if ct:\n ct.drawTerrainReticle()\n ct.drawToolReticle()\n else:\n self.editor.drawWireCubeReticle()\n\n for t in self.editor.toolbar.tools:\n t.drawTerrainMarkers()\n t.drawToolMarkers()\n\n if self.drawFog:\n self.disableFog()\n\n if self.compassToggle:\n if self._compass is None:\n self._compass = CompassOverlay()\n\n x = getattr(getattr(self.editor, 'copyPanel', None), 'width', 0)\n if x:\n x = x 
/float( self.editor.mainViewport.width)\n self._compass.x = x\n self._compass.yawPitch = self.yaw, 0\n\n with gl.glPushMatrix(GL.GL_PROJECTION):\n GL.glLoadIdentity()\n GL.glOrtho(0., 1., float(self.height) / self.width, 0, -200, 200)\n\n self._compass.draw()\n else:\n self._compass = None\n\n _compass = None\n \nclass BlockInfoParser(object):\n last_level = None\n nbt_ending = \"\\n\\nPress ALT for NBT\"\n edit_ending = \", Double-Click to Edit\"\n \n @classmethod\n def get_parsers(cls, editor):\n cls.last_level = editor.level\n parser_map = {}\n for subcls in cls.__subclasses__():\n instance = subcls(editor.level)\n try:\n blocks = instance.getBlocks()\n except KeyError:\n continue\n if isinstance(blocks, (str, int)):\n parser_map[blocks] = instance.parse_info\n elif isinstance(blocks, (list, tuple)):\n for block in blocks:\n parser_map[block] = instance.parse_info\n return parser_map\n \n def getBlocks(self):\n raise NotImplementedError()\n \n def parse_info(self, pos):\n raise NotImplementedError()\n\n\nclass SpawnerInfoParser(BlockInfoParser):\n \n def __init__(self, level):\n self.level = level\n \n def getBlocks(self):\n return self.level.materials[\"minecraft:mob_spawner\"].ID\n \n def parse_info(self, pos):\n tile_entity = self.level.tileEntityAt(*pos)\n if tile_entity:\n spawn_data = tile_entity.get(\"SpawnData\", {})\n if spawn_data:\n id = spawn_data.get('EntityId', None)\n if not id:\n id = spawn_data.get('id', None)\n if not id:\n value = repr(NameError(\"Malformed spawn data: could not find 'EntityId' or 'id' tag.\"))\n else:\n value = id.value\n return \"{} Spawner{}{}\".format(value, self.nbt_ending, self.edit_ending)\n return \"[Empty]{}{}\".format(self.nbt_ending, self.edit_ending)\n \nclass JukeboxInfoParser(BlockInfoParser):\n id_records = {\n 2256: \"13\",\n 2257: \"Cat\",\n 2258: \"Blocks\",\n 2259: \"Chirp\",\n 2260: \"Far\",\n 2261: \"Mall\",\n 2262: \"Mellohi\",\n 2263: \"Stal\",\n 2264: \"Strad\",\n 2265: \"Ward\",\n 2266: \"11\",\n 2267: \"Wait\"\n }\n \n name_records = {\n \"minecraft:record_13\": \"13\",\n \"minecraft:record_cat\": \"Cat\",\n \"minecraft:record_blocks\": \"Blocks\",\n \"minecraft:record_chirp\": \"Chirp\",\n \"minecraft:record_far\": \"Far\",\n \"minecraft:record_mall\": \"Mall\",\n \"minecraft:record_mellohi\": \"Mellohi\",\n \"minecraft:record_stal\": \"Stal\",\n \"minecraft:record_strad\": \"Strad\",\n \"minecraft:record_ward\": \"Ward\",\n \"minecraft:record_11\": \"11\",\n \"minecraft:record_wait\": \"Wait\"\n }\n \n def __init__(self, level):\n self.level = level\n \n def getBlocks(self):\n return self.level.materials[\"minecraft:jukebox\"].ID\n \n def parse_info(self, pos):\n tile_entity = self.level.tileEntityAt(*pos)\n if tile_entity:\n if \"Record\" in tile_entity:\n value = tile_entity[\"Record\"].value\n if value in self.id_records:\n return self.id_records[value] + \" Record\" + self.nbt_ending + self.edit_ending\n elif \"RecordItem\" in tile_entity:\n value = tile_entity[\"RecordItem\"][\"id\"].value\n if value in self.name_records:\n return \"{} Record{}{}\".format(self.name_records[value], self.nbt_ending, self.edit_ending)\n return \"[No Record]{}{}\".format(self.nbt_ending, self.edit_ending)\n \nclass CommandBlockInfoParser(BlockInfoParser):\n \n def __init__(self, level):\n self.level = level\n \n def getBlocks(self):\n return [\n self.level.materials[\"minecraft:command_block\"].ID,\n self.level.materials[\"minecraft:repeating_command_block\"].ID,\n self.level.materials[\"minecraft:chain_command_block\"].ID\n ]\n \n def 
parse_info(self, pos):\n tile_entity = self.level.tileEntityAt(*pos)\n if tile_entity:\n value = tile_entity.get(\"Command\", pymclevel.TAG_String(\"\")).value\n if value:\n if len(value) > 1500:\n return \"{}\\n**COMMAND IS TOO LONG TO SHOW MORE**{}{}\".format(value[:1500], self.nbt_ending, self.edit_ending)\n return \"{}{}{}\".format(value, self.nbt_ending, self.edit_ending)\n return \"[Empty Command Block]{}{}\".format(self.nbt_ending, self.edit_ending)\n \nclass ContainerInfoParser(BlockInfoParser):\n \n def __init__(self, level):\n self.level = level\n \n def getBlocks(self):\n return [\n self.level.materials[\"minecraft:dispenser\"].ID,\n self.level.materials[\"minecraft:chest\"].ID,\n self.level.materials[\"minecraft:furnace\"].ID,\n self.level.materials[\"minecraft:lit_furnace\"].ID,\n self.level.materials[\"minecraft:trapped_chest\"].ID,\n self.level.materials[\"minecraft:hopper\"].ID,\n self.level.materials[\"minecraft:dropper\"].ID,\n self.level.materials[\"minecraft:brewing_stand\"].ID\n ]\n \n def parse_info(self, pos):\n tile_entity = self.level.tileEntityAt(*pos)\n if tile_entity:\n return \"Contains {} Items {}{}\".format(len(tile_entity.get(\"Items\", [])), self.nbt_ending, self.edit_ending)\n return \"[Empty Container]{}{}\".format(self.nbt_ending, self.edit_ending)\n\ndef unproject(x, y, z):\n try:\n return GLU.gluUnProject(x, y, z)\n except ValueError: # projection failed\n return 0, 0, 0\n" ]
[ [ "numpy.array", "numpy.sin", "numpy.radians", "numpy.cos", "numpy.floor" ] ]
ziyoujiyi/PaddleRec
[ "9a107c56af2d1ee282975bcc8edb1ad5fb7e7973" ]
[ "models/recall/ncf/evaluate.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport sklearn\nimport math\n\"\"\"\nExtracting information from infer data\n\"\"\"\nfilename = './result.txt'\nf = open(filename, \"r\")\nlines = f.readlines()\nf.close()\nresult = []\nfor line in lines:\n if \"prediction\" in str(line):\n result.append(line)\nresult = result[:-1]\n\npair = []\nfor line in result:\n line = line.strip().split(\",\")\n for seg in line:\n if \"user\" in seg:\n user_id = seg.strip().split(\":\")[1].strip(\" \").strip(\"[]\")\n if \"prediction\" in seg:\n prediction = seg.strip().split(\":\")[1].strip(\" \").strip(\"[]\")\n if \"label\" in seg:\n label = seg.strip().split(\":\")[1].strip(\" \").strip(\"[]\")\n pair.append([int(user_id), float(prediction), int(label)])\n\n\ndef takeSecond(x):\n return x[1]\n\n\n\"\"\"\nEvaluate the performance (Hit_Ratio, NDCG) of top-K recommendation\n\"\"\"\nhits = []\nndcg = []\npair = [pair[i:i + 100] for i in range(0, len(pair), 100)]\nfor user in pair:\n user.sort(key=takeSecond, reverse=True)\n each_user_top10_line = user[:10]\n each_user_top10_line_label = [i[2] for i in each_user_top10_line]\n if 1 in each_user_top10_line_label:\n i = each_user_top10_line_label.index(1)\n ndcg.append(math.log(2) / math.log(i + 2))\n hits.append(1)\n else:\n hits.append(0)\n ndcg.append(0)\n\nprint(\"user_num:\", len(hits))\nprint(\"hit ratio:\", np.array(hits).mean())\nprint(\"ndcg:\", np.array(ndcg).mean())\n" ]
[ [ "numpy.array" ] ]
Kayce001/mmdeploy
[ "59470fef0b28e0b760c72269e0696bbdf57db7f1", "59470fef0b28e0b760c72269e0696bbdf57db7f1" ]
[ "mmdeploy/codebase/mmcls/deploy/classification_model.py", "mmdeploy/codebase/mmdet/deploy/object_detection_model.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import List, Sequence, Union\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcls.datasets import DATASETS\nfrom mmcls.models.classifiers.base import BaseClassifier\nfrom mmcv.utils import Registry\n\nfrom mmdeploy.codebase.base import BaseBackendModel\nfrom mmdeploy.utils import (Backend, get_backend, get_codebase_config,\n load_config)\n\n\ndef __build_backend_model(cls_name: str, registry: Registry, *args, **kwargs):\n return registry.module_dict[cls_name](*args, **kwargs)\n\n\n__BACKEND_MODEL = mmcv.utils.Registry(\n 'backend_classifiers', build_func=__build_backend_model)\n\n\n@__BACKEND_MODEL.register_module('end2end')\nclass End2EndModel(BaseBackendModel):\n \"\"\"End to end model for inference of classification.\n\n Args:\n backend (Backend): The backend enum, specifying backend type.\n backend_files (Sequence[str]): Paths to all required backend files(e.g.\n '.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).\n device (str): A string represents device type.\n class_names (Sequence[str]): A list of string specifying class names.\n deploy_cfg (str | mmcv.Config): Deployment config file or loaded Config\n object.\n \"\"\"\n\n def __init__(\n self,\n backend: Backend,\n backend_files: Sequence[str],\n device: str,\n class_names: Sequence[str],\n deploy_cfg: Union[str, mmcv.Config] = None,\n ):\n super(End2EndModel, self).__init__(deploy_cfg=deploy_cfg)\n self.CLASSES = class_names\n self.deploy_cfg = deploy_cfg\n self._init_wrapper(\n backend=backend, backend_files=backend_files, device=device)\n\n def _init_wrapper(self, backend: Backend, backend_files: Sequence[str],\n device: str):\n output_names = self.output_names\n self.wrapper = BaseBackendModel._build_wrapper(\n backend=backend,\n backend_files=backend_files,\n device=device,\n output_names=output_names,\n deploy_cfg=self.deploy_cfg)\n\n def forward(self, img: List[torch.Tensor], *args, **kwargs) -> list:\n \"\"\"Run forward inference.\n\n Args:\n img (List[torch.Tensor]): A list contains input image(s)\n in [N x C x H x W] format.\n *args: Other arguments.\n **kwargs: Other key-pair arguments.\n\n Returns:\n list: A list contains predictions.\n \"\"\"\n\n if isinstance(img, list):\n input_img = img[0].contiguous()\n else:\n input_img = img.contiguous()\n outputs = self.forward_test(input_img, *args, **kwargs)\n\n return list(outputs)\n\n def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> \\\n List[np.ndarray]:\n \"\"\"The interface for forward test.\n\n Args:\n imgs (torch.Tensor): Input image(s) in [N x C x H x W] format.\n\n Returns:\n List[np.ndarray]: A list of classification prediction.\n \"\"\"\n outputs = self.wrapper({self.input_name: imgs})\n outputs = self.wrapper.output_to_list(outputs)\n outputs = [out.detach().cpu().numpy() for out in outputs]\n return outputs\n\n def show_result(self,\n img: np.ndarray,\n result: list,\n win_name: str = '',\n show: bool = True,\n out_file: str = None):\n \"\"\"Show predictions of classification.\n Args:\n img: (np.ndarray): Input image to draw predictions.\n result (list): A list of predictions.\n win_name (str): The name of visualization window.\n show (bool): Whether to show plotted image in windows. 
Defaults to\n `True`.\n out_file (str): Output image file to save drawn predictions.\n\n Returns:\n np.ndarray: Drawn image, only if not `show` or `out_file`.\n \"\"\"\n return BaseClassifier.show_result(\n self, img, result, show=show, win_name=win_name, out_file=out_file)\n\n\n@__BACKEND_MODEL.register_module('sdk')\nclass SDKEnd2EndModel(End2EndModel):\n \"\"\"SDK inference class, converts SDK output to mmcls format.\"\"\"\n\n def forward(self, img: List[torch.Tensor], *args, **kwargs) -> list:\n \"\"\"Run forward inference.\n\n Args:\n img (List[torch.Tensor]): A list contains input image(s)\n in [N x C x H x W] format.\n *args: Other arguments.\n **kwargs: Other key-pair arguments.\n\n Returns:\n list: A list contains predictions.\n \"\"\"\n\n pred = self.wrapper.invoke(\n [img[0].contiguous().detach().cpu().numpy()])[0]\n pred = np.array(pred, dtype=np.float32)\n return pred[np.argsort(pred[:, 0])][np.newaxis, :, 1]\n\n\ndef get_classes_from_config(model_cfg: Union[str, mmcv.Config]):\n \"\"\"Get class name from config.\n\n Args:\n model_cfg (str | mmcv.Config): Input model config file or\n Config object.\n\n Returns:\n list[str]: A list of string specifying names of different class.\n \"\"\"\n model_cfg = load_config(model_cfg)[0]\n module_dict = DATASETS.module_dict\n data_cfg = model_cfg.data\n\n if 'train' in data_cfg:\n module = module_dict[data_cfg.train.type]\n elif 'val' in data_cfg:\n module = module_dict[data_cfg.val.type]\n elif 'test' in data_cfg:\n module = module_dict[data_cfg.test.type]\n else:\n raise RuntimeError(f'No dataset config found in: {model_cfg}')\n\n return module.CLASSES\n\n\ndef build_classification_model(model_files: Sequence[str],\n model_cfg: Union[str, mmcv.Config],\n deploy_cfg: Union[str, mmcv.Config],\n device: str, **kwargs):\n \"\"\"Build classification model for different backend.\n\n Args:\n model_files (Sequence[str]): Input model file(s).\n model_cfg (str | mmcv.Config): Input model config file or Config\n object.\n deploy_cfg (str | mmcv.Config): Input deployment config file or\n Config object.\n device (str): Device to input model.\n\n Returns:\n BaseBackendModel: Classifier for a configured backend.\n \"\"\"\n # load cfg if necessary\n deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)\n\n backend = get_backend(deploy_cfg)\n model_type = get_codebase_config(deploy_cfg).get('model_type', 'end2end')\n class_names = get_classes_from_config(model_cfg)\n\n backend_classifier = __BACKEND_MODEL.build(\n model_type,\n backend=backend,\n backend_files=model_files,\n device=device,\n class_names=class_names,\n deploy_cfg=deploy_cfg,\n **kwargs)\n\n return backend_classifier\n", "# Copyright (c) OpenMMLab. 
All rights reserved.\nfrom functools import partial\nfrom typing import List, Sequence, Tuple, Union\n\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.utils import Registry\nfrom mmdet.core import bbox2result\nfrom mmdet.datasets import DATASETS\nfrom mmdet.models import BaseDetector\n\nfrom mmdeploy.backend.base import get_backend_file_count\nfrom mmdeploy.codebase.base import BaseBackendModel\nfrom mmdeploy.codebase.mmdet import get_post_processing_params, multiclass_nms\nfrom mmdeploy.utils import (Backend, get_backend, get_codebase_config,\n get_partition_config, load_config)\n\n\ndef __build_backend_model(partition_name: str, backend: Backend,\n backend_files: Sequence[str], device: str,\n class_names: Sequence[str],\n model_cfg: Union[str, mmcv.Config],\n deploy_cfg: Union[str, mmcv.Config],\n registry: Registry, **kwargs):\n return registry.module_dict[partition_name](\n backend=backend,\n backend_files=backend_files,\n class_names=class_names,\n device=device,\n model_cfg=model_cfg,\n deploy_cfg=deploy_cfg,\n **kwargs)\n\n\n# Use registry to store models with different partition methods\n# If a model doesn't need to partition, we don't need this registry\n__BACKEND_MODEL = mmcv.utils.Registry(\n 'backend_detectors', build_func=__build_backend_model)\n\n\n@__BACKEND_MODEL.register_module('end2end')\nclass End2EndModel(BaseBackendModel):\n \"\"\"End to end model for inference of detection.\n\n Args:\n backend (Backend): The backend enum, specifying backend type.\n backend_files (Sequence[str]): Paths to all required backend files\n (e.g. '.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).\n device (str): A string specifying device type.\n class_names (Sequence[str]): A list of string specifying class names.\n deploy_cfg (str|mmcv.Config): Deployment config file or loaded Config\n object.\n \"\"\"\n\n def __init__(self, backend: Backend, backend_files: Sequence[str],\n device: str, class_names: Sequence[str],\n deploy_cfg: Union[str, mmcv.Config], **kwargs):\n super().__init__(deploy_cfg=deploy_cfg)\n self.CLASSES = class_names\n self.deploy_cfg = deploy_cfg\n self._init_wrapper(\n backend=backend, backend_files=backend_files, device=device)\n\n def _init_wrapper(self, backend: Backend, backend_files: Sequence[str],\n device: str):\n \"\"\"Initialize backend wrapper.\n\n Args:\n backend (Backend): The backend enum, specifying backend type.\n backend_files (Sequence[str]): Paths to all required backend files\n (e.g. 
'.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).\n device (str): A string specifying device type.\n \"\"\"\n output_names = self.output_names\n self.wrapper = BaseBackendModel._build_wrapper(\n backend=backend,\n backend_files=backend_files,\n device=device,\n output_names=output_names,\n deploy_cfg=self.deploy_cfg)\n\n @staticmethod\n def __clear_outputs(\n test_outputs: List[Union[torch.Tensor, np.ndarray]]\n ) -> List[Union[List[torch.Tensor], List[np.ndarray]]]:\n \"\"\"Removes additional outputs and detections with zero and negative\n score.\n\n Args:\n test_outputs (List[Union[torch.Tensor, np.ndarray]]):\n outputs of forward_test.\n\n Returns:\n List[Union[List[torch.Tensor], List[np.ndarray]]]:\n outputs with without zero score object.\n \"\"\"\n batch_size = len(test_outputs[0])\n\n num_outputs = len(test_outputs)\n outputs = [[None for _ in range(batch_size)]\n for _ in range(num_outputs)]\n\n for i in range(batch_size):\n inds = test_outputs[0][i, :, 4] > 0.0\n for output_id in range(num_outputs):\n outputs[output_id][i] = test_outputs[output_id][i, inds, ...]\n return outputs\n\n @staticmethod\n def postprocessing_masks(det_bboxes: np.ndarray,\n det_masks: np.ndarray,\n img_w: int,\n img_h: int,\n mask_thr_binary: float = 0.5) -> np.ndarray:\n \"\"\"Additional processing of masks. Resizes masks from [num_det, 28, 28]\n to [num_det, img_w, img_h]. Analog of the 'mmdeploy.codebase.mmdet.\n models.roi_heads.fcn_mask_head._do_paste_mask' function.\n\n Args:\n det_bboxes (np.ndarray): Bbox of shape [num_det, 4]\n det_masks (np.ndarray): Masks of shape [num_det, 28, 28].\n img_w (int): Width of the original image.\n img_h (int): Height of the original image.\n mask_thr_binary (float): The threshold for the mask.\n\n Returns:\n np.ndarray: masks of shape [N, num_det, img_h, img_w].\n \"\"\"\n masks = det_masks\n bboxes = det_bboxes\n\n num_det = bboxes.shape[0]\n # Skip postprocessing if no detections are found.\n if num_det == 0:\n return np.zeros((0, img_h, img_w))\n\n if isinstance(masks, np.ndarray):\n masks = torch.tensor(masks)\n bboxes = torch.tensor(bboxes)\n\n result_masks = []\n for bbox, mask in zip(bboxes, masks):\n\n x0_int, y0_int = 0, 0\n x1_int, y1_int = img_w, img_h\n\n img_y = torch.arange(y0_int, y1_int, dtype=torch.float32) + 0.5\n img_x = torch.arange(x0_int, x1_int, dtype=torch.float32) + 0.5\n x0, y0, x1, y1 = bbox\n\n img_y = (img_y - y0) / (y1 - y0) * 2 - 1\n img_x = (img_x - x0) / (x1 - x0) * 2 - 1\n if torch.isinf(img_x).any():\n inds = torch.where(torch.isinf(img_x))\n img_x[inds] = 0\n if torch.isinf(img_y).any():\n inds = torch.where(torch.isinf(img_y))\n img_y[inds] = 0\n\n gx = img_x[None, :].expand(img_y.size(0), img_x.size(0))\n gy = img_y[:, None].expand(img_y.size(0), img_x.size(0))\n grid = torch.stack([gx, gy], dim=2)\n\n img_masks = F.grid_sample(\n mask.to(dtype=torch.float32)[None, None, :, :],\n grid[None, :, :, :],\n align_corners=False)\n\n mask = img_masks\n mask = (mask >= mask_thr_binary).to(dtype=torch.bool)\n result_masks.append(mask.numpy())\n result_masks = np.concatenate(result_masks, axis=1)\n return result_masks.squeeze(0)\n\n def forward(self, img: Sequence[torch.Tensor], img_metas: Sequence[dict],\n *args, **kwargs):\n \"\"\"Run forward inference.\n\n Args:\n img (Sequence[torch.Tensor]): A list contains input image(s)\n in [N x C x H x W] format.\n img_metas (Sequence[dict]): A list of meta info for image(s).\n *args: Other arguments.\n **kwargs: Other key-pair arguments.\n\n Returns:\n list: A list contains 
predictions.\n \"\"\"\n input_img = img[0].contiguous()\n outputs = self.forward_test(input_img, img_metas, *args, **kwargs)\n outputs = End2EndModel.__clear_outputs(outputs)\n batch_dets, batch_labels = outputs[:2]\n batch_masks = outputs[2] if len(outputs) == 3 else None\n batch_size = input_img.shape[0]\n img_metas = img_metas[0]\n results = []\n rescale = kwargs.get('rescale', True)\n for i in range(batch_size):\n dets, labels = batch_dets[i], batch_labels[i]\n if rescale:\n scale_factor = img_metas[i]['scale_factor']\n\n if isinstance(scale_factor, (list, tuple, np.ndarray)):\n assert len(scale_factor) == 4\n scale_factor = np.array(scale_factor)[None, :] # [1,4]\n dets[:, :4] /= scale_factor\n\n if 'border' in img_metas[i]:\n # offset pixel of the top-left corners between original image\n # and padded/enlarged image, 'border' is used when exporting\n # CornerNet and CentripetalNet to onnx\n x_off = img_metas[i]['border'][2]\n y_off = img_metas[i]['border'][0]\n dets[:, [0, 2]] -= x_off\n dets[:, [1, 3]] -= y_off\n dets[:, :4] *= (dets[:, :4] > 0).astype(dets.dtype)\n\n dets_results = bbox2result(dets, labels, len(self.CLASSES))\n\n if batch_masks is not None:\n masks = batch_masks[i]\n img_h, img_w = img_metas[i]['img_shape'][:2]\n ori_h, ori_w = img_metas[i]['ori_shape'][:2]\n export_postprocess_mask = True\n if self.deploy_cfg is not None:\n\n mmdet_deploy_cfg = get_post_processing_params(\n self.deploy_cfg)\n # this flag enable postprocess when export.\n export_postprocess_mask = mmdet_deploy_cfg.get(\n 'export_postprocess_mask', True)\n if not export_postprocess_mask:\n masks = End2EndModel.postprocessing_masks(\n dets[:, :4], masks, ori_w, ori_h)\n else:\n masks = masks[:, :img_h, :img_w]\n # avoid to resize masks with zero dim\n if rescale and masks.shape[0] != 0:\n masks = masks.astype(np.float32)\n masks = torch.from_numpy(masks)\n masks = torch.nn.functional.interpolate(\n masks.unsqueeze(0), size=(ori_h, ori_w))\n masks = masks.squeeze(0).detach().numpy()\n if masks.dtype != bool:\n masks = masks >= 0.5\n segms_results = [[] for _ in range(len(self.CLASSES))]\n for j in range(len(dets)):\n segms_results[labels[j]].append(masks[j])\n results.append((dets_results, segms_results))\n else:\n results.append(dets_results)\n return results\n\n def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> \\\n Tuple[np.ndarray, np.ndarray]:\n \"\"\"The interface for forward test.\n\n Args:\n imgs (torch.Tensor): Input image(s) in [N x C x H x W] format.\n\n Returns:\n tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5]\n and class labels of shape [N, num_det].\n \"\"\"\n outputs = self.wrapper({self.input_name: imgs})\n outputs = self.wrapper.output_to_list(outputs)\n outputs = [out.detach().cpu().numpy() for out in outputs]\n return outputs\n\n def show_result(self,\n img: np.ndarray,\n result: list,\n win_name: str = '',\n show: bool = True,\n score_thr: float = 0.3,\n out_file=None):\n return BaseDetector.show_result(\n self,\n img=img,\n result=result,\n score_thr=score_thr,\n show=show,\n win_name=win_name,\n out_file=out_file)\n\n\n@__BACKEND_MODEL.register_module('single_stage')\nclass PartitionSingleStageModel(End2EndModel):\n \"\"\"Partitioned single stage detection model.\n\n Args:\n backend (Backend): The backend enum, specifying backend type.\n backend_files (Sequence[str]): Paths to all required backend files\n (e.g. 
'.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).\n device (str): A string specifying device type.\n class_names (Sequence[str]): A list of string specifying class names.\n model_cfg (str|mmcv.Config): Input model config file or Config\n object.\n deploy_cfg (str|mmcv.Config): Deployment config file or loaded Config\n object.\n \"\"\"\n\n def __init__(self, backend: Backend, backend_files: Sequence[str],\n device: str, class_names: Sequence[str],\n model_cfg: Union[str, mmcv.Config],\n deploy_cfg: Union[str, mmcv.Config], **kwargs):\n super().__init__(backend, backend_files, device, class_names,\n deploy_cfg, **kwargs)\n # load cfg if necessary\n model_cfg = load_config(model_cfg)[0]\n self.model_cfg = model_cfg\n\n def _init_wrapper(self, backend, backend_files, device):\n self.wrapper = BaseBackendModel._build_wrapper(\n backend=backend,\n backend_files=backend_files,\n device=device,\n output_names=['scores', 'boxes'],\n deploy_cfg=self.deploy_cfg)\n\n def partition0_postprocess(self, scores: torch.Tensor,\n bboxes: torch.Tensor):\n \"\"\"Perform post-processing for partition 0.\n\n Args:\n scores (Tensor): The detection scores of shape\n [N, num_boxes, num_classes].\n bboxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].\n\n Returns:\n tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and\n class labels of shape [N, num_det].\n \"\"\"\n cfg = self.model_cfg.model.test_cfg\n deploy_cfg = self.deploy_cfg\n\n post_params = get_post_processing_params(deploy_cfg)\n max_output_boxes_per_class = post_params.max_output_boxes_per_class\n iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)\n score_threshold = cfg.get('score_thr', post_params.score_threshold)\n pre_top_k = -1 if post_params.pre_top_k >= bboxes.shape[1] \\\n else post_params.pre_top_k\n keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)\n ret = multiclass_nms(\n bboxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold=iou_threshold,\n score_threshold=score_threshold,\n pre_top_k=pre_top_k,\n keep_top_k=keep_top_k)\n ret = [r.cpu() for r in ret]\n return ret\n\n def forward_test(self, imgs: torch.Tensor, *args, **kwargs):\n \"\"\"Implement forward test.\n\n Args:\n imgs (torch.Tensor): Input image(s) in [N x C x H x W] format.\n\n Returns:\n list[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and\n class labels of shape [N, num_det].\n \"\"\"\n outputs = self.wrapper({self.input_name: imgs})\n outputs = self.wrapper.output_to_list(outputs)\n scores, bboxes = outputs[:2]\n return self.partition0_postprocess(scores, bboxes)\n\n\n@__BACKEND_MODEL.register_module('two_stage')\nclass PartitionTwoStageModel(End2EndModel):\n \"\"\"Partitioned two stage detection model.\n\n Args:\n backend (Backend): The backend enum, specifying backend type.\n backend_files (Sequence[str]): Paths to all required backend files\n (e.g. 
'.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).\n device (str): A string specifying device type.\n class_names (Sequence[str]): A list of string specifying class names.\n model_cfg (str|mmcv.Config): Input model config file or Config\n object.\n deploy_cfg (str|mmcv.Config): Deployment config file or loaded Config\n object.\n \"\"\"\n\n def __init__(self, backend: Backend, backend_files: Sequence[str],\n device: str, class_names: Sequence[str],\n model_cfg: Union[str, mmcv.Config],\n deploy_cfg: Union[str, mmcv.Config], **kwargs):\n\n # load cfg if necessary\n model_cfg = load_config(model_cfg)[0]\n\n self.model_cfg = model_cfg\n\n super().__init__(backend, backend_files, device, class_names,\n deploy_cfg, **kwargs)\n from mmdet.models.builder import build_head, build_roi_extractor\n\n from ..models.roi_heads.bbox_head import bbox_head__get_bboxes\n\n self.bbox_roi_extractor = build_roi_extractor(\n model_cfg.model.roi_head.bbox_roi_extractor)\n self.bbox_head = build_head(model_cfg.model.roi_head.bbox_head)\n\n class Context:\n pass\n\n ctx = Context()\n ctx.cfg = self.deploy_cfg\n self.bbox_head__get_bboxes = partial(bbox_head__get_bboxes, ctx)\n\n def _init_wrapper(self, backend, backend_files, device):\n n = get_backend_file_count(backend)\n num_feat = self.model_cfg['model']['neck']['num_outs']\n partition0_output_names = [\n 'feat/{}'.format(i) for i in range(num_feat)\n ] + ['scores', 'boxes']\n\n self.first_wrapper = BaseBackendModel._build_wrapper(\n backend,\n backend_files[0:n],\n device,\n partition0_output_names,\n deploy_cfg=self.deploy_cfg)\n\n self.second_wrapper = BaseBackendModel._build_wrapper(\n backend,\n backend_files[n:2 * n],\n device, ['cls_score', 'bbox_pred'],\n deploy_cfg=self.deploy_cfg)\n\n def partition0_postprocess(self, x: Sequence[torch.Tensor],\n scores: torch.Tensor, bboxes: torch.Tensor):\n \"\"\"Perform post-processing for partition 0.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale levels.\n scores (Tensor): The detection scores of shape\n [N, num_boxes, num_classes].\n bboxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].\n\n Returns:\n tuple(Tensor, Tensor): rois and bbox_feats.\n \"\"\"\n # rpn-nms + roi-extractor\n cfg = self.model_cfg.model.test_cfg.rpn\n deploy_cfg = self.deploy_cfg\n\n post_params = get_post_processing_params(deploy_cfg)\n iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)\n score_threshold = cfg.get('score_thr', post_params.score_threshold)\n pre_top_k = -1 if post_params.pre_top_k >= bboxes.shape[1] \\\n else post_params.pre_top_k\n keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)\n # only one class in rpn\n max_output_boxes_per_class = keep_top_k\n proposals, _ = multiclass_nms(\n bboxes,\n scores,\n max_output_boxes_per_class,\n iou_threshold=iou_threshold,\n score_threshold=score_threshold,\n pre_top_k=pre_top_k,\n keep_top_k=keep_top_k)\n\n rois = proposals\n batch_index = torch.arange(\n rois.shape[0], device=rois.device).float().view(-1, 1, 1).expand(\n rois.size(0), rois.size(1), 1)\n rois = torch.cat([batch_index, rois[..., :4]], dim=-1)\n batch_size = rois.shape[0]\n num_proposals_per_img = rois.shape[1]\n\n # Eliminate the batch dimension\n rois = rois.view(-1, 5)\n bbox_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs], rois)\n\n rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))\n return rois, bbox_feats\n\n def partition1_postprocess(self, rois: torch.Tensor,\n cls_score: torch.Tensor,\n bbox_pred: 
torch.Tensor,\n img_metas: Sequence[dict]):\n \"\"\"Perform post-processing for partition 1.\n Args:\n rois (torch.Tensor): Input tensor of roi.\n cls_score (torch.Tensor): Scores of all classes.\n bbox_pred (torch.Tensor): Bounding box proposals.\n img_metas (Sequence[dict]): A list of image(s) meta information.\n\n Returns:\n tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class\n labels of shape [N, num_det].\n \"\"\"\n batch_size = rois.shape[0]\n num_proposals_per_img = rois.shape[1]\n\n cls_score = cls_score.reshape(batch_size, num_proposals_per_img,\n cls_score.size(-1))\n\n bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img,\n bbox_pred.size(-1))\n\n rcnn_test_cfg = self.model_cfg.model.test_cfg.rcnn\n return self.bbox_head__get_bboxes(\n self.bbox_head,\n rois,\n cls_score,\n bbox_pred,\n img_metas[0][0]['img_shape'],\n img_metas[0][0]['scale_factor'],\n cfg=rcnn_test_cfg)\n\n def forward_test(self, imgs: torch.Tensor, img_metas: Sequence[dict],\n *args, **kwargs):\n \"\"\"Implement forward test.\n\n Args:\n imgs (torch.Tensor): Input image(s) in [N x C x H x W] format.\n img_metas (Sequence[dict]): A list of image(s) meta information.\n\n Returns:\n tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and\n class labels of shape [N, num_det].\n \"\"\"\n outputs = self.first_wrapper({'input': imgs})\n outputs = self.first_wrapper.output_to_list(outputs)\n feats = outputs[:-2]\n scores, bboxes = outputs[-2:]\n\n # partition0_postprocess\n rois, bbox_feats = self.partition0_postprocess(feats, scores, bboxes)\n\n # partition1 forward\n bbox_feats = bbox_feats.contiguous()\n outputs = self.second_wrapper({'bbox_feats': bbox_feats})\n outputs = self.second_wrapper.output_to_list(outputs)\n cls_score, bbox_pred = outputs[:2]\n\n # partition1_postprocess\n outputs = self.partition1_postprocess(rois, cls_score, bbox_pred,\n img_metas)\n outputs = [out.detach().cpu() for out in outputs]\n return outputs\n\n\n@__BACKEND_MODEL.register_module('ncnn_end2end')\nclass NCNNEnd2EndModel(End2EndModel):\n \"\"\"NCNNEnd2EndModel.\n\n End2end NCNN model inference class. Because it has DetectionOutput layer\n and its output is different from original mmdet style of `dets`, `labels`.\n\n Args:\n backend (Backend): The backend enum, specifying backend type.\n backend_files (Sequence[str]): Paths to all required backend files\n (e.g. 
'.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).\n device (str): A string specifying device type.\n class_names (Sequence[str]): A list of string specifying class names.\n model_cfg (str|mmcv.Config): Input model config file or Config\n object.\n deploy_cfg (str|mmcv.Config): Deployment config file or loaded Config\n object.\n \"\"\"\n\n def __init__(self, backend: Backend, backend_files: Sequence[str],\n device: str, class_names: Sequence[str],\n model_cfg: Union[str, mmcv.Config],\n deploy_cfg: Union[str, mmcv.Config], **kwargs):\n assert backend == Backend.NCNN, f'only supported ncnn, but give \\\n {backend.value}'\n\n super(NCNNEnd2EndModel,\n self).__init__(backend, backend_files, device, class_names,\n deploy_cfg, **kwargs)\n # load cfg if necessary\n model_cfg = load_config(model_cfg)[0]\n self.model_cfg = model_cfg\n\n def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> List:\n \"\"\"Implement forward test.\n\n Args:\n imgs (torch.Tensor): Input image(s) in [N x C x H x W] format.\n\n Returns:\n list[np.ndarray]: dets of shape [N, num_det, 5] and\n class labels of shape [N, num_det].\n \"\"\"\n _, _, H, W = imgs.shape\n outputs = self.wrapper({self.input_name: imgs})\n for key, item in outputs.items():\n if item is None:\n return [np.zeros((1, 0, 5)), np.zeros((1, 0))]\n out = self.wrapper.output_to_list(outputs)[0]\n labels = out[:, :, 0] - 1\n scales = torch.tensor([W, H, W, H]).reshape(1, 1, 4)\n scores = out[:, :, 1:2]\n boxes = out[:, :, 2:6] * scales\n dets = torch.cat([boxes, scores], dim=2)\n dets = dets.detach().cpu().numpy()\n labels = labels.detach().cpu().numpy()\n return [dets, labels]\n\n\n@__BACKEND_MODEL.register_module('sdk')\nclass SDKEnd2EndModel(End2EndModel):\n \"\"\"SDK inference class, converts SDK output to mmdet format.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.has_mask = self.deploy_cfg.codebase_config.get('has_mask', False)\n\n def forward(self, img: Sequence[torch.Tensor], img_metas: Sequence[dict],\n *args, **kwargs):\n \"\"\"Run forward inference.\n\n Args:\n img (Sequence[torch.Tensor]): A list contains input image(s)\n in [N x C x H x W] format.\n img_metas (Sequence[dict]): A list of meta info for image(s).\n *args: Other arguments.\n **kwargs: Other key-pair arguments.\n\n Returns:\n list: A list contains predictions.\n \"\"\"\n dets, labels, masks = self.wrapper.invoke(\n [img[0].contiguous().detach().cpu().numpy()])[0]\n det_results = bbox2result(dets[np.newaxis, ...], labels[np.newaxis,\n ...],\n len(self.CLASSES))\n if self.has_mask:\n segm_results = [[] for _ in range(len(self.CLASSES))]\n ori_h, ori_w = img_metas[0]['ori_shape'][:2]\n for bbox, label, mask in zip(dets, labels, masks):\n img_mask = np.zeros((ori_h, ori_w), dtype=np.uint8)\n left = int(max(np.floor(bbox[0]) - 1, 0))\n top = int(max(np.floor(bbox[1]) - 1, 0))\n img_mask[top:top + mask.shape[0],\n left:left + mask.shape[1]] = mask\n segm_results[label].append(img_mask)\n return [(det_results, segm_results)]\n return [det_results]\n\n\ndef get_classes_from_config(model_cfg: Union[str, mmcv.Config], **kwargs) -> \\\n List[str]:\n \"\"\"Get class name from config. 
The class name is the `classes` field if it\n is set in the config, or the classes in `module_dict` of MMDet whose type\n is set in the config.\n\n Args:\n model_cfg (str | mmcv.Config): Input model config file or\n Config object.\n\n Returns:\n List[str]: A list of string specifying names of different class.\n \"\"\"\n # load cfg if necessary\n model_cfg = load_config(model_cfg)[0]\n\n # For custom dataset\n if 'classes' in model_cfg:\n return list(model_cfg['classes'])\n\n module_dict = DATASETS.module_dict\n data_cfg = model_cfg.data\n classes = None\n module = None\n\n keys = ['test', 'val', 'train']\n\n for key in keys:\n if key in data_cfg:\n if 'classes' in data_cfg[key]:\n classes = list(data_cfg[key]['classes'])\n break\n elif 'type' in data_cfg[key]:\n module = module_dict[data_cfg[key]['type']]\n break\n\n if classes is None and module is None:\n raise RuntimeError(f'No dataset config found in: {model_cfg}')\n\n if classes is not None:\n return classes\n else:\n return module.CLASSES\n\n\ndef build_object_detection_model(model_files: Sequence[str],\n model_cfg: Union[str, mmcv.Config],\n deploy_cfg: Union[str, mmcv.Config],\n device: str, **kwargs):\n \"\"\"Build object detection model for different backends.\n\n Args:\n model_files (Sequence[str]): Input model file(s).\n model_cfg (str | mmcv.Config): Input model config file or Config\n object.\n deploy_cfg (str | mmcv.Config): Input deployment config file or\n Config object.\n device (str): Device to input model\n\n Returns:\n End2EndModel: Detector for a configured backend.\n \"\"\"\n # load cfg if necessary\n deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)\n\n backend = get_backend(deploy_cfg)\n class_names = get_classes_from_config(model_cfg)\n\n partition_config = get_partition_config(deploy_cfg)\n if partition_config is not None:\n partition_type = partition_config.get('type', None)\n else:\n codebase_config = get_codebase_config(deploy_cfg)\n # Default Config is 'end2end'\n partition_type = codebase_config.get('model_type', 'end2end')\n\n backend_detector = __BACKEND_MODEL.build(\n partition_type,\n backend=backend,\n backend_files=model_files,\n class_names=class_names,\n device=device,\n model_cfg=model_cfg,\n deploy_cfg=deploy_cfg,\n **kwargs)\n\n return backend_detector\n" ]
[ [ "numpy.array", "numpy.argsort" ], [ "numpy.concatenate", "torch.cat", "numpy.array", "torch.stack", "numpy.zeros", "torch.arange", "torch.from_numpy", "torch.tensor", "torch.isinf", "numpy.floor" ] ]
tcapelle/tsai
[ "36a2f704abf174515c55115832f08ea2d9753e14" ]
[ "tsai/models/MINIROCKET.py" ]
[ "# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/111b_models.MINIROCKET.ipynb (unless otherwise specified).\n\n__all__ = ['MiniRocketClassifier', 'load_minirocket', 'MiniRocketRegressor', 'load_minirocket',\n 'MiniRocketVotingClassifier', 'get_minirocket_preds', 'MiniRocketVotingRegressor']\n\n# Cell\nimport sklearn\nfrom sklearn.metrics import make_scorer\nfrom sklearn.linear_model import RidgeCV, RidgeClassifierCV\nfrom sklearn.ensemble import VotingClassifier, VotingRegressor\nfrom ..imports import *\nfrom ..utils import *\nfrom ..data.external import *\nfrom .layers import *\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n# Cell\nclass MiniRocketClassifier(sklearn.pipeline.Pipeline):\n \"\"\"Time series classification using MINIROCKET features and a linear classifier\"\"\"\n def __init__(self, num_features=10_000, max_dilations_per_kernel=32, random_state=None,\n alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, class_weight=None, **kwargs):\n \"\"\" MiniRocketClassifier is recommended for up to 10k time series.\n\n For a larger dataset, you can use MINIROCKET (in Pytorch).\n scoring = None --> defaults to accuracy.\n \"\"\"\n\n # Issue caused by sktime when upgraded 0.9.0 (changed num_features to num_kernels was resolved by\n # Siva Sai (SivaAndMe in GiHub)https://github.com/timeseriesAI/tsai/pull/306)\n\n try:\n import sktime\n from sktime.transformations.panel.rocket import MiniRocketMultivariate\n except ImportError:\n print(\"You need to install sktime to be able to use MiniRocketClassifier\")\n\n self.steps = [('minirocketmultivariate', MiniRocketMultivariate(num_kernels=num_features,\n max_dilations_per_kernel=max_dilations_per_kernel,\n random_state=random_state)),\n ('ridgeclassifiercv', RidgeClassifierCV(alphas=alphas,\n normalize=normalize_features,\n scoring=scoring,\n class_weight=class_weight,\n **kwargs))]\n store_attr()\n self._validate_steps()\n\n def __repr__(self):\n return f'Pipeline(steps={self.steps.copy()})'\n\n def save(self, fname=None, path='./models'):\n fname = ifnone(fname, 'MiniRocketClassifier')\n path = Path(path)\n filename = path/fname\n filename.parent.mkdir(parents=True, exist_ok=True)\n with open(f'{filename}.pkl', 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n\n# Cell\ndef load_minirocket(fname, path='./models'):\n path = Path(path)\n filename = path/fname\n with open(f'{filename}.pkl', 'rb') as input:\n output = pickle.load(input)\n return output\n\n# Cell\nclass MiniRocketRegressor(sklearn.pipeline.Pipeline):\n \"\"\"Time series regression using MINIROCKET features and a linear regressor\"\"\"\n def __init__(self, num_features=10000, max_dilations_per_kernel=32, random_state=None,\n alphas=np.logspace(-3, 3, 7), *, normalize_features=True, memory=None, verbose=False, scoring=None, **kwargs):\n \"\"\" MiniRocketRegressor is recommended for up to 10k time series.\n\n For a larger dataset, you can use MINIROCKET (in Pytorch).\n scoring = None --> defaults to r2.\n \"\"\"\n\n # Issue caused by sktime when upgraded 0.9.0 (changed num_features to num_kernels was resolved by\n # Siva Sai (SivaAndMe in GiHub)https://github.com/timeseriesAI/tsai/pull/306)\n\n try:\n import sktime\n from sktime.transformations.panel.rocket import MiniRocketMultivariate\n except ImportError:\n print(\"You need to install sktime to be able to use MiniRocketRegressor\")\n\n self.steps = [('minirocketmultivariate', MiniRocketMultivariate(num_kernels=num_features,\n 
max_dilations_per_kernel=max_dilations_per_kernel,\n random_state=random_state)),\n ('ridgecv', RidgeCV(alphas=alphas, normalize=normalize_features, scoring=scoring, **kwargs))]\n store_attr()\n self._validate_steps()\n\n def __repr__(self):\n return f'Pipeline(steps={self.steps.copy()})'\n\n def save(self, fname=None, path='./models'):\n fname = ifnone(fname, 'MiniRocketRegressor')\n path = Path(path)\n filename = path/fname\n filename.parent.mkdir(parents=True, exist_ok=True)\n with open(f'{filename}.pkl', 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n\n# Cell\ndef load_minirocket(fname, path='./models'):\n path = Path(path)\n filename = path/fname\n with open(f'{filename}.pkl', 'rb') as input:\n output = pickle.load(input)\n return output\n\n# Cell\nclass MiniRocketVotingClassifier(VotingClassifier):\n \"\"\"Time series classification ensemble using MINIROCKET features, a linear classifier and majority voting\"\"\"\n def __init__(self, n_estimators=5, weights=None, n_jobs=-1, num_features=10_000, max_dilations_per_kernel=32, random_state=None,\n alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, class_weight=None, **kwargs):\n store_attr()\n\n try:\n import sktime\n from sktime.transformations.panel.rocket import MiniRocketMultivariate\n except ImportError:\n print(\"You need to install sktime to be able to use MiniRocketVotingClassifier\")\n\n estimators = [(f'est_{i}', MiniRocketClassifier(num_features=num_features, max_dilations_per_kernel=max_dilations_per_kernel,\n random_state=random_state, alphas=alphas, normalize_features=normalize_features, memory=memory,\n verbose=verbose, scoring=scoring, class_weight=class_weight, **kwargs))\n for i in range(n_estimators)]\n super().__init__(estimators, voting='hard', weights=weights, n_jobs=n_jobs, verbose=verbose)\n\n def __repr__(self):\n return f'MiniRocketVotingClassifier(n_estimators={self.n_estimators}, \\nsteps={self.estimators[0][1].steps})'\n\n def save(self, fname=None, path='./models'):\n fname = ifnone(fname, 'MiniRocketVotingClassifier')\n path = Path(path)\n filename = path/fname\n filename.parent.mkdir(parents=True, exist_ok=True)\n with open(f'{filename}.pkl', 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n\n# Cell\ndef get_minirocket_preds(X, fname, path='./models', model=None):\n if X.ndim == 1: X = X[np.newaxis][np.newaxis]\n elif X.ndim == 2: X = X[np.newaxis]\n if model is None:\n model = load_minirocket(fname=fname, path=path)\n return model.predict(X)\n\n# Cell\nclass MiniRocketVotingRegressor(VotingRegressor):\n \"\"\"Time series regression ensemble using MINIROCKET features, a linear regressor and a voting regressor\"\"\"\n def __init__(self, n_estimators=5, weights=None, n_jobs=-1, num_features=10_000, max_dilations_per_kernel=32, random_state=None,\n alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, **kwargs):\n store_attr()\n\n try:\n import sktime\n from sktime.transformations.panel.rocket import MiniRocketMultivariate\n except ImportError:\n print(\"You need to install sktime to be able to use MiniRocketVotingRegressor\")\n\n estimators = [(f'est_{i}', MiniRocketRegressor(num_features=num_features, max_dilations_per_kernel=max_dilations_per_kernel,\n random_state=random_state, alphas=alphas, normalize_features=normalize_features, memory=memory,\n verbose=verbose, scoring=scoring, **kwargs))\n for i in range(n_estimators)]\n super().__init__(estimators, weights=weights, 
n_jobs=n_jobs, verbose=verbose)\n\n def __repr__(self):\n return f'MiniRocketVotingRegressor(n_estimators={self.n_estimators}, \\nsteps={self.estimators[0][1].steps})'\n\n def save(self, fname=None, path='./models'):\n fname = ifnone(fname, 'MiniRocketVotingRegressor')\n path = Path(path)\n filename = path/fname\n filename.parent.mkdir(parents=True, exist_ok=True)\n with open(f'{filename}.pkl', 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)" ]
[ [ "sklearn.linear_model.RidgeCV", "sklearn.linear_model.RidgeClassifierCV" ] ]
learsi1911/GAMA_pygmo_v4
[ "459807db352dd1c9f9c1e0e322f8c1e9b5abbca0" ]
[ "tests/unit/test_scikitlearn.py" ]
[ "import pandas as pd\nfrom sklearn.datasets import load_iris\nfrom gama.genetic_programming.compilers.scikitlearn import (\n evaluate_individual,\n compile_individual,\n evaluate_pipeline,\n)\nfrom gama.utilities.metrics import Metric, scoring_to_metric\n\n\ndef test_evaluate_individual(SS_BNB):\n import datetime\n\n reported_start_time = datetime.datetime.now()\n\n def fake_evaluate_pipeline(pipeline, *args, **kwargs):\n # predictions, scores, estimators, errors\n return None, (1.0,), [], None\n\n evaluation = evaluate_individual(\n SS_BNB, evaluate_pipeline=fake_evaluate_pipeline, add_length_to_score=True,\n )\n individual = evaluation.individual\n assert individual == SS_BNB\n assert hasattr(individual, \"fitness\")\n assert individual.fitness.values == (1.0, -2)\n assert (individual.fitness.start_time - reported_start_time).total_seconds() < 1.0\n\n\ndef test_compile_individual(SS_BNB):\n from sklearn.naive_bayes import BernoulliNB\n from sklearn.preprocessing import StandardScaler, MinMaxScaler\n\n pipeline = compile_individual(SS_BNB)\n assert 2 == len(pipeline.steps)\n assert isinstance(pipeline.steps[0][1], StandardScaler)\n assert isinstance(pipeline.steps[1][1], BernoulliNB)\n\n mm_scale = [(\"scaler\", MinMaxScaler())]\n extended_pipeline = compile_individual(SS_BNB, preprocessing_steps=mm_scale)\n assert 3 == len(extended_pipeline.steps)\n assert isinstance(extended_pipeline.steps[0][1], MinMaxScaler)\n assert isinstance(extended_pipeline.steps[1][1], StandardScaler)\n assert isinstance(extended_pipeline.steps[2][1], BernoulliNB)\n\n\ndef test_evaluate_pipeline(SS_BNB):\n x, y = load_iris(return_X_y=True)\n x, y = pd.DataFrame(x), pd.Series(y)\n\n prediction, scores, estimators, errors = evaluate_pipeline(\n SS_BNB.pipeline, x, y, timeout=60, metrics=scoring_to_metric(\"accuracy\"),\n )\n assert 1 == len(scores)\n assert errors is None\n assert 5 == len(estimators)\n assert prediction.shape == (150,)\n\n\ndef test_evaluate_invalid_pipeline(InvalidLinearSVC):\n x, y = load_iris(return_X_y=True)\n x, y = pd.DataFrame(x), pd.Series(y)\n\n prediction, scores, estimators, error = evaluate_pipeline(\n InvalidLinearSVC.pipeline,\n x,\n y,\n timeout=60,\n metrics=scoring_to_metric(\"accuracy\"),\n )\n assert (float(\"-inf\"),) == scores\n assert str(error).startswith(\"Unsupported set of arguments:\")\n assert str(error).endswith(\"penalty='l1', loss='squared_hinge', dual=True\")\n assert estimators is None\n assert prediction is None\n" ]
[ [ "pandas.DataFrame", "sklearn.preprocessing.MinMaxScaler", "sklearn.datasets.load_iris", "pandas.Series" ] ]
Pandinosaurus/doctr
[ "3d645ce7d3d4fe36aa53537d4e4f92507f6cd422", "7bbdf4b1e5f7e9a28a7047dcd13eb2a5501643ef" ]
[ "demo/app.py", "api/tests/routes/test_detection.py" ]
[ "# Copyright (C) 2021-2022, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport os\n\nimport matplotlib.pyplot as plt\nimport streamlit as st\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\nimport cv2\nimport tensorflow as tf\n\ngpu_devices = tf.config.experimental.list_physical_devices('GPU')\nif any(gpu_devices):\n tf.config.experimental.set_memory_growth(gpu_devices[0], True)\n\nfrom doctr.io import DocumentFile\nfrom doctr.models import ocr_predictor\nfrom doctr.utils.visualization import visualize_page\n\nDET_ARCHS = [\"db_resnet50\", \"db_mobilenet_v3_large\", \"linknet_resnet18_rotation\"]\nRECO_ARCHS = [\"crnn_vgg16_bn\", \"crnn_mobilenet_v3_small\", \"master\", \"sar_resnet31\"]\n\n\ndef main():\n\n # Wide mode\n st.set_page_config(layout=\"wide\")\n\n # Designing the interface\n st.title(\"docTR: Document Text Recognition\")\n # For newline\n st.write('\\n')\n # Instructions\n st.markdown(\"*Hint: click on the top-right corner of an image to enlarge it!*\")\n # Set the columns\n cols = st.columns((1, 1, 1, 1))\n cols[0].subheader(\"Input page\")\n cols[1].subheader(\"Segmentation heatmap\")\n cols[2].subheader(\"OCR output\")\n cols[3].subheader(\"Page reconstitution\")\n\n # Sidebar\n # File selection\n st.sidebar.title(\"Document selection\")\n # Disabling warning\n st.set_option('deprecation.showfileUploaderEncoding', False)\n # Choose your own image\n uploaded_file = st.sidebar.file_uploader(\"Upload files\", type=['pdf', 'png', 'jpeg', 'jpg'])\n if uploaded_file is not None:\n if uploaded_file.name.endswith('.pdf'):\n doc = DocumentFile.from_pdf(uploaded_file.read())\n else:\n doc = DocumentFile.from_images(uploaded_file.read())\n page_idx = st.sidebar.selectbox(\"Page selection\", [idx + 1 for idx in range(len(doc))]) - 1\n cols[0].image(doc[page_idx])\n\n # Model selection\n st.sidebar.title(\"Model selection\")\n det_arch = st.sidebar.selectbox(\"Text detection model\", DET_ARCHS)\n reco_arch = st.sidebar.selectbox(\"Text recognition model\", RECO_ARCHS)\n\n # For newline\n st.sidebar.write('\\n')\n\n if st.sidebar.button(\"Analyze page\"):\n\n if uploaded_file is None:\n st.sidebar.write(\"Please upload a document\")\n\n else:\n with st.spinner('Loading model...'):\n predictor = ocr_predictor(\n det_arch, reco_arch, pretrained=True,\n assume_straight_pages=(det_arch != \"linknet_resnet18_rotation\")\n )\n\n with st.spinner('Analyzing...'):\n\n # Forward the image to the model\n processed_batches = predictor.det_predictor.pre_processor([doc[page_idx]])\n out = predictor.det_predictor.model(processed_batches[0], return_model_output=True)\n seg_map = out[\"out_map\"]\n seg_map = tf.squeeze(seg_map[0, ...], axis=[2])\n seg_map = cv2.resize(seg_map.numpy(), (doc[page_idx].shape[1], doc[page_idx].shape[0]),\n interpolation=cv2.INTER_LINEAR)\n # Plot the raw heatmap\n fig, ax = plt.subplots()\n ax.imshow(seg_map)\n ax.axis('off')\n cols[1].pyplot(fig)\n\n # Plot OCR output\n out = predictor([doc[page_idx]])\n fig = visualize_page(out.pages[0].export(), doc[page_idx], interactive=False)\n cols[2].pyplot(fig)\n\n # Page reconsitution under input page\n page_export = out.pages[0].export()\n if det_arch != \"linknet_resnet18_rotation\":\n img = out.pages[0].synthesize()\n cols[3].image(img, clamp=True)\n\n # Display JSON\n st.markdown(\"\\nHere are your analysis results in JSON format:\")\n st.json(page_export)\n\n\nif __name__ == '__main__':\n 
main()\n", "import numpy as np\nimport pytest\nfrom scipy.optimize import linear_sum_assignment\n\nfrom doctr.utils.metrics import box_iou\n\n\n@pytest.mark.asyncio\nasync def test_text_detection(test_app_asyncio, mock_detection_image):\n\n response = await test_app_asyncio.post(\"/detection\", files={'file': mock_detection_image})\n assert response.status_code == 200\n json_response = response.json()\n\n gt_boxes = np.array([[1240, 430, 1355, 470], [1360, 430, 1495, 470]], dtype=np.float32)\n gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] / 1654\n gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] / 2339\n\n # Check that IoU with GT if reasonable\n assert isinstance(json_response, list) and len(json_response) == gt_boxes.shape[0]\n pred_boxes = np.array([elt['box'] for elt in json_response])\n iou_mat = box_iou(gt_boxes, pred_boxes)\n gt_idxs, pred_idxs = linear_sum_assignment(-iou_mat)\n is_kept = iou_mat[gt_idxs, pred_idxs] >= 0.8\n assert gt_idxs[is_kept].shape[0] == gt_boxes.shape[0]\n" ]
[ [ "tensorflow.config.experimental.list_physical_devices", "tensorflow.config.experimental.set_memory_growth", "matplotlib.pyplot.subplots", "tensorflow.squeeze" ], [ "scipy.optimize.linear_sum_assignment", "numpy.array" ] ]
apuranik1/rl-examples
[ "af807bd9311e056e8690ee4bc5abbc63a91381e9" ]
[ "rl_examples/approximate_td.py" ]
[ "from typing import Deque, Tuple\nfrom collections import deque\n\nimport numpy as np\n\nfrom .psfa import PSFAAgent, PSFAEnvironment, TState, TAction\nfrom .approximation import TrainableEstimator, Featurizer\n\n\nclass ApproximationTDNAgent(PSFAAgent[TState, TAction]):\n \"\"\"A bootstrapping agent using n-step SARSA\"\"\"\n\n def __init__(\n self,\n env: PSFAEnvironment[TState, TAction],\n featurizer: Featurizer[Tuple[TState, TAction]],\n estimator: TrainableEstimator,\n exploration_rate: float,\n n: int,\n lr: float,\n use_average_reward: bool = False,\n ):\n self.env = env\n self.featurizer = featurizer\n self.estimator = estimator\n self.differential = use_average_reward\n self.avg_reward = 0.0\n self.epsilon = exploration_rate\n self.n = n\n self.lr = lr\n self.data_queue: Deque[Tuple[TState, TAction, float]] = deque()\n\n def action(self, state: TState) -> TAction:\n available_actions = self.env.get_actions(state)\n if np.random.rand() < self.epsilon:\n return np.random.choice(available_actions) # type: ignore\n else:\n batch_featurized = np.stack(\n [self._featurize(state, action) for action in available_actions]\n )\n value_estimates = self.estimator.batch_estimate(batch_featurized)\n max_idx: int = np.argmax(value_estimates)\n return available_actions[max_idx]\n\n def _evaluate_queue(self, trailing_rewards: float) -> float:\n est = trailing_rewards + sum(reward for s, a, reward in self.data_queue)\n if self.differential:\n return est - len(self.data_queue) * self.avg_reward\n else:\n return est\n\n def _featurize(self, state: TState, action: TAction) -> np.ndarray:\n return self.featurizer.featurize((state, action))\n\n def act_and_train(self, t: int) -> Tuple[TState, TAction, float]:\n state = self.env.state\n action = self.action(state)\n reward = self.env.take_action(action)\n if len(self.data_queue) == self.n:\n trailing_estimate = self.estimator.estimate(self._featurize(state, action))\n reward_estimate = self._evaluate_queue(trailing_estimate)\n old_state, old_action, old_reward = self.data_queue.popleft()\n current_estimate = self.estimator.estimate_and_update(\n self._featurize(old_state, old_action), reward_estimate\n )\n self.avg_reward += self.lr * (reward_estimate - current_estimate)\n self.data_queue.append((state, action, reward))\n return state, action, reward\n\n def episode_end(self) -> None:\n while self.data_queue:\n reward_estimate = self._evaluate_queue(0.0)\n old_state, old_action, old_reward = self.data_queue.popleft()\n current_estimate = self.estimator.estimate_and_update(\n self._featurize(old_state, old_action), reward_estimate\n )\n self.avg_reward += self.lr * (reward_estimate - current_estimate)\n" ]
[ [ "numpy.random.rand", "numpy.random.choice", "numpy.argmax" ] ]
dukebw/imgaug
[ "eba6eef5808704926edce97de39af23cab18cb7f" ]
[ "checks/check_perspective_transform.py" ]
[ "from __future__ import print_function, division\n\nimport numpy as np\n\nimport imgaug as ia\nfrom imgaug import augmenters as iaa\n\n\ndef main():\n image = ia.data.quokka(size=0.5)\n kps = [\n ia.KeypointsOnImage(\n [\n ia.Keypoint(x=245, y=203),\n ia.Keypoint(x=365, y=195),\n ia.Keypoint(x=313, y=269),\n ],\n shape=(image.shape[0] * 2, image.shape[1] * 2),\n )\n ]\n kps[0] = kps[0].on(image.shape)\n print(\"image shape:\", image.shape)\n\n augs = [\n iaa.PerspectiveTransform(scale=0.01, name=\"pt001\", keep_size=True),\n iaa.PerspectiveTransform(scale=0.1, name=\"pt01\", keep_size=True),\n iaa.PerspectiveTransform(scale=0.2, name=\"pt02\", keep_size=True),\n iaa.PerspectiveTransform(scale=0.3, name=\"pt03\", keep_size=True),\n iaa.PerspectiveTransform(scale=(0, 0.3), name=\"pt00to03\", keep_size=True),\n ]\n\n print(\"original\", image.shape)\n ia.imshow(kps[0].draw_on_image(image))\n\n print(\"-----------------\")\n print(\"Random aug per image\")\n print(\"-----------------\")\n for aug in augs:\n images_aug = []\n for _ in range(16):\n aug_det = aug.to_deterministic()\n img_aug = aug_det.augment_image(image)\n kps_aug = aug_det.augment_keypoints(kps)[0]\n img_aug_kps = kps_aug.draw_on_image(img_aug)\n img_aug_kps = np.pad(\n img_aug_kps,\n ((1, 1), (1, 1), (0, 0)),\n mode=\"constant\",\n constant_values=255,\n )\n images_aug.append(img_aug_kps)\n print(aug.name)\n ia.imshow(ia.draw_grid(images_aug))\n\n print(\"----------------\")\n print(\"6 channels\")\n print(\"----------------\")\n image6 = np.dstack([image, image])\n image6_aug = augs[1].augment_image(image6)\n ia.imshow(np.hstack([image6_aug[..., 0:3], image6_aug[..., 3:6]]))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.dstack", "numpy.hstack", "numpy.pad" ] ]
AugustasVol/written_test_automation
[ "80d3295f741f4aaa3abaa4e85f20677ff59c146d" ]
[ "nets.py" ]
[ "import numpy as np\n\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nclass net_base:\n def trainer(self, x,y, epochs = 1, print_loss = True):\n \n self.train(True)\n \n for i in range(epochs):\n\n self.optimizer.zero_grad() # zero the gradient buffers\n \n output = self(x)\n loss = self.loss_function(output, y)\n loss.backward()\n if print_loss:\n print(loss)\n \n self.optimizer.step() # Does the update\n \n self.train(False)\n def numpy_forward(self, x):\n\n if x.dtype == np.uint8:\n x = x / 255\n\n x = x.astype(np.float32)\n x = torch.from_numpy(x)\n x = autograd.Variable(x)\n output = self(x)\n \n return output.data.numpy()\n def numpy_train(self,x,y, epochs = 1, print_loss = True):\n\n if x.dtype == np.uint8:\n x = x / 255\n \n x = x.astype(np.float32)\n y = y.astype(np.float32)\n \n x = torch.from_numpy(x)\n y = torch.from_numpy(y)\n \n x = autograd.Variable(x)\n y = autograd.Variable(y)\n \n self.trainer(x,y, epochs = epochs, print_loss = print_loss)\n\n def load_weights(self, path):\n self.load_state_dict(torch.load(path))\n def save_weights(self,path):\n torch.save(self.state_dict(), path)\n \n \nclass answer_model(nn.Module, net_base):\n def __init__(self, category_number = 6):\n super(answer_model, self).__init__()\n self.dropout = nn.Dropout(0.05)\n\n #self.conv_start = nn.Conv2d(1, 16, (3,3), stride=(1,1), padding=(1,1))\n\n self.conv00 = nn.Conv2d(1, 15, (2,2), stride=(2,2))\n self.conv01 = nn.Conv2d(15, 16, (2,2), stride=(2,2))\n self.conv02 = nn.Conv2d(16, 16, (1,1), stride=(1,1))\n \n self.conv10 = nn.Conv2d(16, 32, (3,3), stride=(3,3))\n self.conv11 = nn.Conv2d(32,32, (2,2), stride=(1,1))\n self.conv12 = nn.Conv2d(32,32, (1,1), stride=(1,1))\n \n self.conv20 = nn.Conv2d(32, 16, (1,5), stride=(1,2))\n self.conv21 = nn.Conv2d(16, 16, (1,5), stride=(1,2))\n self.conv22 = nn.Conv2d(16, 6, (1,1), stride=(1,1))\n \n self.final_dense = nn.Linear(6,category_number)\n\n self.loss_function = nn.BCELoss()\n self.optimizer = optim.Adam(self.parameters(), lr = 0.0001)\n\n self.train(False)\n def forward(self,x):\n\n #x = F.relu(self.conv_start(x))\n\n #x = self.dropout(x)\n \n x = F.relu(self.conv00(x))\n x = F.relu(self.conv01(x))\n x = F.relu(self.conv02(x))\n\n x = self.dropout(x)\n \n x = F.relu(self.conv10(x))\n x = F.relu(self.conv11(x))\n x = F.relu(self.conv12(x))\n\n x = self.dropout(x)\n\n x = F.relu(self.conv20(x))\n x = F.relu(self.conv21(x))\n x = F.relu(self.conv22(x))\n\n x = x.view(-1, 6)\n\n x = F.sigmoid(self.final_dense(x))\n \n return x\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.autograd.Variable", "torch.from_numpy", "torch.nn.Conv2d", "torch.nn.BCELoss", "torch.load" ] ]
Markek1/style-transfer-gui
[ "14892d3c657242c4825129b56a6668904f53a65e" ]
[ "style_transfer.py" ]
[ "import os\n\nimport tensorflow as tf\n\ndef magenta_v1_256_2(content_image, style_image, resize=False, content_res=None, style_res=None):\n '''Resolution of generated image = resolution of content image.\n Resolution of the style image is 256x256 by default because the net\n was trained on it and it generally works best'''\n if resize:\n if content_res:\n content_image = tf.image.resize(content_image, content_res)\n\n if style_res:\n style_image = tf.image.resize(style_image, style_res)\n else:\n style_image = tf.image.resize(style_image, (256, 256))\n local_path = 'models/magenta_arbitrary-image-stylization-v1-256_2'\n if os.path.exists(local_path):\n model = tf.saved_model.load(local_path)\n image = tf.squeeze(model(tf.constant(content_image), tf.constant(style_image))[0])\n return image\n" ]
[ [ "tensorflow.constant", "tensorflow.image.resize", "tensorflow.saved_model.load" ] ]
skerit/gezicht
[ "5361b06e250400b0f1b44faf6f8940b0f39ed5d9" ]
[ "python/main.py" ]
[ "import face_recognition\nimport importlib\nimport numpy as np\nimport socket\nimport time\nimport json\nimport sys\nimport os\n\nfrom PIL import Image\nfrom io import BytesIO\n\npi_spec = importlib.util.find_spec(\"picamera\")\nfound_picam = pi_spec is not None\n_picam = False\nhas_cv = False\npicam_options = {\n\t'rotation' : 90\n}\n\n# Make stdout flush by default\n#sys.stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)\n\n# Create the face encodings\nencodings = {}\n\ndef getPicam():\n\t# If the module isn't available, return False\n\tif not found_picam:\n\t\treturn False\n\n\t# If the camera hasn't been created yet,\n\t# do so now\n\tif not _picam:\n\t\timport picamera\n\t\t_picam = picamera.PiCamera()\n\n\t\tif picam_options:\n\t\t\t_picam.rotation = picam_options.get('rotation')\n\n\t\t_picam.resolution = (320, 240)\n\n\treturn _picam\n\ndef detectFaceFromPath(path):\n\timage = face_recognition.load_image_file(path)\n\treturn detectFaces(image)\n\ndef detectFaces(frame):\n\n\t# Get the shape of the frame\n\tshape = frame.shape\n\twidth = shape[0]\n\theight = shape[1]\n\n\t# Create the result dictionary\n\tresult = {}\n\tresult['original_size'] = {\n\t\t'width' : width,\n\t\t'height' : height\n\t}\n\n\t# Max size is 450x450\n\tmax_size = 450\n\n\tif width > max_size or height > max_size:\n\t\tif width > height:\n\t\t\tcoef = max_size / width\n\t\telse:\n\t\t\tcoef = max_size / height\n\n\t\tif not has_cv:\n\t\t\timport cv2\n\t\t\thas_cv = True\n\n\t\t# Resize frame of video for faster face recognition processing\n\t\tframe = cv2.resize(frame, (0, 0), fx=coef, fy=coef)\n\n\t\tresult['resized'] = {\n\t\t\t'width' : frame.shape[0],\n\t\t\t'height' : frame.shape[1]\n\t\t}\n\n\tface_locations = face_recognition.face_locations(frame)\n\tface_encodings = face_recognition.face_encodings(frame, face_locations)\n\tface_names = []\n\tfaces = []\n\n\t# Get an array of the known faces\n\tknown_faces = list(encodings.items())\n\tleft_overs = []\n\tremove_seen_faces = True\n\n\t# Loop over each face found in the frame to see if it's someone we know.\n\tfor face_encoding in face_encodings:\n\t\tname = ''\n\n\t\tif remove_seen_faces:\n\t\t\t# Iterate over the known faces,\n\t\t\t# we'll pop one each time\n\t\t\twhile known_faces:\n\t\t\t\t# Shift the first face from the list\n\t\t\t\tface = known_faces.pop(0)\n\t\t\t\tkey = face[0]\n\t\t\t\tvalue = face[1]\n\n\t\t\t\tmatch = face_recognition.compare_faces(value, face_encoding)\n\n\t\t\t\tif (match[0]):\n\t\t\t\t\tname = key\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\t# It doesn't match, add it to the leftovers list\n\t\t\t\t\tleft_overs.append(face)\n\n\t\t\t# Add all the left overs back to the face_names\n\t\t\twhile left_overs:\n\t\t\t\tknown_faces.append(left_overs.pop(0))\n\t\telse:\n\t\t\tfor key, value in known_faces:\n\t\t\t\tmatch = face_recognition.compare_faces(value, face_encoding)\n\n\t\t\t\tif match[0]:\n\t\t\t\t\tname = key\n\t\t\t\t\tbreak\n\n\t\tface_names.append(name)\n\n\tfor (top, right, bottom, left), name in zip(face_locations, face_names):\n\t\tentry = {\n\t\t\t'top' : top,\n\t\t\t'right' : right,\n\t\t\t'bottom' : bottom,\n\t\t\t'left' : left,\n\t\t\t'name' : name\n\t\t}\n\n\t\tfaces.append(entry)\n\n\tresult['faces'] = faces\n\n\treturn result\n\n# Start listening to input commands\nwhile 1:\n\tline = sys.stdin.readline()\n\treq = json.loads(line)\n\tcmd = req.get('command')\n\toutput = {}\n\tresult = {}\n\toutput['id'] = req.get('id')\n\toutput['result'] = result;\n\n\tif cmd == 'learn-face':\n\t\tname = 
req.get('name')\n\t\tpaths = req.get('paths')\n\t\tpath_results = []\n\t\tcount = 0\n\n\t\tif name not in encodings:\n\t\t\tencodings[name] = []\n\n\t\tfor path in paths:\n\t\t\timage = face_recognition.load_image_file(path)\n\t\t\tencoding = face_recognition.face_encodings(image)[0]\n\t\t\tencodings[name].append(encoding)\n\n\t\t\tcount += 1\n\n\t\t\t# Turn the numpy array into a regular list,\n\t\t\t# otherwise it'll fail json encoding later\n\t\t\tpath_results.append(encoding.tolist())\n\n\t\t# Just a check on how many paths we did\n\t\tresult['count'] = count\n\n\t\t# Give the encodings back to the other side,\n\t\t# they might cache them\n\t\tresult['encodings'] = path_results\n\telif cmd == 'add-face-encoding':\n\t\tnew_encodings = req.get('encodings')\n\t\tname = req.get('name')\n\t\tcount = 0\n\n\t\tif name not in encodings:\n\t\t\tencodings[name] = []\n\n\t\tfor encoding in new_encodings:\n\t\t\tencodings[name].append(encoding)\n\t\t\tcount += 1\n\n\t\tresult['count'] = count\n\n\telif cmd == 'detect-face':\n\t\tpath = req.get('file_path')\n\t\tface_result = detectFaceFromPath(path)\n\t\tresult.update(face_result)\n\telif cmd == 'detect-picam':\n\t\tpicam = getPicam()\n\n\t\tif not picam:\n\t\t\toutput['error'] = 'Did not find picamera module'\n\t\telse:\n\n\t\t\tframe = np.empty((240, 320, 3), dtype=np.uint8)\n\t\t\tpicam.capture(frame, format=\"rgb\", use_video_port=True)\n\n\t\t\tface_result = detectFaces(frame)\n\n\t\t\tresult.update(face_result)\n\n\telif cmd == 'detect-stream':\n\t\tpath = req.get('stream_path')\n\n\t\ttry:\n\t\t\tsock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n\t\t\tsock.connect(path)\n\t\t\tdata = False\n\n\t\t\twhile True:\n\t\t\t\tbuf = sock.recv(4096)\n\n\t\t\t\tif not buf:\n\t\t\t\t\tbreak\n\n\t\t\t\tif not data:\n\t\t\t\t\tdata = buf\n\t\t\t\telse:\n\t\t\t\t\tdata = data + buf\n\n\t\t\tface_result = detectFaceFromPath(BytesIO(data))\n\t\t\tresult.update(face_result)\n\n\t\texcept Exception as e:\n\t\t\toutput['error'] = str(e)\n\n\n\tprint(json.dumps(output), flush=True)\n\tsys.stdout.flush()\n\n\t# We need to sleep for the buffer to flush\n\ttime.sleep(0.05)\n" ]
[ [ "numpy.empty" ] ]
disktnk/onnx-chainer
[ "e4542568009e63e7da83aa0f11b2cb5504e8cef8", "e4542568009e63e7da83aa0f11b2cb5504e8cef8" ]
[ "onnx_chainer/functions/normalization.py", "onnx_chainer/functions/loss.py" ]
[ "import sys\n\nimport chainer\nimport numpy as np\n\nfrom onnx_chainer.functions.opset_version import support\nfrom onnx_chainer import onnx_helper\n\n\n@support((1, 6, 7))\ndef convert_BatchNormalization(func, opset_version, input_names,\n output_names, context, parameters):\n is_fixed_bn = len(func.inputs) > 3\n\n # NOTE(disktnk):\n # if `use_beta=False`, beta_param is None, `use_gamma=False` is same.\n beta_param = func.inputs[2].get_variable_or_none()\n gamma_param = func.inputs[1].get_variable_or_none()\n namedlink = context.get_link(beta_param) or context.get_link(gamma_param)\n\n if namedlink is not None:\n prefix, link = namedlink\n if is_fixed_bn:\n mean = link.avg_mean\n var = link.avg_var\n else:\n # on train mode, avg_mean would be updated, so make them from x\n x = func.inputs[0].get_variable().array\n mean = x.mean(axis=func.axis)\n var = x.var(axis=func.axis)\n else:\n prefix = None\n if is_fixed_bn:\n mean = func.inputs[3].get_variable().array\n var = func.inputs[4].get_variable().array\n else:\n x = func.inputs[0].get_variable().array\n mean = x.mean(axis=func.axis)\n var = x.var(axis=func.axis)\n\n def add_param(v, suffix):\n if prefix is None:\n return context.add_param(v, suffix)\n else:\n return context.add_param(\n v, '{}_{}'.format(prefix, suffix), use_original_name=True)\n\n maen_name = add_param(mean, 'avg_mean')\n var_name = add_param(var, 'avg_var')\n if is_fixed_bn:\n input_names[3] = maen_name\n input_names[4] = var_name\n else:\n input_names.extend([maen_name, var_name])\n\n if beta_param is None:\n beta_name = add_param(np.zeros_like(mean, dtype=mean.dtype), 'beta')\n input_names[2] = beta_name\n if gamma_param is None:\n gamma_name = add_param(np.ones_like(mean, dtype=mean.dtype), 'gamma')\n input_names[1] = gamma_name\n\n momentum = getattr(func, 'decay', 0.)\n\n # TODO(disktnk): On definition of ONNX's BatchNormalization operator,\n # outputs one required output and four optional outputs. 
This converter\n # should therefore produce five output values and return them.\n\n if opset_version == 1:\n return onnx_helper.make_node(\n 'BatchNormalization', input_names, output_names,\n epsilon=func.eps,\n momentum=momentum,\n is_test=not chainer.config.train,\n consumed_inputs=[False, False, False, True, True],\n ),\n elif opset_version == 6:\n return onnx_helper.make_node(\n 'BatchNormalization', input_names, output_names,\n epsilon=func.eps,\n momentum=momentum,\n is_test=not chainer.config.train,\n ),\n elif opset_version == 7:\n return onnx_helper.make_node(\n 'BatchNormalization', input_names, output_names,\n epsilon=func.eps,\n momentum=momentum,\n ),\n\n\n@support((1, 6, 7))\ndef convert_FixedBatchNormalization(func, opset_version,\n input_names, output_names, context,\n parameters):\n return convert_BatchNormalization(\n func, opset_version, input_names, output_names, context, parameters)\n\n\ndef convert_LocalResponseNormalization(func, opset_version,\n input_names, output_names, context,\n parameters):\n size = int(func.n)\n return onnx_helper.make_node(\n 'LRN', input_names, output_names,\n alpha=float(func.alpha) * size,\n beta=float(func.beta),\n bias=float(func.k),\n size=size,\n ),\n\n\ndef convert_NormalizeL2(func, opset_version, input_names,\n output_names, context, parameters):\n if isinstance(func.axis, tuple) and len(func.axis) != 1:\n raise ValueError(\n 'Normalization along multiple axes ({}) is not supported by '\n 'the ONNX LpNormalization operator.'.format(func.axis))\n if abs(func.eps - 1e-5) > sys.float_info.epsilon:\n # default value of F.normalize eps is 1e-5\n raise ValueError(\n '\\'eps\\' is not supported by the ONNX LpNormalization operator, '\n 'so ONNX-Chainer does not accept custom values for \\'eps\\' '\n '({})'.format(func.eps))\n\n return onnx_helper.make_node(\n 'LpNormalization', input_names, output_names,\n axis=int(func.axis[0]),\n p=2,\n ),\n", "import chainer\nimport numpy as np\n\nfrom onnx_chainer.functions.opset_version import support\nfrom onnx_chainer import onnx_helper\n\n\n@support((9,))\ndef convert_SoftmaxCrossEntropy(\n func, opset_version, input_names,\n output_names, context, parameters):\n # obtain input variable\n if not isinstance(func, chainer.FunctionNode):\n raise NotImplementedError(\n 'SoftmaxCrossEntropy is only supported for Chainer>=6.0.0a1.')\n\n x_var, t_var = func.get_retained_inputs()\n if len(x_var.shape) != 2:\n raise NotImplementedError(\n 'ONNX-Chainer currently handles SoftmaxCrossEntropy only when '\n 'the dimension of input variable x is exactly two.')\n if np.any(t_var.array == func.ignore_label):\n raise NotImplementedError(\n 'ONNX-Chainer currently handles SoftmaxCrossEntropy only when '\n 'ignore_label is not used in input variable t.')\n if (not func.normalize) or (func.class_weight is not None) or\\\n (func.ignore_label != -1) or (func.reduce != 'mean'):\n raise NotImplementedError(\n 'ONNX-Chainer currently handles SoftmaxCrossEntropy only when '\n 'all arguments are left at their default settings.')\n\n # create intermediate values\n gb = onnx_helper.GraphBuilder()\n x, t = input_names\n y_log = gb.op('LogSoftmax', [x])\n depth = context.add_const(np.array([x_var.shape[1]], dtype=np.int32),\n 'depth')\n zeroone = context.add_const(np.array([0, 1], dtype=x_var.dtype), 'zeroone')\n th = gb.op('OneHot', [t, depth, zeroone])\n s0 = gb.op('Mul', [y_log, th])\n sn = gb.op('Neg', [s0])\n sr = gb.op('ReduceSum', [sn], axes=[1], keepdims=0)\n gb.op_output_named('ReduceMean', [sr], output_names, axes=[0], 
keepdims=0)\n\n return gb.nodes()\n" ]
[ [ "numpy.zeros_like", "numpy.ones_like" ], [ "numpy.any", "numpy.array" ] ]
Abishek15592/pandas
[ "6929e262dd22ac35baabf87a5236d451255fb66d" ]
[ "pandas/tests/test_common.py" ]
[ "import collections\nfrom distutils.version import LooseVersion\nfrom functools import partial\nimport string\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat.numpy import np_version_under1p17\n\nimport pandas as pd\nfrom pandas import Series, Timestamp\nimport pandas._testing as tm\nfrom pandas.core import ops\nimport pandas.core.common as com\n\n\ndef test_get_callable_name():\n getname = com.get_callable_name\n\n def fn(x):\n return x\n\n lambda_ = lambda x: x # noqa: E731\n part1 = partial(fn)\n part2 = partial(part1)\n\n class somecall:\n def __call__(self):\n return x # noqa\n\n assert getname(fn) == \"fn\"\n assert getname(lambda_)\n assert getname(part1) == \"fn\"\n assert getname(part2) == \"fn\"\n assert getname(somecall()) == \"somecall\"\n assert getname(1) is None\n\n\ndef test_any_none():\n assert com.any_none(1, 2, 3, None)\n assert not com.any_none(1, 2, 3, 4)\n\n\ndef test_all_not_none():\n assert com.all_not_none(1, 2, 3, 4)\n assert not com.all_not_none(1, 2, 3, None)\n assert not com.all_not_none(None, None, None, None)\n\n\ndef test_random_state():\n import numpy.random as npr\n\n # Check with seed\n state = com.random_state(5)\n assert state.uniform() == npr.RandomState(5).uniform()\n\n # Check with random state object\n state2 = npr.RandomState(10)\n assert com.random_state(state2).uniform() == npr.RandomState(10).uniform()\n\n # check with no arg random state\n assert com.random_state() is np.random\n\n # check array-like\n # GH32503\n state_arr_like = npr.randint(0, 2 ** 31, size=624, dtype=\"uint32\")\n assert (\n com.random_state(state_arr_like).uniform()\n == npr.RandomState(state_arr_like).uniform()\n )\n\n # Check BitGenerators\n # GH32503\n if not np_version_under1p17:\n assert (\n com.random_state(npr.MT19937(3)).uniform()\n == npr.RandomState(npr.MT19937(3)).uniform()\n )\n assert (\n com.random_state(npr.PCG64(11)).uniform()\n == npr.RandomState(npr.PCG64(11)).uniform()\n )\n\n # Error for floats or strings\n msg = (\n \"random_state must be an integer, array-like, a BitGenerator, \"\n \"a numpy RandomState, or None\"\n )\n with pytest.raises(ValueError, match=msg):\n com.random_state(\"test\")\n\n with pytest.raises(ValueError, match=msg):\n com.random_state(5.5)\n\n\n@pytest.mark.parametrize(\n \"left, right, expected\",\n [\n (Series([1], name=\"x\"), Series([2], name=\"x\"), \"x\"),\n (Series([1], name=\"x\"), Series([2], name=\"y\"), None),\n (Series([1]), Series([2], name=\"x\"), None),\n (Series([1], name=\"x\"), Series([2]), None),\n (Series([1], name=\"x\"), [2], \"x\"),\n ([1], Series([2], name=\"y\"), \"y\"),\n ],\n)\ndef test_maybe_match_name(left, right, expected):\n assert ops._maybe_match_name(left, right) == expected\n\n\ndef test_dict_compat():\n data_datetime64 = {np.datetime64(\"1990-03-15\"): 1, np.datetime64(\"2015-03-15\"): 2}\n data_unchanged = {1: 2, 3: 4, 5: 6}\n expected = {Timestamp(\"1990-3-15\"): 1, Timestamp(\"2015-03-15\"): 2}\n assert com.dict_compat(data_datetime64) == expected\n assert com.dict_compat(expected) == expected\n assert com.dict_compat(data_unchanged) == data_unchanged\n\n\ndef test_standardize_mapping():\n # No uninitialized defaultdicts\n msg = r\"to_dict\\(\\) only accepts initialized defaultdicts\"\n with pytest.raises(TypeError, match=msg):\n com.standardize_mapping(collections.defaultdict)\n\n # No non-mapping subtypes, instance\n msg = \"unsupported type: <class 'list'>\"\n with pytest.raises(TypeError, match=msg):\n com.standardize_mapping([])\n\n # No non-mapping subtypes, class\n with 
pytest.raises(TypeError, match=msg):\n com.standardize_mapping(list)\n\n fill = {\"bad\": \"data\"}\n assert com.standardize_mapping(fill) == dict\n\n # Convert instance to type\n assert com.standardize_mapping({}) == dict\n\n dd = collections.defaultdict(list)\n assert isinstance(com.standardize_mapping(dd), partial)\n\n\ndef test_git_version():\n # GH 21295\n git_version = pd.__git_version__\n assert len(git_version) == 40\n assert all(c in string.hexdigits for c in git_version)\n\n\ndef test_version_tag():\n version = pd.__version__\n try:\n version > LooseVersion(\"0.0.1\")\n except TypeError:\n raise ValueError(\n \"No git tags exist, please sync tags between upstream and your repo\"\n )\n\n\n@pytest.mark.parametrize(\n \"obj\", [(obj,) for obj in pd.__dict__.values() if callable(obj)]\n)\ndef test_serializable(obj):\n # GH 35611\n unpickled = tm.round_trip_pickle(obj)\n assert type(obj) == type(unpickled)\n" ]
[ [ "numpy.random.PCG64", "pandas.core.common.dict_compat", "numpy.random.RandomState", "pandas.__dict__.values", "pandas._testing.round_trip_pickle", "pandas.core.common.standardize_mapping", "pandas.core.common.random_state", "pandas.core.common.all_not_none", "pandas.Timestamp", "numpy.random.randint", "numpy.random.MT19937", "pandas.core.common.any_none", "pandas.core.ops._maybe_match_name", "pandas.Series", "numpy.datetime64" ] ]
adbelniak/stable-baselines3
[ "61e3b9c3fc4b113b5de65dd3b083de7550676018" ]
[ "stable_baselines3/common/policies.py" ]
[ "\"\"\"Policies: abstract base class and concrete implementations.\"\"\"\n\nimport collections\nimport copy\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom functools import partial\nfrom typing import Any, Dict, List, Optional, Tuple, Type, Union\n\nimport gym\nimport numpy as np\nimport torch as th\nfrom torch import nn\n\nfrom stable_baselines3.common.distributions import (\n BernoulliDistribution,\n CategoricalDistribution,\n DiagGaussianDistribution,\n Distribution,\n MultiCategoricalDistribution,\n StateDependentNoiseDistribution,\n ConditionalCategoricalDistribution,\n make_proba_distribution,\n)\nfrom stable_baselines3.common.preprocessing import get_action_dim, is_image_space, maybe_transpose, preprocess_obs\nfrom stable_baselines3.common.torch_layers import (\n BaseFeaturesExtractor,\n CombinedExtractor,\n FlattenExtractor,\n MlpExtractor,\n NatureCNN,\n create_mlp,\n)\nfrom stable_baselines3.common.type_aliases import Schedule\nfrom stable_baselines3.common.utils import get_device, is_vectorized_observation, obs_as_tensor\n\n\nclass BaseModel(nn.Module, ABC):\n \"\"\"\n The base model object: makes predictions in response to observations.\n\n In the case of policies, the prediction is an action. In the case of critics, it is the\n estimated value of the observation.\n\n :param observation_space: The observation space of the environment\n :param action_space: The action space of the environment\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param features_extractor: Network to extract features\n (a CNN when using images, a nn.Flatten() layer otherwise)\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n features_extractor: Optional[nn.Module] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n super(BaseModel, self).__init__()\n\n if optimizer_kwargs is None:\n optimizer_kwargs = {}\n\n if features_extractor_kwargs is None:\n features_extractor_kwargs = {}\n\n self.observation_space = observation_space\n self.action_space = action_space\n self.features_extractor = features_extractor\n self.normalize_images = normalize_images\n\n self.optimizer_class = optimizer_class\n self.optimizer_kwargs = optimizer_kwargs\n self.optimizer = None # type: Optional[th.optim.Optimizer]\n\n self.features_extractor_class = features_extractor_class\n self.features_extractor_kwargs = features_extractor_kwargs\n\n @abstractmethod\n def forward(self, *args, **kwargs):\n pass\n\n def _update_features_extractor(\n self,\n net_kwargs: Dict[str, Any],\n features_extractor: Optional[BaseFeaturesExtractor] = None,\n ) -> Dict[str, Any]:\n \"\"\"\n Update the network keyword arguments and create a new features extractor object if needed.\n If a ``features_extractor`` object is passed, then it will be shared.\n\n :param net_kwargs: the base network keyword arguments, without the ones\n related to 
features extractor\n :param features_extractor: a features extractor object.\n If None, a new object will be created.\n :return: The updated keyword arguments\n \"\"\"\n net_kwargs = net_kwargs.copy()\n if features_extractor is None:\n # The features extractor is not shared, create a new one\n features_extractor = self.make_features_extractor()\n net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim))\n return net_kwargs\n\n def make_features_extractor(self) -> BaseFeaturesExtractor:\n \"\"\"Helper method to create a features extractor.\"\"\"\n return self.features_extractor_class(self.observation_space, **self.features_extractor_kwargs)\n\n def extract_features(self, obs: th.Tensor) -> th.Tensor:\n \"\"\"\n Preprocess the observation if needed and extract features.\n\n :param obs:\n :return:\n \"\"\"\n assert self.features_extractor is not None, \"No features extractor was set\"\n preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images)\n return self.features_extractor(preprocessed_obs)\n\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n \"\"\"\n Get data that need to be saved in order to re-create the model when loading it from disk.\n\n :return: The dictionary to pass to the as kwargs constructor when reconstruction this model.\n \"\"\"\n return dict(\n observation_space=self.observation_space,\n action_space=self.action_space,\n # Passed to the constructor by child class\n # squash_output=self.squash_output,\n # features_extractor=self.features_extractor\n normalize_images=self.normalize_images,\n )\n\n @property\n def device(self) -> th.device:\n \"\"\"Infer which device this policy lives on by inspecting its parameters.\n If it has no parameters, the 'cpu' device is used as a fallback.\n\n :return:\"\"\"\n for param in self.parameters():\n return param.device\n return get_device(\"cpu\")\n\n def save(self, path: str) -> None:\n \"\"\"\n Save model to a given location.\n\n :param path:\n \"\"\"\n th.save({\"state_dict\": self.state_dict(), \"data\": self._get_constructor_parameters()}, path)\n\n @classmethod\n def load(cls, path: str, device: Union[th.device, str] = \"auto\") -> \"BaseModel\":\n \"\"\"\n Load model from path.\n\n :param path:\n :param device: Device on which the policy should be loaded.\n :return:\n \"\"\"\n device = get_device(device)\n saved_variables = th.load(path, map_location=device)\n\n # Allow to load policy saved with older version of SB3\n if \"sde_net_arch\" in saved_variables[\"data\"]:\n warnings.warn(\n \"sde_net_arch is deprecated, please downgrade to SB3 v1.2.0 if you need such parameter.\",\n DeprecationWarning,\n )\n del saved_variables[\"data\"][\"sde_net_arch\"]\n\n # Create policy object\n model = cls(**saved_variables[\"data\"]) # pytype: disable=not-instantiable\n # Load weights\n model.load_state_dict(saved_variables[\"state_dict\"])\n model.to(device)\n return model\n\n def load_from_vector(self, vector: np.ndarray) -> None:\n \"\"\"\n Load parameters from a 1D vector.\n\n :param vector:\n \"\"\"\n th.nn.utils.vector_to_parameters(th.FloatTensor(vector).to(self.device), self.parameters())\n\n def parameters_to_vector(self) -> np.ndarray:\n \"\"\"\n Convert the parameters to a 1D vector.\n\n :return:\n \"\"\"\n return th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy()\n\n def set_training_mode(self, mode: bool) -> None:\n \"\"\"\n Put the policy in either training or evaluation mode.\n\n This affects certain 
modules, such as batch normalisation and dropout.\n\n :param mode: if true, set to training mode, else set to evaluation mode\n \"\"\"\n self.train(mode)\n\n def obs_to_tensor(self, observation: Union[np.ndarray, Dict[str, np.ndarray]]) -> Tuple[th.Tensor, bool]:\n \"\"\"\n Convert an input observation to a PyTorch tensor that can be fed to a model.\n Includes sugar-coating to handle different observations (e.g. normalizing images).\n\n :param observation: the input observation\n :return: The observation as PyTorch tensor\n and whether the observation is vectorized or not\n \"\"\"\n vectorized_env = False\n if isinstance(observation, dict):\n # need to copy the dict as the dict in VecFrameStack will become a torch tensor\n observation = copy.deepcopy(observation)\n for key, obs in observation.items():\n obs_space = self.observation_space.spaces[key]\n if is_image_space(obs_space):\n obs_ = maybe_transpose(obs, obs_space)\n else:\n obs_ = np.array(obs)\n vectorized_env = vectorized_env or is_vectorized_observation(obs_, obs_space)\n # Add batch dimension if needed\n observation[key] = obs_.reshape((-1,) + self.observation_space[key].shape)\n\n elif is_image_space(self.observation_space):\n # Handle the different cases for images\n # as PyTorch use channel first format\n observation = maybe_transpose(observation, self.observation_space)\n\n else:\n observation = np.array(observation)\n\n if not isinstance(observation, dict):\n # Dict obs need to be handled separately\n vectorized_env = is_vectorized_observation(observation, self.observation_space)\n # Add batch dimension if needed\n observation = observation.reshape((-1,) + self.observation_space.shape)\n\n observation = obs_as_tensor(observation, self.device)\n return observation, vectorized_env\n\n\nclass BasePolicy(BaseModel):\n \"\"\"The base policy object.\n\n Parameters are mostly the same as `BaseModel`; additions are documented below.\n\n :param args: positional arguments passed through to `BaseModel`.\n :param kwargs: keyword arguments passed through to `BaseModel`.\n :param squash_output: For continuous actions, whether the output is squashed\n or not using a ``tanh()`` function.\n \"\"\"\n\n def __init__(self, *args, squash_output: bool = False, **kwargs):\n super(BasePolicy, self).__init__(*args, **kwargs)\n self._squash_output = squash_output\n\n @staticmethod\n def _dummy_schedule(progress_remaining: float) -> float:\n \"\"\"(float) Useful for pickling policy.\"\"\"\n del progress_remaining\n return 0.0\n\n @property\n def squash_output(self) -> bool:\n \"\"\"(bool) Getter for squash_output.\"\"\"\n return self._squash_output\n\n @staticmethod\n def init_weights(module: nn.Module, gain: float = 1) -> None:\n \"\"\"\n Orthogonal initialization (used in PPO and A2C)\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n nn.init.orthogonal_(module.weight, gain=gain)\n if module.bias is not None:\n module.bias.data.fill_(0.0)\n\n @abstractmethod\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n By default provides a dummy implementation -- not all BasePolicy classes\n implement this, e.g. 
if they are a Critic in an Actor-Critic method.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n \"\"\"\n Get the policy action from an observation (and optional hidden state).\n Includes sugar-coating to handle different observations (e.g. normalizing images).\n\n :param observation: the input observation\n :param state: The last hidden states (can be None, used in recurrent policies)\n :param episode_start: The last masks (can be None, used in recurrent policies)\n this correspond to beginning of episodes,\n where the hidden states of the RNN must be reset.\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next hidden state\n (used in recurrent policies)\n \"\"\"\n # TODO (GH/1): add support for RNN policies\n # if state is None:\n # state = self.initial_state\n # if episode_start is None:\n # episode_start = [False for _ in range(self.n_envs)]\n # Switch to eval mode (this affects batch norm / dropout)\n self.set_training_mode(False)\n\n observation, vectorized_env = self.obs_to_tensor(observation)\n\n with th.no_grad():\n actions = self._predict(observation, deterministic=deterministic)\n # Convert to numpy\n actions = actions.cpu().numpy()\n\n if isinstance(self.action_space, gym.spaces.Box):\n if self.squash_output:\n # Rescale to proper domain when using squashing\n actions = self.unscale_action(actions)\n else:\n # Actions could be on arbitrary scale, so clip the actions to avoid\n # out of bound error (e.g. 
if sampling from a Gaussian distribution)\n actions = np.clip(actions, self.action_space.low, self.action_space.high)\n\n # Remove batch dimension if needed\n if not vectorized_env:\n actions = actions[0]\n\n return actions, state\n\n def scale_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [low, high] to [-1, 1]\n (no need for symmetric action space)\n\n :param action: Action to scale\n :return: Scaled action\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return 2.0 * ((action - low) / (high - low)) - 1.0\n\n def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [-1, 1] to [low, high]\n (no need for symmetric action space)\n\n :param scaled_action: Action to un-scale\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))\n\n\nclass ActorCriticPolicy(BasePolicy):\n \"\"\"\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. 
In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n ortho_init: bool = True,\n use_sde: bool = False,\n log_std_init: float = 0.0,\n full_std: bool = True,\n sde_net_arch: Optional[List[int]] = None,\n use_expln: bool = False,\n squash_output: bool = False,\n features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n if optimizer_kwargs is None:\n optimizer_kwargs = {}\n # Small values to avoid NaN in Adam optimizer\n if optimizer_class == th.optim.Adam:\n optimizer_kwargs[\"eps\"] = 1e-5\n\n super(ActorCriticPolicy, self).__init__(\n observation_space,\n action_space,\n features_extractor_class,\n features_extractor_kwargs,\n optimizer_class=optimizer_class,\n optimizer_kwargs=optimizer_kwargs,\n squash_output=squash_output,\n )\n\n # Default network architecture, from stable-baselines\n if net_arch is None:\n if features_extractor_class == NatureCNN:\n net_arch = []\n else:\n net_arch = [dict(pi=[64, 64], vf=[64, 64])]\n\n self.net_arch = net_arch\n self.activation_fn = activation_fn\n self.ortho_init = ortho_init\n\n self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs)\n self.features_dim = self.features_extractor.features_dim\n\n self.normalize_images = normalize_images\n self.log_std_init = log_std_init\n dist_kwargs = None\n # Keyword arguments for gSDE distribution\n if use_sde:\n dist_kwargs = {\n \"full_std\": full_std,\n \"squash_output\": squash_output,\n \"use_expln\": use_expln,\n \"learn_features\": False,\n }\n\n if sde_net_arch is not None:\n warnings.warn(\"sde_net_arch is deprecated and will be removed in SB3 v2.4.0.\", DeprecationWarning)\n\n self.use_sde = use_sde\n self.dist_kwargs = dist_kwargs\n\n # Action distribution\n self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)\n\n self._build(lr_schedule)\n\n\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n data = super()._get_constructor_parameters()\n\n default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None)\n\n data.update(\n dict(\n net_arch=self.net_arch,\n activation_fn=self.activation_fn,\n use_sde=self.use_sde,\n log_std_init=self.log_std_init,\n squash_output=default_none_kwargs[\"squash_output\"],\n full_std=default_none_kwargs[\"full_std\"],\n use_expln=default_none_kwargs[\"use_expln\"],\n lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone\n ortho_init=self.ortho_init,\n optimizer_class=self.optimizer_class,\n 
optimizer_kwargs=self.optimizer_kwargs,\n features_extractor_class=self.features_extractor_class,\n features_extractor_kwargs=self.features_extractor_kwargs,\n )\n )\n return data\n\n def reset_noise(self, n_envs: int = 1) -> None:\n \"\"\"\n Sample new weights for the exploration matrix.\n\n :param n_envs:\n \"\"\"\n assert isinstance(self.action_dist, StateDependentNoiseDistribution), \"reset_noise() is only available when using gSDE\"\n self.action_dist.sample_weights(self.log_std, batch_size=n_envs)\n\n def _build_mlp_extractor(self) -> None:\n \"\"\"\n Create the policy and value networks.\n Part of the layers can be shared.\n \"\"\"\n # Note: If net_arch is None and some features extractor is used,\n # net_arch here is an empty list and mlp_extractor does not\n # really contain any layers (acts like an identity module).\n self.mlp_extractor = MlpExtractor(\n self.features_dim,\n net_arch=self.net_arch,\n activation_fn=self.activation_fn,\n device=self.device,\n )\n\n def _build(self, lr_schedule: Schedule) -> None:\n \"\"\"\n Create the networks and the optimizer.\n\n :param lr_schedule: Learning rate schedule\n lr_schedule(1) is the initial learning rate\n \"\"\"\n self._build_mlp_extractor()\n\n latent_dim_pi = self.mlp_extractor.latent_dim_pi\n\n if isinstance(self.action_dist, DiagGaussianDistribution):\n self.action_net, self.log_std = self.action_dist.proba_distribution_net(\n latent_dim=latent_dim_pi, log_std_init=self.log_std_init\n )\n elif isinstance(self.action_dist, StateDependentNoiseDistribution):\n self.action_net, self.log_std = self.action_dist.proba_distribution_net(\n latent_dim=latent_dim_pi, latent_sde_dim=latent_dim_pi, log_std_init=self.log_std_init\n )\n elif isinstance(self.action_dist, (CategoricalDistribution, MultiCategoricalDistribution, BernoulliDistribution)):\n self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)\n elif isinstance(self.action_dist, (ConditionalCategoricalDistribution)):\n self.action_net, self.embedding, self.other = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)\n else:\n raise NotImplementedError(f\"Unsupported distribution '{self.action_dist}'.\")\n\n self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)\n # Init weights: use orthogonal initialization\n # with small initial weight for the output\n if self.ortho_init:\n # TODO: check for features_extractor\n # Values from stable-baselines.\n # features_extractor/mlp values are\n # originally from openai/baselines (default gains/init_scales).\n module_gains = {\n self.features_extractor: np.sqrt(2),\n self.mlp_extractor: np.sqrt(2),\n self.action_net: 0.01,\n self.value_net: 1,\n }\n for module, gain in module_gains.items():\n module.apply(partial(self.init_weights, gain=gain))\n\n # Setup optimizer with initial learning rate\n self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)\n\n def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n \"\"\"\n Forward pass in all the networks (actor and critic)\n\n :param obs: Observation\n :param deterministic: Whether to sample or use deterministic actions\n :return: action, value and log probability of the action\n \"\"\"\n # Preprocess the observation if needed\n features = self.extract_features(obs)\n latent_pi, latent_vf = self.mlp_extractor(features)\n # Evaluate the values for the given observations\n values = self.value_net(latent_vf)\n if isinstance(self.action_dist, 
ConditionalCategoricalDistribution):\n # mean_actions = self.action_net[0](latent_pi)\n mean_actions = self.action_net(latent_pi)\n # mean_actions = F.relu(mean_actions)\n # distribution = self.action_dist.proba_distribution(mean_actions)\n actions, distribution = self.action_dist.sample_all(mean_actions)\n else:\n distribution = self._get_action_dist_from_latent(latent_pi)\n actions = distribution.get_actions(deterministic=deterministic)\n log_prob = distribution.log_prob(actions)\n return actions, values, log_prob\n\n def _get_action_dist_from_latent(self, latent_pi: th.Tensor) -> Distribution:\n \"\"\"\n Retrieve action distribution given the latent codes.\n\n :param latent_pi: Latent code for the actor\n :return: Action distribution\n \"\"\"\n mean_actions = self.action_net(latent_pi)\n\n if isinstance(self.action_dist, DiagGaussianDistribution):\n return self.action_dist.proba_distribution(mean_actions, self.log_std)\n elif isinstance(self.action_dist, CategoricalDistribution):\n # Here mean_actions are the logits before the softmax\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, MultiCategoricalDistribution):\n # Here mean_actions are the flattened logits\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, BernoulliDistribution):\n # Here mean_actions are the logits (before rounding to get the binary actions)\n return self.action_dist.proba_distribution(action_logits=mean_actions)\n elif isinstance(self.action_dist, StateDependentNoiseDistribution):\n return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_pi)\n else:\n raise ValueError(\"Invalid action distribution\")\n\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n return self.get_distribution(observation).get_actions(deterministic=deterministic)\n\n def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:\n \"\"\"\n Evaluate actions according to the current policy,\n given the observations.\n\n :param obs:\n :param actions:\n :return: estimated value, log likelihood of taking those actions\n and entropy of the action distribution.\n \"\"\"\n # Preprocess the observation if needed\n features = self.extract_features(obs)\n latent_pi, latent_vf = self.mlp_extractor(features)\n if isinstance(self.action_dist, ConditionalCategoricalDistribution):\n mean_actions = self.action_net(latent_pi)\n _, distribution = self.action_dist.sample_all(mean_actions)\n else:\n distribution = self._get_action_dist_from_latent(latent_pi)\n log_prob = distribution.log_prob(actions)\n values = self.value_net(latent_vf)\n return values, log_prob, distribution.entropy()\n\n def get_distribution(self, obs: th.Tensor) -> Distribution:\n \"\"\"\n Get the current policy distribution given the observations.\n\n :param obs:\n :return: the action distribution.\n \"\"\"\n features = self.extract_features(obs)\n latent_pi = self.mlp_extractor.forward_actor(features)\n return self._get_action_dist_from_latent(latent_pi)\n\n def predict_values(self, obs: th.Tensor) -> th.Tensor:\n \"\"\"\n Get the estimated values according to the current policy given the observations.\n\n :param obs:\n :return: the estimated 
values.\n \"\"\"\n features = self.extract_features(obs)\n latent_vf = self.mlp_extractor.forward_critic(features)\n return self.value_net(latent_vf)\n\n\nclass ActorCriticCnnPolicy(ActorCriticPolicy):\n \"\"\"\n CNN policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,\n activation_fn: Type[nn.Module] = nn.Tanh,\n ortho_init: bool = True,\n use_sde: bool = False,\n log_std_init: float = 0.0,\n full_std: bool = True,\n sde_net_arch: Optional[List[int]] = None,\n use_expln: bool = False,\n squash_output: bool = False,\n features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n super(ActorCriticCnnPolicy, self).__init__(\n observation_space,\n action_space,\n lr_schedule,\n net_arch,\n activation_fn,\n ortho_init,\n use_sde,\n log_std_init,\n full_std,\n sde_net_arch,\n use_expln,\n squash_output,\n features_extractor_class,\n features_extractor_kwargs,\n normalize_images,\n optimizer_class,\n optimizer_kwargs,\n )\n\n\nclass MultiInputActorCriticPolicy(ActorCriticPolicy):\n \"\"\"\n MultiInputActorClass policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space (Tuple)\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not 
orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Uses the CombinedExtractor\n :param features_extractor_kwargs: Keyword arguments\n to pass to the feature extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Dict,\n action_space: gym.spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,\n activation_fn: Type[nn.Module] = nn.Tanh,\n ortho_init: bool = True,\n use_sde: bool = False,\n log_std_init: float = 0.0,\n full_std: bool = True,\n sde_net_arch: Optional[List[int]] = None,\n use_expln: bool = False,\n squash_output: bool = False,\n features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n super(MultiInputActorCriticPolicy, self).__init__(\n observation_space,\n action_space,\n lr_schedule,\n net_arch,\n activation_fn,\n ortho_init,\n use_sde,\n log_std_init,\n full_std,\n sde_net_arch,\n use_expln,\n squash_output,\n features_extractor_class,\n features_extractor_kwargs,\n normalize_images,\n optimizer_class,\n optimizer_kwargs,\n )\n\n\nclass ContinuousCritic(BaseModel):\n \"\"\"\n Critic network(s) for DDPG/SAC/TD3.\n It represents the action-state value function (Q-value function).\n Compared to A2C/PPO critics, this one represents the Q-value\n and takes the continuous action as input. 
It is concatenated with the state\n and then fed to the network which outputs a single value: Q(s, a).\n For more recent algorithms like SAC/TD3, multiple networks\n are created to give different estimates.\n\n By default, it creates two critic networks used to reduce overestimation\n thanks to clipped Q-learning (cf TD3 paper).\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param net_arch: Network architecture\n :param features_extractor: Network to extract features\n (a CNN when using images, a nn.Flatten() layer otherwise)\n :param features_dim: Number of features\n :param activation_fn: Activation function\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether the features extractor is shared or not\n between the actor and the critic (this saves computation time)\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n net_arch: List[int],\n features_extractor: nn.Module,\n features_dim: int,\n activation_fn: Type[nn.Module] = nn.ReLU,\n normalize_images: bool = True,\n n_critics: int = 2,\n share_features_extractor: bool = True,\n ):\n super().__init__(\n observation_space,\n action_space,\n features_extractor=features_extractor,\n normalize_images=normalize_images,\n )\n\n action_dim = get_action_dim(self.action_space)\n\n self.share_features_extractor = share_features_extractor\n self.n_critics = n_critics\n self.q_networks = []\n for idx in range(n_critics):\n q_net = create_mlp(features_dim + action_dim, 1, net_arch, activation_fn)\n q_net = nn.Sequential(*q_net)\n self.add_module(f\"qf{idx}\", q_net)\n self.q_networks.append(q_net)\n\n def forward(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, ...]:\n # Learn the features extractor using the policy loss only\n # when the features_extractor is shared with the actor\n with th.set_grad_enabled(not self.share_features_extractor):\n features = self.extract_features(obs)\n qvalue_input = th.cat([features, actions], dim=1)\n return tuple(q_net(qvalue_input) for q_net in self.q_networks)\n\n def q1_forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor:\n \"\"\"\n Only predict the Q-value using the first network.\n This allows reducing computation when not all the estimates are needed\n (e.g. 
when updating the policy in TD3).\n \"\"\"\n with th.no_grad():\n features = self.extract_features(obs)\n return self.q_networks[0](th.cat([features, actions], dim=1))\n\n\n_policy_registry = dict() # type: Dict[Type[BasePolicy], Dict[str, Type[BasePolicy]]]\n\n\ndef get_policy_from_name(base_policy_type: Type[BasePolicy], name: str) -> Type[BasePolicy]:\n \"\"\"\n Returns the registered policy from the base type and name.\n See `register_policy` for registering policies and explanation.\n\n :param base_policy_type: the base policy class\n :param name: the policy name\n :return: the policy\n \"\"\"\n if base_policy_type not in _policy_registry:\n raise KeyError(f\"Error: the policy type {base_policy_type} is not registered!\")\n if name not in _policy_registry[base_policy_type]:\n raise KeyError(\n f\"Error: unknown policy type {name}, \"\n f\"the only registered policy types are: {list(_policy_registry[base_policy_type].keys())}!\"\n )\n return _policy_registry[base_policy_type][name]\n\n\ndef register_policy(name: str, policy: Type[BasePolicy]) -> None:\n \"\"\"\n Register a policy, so it can be called using its name.\n e.g. SAC('MlpPolicy', ...) instead of SAC(MlpPolicy, ...).\n\n The goal here is to standardize policy naming, e.g.\n all algorithms can call upon \"MlpPolicy\" or \"CnnPolicy\",\n and they receive respective policies that work for them.\n Consider the following:\n\n OnlinePolicy\n -- OnlineMlpPolicy (\"MlpPolicy\")\n -- OnlineCnnPolicy (\"CnnPolicy\")\n OfflinePolicy\n -- OfflineMlpPolicy (\"MlpPolicy\")\n -- OfflineCnnPolicy (\"CnnPolicy\")\n\n Two policies have the name \"MlpPolicy\" and two have \"CnnPolicy\".\n In `get_policy_from_name`, the parent class (e.g. OnlinePolicy)\n is given and used to select and return the correct policy.\n\n :param name: the policy name\n :param policy: the policy class\n \"\"\"\n sub_class = None\n for cls in BasePolicy.__subclasses__():\n if issubclass(policy, cls):\n sub_class = cls\n break\n if sub_class is None:\n raise ValueError(f\"Error: the policy {policy} is not of any known subclasses of BasePolicy!\")\n\n if sub_class not in _policy_registry:\n _policy_registry[sub_class] = {}\n if name in _policy_registry[sub_class]:\n # Check if the policy already registered under this name\n # is the same as the one we are trying to register.\n # If not, do not override and complain.\n if _policy_registry[sub_class][name] != policy:\n raise ValueError(f\"Error: the name {name} is already registered for a different policy, will not override.\")\n _policy_registry[sub_class][name] = policy\n" ]
[ [ "torch.nn.Linear", "torch.cat", "numpy.array", "torch.nn.Sequential", "torch.no_grad", "torch.FloatTensor", "torch.load", "numpy.clip", "numpy.sqrt", "torch.nn.init.orthogonal_", "torch.set_grad_enabled" ] ]
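The registry helpers at the end of the policies module above lend themselves to a short usage sketch. This is a minimal example under stated assumptions: `policies` stands for the module shown above (not a confirmed import path), and `MyMlpPolicy` is a hypothetical subclass invented here for illustration.

# Sketch only: `policies` is a placeholder for the module shown above,
# and `MyMlpPolicy` is a hypothetical subclass used for illustration.
from policies import ActorCriticPolicy, get_policy_from_name, register_policy


class MyMlpPolicy(ActorCriticPolicy):
    """Hypothetical policy; inherits all behaviour from ActorCriticPolicy."""


# Register under a short name so algorithms can refer to it as a string.
register_policy("MyMlpPolicy", MyMlpPolicy)

# An algorithm would later resolve the string through the base class:
# register_policy stored the class under ActorCriticPolicy, the direct
# BasePolicy subclass that MyMlpPolicy derives from.
policy_cls = get_policy_from_name(ActorCriticPolicy, "MyMlpPolicy")
assert policy_cls is MyMlpPolicy

Note that registering the same name twice only raises when the class differs, which is exactly what the final equality check in `register_policy` guards against.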
LSchultebraucks/matplotlib_examples
[ "cac02668ce6b81dcbbdf0ff3238cc01506c8f76a" ]
[ "src/mosaic_plot.py" ]
[ "import pandas as pd\nfrom statsmodels.graphics.mosaicplot import mosaic\nimport pylab\nfrom itertools import product\nimport numpy as np\nrand = np.random.random\n\n# build all (gender, answer) combinations and name the index levels accordingly\nspeaks_mul_foreign_languages = list(product(['male', 'female'], ['yes', 'no']))\nindex = pd.MultiIndex.from_tuples(speaks_mul_foreign_languages, names=['gender', 'speaks'])\n# one random proportion per (gender, answer) cell\ndata = pd.Series(rand(4), index=index)\n\nmosaic(data, gap=0.01, title='Who knows multiple foreign languages? - Mosaic Chart')\npylab.show()\n" ]
[ [ "pandas.MultiIndex.from_tuples" ] ]
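Since `mosaic()` simply consumes a Series keyed by a MultiIndex, a quick sketch of that data shape may help. It assumes only pandas and numpy; the level names are illustrative and match the fix applied in the snippet above.

# Sketch of the 2x2 keyed Series that the mosaic example above plots.
from itertools import product

import numpy as np
import pandas as pd

index = pd.MultiIndex.from_tuples(
    list(product(["male", "female"], ["yes", "no"])), names=["gender", "speaks"]
)
data = pd.Series(np.random.random(4), index=index)
print(data)
# gender  speaks
# male    yes       0.41   (values are random)
# male    no        0.07
# female  yes       0.83
# female  no        0.29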
lulujianjie/efficient-person-generation-for-reid
[ "1bb29c7c280e3322a65af36b37deecbce0c1d322" ]
[ "data-generation-GAN/generate_samples_market.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport os\nimport sys\nimport cv2\nfrom config.cfg import Cfg\nimport torch\nfrom torch.backends import cudnn\nfrom datasets.bases import read_image\nsys.path.append('.')\nfrom datasets import make_dataloader\nfrom processor import do_inference\nfrom model import make_model\nfrom utils.logger import setup_logger\nimport torchvision.transforms as T\nimport torch.nn as nn\nimport numpy as np\nimport matplotlib.pyplot as plt\n#rename img\nimport string\nimport random\n\ndevice = \"cuda\"\nWEIGHT_PATH = './log/model_G_1800.pth'\n#'/nfs-data/lujj/pretrained_model/pose-transfer/model_G_45.pth'\n#'/nfs-data/lujj/projects/pose-transfer-jack-reid-01/log/tmp/model_G_180.pth'\nCfg.freeze()\nos.environ['CUDA_VISIBLE_DEVICES'] = \"5\"\ncudnn.benchmark = True\n\ntest_transforms = T.Compose([\n T.Resize(Cfg.MODEL.INPUT_SIZE),\n T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n ])\n\nmodel_G, _, _, _ = make_model(Cfg)\nmodel_G.to(device)\n#model_G = nn.DataParallel(model_G)\nmodel_G.load_state_dict(torch.load(WEIGHT_PATH))\n\n\n# In[ ]:\n\n\ndataset = 'DukeMTMC-reID'\nroot_dir = '/home/lujj/datasets/{}/'.format(dataset)\ndata_dir = 'p3'\ntarget_dir = '/home/lujj/datasets/{}/{}_g/'.format(dataset,data_dir)\ntarget_dir2 = '/home/lujj/datasets/{}/{}_g_bak/'.format(dataset,data_dir)\nimg_list = []\npid_set = set()\nfor img in os.listdir(root_dir+data_dir):\n pid = img.split('_')[0]\n if pid in pid_set:\n continue\n else:\n pid_set.add(pid)\nfor img in os.listdir('/home/lujj/datasets/{}/bounding_box_train/'.format(dataset)):\n pid = img.split('_')[0]\n if pid in pid_set:\n continue\n else:\n pid_set.add(pid)\n img_list.append(img)\nprint('to generate pid:',len(img_list))\npose_list = np.load(root_dir+'pose_list_duke.npy')\nlen_pose = len(pose_list)\nprint('body-part:',len_pose)\n\n\n# In[ ]:\n\n\nnum_imgs = 24\nmodel_G.eval()\nfor img in img_list:\n if img[-3:] == 'jpg':\n img1_path = '/home/lujj/datasets/{}/bounding_box_train/{}'.format(dataset,img)\n for pose2_idx in np.random.choice(range(len_pose),num_imgs, replace=False):\n target_pose = pose_list[pose2_idx]\n pose2_path = '/home/lujj/datasets/{}/train_part_heatmap/{}.npy'.format(dataset,target_pose)\n img1 = read_image(img1_path)\n # plt.imshow(img1)\n # plt.show()\n img1 = torch.unsqueeze(test_transforms(img1),0).to(device)\n pose_heatmap2 = np.load(pose2_path).astype(np.float32)\n pose2 = torch.tensor(pose_heatmap2.transpose((2, 0, 1)))\n pose2 = torch.unsqueeze(pose2,0).to(device)\n input_G = (img1, pose2)\n\n fake_img2 = model_G(input_G)\n result = fake_img2.cpu().detach().numpy()\n img1 = (np.transpose(result[0],(1,2,0))+ 1) / 2.0 * 255.0\n cv2.imwrite(target_dir+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR))\n cv2.imwrite(target_dir2+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR))\n\n\n# In[ ]:\n\n\nfor img in os.listdir(target_dir):\n src = target_dir+img\n target_img = ''.join(random.sample(string.ascii_letters + string.digits, 10))+'.jpg'\n img_ = img.split('-')\n dst = target_dir+img_[0]+target_img\n os.rename(src, dst)\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "numpy.transpose", "numpy.load", "torch.load", "torch.unsqueeze" ] ]
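The final renaming loop in the generation script above draws a 10-character name with `random.sample`, which cannot repeat characters within one name and can in principle collide across files. A collision-resistant variant is sketched below; `target_dir` is a placeholder, not a path taken from the original script.

# Collision-resistant alternative to the random.sample renaming above.
# target_dir is a placeholder path, not taken from the original script.
import os
import uuid

target_dir = "/path/to/generated_images/"

for img in os.listdir(target_dir):
    pid = img.split("-")[0]  # keep the person-id prefix, as the script does
    dst = os.path.join(target_dir, f"{pid}{uuid.uuid4().hex[:10]}.jpg")
    os.rename(os.path.join(target_dir, img), dst)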
BenRussert/pandas
[ "1f02bf240c3d0d3da338af868d056bfc169b28c2", "9179e633b1e54ac31c5ea42ec0ec24e9a1709aae", "9179e633b1e54ac31c5ea42ec0ec24e9a1709aae", "9179e633b1e54ac31c5ea42ec0ec24e9a1709aae" ]
[ "pandas/core/computation/align.py", "pandas/core/indexes/numeric.py", "pandas/tests/util/test_util.py", "pandas/tests/io/formats/test_to_html.py" ]
[ "\"\"\"Core eval alignment algorithms\n\"\"\"\n\nfrom functools import partial, wraps\nimport warnings\n\nimport numpy as np\n\nfrom pandas.compat import range, zip\nfrom pandas.errors import PerformanceWarning\n\nimport pandas as pd\nfrom pandas import compat\nimport pandas.core.common as com\nfrom pandas.core.computation.common import _result_type_many\n\n\ndef _align_core_single_unary_op(term):\n if isinstance(term.value, np.ndarray):\n typ = partial(np.asanyarray, dtype=term.value.dtype)\n else:\n typ = type(term.value)\n ret = typ,\n\n if not hasattr(term.value, 'axes'):\n ret += None,\n else:\n ret += _zip_axes_from_type(typ, term.value.axes),\n return ret\n\n\ndef _zip_axes_from_type(typ, new_axes):\n axes = {}\n for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES):\n axes[ax_name] = new_axes[ax_ind]\n return axes\n\n\ndef _any_pandas_objects(terms):\n \"\"\"Check a sequence of terms for instances of PandasObject.\"\"\"\n return any(isinstance(term.value, pd.core.generic.PandasObject)\n for term in terms)\n\n\ndef _filter_special_cases(f):\n @wraps(f)\n def wrapper(terms):\n # single unary operand\n if len(terms) == 1:\n return _align_core_single_unary_op(terms[0])\n\n term_values = (term.value for term in terms)\n\n # we don't have any pandas objects\n if not _any_pandas_objects(terms):\n return _result_type_many(*term_values), None\n\n return f(terms)\n return wrapper\n\n\n@_filter_special_cases\ndef _align_core(terms):\n term_index = [i for i, term in enumerate(terms)\n if hasattr(term.value, 'axes')]\n term_dims = [terms[i].value.ndim for i in term_index]\n ndims = pd.Series(dict(zip(term_index, term_dims)))\n\n # initial axes are the axes of the largest-axis'd term\n biggest = terms[ndims.idxmax()].value\n typ = biggest._constructor\n axes = biggest.axes\n naxes = len(axes)\n gt_than_one_axis = naxes > 1\n\n for value in (terms[i].value for i in term_index):\n is_series = isinstance(value, pd.Series)\n is_series_and_gt_one_axis = is_series and gt_than_one_axis\n\n for axis, items in enumerate(value.axes):\n if is_series_and_gt_one_axis:\n ax, itm = naxes - 1, value.index\n else:\n ax, itm = axis, items\n\n if not axes[ax].is_(itm):\n axes[ax] = axes[ax].join(itm, how='outer')\n\n for i, ndim in compat.iteritems(ndims):\n for axis, items in zip(range(ndim), axes):\n ti = terms[i].value\n\n if hasattr(ti, 'reindex'):\n transpose = isinstance(ti, pd.Series) and naxes > 1\n reindexer = axes[naxes - 1] if transpose else items\n\n term_axis_size = len(ti.axes[axis])\n reindexer_size = len(reindexer)\n\n ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))\n if ordm >= 1 and reindexer_size >= 10000:\n w = ('Alignment difference on axis {axis} is larger '\n 'than an order of magnitude on term {term!r}, by '\n 'more than {ordm:.4g}; performance may suffer'\n ).format(axis=axis, term=terms[i].name, ordm=ordm)\n warnings.warn(w, category=PerformanceWarning, stacklevel=6)\n\n f = partial(ti.reindex, reindexer, axis=axis, copy=False)\n\n terms[i].update(f())\n\n terms[i].update(terms[i].value.values)\n\n return typ, _zip_axes_from_type(typ, axes)\n\n\ndef _align(terms):\n \"\"\"Align a set of terms\"\"\"\n try:\n # flatten the parse tree (a nested list, really)\n terms = list(com.flatten(terms))\n except TypeError:\n # can't iterate so it must just be a constant or single variable\n if isinstance(terms.value, pd.core.generic.NDFrame):\n typ = type(terms.value)\n return typ, _zip_axes_from_type(typ, terms.value.axes)\n return np.result_type(terms.type), None\n\n # if all 
resolved variables are numeric scalars\n if all(term.is_scalar for term in terms):\n return _result_type_many(*(term.value for term in terms)).type, None\n\n # perform the main alignment\n typ, axes = _align_core(terms)\n return typ, axes\n\n\ndef _reconstruct_object(typ, obj, axes, dtype):\n \"\"\"Reconstruct an object given its type, raw value, and possibly empty\n (None) axes.\n\n Parameters\n ----------\n typ : object\n A type\n obj : object\n The value to use in the type constructor\n axes : dict\n The axes to use to construct the resulting pandas object\n\n Returns\n -------\n ret : typ\n An object of type ``typ`` with the value `obj` and possible axes\n `axes`.\n \"\"\"\n try:\n typ = typ.type\n except AttributeError:\n pass\n\n res_t = np.result_type(obj.dtype, dtype)\n\n if (not isinstance(typ, partial) and\n issubclass(typ, pd.core.generic.PandasObject)):\n return typ(obj, dtype=res_t, **axes)\n\n # special case for pathological things like ~True/~False\n if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:\n ret_value = res_t.type(obj)\n else:\n ret_value = typ(obj).astype(res_t)\n # The condition is to distinguish 0-dim array (returned in case of\n # scalar) and 1 element array\n # e.g. np.array(0) and np.array([0])\n if len(obj.shape) == 1 and len(obj) == 1:\n if not isinstance(ret_value, np.ndarray):\n ret_value = np.array([ret_value]).astype(res_t)\n\n return ret_value\n", "import warnings\n\nimport numpy as np\nfrom pandas._libs import index as libindex\nfrom pandas.core.dtypes.common import (\n is_dtype_equal,\n pandas_dtype,\n needs_i8_conversion,\n is_integer_dtype,\n is_float,\n is_bool,\n is_bool_dtype,\n is_scalar)\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas import compat\nfrom pandas.core import algorithms\nimport pandas.core.common as com\nfrom pandas.core.indexes.base import (\n Index, InvalidIndexError, _index_shared_docs)\nfrom pandas.util._decorators import Appender, cache_readonly\nimport pandas.core.dtypes.concat as _concat\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.ops import get_op_result_name\n\n_num_index_shared_docs = dict()\n\n\nclass NumericIndex(Index):\n \"\"\"\n Provide numeric type operations\n\n This is an abstract class\n\n \"\"\"\n _is_numeric_dtype = True\n\n def __new__(cls, data=None, dtype=None, copy=False, name=None,\n fastpath=None):\n\n if fastpath is not None:\n warnings.warn(\"The 'fastpath' keyword is deprecated, and will be \"\n \"removed in a future version.\",\n FutureWarning, stacklevel=2)\n if fastpath:\n return cls._simple_new(data, name=name)\n\n # is_scalar, generators handled in coerce_to_ndarray\n data = cls._coerce_to_ndarray(data)\n\n if issubclass(data.dtype.type, compat.string_types):\n cls._string_data_error(data)\n\n if copy or not is_dtype_equal(data.dtype, cls._default_dtype):\n subarr = np.array(data, dtype=cls._default_dtype, copy=copy)\n cls._assert_safe_casting(data, subarr)\n else:\n subarr = data\n\n if name is None and hasattr(data, 'name'):\n name = data.name\n return cls._simple_new(subarr, name=name)\n\n @Appender(_index_shared_docs['_maybe_cast_slice_bound'])\n def _maybe_cast_slice_bound(self, label, side, kind):\n assert kind in ['ix', 'loc', 'getitem', None]\n\n # we will try to coerce to integers\n return self._maybe_cast_indexer(label)\n\n @Appender(_index_shared_docs['_shallow_copy'])\n def _shallow_copy(self, values=None, **kwargs):\n if values is not None and not self._can_hold_na:\n # Ensure we are not returning an Int64Index with float data:\n return 
self._shallow_copy_with_infer(values=values, **kwargs)\n return (super(NumericIndex, self)._shallow_copy(values=values,\n **kwargs))\n\n def _convert_for_op(self, value):\n \"\"\" Convert value to be insertable to ndarray \"\"\"\n\n if is_bool(value) or is_bool_dtype(value):\n # force conversion to object\n # so we don't lose the bools\n raise TypeError\n\n return value\n\n def _convert_tolerance(self, tolerance, target):\n tolerance = np.asarray(tolerance)\n if target.size != tolerance.size and tolerance.size > 1:\n raise ValueError('list-like tolerance size must match '\n 'target index size')\n if not np.issubdtype(tolerance.dtype, np.number):\n if tolerance.ndim > 0:\n raise ValueError(('tolerance argument for %s must contain '\n 'numeric elements if it is list type') %\n (type(self).__name__,))\n else:\n raise ValueError(('tolerance argument for %s must be numeric '\n 'if it is a scalar: %r') %\n (type(self).__name__, tolerance))\n return tolerance\n\n @classmethod\n def _assert_safe_casting(cls, data, subarr):\n \"\"\"\n Subclasses need to override this only if the process of casting data\n from some accepted dtype to the internal dtype(s) bears the risk of\n truncation (e.g. float to int).\n \"\"\"\n pass\n\n def _concat_same_dtype(self, indexes, name):\n return _concat._concat_index_same_dtype(indexes).rename(name)\n\n @property\n def is_all_dates(self):\n \"\"\"\n Checks that all the labels are datetime objects\n \"\"\"\n return False\n\n @Appender(Index.insert.__doc__)\n def insert(self, loc, item):\n # treat NA values as nans:\n if is_scalar(item) and isna(item):\n item = self._na_value\n return super(NumericIndex, self).insert(loc, item)\n\n\n_num_index_shared_docs['class_descr'] = \"\"\"\n Immutable ndarray implementing an ordered, sliceable set. The basic object\n storing axis labels for all pandas objects. %(klass)s is a special case\n of `Index` with purely %(ltype)s labels. %(extra)s\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype (default: %(dtype)s)\n copy : bool\n Make a copy of input ndarray\n name : object\n Name to be stored in the index\n\n Attributes\n ----------\n None\n\n Methods\n -------\n None\n\n Notes\n -----\n An Index instance can **only** contain hashable objects.\n\n See also\n --------\n Index : The base pandas Index type\n\"\"\"\n\n_int64_descr_args = dict(\n klass='Int64Index',\n ltype='integer',\n dtype='int64',\n extra=''\n)\n\n\nclass IntegerIndex(NumericIndex):\n \"\"\"\n This is an abstract class for Int64Index, UInt64Index.\n \"\"\"\n\n def __contains__(self, key):\n \"\"\"\n Check if key is a float and has a decimal. 
If it has, return False.\n \"\"\"\n hash(key)\n try:\n if is_float(key) and int(key) != key:\n return False\n return key in self._engine\n except (OverflowError, TypeError, ValueError):\n return False\n\n\nclass Int64Index(IntegerIndex):\n __doc__ = _num_index_shared_docs['class_descr'] % _int64_descr_args\n\n _typ = 'int64index'\n _can_hold_na = False\n _engine_type = libindex.Int64Engine\n _default_dtype = np.int64\n\n @property\n def inferred_type(self):\n \"\"\"Always 'integer' for ``Int64Index``\"\"\"\n return 'integer'\n\n @property\n def asi8(self):\n # do not cache or you'll create a memory leak\n return self.values.view('i8')\n\n @Appender(_index_shared_docs['_convert_scalar_indexer'])\n def _convert_scalar_indexer(self, key, kind=None):\n assert kind in ['ix', 'loc', 'getitem', 'iloc', None]\n\n # don't coerce ilocs to integers\n if kind != 'iloc':\n key = self._maybe_cast_indexer(key)\n return (super(Int64Index, self)\n ._convert_scalar_indexer(key, kind=kind))\n\n def _wrap_joined_index(self, joined, other):\n name = get_op_result_name(self, other)\n return Int64Index(joined, name=name)\n\n @classmethod\n def _assert_safe_casting(cls, data, subarr):\n \"\"\"\n Ensure incoming data can be represented as ints.\n \"\"\"\n if not issubclass(data.dtype.type, np.signedinteger):\n if not np.array_equal(data, subarr):\n raise TypeError('Unsafe NumPy casting, you must '\n 'explicitly cast')\n\n\nInt64Index._add_numeric_methods()\nInt64Index._add_logical_methods()\n\n_uint64_descr_args = dict(\n klass='UInt64Index',\n ltype='unsigned integer',\n dtype='uint64',\n extra=''\n)\n\n\nclass UInt64Index(IntegerIndex):\n __doc__ = _num_index_shared_docs['class_descr'] % _uint64_descr_args\n\n _typ = 'uint64index'\n _can_hold_na = False\n _engine_type = libindex.UInt64Engine\n _default_dtype = np.uint64\n\n @property\n def inferred_type(self):\n \"\"\"Always 'integer' for ``UInt64Index``\"\"\"\n return 'integer'\n\n @property\n def asi8(self):\n # do not cache or you'll create a memory leak\n return self.values.view('u8')\n\n @Appender(_index_shared_docs['_convert_scalar_indexer'])\n def _convert_scalar_indexer(self, key, kind=None):\n assert kind in ['ix', 'loc', 'getitem', 'iloc', None]\n\n # don't coerce ilocs to integers\n if kind != 'iloc':\n key = self._maybe_cast_indexer(key)\n return (super(UInt64Index, self)\n ._convert_scalar_indexer(key, kind=kind))\n\n @Appender(_index_shared_docs['_convert_arr_indexer'])\n def _convert_arr_indexer(self, keyarr):\n # Cast the indexer to uint64 if possible so\n # that the values returned from indexing are\n # also uint64.\n keyarr = com.asarray_tuplesafe(keyarr)\n if is_integer_dtype(keyarr):\n return com.asarray_tuplesafe(keyarr, dtype=np.uint64)\n return keyarr\n\n @Appender(_index_shared_docs['_convert_index_indexer'])\n def _convert_index_indexer(self, keyarr):\n # Cast the indexer to uint64 if possible so\n # that the values returned from indexing are\n # also uint64.\n if keyarr.is_integer():\n return keyarr.astype(np.uint64)\n return keyarr\n\n def _wrap_joined_index(self, joined, other):\n name = get_op_result_name(self, other)\n return UInt64Index(joined, name=name)\n\n @classmethod\n def _assert_safe_casting(cls, data, subarr):\n \"\"\"\n Ensure incoming data can be represented as uints.\n \"\"\"\n if not issubclass(data.dtype.type, np.unsignedinteger):\n if not np.array_equal(data, subarr):\n raise TypeError('Unsafe NumPy casting, you must '\n 'explicitly 
cast')\n\n\nUInt64Index._add_numeric_methods()\nUInt64Index._add_logical_methods()\n\n_float64_descr_args = dict(\n klass='Float64Index',\n dtype='float64',\n ltype='float',\n extra=''\n)\n\n\nclass Float64Index(NumericIndex):\n __doc__ = _num_index_shared_docs['class_descr'] % _float64_descr_args\n\n _typ = 'float64index'\n _engine_type = libindex.Float64Engine\n _default_dtype = np.float64\n\n @property\n def inferred_type(self):\n \"\"\"Always 'floating' for ``Float64Index``\"\"\"\n return 'floating'\n\n @Appender(_index_shared_docs['astype'])\n def astype(self, dtype, copy=True):\n dtype = pandas_dtype(dtype)\n if needs_i8_conversion(dtype):\n msg = ('Cannot convert Float64Index to dtype {dtype}; integer '\n 'values are required for conversion').format(dtype=dtype)\n raise TypeError(msg)\n elif is_integer_dtype(dtype) and self.hasnans:\n # GH 13149\n raise ValueError('Cannot convert NA to integer')\n return super(Float64Index, self).astype(dtype, copy=copy)\n\n @Appender(_index_shared_docs['_convert_scalar_indexer'])\n def _convert_scalar_indexer(self, key, kind=None):\n assert kind in ['ix', 'loc', 'getitem', 'iloc', None]\n\n if kind == 'iloc':\n return self._validate_indexer('positional', key, kind)\n\n return key\n\n @Appender(_index_shared_docs['_convert_slice_indexer'])\n def _convert_slice_indexer(self, key, kind=None):\n # if we are not a slice, then we are done\n if not isinstance(key, slice):\n return key\n\n if kind == 'iloc':\n return super(Float64Index, self)._convert_slice_indexer(key,\n kind=kind)\n\n # translate to locations\n return self.slice_indexer(key.start, key.stop, key.step, kind=kind)\n\n def _format_native_types(self, na_rep='', float_format=None, decimal='.',\n quoting=None, **kwargs):\n from pandas.io.formats.format import FloatArrayFormatter\n formatter = FloatArrayFormatter(self.values, na_rep=na_rep,\n float_format=float_format,\n decimal=decimal, quoting=quoting,\n fixed_width=False)\n return formatter.get_result_as_array()\n\n def get_value(self, series, key):\n \"\"\" we always want to get an index value, never a value \"\"\"\n if not is_scalar(key):\n raise InvalidIndexError\n\n k = com.values_from_object(key)\n loc = self.get_loc(k)\n new_values = com.values_from_object(series)[loc]\n\n return new_values\n\n def equals(self, other):\n \"\"\"\n Determines if two Index objects contain the same elements.\n \"\"\"\n if self is other:\n return True\n\n if not isinstance(other, Index):\n return False\n\n # need to compare nans locations and make sure that they are the same\n # since nans don't compare equal this is a bit tricky\n try:\n if not isinstance(other, Float64Index):\n other = self._constructor(other)\n if (not is_dtype_equal(self.dtype, other.dtype) or\n self.shape != other.shape):\n return False\n left, right = self._ndarray_values, other._ndarray_values\n return ((left == right) | (self._isnan & other._isnan)).all()\n except (TypeError, ValueError):\n return False\n\n def __contains__(self, other):\n if super(Float64Index, self).__contains__(other):\n return True\n\n try:\n # if other is a sequence this throws a ValueError\n return np.isnan(other) and self.hasnans\n except ValueError:\n try:\n return len(other) <= 1 and ibase._try_get_item(other) in self\n except TypeError:\n pass\n except TypeError:\n pass\n\n return False\n\n @Appender(_index_shared_docs['get_loc'])\n def get_loc(self, key, method=None, tolerance=None):\n try:\n if np.all(np.isnan(key)) or is_bool(key):\n nan_idxs = self._nan_idxs\n try:\n return nan_idxs.item()\n except 
(ValueError, IndexError):\n # should only need to catch ValueError here but on numpy\n # 1.7 .item() can raise IndexError when NaNs are present\n if not len(nan_idxs):\n raise KeyError(key)\n return nan_idxs\n except (TypeError, NotImplementedError):\n pass\n return super(Float64Index, self).get_loc(key, method=method,\n tolerance=tolerance)\n\n @cache_readonly\n def is_unique(self):\n return super(Float64Index, self).is_unique and self._nan_idxs.size < 2\n\n @Appender(Index.isin.__doc__)\n def isin(self, values, level=None):\n if level is not None:\n self._validate_index_level(level)\n return algorithms.isin(np.array(self), values)\n\n\nFloat64Index._add_numeric_methods()\nFloat64Index._add_logical_methods_disabled()\n", "# -*- coding: utf-8 -*-\nimport codecs\nfrom collections import OrderedDict\nimport locale\nimport os\nimport sys\nfrom uuid import uuid4\n\nimport pytest\n\nfrom pandas.compat import PY3, intern\nfrom pandas.util._decorators import deprecate_kwarg, make_signature\nfrom pandas.util._move import BadMove, move_into_mutable_buffer, stolenbuf\nimport pandas.util._test_decorators as td\nfrom pandas.util._validators import (\n validate_args, validate_args_and_kwargs, validate_bool_kwarg,\n validate_kwargs)\n\nimport pandas.core.common as com\nimport pandas.util.testing as tm\n\n\nclass TestDecorators(object):\n\n def setup_method(self, method):\n @deprecate_kwarg('old', 'new')\n def _f1(new=False):\n return new\n\n @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})\n def _f2(new=False):\n return new\n\n @deprecate_kwarg('old', 'new', lambda x: x + 1)\n def _f3(new=0):\n return new\n\n @deprecate_kwarg('old', None)\n def _f4(old=True, unchanged=True):\n return old\n\n self.f1 = _f1\n self.f2 = _f2\n self.f3 = _f3\n self.f4 = _f4\n\n def test_deprecate_kwarg(self):\n x = 78\n with tm.assert_produces_warning(FutureWarning):\n result = self.f1(old=x)\n assert result is x\n with tm.assert_produces_warning(None):\n self.f1(new=x)\n\n def test_dict_deprecate_kwarg(self):\n x = 'yes'\n with tm.assert_produces_warning(FutureWarning):\n result = self.f2(old=x)\n assert result\n\n def test_missing_deprecate_kwarg(self):\n x = 'bogus'\n with tm.assert_produces_warning(FutureWarning):\n result = self.f2(old=x)\n assert result == 'bogus'\n\n def test_callable_deprecate_kwarg(self):\n x = 5\n with tm.assert_produces_warning(FutureWarning):\n result = self.f3(old=x)\n assert result == x + 1\n with pytest.raises(TypeError):\n self.f3(old='hello')\n\n def test_bad_deprecate_kwarg(self):\n with pytest.raises(TypeError):\n @deprecate_kwarg('old', 'new', 0)\n def f4(new=None):\n pass\n\n def test_deprecate_keyword(self):\n x = 9\n with tm.assert_produces_warning(FutureWarning):\n result = self.f4(old=x)\n assert result is x\n with tm.assert_produces_warning(None):\n result = self.f4(unchanged=x)\n assert result is True\n\n\ndef test_rands():\n r = tm.rands(10)\n assert(len(r) == 10)\n\n\ndef test_rands_array():\n arr = tm.rands_array(5, size=10)\n assert(arr.shape == (10,))\n assert(len(arr[0]) == 5)\n\n arr = tm.rands_array(7, size=(10, 10))\n assert(arr.shape == (10, 10))\n assert(len(arr[1, 1]) == 7)\n\n\nclass TestValidateArgs(object):\n fname = 'func'\n\n def test_bad_min_fname_arg_count(self):\n msg = \"'max_fname_arg_count' must be non-negative\"\n with tm.assert_raises_regex(ValueError, msg):\n validate_args(self.fname, (None,), -1, 'foo')\n\n def test_bad_arg_length_max_value_single(self):\n args = (None, None)\n compat_args = ('foo',)\n\n min_fname_arg_count = 0\n max_length = 
len(compat_args) + min_fname_arg_count\n actual_length = len(args) + min_fname_arg_count\n msg = (r\"{fname}\\(\\) takes at most {max_length} \"\n r\"argument \\({actual_length} given\\)\"\n .format(fname=self.fname, max_length=max_length,\n actual_length=actual_length))\n\n with tm.assert_raises_regex(TypeError, msg):\n validate_args(self.fname, args,\n min_fname_arg_count,\n compat_args)\n\n def test_bad_arg_length_max_value_multiple(self):\n args = (None, None)\n compat_args = dict(foo=None)\n\n min_fname_arg_count = 2\n max_length = len(compat_args) + min_fname_arg_count\n actual_length = len(args) + min_fname_arg_count\n msg = (r\"{fname}\\(\\) takes at most {max_length} \"\n r\"arguments \\({actual_length} given\\)\"\n .format(fname=self.fname, max_length=max_length,\n actual_length=actual_length))\n\n with tm.assert_raises_regex(TypeError, msg):\n validate_args(self.fname, args,\n min_fname_arg_count,\n compat_args)\n\n def test_not_all_defaults(self):\n bad_arg = 'foo'\n msg = (\"the '{arg}' parameter is not supported \"\n r\"in the pandas implementation of {func}\\(\\)\".\n format(arg=bad_arg, func=self.fname))\n\n compat_args = OrderedDict()\n compat_args['foo'] = 2\n compat_args['bar'] = -1\n compat_args['baz'] = 3\n\n arg_vals = (1, -1, 3)\n\n for i in range(1, 3):\n with tm.assert_raises_regex(ValueError, msg):\n validate_args(self.fname, arg_vals[:i], 2, compat_args)\n\n def test_validation(self):\n # No exceptions should be thrown\n validate_args(self.fname, (None,), 2, dict(out=None))\n\n compat_args = OrderedDict()\n compat_args['axis'] = 1\n compat_args['out'] = None\n\n validate_args(self.fname, (1, None), 2, compat_args)\n\n\nclass TestValidateKwargs(object):\n fname = 'func'\n\n def test_bad_kwarg(self):\n goodarg = 'f'\n badarg = goodarg + 'o'\n\n compat_args = OrderedDict()\n compat_args[goodarg] = 'foo'\n compat_args[badarg + 'o'] = 'bar'\n kwargs = {goodarg: 'foo', badarg: 'bar'}\n msg = (r\"{fname}\\(\\) got an unexpected \"\n r\"keyword argument '{arg}'\".format(\n fname=self.fname, arg=badarg))\n\n with tm.assert_raises_regex(TypeError, msg):\n validate_kwargs(self.fname, kwargs, compat_args)\n\n def test_not_all_none(self):\n bad_arg = 'foo'\n msg = (r\"the '{arg}' parameter is not supported \"\n r\"in the pandas implementation of {func}\\(\\)\".\n format(arg=bad_arg, func=self.fname))\n\n compat_args = OrderedDict()\n compat_args['foo'] = 1\n compat_args['bar'] = 's'\n compat_args['baz'] = None\n\n kwarg_keys = ('foo', 'bar', 'baz')\n kwarg_vals = (2, 's', None)\n\n for i in range(1, 3):\n kwargs = dict(zip(kwarg_keys[:i],\n kwarg_vals[:i]))\n\n with tm.assert_raises_regex(ValueError, msg):\n validate_kwargs(self.fname, kwargs, compat_args)\n\n def test_validation(self):\n # No exceptions should be thrown\n compat_args = OrderedDict()\n compat_args['f'] = None\n compat_args['b'] = 1\n compat_args['ba'] = 's'\n kwargs = dict(f=None, b=1)\n validate_kwargs(self.fname, kwargs, compat_args)\n\n def test_validate_bool_kwarg(self):\n arg_names = ['inplace', 'copy']\n invalid_values = [1, \"True\", [1, 2, 3], 5.0]\n valid_values = [True, False, None]\n\n for name in arg_names:\n for value in invalid_values:\n with tm.assert_raises_regex(ValueError,\n \"For argument \\\"%s\\\" \"\n \"expected type bool, \"\n \"received type %s\" %\n (name, type(value).__name__)):\n validate_bool_kwarg(value, name)\n\n for value in valid_values:\n assert validate_bool_kwarg(value, name) == value\n\n\nclass TestValidateKwargsAndArgs(object):\n fname = 'func'\n\n def 
test_invalid_total_length_max_length_one(self):\n compat_args = ('foo',)\n kwargs = {'foo': 'FOO'}\n args = ('FoO', 'BaZ')\n\n min_fname_arg_count = 0\n max_length = len(compat_args) + min_fname_arg_count\n actual_length = len(kwargs) + len(args) + min_fname_arg_count\n msg = (r\"{fname}\\(\\) takes at most {max_length} \"\n r\"argument \\({actual_length} given\\)\"\n .format(fname=self.fname, max_length=max_length,\n actual_length=actual_length))\n\n with tm.assert_raises_regex(TypeError, msg):\n validate_args_and_kwargs(self.fname, args, kwargs,\n min_fname_arg_count,\n compat_args)\n\n def test_invalid_total_length_max_length_multiple(self):\n compat_args = ('foo', 'bar', 'baz')\n kwargs = {'foo': 'FOO', 'bar': 'BAR'}\n args = ('FoO', 'BaZ')\n\n min_fname_arg_count = 2\n max_length = len(compat_args) + min_fname_arg_count\n actual_length = len(kwargs) + len(args) + min_fname_arg_count\n msg = (r\"{fname}\\(\\) takes at most {max_length} \"\n r\"arguments \\({actual_length} given\\)\"\n .format(fname=self.fname, max_length=max_length,\n actual_length=actual_length))\n\n with tm.assert_raises_regex(TypeError, msg):\n validate_args_and_kwargs(self.fname, args, kwargs,\n min_fname_arg_count,\n compat_args)\n\n def test_no_args_with_kwargs(self):\n bad_arg = 'bar'\n min_fname_arg_count = 2\n\n compat_args = OrderedDict()\n compat_args['foo'] = -5\n compat_args[bad_arg] = 1\n\n msg = (r\"the '{arg}' parameter is not supported \"\n r\"in the pandas implementation of {func}\\(\\)\".\n format(arg=bad_arg, func=self.fname))\n\n args = ()\n kwargs = {'foo': -5, bad_arg: 2}\n tm.assert_raises_regex(ValueError, msg,\n validate_args_and_kwargs,\n self.fname, args, kwargs,\n min_fname_arg_count, compat_args)\n\n args = (-5, 2)\n kwargs = {}\n tm.assert_raises_regex(ValueError, msg,\n validate_args_and_kwargs,\n self.fname, args, kwargs,\n min_fname_arg_count, compat_args)\n\n def test_duplicate_argument(self):\n min_fname_arg_count = 2\n compat_args = OrderedDict()\n compat_args['foo'] = None\n compat_args['bar'] = None\n compat_args['baz'] = None\n kwargs = {'foo': None, 'bar': None}\n args = (None,) # duplicate value for 'foo'\n\n msg = (r\"{fname}\\(\\) got multiple values for keyword \"\n r\"argument '{arg}'\".format(fname=self.fname, arg='foo'))\n\n with tm.assert_raises_regex(TypeError, msg):\n validate_args_and_kwargs(self.fname, args, kwargs,\n min_fname_arg_count,\n compat_args)\n\n def test_validation(self):\n # No exceptions should be thrown\n compat_args = OrderedDict()\n compat_args['foo'] = 1\n compat_args['bar'] = None\n compat_args['baz'] = -2\n kwargs = {'baz': -2}\n args = (1, None)\n\n min_fname_arg_count = 2\n validate_args_and_kwargs(self.fname, args, kwargs,\n min_fname_arg_count,\n compat_args)\n\n\nclass TestMove(object):\n\n def test_cannot_create_instance_of_stolenbuffer(self):\n \"\"\"Stolen buffers need to be created through the smart constructor\n ``move_into_mutable_buffer`` which has a bunch of checks in it.\n \"\"\"\n msg = \"cannot create 'pandas.util._move.stolenbuf' instances\"\n with tm.assert_raises_regex(TypeError, msg):\n stolenbuf()\n\n def test_more_than_one_ref(self):\n \"\"\"Test case for when we try to use ``move_into_mutable_buffer`` when\n the object being moved has other references.\n \"\"\"\n b = b'testing'\n\n with pytest.raises(BadMove) as e:\n def handle_success(type_, value, tb):\n assert value.args[0] is b\n return type(e).handle_success(e, type_, value, tb) # super\n\n e.handle_success = handle_success\n move_into_mutable_buffer(b)\n\n def 
test_exactly_one_ref(self):\n \"\"\"Test case for when the object being moved has exactly one reference.\n \"\"\"\n b = b'testing'\n\n # We need to pass an expression on the stack to ensure that there are\n # not extra references hanging around. We cannot rewrite this test as\n # buf = b[:-3]\n # as_stolen_buf = move_into_mutable_buffer(buf)\n # because then we would have more than one reference to buf.\n as_stolen_buf = move_into_mutable_buffer(b[:-3])\n\n # materialize as bytearray to show that it is mutable\n assert bytearray(as_stolen_buf) == b'test'\n\n @pytest.mark.skipif(PY3, reason='bytes objects cannot be interned in py3')\n def test_interned(self):\n salt = uuid4().hex\n\n def make_string():\n # We need to actually create a new string so that it has refcount\n # one. We use a uuid so that we know the string could not already\n # be in the intern table.\n return ''.join(('testing: ', salt))\n\n # This should work, the string has one reference on the stack.\n move_into_mutable_buffer(make_string())\n\n refcount = [None] # nonlocal\n\n def ref_capture(ob):\n # Subtract two because those are the references owned by this\n # frame:\n # 1. The local variables of this stack frame.\n # 2. The python data stack of this stack frame.\n refcount[0] = sys.getrefcount(ob) - 2\n return ob\n\n with pytest.raises(BadMove):\n # If we intern the string it will still have one reference but now\n # it is in the intern table so if other people intern the same\n # string while the mutable buffer holds the first string they will\n # be the same instance.\n move_into_mutable_buffer(ref_capture(intern(make_string()))) # noqa\n\n assert refcount[0] == 1\n\n\ndef test_numpy_errstate_is_default():\n # The defaults since numpy 1.6.0\n expected = {'over': 'warn', 'divide': 'warn', 'invalid': 'warn',\n 'under': 'ignore'}\n import numpy as np\n from pandas.compat import numpy # noqa\n # The errstate should be unchanged after that import.\n assert np.geterr() == expected\n\n\n@td.skip_if_windows\nclass TestLocaleUtils(object):\n\n @classmethod\n def setup_class(cls):\n cls.locales = tm.get_locales()\n cls.current_locale = locale.getlocale()\n\n if not cls.locales:\n pytest.skip(\"No locales found\")\n\n @classmethod\n def teardown_class(cls):\n del cls.locales\n del cls.current_locale\n\n def test_can_set_locale_valid_set(self):\n # Setting the default locale should return True\n assert tm.can_set_locale('') is True\n\n def test_can_set_locale_invalid_set(self):\n # Setting an invalid locale should return False\n assert tm.can_set_locale('non-existent_locale') is False\n\n def test_can_set_locale_invalid_get(self, monkeypatch):\n # In some cases, an invalid locale can be set,\n # but a subsequent getlocale() raises a ValueError\n # See GH 22129\n\n def mockgetlocale():\n raise ValueError()\n\n with monkeypatch.context() as m:\n m.setattr(locale, 'getlocale', mockgetlocale)\n assert tm.can_set_locale('') is False\n\n def test_get_locales(self):\n # all systems should have at least a single locale\n # GH9744\n assert len(tm.get_locales()) > 0\n\n def test_get_locales_prefix(self):\n if len(self.locales) == 1:\n pytest.skip(\"Only a single locale found, no point in \"\n \"trying to test filtering locale prefixes\")\n first_locale = self.locales[0]\n assert len(tm.get_locales(prefix=first_locale[:2])) > 0\n\n def test_set_locale(self):\n if len(self.locales) == 1:\n pytest.skip(\"Only a single locale found, no point in \"\n \"trying to test setting another locale\")\n\n if com._all_none(*self.current_locale):\n # Not 
sure why, but on some travis runs with pytest,\n # getlocale() returned (None, None).\n pytest.skip(\"Current locale is not set.\")\n\n locale_override = os.environ.get('LOCALE_OVERRIDE', None)\n\n if locale_override is None:\n lang, enc = 'it_CH', 'UTF-8'\n elif locale_override == 'C':\n lang, enc = 'en_US', 'ascii'\n else:\n lang, enc = locale_override.split('.')\n\n enc = codecs.lookup(enc).name\n new_locale = lang, enc\n\n if not tm.can_set_locale(new_locale):\n with pytest.raises(locale.Error):\n with tm.set_locale(new_locale):\n pass\n else:\n with tm.set_locale(new_locale) as normalized_locale:\n new_lang, new_enc = normalized_locale.split('.')\n new_enc = codecs.lookup(enc).name\n normalized_locale = new_lang, new_enc\n assert normalized_locale == new_locale\n\n current_locale = locale.getlocale()\n assert current_locale == self.current_locale\n\n\ndef test_make_signature():\n # See GH 17608\n # Case where the func does not have default kwargs\n sig = make_signature(validate_kwargs)\n assert sig == (['fname', 'kwargs', 'compat_args'],\n ['fname', 'kwargs', 'compat_args'])\n\n # Case where the func does have default kwargs\n sig = make_signature(deprecate_kwarg)\n assert sig == (['old_arg_name', 'new_arg_name',\n 'mapping=None', 'stacklevel=2'],\n ['old_arg_name', 'new_arg_name', 'mapping', 'stacklevel'])\n\n\ndef test_safe_import(monkeypatch):\n assert not td.safe_import(\"foo\")\n assert not td.safe_import(\"pandas\", min_version=\"99.99.99\")\n\n # Create dummy module to be imported\n import types\n import sys\n mod_name = \"hello123\"\n mod = types.ModuleType(mod_name)\n mod.__version__ = \"1.5\"\n\n assert not td.safe_import(mod_name)\n monkeypatch.setitem(sys.modules, mod_name, mod)\n assert not td.safe_import(mod_name, min_version=\"2.0\")\n assert td.safe_import(mod_name, min_version=\"1.0\")\n", "# -*- coding: utf-8 -*-\n\nimport re\nfrom textwrap import dedent\nfrom datetime import datetime\nfrom distutils.version import LooseVersion\n\nimport pytest\nimport numpy as np\nimport pandas as pd\nfrom pandas import compat, DataFrame, MultiIndex, option_context, Index\nfrom pandas.compat import u, lrange, StringIO\nfrom pandas.util import testing as tm\nimport pandas.io.formats.format as fmt\n\ndiv_style = ''\ntry:\n import IPython\n if LooseVersion(IPython.__version__) < LooseVersion('3.0.0'):\n div_style = ' style=\"max-width:1500px;overflow:auto;\"'\nexcept (ImportError, AttributeError):\n pass\n\n\nclass TestToHTML(object):\n\n def test_to_html_with_col_space(self):\n def check_with_width(df, col_space):\n # check that col_space affects HTML generation\n # and be very brittle about it.\n html = df.to_html(col_space=col_space)\n hdrs = [x for x in html.split(r\"\\n\") if re.search(r\"<th[>\\s]\", x)]\n assert len(hdrs) > 0\n for h in hdrs:\n assert \"min-width\" in h\n assert str(col_space) in h\n\n df = DataFrame(np.random.random(size=(1, 3)))\n\n check_with_width(df, 30)\n check_with_width(df, 50)\n\n def test_to_html_with_empty_string_label(self):\n # GH3547, to_html regards empty string labels as repeated labels\n data = {'c1': ['a', 'b'], 'c2': ['a', ''], 'data': [1, 2]}\n df = DataFrame(data).set_index(['c1', 'c2'])\n res = df.to_html()\n assert \"rowspan\" not in res\n\n def test_to_html_unicode(self):\n df = DataFrame({u('\\u03c3'): np.arange(10.)})\n expected = u'<table border=\"1\" class=\"dataframe\">\\n <thead>\\n <tr style=\"text-align: right;\">\\n <th></th>\\n <th>\\u03c3</th>\\n </tr>\\n </thead>\\n <tbody>\\n <tr>\\n <th>0</th>\\n <td>0.0</td>\\n </tr>\\n 
<tr>\\n <th>1</th>\\n <td>1.0</td>\\n </tr>\\n <tr>\\n <th>2</th>\\n <td>2.0</td>\\n </tr>\\n <tr>\\n <th>3</th>\\n <td>3.0</td>\\n </tr>\\n <tr>\\n <th>4</th>\\n <td>4.0</td>\\n </tr>\\n <tr>\\n <th>5</th>\\n <td>5.0</td>\\n </tr>\\n <tr>\\n <th>6</th>\\n <td>6.0</td>\\n </tr>\\n <tr>\\n <th>7</th>\\n <td>7.0</td>\\n </tr>\\n <tr>\\n <th>8</th>\\n <td>8.0</td>\\n </tr>\\n <tr>\\n <th>9</th>\\n <td>9.0</td>\\n </tr>\\n </tbody>\\n</table>' # noqa\n assert df.to_html() == expected\n df = DataFrame({'A': [u('\\u03c3')]})\n expected = u'<table border=\"1\" class=\"dataframe\">\\n <thead>\\n <tr style=\"text-align: right;\">\\n <th></th>\\n <th>A</th>\\n </tr>\\n </thead>\\n <tbody>\\n <tr>\\n <th>0</th>\\n <td>\\u03c3</td>\\n </tr>\\n </tbody>\\n</table>' # noqa\n assert df.to_html() == expected\n\n def test_to_html_decimal(self):\n # GH 12031\n df = DataFrame({'A': [6.0, 3.1, 2.2]})\n result = df.to_html(decimal=',')\n expected = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: right;\">\\n'\n ' <th></th>\\n'\n ' <th>A</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th>0</th>\\n'\n ' <td>6,0</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>1</th>\\n'\n ' <td>3,1</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>2</th>\\n'\n ' <td>2,2</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n assert result == expected\n\n def test_to_html_escaped(self):\n a = 'str<ing1 &amp;'\n b = 'stri>ng2 &amp;'\n\n test_dict = {'co<l1': {a: \"<type 'str'>\",\n b: \"<type 'str'>\"},\n 'co>l2': {a: \"<type 'str'>\",\n b: \"<type 'str'>\"}}\n rs = DataFrame(test_dict).to_html()\n xp = \"\"\"<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>co&lt;l1</th>\n <th>co&gt;l2</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>str&lt;ing1 &amp;amp;</th>\n <td>&lt;type 'str'&gt;</td>\n <td>&lt;type 'str'&gt;</td>\n </tr>\n <tr>\n <th>stri&gt;ng2 &amp;amp;</th>\n <td>&lt;type 'str'&gt;</td>\n <td>&lt;type 'str'&gt;</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert xp == rs\n\n def test_to_html_escape_disabled(self):\n a = 'str<ing1 &amp;'\n b = 'stri>ng2 &amp;'\n\n test_dict = {'co<l1': {a: \"<b>bold</b>\",\n b: \"<b>bold</b>\"},\n 'co>l2': {a: \"<b>bold</b>\",\n b: \"<b>bold</b>\"}}\n rs = DataFrame(test_dict).to_html(escape=False)\n xp = \"\"\"<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>co<l1</th>\n <th>co>l2</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>str<ing1 &amp;</th>\n <td><b>bold</b></td>\n <td><b>bold</b></td>\n </tr>\n <tr>\n <th>stri>ng2 &amp;</th>\n <td><b>bold</b></td>\n <td><b>bold</b></td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert xp == rs\n\n def test_to_html_multiindex_index_false(self):\n # issue 8452\n df = DataFrame({\n 'a': range(2),\n 'b': range(3, 5),\n 'c': range(5, 7),\n 'd': range(3, 5)\n })\n df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']])\n result = df.to_html(index=False)\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr>\n <th colspan=\"2\" halign=\"left\">a</th>\n <th colspan=\"2\" halign=\"left\">b</th>\n </tr>\n <tr>\n <th>c</th>\n <th>d</th>\n <th>c</th>\n <th>d</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>0</td>\n <td>3</td>\n <td>5</td>\n <td>3</td>\n </tr>\n <tr>\n <td>1</td>\n <td>4</td>\n <td>6</td>\n <td>4</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert result == expected\n\n df.index = Index(df.index.values, name='idx')\n result = df.to_html(index=False)\n 
assert result == expected\n\n def test_to_html_multiindex_sparsify_false_multi_sparse(self):\n with option_context('display.multi_sparse', False):\n index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],\n names=['foo', None])\n\n df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)\n\n result = df.to_html()\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th></th>\n <th>0</th>\n <th>1</th>\n </tr>\n <tr>\n <th>foo</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <th>0</th>\n <td>0</td>\n <td>1</td>\n </tr>\n <tr>\n <th>0</th>\n <th>1</th>\n <td>2</td>\n <td>3</td>\n </tr>\n <tr>\n <th>1</th>\n <th>0</th>\n <td>4</td>\n <td>5</td>\n </tr>\n <tr>\n <th>1</th>\n <th>1</th>\n <td>6</td>\n <td>7</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert result == expected\n\n df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]],\n columns=index[::2], index=index)\n\n result = df.to_html()\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr>\n <th></th>\n <th>foo</th>\n <th>0</th>\n <th>1</th>\n </tr>\n <tr>\n <th></th>\n <th></th>\n <th>0</th>\n <th>0</th>\n </tr>\n <tr>\n <th>foo</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <th>0</th>\n <td>0</td>\n <td>1</td>\n </tr>\n <tr>\n <th>0</th>\n <th>1</th>\n <td>2</td>\n <td>3</td>\n </tr>\n <tr>\n <th>1</th>\n <th>0</th>\n <td>4</td>\n <td>5</td>\n </tr>\n <tr>\n <th>1</th>\n <th>1</th>\n <td>6</td>\n <td>7</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert result == expected\n\n def test_to_html_multiindex_sparsify(self):\n index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],\n names=['foo', None])\n\n df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)\n\n result = df.to_html()\n expected = \"\"\"<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th></th>\n <th>0</th>\n <th>1</th>\n </tr>\n <tr>\n <th>foo</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th rowspan=\"2\" valign=\"top\">0</th>\n <th>0</th>\n <td>0</td>\n <td>1</td>\n </tr>\n <tr>\n <th>1</th>\n <td>2</td>\n <td>3</td>\n </tr>\n <tr>\n <th rowspan=\"2\" valign=\"top\">1</th>\n <th>0</th>\n <td>4</td>\n <td>5</td>\n </tr>\n <tr>\n <th>1</th>\n <td>6</td>\n <td>7</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert result == expected\n\n df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], columns=index[::2],\n index=index)\n\n result = df.to_html()\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr>\n <th></th>\n <th>foo</th>\n <th>0</th>\n <th>1</th>\n </tr>\n <tr>\n <th></th>\n <th></th>\n <th>0</th>\n <th>0</th>\n </tr>\n <tr>\n <th>foo</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th rowspan=\"2\" valign=\"top\">0</th>\n <th>0</th>\n <td>0</td>\n <td>1</td>\n </tr>\n <tr>\n <th>1</th>\n <td>2</td>\n <td>3</td>\n </tr>\n <tr>\n <th rowspan=\"2\" valign=\"top\">1</th>\n <th>0</th>\n <td>4</td>\n <td>5</td>\n </tr>\n <tr>\n <th>1</th>\n <td>6</td>\n <td>7</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert result == expected\n\n def test_to_html_multiindex_odd_even_truncate(self):\n # GH 14882 - Issue on truncation with odd length DataFrame\n mi = MultiIndex.from_product([[100, 200, 300],\n [10, 20, 30],\n [1, 2, 3, 4, 5, 6, 7]],\n names=['a', 'b', 'c'])\n df = DataFrame({'n': range(len(mi))}, index=mi)\n result = 
df.to_html(max_rows=60)\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th></th>\n <th></th>\n <th>n</th>\n </tr>\n <tr>\n <th>a</th>\n <th>b</th>\n <th>c</th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th rowspan=\"21\" valign=\"top\">100</th>\n <th rowspan=\"7\" valign=\"top\">10</th>\n <th>1</th>\n <td>0</td>\n </tr>\n <tr>\n <th>2</th>\n <td>1</td>\n </tr>\n <tr>\n <th>3</th>\n <td>2</td>\n </tr>\n <tr>\n <th>4</th>\n <td>3</td>\n </tr>\n <tr>\n <th>5</th>\n <td>4</td>\n </tr>\n <tr>\n <th>6</th>\n <td>5</td>\n </tr>\n <tr>\n <th>7</th>\n <td>6</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">20</th>\n <th>1</th>\n <td>7</td>\n </tr>\n <tr>\n <th>2</th>\n <td>8</td>\n </tr>\n <tr>\n <th>3</th>\n <td>9</td>\n </tr>\n <tr>\n <th>4</th>\n <td>10</td>\n </tr>\n <tr>\n <th>5</th>\n <td>11</td>\n </tr>\n <tr>\n <th>6</th>\n <td>12</td>\n </tr>\n <tr>\n <th>7</th>\n <td>13</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">30</th>\n <th>1</th>\n <td>14</td>\n </tr>\n <tr>\n <th>2</th>\n <td>15</td>\n </tr>\n <tr>\n <th>3</th>\n <td>16</td>\n </tr>\n <tr>\n <th>4</th>\n <td>17</td>\n </tr>\n <tr>\n <th>5</th>\n <td>18</td>\n </tr>\n <tr>\n <th>6</th>\n <td>19</td>\n </tr>\n <tr>\n <th>7</th>\n <td>20</td>\n </tr>\n <tr>\n <th rowspan=\"19\" valign=\"top\">200</th>\n <th rowspan=\"7\" valign=\"top\">10</th>\n <th>1</th>\n <td>21</td>\n </tr>\n <tr>\n <th>2</th>\n <td>22</td>\n </tr>\n <tr>\n <th>3</th>\n <td>23</td>\n </tr>\n <tr>\n <th>4</th>\n <td>24</td>\n </tr>\n <tr>\n <th>5</th>\n <td>25</td>\n </tr>\n <tr>\n <th>6</th>\n <td>26</td>\n </tr>\n <tr>\n <th>7</th>\n <td>27</td>\n </tr>\n <tr>\n <th rowspan=\"5\" valign=\"top\">20</th>\n <th>1</th>\n <td>28</td>\n </tr>\n <tr>\n <th>2</th>\n <td>29</td>\n </tr>\n <tr>\n <th>...</th>\n <td>...</td>\n </tr>\n <tr>\n <th>6</th>\n <td>33</td>\n </tr>\n <tr>\n <th>7</th>\n <td>34</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">30</th>\n <th>1</th>\n <td>35</td>\n </tr>\n <tr>\n <th>2</th>\n <td>36</td>\n </tr>\n <tr>\n <th>3</th>\n <td>37</td>\n </tr>\n <tr>\n <th>4</th>\n <td>38</td>\n </tr>\n <tr>\n <th>5</th>\n <td>39</td>\n </tr>\n <tr>\n <th>6</th>\n <td>40</td>\n </tr>\n <tr>\n <th>7</th>\n <td>41</td>\n </tr>\n <tr>\n <th rowspan=\"21\" valign=\"top\">300</th>\n <th rowspan=\"7\" valign=\"top\">10</th>\n <th>1</th>\n <td>42</td>\n </tr>\n <tr>\n <th>2</th>\n <td>43</td>\n </tr>\n <tr>\n <th>3</th>\n <td>44</td>\n </tr>\n <tr>\n <th>4</th>\n <td>45</td>\n </tr>\n <tr>\n <th>5</th>\n <td>46</td>\n </tr>\n <tr>\n <th>6</th>\n <td>47</td>\n </tr>\n <tr>\n <th>7</th>\n <td>48</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">20</th>\n <th>1</th>\n <td>49</td>\n </tr>\n <tr>\n <th>2</th>\n <td>50</td>\n </tr>\n <tr>\n <th>3</th>\n <td>51</td>\n </tr>\n <tr>\n <th>4</th>\n <td>52</td>\n </tr>\n <tr>\n <th>5</th>\n <td>53</td>\n </tr>\n <tr>\n <th>6</th>\n <td>54</td>\n </tr>\n <tr>\n <th>7</th>\n <td>55</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">30</th>\n <th>1</th>\n <td>56</td>\n </tr>\n <tr>\n <th>2</th>\n <td>57</td>\n </tr>\n <tr>\n <th>3</th>\n <td>58</td>\n </tr>\n <tr>\n <th>4</th>\n <td>59</td>\n </tr>\n <tr>\n <th>5</th>\n <td>60</td>\n </tr>\n <tr>\n <th>6</th>\n <td>61</td>\n </tr>\n <tr>\n <th>7</th>\n <td>62</td>\n </tr>\n </tbody>\n</table>\"\"\"\n assert result == expected\n\n # Test that ... 
appears in a middle level\n result = df.to_html(max_rows=56)\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th></th>\n <th></th>\n <th>n</th>\n </tr>\n <tr>\n <th>a</th>\n <th>b</th>\n <th>c</th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th rowspan=\"21\" valign=\"top\">100</th>\n <th rowspan=\"7\" valign=\"top\">10</th>\n <th>1</th>\n <td>0</td>\n </tr>\n <tr>\n <th>2</th>\n <td>1</td>\n </tr>\n <tr>\n <th>3</th>\n <td>2</td>\n </tr>\n <tr>\n <th>4</th>\n <td>3</td>\n </tr>\n <tr>\n <th>5</th>\n <td>4</td>\n </tr>\n <tr>\n <th>6</th>\n <td>5</td>\n </tr>\n <tr>\n <th>7</th>\n <td>6</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">20</th>\n <th>1</th>\n <td>7</td>\n </tr>\n <tr>\n <th>2</th>\n <td>8</td>\n </tr>\n <tr>\n <th>3</th>\n <td>9</td>\n </tr>\n <tr>\n <th>4</th>\n <td>10</td>\n </tr>\n <tr>\n <th>5</th>\n <td>11</td>\n </tr>\n <tr>\n <th>6</th>\n <td>12</td>\n </tr>\n <tr>\n <th>7</th>\n <td>13</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">30</th>\n <th>1</th>\n <td>14</td>\n </tr>\n <tr>\n <th>2</th>\n <td>15</td>\n </tr>\n <tr>\n <th>3</th>\n <td>16</td>\n </tr>\n <tr>\n <th>4</th>\n <td>17</td>\n </tr>\n <tr>\n <th>5</th>\n <td>18</td>\n </tr>\n <tr>\n <th>6</th>\n <td>19</td>\n </tr>\n <tr>\n <th>7</th>\n <td>20</td>\n </tr>\n <tr>\n <th rowspan=\"15\" valign=\"top\">200</th>\n <th rowspan=\"7\" valign=\"top\">10</th>\n <th>1</th>\n <td>21</td>\n </tr>\n <tr>\n <th>2</th>\n <td>22</td>\n </tr>\n <tr>\n <th>3</th>\n <td>23</td>\n </tr>\n <tr>\n <th>4</th>\n <td>24</td>\n </tr>\n <tr>\n <th>5</th>\n <td>25</td>\n </tr>\n <tr>\n <th>6</th>\n <td>26</td>\n </tr>\n <tr>\n <th>7</th>\n <td>27</td>\n </tr>\n <tr>\n <th>...</th>\n <th>...</th>\n <td>...</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">30</th>\n <th>1</th>\n <td>35</td>\n </tr>\n <tr>\n <th>2</th>\n <td>36</td>\n </tr>\n <tr>\n <th>3</th>\n <td>37</td>\n </tr>\n <tr>\n <th>4</th>\n <td>38</td>\n </tr>\n <tr>\n <th>5</th>\n <td>39</td>\n </tr>\n <tr>\n <th>6</th>\n <td>40</td>\n </tr>\n <tr>\n <th>7</th>\n <td>41</td>\n </tr>\n <tr>\n <th rowspan=\"21\" valign=\"top\">300</th>\n <th rowspan=\"7\" valign=\"top\">10</th>\n <th>1</th>\n <td>42</td>\n </tr>\n <tr>\n <th>2</th>\n <td>43</td>\n </tr>\n <tr>\n <th>3</th>\n <td>44</td>\n </tr>\n <tr>\n <th>4</th>\n <td>45</td>\n </tr>\n <tr>\n <th>5</th>\n <td>46</td>\n </tr>\n <tr>\n <th>6</th>\n <td>47</td>\n </tr>\n <tr>\n <th>7</th>\n <td>48</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">20</th>\n <th>1</th>\n <td>49</td>\n </tr>\n <tr>\n <th>2</th>\n <td>50</td>\n </tr>\n <tr>\n <th>3</th>\n <td>51</td>\n </tr>\n <tr>\n <th>4</th>\n <td>52</td>\n </tr>\n <tr>\n <th>5</th>\n <td>53</td>\n </tr>\n <tr>\n <th>6</th>\n <td>54</td>\n </tr>\n <tr>\n <th>7</th>\n <td>55</td>\n </tr>\n <tr>\n <th rowspan=\"7\" valign=\"top\">30</th>\n <th>1</th>\n <td>56</td>\n </tr>\n <tr>\n <th>2</th>\n <td>57</td>\n </tr>\n <tr>\n <th>3</th>\n <td>58</td>\n </tr>\n <tr>\n <th>4</th>\n <td>59</td>\n </tr>\n <tr>\n <th>5</th>\n <td>60</td>\n </tr>\n <tr>\n <th>6</th>\n <td>61</td>\n </tr>\n <tr>\n <th>7</th>\n <td>62</td>\n </tr>\n </tbody>\n</table>\"\"\"\n assert result == expected\n\n def test_to_html_index_formatter(self):\n df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], columns=['foo', None],\n index=lrange(4))\n\n f = lambda x: 'abcd' [x]\n result = df.to_html(formatters={'__index__': f})\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr 
style=\"text-align: right;\">\n <th></th>\n <th>foo</th>\n <th>None</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>a</th>\n <td>0</td>\n <td>1</td>\n </tr>\n <tr>\n <th>b</th>\n <td>2</td>\n <td>3</td>\n </tr>\n <tr>\n <th>c</th>\n <td>4</td>\n <td>5</td>\n </tr>\n <tr>\n <th>d</th>\n <td>6</td>\n <td>7</td>\n </tr>\n </tbody>\n</table>\"\"\"\n\n assert result == expected\n\n def test_to_html_datetime64_monthformatter(self):\n months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]\n x = DataFrame({'months': months})\n\n def format_func(x):\n return x.strftime('%Y-%m')\n result = x.to_html(formatters={'months': format_func})\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>months</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>2016-01</td>\n </tr>\n <tr>\n <th>1</th>\n <td>2016-02</td>\n </tr>\n </tbody>\n</table>\"\"\"\n assert result == expected\n\n def test_to_html_datetime64_hourformatter(self):\n\n x = DataFrame({'hod': pd.to_datetime(['10:10:10.100', '12:12:12.120'],\n format='%H:%M:%S.%f')})\n\n def format_func(x):\n return x.strftime('%H:%M')\n result = x.to_html(formatters={'hod': format_func})\n expected = \"\"\"\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>hod</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>10:10</td>\n </tr>\n <tr>\n <th>1</th>\n <td>12:12</td>\n </tr>\n </tbody>\n</table>\"\"\"\n assert result == expected\n\n def test_to_html_regression_GH6098(self):\n df = DataFrame({\n u('clé1'): [u('a'), u('a'), u('b'), u('b'), u('a')],\n u('clé2'): [u('1er'), u('2ème'), u('1er'), u('2ème'), u('1er')],\n 'données1': np.random.randn(5),\n 'données2': np.random.randn(5)})\n\n # it works\n df.pivot_table(index=[u('clé1')], columns=[u('clé2')])._repr_html_()\n\n def test_to_html_truncate(self):\n index = pd.DatetimeIndex(start='20010101', freq='D', periods=20)\n df = DataFrame(index=index, columns=range(20))\n result = df.to_html(max_rows=8, max_cols=4)\n expected = '''\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>0</th>\n <th>1</th>\n <th>...</th>\n <th>18</th>\n <th>19</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>2001-01-01</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>2001-01-02</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>2001-01-03</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>2001-01-04</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>...</th>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n </tr>\n <tr>\n <th>2001-01-17</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>2001-01-18</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>2001-01-19</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>2001-01-20</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n </tbody>\n</table>'''\n assert result == expected\n\n def test_to_html_truncate_multi_index(self):\n arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],\n ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]\n df = DataFrame(index=arrays, 
columns=arrays)\n result = df.to_html(max_rows=7, max_cols=7)\n expected = '''\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr>\n <th></th>\n <th></th>\n <th colspan=\"2\" halign=\"left\">bar</th>\n <th>baz</th>\n <th>...</th>\n <th>foo</th>\n <th colspan=\"2\" halign=\"left\">qux</th>\n </tr>\n <tr>\n <th></th>\n <th></th>\n <th>one</th>\n <th>two</th>\n <th>one</th>\n <th>...</th>\n <th>two</th>\n <th>one</th>\n <th>two</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th rowspan=\"2\" valign=\"top\">bar</th>\n <th>one</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>two</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>baz</th>\n <th>one</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>...</th>\n <th>...</th>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n </tr>\n <tr>\n <th>foo</th>\n <th>two</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th rowspan=\"2\" valign=\"top\">qux</th>\n <th>one</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>two</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n </tbody>\n</table>'''\n assert result == expected\n\n @pytest.mark.xfail(reason='GH22887 TypeError', strict=True)\n def test_to_html_truncate_multi_index_sparse_off(self):\n arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],\n ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]\n df = DataFrame(index=arrays, columns=arrays)\n result = df.to_html(max_rows=7, max_cols=7, sparsify=False)\n expected = '''\\\n<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr>\n <th></th>\n <th></th>\n <th>bar</th>\n <th>bar</th>\n <th>baz</th>\n <th>...</th>\n <th>foo</th>\n <th>qux</th>\n <th>qux</th>\n </tr>\n <tr>\n <th></th>\n <th></th>\n <th>one</th>\n <th>two</th>\n <th>one</th>\n <th>...</th>\n <th>two</th>\n <th>one</th>\n <th>two</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>bar</th>\n <th>one</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>bar</th>\n <th>two</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>baz</th>\n <th>one</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>foo</th>\n <th>two</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>qux</th>\n <th>one</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>qux</th>\n <th>two</th>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n <td>NaN</td>\n <td>NaN</td>\n </tr>\n </tbody>\n</table>'''\n assert result == expected\n\n def test_to_html_border(self):\n df = DataFrame({'A': [1, 2]})\n result = df.to_html()\n assert 'border=\"1\"' in result\n\n def test_to_html_border_option(self):\n df = DataFrame({'A': [1, 2]})\n with pd.option_context('display.html.border', 0):\n result 
= df.to_html()\n assert 'border=\"0\"' in result\n assert 'border=\"0\"' in df._repr_html_()\n\n def test_to_html_border_zero(self):\n df = DataFrame({'A': [1, 2]})\n result = df.to_html(border=0)\n assert 'border=\"0\"' in result\n\n @tm.capture_stdout\n def test_display_option_warning(self):\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n pd.options.html.border\n\n def test_to_html(self):\n # big mixed\n biggie = DataFrame({'A': np.random.randn(200),\n 'B': tm.makeStringIndex(200)},\n index=lrange(200))\n\n biggie.loc[:20, 'A'] = np.nan\n biggie.loc[:20, 'B'] = np.nan\n s = biggie.to_html()\n\n buf = StringIO()\n retval = biggie.to_html(buf=buf)\n assert retval is None\n assert buf.getvalue() == s\n\n assert isinstance(s, compat.string_types)\n\n biggie.to_html(columns=['B', 'A'], col_space=17)\n biggie.to_html(columns=['B', 'A'],\n formatters={'A': lambda x: '{x:.1f}'.format(x=x)})\n\n biggie.to_html(columns=['B', 'A'], float_format=str)\n biggie.to_html(columns=['B', 'A'], col_space=12, float_format=str)\n\n frame = DataFrame(index=np.arange(200))\n frame.to_html()\n\n def test_to_html_filename(self):\n biggie = DataFrame({'A': np.random.randn(200),\n 'B': tm.makeStringIndex(200)},\n index=lrange(200))\n\n biggie.loc[:20, 'A'] = np.nan\n biggie.loc[:20, 'B'] = np.nan\n with tm.ensure_clean('test.html') as path:\n biggie.to_html(path)\n with open(path, 'r') as f:\n s = biggie.to_html()\n s2 = f.read()\n assert s == s2\n\n frame = DataFrame(index=np.arange(200))\n with tm.ensure_clean('test.html') as path:\n frame.to_html(path)\n with open(path, 'r') as f:\n assert frame.to_html() == f.read()\n\n def test_to_html_with_no_bold(self):\n x = DataFrame({'x': np.random.randn(5)})\n ashtml = x.to_html(bold_rows=False)\n assert '<strong' not in ashtml[ashtml.find(\"</thead>\")]\n\n def test_to_html_columns_arg(self):\n frame = DataFrame(tm.getSeriesData())\n result = frame.to_html(columns=['A'])\n assert '<th>B</th>' not in result\n\n def test_to_html_multiindex(self):\n columns = MultiIndex.from_tuples(list(zip(np.arange(2).repeat(2),\n np.mod(lrange(4), 2))),\n names=['CL0', 'CL1'])\n df = DataFrame([list('abcd'), list('efgh')], columns=columns)\n result = df.to_html(justify='left')\n expected = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr>\\n'\n ' <th>CL0</th>\\n'\n ' <th colspan=\"2\" halign=\"left\">0</th>\\n'\n ' <th colspan=\"2\" halign=\"left\">1</th>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>CL1</th>\\n'\n ' <th>0</th>\\n'\n ' <th>1</th>\\n'\n ' <th>0</th>\\n'\n ' <th>1</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th>0</th>\\n'\n ' <td>a</td>\\n'\n ' <td>b</td>\\n'\n ' <td>c</td>\\n'\n ' <td>d</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>1</th>\\n'\n ' <td>e</td>\\n'\n ' <td>f</td>\\n'\n ' <td>g</td>\\n'\n ' <td>h</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n\n assert result == expected\n\n columns = MultiIndex.from_tuples(list(zip(\n range(4), np.mod(\n lrange(4), 2))))\n df = DataFrame([list('abcd'), list('efgh')], columns=columns)\n\n result = df.to_html(justify='right')\n expected = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr>\\n'\n ' <th></th>\\n'\n ' <th>0</th>\\n'\n ' <th>1</th>\\n'\n ' <th>2</th>\\n'\n ' <th>3</th>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th></th>\\n'\n ' <th>0</th>\\n'\n ' <th>1</th>\\n'\n ' <th>0</th>\\n'\n ' <th>1</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th>0</th>\\n'\n ' <td>a</td>\\n'\n ' <td>b</td>\\n'\n ' 
<td>c</td>\\n'\n ' <td>d</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>1</th>\\n'\n ' <td>e</td>\\n'\n ' <td>f</td>\\n'\n ' <td>g</td>\\n'\n ' <td>h</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n\n assert result == expected\n\n @pytest.mark.parametrize(\"justify\", fmt._VALID_JUSTIFY_PARAMETERS)\n def test_to_html_justify(self, justify):\n df = DataFrame({'A': [6, 30000, 2],\n 'B': [1, 2, 70000],\n 'C': [223442, 0, 1]},\n columns=['A', 'B', 'C'])\n result = df.to_html(justify=justify)\n expected = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: {justify};\">\\n'\n ' <th></th>\\n'\n ' <th>A</th>\\n'\n ' <th>B</th>\\n'\n ' <th>C</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th>0</th>\\n'\n ' <td>6</td>\\n'\n ' <td>1</td>\\n'\n ' <td>223442</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>1</th>\\n'\n ' <td>30000</td>\\n'\n ' <td>2</td>\\n'\n ' <td>0</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>2</th>\\n'\n ' <td>2</td>\\n'\n ' <td>70000</td>\\n'\n ' <td>1</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>'.format(justify=justify))\n assert result == expected\n\n @pytest.mark.parametrize(\"justify\", [\"super-right\", \"small-left\",\n \"noinherit\", \"tiny\", \"pandas\"])\n def test_to_html_invalid_justify(self, justify):\n # see gh-17527\n df = DataFrame()\n msg = \"Invalid value for justify parameter\"\n\n with tm.assert_raises_regex(ValueError, msg):\n df.to_html(justify=justify)\n\n def test_to_html_index(self):\n index = ['foo', 'bar', 'baz']\n df = DataFrame({'A': [1, 2, 3],\n 'B': [1.2, 3.4, 5.6],\n 'C': ['one', 'two', np.nan]},\n columns=['A', 'B', 'C'],\n index=index)\n expected_with_index = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: right;\">\\n'\n ' <th></th>\\n'\n ' <th>A</th>\\n'\n ' <th>B</th>\\n'\n ' <th>C</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th>foo</th>\\n'\n ' <td>1</td>\\n'\n ' <td>1.2</td>\\n'\n ' <td>one</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>bar</th>\\n'\n ' <td>2</td>\\n'\n ' <td>3.4</td>\\n'\n ' <td>two</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>baz</th>\\n'\n ' <td>3</td>\\n'\n ' <td>5.6</td>\\n'\n ' <td>NaN</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n assert df.to_html() == expected_with_index\n\n expected_without_index = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: right;\">\\n'\n ' <th>A</th>\\n'\n ' <th>B</th>\\n'\n ' <th>C</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <td>1</td>\\n'\n ' <td>1.2</td>\\n'\n ' <td>one</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <td>2</td>\\n'\n ' <td>3.4</td>\\n'\n ' <td>two</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <td>3</td>\\n'\n ' <td>5.6</td>\\n'\n ' <td>NaN</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n result = df.to_html(index=False)\n for i in index:\n assert i not in result\n assert result == expected_without_index\n df.index = Index(['foo', 'bar', 'baz'], name='idx')\n expected_with_index = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: right;\">\\n'\n ' <th></th>\\n'\n ' <th>A</th>\\n'\n ' <th>B</th>\\n'\n ' <th>C</th>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>idx</th>\\n'\n ' <th></th>\\n'\n ' <th></th>\\n'\n ' <th></th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th>foo</th>\\n'\n ' <td>1</td>\\n'\n ' <td>1.2</td>\\n'\n ' <td>one</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>bar</th>\\n'\n ' <td>2</td>\\n'\n ' 
<td>3.4</td>\\n'\n ' <td>two</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>baz</th>\\n'\n ' <td>3</td>\\n'\n ' <td>5.6</td>\\n'\n ' <td>NaN</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n assert df.to_html() == expected_with_index\n assert df.to_html(index=False) == expected_without_index\n\n tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')]\n df.index = MultiIndex.from_tuples(tuples)\n\n expected_with_index = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: right;\">\\n'\n ' <th></th>\\n'\n ' <th></th>\\n'\n ' <th>A</th>\\n'\n ' <th>B</th>\\n'\n ' <th>C</th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th rowspan=\"2\" valign=\"top\">foo</th>\\n'\n ' <th>car</th>\\n'\n ' <td>1</td>\\n'\n ' <td>1.2</td>\\n'\n ' <td>one</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>bike</th>\\n'\n ' <td>2</td>\\n'\n ' <td>3.4</td>\\n'\n ' <td>two</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>bar</th>\\n'\n ' <th>car</th>\\n'\n ' <td>3</td>\\n'\n ' <td>5.6</td>\\n'\n ' <td>NaN</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n assert df.to_html() == expected_with_index\n\n result = df.to_html(index=False)\n for i in ['foo', 'bar', 'car', 'bike']:\n assert i not in result\n # must be the same result as normal index\n assert result == expected_without_index\n\n df.index = MultiIndex.from_tuples(tuples, names=['idx1', 'idx2'])\n expected_with_index = ('<table border=\"1\" class=\"dataframe\">\\n'\n ' <thead>\\n'\n ' <tr style=\"text-align: right;\">\\n'\n ' <th></th>\\n'\n ' <th></th>\\n'\n ' <th>A</th>\\n'\n ' <th>B</th>\\n'\n ' <th>C</th>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>idx1</th>\\n'\n ' <th>idx2</th>\\n'\n ' <th></th>\\n'\n ' <th></th>\\n'\n ' <th></th>\\n'\n ' </tr>\\n'\n ' </thead>\\n'\n ' <tbody>\\n'\n ' <tr>\\n'\n ' <th rowspan=\"2\" valign=\"top\">foo</th>\\n'\n ' <th>car</th>\\n'\n ' <td>1</td>\\n'\n ' <td>1.2</td>\\n'\n ' <td>one</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>bike</th>\\n'\n ' <td>2</td>\\n'\n ' <td>3.4</td>\\n'\n ' <td>two</td>\\n'\n ' </tr>\\n'\n ' <tr>\\n'\n ' <th>bar</th>\\n'\n ' <th>car</th>\\n'\n ' <td>3</td>\\n'\n ' <td>5.6</td>\\n'\n ' <td>NaN</td>\\n'\n ' </tr>\\n'\n ' </tbody>\\n'\n '</table>')\n assert df.to_html() == expected_with_index\n assert df.to_html(index=False) == expected_without_index\n\n def test_to_html_with_classes(self):\n df = DataFrame()\n result = df.to_html(classes=\"sortable draggable\")\n expected = dedent(\"\"\"\n\n <table border=\"1\" class=\"dataframe sortable draggable\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n </tr>\n </thead>\n <tbody>\n </tbody>\n </table>\n\n \"\"\").strip()\n assert result == expected\n\n result = df.to_html(classes=[\"sortable\", \"draggable\"])\n assert result == expected\n\n def test_to_html_no_index_max_rows(self):\n # GH https://github.com/pandas-dev/pandas/issues/14998\n df = DataFrame({\"A\": [1, 2, 3, 4]})\n result = df.to_html(index=False, max_rows=1)\n expected = dedent(\"\"\"\\\n <table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th>A</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>1</td>\n </tr>\n </tbody>\n </table>\"\"\")\n assert result == expected\n\n def test_to_html_multiindex_max_cols(self):\n # GH 6131\n index = MultiIndex(levels=[['ba', 'bb', 'bc'], ['ca', 'cb', 'cc']],\n labels=[[0, 1, 2], [0, 1, 2]],\n names=['b', 'c'])\n columns = MultiIndex(levels=[['d'], ['aa', 'ab', 'ac']],\n labels=[[0, 0, 0], [0, 1, 2]],\n names=[None, 'a'])\n data = np.array(\n [[1., 
np.nan, np.nan], [np.nan, 2., np.nan], [np.nan, np.nan, 3.]])\n df = DataFrame(data, index, columns)\n result = df.to_html(max_cols=2)\n expected = dedent(\"\"\"\\\n <table border=\"1\" class=\"dataframe\">\n <thead>\n <tr>\n <th></th>\n <th></th>\n <th colspan=\"3\" halign=\"left\">d</th>\n </tr>\n <tr>\n <th></th>\n <th>a</th>\n <th>aa</th>\n <th>...</th>\n <th>ac</th>\n </tr>\n <tr>\n <th>b</th>\n <th>c</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>ba</th>\n <th>ca</th>\n <td>1.0</td>\n <td>...</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>bb</th>\n <th>cb</th>\n <td>NaN</td>\n <td>...</td>\n <td>NaN</td>\n </tr>\n <tr>\n <th>bc</th>\n <th>cc</th>\n <td>NaN</td>\n <td>...</td>\n <td>3.0</td>\n </tr>\n </tbody>\n </table>\"\"\")\n assert result == expected\n\n def test_to_html_notebook_has_style(self):\n df = pd.DataFrame({\"A\": [1, 2, 3]})\n result = df.to_html(notebook=True)\n assert \"tbody tr th:only-of-type\" in result\n assert \"vertical-align: middle;\" in result\n assert \"thead th\" in result\n\n def test_to_html_notebook_has_no_style(self):\n df = pd.DataFrame({\"A\": [1, 2, 3]})\n result = df.to_html()\n assert \"tbody tr th:only-of-type\" not in result\n assert \"vertical-align: middle;\" not in result\n assert \"thead th\" not in result\n\n def test_to_html_with_index_names_false(self):\n # gh-16493\n df = pd.DataFrame({\"A\": [1, 2]}, index=pd.Index(['a', 'b'],\n name='myindexname'))\n result = df.to_html(index_names=False)\n assert 'myindexname' not in result\n\n def test_to_html_with_id(self):\n # gh-8496\n df = pd.DataFrame({\"A\": [1, 2]}, index=pd.Index(['a', 'b'],\n name='myindexname'))\n result = df.to_html(index_names=False, table_id=\"TEST_ID\")\n assert ' id=\"TEST_ID\"' in result\n" ]
[ [ "numpy.result_type", "numpy.array", "pandas.core.common.flatten", "pandas.compat.iteritems", "pandas.compat.zip", "pandas.core.computation.common._result_type_many", "pandas.compat.range" ], [ "numpy.array_equal", "pandas.core.indexes.base._try_get_item", "pandas.core.common.asarray_tuplesafe", "pandas.core.ops.get_op_result_name", "numpy.issubdtype", "pandas.core.dtypes.missing.isna", "pandas.core.dtypes.common.pandas_dtype", "pandas.core.dtypes.common.is_bool", "pandas.core.dtypes.common.is_integer_dtype", "pandas.util._decorators.Appender", "pandas.core.dtypes.common.is_dtype_equal", "numpy.array", "pandas.core.dtypes.common.is_scalar", "pandas.core.common.values_from_object", "pandas.core.dtypes.concat._concat_index_same_dtype", "pandas.io.formats.format.FloatArrayFormatter", "numpy.isnan", "numpy.asarray", "pandas.core.dtypes.common.needs_i8_conversion", "pandas.core.dtypes.common.is_float", "pandas.core.dtypes.common.is_bool_dtype" ], [ "pandas.util._move.stolenbuf", "pandas.util.testing.can_set_locale", "pandas.util.testing.set_locale", "pandas.util.testing.rands_array", "numpy.geterr", "pandas.util._validators.validate_args", "pandas.util.testing.rands", "pandas.util._decorators.deprecate_kwarg", "pandas.util._test_decorators.safe_import", "pandas.util._validators.validate_args_and_kwargs", "pandas.util._validators.validate_kwargs", "pandas.util.testing.assert_raises_regex", "pandas.util.testing.assert_produces_warning", "pandas.util._validators.validate_bool_kwarg", "pandas.core.common._all_none", "pandas.util._move.move_into_mutable_buffer", "pandas.util.testing.get_locales", "pandas.util._decorators.make_signature" ], [ "pandas.compat.StringIO", "pandas.DatetimeIndex", "pandas.compat.u", "pandas.util.testing.assert_raises_regex", "pandas.util.testing.makeStringIndex", "pandas.util.testing.getSeriesData", "numpy.random.random", "pandas.util.testing.ensure_clean", "pandas.DataFrame", "numpy.arange", "pandas.MultiIndex", "pandas.util.testing.assert_produces_warning", "pandas.to_datetime", "numpy.array", "pandas.MultiIndex.from_tuples", "numpy.random.randn", "pandas.MultiIndex.from_arrays", "pandas.compat.lrange", "pandas.MultiIndex.from_product", "pandas.Index", "pandas.option_context" ] ]
billsioros/computational-geometry
[ "398a92e3c08046f85eb3e95828afe62230b816fb" ]
[ "Homework_1/exercise3.py" ]
[ "from matplotlib.patches import Polygon\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nfrom exercise1 import check_for_triangle\nfrom exercise1 import plot_2D_points\n\n\ndef remove_duplicates(lst): \n return [item for item in (set(tuple(i) for i in lst))] \n\n\n# select a point from avaliable points (for ccw)\ndef select_random_point(current_hull_points, point1, point2):\n random_point = current_hull_points[0][0]\n if random_point == point1 or random_point == point2:\n random_points = [p[0] for p in current_hull_points if p[0] != point1 and p[0] != point2]\n random_point = random_points[0]\n return random_point\n\n\n# makes thw final plot with all points and the convex hull\ndef plot_2D_hull(current_hull, all_points):\n points = []\n for line in current_hull:\n points.append(line[0])\n points.append(line[1])\n\n plot_2D_points(points+all_points, polyg=True)\n\n line_of_hull = []\n for k in current_hull:\n line_of_hull.append(k[0])\n line_of_hull.append(k[1])\n hull = np.array(line_of_hull)\n hull_plot = plt.Polygon(hull, fill=False)\n plt.gca().add_patch(hull_plot)\n del line_of_hull[:]\n plt.show()\n\n\n# returns the sign of det\ndef ccw(A, B, C):\n return (B[0] - A[0]) * (C[1] - A[1]) > (B[1] - A[1]) * (C[0] - A[0])\n\n\ndef check_ccw(p, previous_point, end_point, random_point):\n if ccw(previous_point, end_point, random_point):\n if not ccw(previous_point, end_point, p):\n return True\n else:\n return False\n else:\n if ccw(previous_point, end_point, p):\n return True\n else:\n return False\n\n\ndef beneath_beyond(points):\n # Step 1: sort points in descending\n sorted_points = sorted(points, key=lambda x: (x[0], x[1]), reverse=True)\n\n # Step 2: initial hull = triangle\n current_hull_points = []\n current_hull = []\n # if first 3 points are collinear, select (x,min(y)) and (x,max(y))\n if not check_for_triangle(sorted_points[0][0], sorted_points[0][1],\n sorted_points[1][0], sorted_points[1][1],\n sorted_points[2][0], sorted_points[2][1]):\n for p in sorted_points[1:]:\n if p[0] == sorted_points[0][0]:\n last = p\n sorted_points.remove(p)\n sorted_points.append(last)\n sorted_points = sorted(sorted_points, key=lambda x: x[0], reverse=True)\n\n for p in sorted_points[0:2]:\n current_hull_points.append([p, 'blue'])\n current_hull_points.append([sorted_points[2], 'red'])\n \n current_hull.append([sorted_points[0], sorted_points[1], 'blue'])\n current_hull.append([sorted_points[0], sorted_points[2], 'blue'])\n current_hull.append([sorted_points[1], sorted_points[2], 'blue'])\n \n del sorted_points[0:3]\n previous_point = current_hull_points[-1][0]\n\n # Step 3: \n color = [] \n purple_points = []\n for p in sorted_points:\n # Step 3B: find all red lines\n # check every blue line in hull, if it's red now\n for line in current_hull:\n if line[2] == 'blue':\n random_point = select_random_point(current_hull_points, line[0], line[1])\n if check_ccw(p, line[0], line[1], random_point):\n line[2] = 'red'\n else:\n line[2] = 'blue' \n\n # Step 3B: find two purple points\n # re-coloring points\n for point1 in current_hull_points:\n del color[:]\n for point2 in current_hull:\n if point2[0] == point1[0] or point2[1] == point1[0]:\n color.append(point2[2]) \n if len(color) > 0:\n if color[0] != 'purple' and color[1] != 'purple':\n if color[0] != color[1]: # red + blue = purple\n point1[1] = 'purple' \n \n del purple_points[:]\n for point in current_hull_points:\n if point[1] == 'purple':\n purple_points.append(point[0])\n\n # Step 3C: remove all red lines\n for line in 
current_hull:\n if line[2] == 'red':\n line[2] = 'delete_line'\n current_hull = [elem for elem in current_hull if elem[2] != 'delete_line']\n\n # Step 3C: put two lines from p to purple1 and purple2 point\n current_hull.append([p, purple_points[0], 'blue']) \n current_hull.append([p, purple_points[1], 'blue']) \n\n # initialize for next step\n current_hull_points.append([p,'red'])\n for point in current_hull_points:\n if point[1] == 'purple':\n point[1] = 'blue'\n plot_2D_hull(current_hull, points)\n\n\n\n\nif __name__ == \"__main__\":\n # read points from user(input choice 1)\n # number_of_points = input('Give the number of points: ')\n # if int(number_of_points) < 3:\n # print('Error: Program needs 3 points at least.')\n # exit()\n # points = list(tuple(map(int,input(\"Give a point: \").split())) for r in range(int(number_of_points)))\n \n # random poinsts(input choice 2)\n for i in range(10):\n points = [(random.randrange(-100, 100), random.randrange(-100, 100)) for i in range(20)]\n points = remove_duplicates(points)\n\n # call beneath_beyond algorithm\n beneath_beyond(points)\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.Polygon" ] ]
TheoLvs/westworld
[ "7fb435f3a028ff3d3156bf2a023b44ee06aa9f8b" ]
[ "westworld/_deprecated/chicken_game.py" ]
[ "\nimport sys\nsys.path.append(\"C:/git/reinforcement-learning\")\n\n\nfrom hyperion.agents import *\nfrom hyperion.environment import *\n\nimport random\nimport numpy as np\nimport uuid\nimport attr\n\n\nSTATUSES = [\"EGG\",\"CHICKEN\",\"COW\",\"FARMER\",\"SUPERMAN\"]\nSIZE = 100\n\n\n@attr.s(slots = True)\nclass Player(Agent):\n\n # # Agent id\n # id = attr.ib()\n # id.default\n # def _init_id(self):\n # return str(uuid.uuid1())\n\n # Status\n status = attr.ib(default = 0,init=False)\n\n # Position\n x = attr.ib(init = False)\n @x.default\n def _init_x(self):\n return random.randint(0,SIZE)\n\n\n def step(self,env):\n\n # Movement\n new_x = self.x + random.choice([-1,1])\n new_x = np.clip(new_x,0,SIZE-1)\n self.x = new_x\n\n # Others\n others = env.inverse_loc(self.id)\n for other in others:\n if other.x == self.x:\n if other.status == self.status:\n other.status = 0\n self.status += 1\n\n def interacts_with(self,other):\n return self.x == other.x,1\n\n\nclass ChickenGame(Environment):\n\n def render(self):\n env = [\" \"]*SIZE\n for agent in self.agents:\n env[agent.x] = str(agent.status)\n return \"|\"+\"\".join(env)+\"|\"\n\n\n\n def interactions(self):\n pass\n\n\n\n" ]
[ [ "numpy.clip" ] ]
tpulkit/txt2vid
[ "679b1672fb3221c6b5fe576a158974556047c201" ]
[ "Wav2Lip/util/wav2lip_inference_funcs.py" ]
[ "import numpy as np\nimport os\nimport cv2\nfrom models import Wav2Lip\nimport face_detection\nimport torch\n\ndef get_smoothened_boxes(boxes, T):\n for i in range(len(boxes)):\n if i + T > len(boxes):\n window = boxes[len(boxes) - T:]\n else:\n window = boxes[i: i + T]\n boxes[i] = np.mean(window, axis=0)\n return boxes\n\n\ndef face_detect(images, device, face_det_batch_size, pads, nosmooth):\n detector = face_detection.FaceAlignment(face_detection.LandmarksType._2D,\n flip_input=False, device=device)\n\n batch_size = face_det_batch_size\n\n while 1:\n predictions = []\n try:\n for i in range(0, len(images), batch_size):\n predictions.extend(detector.get_detections_for_batch(np.array(images[i:i + batch_size])))\n except RuntimeError:\n if batch_size == 1:\n raise RuntimeError(\n 'Image too big to run face detection on GPU. Please use the --resize_factor argument')\n batch_size //= 2\n print('Recovering from OOM error; New batch size: {}'.format(batch_size))\n continue\n break\n\n results = []\n pady1, pady2, padx1, padx2 = pads\n for rect, image in zip(predictions, images):\n if rect is None:\n cv2.imwrite('temp/faulty_frame.jpg', image) # check this frame where the face was not detected.\n raise ValueError('Face not detected! Ensure the video contains a face in all the frames.')\n\n y1 = max(0, rect[1] - pady1)\n y2 = min(image.shape[0], rect[3] + pady2)\n x1 = max(0, rect[0] - padx1)\n x2 = min(image.shape[1], rect[2] + padx2)\n\n results.append([x1, y1, x2, y2])\n\n boxes = np.array(results)\n if not nosmooth: boxes = get_smoothened_boxes(boxes, T=5)\n results = [[image[y1: y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]\n\n del detector\n return results\n\n\ndef face_detect_wrapper(frames, device, face_det_batch_size, pads, nosmooth, box, static):\n if box[0] == -1:\n if not static:\n face_det_results = face_detect(frames,\n device, face_det_batch_size, pads, nosmooth) # BGR2RGB for CNN face detection\n else:\n face_det_results = face_detect([frames[0]],\n device, face_det_batch_size, pads, nosmooth)\n else:\n print('Using the specified bounding box instead of face detection...')\n y1, y2, x1, x2 = box\n face_det_results = [[f[y1: y2, x1:x2], (y1, y2, x1, x2)] for f in frames]\n return face_det_results\n\n\ndef datagen(frames, face_det_results, mels, start_frame_idx, static, img_size, wav2lip_batch_size):\n # start frame idx is the current frame idx in the output video\n # we start from this point\n img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []\n\n start_frame_idx = start_frame_idx % len(frames) # loop back\n num_frames = len(mels)\n # take frames from start_frame_idx to start_frame_idx+num_frames\n # wrapping around if necessary\n if not static:\n if len(frames) == 1:\n frames_current = frames\n face_det_results_current = face_det_results\n if start_frame_idx + num_frames > len(frames):\n frames_current = frames[start_frame_idx:] + frames[:start_frame_idx + num_frames - len(frames)]\n face_det_results_current = face_det_results[start_frame_idx:] + face_det_results[\n :start_frame_idx + num_frames - len(frames)]\n else:\n frames_current = frames[start_frame_idx:start_frame_idx + num_frames]\n face_det_results_current = face_det_results[start_frame_idx:start_frame_idx + num_frames]\n\n else:\n frames_current = frames\n face_det_results_current = face_det_results\n\n for i, m in enumerate(mels):\n idx = 0 if static else i % len(frames_current)\n frame_to_save = frames_current[idx].copy()\n face, coords = 
face_det_results_current[idx].copy()\n\n face = cv2.resize(face, (img_size, img_size))\n\n img_batch.append(face)\n mel_batch.append(m)\n frame_batch.append(frame_to_save)\n coords_batch.append(coords)\n\n if len(img_batch) >= wav2lip_batch_size:\n img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)\n\n img_masked = img_batch.copy()\n img_masked[:, img_size // 2:] = 0\n\n img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.\n mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])\n\n yield img_batch, mel_batch, frame_batch, coords_batch\n img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []\n\n if len(img_batch) > 0:\n img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)\n\n img_masked = img_batch.copy()\n img_masked[:, img_size // 2:] = 0\n\n img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.\n mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])\n\n yield img_batch, mel_batch, frame_batch, coords_batch\n\n\ndef _load(checkpoint_path, device):\n if device == 'cuda':\n checkpoint = torch.load(checkpoint_path)\n else:\n checkpoint = torch.load(checkpoint_path,\n map_location=lambda storage, loc: storage)\n return checkpoint\n\n\ndef load_model(path, device):\n model = Wav2Lip()\n print(\"Load checkpoint from: {}\".format(path))\n checkpoint = _load(path, device)\n s = checkpoint[\"state_dict\"]\n new_s = {}\n for k, v in s.items():\n new_s[k.replace('module.', '')] = v\n model.load_state_dict(new_s)\n\n model = model.to(device)\n return model.eval()\n\n\ndef preprocess_video(face, fps, resize_factor, rotate, crop):\n if not os.path.isfile(face):\n raise ValueError('--face argument must be a valid path to video/image file')\n\n # check the last dot-separated chunk, so relative paths like './face.jpg' work\n elif face.split('.')[-1] in ['jpg', 'png', 'jpeg']:\n full_frames = [cv2.imread(face)]\n fps = fps\n\n else:\n video_stream = cv2.VideoCapture(face)\n fps = video_stream.get(cv2.CAP_PROP_FPS)\n\n print('Reading video frames...')\n\n full_frames = []\n while 1:\n still_reading, frame = video_stream.read()\n if not still_reading:\n video_stream.release()\n break\n if resize_factor > 1:\n frame = cv2.resize(frame, (frame.shape[1] // resize_factor, frame.shape[0] // resize_factor))\n\n if rotate:\n frame = cv2.rotate(frame, cv2.cv2.ROTATE_90_CLOCKWISE)\n\n y1, y2, x1, x2 = crop\n if x2 == -1: x2 = frame.shape[1]\n if y2 == -1: y2 = frame.shape[0]\n\n frame = frame[y1:y2, x1:x2]\n\n full_frames.append(frame)\n\n print(\"Number of frames available for inference: \" + str(len(full_frames)))\n\n return full_frames" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.asarray", "numpy.mean", "torch.load" ] ]
rebecca-palmer/statsmodels
[ "27dd8ba0be0211fdc91097463ce4edd28bce1ef4" ]
[ "statsmodels/sandbox/tsa/fftarma.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 14 19:53:25 2009\n\nAuthor: josef-pktd\n\ngenerate arma sample using fft with all the lfilter it looks slow\nto get the ma representation first\n\napply arma filter (in ar representation) to time series to get white noise\nbut seems slow to be useful for fast estimation for nobs=10000\n\nchange/check: instead of using marep, use fft-transform of ar and ma\n separately, use ratio check theory is correct and example works\n DONE : feels much faster than lfilter\n -> use for estimation of ARMA\n -> use pade (scipy.misc) approximation to get starting polynomial\n from autocorrelation (is autocorrelation of AR(p) related to marep?)\n check if pade is fast, not for larger arrays ?\n maybe pade does not do the right thing for this, not tried yet\n scipy.pade([ 1. , 0.6, 0.25, 0.125, 0.0625, 0.1],2)\n raises LinAlgError: singular matrix\n also does not have roots inside unit circle ??\n -> even without initialization, it might be fast for estimation\n -> how do I enforce stationarity and invertibility,\n need helper function\n\nget function drop imag if close to zero from numpy/scipy source, where?\n\n\"\"\"\n\nimport numpy as np\nimport numpy.fft as fft\n#import scipy.fftpack as fft\nfrom scipy import signal\n#from try_var_convolve import maxabs\nfrom statsmodels.tsa.arima_process import ArmaProcess\n\n\n#trying to convert old experiments to a class\n\n\nclass ArmaFft(ArmaProcess):\n '''fft tools for arma processes\n\n This class contains several methods that are providing the same or similar\n returns to try out and test different implementations.\n\n Notes\n -----\n TODO:\n check whether we do not want to fix maxlags, and create new instance if\n maxlag changes. usage for different lengths of timeseries ?\n or fix frequency and length for fft\n\n check default frequencies w, terminology norw n_or_w\n\n some ffts are currently done without padding with zeros\n\n returns for spectral density methods needs checking, is it always the power\n spectrum hw*hw.conj()\n\n normalization of the power spectrum, spectral density: not checked yet, for\n example no variance of underlying process is used\n\n '''\n\n def __init__(self, ar, ma, n):\n #duplicates now that are subclassing ArmaProcess\n super(ArmaFft, self).__init__(ar, ma)\n\n self.ar = np.asarray(ar)\n self.ma = np.asarray(ma)\n self.nobs = n\n #could make the polynomials into cached attributes\n self.arpoly = np.polynomial.Polynomial(ar)\n self.mapoly = np.polynomial.Polynomial(ma)\n self.nar = len(ar) #1d only currently\n self.nma = len(ma)\n\n def padarr(self, arr, maxlag, atend=True):\n '''pad 1d array with zeros at end to have length maxlag\n function that is a method, no self used\n\n Parameters\n ----------\n arr : array_like, 1d\n array that will be padded with zeros\n maxlag : int\n length of array after padding\n atend : bool\n If True (default), then the zeros are added to the end, otherwise\n to the front of the array\n\n Returns\n -------\n arrp : ndarray\n zero-padded array\n\n Notes\n -----\n This is mainly written to extend coefficient arrays for the lag-polynomials.\n It returns a copy.\n\n '''\n if atend:\n return np.r_[arr, np.zeros(maxlag-len(arr))]\n else:\n return np.r_[np.zeros(maxlag-len(arr)), arr]\n\n\n def pad(self, maxlag):\n '''construct AR and MA polynomials that are zero-padded to a common length\n\n Parameters\n ----------\n maxlag : int\n new length of lag-polynomials\n\n Returns\n -------\n ar : ndarray\n extended AR polynomial coefficients\n ma : ndarray\n 
extended MA polynomial coefficients\n\n '''\n arpad = np.r_[self.ar, np.zeros(maxlag-self.nar)]\n mapad = np.r_[self.ma, np.zeros(maxlag-self.nma)]\n return arpad, mapad\n\n def fftar(self, n=None):\n '''Fourier transform of AR polynomial, zero-padded at end to n\n\n Parameters\n ----------\n n : int\n length of array after zero-padding\n\n Returns\n -------\n fftar : ndarray\n fft of zero-padded ar polynomial\n '''\n if n is None:\n n = len(self.ar)\n return fft.fft(self.padarr(self.ar, n))\n\n def fftma(self, n=None):\n '''Fourier transform of MA polynomial, zero-padded at end to n\n\n Parameters\n ----------\n n : int\n length of array after zero-padding\n\n Returns\n -------\n fftma : ndarray\n fft of zero-padded ma polynomial\n '''\n if n is None:\n n = len(self.ma)\n return fft.fft(self.padarr(self.ma, n))\n\n def fftarma(self, n=None):\n '''Fourier transform of ARMA polynomial, zero-padded at end to n\n\n The Fourier transform of the ARMA process is calculated as the ratio\n of the fft of the MA polynomial divided by the fft of the AR polynomial.\n\n Parameters\n ----------\n n : int\n length of array after zero-padding\n\n Returns\n -------\n fftarma : ndarray\n fft of zero-padded arma polynomial\n '''\n if n is None:\n n = self.nobs\n return (self.fftma(n) / self.fftar(n))\n\n def spd(self, npos):\n '''raw spectral density, returns Fourier transform\n\n n is number of points in positive spectrum, the actual number of points\n is twice as large. different from other spd methods with fft\n '''\n n = npos\n w = fft.fftfreq(2*n) * 2 * np.pi\n hw = self.fftarma(2*n) #not sure, need to check normalization\n #return (hw*hw.conj()).real[n//2-1:] * 0.5 / np.pi #does not show in plot\n return (hw*hw.conj()).real * 0.5 / np.pi, w\n\n def spdshift(self, n):\n '''power spectral density using fftshift\n\n currently returns two-sided according to fft frequencies, use first half\n '''\n #size = s1+s2-1\n mapadded = self.padarr(self.ma, n)\n arpadded = self.padarr(self.ar, n)\n hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))\n #return np.abs(spd)[n//2-1:]\n w = fft.fftfreq(n) * 2 * np.pi\n wslice = slice(n//2-1, None, None)\n #return (hw*hw.conj()).real[wslice], w[wslice]\n return (hw*hw.conj()).real, w\n\n def spddirect(self, n):\n '''power spectral density using padding to length n done by fft\n\n currently returns two-sided according to fft frequencies, use first half\n '''\n #size = s1+s2-1\n #abs looks wrong\n hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)\n w = fft.fftfreq(n) * 2 * np.pi\n wslice = slice(None, n//2, None)\n #return (np.abs(hw)**2)[wslice], w[wslice]\n return (np.abs(hw)**2) * 0.5/np.pi, w\n\n def _spddirect2(self, n):\n '''this looks bad, maybe with an fftshift\n '''\n #size = s1+s2-1\n hw = (fft.fft(np.r_[self.ma[::-1],self.ma], n)\n / fft.fft(np.r_[self.ar[::-1],self.ar], n))\n return (hw*hw.conj()) #.real[n//2-1:]\n\n def spdroots(self, w):\n '''spectral density for frequency using polynomial roots\n\n builds two arrays (number of roots, number of frequencies)\n '''\n return self._spdroots(self.arroots, self.maroots, w)\n\n def _spdroots(self, arroots, maroots, w):\n '''spectral density for frequency using polynomial roots\n\n builds two arrays (number of roots, number of frequencies)\n\n Parameters\n ----------\n arroots : ndarray\n roots of ar (denominator) lag-polynomial\n maroots : ndarray\n roots of ma (numerator) lag-polynomial\n w : array_like\n frequencies for which spd is calculated\n\n Notes\n -----\n this should go into a function\n '''\n w = 
np.atleast_2d(w).T\n cosw = np.cos(w)\n #Greene 5th edt. p626, section 20.2.7.a.\n maroots = 1./maroots\n arroots = 1./arroots\n num = 1 + maroots**2 - 2* maroots * cosw\n den = 1 + arroots**2 - 2* arroots * cosw\n #print 'num.shape, den.shape', num.shape, den.shape\n hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1) #or use expsumlog\n return np.squeeze(hw), w.squeeze()\n\n def spdpoly(self, w, nma=50):\n '''spectral density from MA polynomial representation for ARMA process\n\n References\n ----------\n Cochrane, section 8.3.3\n '''\n mpoly = np.polynomial.Polynomial(self.arma2ma(nma))\n hw = mpoly(np.exp(1j * w))\n spd = np.real_if_close(hw * hw.conj() * 0.5/np.pi)\n return spd, w\n\n def filter(self, x):\n '''\n filter a timeseries with the ARMA filter\n\n padding with zero is missing, in example I needed the padding to get\n initial conditions identical to direct filter\n\n Initial filtered observations differ from filter2 and signal.lfilter, but\n at end they are the same.\n\n See Also\n --------\n tsa.filters.fftconvolve\n\n '''\n n = x.shape[0]\n if n == self.fftarma:\n fftarma = self.fftarma\n else:\n fftarma = self.fftma(n) / self.fftar(n)\n tmpfft = fftarma * fft.fft(x)\n return fft.ifft(tmpfft)\n\n def filter2(self, x, pad=0):\n '''filter a time series using fftconvolve3 with ARMA filter\n\n padding of x currently works only if x is 1d\n in example it produces same observations at beginning as lfilter even\n without padding.\n\n TODO: this returns 1 additional observation at the end\n '''\n from statsmodels.tsa.filters import fftconvolve3\n if not pad:\n pass\n elif pad == 'auto':\n #just guessing how much padding\n x = self.padarr(x, x.shape[0] + 2*(self.nma+self.nar), atend=False)\n else:\n x = self.padarr(x, x.shape[0] + int(pad), atend=False)\n\n return fftconvolve3(x, self.ma, self.ar)\n\n\n def acf2spdfreq(self, acovf, nfreq=100, w=None):\n '''\n not really a method\n just for comparison, not efficient for large n or long acf\n\n this is also similarly use in tsa.stattools.periodogram with window\n '''\n if w is None:\n w = np.linspace(0, np.pi, nfreq)[:, None]\n nac = len(acovf)\n hw = 0.5 / np.pi * (acovf[0] +\n 2 * (acovf[1:] * np.cos(w*np.arange(1,nac))).sum(1))\n return hw\n\n def invpowerspd(self, n):\n '''autocovariance from spectral density\n\n scaling is correct, but n needs to be large for numerical accuracy\n maybe padding with zero in fft would be faster\n without slicing it returns 2-sided autocovariance with fftshift\n\n >>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]\n array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,\n 0.045 , 0.0225 , 0.01125 , 0.005625])\n >>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)\n array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,\n 0.045 , 0.0225 , 0.01125 , 0.005625])\n '''\n hw = self.fftarma(n)\n return np.real_if_close(fft.ifft(hw*hw.conj()), tol=200)[:n]\n\n def spdmapoly(self, w, twosided=False):\n '''ma only, need division for ar, use LagPolynomial\n '''\n if w is None:\n w = np.linspace(0, np.pi, nfreq)\n return 0.5 / np.pi * self.mapoly(np.exp(w*1j))\n\n\n def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):\n \"\"\"Plot results\"\"\"\n rvs = self.generate_sample(nsample=100, burnin=500)\n acf = self.acf(nacf)[:nacf] #TODO: check return length\n pacf = self.pacf(nacf)\n w = np.linspace(0, np.pi, nfreq)\n spdr, wr = self.spdroots(w)\n\n if fig is None:\n import matplotlib.pyplot as plt\n fig = plt.figure()\n ax = fig.add_subplot(2,2,1)\n ax.plot(rvs)\n ax.set_title('Random Sample \\nar=%s, ma=%s' % (self.ar, 
self.ma))\n\n ax = fig.add_subplot(2,2,2)\n ax.plot(acf)\n ax.set_title('Autocorrelation \\nar=%s, ma=%rs' % (self.ar, self.ma))\n\n ax = fig.add_subplot(2,2,3)\n ax.plot(wr, spdr)\n ax.set_title('Power Spectrum \\nar=%s, ma=%s' % (self.ar, self.ma))\n\n ax = fig.add_subplot(2,2,4)\n ax.plot(pacf)\n ax.set_title('Partial Autocorrelation \\nar=%s, ma=%s' % (self.ar, self.ma))\n\n return fig\n\n\n\n\n\n\n\ndef spdar1(ar, w):\n if np.ndim(ar) == 0:\n rho = ar\n else:\n rho = -ar[1]\n return 0.5 / np.pi /(1 + rho*rho - 2 * rho * np.cos(w))\n\nif __name__ == '__main__':\n def maxabs(x,y):\n return np.max(np.abs(x-y))\n nobs = 200 #10000\n ar = [1, 0.0]\n ma = [1, 0.0]\n ar2 = np.zeros(nobs)\n ar2[:2] = [1, -0.9]\n\n\n\n uni = np.zeros(nobs)\n uni[0]=1.\n #arrep = signal.lfilter(ma, ar, ar2)\n #marep = signal.lfilter([1],arrep, uni)\n # same faster:\n arcomb = np.convolve(ar, ar2, mode='same')\n marep = signal.lfilter(ma,arcomb, uni) #[len(ma):]\n print(marep[:10])\n mafr = fft.fft(marep)\n\n rvs = np.random.normal(size=nobs)\n datafr = fft.fft(rvs)\n y = fft.ifft(mafr*datafr)\n print(np.corrcoef(np.c_[y[2:], y[1:-1], y[:-2]],rowvar=0))\n\n arrep = signal.lfilter([1],marep, uni)\n print(arrep[:20]) # roundtrip to ar\n arfr = fft.fft(arrep)\n yfr = fft.fft(y)\n x = fft.ifft(arfr*yfr).real #imag part is e-15\n # the next two are equal, roundtrip works\n print(x[:5])\n print(rvs[:5])\n print(np.corrcoef(np.c_[x[2:], x[1:-1], x[:-2]],rowvar=0))\n\n\n # ARMA filter using fft with ratio of fft of ma/ar lag polynomial\n # seems much faster than using lfilter\n\n #padding, note arcomb is already full length\n arcombp = np.zeros(nobs)\n arcombp[:len(arcomb)] = arcomb\n map_ = np.zeros(nobs) #rename: map was shadowing builtin\n map_[:len(ma)] = ma\n ar0fr = fft.fft(arcombp)\n ma0fr = fft.fft(map_)\n y2 = fft.ifft(ma0fr/ar0fr*datafr)\n #the next two are (almost) equal in real part, almost zero but different in imag\n print(y2[:10])\n print(y[:10])\n print(maxabs(y, y2)) # from chfdiscrete\n #1.1282071239631782e-014\n\n ar = [1, -0.4]\n ma = [1, 0.2]\n\n arma1 = ArmaFft([1, -0.5,0,0,0,00, -0.7, 0.3], [1, 0.8], nobs)\n\n nfreq = nobs\n w = np.linspace(0, np.pi, nfreq)\n w2 = np.linspace(0, 2*np.pi, nfreq)\n\n import matplotlib.pyplot as plt\n plt.close('all')\n\n plt.figure()\n spd1, w1 = arma1.spd(2**10)\n print(spd1.shape)\n _ = plt.plot(spd1)\n plt.title('spd fft complex')\n\n plt.figure()\n spd2, w2 = arma1.spdshift(2**10)\n print(spd2.shape)\n _ = plt.plot(w2, spd2)\n plt.title('spd fft shift')\n\n plt.figure()\n spd3, w3 = arma1.spddirect(2**10)\n print(spd3.shape)\n _ = plt.plot(w3, spd3)\n plt.title('spd fft direct')\n\n plt.figure()\n spd3b = arma1._spddirect2(2**10)\n print(spd3b.shape)\n _ = plt.plot(spd3b)\n plt.title('spd fft direct mirrored')\n\n plt.figure()\n spdr, wr = arma1.spdroots(w)\n print(spdr.shape)\n plt.plot(w, spdr)\n plt.title('spd from roots')\n\n plt.figure()\n spdar1_ = spdar1(arma1.ar, w)\n print(spdar1_.shape)\n _ = plt.plot(w, spdar1_)\n plt.title('spd ar1')\n\n\n plt.figure()\n wper, spdper = arma1.periodogram(nfreq)\n print(spdper.shape)\n _ = plt.plot(w, spdper)\n plt.title('periodogram')\n\n startup = 1000\n rvs = arma1.generate_sample(startup+10000)[startup:]\n import matplotlib.mlab as mlb\n plt.figure()\n sdm, wm = mlb.psd(x)\n print('sdm.shape', sdm.shape)\n sdm = sdm.ravel()\n plt.plot(wm, sdm)\n plt.title('matplotlib')\n\n from nitime.algorithms import LD_AR_est\n #yule_AR_est(s, order, Nfreqs)\n wnt, spdnt = LD_AR_est(rvs, 10, 512)\n plt.figure()\n 
print('spdnt.shape', spdnt.shape)\n _ = plt.plot(spdnt.ravel())\n print(spdnt[:10])\n plt.title('nitime')\n\n fig = plt.figure()\n arma1.plot4(fig)\n\n #plt.show()\n" ]
[ [ "numpy.exp", "numpy.fft.fft", "scipy.signal.lfilter", "numpy.cos", "numpy.fft.fftfreq", "numpy.random.normal", "matplotlib.mlab.psd", "numpy.polynomial.Polynomial", "numpy.arange", "numpy.ndim", "numpy.fft.fftshift", "numpy.atleast_2d", "numpy.convolve", "numpy.zeros", "matplotlib.pyplot.title", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "numpy.corrcoef", "numpy.fft.ifft", "numpy.squeeze", "numpy.asarray", "matplotlib.pyplot.plot", "numpy.abs", "numpy.linspace" ] ]
tian1327/AutoLDA
[ "be202b70b6d0a02b75ff05016dcd7084c32a9ccf" ]
[ "Hyperband/Embeddings/GLOVE.py" ]
[ "import os\nimport numpy as np\nimport pickle\n# import tqdm\n\ndef load_GLOVE():\n model = 'glove_pretrained_840b_300d.pkl'\n print(\"loading GLOVE pretrained model ......\")\n with open('./Embeddings/GLOVE_pretrained/'+model,'rb') as pk:\n glove_emb = pickle.load(pk)\n print('GLOVE loaded.\\n')\n\n return glove_emb\n\ndef genEmbeddings_GLOVE(keyword):\n # print('gen GLOVE')\n word_embedding = [0 for i in range(300)]\n if keyword in glove_emb:\n word_embedding = glove_emb[keyword]\n else:\n print('--'*10, keyword, 'not found in GLOVE!')\n\n return word_embedding\n\nglove_emb = load_GLOVE()\n\nif __name__ == \"__main__\":\n path_to_glove_file = \"./GLOVE_pretrained/glove.840B.300d.txt\"\n embeddings_dict = {}\n with open(path_to_glove_file) as f:\n for line in f:\n value = line.split(' ')\n word = value[0]\n coefs = np.array(value[1:], dtype = 'float32')\n embeddings_dict[word] = coefs\n\n print('save GLOVE embeddings_dict to pkl ......')\n with open('./GLOVE_pretrained/glove_pretrained_840b_300d.pkl','wb') as f:\n pickle.dump(embeddings_dict, f)\n\n" ]
[ [ "numpy.array" ] ]
wyyfkim/MRTA
[ "fab515569d3434cae01733c702fc0e1afc73b552" ]
[ "catkin_ws/src/mrta/src/DataGenerator.py" ]
[ "import os, sys\nimport argparse\nimport pickle\nfrom numpy import random\nfrom Task import Task\nfrom PrecedenceGraph import PrecedenceGraph, Node\nfrom Robot import Robot\nfrom Logger import Logger, LogLevel\n\nclass DataSet:\n\n def __init__(self, p_graphs, robots, beta, bid_alpha, cost_alpha):\n self.p_graphs = p_graphs\n self.robots = robots\n self.beta = beta\n self.bid_alpha = bid_alpha\n self.cost_alpha = cost_alpha \n self.schedules = []\n\nclass DataGenerator:\n\n def __init__(self, map_size_x, map_size_y, logger):\n self._map_size = (map_size_x, map_size_y)\n self._logger = logger\n self.task_types = [1, 2]\n\n def generate_tasks(self, num_of_tasks, task_locations=None):\n if task_locations is not None:\n if len(task_locations) != num_of_tasks:\n self._logger.error(\"generate_tasks: The number of task locations is not same as the number of tasks.\")\n\n tasks = []\n duration = random.randint(20, 40)\n\n for i in range(num_of_tasks):\n task_id = i + 1\n est = random.randint(25, 400)\n lft = est + random.randint(100, 1200)\n task_type = random.choice(self.task_types, 1, p=[0.5, 0.5])[0]\n\n if task_locations is not None:\n pos_x = task_locations[i][0]\n pos_y = task_locations[i][1]\n else:\n pos_x, pos_y = self.generate_locations(1)[0]\n\n tasks.append(Task(est, lft, duration, task_id, pos_x, pos_y, task_type))\n return tasks\n\n def generate_locations(self, num_of_locations):\n locations = []\n for i in range(num_of_locations):\n pos_x = random.randint(0, self._map_size[0])\n pos_y = random.randint(0, self._map_size[1])\n locations.append((pos_x, pos_y))\n return locations\n\n def generate_pgraph(self, tasks, max_num_of_edges):\n p_graph = PrecedenceGraph(tasks)\n min_num_of_edges = len(tasks) / 2\n num_of_edges = min_num_of_edges\n\n if max_num_of_edges > min_num_of_edges:\n num_of_edges = random.randint(min_num_of_edges, max_num_of_edges)\n\n i = 0\n while i < num_of_edges:\n from_task = random.choice(tasks)\n to_task = random.choice(tasks)\n\n if from_task.lft < to_task.lft:\n\n if p_graph.are_connected(from_task, to_task):\n p_graph.remove_edge(from_task, to_task)\n else:\n if p_graph.add_edge(from_task, to_task):\n i += 1\n \n p_graph.build_graph()\n return p_graph\n\n def generate_pgraphs(self, tasks, num_of_pgraphs, max_num_of_edges):\n p_graphs = [] \n\n for i in range(num_of_pgraphs):\n p_graph = self.generate_pgraph(tasks, max_num_of_edges)\n p_graphs.append(p_graph)\n\n return p_graphs \n\n def generate_robots(self, num_of_robots, robot_speed):\n locations = self.generate_locations(num_of_robots) \n robots = []\n task_types = [1,2]\n\n for i in range(num_of_robots):\n robot_id = i + 1\n capability = set()\n ran = random.uniform()\n\n #first robot capable of doing all tasks\n if i == 0 or ran > 0.66:\n capability = set(task_types)\n elif ran > 0.33:\n capability.add(task_types[0])\n else:\n capability.add(task_types[1])\n\n robot = Robot(robot_id, locations[i][0], locations[i][1], capability, robot_speed, self._logger) \n robots.append(robot)\n\n return robots\n\nif __name__ == \"__main__\":\n\n '''if len(sys.argv) < 2:\n print(\"ERROR starting datageneration\")\n exit(1)'''\n\n data_dir = \"../data/\"\n ##dsfile_name = 'dataset' + sys.argv[1] + '.pickle'\n dsfile_name = '../data/dataset1.pickle'\n\n parser = argparse.ArgumentParser(description=\"MRTA Data Generator\")\n\n parser.add_argument('--x',\n help='X Dimention of Map',\n dest='map_x',\n type=int,\n default=100,\n action='store')\n\n parser.add_argument('--y',\n help='Y Dimention of Map',\n dest='map_y',\n 
type=int,\n default=100,\n action='store')\n\n args = parser.parse_args()\n\n logger = Logger(LogLevel.OFF[0])\n map_x = args.map_x\n map_y = args.map_y\n \n num_of_pgraphs = 50\n ##robot_count_arr = [2, 4, 8]\n ##task_count_arr = [5, 10, 20, 30]\n robot_count_arr = [1]\n task_count_arr = [5]\n\n\n dg = DataGenerator(map_x, map_y, logger)\n robots = { }\n for robot_count in robot_count_arr:\n robots[robot_count] = dg.generate_robots(robot_count, 1)\n\n p_graphs = { }\n for task_count in task_count_arr:\n p_graphs[task_count] = {}\n tasks = dg.generate_tasks(task_count)\n print(tasks)\n max_possible_edges = (task_count * (task_count - 1))/2\n max_num_of_edges = min(3 * task_count, max_possible_edges)\n p_graphs[task_count] = dg.generate_pgraphs(tasks, num_of_pgraphs, max_num_of_edges)\n\n ds = DataSet(p_graphs, robots, 0.25, 0.75, 0.75)\n\n pickle.dump(robots, open('./robots.pickle', 'w'))\n pickle.dump(p_graphs, open('./pgraphs.pickle', 'w'))\n pickle.dump(ds, open(dsfile_name, 'w'))\n\n\n\n \n" ]
[ [ "numpy.random.uniform", "numpy.random.randint", "numpy.random.choice" ] ]
noranhe/vnpy_optionmaster
[ "180c85f92004d1092bc45032dc31585539de9768" ]
[ "vnpy_optionmaster/ui/manager.py" ]
[ "from typing import Dict, List, Tuple, Optional\nfrom copy import copy\nfrom functools import partial\n\nfrom scipy import interpolate\n\nfrom vnpy.event import Event, EventEngine\nfrom vnpy.trader.engine import MainEngine\nfrom vnpy.trader.ui import QtWidgets, QtCore, QtGui\nfrom vnpy.trader.event import EVENT_TICK, EVENT_TIMER, EVENT_TRADE\nfrom vnpy.trader.object import TickData, TradeData, LogData\nfrom vnpy.trader.utility import save_json, load_json\n\nfrom ..engine import OptionEngine, OptionAlgoEngine\nfrom ..base import (\n EVENT_OPTION_ALGO_PRICING,\n EVENT_OPTION_ALGO_STATUS,\n EVENT_OPTION_ALGO_LOG,\n PortfolioData,\n ChainData,\n OptionData,\n InstrumentData\n)\nfrom .monitor import (\n MonitorCell, IndexCell, BidCell, AskCell, PosCell,\n COLOR_WHITE, COLOR_BLACK\n)\nfrom ..algo import ElectronicEyeAlgo\n\n\nclass AlgoSpinBox(QtWidgets.QSpinBox):\n \"\"\"\"\"\"\n\n def __init__(self) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.setMaximum(999999)\n self.setMinimum(-999999)\n self.setAlignment(QtCore.Qt.AlignCenter)\n\n def get_value(self) -> int:\n \"\"\"\"\"\"\n return self.value()\n\n def set_value(self, value: int) -> None:\n \"\"\"\"\"\"\n self.setValue(value)\n\n def update_status(self, active: bool) -> None:\n \"\"\"\"\"\"\n self.setEnabled(not active)\n\n\nclass AlgoPositiveSpinBox(AlgoSpinBox):\n \"\"\"\"\"\"\n\n def __init__(self) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.setMinimum(0)\n\n\nclass AlgoDoubleSpinBox(QtWidgets.QDoubleSpinBox):\n \"\"\"\"\"\"\n\n def __init__(self) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.setDecimals(1)\n self.setMaximum(9999.9)\n self.setMinimum(0)\n self.setAlignment(QtCore.Qt.AlignCenter)\n\n def get_value(self) -> float:\n \"\"\"\"\"\"\n return self.value()\n\n def set_value(self, value: float) -> None:\n \"\"\"\"\"\"\n self.setValue(value)\n\n def update_status(self, active: bool) -> None:\n \"\"\"\"\"\"\n self.setEnabled(not active)\n\n\nclass AlgoDirectionCombo(QtWidgets.QComboBox):\n \"\"\"\"\"\"\n\n def __init__(self) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.addItems([\n \"双向\",\n \"做多\",\n \"做空\"\n ])\n\n def get_value(self) -> Dict[str, bool]:\n \"\"\"\"\"\"\n if self.currentText() == \"双向\":\n value: dict = {\n \"long_allowed\": True,\n \"short_allowed\": True\n }\n elif self.currentText() == \"做多\":\n value: dict = {\n \"long_allowed\": True,\n \"short_allowed\": False\n }\n else:\n value: dict = {\n \"long_allowed\": False,\n \"short_allowed\": True\n }\n\n return value\n\n def set_value(self, value: dict) -> None:\n \"\"\"\"\"\"\n if value[\"long_allowed\"] and value[\"short_allowed\"]:\n self.setCurrentIndex(0)\n elif value[\"long_allowed\"]:\n self.setCurrentIndex(1)\n else:\n self.setCurrentIndex(2)\n\n def update_status(self, active: bool) -> None:\n \"\"\"\"\"\"\n self.setEnabled(not active)\n\n\nclass AlgoPricingButton(QtWidgets.QPushButton):\n \"\"\"\"\"\"\n\n def __init__(self, vt_symbol: str, manager: \"ElectronicEyeManager\") -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.vt_symbol: str = vt_symbol\n self.manager: ElectronicEyeManager = manager\n\n self.active: bool = False\n self.setText(\"N\")\n self.clicked.connect(self.on_clicked)\n\n def on_clicked(self) -> None:\n \"\"\"\"\"\"\n if self.active:\n self.manager.stop_algo_pricing(self.vt_symbol)\n else:\n self.manager.start_algo_pricing(self.vt_symbol)\n\n def update_status(self, active: bool) -> None:\n \"\"\"\"\"\"\n self.active = active\n\n if active:\n self.setText(\"Y\")\n else:\n 
self.setText(\"N\")\n\n\nclass AlgoTradingButton(QtWidgets.QPushButton):\n \"\"\"\"\"\"\n\n def __init__(self, vt_symbol: str, manager: \"ElectronicEyeManager\") -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.vt_symbol: str = vt_symbol\n self.manager: ElectronicEyeManager = manager\n\n self.active: bool = False\n self.setText(\"N\")\n self.clicked.connect(self.on_clicked)\n\n def on_clicked(self) -> None:\n \"\"\"\"\"\"\n if self.active:\n self.manager.stop_algo_trading(self.vt_symbol)\n else:\n self.manager.start_algo_trading(self.vt_symbol)\n\n def update_status(self, active: bool) -> None:\n \"\"\"\"\"\"\n self.active = active\n\n if active:\n self.setText(\"Y\")\n else:\n self.setText(\"N\")\n\n\nclass ElectronicEyeMonitor(QtWidgets.QTableWidget):\n \"\"\"\"\"\"\n\n signal_tick: QtCore.Signal = QtCore.Signal(Event)\n signal_pricing: QtCore.Signal = QtCore.Signal(Event)\n signal_status: QtCore.Signal = QtCore.Signal(Event)\n signal_trade: QtCore.Signal = QtCore.Signal(Event)\n\n headers: List[Dict] = [\n {\"name\": \"bid_volume\", \"display\": \"买量\", \"cell\": BidCell},\n {\"name\": \"bid_price\", \"display\": \"买价\", \"cell\": BidCell},\n {\"name\": \"ask_price\", \"display\": \"卖价\", \"cell\": AskCell},\n {\"name\": \"ask_volume\", \"display\": \"卖量\", \"cell\": AskCell},\n {\"name\": \"algo_bid_price\", \"display\": \"目标\\n买价\", \"cell\": BidCell},\n {\"name\": \"algo_ask_price\", \"display\": \"目标\\n卖价\", \"cell\": AskCell},\n {\"name\": \"algo_spread\", \"display\": \"价差\", \"cell\": MonitorCell},\n {\"name\": \"ref_price\", \"display\": \"理论价\", \"cell\": MonitorCell},\n {\"name\": \"pricing_impv\", \"display\": \"定价\\n隐波\", \"cell\": MonitorCell},\n {\"name\": \"net_pos\", \"display\": \"净持仓\", \"cell\": PosCell},\n\n {\"name\": \"price_spread\", \"display\": \"价格\\n价差\", \"cell\": AlgoDoubleSpinBox},\n {\"name\": \"volatility_spread\", \"display\": \"隐波\\n价差\", \"cell\": AlgoDoubleSpinBox},\n {\"name\": \"max_pos\", \"display\": \"持仓\\n范围\", \"cell\": AlgoPositiveSpinBox},\n {\"name\": \"target_pos\", \"display\": \"目标\\n持仓\", \"cell\": AlgoSpinBox},\n {\"name\": \"max_order_size\", \"display\": \"最大\\n委托\", \"cell\": AlgoPositiveSpinBox},\n {\"name\": \"direction\", \"display\": \"方向\", \"cell\": AlgoDirectionCombo},\n {\"name\": \"pricing_active\", \"display\": \"定价\", \"cell\": AlgoPricingButton},\n {\"name\": \"trading_active\", \"display\": \"交易\", \"cell\": AlgoTradingButton},\n ]\n\n def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.option_engine: OptionEngine = option_engine\n self.event_engine: EventEngine = option_engine.event_engine\n self.main_engine: MainEngine = option_engine.main_engine\n self.algo_engine: OptionAlgoEngine = option_engine.algo_engine\n self.portfolio_name: str = portfolio_name\n self.setting_filename: str = f\"{portfolio_name}_electronic_eye.json\"\n\n self.cells: Dict[str, Dict] = {}\n\n self.init_ui()\n self.register_event()\n self.load_setting()\n\n def init_ui(self) -> None:\n \"\"\"\"\"\"\n self.setWindowTitle(\"电子眼\")\n self.verticalHeader().setVisible(False)\n self.setEditTriggers(self.NoEditTriggers)\n\n # Set table row and column numbers\n portfolio: PortfolioData = self.option_engine.get_portfolio(self.portfolio_name)\n\n row_count: int = 0\n for chain in portfolio.chains.values():\n row_count += (1 + len(chain.indexes))\n self.setRowCount(row_count)\n\n column_count: int = len(self.headers) * 2 + 1\n self.setColumnCount(column_count)\n\n call_labels: list = 
[d[\"display\"] for d in self.headers]\n put_labels: list = copy(call_labels)\n put_labels.reverse()\n labels: list = call_labels + [\"行权价\"] + put_labels\n self.setHorizontalHeaderLabels(labels)\n\n # Init cells\n strike_column: int = len(self.headers)\n current_row: int = 0\n\n chain_symbols: list = list(portfolio.chains.keys())\n chain_symbols.sort()\n\n for chain_symbol in chain_symbols:\n chain: ChainData = portfolio.get_chain(chain_symbol)\n\n self.setItem(\n current_row,\n strike_column,\n IndexCell(chain.chain_symbol.split(\".\")[0])\n )\n\n for index in chain.indexes:\n call: OptionData = chain.calls[index]\n put: OptionData = chain.puts[index]\n\n current_row += 1\n\n # Call cells\n call_cells: dict = {}\n\n for column, d in enumerate(self.headers):\n cell_type = d[\"cell\"]\n\n if issubclass(cell_type, QtWidgets.QPushButton):\n cell = cell_type(call.vt_symbol, self)\n else:\n cell = cell_type()\n\n call_cells[d[\"name\"]] = cell\n\n if isinstance(cell, QtWidgets.QTableWidgetItem):\n self.setItem(current_row, column, cell)\n else:\n self.setCellWidget(current_row, column, cell)\n\n self.cells[call.vt_symbol] = call_cells\n\n # Put cells\n put_cells: dict = {}\n put_headers: list = copy(self.headers)\n put_headers.reverse()\n\n for column, d in enumerate(put_headers):\n column += (strike_column + 1)\n\n cell_type = d[\"cell\"]\n\n if issubclass(cell_type, QtWidgets.QPushButton):\n cell = cell_type(put.vt_symbol, self)\n else:\n cell = cell_type()\n\n put_cells[d[\"name\"]] = cell\n\n if isinstance(cell, QtWidgets.QTableWidgetItem):\n self.setItem(current_row, column, cell)\n else:\n self.setCellWidget(current_row, column, cell)\n\n self.cells[put.vt_symbol] = put_cells\n\n # Strike cell\n index_cell: IndexCell = IndexCell(str(call.chain_index))\n self.setItem(current_row, strike_column, index_cell)\n\n # Move to next row\n current_row += 1\n\n self.resizeColumnsToContents()\n\n # Update all net pos and tick cells\n for vt_symbol in self.cells.keys():\n self.update_net_pos(vt_symbol)\n\n tick: Optional[TickData] = self.main_engine.get_tick(vt_symbol)\n if tick:\n self.update_tick(tick)\n\n def load_setting(self) -> None:\n \"\"\"\"\"\"\n fields: list = [\n \"price_spread\",\n \"volatility_spread\",\n \"max_pos\",\n \"target_pos\",\n \"max_order_size\",\n \"direction\"\n ]\n\n setting: dict = load_json(self.setting_filename)\n\n for vt_symbol, cells in self.cells.items():\n buf: Optional[dict] = setting.get(vt_symbol, None)\n if buf:\n for field in fields:\n cells[field].set_value(buf[field])\n\n def save_setting(self) -> None:\n \"\"\"\"\"\"\n fields: list = [\n \"price_spread\",\n \"volatility_spread\",\n \"max_pos\",\n \"target_pos\",\n \"max_order_size\",\n \"direction\"\n ]\n\n setting: dict = {}\n for vt_symbol, cells in self.cells.items():\n buf: dict = {}\n for field in fields:\n buf[field] = cells[field].get_value()\n setting[vt_symbol] = buf\n\n save_json(self.setting_filename, setting)\n\n def register_event(self) -> None:\n \"\"\"\"\"\"\n self.signal_pricing.connect(self.process_pricing_event)\n self.signal_status.connect(self.process_status_event)\n self.signal_tick.connect(self.process_tick_event)\n self.signal_trade.connect(self.process_trade_event)\n\n self.event_engine.register(\n EVENT_OPTION_ALGO_PRICING,\n self.signal_pricing.emit\n )\n self.event_engine.register(\n EVENT_OPTION_ALGO_STATUS,\n self.signal_status.emit\n )\n self.event_engine.register(\n EVENT_TICK,\n self.signal_tick.emit\n )\n self.event_engine.register(\n EVENT_TRADE,\n self.signal_trade.emit\n 
)\n\n def process_tick_event(self, event: Event) -> None:\n \"\"\"\"\"\"\n tick: TickData = event.data\n self.update_tick(tick)\n\n def update_tick(self, tick: TickData) -> None:\n \"\"\"\"\"\"\n cells: Optional[dict] = self.cells.get(tick.vt_symbol, None)\n if not cells:\n return\n\n cells[\"bid_price\"].setText(str(tick.bid_price_1))\n cells[\"ask_price\"].setText(str(tick.ask_price_1))\n cells[\"bid_volume\"].setText(str(tick.bid_volume_1))\n cells[\"ask_volume\"].setText(str(tick.ask_volume_1))\n\n def process_status_event(self, event: Event) -> None:\n \"\"\"\"\"\"\n algo: ElectronicEyeAlgo = event.data\n cells: dict = self.cells[algo.vt_symbol]\n\n cells[\"price_spread\"].update_status(algo.pricing_active)\n cells[\"volatility_spread\"].update_status(algo.pricing_active)\n cells[\"pricing_active\"].update_status(algo.pricing_active)\n\n cells[\"max_pos\"].update_status(algo.trading_active)\n cells[\"target_pos\"].update_status(algo.trading_active)\n cells[\"max_order_size\"].update_status(algo.trading_active)\n cells[\"direction\"].update_status(algo.trading_active)\n cells[\"trading_active\"].update_status(algo.trading_active)\n\n def process_pricing_event(self, event: Event) -> None:\n \"\"\"\"\"\"\n algo: ElectronicEyeAlgo = event.data\n cells: dict = self.cells[algo.vt_symbol]\n\n if algo.ref_price:\n cells[\"algo_bid_price\"].setText(str(algo.algo_bid_price))\n cells[\"algo_ask_price\"].setText(str(algo.algo_ask_price))\n cells[\"algo_spread\"].setText(str(algo.algo_spread))\n cells[\"ref_price\"].setText(str(algo.ref_price))\n cells[\"pricing_impv\"].setText(f\"{algo.pricing_impv * 100:.2f}\")\n else:\n cells[\"algo_bid_price\"].setText(\"\")\n cells[\"algo_ask_price\"].setText(\"\")\n cells[\"algo_spread\"].setText(\"\")\n cells[\"ref_price\"].setText(\"\")\n cells[\"pricing_impv\"].setText(\"\")\n\n def process_trade_event(self, event: Event) -> None:\n \"\"\"\"\"\"\n trade: TradeData = event.data\n self.update_net_pos(trade.vt_symbol)\n\n def update_net_pos(self, vt_symbol: str) -> None:\n \"\"\"\"\"\"\n cells: Optional[dict] = self.cells.get(vt_symbol, None)\n if not cells:\n return\n\n option: InstrumentData = self.option_engine.get_instrument(vt_symbol)\n cells[\"net_pos\"].setText(str(option.net_pos))\n\n def start_algo_pricing(self, vt_symbol: str) -> None:\n \"\"\"\"\"\"\n cells: dict = self.cells[vt_symbol]\n\n params: dict = {}\n params[\"price_spread\"] = cells[\"price_spread\"].get_value()\n params[\"volatility_spread\"] = cells[\"volatility_spread\"].get_value()\n\n self.algo_engine.start_algo_pricing(vt_symbol, params)\n\n def stop_algo_pricing(self, vt_symbol: str) -> None:\n \"\"\"\"\"\"\n self.algo_engine.stop_algo_pricing(vt_symbol)\n\n def start_algo_trading(self, vt_symbol: str) -> None:\n \"\"\"\"\"\"\n cells: dict = self.cells[vt_symbol]\n\n params = cells[\"direction\"].get_value()\n for name in [\n \"max_pos\",\n \"target_pos\",\n \"max_order_size\"\n ]:\n params[name] = cells[name].get_value()\n\n self.algo_engine.start_algo_trading(vt_symbol, params)\n\n def stop_algo_trading(self, vt_symbol: str) -> None:\n \"\"\"\"\"\"\n self.algo_engine.stop_algo_trading(vt_symbol)\n\n\nclass ElectronicEyeManager(QtWidgets.QWidget):\n \"\"\"\"\"\"\n\n signal_log = QtCore.Signal(Event)\n\n def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.option_engine: OptionEngine = option_engine\n self.event_Engine: EventEngine = option_engine.event_engine\n self.algo_engine: OptionAlgoEngine = 
option_engine.algo_engine\n self.portfolio_name: str = portfolio_name\n\n self.init_ui()\n self.register_event()\n\n def init_ui(self) -> None:\n \"\"\"\"\"\"\n self.setWindowTitle(\"期权电子眼\")\n\n self.algo_monitor: ElectronicEyeMonitor = ElectronicEyeMonitor(self.option_engine, self.portfolio_name)\n\n self.log_monitor: QtWidgets.QTextEdit = QtWidgets.QTextEdit()\n self.log_monitor.setReadOnly(True)\n self.log_monitor.setMaximumWidth(400)\n\n stop_pricing_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"停止定价\")\n stop_pricing_button.clicked.connect(self.stop_pricing_for_all)\n\n stop_trading_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"停止交易\")\n stop_trading_button.clicked.connect(self.stop_trading_for_all)\n\n self.price_spread_spin: AlgoDoubleSpinBox = AlgoDoubleSpinBox()\n self.volatility_spread_spin: AlgoDoubleSpinBox = AlgoDoubleSpinBox()\n self.direction_combo: AlgoDirectionCombo = AlgoDirectionCombo()\n self.max_order_size_spin: AlgoPositiveSpinBox = AlgoPositiveSpinBox()\n self.target_pos_spin: AlgoSpinBox = AlgoSpinBox()\n self.max_pos_spin: AlgoPositiveSpinBox = AlgoPositiveSpinBox()\n\n price_spread_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"设置\")\n price_spread_button.clicked.connect(self.set_price_spread_for_all)\n\n volatility_spread_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"设置\")\n volatility_spread_button.clicked.connect(self.set_volatility_spread_for_all)\n\n direction_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"设置\")\n direction_button.clicked.connect(self.set_direction_for_all)\n\n max_order_size_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"设置\")\n max_order_size_button.clicked.connect(self.set_max_order_size_for_all)\n\n target_pos_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"设置\")\n target_pos_button.clicked.connect(self.set_target_pos_for_all)\n\n max_pos_button: QtWidgets.QPushButton = QtWidgets.QPushButton(\"设置\")\n max_pos_button.clicked.connect(self.set_max_pos_for_all)\n\n QLabel = QtWidgets.QLabel\n grid: QtWidgets.QGridLayout = QtWidgets.QGridLayout()\n grid.addWidget(QLabel(\"价格价差\"), 0, 0)\n grid.addWidget(self.price_spread_spin, 0, 1)\n grid.addWidget(price_spread_button, 0, 2)\n grid.addWidget(QLabel(\"隐波价差\"), 1, 0)\n grid.addWidget(self.volatility_spread_spin, 1, 1)\n grid.addWidget(volatility_spread_button, 1, 2)\n grid.addWidget(QLabel(\"持仓范围\"), 2, 0)\n grid.addWidget(self.max_pos_spin, 2, 1)\n grid.addWidget(max_pos_button, 2, 2)\n grid.addWidget(QLabel(\"目标持仓\"), 3, 0)\n grid.addWidget(self.target_pos_spin, 3, 1)\n grid.addWidget(target_pos_button, 3, 2)\n grid.addWidget(QLabel(\"最大委托\"), 4, 0)\n grid.addWidget(self.max_order_size_spin, 4, 1)\n grid.addWidget(max_order_size_button, 4, 2)\n grid.addWidget(QLabel(\"方向\"), 5, 0)\n grid.addWidget(self.direction_combo, 5, 1)\n grid.addWidget(direction_button, 5, 2)\n\n hbox1: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()\n hbox1.addWidget(stop_pricing_button)\n hbox1.addWidget(stop_trading_button)\n\n vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()\n vbox.addLayout(hbox1)\n vbox.addLayout(grid)\n vbox.addWidget(self.log_monitor)\n\n hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()\n hbox.addWidget(self.algo_monitor)\n hbox.addLayout(vbox)\n\n self.setLayout(hbox)\n\n def register_event(self) -> None:\n \"\"\"\"\"\"\n self.signal_log.connect(self.process_log_event)\n\n self.event_Engine.register(EVENT_OPTION_ALGO_LOG, self.signal_log.emit)\n\n def process_log_event(self, event: Event) -> None:\n \"\"\"\"\"\"\n log: 
LogData = event.data\n timestr: str = log.time.strftime(\"%H:%M:%S\")\n msg: str = f\"{timestr} {log.msg}\"\n self.log_monitor.append(msg)\n\n def show(self) -> None:\n \"\"\"\"\"\"\n self.algo_engine.init_engine(self.portfolio_name)\n self.algo_monitor.resizeColumnsToContents()\n super().showMaximized()\n\n def set_price_spread_for_all(self) -> None:\n \"\"\"\"\"\"\n price_spread: float = self.price_spread_spin.get_value()\n\n for cells in self.algo_monitor.cells.values():\n if cells[\"price_spread\"].isEnabled():\n cells[\"price_spread\"].setValue(price_spread)\n\n def set_volatility_spread_for_all(self) -> None:\n \"\"\"\"\"\"\n volatility_spread: float = self.volatility_spread_spin.get_value()\n\n for cells in self.algo_monitor.cells.values():\n if cells[\"volatility_spread\"].isEnabled():\n cells[\"volatility_spread\"].setValue(volatility_spread)\n\n def set_direction_for_all(self) -> None:\n \"\"\"\"\"\"\n ix: int = self.direction_combo.currentIndex()\n\n for cells in self.algo_monitor.cells.values():\n if cells[\"direction\"].isEnabled():\n cells[\"direction\"].setCurrentIndex(ix)\n\n def set_max_order_size_for_all(self) -> None:\n \"\"\"\"\"\"\n size: int = self.max_order_size_spin.get_value()\n\n for cells in self.algo_monitor.cells.values():\n if cells[\"max_order_size\"].isEnabled():\n cells[\"max_order_size\"].setValue(size)\n\n def set_target_pos_for_all(self) -> None:\n \"\"\"\"\"\"\n pos: int = self.target_pos_spin.get_value()\n\n for cells in self.algo_monitor.cells.values():\n if cells[\"target_pos\"].isEnabled():\n cells[\"target_pos\"].setValue(pos)\n\n def set_max_pos_for_all(self) -> None:\n \"\"\"\"\"\"\n pos: int = self.max_pos_spin.get_value()\n\n for cells in self.algo_monitor.cells.values():\n if cells[\"max_pos\"].isEnabled():\n cells[\"max_pos\"].setValue(pos)\n\n def stop_pricing_for_all(self) -> None:\n \"\"\"\"\"\"\n for vt_symbol in self.algo_monitor.cells.keys():\n self.algo_monitor.stop_algo_pricing(vt_symbol)\n\n def stop_trading_for_all(self) -> None:\n \"\"\"\"\"\"\n for vt_symbol in self.algo_monitor.cells.keys():\n self.algo_monitor.stop_algo_trading(vt_symbol)\n\n def closeEvent(self, event: QtGui.QCloseEvent) -> None:\n \"\"\"\"\"\"\n self.algo_monitor.save_setting()\n event.accept()\n\n\nclass VolatilityDoubleSpinBox(QtWidgets.QDoubleSpinBox):\n \"\"\"\"\"\"\n\n def __init__(self) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.setDecimals(1)\n self.setSuffix(\"%\")\n self.setMaximum(200.0)\n self.setMinimum(0)\n\n def get_value(self) -> float:\n \"\"\"\"\"\"\n return self.value()\n\n\nclass PricingVolatilityManager(QtWidgets.QWidget):\n \"\"\"\"\"\"\n\n signal_timer: QtCore.Signal = QtCore.Signal(Event)\n\n def __init__(self, option_engine: OptionEngine, portfolio_name: str) -> None:\n \"\"\"\"\"\"\n super().__init__()\n\n self.option_engine: OptionEngine = option_engine\n self.event_engine: EventEngine = option_engine.event_engine\n self.portfolio: PortfolioData = option_engine.get_portfolio(portfolio_name)\n\n self.cells: Dict[Tuple, Dict] = {}\n self.chain_symbols: List[str] = []\n self.chain_atm_index: Dict[str, str] = {}\n\n self.init_ui()\n self.register_event()\n\n def init_ui(self) -> None:\n \"\"\"\"\"\"\n self.setWindowTitle(\"波动率管理\")\n\n tab: QtWidgets.QTabWidget = QtWidgets.QTabWidget()\n vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()\n vbox.addWidget(tab)\n self.setLayout(vbox)\n\n self.chain_symbols: list = list(self.portfolio.chains.keys())\n self.chain_symbols.sort()\n\n for chain_symbol in self.chain_symbols:\n 
chain: ChainData = self.portfolio.get_chain(chain_symbol)\n\n table: QtWidgets.QTableWidget = QtWidgets.QTableWidget()\n table.setEditTriggers(table.NoEditTriggers)\n table.verticalHeader().setVisible(False)\n table.setRowCount(len(chain.indexes))\n table.horizontalHeader().setSectionResizeMode(\n QtWidgets.QHeaderView.Stretch\n )\n\n labels: list = [\n \"行权价\",\n \"OTM隐波\",\n \"CALL隐波\",\n \"PUT隐波\",\n \"定价隐波\",\n \"执行拟合\"\n ]\n table.setColumnCount(len(labels))\n table.setHorizontalHeaderLabels(labels)\n\n for row, index in enumerate(chain.indexes):\n index_cell: IndexCell = IndexCell(index)\n otm_impv_cell: MonitorCell = MonitorCell(\"\")\n call_impv_cell: MonitorCell = MonitorCell(\"\")\n put_impv_cell: MonitorCell = MonitorCell(\"\")\n\n set_func = partial(\n self.set_pricing_impv,\n chain_symbol=chain_symbol,\n index=index\n )\n pricing_impv_spin: VolatilityDoubleSpinBox = VolatilityDoubleSpinBox()\n pricing_impv_spin.setAlignment(QtCore.Qt.AlignCenter)\n pricing_impv_spin.valueChanged.connect(set_func)\n\n check: QtWidgets.QCheckBox = QtWidgets.QCheckBox()\n\n check_hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()\n check_hbox.setAlignment(QtCore.Qt.AlignCenter)\n check_hbox.addWidget(check)\n\n check_widget: QtWidgets.QWidget = QtWidgets.QWidget()\n check_widget.setLayout(check_hbox)\n\n table.setItem(row, 0, index_cell)\n table.setItem(row, 1, otm_impv_cell)\n table.setItem(row, 2, call_impv_cell)\n table.setItem(row, 3, put_impv_cell)\n table.setCellWidget(row, 4, pricing_impv_spin)\n table.setCellWidget(row, 5, check_widget)\n\n cells: dict = {\n \"otm_impv\": otm_impv_cell,\n \"call_impv\": call_impv_cell,\n \"put_impv\": put_impv_cell,\n \"pricing_impv\": pricing_impv_spin,\n \"check\": check\n }\n\n self.cells[(chain_symbol, index)] = cells\n\n reset_func = partial(self.reset_pricing_impv, chain_symbol=chain_symbol)\n button_reset: QtWidgets.QPushButton = QtWidgets.QPushButton(\"重置\")\n button_reset.clicked.connect(reset_func)\n\n fit_func = partial(self.fit_pricing_impv, chain_symbol=chain_symbol)\n button_fit: QtWidgets.QPushButton = QtWidgets.QPushButton(\"拟合\")\n button_fit.clicked.connect(fit_func)\n\n increase_func = partial(self.increase_pricing_impv, chain_symbol=chain_symbol)\n button_increase: QtWidgets.QPushButton = QtWidgets.QPushButton(\"+0.1%\")\n button_increase.clicked.connect(increase_func)\n\n decrease_func = partial(self.decrease_pricing_impv, chain_symbol=chain_symbol)\n button_decrease: QtWidgets.QPushButton = QtWidgets.QPushButton(\"-0.1%\")\n button_decrease.clicked.connect(decrease_func)\n\n hbox: QtWidgets.QHBoxLayout = QtWidgets.QHBoxLayout()\n hbox.addWidget(button_reset)\n hbox.addWidget(button_fit)\n hbox.addWidget(button_increase)\n hbox.addWidget(button_decrease)\n\n vbox: QtWidgets.QVBoxLayout = QtWidgets.QVBoxLayout()\n vbox.addLayout(hbox)\n vbox.addWidget(table)\n\n chain_widget: QtWidgets.QWidget = QtWidgets.QWidget()\n chain_widget.setLayout(vbox)\n tab.addTab(chain_widget, chain_symbol)\n\n self.update_pricing_impv(chain_symbol)\n\n self.default_foreground = otm_impv_cell.foreground()\n self.default_background = otm_impv_cell.background()\n\n table.resizeRowsToContents()\n\n def register_event(self) -> None:\n \"\"\"\"\"\"\n self.signal_timer.connect(self.process_timer_event)\n\n self.event_engine.register(EVENT_TIMER, self.signal_timer.emit)\n\n def process_timer_event(self, event: Event) -> None:\n \"\"\"\"\"\"\n for chain_symbol in self.chain_symbols:\n self.update_chain_impv(chain_symbol)\n\n def reset_pricing_impv(self, 
chain_symbol: str) -> None:\n \"\"\"\n Set pricing impv to the otm mid impv of each strike price.\n \"\"\"\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n atm_index: str = chain.atm_index\n\n for index in chain.indexes:\n call: OptionData = chain.calls[index]\n put: OptionData = chain.puts[index]\n\n if index >= atm_index:\n otm: OptionData = call\n else:\n otm: OptionData = put\n\n call.pricing_impv = otm.mid_impv\n put.pricing_impv = otm.mid_impv\n\n self.update_pricing_impv(chain_symbol)\n\n def fit_pricing_impv(self, chain_symbol: str) -> None:\n \"\"\"\n Fit pricing impv with cubic spline algo.\n \"\"\"\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n atm_index: str = chain.atm_index\n\n strike_prices: list = []\n pricing_impvs: list = []\n\n for index in chain.indexes:\n call: OptionData = chain.calls[index]\n put: OptionData = chain.puts[index]\n cells: dict = self.cells[(chain_symbol, index)]\n\n if not cells[\"check\"].isChecked():\n if index >= atm_index:\n otm: OptionData = call\n else:\n otm: OptionData = put\n\n strike_prices.append(otm.strike_price)\n pricing_impvs.append(otm.pricing_impv)\n\n cs: interpolate.CubicSpline = interpolate.CubicSpline(strike_prices, pricing_impvs)\n\n for index in chain.indexes:\n call: OptionData = chain.calls[index]\n put: OptionData = chain.puts[index]\n\n new_impv: float = float(cs(call.strike_price))\n call.pricing_impv = new_impv\n put.pricing_impv = new_impv\n\n self.update_pricing_impv(chain_symbol)\n\n def increase_pricing_impv(self, chain_symbol: str) -> None:\n \"\"\"\n Increase pricing impv of all options within a chain by 0.1%.\n \"\"\"\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n\n for option in chain.options.values():\n option.pricing_impv += 0.001\n\n self.update_pricing_impv(chain_symbol)\n\n def decrease_pricing_impv(self, chain_symbol: str) -> None:\n \"\"\"\n Decrease pricing impv of all options within a chain by 0.1%.\n \"\"\"\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n\n for option in chain.options.values():\n option.pricing_impv -= 0.001\n\n self.update_pricing_impv(chain_symbol)\n\n def set_pricing_impv(self, value: float, chain_symbol: str, index: str) -> None:\n \"\"\"\"\"\"\n new_impv: float = value / 100\n\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n\n call: OptionData = chain.calls[index]\n call.pricing_impv = new_impv\n\n put: OptionData = chain.puts[index]\n put.pricing_impv = new_impv\n\n def update_pricing_impv(self, chain_symbol: str) -> None:\n \"\"\"\"\"\"\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n atm_index: str = chain.atm_index\n\n for index in chain.indexes:\n if index >= atm_index:\n otm: OptionData = chain.calls[index]\n else:\n otm: OptionData = chain.puts[index]\n\n value: int = round(otm.pricing_impv * 100, 1)\n\n key: tuple = (chain_symbol, index)\n cells: Optional[dict] = self.cells.get(key, None)\n if cells:\n cells[\"pricing_impv\"].setValue(value)\n\n def update_chain_impv(self, chain_symbol: str) -> None:\n \"\"\"\"\"\"\n chain: ChainData = self.portfolio.get_chain(chain_symbol)\n atm_index: str = chain.atm_index\n\n for index in chain.indexes:\n call: OptionData = chain.calls[index]\n put: OptionData = chain.puts[index]\n if index >= atm_index:\n otm: OptionData = call\n else:\n otm: OptionData = put\n\n cells: dict = self.cells[(chain_symbol, index)]\n cells[\"otm_impv\"].setText(f\"{otm.mid_impv:.1%}\")\n cells[\"call_impv\"].setText(f\"{call.mid_impv:.1%}\")\n 
cells[\"put_impv\"].setText(f\"{put.mid_impv:.1%}\")\n\n current_atm_index: str = self.chain_atm_index.get(chain_symbol, \"\")\n if current_atm_index == atm_index:\n return\n self.chain_atm_index[chain_symbol] = atm_index\n\n if current_atm_index:\n old_cells: dict = self.cells[(chain_symbol, current_atm_index)]\n\n for field in [\"otm_impv\", \"call_impv\", \"put_impv\"]:\n old_cells[field].setForeground(COLOR_WHITE)\n old_cells[field].setBackground(self.default_background)\n\n if atm_index:\n new_cells: dict = self.cells[(chain_symbol, atm_index)]\n\n for field in [\"otm_impv\", \"call_impv\", \"put_impv\"]:\n new_cells[field].setForeground(COLOR_BLACK)\n new_cells[field].setBackground(COLOR_WHITE)\n" ]
[ [ "scipy.interpolate.CubicSpline" ] ]
JeffreyJosanne/nematus_tf
[ "582be1eeba2920bfa8cc064fa642c429f5eddd6d" ]
[ "nematus/data_iterator.py" ]
[ "import numpy\n\nimport gzip\n\nimport shuffle\nfrom util import load_dict\n\ndef fopen(filename, mode='r'):\n if filename.endswith('.gz'):\n return gzip.open(filename, mode)\n return open(filename, mode)\n\nclass FileWrapper(object):\n def __init__(self, fname):\n self.pos = 0\n self.lines = fopen(fname).readlines()\n self.lines = numpy.array(self.lines, dtype=numpy.object)\n def __iter__(self):\n return self\n def next(self):\n if self.pos >= len(self.lines):\n raise StopIteration\n l = self.lines[self.pos]\n self.pos += 1\n return l\n def reset(self):\n self.pos = 0\n def seek(self, pos):\n assert pos == 0\n self.pos = 0\n def readline(self):\n return self.next()\n def shuffle_lines(self, perm):\n self.lines = self.lines[perm]\n self.pos = 0\n def __len__(self):\n return len(self.lines)\n\nclass TextIterator:\n \"\"\"Simple Bitext iterator.\"\"\"\n def __init__(self, source, target,\n source_dicts, target_dict,\n batch_size=128,\n maxlen=100,\n n_words_source=-1,\n n_words_target=-1,\n skip_empty=False,\n shuffle_each_epoch=False,\n sort_by_length=True,\n use_factor=False,\n maxibatch_size=20,\n keep_data_in_memory=False):\n if keep_data_in_memory:\n self.source, self.target = FileWrapper(source), FileWrapper(target)\n if shuffle_each_epoch:\n r = numpy.random.permutation(len(self.source))\n self.source.shuffle_lines(r)\n self.target.shuffle_lines(r)\n elif shuffle_each_epoch:\n self.source_orig = source\n self.target_orig = target\n self.source, self.target = shuffle.main([self.source_orig, self.target_orig], temporary=True)\n else:\n self.source = fopen(source, 'r')\n self.target = fopen(target, 'r')\n self.source_dicts = []\n for source_dict in source_dicts:\n self.source_dicts.append(load_dict(source_dict))\n self.target_dict = load_dict(target_dict)\n\n self.keep_data_in_memory = keep_data_in_memory\n self.batch_size = batch_size\n self.maxlen = maxlen\n self.skip_empty = skip_empty\n self.use_factor = use_factor\n\n self.n_words_source = n_words_source\n self.n_words_target = n_words_target\n\n if self.n_words_source > 0:\n for d in self.source_dicts:\n for key, idx in d.items():\n if idx >= self.n_words_source:\n del d[key]\n\n if self.n_words_target > 0:\n for key, idx in self.target_dict.items():\n if idx >= self.n_words_target:\n del self.target_dict[key]\n\n self.shuffle = shuffle_each_epoch\n self.sort_by_length = sort_by_length\n\n self.source_buffer = []\n self.target_buffer = []\n self.k = batch_size * maxibatch_size\n \n\n self.end_of_data = False\n\n def __iter__(self):\n return self\n\n def reset(self):\n if self.shuffle:\n if self.keep_data_in_memory:\n r = numpy.random.permutation(len(self.source))\n self.source.shuffle_lines(r)\n self.target.shuffle_lines(r)\n else:\n self.source, self.target = shuffle.main([self.source_orig, self.target_orig], temporary=True)\n else:\n self.source.seek(0)\n self.target.seek(0)\n\n def next(self):\n if self.end_of_data:\n self.end_of_data = False\n self.reset()\n raise StopIteration\n\n source = []\n target = []\n\n # fill buffer, if it's empty\n assert len(self.source_buffer) == len(self.target_buffer), 'Buffer size mismatch!'\n\n if len(self.source_buffer) == 0:\n for ss in self.source:\n ss = ss.split()\n tt = self.target.readline().split()\n \n if self.skip_empty and (len(ss) == 0 or len(tt) == 0):\n continue\n if len(ss) > self.maxlen or len(tt) > self.maxlen:\n continue\n\n self.source_buffer.append(ss)\n self.target_buffer.append(tt)\n if len(self.source_buffer) == self.k:\n break\n\n if len(self.source_buffer) == 0 or 
len(self.target_buffer) == 0:\n self.end_of_data = False\n self.reset()\n raise StopIteration\n\n # sort by target buffer\n if self.sort_by_length:\n tlen = numpy.array([len(t) for t in self.target_buffer])\n tidx = tlen.argsort()\n\n _sbuf = [self.source_buffer[i] for i in tidx]\n _tbuf = [self.target_buffer[i] for i in tidx]\n\n self.source_buffer = _sbuf\n self.target_buffer = _tbuf\n\n else:\n self.source_buffer.reverse()\n self.target_buffer.reverse()\n\n\n try:\n # actual work here\n while True:\n\n # read from source file and map to word index\n try:\n ss = self.source_buffer.pop()\n except IndexError:\n break\n tmp = []\n for w in ss:\n if self.use_factor:\n w = [self.source_dicts[i][f] if f in self.source_dicts[i] else 1 for (i,f) in enumerate(w.split('|'))]\n else:\n w = [self.source_dicts[0][w] if w in self.source_dicts[0] else 1]\n tmp.append(w)\n ss = tmp\n\n # read from source file and map to word index\n tt = self.target_buffer.pop()\n tt = [self.target_dict[w] if w in self.target_dict else 1\n for w in tt]\n if self.n_words_target > 0:\n tt = [w if w < self.n_words_target else 1 for w in tt]\n\n source.append(ss)\n target.append(tt)\n\n if len(source) >= self.batch_size or \\\n len(target) >= self.batch_size:\n break\n except IOError:\n self.end_of_data = True\n\n return source, target\n" ]
[ [ "numpy.array" ] ]
joeranbosma/ModelsGenesis
[ "5b18ea88d662e5250523434d02cfdcb6b527e634" ]
[ "pytorch/utils.py" ]
[ "from __future__ import print_function\nimport math\nimport os\nimport random\nimport copy\nimport scipy\nimport imageio\nimport string\nimport numpy as np\nfrom skimage.transform import resize\ntry: # SciPy >= 0.19\n from scipy.special import comb\nexcept ImportError:\n from scipy.misc import comb\n\ndef bernstein_poly(i, n, t):\n \"\"\"\n The Bernstein polynomial of n, i as a function of t\n \"\"\"\n\n return comb(n, i) * ( t**(n-i) ) * (1 - t)**i\n\ndef bezier_curve(points, nTimes=1000):\n \"\"\"\n Given a set of control points, return the\n bezier curve defined by the control points.\n\n Control points should be a list of lists, or list of tuples\n such as [ [1,1], \n [2,3], \n [4,5], ..[Xn, Yn] ]\n nTimes is the number of time steps, defaults to 1000\n\n See http://processingjs.nihongoresources.com/bezierinfo/\n \"\"\"\n\n nPoints = len(points)\n xPoints = np.array([p[0] for p in points])\n yPoints = np.array([p[1] for p in points])\n\n t = np.linspace(0.0, 1.0, nTimes)\n\n polynomial_array = np.array([ bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints) ])\n \n xvals = np.dot(xPoints, polynomial_array)\n yvals = np.dot(yPoints, polynomial_array)\n\n return xvals, yvals\n\ndef data_augmentation(x, y, prob=0.5):\n # augmentation by flipping\n cnt = 3\n while random.random() < prob and cnt > 0:\n degree = random.choice([0, 1, 2])\n x = np.flip(x, axis=degree)\n y = np.flip(y, axis=degree)\n cnt = cnt - 1\n\n return x, y\n\ndef nonlinear_transformation(x, prob=0.5):\n if random.random() >= prob:\n return x\n points = [[0, 0], [random.random(), random.random()], [random.random(), random.random()], [1, 1]]\n xpoints = [p[0] for p in points]\n ypoints = [p[1] for p in points]\n xvals, yvals = bezier_curve(points, nTimes=100000)\n if random.random() < 0.5:\n # Half change to get flip\n xvals = np.sort(xvals)\n else:\n xvals, yvals = np.sort(xvals), np.sort(yvals)\n nonlinear_x = np.interp(x, xvals, yvals)\n return nonlinear_x\n\ndef local_pixel_shuffling(x, prob=0.5):\n if random.random() >= prob:\n return x\n image_temp = copy.deepcopy(x)\n orig_image = copy.deepcopy(x)\n _, img_rows, img_cols, img_deps = x.shape\n num_block = 10000\n for _ in range(num_block):\n block_noise_size_x = random.randint(1, img_rows//10)\n block_noise_size_y = random.randint(1, img_cols//10)\n block_noise_size_z = random.randint(1, img_deps//10)\n noise_x = random.randint(0, img_rows-block_noise_size_x)\n noise_y = random.randint(0, img_cols-block_noise_size_y)\n noise_z = random.randint(0, img_deps-block_noise_size_z)\n window = orig_image[0, noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z,\n ]\n window = window.flatten()\n np.random.shuffle(window)\n window = window.reshape((block_noise_size_x, \n block_noise_size_y, \n block_noise_size_z))\n image_temp[0, noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z] = window\n local_shuffling_x = image_temp\n\n return local_shuffling_x\n\ndef image_in_painting(x):\n _, img_rows, img_cols, img_deps = x.shape\n cnt = 5\n while cnt > 0 and random.random() < 0.95:\n block_noise_size_x = random.randint(img_rows//6, img_rows//3)\n block_noise_size_y = random.randint(img_cols//6, img_cols//3)\n block_noise_size_z = random.randint(img_deps//6, img_deps//3)\n noise_x = random.randint(3, img_rows-block_noise_size_x-3)\n noise_y = random.randint(3, img_cols-block_noise_size_y-3)\n noise_z = random.randint(3, img_deps-block_noise_size_z-3)\n x[:, 
\n noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z] = np.random.rand(block_noise_size_x, \n block_noise_size_y, \n block_noise_size_z, ) * 1.0\n cnt -= 1\n return x\n\ndef image_out_painting(x):\n _, img_rows, img_cols, img_deps = x.shape\n image_temp = copy.deepcopy(x)\n x = np.random.rand(x.shape[0], x.shape[1], x.shape[2], x.shape[3], ) * 1.0\n block_noise_size_x = img_rows - random.randint(3*img_rows//7, 4*img_rows//7)\n block_noise_size_y = img_cols - random.randint(3*img_cols//7, 4*img_cols//7)\n block_noise_size_z = img_deps - random.randint(3*img_deps//7, 4*img_deps//7)\n noise_x = random.randint(3, img_rows-block_noise_size_x-3)\n noise_y = random.randint(3, img_cols-block_noise_size_y-3)\n noise_z = random.randint(3, img_deps-block_noise_size_z-3)\n x[:, \n noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z] = image_temp[:, noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z]\n cnt = 4\n while cnt > 0 and random.random() < 0.95:\n block_noise_size_x = img_rows - random.randint(3*img_rows//7, 4*img_rows//7)\n block_noise_size_y = img_cols - random.randint(3*img_cols//7, 4*img_cols//7)\n block_noise_size_z = img_deps - random.randint(3*img_deps//7, 4*img_deps//7)\n noise_x = random.randint(3, img_rows-block_noise_size_x-3)\n noise_y = random.randint(3, img_cols-block_noise_size_y-3)\n noise_z = random.randint(3, img_deps-block_noise_size_z-3)\n x[:, \n noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z] = image_temp[:, noise_x:noise_x+block_noise_size_x, \n noise_y:noise_y+block_noise_size_y, \n noise_z:noise_z+block_noise_size_z]\n cnt -= 1\n return x\n \n\n\ndef generate_pair(img, batch_size, config, status=\"test\"):\n img_rows, img_cols, img_deps = img.shape[2], img.shape[3], img.shape[4]\n while True:\n index = [i for i in range(img.shape[0])]\n random.shuffle(index)\n y = img[index[:batch_size]]\n x = copy.deepcopy(y)\n for n in range(batch_size):\n \n # Autoencoder\n x[n] = copy.deepcopy(y[n])\n \n # Flip\n x[n], y[n] = data_augmentation(x[n], y[n], config.flip_rate)\n\n # Local Shuffle Pixel\n x[n] = local_pixel_shuffling(x[n], prob=config.local_rate)\n \n # Apply non-Linear transformation with an assigned probability\n x[n] = nonlinear_transformation(x[n], config.nonlinear_rate)\n \n # Inpainting & Outpainting\n if random.random() < config.paint_rate:\n if random.random() < config.inpaint_rate:\n # Inpainting\n x[n] = image_in_painting(x[n])\n else:\n # Outpainting\n x[n] = image_out_painting(x[n])\n\n # Save sample images module\n if config.save_samples is not None and status == \"train\" and random.random() < 0.01:\n n_sample = random.choice( [i for i in range(config.batch_size)] )\n sample_1 = np.concatenate((x[n_sample,0,:,:,2*img_deps//6], y[n_sample,0,:,:,2*img_deps//6]), axis=1)\n sample_2 = np.concatenate((x[n_sample,0,:,:,3*img_deps//6], y[n_sample,0,:,:,3*img_deps//6]), axis=1)\n sample_3 = np.concatenate((x[n_sample,0,:,:,4*img_deps//6], y[n_sample,0,:,:,4*img_deps//6]), axis=1)\n sample_4 = np.concatenate((x[n_sample,0,:,:,5*img_deps//6], y[n_sample,0,:,:,5*img_deps//6]), axis=1)\n final_sample = np.concatenate((sample_1, sample_2, sample_3, sample_4), axis=0)\n final_sample = final_sample * 255.0\n final_sample = final_sample.astype(np.uint8)\n file_name = ''.join([random.choice(string.ascii_letters + 
string.digits) for n in range(10)])+'.'+config.save_samples\n imageio.imwrite(os.path.join(config.sample_path, config.exp_name, file_name), final_sample)\n\n yield (x, y)\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.dot", "numpy.random.rand", "numpy.interp", "numpy.random.shuffle", "scipy.misc.comb", "numpy.sort", "numpy.linspace", "numpy.flip" ] ]
mattiaguerri/transformers
[ "ebc36108dc1c20985905c79f7d6a00f57f3cd3ae" ]
[ "src/transformers/modeling_mobilebert.py" ]
[ "# MIT License\n#\n# Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport logging\nimport math\nimport os\nimport warnings\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom transformers.modeling_bert import BertIntermediate\n\nfrom .activations import gelu, gelu_new, swish\nfrom .configuration_mobilebert import MobileBertConfig\nfrom .file_utils import add_start_docstrings, add_start_docstrings_to_callable\nfrom .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer\n\n\nlogger = logging.getLogger(__name__)\nMOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\"mobilebert-uncased\"]\n\n\ndef load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n name = name.replace(\"ffn_layer\", \"ffn\")\n name = name.replace(\"FakeLayerNorm\", \"LayerNorm\")\n name = name.replace(\"extra_output_weights\", \"dense/kernel\")\n name = name.replace(\"bert\", \"mobilebert\")\n name = name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if m_name[-11:] == \"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n assert pointer.shape == array.shape\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model\n\n\ndef mish(x):\n return x * torch.tanh(nn.functional.softplus(x))\n\n\nclass NoNorm(nn.Module):\n def __init__(self, feat_size, eps=None):\n super().__init__()\n self.bias = nn.Parameter(torch.zeros(feat_size))\n self.weight = nn.Parameter(torch.ones(feat_size))\n\n def forward(self, input_tensor):\n return input_tensor * self.weight + self.bias\n\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish, \"gelu_new\": gelu_new, \"mish\": mish}\nNORM2FN = {\"layer_norm\": torch.nn.LayerNorm, \"no_norm\": NoNorm}\n\n\nclass MobileBertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.trigram_input = config.trigram_input\n self.embedding_size = config.embedding_size\n self.hidden_size = config.hidden_size\n\n self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n embed_dim_multiplier = 3 if self.trigram_input else 1\n 
embedded_input_size = self.embedding_size * embed_dim_multiplier\n self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size)\n\n self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n seq_length = input_shape[1]\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n if position_ids is None:\n position_ids = torch.arange(seq_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).expand(input_shape)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n if self.trigram_input:\n # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited\n # Devices (https://arxiv.org/abs/2004.02984)\n #\n # The embedding table in BERT models accounts for a substantial proportion of model size. To compress\n # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.\n # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512\n # dimensional output.\n inputs_embeds = torch.cat(\n [\n F.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0),\n inputs_embeds,\n F.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0),\n ],\n dim=2,\n )\n if self.trigram_input or self.embedding_size != self.hidden_size:\n inputs_embeds = self.embedding_transformation(inputs_embeds)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n embeddings = inputs_embeds + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass MobileBertSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.true_hidden_size, self.all_head_size)\n self.key = nn.Linear(config.true_hidden_size, self.all_head_size)\n self.value = nn.Linear(\n config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size\n )\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n query_tensor,\n key_tensor,\n value_tensor,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n ):\n mixed_query_layer = self.query(query_tensor)\n mixed_key_layer = self.key(key_tensor)\n mixed_value_layer = self.value(value_tensor)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" 
and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n return outputs\n\n\nclass MobileBertSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_bottleneck = config.use_bottleneck\n self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)\n if not self.use_bottleneck:\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, residual_tensor):\n layer_outputs = self.dense(hidden_states)\n if not self.use_bottleneck:\n layer_outputs = self.dropout(layer_outputs)\n layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)\n return layer_outputs\n\n\nclass MobileBertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = MobileBertSelfAttention(config)\n self.output = MobileBertSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n query_tensor,\n key_tensor,\n value_tensor,\n layer_input,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n ):\n self_outputs = self.self(\n query_tensor,\n key_tensor,\n value_tensor,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions,\n )\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n attention_output = self.output(self_outputs[0], layer_input)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass MobileBertIntermediate(BertIntermediate):\n def __init__(self, config):\n 
super().__init__(config)\n self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size)\n\n\nclass OutputBottleneck(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.true_hidden_size, config.hidden_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, residual_tensor):\n layer_outputs = self.dense(hidden_states)\n layer_outputs = self.dropout(layer_outputs)\n layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)\n return layer_outputs\n\n\nclass MobileBertOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_bottleneck = config.use_bottleneck\n self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size)\n if not self.use_bottleneck:\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n else:\n self.bottleneck = OutputBottleneck(config)\n\n def forward(self, intermediate_states, residual_tensor_1, residual_tensor_2):\n layer_output = self.dense(intermediate_states)\n if not self.use_bottleneck:\n layer_output = self.dropout(layer_output)\n layer_output = self.LayerNorm(layer_output + residual_tensor_1)\n else:\n layer_output = self.LayerNorm(layer_output + residual_tensor_1)\n layer_output = self.bottleneck(layer_output, residual_tensor_2)\n return layer_output\n\n\nclass BottleneckLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n layer_input = self.dense(hidden_states)\n layer_input = self.LayerNorm(layer_input)\n return layer_input\n\n\nclass Bottleneck(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.key_query_shared_bottleneck = config.key_query_shared_bottleneck\n self.use_bottleneck_attention = config.use_bottleneck_attention\n self.input = BottleneckLayer(config)\n if self.key_query_shared_bottleneck:\n self.attention = BottleneckLayer(config)\n\n def forward(self, hidden_states):\n # This method can return three different tuples of values. These different values make use of bottlenecks,\n # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory\n # usage. These linear layers have weights that are learned during training.\n #\n # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the\n # key, query, value, and \"layer input\" to be used by the attention layer.\n # This bottleneck is used to project the hidden states. 
This last layer input will be used as a residual tensor\n # in the attention self output, after the attention scores have been computed.\n #\n # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return\n # four values, three of which have been passed through a bottleneck: the query and key, passed through the same\n # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck.\n #\n # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck,\n # and the residual layer will be this value passed through a bottleneck.\n\n bottlenecked_hidden_states = self.input(hidden_states)\n if self.use_bottleneck_attention:\n return (bottlenecked_hidden_states,) * 4\n elif self.key_query_shared_bottleneck:\n shared_attention_input = self.attention(hidden_states)\n return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)\n else:\n return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)\n\n\nclass FFNOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)\n self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, residual_tensor):\n layer_outputs = self.dense(hidden_states)\n layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)\n return layer_outputs\n\n\nclass FFNLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.intermediate = MobileBertIntermediate(config)\n self.output = FFNOutput(config)\n\n def forward(self, hidden_states):\n intermediate_output = self.intermediate(hidden_states)\n layer_outputs = self.output(intermediate_output, hidden_states)\n return layer_outputs\n\n\nclass MobileBertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_bottleneck = config.use_bottleneck\n self.num_feedforward_networks = config.num_feedforward_networks\n\n self.attention = MobileBertAttention(config)\n self.intermediate = MobileBertIntermediate(config)\n self.output = MobileBertOutput(config)\n if self.use_bottleneck:\n self.bottleneck = Bottleneck(config)\n if config.num_feedforward_networks > 1:\n self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n ):\n if self.use_bottleneck:\n query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)\n else:\n query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4\n\n self_attention_outputs = self.attention(\n query_tensor,\n key_tensor,\n value_tensor,\n layer_input,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n )\n attention_output = self_attention_outputs[0]\n s = (attention_output,)\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n if self.num_feedforward_networks != 1:\n for i, ffn_module in enumerate(self.ffn):\n attention_output = ffn_module(attention_output)\n s += (attention_output,)\n\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output, hidden_states)\n outputs = (\n (layer_output,)\n + outputs\n + (\n torch.tensor(1000),\n 
query_tensor,\n key_tensor,\n value_tensor,\n layer_input,\n attention_output,\n intermediate_output,\n )\n + s\n )\n return outputs\n\n\nclass MobileBertEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n ):\n all_hidden_states = ()\n all_attentions = ()\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n head_mask[i],\n encoder_hidden_states,\n encoder_attention_mask,\n output_attentions,\n )\n hidden_states = layer_outputs[0]\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if output_attentions:\n outputs = outputs + (all_attentions,)\n return outputs # last-layer hidden state, (all hidden states), (all attentions)\n\n\nclass MobileBertPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.do_activate = config.classifier_activation\n if self.do_activate:\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n if not self.do_activate:\n return first_token_tensor\n else:\n pooled_output = self.dense(first_token_tensor)\n pooled_output = F.tanh(pooled_output)\n return pooled_output\n\n\nclass MobileBertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = NORM2FN[\"layer_norm\"](config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass MobileBertLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = MobileBertPredictionHeadTransform(config)\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False)\n self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0))\n hidden_states += self.bias\n return hidden_states\n\n\nclass MobileBertOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions 
= MobileBertLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass MobileBertPreTrainingHeads(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = MobileBertLMPredictionHead(config)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass MobileBertPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = MobileBertConfig\n pretrained_model_archive_map = MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST\n load_tf_weights = load_tf_weights_in_mobilebert\n base_model_prefix = \"mobilebert\"\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, (nn.LayerNorm, NoNorm)):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\nMOBILEBERT_START_DOCSTRING = r\"\"\"\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n usage and behavior.\n\n Parameters:\n config (:class:`~transformers.MobileBertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nMOBILEBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.MobileBertTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.encode_plus` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n\n `What are token type IDs? 
<../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n if the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask\n is used in the cross-attention if the model is configured as a decoder.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertModel(MobileBertPreTrainedModel):\n \"\"\"\n https://arxiv.org/pdf/2004.02984.pdf\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n self.embeddings = MobileBertEmbeddings(config)\n self.encoder = MobileBertEncoder(config)\n self.pooler = MobileBertPooler(config)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_hidden_states=None,\n output_attentions=None,\n ):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n pooler_output (:obj:`torch.FloatTensor`: of shape 
:obj:`(batch_size, hidden_size)`):\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer and a Tanh activation function. The Linear\n layer weights are trained from the next sentence prediction (classification)\n objective during pre-training.\n\n This output is usually *not* a good summary\n of the semantic content of the input, you're often better off averaging or pooling\n the sequence of hidden-states for the whole input sequence.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import MobileBertModel, MobileBertTokenizer\n import torch\n\n tokenizer = MobileBertTokenizer.from_pretrained(model_name_or_path)\n model = MobileBertModel.from_pretrained(model_name_or_path)\n\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n attention_mask, input_shape, self.device\n )\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask 
= None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output)\n outputs = (sequence_output, pooled_output,) + encoder_outputs[\n 1:\n ] # add hidden_states and attentions if they are here\n return outputs # sequence_output, pooled_output, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"MobileBert Model with two heads on top as done during the pre-training: a `masked language modeling` head and\n a `next sentence prediction (classification)` head. \"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForPreTraining(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.mobilebert = MobileBertModel(config)\n self.cls = MobileBertPreTrainingHeads(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def tie_weights(self):\n \"\"\"\n Tie the weights between the input embeddings and the output embeddings.\n If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning\n the weights instead.\n \"\"\"\n output_embeddings = self.get_output_embeddings()\n input_embeddings = self.get_input_embeddings()\n\n resized_dense = nn.Linear(\n input_embeddings.num_embeddings, self.config.hidden_size - self.config.embedding_size, bias=False\n )\n kept_data = self.cls.predictions.dense.weight.data[\n ..., : min(self.cls.predictions.dense.weight.data.shape[1], resized_dense.weight.data.shape[1])\n ]\n resized_dense.weight.data[..., : self.cls.predictions.dense.weight.data.shape[1]] = kept_data\n self.cls.predictions.dense = resized_dense\n self.cls.predictions.dense.to(self.device)\n\n if output_embeddings is not None:\n self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n next_sentence_label=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):\n Labels for computing the next sequence prediction 
(classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring)\n Indices should be in ``[0, 1]``.\n ``0`` indicates sequence B is a continuation of sequence A,\n ``1`` indicates sequence B is a random sequence.\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.\n prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n Prediction scores of the next sequence prediction (classification) head (scores of True/False\n continuation before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n from transformers import MobileBertTokenizer, MobileBertForPreTraining\n import torch\n tokenizer = MobileBertTokenizer.from_pretrained(model_name_or_path)\n model = MobileBertForPreTraining.from_pretrained(model_name_or_path)\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n prediction_scores, seq_relationship_scores = outputs[:2]\n\n \"\"\"\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n sequence_output, pooled_output = outputs[:2]\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n outputs = (prediction_scores, seq_relationship_score,) + outputs[\n 2:\n ] # add hidden states and attention if they are here\n\n if labels is not None and next_sentence_label is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = masked_lm_loss + next_sentence_loss\n outputs = (total_loss,) + outputs\n\n return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\"\"\"MobileBert Model with a `language modeling` head on top. 
\"\"\", MOBILEBERT_START_DOCSTRING)\nclass MobileBertForMaskedLM(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.mobilebert = MobileBertModel(config)\n self.cls = MobileBertOnlyMLMHead(config)\n self.config = config\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def tie_weights(self):\n \"\"\"\n Tie the weights between the input embeddings and the output embeddings.\n If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning\n the weights instead.\n \"\"\"\n output_embeddings = self.get_output_embeddings()\n input_embeddings = self.get_input_embeddings()\n\n resized_dense = nn.Linear(\n input_embeddings.num_embeddings, self.config.hidden_size - self.config.embedding_size, bias=False\n )\n kept_data = self.cls.predictions.dense.weight.data[\n ..., : min(self.cls.predictions.dense.weight.data.shape[1], resized_dense.weight.data.shape[1])\n ]\n resized_dense.weight.data[..., : self.cls.predictions.dense.weight.data.shape[1]] = kept_data\n self.cls.predictions.dense = resized_dense\n self.cls.predictions.dense.to(self.device)\n\n if output_embeddings is not None:\n self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n **kwargs\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n masked_lm_loss (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Masked language modeling loss.\n prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the 
weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import MobileBertTokenizer, MobileBertForMaskedLM\n import torch\n\n tokenizer = MobileBertTokenizer.from_pretrained('mobilebert-uncased')\n model = MobileBertForMaskedLM.from_pretrained('mobilebert-uncased')\n\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=input_ids)\n\n loss, prediction_scores = outputs[:2]\n\n \"\"\"\n if \"masked_lm_labels\" in kwargs:\n warnings.warn(\n \"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.\",\n FutureWarning,\n )\n labels = kwargs.pop(\"masked_lm_labels\")\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here\n\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n outputs = (masked_lm_loss,) + outputs\n\n return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)\n\n\nclass MobileBertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\n@add_start_docstrings(\n \"\"\"MobileBert Model with a `next sentence prediction (classification)` head on top. \"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForNextSentencePrediction(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.mobilebert = MobileBertModel(config)\n self.cls = MobileBertOnlyNSPHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n next_sentence_label=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n next_sentence_label (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see ``input_ids`` docstring)\n Indices should be in ``[0, 1]``.\n ``0`` indicates sequence B is a continuation of sequence A,\n ``1`` indicates sequence B is a random sequence.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`next_sentence_label` is provided):\n Next sequence prediction (classification) loss.\n seq_relationship_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import MobileBertTokenizer, MobileBertForNextSentencePrediction\n import torch\n\n tokenizer = MobileBertTokenizer.from_pretrained('mobilebert-uncased')\n model = MobileBertForNextSentencePrediction.from_pretrained('mobilebert-uncased')\n\n prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n next_sentence = \"The sky is blue due to the shorter wavelength of blue light.\"\n encoding = tokenizer.encode_plus(prompt, next_sentence, return_tensors='pt')\n\n loss, logits = model(**encoding, next_sentence_label=torch.LongTensor([1]))\n assert logits[0, 0] < logits[0, 1] # next sentence was random\n \"\"\"\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n pooled_output = outputs[1]\n\n seq_relationship_score = self.cls(pooled_output)\n\n outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here\n if next_sentence_label is not None:\n loss_fct = CrossEntropyLoss()\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n outputs = (next_sentence_loss,) + outputs\n\n return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
\"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForSequenceClassification(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.mobilebert = MobileBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import BertTokenizer, BertForSequenceClassification\n import torch\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n model = BertForSequenceClassification.from_pretrained('bert-base-uncased')\n\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1]).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n\n loss, logits = outputs[:2]\n \"\"\"\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n pooled_output = outputs[1]\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = 
loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n return outputs # (loss), logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`). \"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForQuestionAnswering(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.mobilebert = MobileBertModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Positions outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Positions outside of the sequence are not taken into account for computing the loss.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):\n Span-start scores (before SoftMax).\n end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):\n Span-end scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import MobileBertTokenizer, MobileBertForQuestionAnswering\n import torch\n\n tokenizer = MobileBertTokenizer.from_pretrained(model_name_or_path)\n model = MobileBertForQuestionAnswering.from_pretrained(model_name_or_path)\n\n question, text = \"Who was Jim Henson?\", \"Jim Henson was a 
nice puppet\"\n encoding = tokenizer.encode_plus(question, text)\n input_ids, token_type_ids = encoding[\"input_ids\"], encoding[\"token_type_ids\"]\n start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))\n\n all_tokens = tokenizer.convert_ids_to_tokens(input_ids)\n answer = ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])\n\n assert answer == \"a nice puppet\"\n\n \"\"\"\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n outputs = (start_logits, end_logits,) + outputs[2:]\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n outputs = (total_loss,) + outputs\n\n return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"MobileBert Model with a multiple choice classification head on top (a linear layer on top of\n the pooled output and a softmax) e.g. for RocStories/SWAG tasks. \"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForMultipleChoice(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.mobilebert = MobileBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format(\"(batch_size, num_choices, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the multiple choice classification loss.\n Indices should be in ``[0, ..., num_choices-1]`` where `num_choices` is the size of the second dimension\n of the input tensors. (see `input_ids` above)\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):\n Classification loss.\n classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):\n `num_choices` is the second dimension of the input tensors. 
(see `input_ids` above).\n\n Classification scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import MobileBertTokenizer, MobileBertForMultipleChoice\n import torch\n\n tokenizer = MobileBertTokenizer.from_pretrained('mobilebert-uncased')\n model = MobileBertForMultipleChoice.from_pretrained('mobilebert-uncased')\n\n prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n choice0 = \"It is eaten with a fork and a knife.\"\n choice1 = \"It is eaten while held in the hand.\"\n labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1\n\n encoding = tokenizer.batch_encode_plus([[prompt, choice0], [prompt, choice1]], return_tensors='pt', pad_to_max_length=True)\n outputs = model(**{k: v.unsqueeze(0) for k,v in encoding.items()}, labels=labels) # batch size is 1\n\n # the linear classifier still needs to be trained\n loss, logits = outputs[:2]\n \"\"\"\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n outputs = (loss,) + outputs\n\n return outputs # (loss), reshaped_logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"MobileBert Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
\"\"\",\n MOBILEBERT_START_DOCSTRING,\n)\nclass MobileBertForTokenClassification(MobileBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.mobilebert = MobileBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(MOBILEBERT_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.MobileBertConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :\n Classification loss.\n scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)\n Classification scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import MobileBertTokenizer, MobileBertForTokenClassification\n import torch\n\n tokenizer = MobileBertTokenizer.from_pretrained('mobilebert-uncased')\n model = MobileBertForTokenClassification.from_pretrained('mobilebert-uncased')\n\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n\n loss, scores = outputs[:2]\n\n \"\"\"\n\n outputs = self.mobilebert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n 
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), scores, (hidden_states), (attentions)\n" ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.nn.Dropout", "torch.nn.functional.softplus", "torch.nn.MSELoss", "torch.arange", "torch.nn.Softmax", "tensorflow.train.load_variable", "torch.nn.CrossEntropyLoss", "torch.from_numpy", "tensorflow.train.list_variables", "torch.ones", "numpy.transpose", "torch.tensor", "torch.nn.functional.pad", "torch.matmul", "torch.nn.Embedding", "torch.nn.functional.tanh" ] ]
sergeyshilin/kaggle-statoil-iceberg-classifier-challenge
[ "fa5c7e721297d9e1478593951b4d9cf16a0cd66d" ]
[ "ensembling/make_submit.py" ]
[ "import sys\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import log_loss\n\npower = float(sys.argv[1])\n\ndef transform(preds):\n return preds ** power / (preds ** power + (1.0 - preds) ** power)\n\nwith open('submit_id', 'r') as submit_id:\n last_submit_id = int(submit_id.read())\n\nlast_submit_id = str(last_submit_id).zfill(3)\n\nensemble = pd.read_csv('ensembles/ensemble_{}.csv'.format(last_submit_id))\nensemble_cv = pd.read_csv('ensembles_cv/ensemble_cv_{}.csv'.format(last_submit_id))\n\ny_cv = ensemble_cv.is_iceberg\nx_cv = ensemble_cv.drop('is_iceberg', axis=1).values.mean(axis=1)\n\nprint ('cv log_loss before: {}'.format(log_loss(y_cv, x_cv)))\n\nx_cv_calib = transform(x_cv)\nprint ('cv log_loss calibration: {}'.format(log_loss(y_cv, x_cv_calib)))\n\nx_cv_clip = np.clip(x_cv, 0.001, 0.999)\nprint ('cv log_loss clip: {}'.format(log_loss(y_cv, x_cv_clip)))\n\nx_cv_calib_clip = np.clip(transform(x_cv), 0.001, 0.999)\nprint ('cv log_loss calib+clip: {}'.format(log_loss(y_cv, x_cv_calib_clip)))\n\nsubmit = pd.read_csv('../data/sample_submission.csv')\nsubmit.is_iceberg = np.clip(transform(ensemble.values.mean(axis=1)), 0.001, 0.999)\nsubmit.to_csv('submits/submission_{}_calib_clip_1_4.csv'.format(last_submit_id), index=False)\n\n" ]
[ [ "pandas.read_csv", "sklearn.metrics.log_loss", "numpy.clip" ] ]
FoVNull/MFDSL
[ "8c6fc99260c1c02f4f45cfb14a111028d2a96ded" ]
[ "validation/new/bare_model.py" ]
[ "from typing import Dict, Any\n\nimport tensorflow as tf\nfrom tensorflow.keras.utils import plot_model\nfrom kashgari_local.abc_feature_model import ABCClassificationModel\nfrom kashgari.layers import L\n\n\nclass Bare_Model(ABCClassificationModel):\n def __init__(self, embedding, **params):\n super().__init__(embedding)\n self.feature_D = params[\"feature_D\"]\n\n @classmethod\n def default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:\n \"\"\"\n Get hyper parameters of model\n Returns:\n hyper parameters dict\n\n activation_function list:\n {softmax, elu, selu, softplus, softsign, swish,\n relu, gelu, tanh, sigmoid, exponential,\n hard_sigmoid, linear, serialize, deserialize, get}\n \"\"\"\n return {\n 'layer_bilstm1': {\n 'units': 128,\n 'return_sequences': True\n },\n 'layer_time_distributed': {},\n 'conv_layer1': {\n 'filters': 128,\n 'kernel_size': 4,\n 'padding': 'valid',\n 'activation': 'relu'\n },\n 'layer_output1': {\n 'activation': 'softmax'\n },\n }\n\n def build_model_arc(self):\n \"\"\"\n build model architectural\n\n BiLSTM + Convolution + Attention\n \"\"\"\n features = tf.keras.Input(shape=(None, self.feature_D), name=\"features\")\n\n l1_reg = tf.keras.regularizers.l1(0.01)\n l2_reg = tf.keras.regularizers.L2(0.01)\n\n output_dim = self.label_processor.vocab_size\n config = self.hyper_parameters\n embed_model = self.embedding.embed_model\n # Define layers for BiLSTM\n layer_stack = [\n L.Bidirectional(L.LSTM(**config['layer_bilstm1'])),\n L.Dropout(rate=0.2),\n ]\n\n # tensor flow in Layers {tensor:=layer(tensor)}\n tensor = embed_model.output\n for layer in layer_stack:\n tensor = layer(tensor)\n\n # extend features\n features_tensor = L.Dense(64, kernel_regularizer=l1_reg)(features)\n # tensor = L.Concatenate(axis=-1)([features_tensor, tensor])\n query = L.Concatenate(axis=-1)([tensor, features_tensor])\n key = L.Concatenate(axis=-1)([features_tensor, tensor])\n\n query_value_attention_seq = L.Attention()([query, key])\n # query_value_attention_seq = L.MultiHeadAttention(\n # num_heads=4, key_dim=2, dropout=0.5\n # )(tensor, tensor)\n\n query_encoding = L.GlobalMaxPool1D()(query)\n query_value_attention = L.GlobalMaxPool1D()(query_value_attention_seq)\n\n input_tensor = L.Concatenate(axis=1)([query_encoding, query_value_attention])\n\n # output tensor\n input_tensor = L.Dropout(rate=0.1)(input_tensor)\n output_tensor = L.Dense(\n output_dim, activation='sigmoid', name=\"output0\",\n kernel_regularizer=l2_reg\n )(input_tensor)\n self.tf_model = tf.keras.Model(inputs=[embed_model.inputs, features], outputs=output_tensor)\n\n # plot_model(self.tf_model, to_file=\"D:/PycProject/TripleC/reference/model.png\")\n" ]
[ [ "tensorflow.keras.regularizers.L2", "tensorflow.keras.Input", "tensorflow.keras.regularizers.l1", "tensorflow.keras.Model" ] ]
sandeepsn1997/civilapp
[ "749027e904924575f60883c5d44688101f7e9864" ]
[ "resources/draw1.py" ]
[ "\n\nimport matplotlib.pyplot as plt\n\ndef draw_line_x(ox, oy, x, y, length):\n for i in range(length+1):\n ox.append(x+i)\n oy.append(y)\n return ox, oy\n\ndef draw_line_y(ox, oy, x, y, length):\n for i in range(length+1):\n ox.append(x)\n oy.append(y+i)\n return ox, oy\n\ndef draw_sqr(ox, oy, x, y, length):\n draw_line_x(ox, oy, x, y, length)\n draw_line_x(ox, oy, x, y+length, length)\n draw_line_y(ox, oy, x, y, length)\n draw_line_y(ox, oy, x + length, y, length)\n return ox, oy\n\ndef draw_rect(ox, oy, x, y, length, breadth):\n draw_line_x(ox, oy, x, y, length)\n draw_line_x(ox, oy, x, y+breadth, length)\n draw_line_y(ox, oy, x, y, breadth)\n draw_line_y(ox, oy, x + length, y, breadth)\n\n return ox, oy\n\n\ndef draw_layout():\n ox, oy = [], []\n\n # Outter Box\n ox, oy = draw_rect(ox, oy, -60, 0, 470,300)\n\n #Sites Row1\n ox, oy = draw_rect(ox, oy, 40, 240,25, 30)\n ox, oy = draw_rect(ox, oy, 85, 240, 25, 30)\n ox, oy = draw_rect(ox, oy, 130, 240, 25, 30)\n\n\n ox, oy = draw_rect(ox, oy, 265, 240, 25, 30)\n ox, oy = draw_rect(ox, oy, 310, 240, 25, 30)\n # outer boundry for row1\n ox, oy = draw_rect(ox, oy, 30, 225, 45, 55)\n ox, oy = draw_rect(ox, oy, 75, 225, 45, 55)\n ox, oy = draw_rect(ox, oy, 120, 225, 45, 55)\n\n\n ox, oy = draw_rect(ox, oy, 255, 225, 45, 55)\n ox, oy = draw_rect(ox, oy, 300, 225, 45, 55)\n\n # Sites Row2\n ox, oy = draw_rect(ox, oy, 40, 150, 25, 30)\n ox, oy = draw_rect(ox, oy, 85, 150, 25, 30)\n ox, oy = draw_rect(ox, oy, 130, 150, 25, 30)\n\n\n ox, oy = draw_rect(ox, oy, 310, 150, 25, 30)\n ox, oy = draw_rect(ox, oy, 355, 150, 25, 30)\n # outer boundry for row2\n ox, oy = draw_rect(ox, oy, 30, 140, 45, 55)\n ox, oy = draw_rect(ox, oy, 75, 140, 45, 55)\n ox, oy = draw_rect(ox, oy, 120, 140, 45, 55)\n\n\n\n ox, oy = draw_rect(ox, oy, 300, 140, 45, 55)\n ox, oy = draw_rect(ox, oy, 345, 140, 45, 55)\n # Sites Row3\n ox, oy = draw_rect(ox, oy, 40,100, 25, 30)\n ox, oy = draw_rect(ox, oy, 85, 100, 25, 30)\n ox, oy = draw_rect(ox, oy, 130, 100, 25, 30)\n\n\n\n ox, oy = draw_rect(ox, oy, 310, 100, 25, 30)\n ox, oy = draw_rect(ox, oy,355 , 100, 25, 30)\n\n # outer boundry for row3\n\n ox, oy = draw_rect(ox, oy, 30, 85, 45, 55)\n ox, oy = draw_rect(ox, oy, 75, 85, 45, 55)\n ox, oy = draw_rect(ox, oy, 120, 85, 45, 55)\n\n\n\n ox, oy = draw_rect(ox, oy, 300, 85, 45, 55)\n ox, oy = draw_rect(ox, oy, 345, 85, 45, 55)\n # Sites Row4\n ox, oy = draw_rect(ox, oy, 40, 10,25, 30)\n ox, oy = draw_rect(ox, oy, 85, 10, 25, 30)\n ox, oy = draw_rect(ox, oy, 130, 10, 25, 30)\n ox, oy = draw_rect(ox, oy, 310, 10, 25, 30)\n ox, oy = draw_rect(ox, oy, 355, 10, 25, 30)\n\n # outer boundry for row4\n ox, oy = draw_rect(ox, oy, 30, 0, 45, 55)\n ox, oy = draw_rect(ox, oy, 75, 0, 45, 55)\n ox, oy = draw_rect(ox, oy, 120,0, 45, 55)\n\n\n\n ox, oy = draw_rect(ox, oy, 300, 0, 45, 55)\n ox, oy = draw_rect(ox, oy, 345, 0, 45, 55)\n\n return ox, oy\n\ndef draw_empty_space():\n ox, oy = [], []\n ox, oy = draw_sqr(ox, oy, -50, 265, 25)#1\n ox, oy = draw_rect(ox, oy, -50,65,25,135)#2\n ox, oy = draw_sqr(ox, oy,190,240,35)#4\n ox, oy = draw_sqr(ox, oy, 225, 150, 20)#6\n ox, oy = draw_rect(ox, oy, 190,150, 25,35)#5\n ox, oy = draw_rect(ox, oy, -50, 5,40,50 )\n\n ox, oy = draw_rect(ox, oy, 360, 240, 45,55)#7\n ox, oy = draw_rect(ox, oy, 190,90,30,45)#8\n ox, oy = draw_sqr(ox, oy, 240,5, 25)#10\n ox, oy = draw_rect(ox, oy,230,105,40,30)#9\n ox, oy = draw_sqr(ox, oy,190 , 5, 40)#11\n\n return ox, oy\n\nplt.figure(figsize=(10, 8))\nox, oy = draw_layout()\nplt.plot(ox, oy, \"sk\")\n\n\nox, oy = 
draw_empty_space()\nplt.plot(ox, oy, \"sg\")\nplt.axis(\"equal\")\nplt.grid(True)\n\n\nplt.annotate(\"1\",xy=(-40,275))\nplt.annotate(\"2\",xy=(-40,135))\nplt.annotate(\"3\",xy=(-35,30))\nplt.annotate(\"4\",xy=(205,255))\nplt.annotate(\"5\",xy=(195,165))\nplt.annotate(\"6\",xy=(230,155))\nplt.annotate(\"7\",xy=(375,265))\nplt.annotate(\"8\",xy=(200,112))\nplt.annotate(\"9\",xy=(245,115))\nplt.annotate(\"10\",xy=(245,15))\nplt.annotate(\"11\",xy=(200,25))\nplt.xlabel('X-Coordinates')\nplt.ylabel('Y-Coordinates')\nplt.title('Construction Site Layout Plan',fontsize=15,color=\"red\")\nplt.figtext(0.905,0.8,\"1=Security shed\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.77,\"2=Parking\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.74,\"3=Site office\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.71,\"4=Canteen\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.68,\"5=Labour Shed\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.65,\"6=Toilet\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.62,\"7=Ware House\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.59,\"8=Power House\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.56,\"9=Water tank\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.53,\"10=Q/C Lab\",fontsize=10,color=\"blue\")\nplt.figtext(0.905,0.50,\"11=Batching Plant\",fontsize=10,color=\"blue\")\n\n\nplt.show()\n\n#plt.axis('scaled')\n#plt.axis(\"square\")\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.annotate", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.figtext", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "matplotlib.pyplot.axis" ] ]
dego1985/wave_simulation
[ "05f5119aab158e0958170d90066c2b87b998e658" ]
[ "sim_004_complex_001/module/plot.py" ]
[ "import numpy as np\nfrom glumpy import app, gl, glm, gloo\nimport torch\n\nimport module.gpu_work as gw\n\n\nclass mesh():\n def __init__(self, motion):\n # plane\n self.motion = motion\n self.N = N = motion.N[:2]\n self.dx = dx = motion.dx\n\n # vertices\n X = [dx * (np.arange(N[i]) - N[i] * 0.5) for i in range(2)]\n x, y = X\n x, y = np.meshgrid(x, y)\n z = motion.update_numpy()\n\n vertices = np.transpose([x, y, z], (1, 2, 0)).reshape(-1, 3)\n\n # colors\n colors = np.random.randn(len(vertices), 4).astype(np.float32)\n\n # outline\n idx = []\n for i in np.arange(N[1]-1):\n for j in np.arange(N[0]-1):\n offset = i * N[0] + j\n idx.append([offset, offset+1, offset+1+N[0], offset+N[0]] +\n [offset, offset+N[0], offset+1, offset+1+N[0]])\n outline = np.array(idx).reshape(-1)\n\n # outline\n idx = np.arange(N[0]*N[1])\n point_idx = np.array(idx).reshape(-1)\n\n ############################################################\n # glumpy Vertex Buffer\n dtype = [(\"position\", np.float32, 3),\n (\"color\", np.float32, 4)]\n VertexBuffer = np.zeros(len(vertices), dtype)\n VertexBuffer[\"position\"] = vertices\n VertexBuffer[\"color\"] = colors\n VertexBuffer = VertexBuffer.view(gloo.VertexBuffer)\n\n # glumpy Index Buffer\n outline = outline.astype(np.uint32).view(gloo.IndexBuffer)\n\n # glumpy Index Buffer\n point_idx = point_idx.astype(np.uint32).view(gloo.IndexBuffer)\n\n ############################################################\n # self\n self.VertexBuffer = VertexBuffer\n self.outline = outline\n self.point_idx = point_idx\n\n ############################################################\n # torch\n v = torch.from_numpy(np.transpose(vertices, (1, 0)).reshape(1, 3, N[0], N[1]).astype(np.float32)).cuda()\n c = torch.from_numpy(np.transpose(colors, (1, 0)).reshape(1, 4, N[0], N[1]).astype(np.float32)).cuda()\n self.v = v\n self.c = c\n\n def update(self, dt=0):\n motion = self.motion\n v = self.v\n c = self.c\n\n z = motion.update(dt)\n\n zc = 0.5 * z\n c[0, 0] = 0 + 2*zc\n c[0, 1] = 0.5 - zc\n c[0, 2] = 1.0 + 2*zc\n c[0, 3] = 1\n\n v[0, 2] = z*0.3\n\n\nclass plot3d():\n def __init__(self, obj):\n self.obj = obj\n self.phi, self.theta = 0, 0\n\n # init\n self.init_window()\n self.bind_obj(obj)\n self.update_VertexBuffer()\n app.run()\n\n def init_window(self):\n window = app.Window(width=1920, height=1080,\n color=(0.30, 0.30, 0.35, 1.00))\n\n @window.event\n def on_init():\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glPolygonOffset(1, 1)\n gl.glEnable(gl.GL_LINE_SMOOTH)\n gl.glLineWidth(0.55)\n\n @window.event\n def on_draw(dt):\n window.clear()\n self.on_draw(dt)\n\n @window.event\n def on_resize(width, height):\n program = self.program\n program['projection'] = glm.perspective(\n 45.0, width / float(height), 0.1, 100.0)\n\n self.window = window\n\n def bind_obj(self, obj):\n # make obj\n vertex = \"\"\"\n uniform vec4 ucolor;\n uniform mat4 model;\n uniform mat4 view;\n uniform mat4 projection;\n attribute vec3 position;\n attribute vec4 color;\n varying vec4 v_color;\n void main()\n {\n v_color = ucolor * color;\n gl_Position = projection * view * model * vec4(position,1.0);\n }\n \"\"\"\n\n fragment = \"\"\"\n varying vec4 v_color;\n void main()\n {\n gl_FragColor = v_color;\n }\n \"\"\"\n\n VertexBuffer = obj.VertexBuffer\n outline = obj.outline\n point_idx = obj.point_idx\n program = gloo.Program(vertex, fragment)\n\n program.bind(VertexBuffer)\n program['model'] = np.eye(4, dtype=np.float32)\n program['view'] = glm.translation(0, 0, -5)\n\n VertexBuffer.activate()\n 
VertexBuffer.deactivate()\n\n self.RegisteredBuffer = gw.make_RegisteredBuffer(VertexBuffer)\n self.program = program\n self.outline = outline\n self.point_idx = point_idx\n \n def update_VertexBuffer(self, dt=0):\n # torch\n self.obj.update(dt)\n v = self.obj.v\n c = self.obj.c\n V_ = torch.cat((v, c), dim=1)\n V_ = V_.contiguous(memory_format=torch.channels_last)\n\n # copy\n gw.copy_torch2RegisteredBuffer(self.RegisteredBuffer, V_[0])\n\n def on_draw(self, dt):\n program = self.program\n window = self.window\n\n # set title\n window.set_title(str(\n window.fps).encode(\"ascii\"))\n\n self.update_VertexBuffer(dt)\n\n # # Point\n # gl.glDisable(gl.GL_BLEND)\n # gl.glEnable(gl.GL_DEPTH_TEST)\n # gl.glPointSize(5)\n # program['ucolor'] = 1, 1, 1, 1\n # program.draw(gl.GL_POINTS, self.point_idx)\n\n # Fill\n gl.glDisable(gl.GL_BLEND)\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glEnable(gl.GL_POLYGON_OFFSET_FILL)\n program['ucolor'] = 1, 1, 1, 1\n program.draw(gl.GL_QUADS, self.outline)\n\n # Outlined program\n # gl.glDisable(gl.GL_POLYGON_OFFSET_FILL)\n # gl.glEnable(gl.GL_BLEND)\n # gl.glDepthMask(gl.GL_FALSE)\n # program['ucolor'] = 0, 0, 0, 1\n # program.draw(gl.GL_LINES, self.outline)\n # gl.glDepthMask(gl.GL_TRUE)\n\n # Make program rotate\n self.theta += 0*dt # degrees\n self.phi += 2*dt # degrees\n model = np.eye(4, dtype=np.float32)\n glm.rotate(model, -90, 1, 0, 0)\n glm.rotate(model, self.theta, 0, 0, 1)\n glm.rotate(model, self.phi, 0, 1, 0)\n glm.rotate(model, 45, 1, 0, 0)\n program['model'] = model\n\n" ]
[ [ "torch.cat", "numpy.array", "numpy.eye", "numpy.arange", "numpy.transpose", "numpy.meshgrid" ] ]
WeiChengTseng/Basic_Peptides_Model
[ "0b2bb8f157ec4c9752382eca8ffcbaca94fcaa45" ]
[ "model.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport math\nimport os\n\nclass Model():\n def __init__(self, num_label, word_dim=10, batch_size=32):\n self.num_label = num_label\n self.word_dim = word_dim\n self.batch_size = batch_size\n\n return\n\n def build(self, x, y, reg, keep_prob):\n \"\"\"\n Build the model.\n\n Input:\n - x: the input data, that is, the peptide sequences.\n - y: the ground truth of the peptides.\n - reg: the weight of the regression.\n Output:\n - loss: the loss of the model.\n - logits: the result of the logit regression.\n - predict: the prediction of the peptides.\n \"\"\"\n logits, params = self.sub_model(x, keep_prob)\n loss = self.loss(y, logits, params, reg)\n predict = self.predict(logits)\n\n return loss, logits, predict\n\n def sub_model(self, x, keep_prob):\n \"\"\"\n Define the architecture of the model.\n\n Input:\n - x: the input data, that is, the peptide sequences.\n - keep_prob: the keep probability of dropout.\n\n Output:\n - logits: the result of the logit regression.\n - params: some weights and filters used in the model.\n \"\"\"\n\n params = []\n with tf.name_scope('filters'):\n Filter1 = tf.Variable(tf.truncated_normal([6, self.word_dim, 128], stddev = 0.1), name = 'Filter_1')\n Filter2 = tf.Variable(tf.truncated_normal([6, 128, 128], stddev = 0.1), name = 'Filter_2')\n Filter3 = tf.Variable(tf.truncated_normal([5, 128, 256], stddev = 0.1), name = 'Filter_3')\n Filter4 = tf.Variable(tf.truncated_normal([5, 256, 256], stddev = 0.1), name = 'Filter_4')\n Filter5 = tf.Variable(tf.truncated_normal([5, 256, 512], stddev = 0.1), name = 'Filter_5')\n Filter6 = tf.Variable(tf.truncated_normal([5, 512, 512], stddev = 0.1), name = 'Filter_6')\n self.variable_summaries(Filter1)\n self.variable_summaries(Filter2)\n self.variable_summaries(Filter3)\n self.variable_summaries(Filter4)\n self.variable_summaries(Filter5)\n self.variable_summaries(Filter6)\n\n with tf.name_scope('weights'):\n W7 = tf.Variable(tf.truncated_normal([7168, 1024], stddev = 0.1), name = 'W7')\n W8 = tf.Variable(tf.truncated_normal([1024, self.num_label], stddev = 0.1), name = 'W8')\n self.variable_summaries(W7)\n self.variable_summaries(W8)\n\n with tf.name_scope('bias'):\n b1 = tf.Variable(tf.zeros([128]), name = 'b1')\n b2 = tf.Variable(tf.zeros([128]), name = 'b2')\n b3 = tf.Variable(tf.zeros([256]), name = 'b3')\n b4 = tf.Variable(tf.zeros([256]), name = 'b4')\n b5 = tf.Variable(tf.zeros([512]), name = 'b5')\n b6 = tf.Variable(tf.zeros([512]), name = 'b6')\n b7 = tf.Variable(tf.zeros([1024]), name = 'b7')\n b8 = tf.Variable(tf.zeros([self.num_label]), name = 'b8')\n self.variable_summaries(b1)\n self.variable_summaries(b2)\n self.variable_summaries(b3)\n self.variable_summaries(b4)\n self.variable_summaries(b5)\n self.variable_summaries(b6)\n self.variable_summaries(b7)\n self.variable_summaries(b8)\n alpha = 0.2\n\n with tf.name_scope('Conv_1'):\n L1 = tf.nn.conv1d(x, Filter1, stride = 1, padding = 'VALID', data_format='NHWC') + b1\n with tf.name_scope('leaky_relu_1'):\n L1_act = tf.nn.leaky_relu(L1, alpha)\n L1_bn = tf.layers.batch_normalization(L1_act, scale = False, name = 'bn_1')\n\n with tf.name_scope('Conv_2'):\n L2 = tf.nn.conv1d(L1_bn, Filter2, stride = 1, padding = 'VALID') + b2\n with tf.name_scope('leaky_relu_2'):\n L2_act = tf.nn.leaky_relu(L2, alpha)\n L2_pooled = tf.layers.max_pooling1d(L2_act, pool_size = 2, strides = 2, name = 'max_pool_2')\n L2_bn = tf.layers.batch_normalization(L2_pooled, scale = False, name = 'bn_2')\n\n with tf.name_scope('Conv_3'): \n L3 
= tf.nn.conv1d(L2_bn, Filter3, stride = 1, padding = 'VALID') + b3\n with tf.name_scope('leaky_relu_3'):\n L3_act = tf.nn.leaky_relu(L3, alpha)\n L3_pooled = tf.layers.max_pooling1d(L3_act, pool_size = 2, strides = 2, name = 'max_pool_3')\n L3_bn = tf.layers.batch_normalization(L3_pooled, scale = False, name = 'bn_3')\n\n with tf.name_scope('Conv_4'): \n L4 = tf.nn.conv1d(L3_bn, Filter4, stride = 1, padding = 'VALID') + b4\n with tf.name_scope('leaky_relu_4'):\n L4_act = tf.nn.leaky_relu(L4, alpha)\n L4_pooled = tf.layers.max_pooling1d(L4_act, pool_size = 2, strides = 2, name = 'max_pool_4')\n L4_bn = tf.layers.batch_normalization(L4_pooled, scale = False, name = 'bn_4')\n\n with tf.name_scope('Conv_5'): \n L5 = tf.nn.conv1d(L4_bn, Filter5, stride = 1, padding = 'VALID') + b5\n with tf.name_scope('leaky_relu_5'):\n L5_act = tf.nn.leaky_relu(L5, alpha)\n L5_pooled = tf.layers.max_pooling1d(L5_act, pool_size = 2, strides = 2, name = 'max_pool_5')\n L5_bn = tf.layers.batch_normalization(L5_pooled, scale = False, name = 'bn_5')\n\n with tf.name_scope('Conv_6'): \n L6 = tf.nn.conv1d(L5_bn, Filter6, stride = 1, padding = 'VALID') + b6\n with tf.name_scope('leaky_relu_6'):\n L6_act = tf.nn.leaky_relu(L6, alpha)\n L6_pooled = tf.layers.max_pooling1d(L6_act, pool_size = 2, strides = 2, name = 'max_pool_6')\n L6_bn = tf.layers.batch_normalization(L6_pooled, scale = False, name = 'bn_6')\n \n reshaped_data = tf.reshape(L6_bn, shape = (self.batch_size, -1), name = 'reshape')\n\n with tf.name_scope('full_connected_7'):\n L7 = tf.matmul(reshaped_data, W7) + b7\n with tf.name_scope('leaky_relu_7'):\n L7_act = tf.nn.leaky_relu(L7, alpha)\n\n L7_dropout = tf.nn.dropout(L7_act, keep_prob=keep_prob, name = 'dropout')\n L7_bn = tf.layers.batch_normalization(L7_dropout, scale = True, name = 'bm_7')\n \n with tf.name_scope('full_connected_8'):\n L8 = tf.matmul(L7_bn, W8) + b8\n\n logits = L8\n params += [Filter1, Filter2, Filter3, Filter4, Filter5, Filter6]\n params += [W7, W8]\n return logits, params\n\n def predict(self, logits):\n \"\"\"\n Predict the labels according to the model.\n\n Input:\n - logits: the result of the logit regression.\n\n Output:\n - x: the result of the prediction\n \"\"\"\n x = tf.nn.sigmoid(logits)\n \n return x\n\n def loss(self, labels, logits, params, reg):\n \"\"\"\n Define the loss of the model.\n\n Input:\n - label: the ground truth of the prediction.\n - logits: the result of the logit regression.\n - params: some weights and filters used in the model.\n - reg: the weight of the L2 loss\n\n Output:\n - loss: the loss of the model.\n \"\"\"\n\n L2_loss_list = list(map(tf.nn.l2_loss, params))\n L2_loss = tf.add_n(L2_loss_list)\n loss = tf.losses.sigmoid_cross_entropy(labels, logits) + L2_loss * reg\n tf.summary.scalar('loss', loss)\n return loss\n \n def variable_summaries(self, var):\n \"\"\"\n Define the tensorboard scalar and histogram summary.\n\n Input:\n - var: the variable we want to summarize in tensorboard.\n \"\"\"\n with tf.name_scope(\"summaries\"):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var-mean)))\n tf.summary.scalar('stddev',stddev)\n tf.summary.scalar('max',tf.reduce_max(var))\n tf.summary.scalar('min',tf.reduce_min(var))\n tf.summary.histogram('histogram',var)\n return\n\n" ]
[ [ "tensorflow.reduce_min", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.reduce_mean", "tensorflow.nn.leaky_relu", "tensorflow.summary.histogram", "tensorflow.layers.batch_normalization", "tensorflow.nn.conv1d", "tensorflow.add_n", "tensorflow.losses.sigmoid_cross_entropy", "tensorflow.nn.sigmoid", "tensorflow.nn.dropout", "tensorflow.zeros", "tensorflow.summary.scalar", "tensorflow.truncated_normal", "tensorflow.name_scope", "tensorflow.reduce_max", "tensorflow.layers.max_pooling1d", "tensorflow.square" ] ]
DHsLc/test
[ "4b2fb49fd7578afe7e289936f347af581b5bdab1" ]
[ "tensorflow/python/eager/tape_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Basic tests for autograd-based gradients.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import custom_gradient\nfrom tensorflow.python.eager import tensor\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\n# Importing nn_grad for the registration functions.\nfrom tensorflow.python.ops import nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops import nn_ops\n\n\n@custom_gradient.custom_gradient\ndef two_outputs(a, b):\n mm = math_ops.matmul(a, b)\n r = math_ops.reduce_sum(mm)\n\n def grad(dmm, dr):\n return [\n math_ops.matmul(dmm, b, transpose_b=True) +\n math_ops.matmul(array_ops.ones_like(b * dr), b, transpose_b=True),\n math_ops.matmul(a, dmm, transpose_b=True) +\n math_ops.matmul(a, array_ops.ones_like(a) * dr, transpose_b=True)\n ]\n\n return [mm, r], grad\n\n\nclass TapeTest(test.TestCase):\n\n def testMultiOutput(self):\n\n def fn(x, y):\n c = x + y\n # Multiple outputs from split.\n d, f = array_ops.split(c, 2)\n return d + f\n\n a = tensor.Tensor([[1., 0.], [0., 1.]])\n b = tensor.Tensor([[1., 2.], [3., 4.]])\n da, db = backprop.gradients_function(fn, [0, 1])(a, b)\n with context.graph_mode(), self.test_session():\n tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32)\n tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)\n tf_c = tf_a + tf_b\n tf_d, tf_f = array_ops.split(tf_c, 2, axis=1)\n tf_e = tf_d + tf_f\n tf_da, tf_db = gradients_impl.gradients(tf_e, [tf_a, tf_b])\n\n self.assertAllEqual(da.numpy(), tf_da.eval())\n self.assertAllEqual(db.numpy(), tf_db.eval())\n\n def testBasicFunctional(self):\n\n def forward(a, b):\n mm = math_ops.matmul(a, b)\n return math_ops.reduce_sum(mm)\n\n aa = tensor.Tensor([[1., 0.], [0., 1.]])\n bb = tensor.Tensor([[1., 2.], [3., 4.]])\n da, = backprop.gradients_function(forward, ['a'])(aa, bb)\n self.assertAllEqual(da.numpy(),\n math_ops.matmul(\n array_ops.ones_like(aa),\n array_ops.transpose(bb)).numpy())\n\n def testBasicFunctionalPositionalArg(self):\n\n def forward(a, b):\n mm = math_ops.matmul(a, b)\n return math_ops.reduce_sum(mm)\n\n aa = tensor.Tensor([[1., 0.], [0., 1.]])\n bb = tensor.Tensor([[1., 2.], [3., 4.]])\n da, = backprop.gradients_function(forward, [0])(aa, bb)\n self.assertAllEqual(da.numpy(),\n math_ops.matmul(\n array_ops.ones_like(aa),\n array_ops.transpose(bb)).numpy())\n\n def testBasicFunctionalWithValue(self):\n\n def forward(a, b):\n mm = 
math_ops.matmul(a, b)\n return math_ops.reduce_sum(mm)\n\n aa = tensor.Tensor([[1., 0.], [0., 1.]])\n bb = tensor.Tensor([[1., 2.], [3., 4.]])\n val, (da,) = backprop.val_and_grad_function(forward, ['a'])(aa, bb)\n self.assertAllEqual(da.numpy(),\n math_ops.matmul(\n array_ops.ones_like(aa),\n array_ops.transpose(bb)).numpy())\n self.assertAllEqual(val.numpy(), forward(aa, bb).numpy())\n\n def testTwoOutputs(self):\n\n def fn(x, y):\n mm, r = two_outputs(x, y)\n return r + math_ops.reduce_sum(mm)\n\n a = tensor.Tensor([[1., 0.], [0., 1.]])\n b = tensor.Tensor([[1., 2.], [3., 4.]])\n da, db = backprop.gradients_function(fn, [0, 1])(a, b)\n with context.graph_mode(), self.test_session():\n tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32)\n tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)\n tf_mm = math_ops.matmul(tf_a, tf_b)\n tf_rr = 2 * math_ops.reduce_sum(tf_mm)\n tf_da, tf_db = gradients_impl.gradients(tf_rr, [tf_a, tf_b])\n\n self.assertAllEqual(da.numpy(), tf_da.eval())\n self.assertAllEqual(db.numpy(), tf_db.eval())\n\n def testGcTwoOutputs(self):\n\n def fn(x, y):\n return nn_ops.sparse_softmax_cross_entropy_with_logits(logits=x,\n labels=y)[0]\n\n labels = tensor.Tensor([0])\n logits = tensor.Tensor([[0.0]])\n grad, = backprop.gradients_function(fn, [0])(logits, labels)\n self.assertAllEqual(grad.numpy(), [[0.0]])\n\n def testTfTensor(self):\n\n def fn(x):\n return x\n\n t = constant_op.constant(1.0)\n g, = backprop.gradients_function(fn, [0])(t)\n self.assertEqual(g.numpy(), 1.0)\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "tensorflow.python.eager.tensor.Tensor", "tensorflow.python.eager.context.graph_mode", "tensorflow.python.eager.backprop.val_and_grad_function", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.ops.math_ops.reduce_sum", "tensorflow.python.eager.backprop.gradients_function", "tensorflow.python.ops.nn_ops.sparse_softmax_cross_entropy_with_logits", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.eager.test.main", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.array_ops.split" ] ]
LuizPitaAlmeida/image_caption_generator
[ "e368b9f23ef283856a42f78b724d3181245b27de", "e368b9f23ef283856a42f78b724d3181245b27de" ]
[ "src/utils/hardware_stats.py", "src/data_process/data_loader.py" ]
[ "import torch\nimport nvidia_smi\nimport psutil\n\n\nclass HardwareStats():\n def __init__(self):\n super().__init__()\n self.device = torch.device(\n \"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n nvidia_smi.nvmlInit()\n self.handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)\n\n def hardware_stats(self):\n \"\"\"\n Returns a dict containing some hardware related stats\n \"\"\"\n res = nvidia_smi.nvmlDeviceGetUtilizationRates(self.handle)\n return {\"cpu\": f\"{str(psutil.cpu_percent())}%\",\n \"mem\": f\"{str(psutil.virtual_memory().percent)}%\",\n \"gpu\": f\"{str(res.gpu)}%\",\n \"gpu_mem\": f\"{str(res.memory)}%\"}\n", "\"\"\"data_loader\nDo a torch dataloader of Coco Captions Dataset\n\nMinor modifications in Yunjey Choi code that can be found in:\n<https://github.com/yunjey/pytorch-tutorial>\nThanks Yunjey Choi.\n\"\"\"\nimport torch\nimport torchvision.transforms as transforms\nimport torch.utils.data as data\nimport os\nimport pickle\nimport numpy as np\nimport nltk\nfrom PIL import Image\nfrom pycocotools.coco import COCO\n\nfrom build_vocab import Vocabulary\n\n\nclass CocoDataset(data.Dataset):\n \"\"\"COCO Custom Dataset compatible with torch.utils.data.DataLoader.\"\"\"\n def __init__(self, root, json, vocab, transform=None):\n \"\"\"Set the path for images, captions and vocabulary wrapper.\n\n Args:\n root: image directory.\n json: coco annotation file path.\n vocab: vocabulary wrapper.\n transform: image transformer.\n \"\"\"\n self.root = root\n self.coco = COCO(json)\n self.ids = list(self.coco.anns.keys())\n self.vocab = vocab\n self.transform = transform\n\n def __getitem__(self, index):\n \"\"\"Returns one data pair (image and caption).\"\"\"\n coco = self.coco\n vocab = self.vocab\n ann_id = self.ids[index]\n caption = coco.anns[ann_id]['caption']\n img_id = coco.anns[ann_id]['image_id']\n path = coco.loadImgs(img_id)[0]['file_name']\n\n image = Image.open(os.path.join(self.root, path)).convert('RGB')\n if self.transform is not None:\n image = self.transform(image)\n\n # Convert caption (string) to word ids.\n tokens = nltk.tokenize.word_tokenize(str(caption).lower())\n caption = []\n caption.append(vocab('<start>'))\n caption.extend([vocab(token) for token in tokens])\n caption.append(vocab('<end>'))\n target = torch.Tensor(caption)\n return image, target\n\n def __len__(self):\n return len(self.ids)\n\n\ndef collate_fn(data):\n \"\"\"Creates mini-batch tensors from the list of tuples (image, caption).\n\n We should build custom collate_fn rather than using default collate_fn,\n because merging caption (including padding) is not supported in default.\n\n Args:\n data: list of tuple (image, caption).\n - image: torch tensor of shape (3, 256, 256).\n - caption: torch tensor of shape (?); variable length.\n\n Returns:\n images: torch tensor of shape (batch_size, 3, 256, 256).\n targets: torch tensor of shape (batch_size, padded_length).\n lengths: list; valid length for each padded caption.\n \"\"\"\n # Sort a data list by caption length (descending order).\n data.sort(key=lambda x: len(x[1]), reverse=True)\n images, captions = zip(*data)\n\n # Merge images (from tuple of 3D tensor to 4D tensor).\n images = torch.stack(images, 0)\n\n # Merge captions (from tuple of 1D tensor to 2D tensor).\n lengths = [len(cap) for cap in captions]\n targets = torch.zeros(len(captions), max(lengths)).long()\n for i, cap in enumerate(captions):\n end = lengths[i]\n targets[i, :end] = cap[:end]\n return images, targets, lengths\n\n\ndef get_loader(root, json, vocab, transform, 
batch_size, shuffle, num_workers):\n \"\"\"Returns torch.utils.data.DataLoader for custom coco dataset.\"\"\"\n # COCO caption dataset\n coco = CocoDataset(root=root,\n json=json,\n vocab=vocab,\n transform=transform)\n\n # Data loader for COCO dataset\n # This will return (images, captions, lengths) for each iteration.\n # images: a tensor of shape (batch_size, 3, 224, 224).\n # captions: a tensor of shape (batch_size, padded_length).\n # lengths: a list indicating valid length for each caption. length is\n # (batch_size).\n data_loader = torch.utils.data.DataLoader(dataset=coco,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate_fn)\n return data_loader\n" ]
[ [ "torch.cuda.is_available" ], [ "torch.Tensor", "torch.stack", "torch.utils.data.DataLoader" ] ]
javierlorenzod/pytorch-lightning
[ "6dba26666aa564db414eb238d99a4213006d8220", "6dba26666aa564db414eb238d99a4213006d8220" ]
[ "pytorch_lightning/trainer/training_loop.py", "tests/metrics/test_metric.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom contextlib import contextmanager, suppress\nfrom copy import copy, deepcopy\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning.callbacks import EarlyStopping\nfrom pytorch_lightning.core.memory import ModelSummary\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.core.step_result import Result\nfrom pytorch_lightning.plugins import ParallelPlugin\nfrom pytorch_lightning.trainer.states import RunningStage, TrainerState\nfrom pytorch_lightning.trainer.supporters import Accumulator, TensorRunningAccum\nfrom pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType, parsing\nfrom pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.memory import recursive_detach\nfrom pytorch_lightning.utilities.model_helpers import is_overridden\nfrom pytorch_lightning.utilities.parsing import AttributeDict\nfrom pytorch_lightning.utilities.warnings import WarningCache\n\n\nclass TrainLoop:\n\n def __init__(self, trainer, multiple_trainloader_mode):\n self.trainer = trainer\n self.early_stopping_accumulator = None\n self.checkpoint_accumulator = None\n self.accumulated_loss = None\n self.warning_cache = WarningCache()\n self._teardown_already_run = False\n self.running_loss = TensorRunningAccum(window_length=20)\n self.automatic_optimization = True\n self._curr_step_result = None\n self._cur_grad_norm_dict = None\n self._multiple_trainloader_mode = multiple_trainloader_mode\n self._skip_backward = False\n self.trainer._multiple_trainloader_mode = multiple_trainloader_mode\n\n def on_trainer_init(\n self,\n max_epochs,\n min_epochs,\n max_steps,\n min_steps,\n num_sanity_val_steps,\n automatic_optimization,\n weights_summary,\n ):\n self.trainer.global_step = 0\n self.trainer.current_epoch = 0\n self.trainer.interrupted = False\n self.trainer.should_stop = False\n self.trainer._state = TrainerState.INITIALIZING\n\n self.trainer.total_batch_idx = 0\n self.trainer.batch_idx = 0\n self.trainer.num_training_batches = 0\n self.trainer.train_dataloader = None\n self.automatic_optimization = automatic_optimization\n\n # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000\n self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs\n # If neither min_epochs or min_steps is set, then use existing default of min_epochs = 1\n self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs\n self.trainer.max_steps = max_steps\n self.trainer.min_steps = min_steps\n\n if num_sanity_val_steps == -1:\n self.trainer.num_sanity_val_steps = float(\"inf\")\n else:\n self.trainer.num_sanity_val_steps = num_sanity_val_steps\n\n self.trainer.weights_summary = weights_summary\n if weights_summary is not None and weights_summary not in 
ModelSummary.MODES:\n raise MisconfigurationException(\n f\"`weights_summary` can be None, {', '.join(ModelSummary.MODES)}, got {weights_summary}\"\n )\n\n @property\n def num_optimizers(self):\n num_optimizers = len(self.get_optimizers_iterable())\n return num_optimizers\n\n def should_skip_training(self):\n should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps\n should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs\n return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0\n\n def on_train_start(self):\n # hook\n self.trainer.call_hook(\"on_train_start\")\n\n # provide rank to profiler\n self.trainer.profile_connector.on_train_start(self.trainer)\n\n def setup_fit(self, model, train_dataloader, val_dataloaders, datamodule):\n # clean hparams\n if hasattr(model, \"hparams\"):\n parsing.clean_namespace(model.hparams)\n\n # links data to the trainer\n self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)\n\n # check that model is configured correctly\n self.trainer.config_validator.verify_loop_configurations(model)\n\n # attach model log function to callback\n self.trainer.callback_connector.attach_model_logging_functions(model)\n\n def on_train_end(self):\n if self._teardown_already_run:\n return\n\n self._teardown_already_run = True\n\n # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates\n # when a checkpoint was saved at the last step\n self.trainer.global_step -= 1\n self.check_checkpoint_callback(should_update=True, is_last=True)\n self.trainer.global_step += 1\n\n # hook\n self.trainer.call_hook(\"on_train_end\")\n\n # todo: TPU 8 cores hangs in flush with TensorBoard. 
Might do for all loggers.\n # It might be related to xla tensors blocked when moving the cpu\n # kill loggers\n if self.trainer.logger is not None and self.trainer.training_type_plugin.should_finalize:\n self.trainer.logger.finalize(\"success\")\n\n # summarize profile results\n if self.trainer.global_rank == 0:\n self.trainer.profiler.describe()\n\n # give accelerators a chance to finish\n self.trainer.accelerator.on_train_end()\n\n def check_checkpoint_callback(self, should_update, is_last=False):\n # TODO bake this logic into the ModelCheckpoint callback\n if should_update and self.trainer.checkpoint_connector.has_trained:\n callbacks = self.trainer.checkpoint_callbacks\n\n if is_last and any(cb.save_last for cb in callbacks):\n rank_zero_info(\"Saving latest checkpoint...\")\n\n model = self.trainer.get_model()\n\n for cb in callbacks:\n cb.on_validation_end(self.trainer, model)\n\n def check_early_stopping_callback(self, should_update):\n # TODO bake this logic into the EarlyStopping callback\n if should_update and self.trainer.checkpoint_connector.has_trained:\n callbacks = [c for c in self.trainer.callbacks if isinstance(c, EarlyStopping)]\n model = self.trainer.get_model()\n\n for cb in callbacks:\n cb.on_validation_end(self.trainer, model)\n\n def on_train_epoch_start(self, epoch):\n\n # update training progress in trainer\n self.trainer.current_epoch = epoch\n\n model = self.trainer.get_model()\n\n # reset train dataloader\n if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n # todo: specify the possible exception\n with suppress(Exception):\n # set seed for distributed sampler (enables shuffling for each epoch)\n self.trainer.train_dataloader.sampler.set_epoch(epoch)\n\n # changing gradient according accumulation_scheduler\n self.trainer.accumulation_scheduler.on_epoch_start(self.trainer, self.trainer.get_model())\n\n # stores accumulated grad fractions per batch\n self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)\n\n # structured result accumulators for callbacks\n self.early_stopping_accumulator = Accumulator()\n self.checkpoint_accumulator = Accumulator()\n\n # hook\n self.trainer.call_hook(\"on_epoch_start\")\n self.trainer.call_hook(\"on_train_epoch_start\")\n\n def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):\n # hook\n self.trainer.call_hook('on_train_batch_end', batch_end_outputs, batch, batch_idx, dataloader_idx)\n self.trainer.call_hook('on_batch_end')\n\n # figure out what to track for epoch end\n self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)\n\n # reset batch logger internals\n self.trainer.logger_connector.on_train_batch_end()\n\n def reset_train_val_dataloaders(self, model):\n if self.trainer.train_dataloader is None or not self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_train_dataloader(model)\n\n if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:\n self.trainer.reset_val_dataloader(model)\n\n def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):\n\n # track the outputs to reduce at the end of the epoch\n for opt_idx, opt_outputs in enumerate(batch_end_outputs):\n sample_output = opt_outputs[-1]\n\n # decide if we need to reduce at the end of the epoch automatically\n auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end\n hook_overridden = (\n 
is_overridden(\"training_epoch_end\", model=self.trainer.get_model())\n or is_overridden(\"on_train_epoch_end\", model=self.trainer.get_model())\n )\n\n # only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end\n if not (hook_overridden or auto_reduce_tng_result):\n continue\n\n # with 1 step (no tbptt) don't use a sequence at epoch end\n if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):\n opt_outputs = opt_outputs[0]\n\n epoch_output[opt_idx].append(opt_outputs)\n\n def get_optimizers_iterable(self):\n \"\"\"\n Generates an iterable with (idx, optimizer) for each optimizer.\n \"\"\"\n if not self.trainer.optimizer_frequencies:\n # call training_step once per optimizer\n return list(enumerate(self.trainer.optimizers))\n\n optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)\n optimizers_loop_length = optimizer_freq_cumsum[-1]\n current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length\n\n # find optimzier index by looking for the first {item > current_place} in the cumsum list\n opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)\n return [[opt_idx, self.trainer.optimizers[opt_idx]]]\n\n def on_after_backward(self, training_step_output, batch_idx, untouched_loss):\n is_result_obj = isinstance(training_step_output, Result)\n\n if is_result_obj:\n training_step_output.detach()\n else:\n training_step_output.batch_loss = training_step_output.batch_loss.detach()\n\n # insert after step hook\n self.trainer.call_hook(\"on_after_backward\")\n\n # when in dev debugging track the losses\n self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())\n\n def _check_training_step_output(self, training_step_output):\n if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:\n if training_step_output.grad_fn is None:\n # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...\n raise MisconfigurationException(\"In manual optimization, `training_step` should not return a Tensor\")\n\n def training_step(self, split_batch, batch_idx, opt_idx, hiddens):\n # give the PL module a result for logging\n model_ref = self.trainer.get_model()\n\n with self.trainer.profiler.profile(\"model_forward\"):\n args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)\n\n # manually capture logged metrics\n model_ref._current_fx_name = 'training_step'\n model_ref._results = Result()\n with self.trainer.profiler.profile(\"training_step\"):\n training_step_output = self.trainer.accelerator_backend.training_step(args)\n self.trainer.accelerator_backend.post_training_step()\n\n self.trainer.logger_connector.cache_logged_metrics()\n\n self._check_training_step_output(training_step_output)\n\n training_step_output = self.trainer.call_hook(\"training_step_end\", training_step_output)\n\n training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(\n training_step_output, split_batch\n )\n is_result_obj = isinstance(training_step_output, Result)\n\n if training_step_output_for_epoch_end is None:\n return None\n\n # enable empty loss when using manual opt\n closure_loss = None\n untouched_loss = None\n\n if self.trainer.train_loop.automatic_optimization:\n # accumulate loss\n # (if accumulate_grad_batches = 1 no effect)\n if is_result_obj:\n closure_loss = training_step_output.minimize\n else:\n closure_loss = training_step_output.batch_loss\n\n closure_loss = 
closure_loss / self.trainer.accumulate_grad_batches\n\n # the loss will get scaled for amp. avoid any modifications to it\n untouched_loss = closure_loss.detach().clone()\n\n # result\n result = AttributeDict(\n closure_loss=closure_loss,\n loss=untouched_loss,\n training_step_output=training_step_output,\n training_step_output_for_epoch_end=training_step_output_for_epoch_end,\n hiddens=training_step_output.hiddens,\n )\n return result\n\n def _process_training_step_output(self, training_step_output, split_batch):\n training_step_output_for_epoch_end = training_step_output\n\n # enable validation_step return None\n if training_step_output_for_epoch_end is None:\n return None, None\n\n # -----------------------------------------\n # process result return (DEPRECATE in 1.0)\n # -----------------------------------------\n if isinstance(training_step_output, Result):\n training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)\n return training_step_output_for_epoch_end, training_step_output\n\n # -----------------------------------------\n # process hybrid (1.0)\n # -----------------------------------------\n # no need for these checks in 1.0.0\n # TODO: remove checks in 1.0.0\n is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)\n is_1_0_output = is_tensor or (\"log\" not in training_step_output and \"progress_bar\" not in training_step_output)\n if is_1_0_output:\n return self._process_training_step_output_1_0(training_step_output, split_batch)\n\n # -----------------------------------------\n # process old dict (deprecate 1.0)\n # -----------------------------------------\n training_step_output = self.trainer.process_dict_result(training_step_output, train=True)\n\n training_step_output = AttributeDict(\n batch_loss=training_step_output[0],\n pbar_on_batch_end=training_step_output[1],\n log_metrics=training_step_output[2],\n callback_metrics=training_step_output[3],\n hiddens=training_step_output[4],\n )\n # if the user decides to finally reduce things in epoch_end, save raw output without graphs\n if isinstance(training_step_output_for_epoch_end, torch.Tensor):\n training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()\n else:\n training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end)\n\n return training_step_output_for_epoch_end, training_step_output\n\n def _process_training_step_output_1_0(self, training_step_output, split_batch):\n result = self.trainer.get_model()._results\n\n loss = None\n hiddens = None\n\n # handle dict return\n if isinstance(training_step_output, dict):\n loss = training_step_output.pop(\"loss\", None)\n hiddens = training_step_output.pop(\"hiddens\", None)\n result[\"extra\"] = training_step_output\n\n # handle scalar return\n elif isinstance(training_step_output, torch.Tensor):\n loss = training_step_output\n result[\"extra\"] = {}\n\n # map to results under the hood\n result.minimize = loss\n result.hiddens = hiddens\n\n # track batch for manual reduction with result\n result.track_batch_size(len(split_batch))\n\n # track metrics without grads for epoch reduction\n training_step_output_for_epoch_end = copy(result)\n training_step_output_for_epoch_end.detach()\n if self.trainer.move_metrics_to_cpu:\n training_step_output_for_epoch_end.cpu()\n\n # what flows back into the system\n training_step_output = result\n\n return training_step_output_for_epoch_end, training_step_output\n\n def _process_result(self, training_step_output, split_batch):\n 
training_step_output.track_batch_size(len(split_batch))\n m = \"\"\"\n TrainResult and EvalResult were deprecated in 0.9.1 and support will drop in 1.0.0.\n Use self.log and .write from the LightningModule to log metrics and write predictions.\n training_step can now only return a scalar (for the loss) or a dictionary with anything you want.\n\n Option 1:\n return loss\n\n Option 2:\n return {'loss': loss, 'anything_else': ...}\n\n Option 3:\n return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}\n \"\"\"\n rank_zero_warn(m)\n\n training_step_output_for_epoch_end = copy(training_step_output)\n training_step_output_for_epoch_end.detach()\n\n return training_step_output_for_epoch_end\n\n def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):\n model_ref = self.trainer.get_model()\n\n is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)\n using_native_amp = self.trainer.amp_backend == AMPType.NATIVE\n\n # native amp + lbfgs is a no go right now\n if using_native_amp and is_lbfgs:\n raise MisconfigurationException(\n 'native PyTorch amp and lbfgs are not compatible.'\n ' To request, please file a Github issue in PyTorch and tag @mcarilli'\n )\n\n # wraps into LightningOptimizer only for running step\n optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)\n\n # model hook\n model_ref.optimizer_step(\n self.trainer.current_epoch,\n batch_idx,\n optimizer,\n opt_idx,\n train_step_and_backward_closure,\n on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,\n using_native_amp=using_native_amp,\n using_lbfgs=is_lbfgs,\n )\n\n def on_before_zero_grad(self, optimizer):\n self.trainer.call_hook('on_before_zero_grad', optimizer)\n\n def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):\n self.trainer.accelerator_backend.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)\n\n def track_and_norm_grad(self, optimizer):\n # track gradient norms\n grad_norm_dic = self._track_gradient_norm()\n\n # clip gradients\n self.trainer.accelerator_backend.clip_gradients(optimizer, self.trainer.gradient_clip_val)\n self._cur_grad_norm_dict = grad_norm_dic\n\n def _track_gradient_norm(self):\n grad_norm_dict = {}\n if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:\n if float(self.trainer.track_grad_norm) > 0:\n model = self.trainer.get_model()\n grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)\n return grad_norm_dict\n\n def process_hiddens(self, opt_closure_result):\n hiddens = opt_closure_result.hiddens\n if isinstance(opt_closure_result.training_step_output, Result):\n opt_closure_result.training_step_output_for_epoch_end.drop_hiddens()\n return hiddens\n\n def tbptt_split_batch(self, batch):\n splits = [batch]\n if self.trainer.truncated_bptt_steps is not None:\n model_ref = self.trainer.get_model()\n with self.trainer.profiler.profile(\"tbptt_split_batch\"):\n splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)\n return splits\n\n def run_training_epoch(self):\n # modify dataloader if needed (ddp, etc...)\n train_dataloader = self.trainer.accelerator_backend.process_dataloader(self.trainer.train_dataloader)\n\n # track epoch output\n epoch_output = [[] for _ in range(self.num_optimizers)]\n\n train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)\n dataloader_idx = 0\n should_check_val = False\n\n for batch_idx, (batch, is_last_batch) in train_dataloader:\n\n self.trainer.batch_idx = 
batch_idx\n\n # ------------------------------------\n # TRAINING_STEP + TRAINING_STEP_END\n # ------------------------------------\n with self.trainer.profiler.profile(\"run_training_batch\"):\n batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)\n\n # when returning -1 from train_step, we end epoch early\n if batch_output.signal == -1:\n break\n\n batch_end_outputs = self.process_train_step_outputs(\n batch_output.training_step_output_for_epoch_end,\n self.early_stopping_accumulator,\n self.checkpoint_accumulator,\n )\n # hook\n # TODO: add outputs to batches\n self.on_train_batch_end(epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx)\n\n # -----------------------------------------\n # SAVE METRICS TO LOGGERS\n # -----------------------------------------\n self.trainer.logger_connector.log_train_step_metrics(batch_output)\n\n # -----------------------------------------\n # VALIDATE IF NEEDED + CHECKPOINT CALLBACK\n # -----------------------------------------\n should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)\n if should_check_val:\n self.trainer.run_evaluation()\n\n # reset stage to train\n self.trainer._set_wide_running_stage(RunningStage.TRAINING)\n\n # -----------------------------------------\n # SAVE LOGGERS (ie: Tensorboard, etc...)\n # -----------------------------------------\n self.save_loggers_on_train_batch_end()\n\n # update LR schedulers\n monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)\n self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)\n self.trainer.checkpoint_connector.has_trained = True\n\n # max steps reached, end training\n if (\n self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1\n and self._accumulated_batches_reached()\n ):\n break\n\n # end epoch early\n # stop when the flag is changed or we've gone past the amount\n # requested in the batches\n if self.trainer.should_stop:\n break\n\n self.trainer.total_batch_idx += 1\n\n # stop epoch if we limited the number of training batches\n if self._num_training_batches_reached(is_last_batch):\n break\n\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n # epoch end hook\n self.run_on_epoch_end_hook(epoch_output)\n\n # log epoch metrics\n self.trainer.logger_connector.log_train_epoch_end_metrics(\n epoch_output, self.checkpoint_accumulator, self.early_stopping_accumulator, self.num_optimizers\n )\n\n should_check_val = self.should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)\n if should_check_val:\n self.trainer.run_evaluation(on_epoch=True)\n\n # reset stage to train\n self.trainer._set_wide_running_stage(RunningStage.TRAINING)\n\n should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)\n should_train_only = self.trainer.disable_validation or should_skip_eval\n\n if should_train_only:\n # update epoch level lr_schedulers\n self.trainer.optimizer_connector.update_learning_rates(interval='epoch')\n self.check_checkpoint_callback(True)\n self.check_early_stopping_callback(True)\n\n # increment the global step once\n # progress global step according to grads progress\n self.increment_accumulated_grad_global_step()\n\n def run_training_batch(self, batch, batch_idx, dataloader_idx):\n # track grad norms\n grad_norm_dic = {}\n\n # bookkeeping\n self.trainer.hiddens = None\n\n # track all outputs across time and num of optimizers\n batch_outputs = [[] for _ in 
range(len(self.get_optimizers_iterable()))]\n\n if batch is None:\n return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)\n\n # hook\n response = self.trainer.call_hook(\"on_batch_start\")\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # hook\n response = self.trainer.call_hook(\"on_train_batch_start\", batch, batch_idx, dataloader_idx)\n if response == -1:\n return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)\n\n # lightning module hook\n splits = self.tbptt_split_batch(batch)\n\n for split_idx, split_batch in enumerate(splits):\n\n # create an iterable for optimizers and loop over them\n for opt_idx, optimizer in self.prepare_optimizers():\n\n # toggle model params + set info to logger_connector\n self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)\n\n if self.should_accumulate():\n # For gradient accumulation\n\n # -------------------\n # calculate loss (train step + train step end)\n # -------------------\n\n # automatic_optimization=True: perform ddp sync only when performing optimizer_step\n # automatic_optimization=False: don't block synchronization here\n with self.block_ddp_sync_behaviour():\n self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # ------------------------------\n # BACKWARD PASS\n # ------------------------------\n # gradient update with accumulated gradients\n\n else:\n if self.automatic_optimization:\n\n def train_step_and_backward_closure():\n result = self.training_step_and_backward(\n split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens\n )\n return None if result is None else result.loss\n\n # optimizer step\n self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)\n\n else:\n self._curr_step_result = self.training_step(\n split_batch, batch_idx, opt_idx, self.trainer.hiddens\n )\n\n if self._curr_step_result is None:\n # user decided to skip optimization\n # make sure to zero grad.\n continue\n\n batch_outputs = self._process_closure_result(\n batch_outputs=batch_outputs,\n opt_idx=opt_idx,\n )\n\n # todo: Properly aggregate grad_norm across opt_idx and split_idx\n grad_norm_dic = self._cur_grad_norm_dict\n self._cur_grad_norm_dict = None\n\n # update running loss + reset accumulated loss\n self.update_running_loss()\n\n result = AttributeDict(\n signal=0,\n grad_norm_dic=grad_norm_dic,\n training_step_output_for_epoch_end=batch_outputs,\n )\n return result\n\n @contextmanager\n def block_ddp_sync_behaviour(self, should_block_sync: bool = False):\n \"\"\"\n automatic_optimization = True\n Blocks ddp sync gradients behaviour on backwards pass.\n This is useful for skipping sync when accumulating gradients, reducing communication overhead\n\n automatic_optimization = False\n do not block ddp gradient sync when using manual optimization\n as gradients are needed within the training step\n\n Returns:\n context manager with sync behaviour off\n\n \"\"\"\n if (\n isinstance(self.trainer.training_type_plugin, ParallelPlugin)\n and (self.automatic_optimization or should_block_sync)\n ):\n with self.trainer.training_type_plugin.block_backward_sync():\n yield None\n else:\n yield None\n\n def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:\n opt_closure_result = self._curr_step_result\n\n if opt_closure_result is not None:\n\n # cache metrics\n 
self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)\n\n # track hiddens\n self.trainer.hiddens = self.process_hiddens(opt_closure_result)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self.trainer.detect_nan_tensors(opt_closure_result.loss)\n\n # track all the outputs across all steps\n batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0\n batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)\n\n if self.automatic_optimization:\n # track total loss for logging (avoid mem leaks)\n self.accumulated_loss.append(opt_closure_result.loss)\n\n self._curr_step_result = None\n\n return batch_outputs\n\n def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):\n \"\"\"\n wrap the forward step in a closure so second order methods work\n \"\"\"\n with self.trainer.profiler.profile(\"training_step_and_backward\"):\n # lightning module hook\n result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)\n self._curr_step_result = result\n\n if result is None:\n if self.automatic_optimization:\n self.warning_cache.warn(\"training_step returned None if it was on purpose, ignore this warning...\")\n return None\n\n if not self._skip_backward and self.trainer.train_loop.automatic_optimization:\n # backward pass\n with self.trainer.profiler.profile(\"model_backward\"):\n self.backward(result, optimizer, opt_idx)\n\n # hook - call this hook only\n # when gradients have finished to accumulate\n if not self.should_accumulate():\n self.on_after_backward(result.training_step_output, batch_idx, result.loss)\n\n # check if loss or model weights are nan\n if self.trainer.terminate_on_nan:\n self.trainer.detect_nan_tensors(result.loss)\n\n if len(self.trainer.optimizers) > 1:\n # revert back to previous state\n self.trainer.get_model().untoggle_optimizer(opt_idx)\n\n return result\n\n def backward(self, result, optimizer, opt_idx, *args, **kwargs):\n self.trainer.dev_debugger.track_event(\"backward_call\")\n\n should_accumulate = self.should_accumulate()\n\n # backward can be called manually in the training loop\n if isinstance(result, torch.Tensor):\n self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)\n else:\n result.closure_loss = self.trainer.accelerator_backend.backward(\n result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs\n )\n\n if not self.should_accumulate():\n # track gradients\n self.track_and_norm_grad(optimizer=optimizer)\n\n def update_train_loop_lr_schedulers(self, monitor_metrics=None):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n if num_accumulated_batches_reached or num_training_batches_reached:\n # update lr\n self.trainer.optimizer_connector.update_learning_rates(interval=\"step\", monitor_metrics=monitor_metrics)\n\n def run_on_epoch_end_hook(self, epoch_output):\n # inform logger the batch loop has finished\n self.trainer.logger_connector.on_train_epoch_end()\n\n self.trainer.call_hook('on_train_epoch_end', epoch_output)\n self.trainer.call_hook('on_epoch_end')\n\n def increment_accumulated_grad_global_step(self):\n num_accumulated_batches_reached = self._accumulated_batches_reached()\n num_training_batches_reached = self._num_training_batches_reached()\n\n # progress global step according to grads progress\n if num_accumulated_batches_reached or 
num_training_batches_reached:\n self.trainer.global_step += 1\n\n def _accumulated_batches_reached(self):\n return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0\n\n def _num_training_batches_reached(self, is_last_batch=False):\n return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch\n\n def should_accumulate(self):\n # checks if backward or backward + optimizer step (via closure)\n accumulation_done = self._accumulated_batches_reached()\n is_final_batch = self._num_training_batches_reached()\n return not (accumulation_done or is_final_batch)\n\n def should_check_val_fx(self, batch_idx, is_last_batch, on_epoch=False):\n # decide if we should run validation\n is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0\n is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0\n can_check_val = self.trainer.enable_validation and is_val_check_epoch\n is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float(\"inf\")\n epoch_end_val_check = self.trainer.val_check_batch == self.trainer.num_training_batches\n\n should_check_val = ((is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop\n or is_last_batch_for_infinite_dataset\n ) if on_epoch else (is_val_check_batch and not epoch_end_val_check)\n\n return should_check_val and can_check_val\n\n def build_train_args(self, batch, batch_idx, opt_idx, hiddens):\n # enable not needing to add opt_idx to training_step\n args = [batch, batch_idx]\n\n if len(self.trainer.optimizers) > 1:\n if self.trainer.has_arg(\"training_step\", \"optimizer_idx\"):\n args.append(opt_idx)\n else:\n num_opts = len(self.trainer.optimizers)\n raise ValueError(\n f\"Your LightningModule defines {num_opts} optimizers but \"\n f'training_step is missing the \"optimizer_idx\" argument.'\n )\n\n # pass hiddens if using tbptt\n if self.trainer.truncated_bptt_steps is not None:\n args.append(hiddens)\n\n return args\n\n def save_loggers_on_train_batch_end(self):\n # when loggers should save to disk\n should_flush_logs = self.trainer.logger_connector.should_flush_logs\n if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:\n self.trainer.logger.save()\n\n def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator):\n \"\"\"\n Figure out what needs to be tracked/logged at the end of the epoch\n \"\"\"\n\n # the training step outputs a list per optimizer. 
The list contains the outputs at each time step\n # when no TBPTT is used, then the list has 1 item per batch\n # when TBPTT IS used, then the list has n items (1 per time step)\n batch_end_outputs = []\n for optimizer_idx_outputs in all_train_step_outputs:\n # extract one representative sample from each time step (1 if no tbptt) and 0th optimizer\n if len(optimizer_idx_outputs) == 0:\n continue\n\n sample_output = optimizer_idx_outputs[-1]\n\n # pull out callback info if available (ie: Results object)\n if isinstance(sample_output, dict) and \"early_stop_on\" in sample_output:\n early_stopping_accumulator.accumulate(sample_output[\"early_stop_on\"])\n\n if isinstance(sample_output, dict) and \"checkpoint_on\" in sample_output:\n checkpoint_accumulator.accumulate(sample_output[\"checkpoint_on\"])\n\n batch_end_outputs.append(optimizer_idx_outputs)\n\n return batch_end_outputs\n\n def prepare_optimizers(self):\n # in manual optimization we loop over all optimizers at once\n optimizers = self.get_optimizers_iterable()\n if not self.automatic_optimization:\n optimizers = [optimizers[0]]\n return optimizers\n\n def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):\n # set split_idx to trainer for tracking\n self.trainer.split_idx = split_idx\n\n # make sure only the gradients of the current optimizer's parameters are calculated\n # in the training step to prevent dangling gradients in multiple-optimizer setup.\n if self.automatic_optimization and len(self.trainer.optimizers) > 1:\n model = self.trainer.get_model()\n model.toggle_optimizer(optimizer, opt_idx)\n\n # use to track metrics internally\n self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)\n\n def update_running_loss(self):\n accumulated_loss = self.accumulated_loss.mean()\n\n if accumulated_loss is not None:\n # calculate running loss for display\n self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)\n\n # reset for next set of accumulated grads\n self.accumulated_loss.reset()\n", "import pickle\nfrom collections import OrderedDict\nfrom distutils.version import LooseVersion\n\nimport cloudpickle\nimport numpy as np\nimport pytest\nimport torch\nfrom torch import nn\n\nfrom pytorch_lightning.metrics.metric import Metric, MetricCollection\n\ntorch.manual_seed(42)\n\n\nclass Dummy(Metric):\n name = \"Dummy\"\n\n def __init__(self):\n super().__init__()\n self.add_state(\"x\", torch.tensor(0.0), dist_reduce_fx=None)\n\n def update(self):\n pass\n\n def compute(self):\n pass\n\n\nclass DummyList(Metric):\n name = \"DummyList\"\n\n def __init__(self):\n super().__init__()\n self.add_state(\"x\", list(), dist_reduce_fx=None)\n\n def update(self):\n pass\n\n def compute(self):\n pass\n\n\ndef test_inherit():\n Dummy()\n\n\ndef test_add_state():\n a = Dummy()\n\n a.add_state(\"a\", torch.tensor(0), \"sum\")\n assert a._reductions[\"a\"](torch.tensor([1, 1])) == 2\n\n a.add_state(\"b\", torch.tensor(0), \"mean\")\n assert np.allclose(a._reductions[\"b\"](torch.tensor([1.0, 2.0])).numpy(), 1.5)\n\n a.add_state(\"c\", torch.tensor(0), \"cat\")\n assert a._reductions[\"c\"]([torch.tensor([1]), torch.tensor([1])]).shape == (2, )\n\n with pytest.raises(ValueError):\n a.add_state(\"d1\", torch.tensor(0), 'xyz')\n\n with pytest.raises(ValueError):\n a.add_state(\"d2\", torch.tensor(0), 42)\n\n with pytest.raises(ValueError):\n a.add_state(\"d3\", [torch.tensor(0)], 'sum')\n\n with pytest.raises(ValueError):\n a.add_state(\"d4\", 42, 'sum')\n\n def 
custom_fx(x):\n return -1\n\n a.add_state(\"e\", torch.tensor(0), custom_fx)\n assert a._reductions[\"e\"](torch.tensor([1, 1])) == -1\n\n\ndef test_add_state_persistent():\n a = Dummy()\n\n a.add_state(\"a\", torch.tensor(0), \"sum\", persistent=True)\n assert \"a\" in a.state_dict()\n\n a.add_state(\"b\", torch.tensor(0), \"sum\", persistent=False)\n\n if LooseVersion(torch.__version__) >= LooseVersion(\"1.6.0\"):\n assert \"b\" not in a.state_dict()\n\n\ndef test_reset():\n\n class A(Dummy):\n pass\n\n class B(DummyList):\n pass\n\n a = A()\n assert a.x == 0\n a.x = torch.tensor(5)\n a.reset()\n assert a.x == 0\n\n b = B()\n assert isinstance(b.x, list) and len(b.x) == 0\n b.x = torch.tensor(5)\n b.reset()\n assert isinstance(b.x, list) and len(b.x) == 0\n\n\ndef test_update():\n\n class A(Dummy):\n\n def update(self, x):\n self.x += x\n\n a = A()\n assert a.x == 0\n assert a._computed is None\n a.update(1)\n assert a._computed is None\n assert a.x == 1\n a.update(2)\n assert a.x == 3\n assert a._computed is None\n\n\ndef test_compute():\n\n class A(Dummy):\n\n def update(self, x):\n self.x += x\n\n def compute(self):\n return self.x\n\n a = A()\n assert 0 == a.compute()\n assert 0 == a.x\n a.update(1)\n assert a._computed is None\n assert a.compute() == 1\n assert a._computed == 1\n a.update(2)\n assert a._computed is None\n assert a.compute() == 3\n assert a._computed == 3\n\n # called without update, should return cached value\n a._computed = 5\n assert a.compute() == 5\n\n\ndef test_forward():\n\n class A(Dummy):\n\n def update(self, x):\n self.x += x\n\n def compute(self):\n return self.x\n\n a = A()\n assert a(5) == 5\n assert a._forward_cache == 5\n\n assert a(8) == 8\n assert a._forward_cache == 8\n\n assert a.compute() == 13\n\n\nclass DummyMetric1(Dummy):\n\n def update(self, x):\n self.x += x\n\n def compute(self):\n return self.x\n\n\nclass DummyMetric2(Dummy):\n\n def update(self, y):\n self.x -= y\n\n def compute(self):\n return self.x\n\n\ndef test_pickle(tmpdir):\n # doesn't tests for DDP\n a = DummyMetric1()\n a.update(1)\n\n metric_pickled = pickle.dumps(a)\n metric_loaded = pickle.loads(metric_pickled)\n\n assert metric_loaded.compute() == 1\n\n metric_loaded.update(5)\n assert metric_loaded.compute() == 6\n\n metric_pickled = cloudpickle.dumps(a)\n metric_loaded = cloudpickle.loads(metric_pickled)\n\n assert metric_loaded.compute() == 1\n\n\ndef test_state_dict(tmpdir):\n \"\"\" test that metric states can be removed and added to state dict \"\"\"\n metric = Dummy()\n assert metric.state_dict() == OrderedDict()\n metric.persistent(True)\n assert metric.state_dict() == OrderedDict(x=0)\n metric.persistent(False)\n assert metric.state_dict() == OrderedDict()\n\n\ndef test_child_metric_state_dict():\n \"\"\" test that child metric states will be added to parent state dict \"\"\"\n\n class TestModule(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.metric = Dummy()\n self.metric.add_state('a', torch.tensor(0), persistent=True)\n self.metric.add_state('b', [], persistent=True)\n self.metric.register_buffer('c', torch.tensor(0))\n\n module = TestModule()\n expected_state_dict = {\n 'metric.a': torch.tensor(0),\n 'metric.b': [],\n 'metric.c': torch.tensor(0),\n }\n assert module.state_dict() == expected_state_dict\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"Test requires GPU.\")\ndef test_device_and_dtype_transfer(tmpdir):\n metric = DummyMetric1()\n assert metric.x.is_cuda is False\n assert metric.x.dtype == torch.float32\n\n metric = 
metric.to(device='cuda')\n assert metric.x.is_cuda\n\n metric = metric.double()\n assert metric.x.dtype == torch.float64\n\n metric = metric.half()\n assert metric.x.dtype == torch.float16\n\n\ndef test_metric_collection(tmpdir):\n m1 = DummyMetric1()\n m2 = DummyMetric2()\n\n metric_collection = MetricCollection([m1, m2])\n\n # Test correct dict structure\n assert len(metric_collection) == 2\n assert metric_collection['DummyMetric1'] == m1\n assert metric_collection['DummyMetric2'] == m2\n\n # Test correct initialization\n for name, metric in metric_collection.items():\n assert metric.x == 0, f'Metric {name} not initialized correctly'\n\n # Test every metric gets updated\n metric_collection.update(5)\n for name, metric in metric_collection.items():\n assert metric.x.abs() == 5, f'Metric {name} not updated correctly'\n\n # Test compute on each metric\n metric_collection.update(-5)\n metric_vals = metric_collection.compute()\n assert len(metric_vals) == 2\n for name, metric_val in metric_vals.items():\n assert metric_val == 0, f'Metric {name}.compute not called correctly'\n\n # Test that everything is reset\n for name, metric in metric_collection.items():\n assert metric.x == 0, f'Metric {name} not reset correctly'\n\n # Test pickable\n metric_pickled = pickle.dumps(metric_collection)\n metric_loaded = pickle.loads(metric_pickled)\n assert isinstance(metric_loaded, MetricCollection)\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"Test requires GPU.\")\ndef test_device_and_dtype_transfer_metriccollection(tmpdir):\n m1 = DummyMetric1()\n m2 = DummyMetric2()\n\n metric_collection = MetricCollection([m1, m2])\n for _, metric in metric_collection.items():\n assert metric.x.is_cuda is False\n assert metric.x.dtype == torch.float32\n\n metric_collection = metric_collection.to(device='cuda')\n for _, metric in metric_collection.items():\n assert metric.x.is_cuda\n\n metric_collection = metric_collection.double()\n for _, metric in metric_collection.items():\n assert metric.x.dtype == torch.float64\n\n metric_collection = metric_collection.half()\n for _, metric in metric_collection.items():\n assert metric.x.dtype == torch.float16\n\n\ndef test_metric_collection_wrong_input(tmpdir):\n \"\"\" Check that errors are raised on wrong input \"\"\"\n m1 = DummyMetric1()\n\n # Not all input are metrics (list)\n with pytest.raises(ValueError):\n _ = MetricCollection([m1, 5])\n\n # Not all input are metrics (dict)\n with pytest.raises(ValueError):\n _ = MetricCollection({'metric1': m1, 'metric2': 5})\n\n # Same metric passed in multiple times\n with pytest.raises(ValueError, match='Encountered two metrics both named *.'):\n _ = MetricCollection([m1, m1])\n\n # Not a list or dict passed in\n with pytest.raises(ValueError, match='Unknown input to MetricCollection.'):\n _ = MetricCollection(m1)\n\n\ndef test_metric_collection_args_kwargs(tmpdir):\n \"\"\" Check that args and kwargs gets passed correctly in metric collection,\n Checks both update and forward method\n \"\"\"\n m1 = DummyMetric1()\n m2 = DummyMetric2()\n\n metric_collection = MetricCollection([m1, m2])\n\n # args gets passed to all metrics\n metric_collection.update(5)\n assert metric_collection['DummyMetric1'].x == 5\n assert metric_collection['DummyMetric2'].x == -5\n metric_collection.reset()\n _ = metric_collection(5)\n assert metric_collection['DummyMetric1'].x == 5\n assert metric_collection['DummyMetric2'].x == -5\n metric_collection.reset()\n\n # kwargs gets only passed to metrics that it matches\n 
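# DummyMetric1.update takes x and DummyMetric2.update takes y, so the
# collection routes each keyword argument only to the metrics whose
# update signature accepts it and silently drops the rest.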
metric_collection.update(x=10, y=20)\n assert metric_collection['DummyMetric1'].x == 10\n assert metric_collection['DummyMetric2'].x == -20\n metric_collection.reset()\n _ = metric_collection(x=10, y=20)\n assert metric_collection['DummyMetric1'].x == 10\n assert metric_collection['DummyMetric2'].x == -20\n" ]
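The tests above pin down the Metric contract: states registered with add_state are restored by reset() and reduced across processes via dist_reduce_fx, update mutates them, and compute reads them out. A minimal sketch of a custom metric built on that contract; MyAccuracy is a hypothetical name, not part of the test suite:

import torch
from pytorch_lightning.metrics.metric import Metric

class MyAccuracy(Metric):
    # hypothetical metric mirroring the add_state/update/compute
    # pattern exercised by Dummy above
    def __init__(self):
        super().__init__()
        self.add_state("correct", torch.tensor(0), dist_reduce_fx="sum")
        self.add_state("total", torch.tensor(0), dist_reduce_fx="sum")

    def update(self, preds, target):
        self.correct += (preds == target).sum()
        self.total += target.numel()

    def compute(self):
        return self.correct.float() / self.total

acc = MyAccuracy()
acc.update(torch.tensor([0, 1]), torch.tensor([0, 0]))
assert acc.compute() == 0.5   # 1 of 2 predictions correct
acc.reset()
assert acc.total == 0         # reset() restores the add_state defaults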
[ [ "numpy.argmax", "numpy.cumsum" ], [ "torch.manual_seed", "torch.cuda.is_available", "torch.tensor" ] ]
adrianbouza/automl
[ "46dbd753efc8efc73ced146fe8b3bb694709dcff", "46dbd753efc8efc73ced146fe8b3bb694709dcff", "46dbd753efc8efc73ced146fe8b3bb694709dcff" ]
[ "efficientdet/keras/infer_lib.py", "efficientdet/keras/wbf_test.py", "efficientdet/efficientdet_arch.py" ]
[ "# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Inference related utilities.\"\"\"\nimport copy\nimport os\nimport time\nfrom typing import Text, Dict, Any, Optional\nfrom absl import logging\nimport numpy as np\nimport tensorflow as tf\n\nfrom .. import dataloader\nfrom .. import hparams_config\nfrom .. import utils\nfrom . import efficientdet_keras\nfrom . import label_util\nfrom . import util_keras\nfrom ..visualize import vis_utils\n\n\ndef visualize_image(image,\n boxes,\n classes,\n scores,\n label_map=None,\n min_score_thresh=0.01,\n max_boxes_to_draw=1000,\n line_thickness=2,\n **kwargs):\n \"\"\"Visualizes a given image.\n\n Args:\n image: a image with shape [H, W, C].\n boxes: a box prediction with shape [N, 4] ordered [ymin, xmin, ymax, xmax].\n classes: a class prediction with shape [N].\n scores: A list of float value with shape [N].\n label_map: a dictionary from class id to name.\n min_score_thresh: minimal score for showing. If claass probability is below\n this threshold, then the object will not show up.\n max_boxes_to_draw: maximum bounding box to draw.\n line_thickness: how thick is the bounding box line.\n **kwargs: extra parameters.\n\n Returns:\n output_image: an output image with annotated boxes and classes.\n \"\"\"\n label_map = label_util.get_label_map(label_map or 'coco')\n category_index = {k: {'id': k, 'name': label_map[k]} for k in label_map}\n img = np.array(image)\n vis_utils.visualize_boxes_and_labels_on_image_array(\n img,\n boxes,\n classes,\n scores,\n category_index,\n min_score_thresh=min_score_thresh,\n max_boxes_to_draw=max_boxes_to_draw,\n line_thickness=line_thickness,\n **kwargs)\n return img\n\n\nclass ExportNetwork(tf.Module):\n\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n @tf.function\n def __call__(self, imgs):\n return tf.nest.flatten(self.model(imgs, training=False))\n\n\nclass ExportModel(tf.Module):\n \"\"\"Model to be exported as SavedModel/TFLite format.\"\"\"\n\n def __init__(self, model, pre_mode='infer', post_mode='global'):\n super().__init__()\n self.model = model\n self.pre_mode = pre_mode\n self.post_mode = post_mode\n\n @tf.function\n def __call__(self, imgs):\n return self.model(\n imgs, training=False, pre_mode=self.pre_mode, post_mode=self.post_mode)\n\n\nclass ServingDriver:\n \"\"\"A driver for serving single or batch images.\n\n This driver supports serving with image files or arrays, with configurable\n batch size.\n\n Example 1. Serving streaming image contents:\n\n driver = inference.ServingDriver(\n 'efficientdet-d0', '/tmp/efficientdet-d0', batch_size=1)\n driver.build()\n for m in image_iterator():\n predictions = driver.serve_files([m])\n boxes, scores, classes, _ = tf.nest.map_structure(np.array, predictions)\n driver.visualize(m, boxes[0], scores[0], classes[0])\n # m is the new image with annotated boxes.\n\n Example 2. 
Serving batch image contents:\n\n imgs = []\n for f in ['/tmp/1.jpg', '/tmp/2.jpg']:\n imgs.append(np.array(Image.open(f)))\n\n driver = inference.ServingDriver(\n 'efficientdet-d0', '/tmp/efficientdet-d0', batch_size=len(imgs))\n driver.build()\n predictions = driver.serve(imgs)\n boxes, scores, classes, _ = tf.nest.map_structure(np.array, predictions)\n for i in range(len(imgs)):\n driver.visualize(imgs[i], boxes[i], scores[i], classes[i])\n\n Example 3: another way is to use SavedModel:\n\n # step1: export a model.\n driver = inference.ServingDriver('efficientdet-d0', '/tmp/efficientdet-d0')\n driver.build()\n driver.export('/tmp/saved_model_path')\n\n # step2: Serve a model.\n driver.load(self.saved_model_dir)\n raw_images = []\n for f in tf.io.gfile.glob('/tmp/images/*.jpg'):\n raw_images.append(np.array(PIL.Image.open(f)))\n detections = driver.serve(raw_images)\n boxes, scores, classes, _ = tf.nest.map_structure(np.array, detections)\n for i in range(len(imgs)):\n driver.visualize(imgs[i], boxes[i], scores[i], classes[i])\n \"\"\"\n\n def __init__(self,\n model_name: Text,\n ckpt_path: Optional[Text] = None,\n batch_size: int = 1,\n only_network: bool = False,\n model_params: Optional[Dict[Text, Any]] = None,\n debug: bool = False):\n \"\"\"Initialize the inference driver.\n\n Args:\n model_name: target model name, such as efficientdet-d0.\n ckpt_path: checkpoint path, such as /tmp/efficientdet-d0/.\n batch_size: batch size for inference.\n only_network: only use the network without pre/post processing.\n model_params: model parameters for overriding the config.\n debug: bool, if true, run in debug mode.\n \"\"\"\n super().__init__()\n self.model_name = model_name\n self.ckpt_path = ckpt_path\n self.batch_size = batch_size\n self.only_network = only_network\n self.debug = debug\n\n self.params = hparams_config.get_detection_config(model_name).as_dict()\n\n if model_params:\n self.params.update(model_params)\n self.params.update(dict(is_training_bn=False))\n self.label_map = self.params.get('label_map', None)\n\n self._model = None\n\n mixed_precision = self.params.get('mixed_precision', None)\n precision = utils.get_precision(\n self.params.get('strategy', None), mixed_precision)\n policy = tf.keras.mixed_precision.Policy(precision)\n tf.keras.mixed_precision.set_global_policy(policy)\n\n @property\n def model(self):\n if not self._model:\n self.build()\n return self._model\n\n @model.setter\n def model(self, model):\n self._model = model\n\n def build(self, params_override=None):\n \"\"\"Build model and restore checkpoints.\"\"\"\n params = copy.deepcopy(self.params)\n if params_override:\n params.update(params_override)\n config = hparams_config.get_efficientdet_config(self.model_name)\n config.override(params)\n if self.only_network:\n self.model = efficientdet_keras.EfficientDetNet(config=config)\n else:\n self.model = efficientdet_keras.EfficientDetModel(config=config)\n image_size = utils.parse_image_size(params['image_size'])\n self.model.build((self.batch_size, *image_size, 3))\n util_keras.restore_ckpt(self.model, self.ckpt_path,\n self.params['moving_average_decay'],\n skip_mismatch=False)\n if self.debug:\n tf.config.run_functions_eagerly(self.debug)\n\n def visualize(self, image, boxes, classes, scores, **kwargs):\n \"\"\"Visualize prediction on image.\"\"\"\n return visualize_image(image, boxes, classes.astype(int), scores,\n self.label_map, **kwargs)\n\n def benchmark(self, image_arrays, bm_runs=10, trace_filename=None):\n \"\"\"Benchmark inference 
latency/throughput.\n\n Args:\n image_arrays: a list of images in numpy array format.\n bm_runs: Number of benchmark runs.\n trace_filename: If None, specify the filename for saving trace.\n \"\"\"\n _, spec = self._get_model_and_spec()\n\n @tf.function(input_signature=[spec])\n def test_func(image_arrays):\n return self.model(image_arrays) # pylint: disable=not-callable\n\n for _ in range(3): # warmup 3 runs.\n test_func(image_arrays)\n\n start = time.perf_counter()\n for _ in range(bm_runs):\n test_func(image_arrays)\n end = time.perf_counter()\n inference_time = (end - start) / bm_runs\n\n print('Per batch inference time: ', inference_time)\n print('FPS: ', self.batch_size / inference_time)\n\n if trace_filename:\n options = tf.profiler.experimental.ProfilerOptions()\n tf.profiler.experimental.start(trace_filename, options)\n test_func(image_arrays)\n tf.profiler.experimental.stop()\n\n def serve(self, image_arrays):\n \"\"\"Serve a list of image arrays.\n\n Args:\n image_arrays: A list of image content with each image has shape [height,\n width, 3] and uint8 type.\n\n Returns:\n A list of detections.\n \"\"\"\n if isinstance(self.model, tf.lite.Interpreter):\n input_details = self.model.get_input_details()\n output_details = self.model.get_output_details()\n self.model.set_tensor(input_details[0]['index'], np.array(image_arrays))\n self.model.invoke()\n return [self.model.get_tensor(x['index']) for x in output_details]\n return self.model(image_arrays) # pylint: disable=not-callable\n\n def load(self, saved_model_dir_or_frozen_graph: Text):\n \"\"\"Load the model using saved model or a frozen graph.\"\"\"\n # Load saved model if it is a folder.\n if tf.saved_model.contains_saved_model(saved_model_dir_or_frozen_graph):\n self.model = tf.saved_model.load(saved_model_dir_or_frozen_graph)\n return\n\n if saved_model_dir_or_frozen_graph.endswith('.tflite'):\n self.model = tf.lite.Interpreter(saved_model_dir_or_frozen_graph)\n self.model.allocate_tensors()\n return\n\n # Load a frozen graph.\n def wrap_frozen_graph(graph_def, inputs, outputs):\n # https://www.tensorflow.org/guide/migrate\n imports_graph_def_fn = lambda: tf.import_graph_def(graph_def, name='')\n wrapped_import = tf.compat.v1.wrap_function(imports_graph_def_fn, [])\n import_graph = wrapped_import.graph\n return wrapped_import.prune(\n tf.nest.map_structure(import_graph.as_graph_element, inputs),\n tf.nest.map_structure(import_graph.as_graph_element, outputs))\n\n graph_def = tf.Graph().as_graph_def()\n with tf.io.gfile.GFile(saved_model_dir_or_frozen_graph, 'rb') as f:\n graph_def.ParseFromString(f.read())\n\n self.model = wrap_frozen_graph(\n graph_def,\n inputs='images:0',\n outputs=['Identity:0', 'Identity_1:0', 'Identity_2:0', 'Identity_3:0'])\n\n def freeze(self, func):\n \"\"\"Freeze the graph.\"\"\"\n # pylint: disable=g-import-not-at-top,disable=g-direct-tensorflow-import\n from tensorflow.python.framework.convert_to_constants \\\n import convert_variables_to_constants_v2_as_graph\n _, graphdef = convert_variables_to_constants_v2_as_graph(func)\n return graphdef\n\n def _get_model_and_spec(self, tflite=None):\n \"\"\"Get model instance and export spec.\"\"\"\n if self.only_network or tflite:\n image_size = utils.parse_image_size(self.params['image_size'])\n spec = tf.TensorSpec(\n shape=[self.batch_size, *image_size, 3],\n dtype=tf.float32,\n name='images')\n if self.only_network:\n export_model = ExportNetwork(self.model)\n else:\n # If export tflite, we should remove preprocessing since TFLite doesn't\n # support 
dynamic shape.\n logging.info('Export model without preprocessing.')\n # This section is only used for TFLite, so we use the applicable\n # pre_ & post_ modes.\n export_model = ExportModel(\n self.model, pre_mode=None, post_mode='tflite')\n return export_model, spec\n else:\n spec = tf.TensorSpec(\n shape=[self.batch_size, None, None, 3], dtype=tf.uint8, name='images')\n export_model = ExportModel(self.model)\n return export_model, spec\n\n def export(self,\n output_dir: Optional[Text] = None,\n tensorrt: Optional[Text] = None,\n tflite: Optional[Text] = None,\n file_pattern: Optional[Text] = None,\n num_calibration_steps: int = 2000):\n \"\"\"Export a saved model, frozen graph, and potential tflite/tensorrt model.\n\n Args:\n output_dir: the output folder for saved model.\n tensorrt: If not None, must be {'FP32', 'FP16', 'INT8'}.\n tflite: Type for post-training quantization.\n file_pattern: Glob for tfrecords, e.g. coco/val-*.tfrecord.\n num_calibration_steps: Number of post-training quantization calibration\n steps to run.\n \"\"\"\n export_model, input_spec = self._get_model_and_spec(tflite)\n image_size = utils.parse_image_size(self.params['image_size'])\n if output_dir:\n tf.saved_model.save(\n export_model,\n output_dir,\n signatures=export_model.__call__.get_concrete_function(input_spec))\n logging.info('Model saved at %s', output_dir)\n\n # also save freeze pb file.\n graphdef = self.freeze(\n export_model.__call__.get_concrete_function(input_spec))\n proto_path = tf.io.write_graph(\n graphdef, output_dir, self.model_name + '_frozen.pb', as_text=False)\n logging.info('Frozen graph saved at %s', proto_path)\n\n if tflite:\n shape = (self.batch_size, *image_size, 3)\n input_spec = tf.TensorSpec(\n shape=shape, dtype=input_spec.dtype, name=input_spec.name)\n # from_saved_model supports advanced converter features like op fusing.\n converter = tf.lite.TFLiteConverter.from_saved_model(output_dir)\n if tflite == 'FP32':\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.target_spec.supported_types = [tf.float32]\n elif tflite == 'FP16':\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.target_spec.supported_types = [tf.float16]\n elif tflite == 'INT8':\n # Enables MLIR-based post-training quantization.\n converter.experimental_new_quantizer = True\n if file_pattern:\n config = hparams_config.get_efficientdet_config(self.model_name)\n config.override(self.params)\n ds = dataloader.InputReader(\n file_pattern,\n is_training=False,\n max_instances_per_image=config.max_instances_per_image)(\n config, batch_size=self.batch_size)\n\n def representative_dataset_gen():\n for image, _ in ds.take(num_calibration_steps):\n yield [image]\n else: # Used for debugging, can remove later.\n logging.warn('Use real representative dataset instead of fake ones.')\n num_calibration_steps = 10\n def representative_dataset_gen(): # rewrite this for real data.\n for _ in range(num_calibration_steps):\n yield [tf.ones(shape, dtype=input_spec.dtype)]\n\n converter.representative_dataset = representative_dataset_gen\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.inference_input_type = tf.uint8\n # TFLite's custom NMS op isn't supported by post-training quant,\n # so we add TFLITE_BUILTINS as well.\n supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.TFLITE_BUILTINS\n ]\n converter.target_spec.supported_ops = supported_ops\n\n else:\n raise ValueError(f'Invalid tflite {tflite}: must be FP32, FP16, INT8.')\n\n tflite_path = 
os.path.join(output_dir, tflite.lower() + '.tflite')\n tflite_model = converter.convert()\n tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_model)\n logging.info('TFLite is saved at %s', tflite_path)\n\n if tensorrt:\n trt_path = os.path.join(output_dir, 'tensorrt_' + tensorrt.lower())\n conversion_params = tf.experimental.tensorrt.ConversionParams(\n max_workspace_size_bytes=(2 << 20),\n maximum_cached_engines=1,\n precision_mode=tensorrt.upper())\n converter = tf.experimental.tensorrt.Converter(\n output_dir, conversion_params=conversion_params)\n converter.convert()\n converter.save(trt_path)\n logging.info('TensorRT model is saved at %s', trt_path)\n", "# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test for wbf.\"\"\"\nfrom absl import logging\nimport tensorflow as tf\n\nfrom . import wbf\n\n\nclass WbfTest(tf.test.TestCase):\n\n def test_detection_iou_same(self):\n d1 = tf.constant([[1, 1, 1, 3, 3, 1, 1]], dtype=tf.float32)\n d2 = tf.constant([1, 1, 1, 3, 3, 1, 1], dtype=tf.float32)\n\n iou = wbf.vectorized_iou(d1, d2)\n\n self.assertAllClose(iou[0][0], 1.0)\n\n def test_detection_iou_corners(self):\n d1 = tf.constant([[1, 1, 1, 3, 3, 1, 1]], dtype=tf.float32)\n d2 = tf.constant([1, 2, 2, 4, 4, 1, 1], dtype=tf.float32)\n\n iou = wbf.vectorized_iou(d1, d2)\n\n self.assertAllClose(iou[0][0], 1.0 / 7.0)\n\n def test_detection_iou_ends(self):\n d1 = tf.constant([[1, 1, 1, 3, 2, 1, 1]], dtype=tf.float32)\n d2 = tf.constant([1, 2, 1, 4, 2, 1, 1], dtype=tf.float32)\n\n iou = wbf.vectorized_iou(d1, d2)\n\n self.assertAllClose(iou[0][0], 1.0 / 3.0)\n\n def test_detection_iou_none(self):\n d1 = tf.constant([[1, 1, 1, 3, 3, 1, 1]], dtype=tf.float32)\n d2 = tf.constant([1, 3, 3, 5, 5, 1, 1], dtype=tf.float32)\n\n iou = wbf.vectorized_iou(d1, d2)\n\n self.assertAllClose(iou[0][0], 0)\n\n def test_detection_iou_vector(self):\n vector_to_match = tf.constant(\n [\n [1, 1, 1, 3, 3, 1, 1],\n [1, 2, 2, 4, 4, 1, 1],\n [1, 3, 3, 5, 5, 1, 1],\n ],\n dtype=tf.float32,\n )\n\n detection = tf.constant([1, 1, 1, 3, 3, 1, 1], dtype=tf.float32)\n\n ious = wbf.vectorized_iou(vector_to_match, detection)\n self.assertAllClose(tf.reshape(ious, [3]), [1, 1.0 / 7.0, 0])\n\n def test_find_matching_cluster_matches(self):\n matching_cluster = tf.constant([1, 1, 1, 2, 2, 1, 1], dtype=tf.float32)\n non_matching_cluster = tf.constant([1, 3, 3, 2, 2, 1, 1], dtype=tf.float32)\n\n box = tf.constant([1, 1, 1, 2, 2, 1, 1], dtype=tf.float32)\n\n cluster_index = wbf.find_matching_cluster(\n (matching_cluster, non_matching_cluster), box)\n\n self.assertAllClose(cluster_index, 0)\n\n cluster_index = wbf.find_matching_cluster(\n (non_matching_cluster, matching_cluster), box)\n\n self.assertAllClose(cluster_index, 1)\n\n def test_find_matching_cluster_best_overlap(self):\n overlaps = tf.constant([1, 1, 1, 11, 2, 1, 1], dtype=tf.float32)\n overlaps_better = tf.constant([1, 2, 1, 12, 2, 1, 
1], dtype=tf.float32)\n\n box = tf.constant([1, 3, 1, 13, 2, 1, 1], dtype=tf.float32)\n\n cluster_index = wbf.find_matching_cluster((overlaps,), box)\n\n self.assertAllClose(cluster_index, 0)\n\n cluster_index = wbf.find_matching_cluster((overlaps, overlaps_better), box)\n\n self.assertAllClose(cluster_index, 1)\n\n def test_weighted_average(self):\n samples = tf.constant([1, 3], dtype=tf.float32)\n\n weights1 = tf.constant([0.5, 0.5], dtype=tf.float32)\n weighted_average1 = wbf.weighted_average(samples, weights1)\n\n self.assertAllClose(weighted_average1, 2)\n\n weights2 = tf.constant([1, 0], dtype=tf.float32)\n weighted_average2 = wbf.weighted_average(samples, weights2)\n\n self.assertAllClose(weighted_average2, 1)\n\n weights3 = tf.constant([1, 2], dtype=tf.float32)\n weighted_average3 = wbf.weighted_average(samples, weights3)\n\n self.assertAllClose(weighted_average3, 7.0 / 3.0)\n\n def test_average_detections(self):\n d1 = tf.constant([1, 1, 1, 2, 2, 0.3, 1], dtype=tf.float32)\n d2 = tf.constant([1, 3, 3, 4, 4, 0.7, 1], dtype=tf.float32)\n\n averaged_single_model = wbf.average_detections((d1, d2), 1)\n self.assertAllClose(averaged_single_model, [1, 2.4, 2.4, 3.4, 3.4, 0.5, 1])\n\n averaged_multi_model = wbf.average_detections((d1, d2), 3)\n self.assertAllClose(averaged_multi_model,\n [1, 2.4, 2.4, 3.4, 3.4, 0.333333, 1])\n\n averaged_single_detection = wbf.average_detections((d2,), 2)\n self.assertAllClose(averaged_single_detection, [1, 3, 3, 4, 4, 0.35, 1])\n\n def test_ensemble_boxes(self):\n d1 = tf.constant([1, 2, 1, 10, 1, 0.75, 1], dtype=tf.float32)\n d2 = tf.constant([1, 3, 1, 10, 1, 0.75, 1], dtype=tf.float32)\n d3 = tf.constant([1, 3, 1, 10, 1, 1, 2], dtype=tf.float32)\n\n ensembled = wbf.ensemble_detections({'num_classes': 3},\n tf.stack([d1, d2, d3]), 2)\n\n self.assertAllClose(ensembled,\n [[1, 2.5, 1, 10, 1, 0.75, 1], [1, 3, 1, 10, 1, 0.5, 2]])\n\n\nif __name__ == '__main__':\n logging.set_verbosity(logging.WARNING)\n tf.test.main()\n", "# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"EfficientDet model definition.\n\n[1] Mingxing Tan, Ruoming Pang, Quoc Le.\n EfficientDet: Scalable and Efficient Object Detection.\n CVPR 2020, https://arxiv.org/abs/1911.09070\n\"\"\"\nimport functools\nimport re\n\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom . import hparams_config\nfrom . 
import utils\nfrom .backbone import backbone_factory\nfrom .backbone import efficientnet_builder\nfrom .keras import fpn_configs\n\n\n################################################################################\ndef freeze_vars(variables, pattern):\n \"\"\"Removes backbone+fpn variables from the input.\n\n Args:\n variables: all the variables in training\n pattern: a reg experession such as \".*(efficientnet|fpn_cells).*\".\n\n Returns:\n var_list: a list containing variables for training\n \"\"\"\n if pattern:\n filtered_vars = [v for v in variables if not re.match(pattern, v.name)]\n if len(filtered_vars) == len(variables):\n logging.warning('%s didnt match with any variable. Please use compatible '\n 'pattern. i.e \"(efficientnet)\"', pattern)\n return filtered_vars\n return variables\n\n\ndef resample_feature_map(feat,\n name,\n target_height,\n target_width,\n target_num_channels,\n apply_bn=False,\n is_training=None,\n conv_after_downsample=False,\n strategy=None,\n data_format='channels_last'):\n \"\"\"Resample input feature map to have target number of channels and size.\"\"\"\n if data_format == 'channels_first':\n _, num_channels, height, width = feat.get_shape().as_list()\n else:\n _, height, width, num_channels = feat.get_shape().as_list()\n\n if height is None or width is None or num_channels is None:\n raise ValueError(\n 'shape[1] or shape[2] or shape[3] of feat is None (shape:{}).'.format(\n feat.shape))\n if apply_bn and is_training is None:\n raise ValueError('If BN is applied, need to provide is_training')\n\n def _maybe_apply_1x1(feat):\n \"\"\"Apply 1x1 conv to change layer width if necessary.\"\"\"\n if num_channels != target_num_channels:\n feat = tf.layers.conv2d(\n feat,\n filters=target_num_channels,\n kernel_size=(1, 1),\n padding='same',\n data_format=data_format)\n if apply_bn:\n feat = utils.batch_norm_act(\n feat,\n is_training_bn=is_training,\n act_type=None,\n data_format=data_format,\n strategy=strategy,\n name='bn')\n return feat\n\n with tf.variable_scope('resample_{}'.format(name)):\n # If conv_after_downsample is True, when downsampling, apply 1x1 after\n # downsampling for efficiency.\n if height > target_height and width > target_width:\n if not conv_after_downsample:\n feat = _maybe_apply_1x1(feat)\n height_stride_size = int((height - 1) // target_height + 1)\n width_stride_size = int((width - 1) // target_width + 1)\n\n # Use max pooling in default.\n feat = tf.layers.max_pooling2d(\n inputs=feat,\n pool_size=[height_stride_size + 1, width_stride_size + 1],\n strides=[height_stride_size, width_stride_size],\n padding='SAME',\n data_format=data_format)\n\n if conv_after_downsample:\n feat = _maybe_apply_1x1(feat)\n elif height <= target_height and width <= target_width:\n feat = _maybe_apply_1x1(feat)\n if height < target_height or width < target_width:\n if data_format == 'channels_first':\n feat = tf.transpose(feat, [0, 2, 3, 1])\n feat = tf.cast(\n tf.image.resize_nearest_neighbor(\n tf.cast(feat, tf.float32), [target_height, target_width]),\n dtype=feat.dtype)\n if data_format == 'channels_first':\n feat = tf.transpose(feat, [0, 3, 1, 2])\n else:\n raise ValueError(\n 'Incompatible target feature map size: target_height: {},'\n 'target_width: {}'.format(target_height, target_width))\n\n return feat\n\n\n###############################################################################\ndef class_net(images,\n level,\n num_classes,\n num_anchors,\n num_filters,\n is_training,\n act_type,\n separable_conv=True,\n repeats=4,\n 
survival_prob=None,\n strategy=None,\n data_format='channels_last'):\n \"\"\"Class prediction network.\"\"\"\n if separable_conv:\n conv_op = functools.partial(\n tf.layers.separable_conv2d, depth_multiplier=1,\n data_format=data_format,\n pointwise_initializer=tf.initializers.variance_scaling(),\n depthwise_initializer=tf.initializers.variance_scaling())\n else:\n conv_op = functools.partial(\n tf.layers.conv2d,\n data_format=data_format,\n kernel_initializer=tf.random_normal_initializer(stddev=0.01))\n\n for i in range(repeats):\n orig_images = images\n images = conv_op(\n images,\n num_filters,\n kernel_size=3,\n bias_initializer=tf.zeros_initializer(),\n activation=None,\n padding='same',\n name='class-%d' % i)\n images = utils.batch_norm_act(\n images,\n is_training,\n act_type=act_type,\n init_zero=False,\n strategy=strategy,\n data_format=data_format,\n name='class-%d-bn-%d' % (i, level))\n\n if i > 0 and survival_prob:\n images = utils.drop_connect(images, is_training, survival_prob)\n images = images + orig_images\n\n classes = conv_op(\n images,\n num_classes * num_anchors,\n kernel_size=3,\n bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),\n padding='same',\n name='class-predict')\n return classes\n\n\ndef box_net(images,\n level,\n num_anchors,\n num_filters,\n is_training,\n act_type,\n repeats=4,\n separable_conv=True,\n survival_prob=None,\n strategy=None,\n data_format='channels_last'):\n \"\"\"Box regression network.\"\"\"\n if separable_conv:\n conv_op = functools.partial(\n tf.layers.separable_conv2d, depth_multiplier=1,\n data_format=data_format,\n pointwise_initializer=tf.initializers.variance_scaling(),\n depthwise_initializer=tf.initializers.variance_scaling())\n else:\n conv_op = functools.partial(\n tf.layers.conv2d,\n data_format=data_format,\n kernel_initializer=tf.random_normal_initializer(stddev=0.01))\n\n for i in range(repeats):\n orig_images = images\n images = conv_op(\n images,\n num_filters,\n kernel_size=3,\n activation=None,\n bias_initializer=tf.zeros_initializer(),\n padding='same',\n name='box-%d' % i)\n images = utils.batch_norm_act(\n images,\n is_training,\n act_type=act_type,\n init_zero=False,\n strategy=strategy,\n data_format=data_format,\n name='box-%d-bn-%d' % (i, level))\n\n if i > 0 and survival_prob:\n images = utils.drop_connect(images, is_training, survival_prob)\n images = images + orig_images\n\n boxes = conv_op(\n images,\n 4 * num_anchors,\n kernel_size=3,\n bias_initializer=tf.zeros_initializer(),\n padding='same',\n name='box-predict')\n\n return boxes\n\n\ndef build_class_and_box_outputs(feats, config):\n \"\"\"Builds box net and class net.\n\n Args:\n feats: input tensor.\n config: a dict-like config, including all parameters.\n\n Returns:\n A tuple (class_outputs, box_outputs) for class/box predictions.\n \"\"\"\n\n class_outputs = {}\n box_outputs = {}\n num_anchors = len(config.aspect_ratios) * config.num_scales\n cls_fsize = config.fpn_num_filters\n with tf.variable_scope('class_net', reuse=tf.AUTO_REUSE):\n for level in range(config.min_level,\n config.max_level + 1):\n class_outputs[level] = class_net(\n images=feats[level],\n level=level,\n num_classes=config.num_classes,\n num_anchors=num_anchors,\n num_filters=cls_fsize,\n is_training=config.is_training_bn,\n act_type=config.act_type,\n repeats=config.box_class_repeats,\n separable_conv=config.separable_conv,\n survival_prob=config.survival_prob,\n strategy=config.strategy,\n data_format=config.data_format\n )\n\n box_fsize = 
config.fpn_num_filters\n with tf.variable_scope('box_net', reuse=tf.AUTO_REUSE):\n for level in range(config.min_level,\n config.max_level + 1):\n box_outputs[level] = box_net(\n images=feats[level],\n level=level,\n num_anchors=num_anchors,\n num_filters=box_fsize,\n is_training=config.is_training_bn,\n act_type=config.act_type,\n repeats=config.box_class_repeats,\n separable_conv=config.separable_conv,\n survival_prob=config.survival_prob,\n strategy=config.strategy,\n data_format=config.data_format)\n\n return class_outputs, box_outputs\n\n\ndef build_backbone(features, config):\n \"\"\"Builds backbone model.\n\n Args:\n features: input tensor.\n config: config for backbone, such as is_training_bn and backbone name.\n\n Returns:\n A dict from levels to the feature maps from the output of the backbone model\n with strides of 8, 16 and 32.\n\n Raises:\n ValueError: if backbone_name is not supported.\n \"\"\"\n backbone_name = config.backbone_name\n is_training_bn = config.is_training_bn\n if 'efficientnet' in backbone_name:\n override_params = {\n 'batch_norm':\n utils.batch_norm_class(is_training_bn, config.strategy),\n 'relu_fn':\n functools.partial(utils.activation_fn, act_type=config.act_type),\n }\n if 'b0' in backbone_name:\n override_params['survival_prob'] = 0.0\n if config.backbone_config is not None:\n override_params['blocks_args'] = (\n efficientnet_builder.BlockDecoder().encode(\n config.backbone_config.blocks))\n override_params['data_format'] = config.data_format\n model_builder = backbone_factory.get_model_builder(backbone_name)\n _, endpoints = model_builder.build_model_base(\n features,\n backbone_name,\n training=is_training_bn,\n override_params=override_params)\n u1 = endpoints[0]\n u2 = endpoints[1]\n u3 = endpoints[2]\n u4 = endpoints[3]\n u5 = endpoints[4]\n else:\n raise ValueError(\n 'backbone model {} is not supported.'.format(backbone_name))\n return {0: features, 1: u1, 2: u2, 3: u3, 4: u4, 5: u5}\n\n\ndef build_feature_network(features, config):\n \"\"\"Build FPN input features.\n\n Args:\n features: input tensor.\n config: a dict-like config, including all parameters.\n\n Returns:\n A dict from levels to the feature maps processed after feature network.\n \"\"\"\n feat_sizes = utils.get_feat_sizes(config.image_size, config.max_level)\n feats = []\n if config.min_level not in features.keys():\n raise ValueError('features.keys ({}) should include min_level ({})'.format(\n features.keys(), config.min_level))\n\n # Build additional input features that are not from backbone.\n for level in range(config.min_level, config.max_level + 1):\n if level in features.keys():\n feats.append(features[level])\n else:\n h_id, w_id = (2, 3) if config.data_format == 'channels_first' else (1, 2)\n # Adds a coarser level by downsampling the last feature map.\n feats.append(\n resample_feature_map(\n feats[-1],\n name='p%d' % level,\n target_height=(feats[-1].shape[h_id] - 1) // 2 + 1,\n target_width=(feats[-1].shape[w_id] - 1) // 2 + 1,\n target_num_channels=config.fpn_num_filters,\n apply_bn=config.apply_bn_for_resampling,\n is_training=config.is_training_bn,\n conv_after_downsample=config.conv_after_downsample,\n strategy=config.strategy,\n data_format=config.data_format\n ))\n\n utils.verify_feats_size(\n feats,\n feat_sizes=feat_sizes,\n min_level=config.min_level,\n max_level=config.max_level,\n data_format=config.data_format)\n\n with tf.variable_scope('fpn_cells'):\n for rep in range(config.fpn_cell_repeats):\n with tf.variable_scope('cell_{}'.format(rep)):\n 
logging.info('building cell %d', rep)\n new_feats = build_bifpn_layer(feats, feat_sizes, config)\n\n feats = [\n new_feats[level]\n for level in range(\n config.min_level, config.max_level + 1)\n ]\n\n utils.verify_feats_size(\n feats,\n feat_sizes=feat_sizes,\n min_level=config.min_level,\n max_level=config.max_level,\n data_format=config.data_format)\n\n return new_feats\n\n\ndef fuse_features(nodes, weight_method):\n \"\"\"Fuse features from different resolutions and return a weighted sum.\n\n Args:\n nodes: a list of tensorflow features at different levels\n weight_method: feature fusion method. One of:\n - \"attn\" - Softmax weighted fusion\n - \"fastattn\" - Fast normalzied feature fusion\n - \"sum\" - a sum of inputs\n\n Returns:\n A tensor denoting the fused feature.\n \"\"\"\n dtype = nodes[0].dtype\n\n if weight_method == 'attn':\n edge_weights = [tf.cast(tf.Variable(1.0, name='WSM'), dtype=dtype)\n for _ in nodes]\n normalized_weights = tf.nn.softmax(tf.stack(edge_weights))\n nodes = tf.stack(nodes, axis=-1)\n new_node = tf.reduce_sum(nodes * normalized_weights, -1)\n elif weight_method == 'fastattn':\n edge_weights = [\n tf.nn.relu(tf.cast(tf.Variable(1.0, name='WSM'), dtype=dtype))\n for _ in nodes\n ]\n weights_sum = tf.add_n(edge_weights)\n nodes = [nodes[i] * edge_weights[i] / (weights_sum + 0.0001)\n for i in range(len(nodes))]\n new_node = tf.add_n(nodes)\n elif weight_method == 'channel_attn':\n num_filters = int(nodes[0].shape[-1])\n edge_weights = [\n tf.cast(\n tf.Variable(lambda: tf.ones([num_filters]), name='WSM'),\n dtype=dtype) for _ in nodes\n ]\n normalized_weights = tf.nn.softmax(tf.stack(edge_weights, -1), axis=-1)\n nodes = tf.stack(nodes, axis=-1)\n new_node = tf.reduce_sum(nodes * normalized_weights, -1)\n elif weight_method == 'channel_fastattn':\n num_filters = int(nodes[0].shape[-1])\n edge_weights = [\n tf.nn.relu(tf.cast(\n tf.Variable(lambda: tf.ones([num_filters]), name='WSM'),\n dtype=dtype)) for _ in nodes\n ]\n weights_sum = tf.add_n(edge_weights)\n nodes = [nodes[i] * edge_weights[i] / (weights_sum + 0.0001)\n for i in range(len(nodes))]\n new_node = tf.add_n(nodes)\n elif weight_method == 'sum':\n new_node = tf.add_n(nodes)\n else:\n raise ValueError(\n 'unknown weight_method {}'.format(weight_method))\n\n return new_node\n\n\ndef build_bifpn_layer(feats, feat_sizes, config):\n \"\"\"Builds a feature pyramid given previous feature pyramid and config.\"\"\"\n p = config # use p to denote the network config.\n if p.fpn_config:\n fpn_config = p.fpn_config\n else:\n fpn_config = fpn_configs.get_fpn_config(p.fpn_name, p.min_level,\n p.max_level, p.fpn_weight_method)\n\n num_output_connections = [0 for _ in feats]\n for i, fnode in enumerate(fpn_config.nodes):\n with tf.variable_scope('fnode{}'.format(i)):\n logging.info('fnode %d : %s', i, fnode)\n new_node_height = feat_sizes[fnode['feat_level']]['height']\n new_node_width = feat_sizes[fnode['feat_level']]['width']\n nodes = []\n for idx, input_offset in enumerate(fnode['inputs_offsets']):\n input_node = feats[input_offset]\n num_output_connections[input_offset] += 1\n input_node = resample_feature_map(\n input_node, '{}_{}_{}'.format(idx, input_offset, len(feats)),\n new_node_height, new_node_width, p.fpn_num_filters,\n p.apply_bn_for_resampling, p.is_training_bn,\n p.conv_after_downsample,\n strategy=p.strategy,\n data_format=config.data_format)\n nodes.append(input_node)\n\n new_node = fuse_features(nodes, fpn_config.weight_method)\n\n with 
tf.variable_scope('op_after_combine{}'.format(len(feats))):\n if not p.conv_bn_act_pattern:\n new_node = utils.activation_fn(new_node, p.act_type)\n\n if p.separable_conv:\n conv_op = functools.partial(\n tf.layers.separable_conv2d, depth_multiplier=1)\n else:\n conv_op = tf.layers.conv2d\n\n new_node = conv_op(\n new_node,\n filters=p.fpn_num_filters,\n kernel_size=(3, 3),\n padding='same',\n use_bias=not p.conv_bn_act_pattern,\n data_format=config.data_format,\n name='conv')\n\n new_node = utils.batch_norm_act(\n new_node,\n is_training_bn=p.is_training_bn,\n act_type=None if not p.conv_bn_act_pattern else p.act_type,\n data_format=config.data_format,\n strategy=p.strategy,\n name='bn')\n\n feats.append(new_node)\n num_output_connections.append(0)\n\n output_feats = {}\n for l in range(p.min_level, p.max_level + 1):\n for i, fnode in enumerate(reversed(fpn_config.nodes)):\n if fnode['feat_level'] == l:\n output_feats[l] = feats[-1 - i]\n break\n return output_feats\n\n\ndef efficientdet(features, model_name=None, config=None, **kwargs):\n \"\"\"Build EfficientDet model.\"\"\"\n if not config and not model_name:\n raise ValueError('please specify either model name or config')\n\n if not config:\n config = hparams_config.get_efficientdet_config(model_name)\n elif isinstance(config, dict):\n config = hparams_config.Config(config) # wrap dict in Config object\n\n if kwargs:\n config.override(kwargs)\n\n logging.info(config)\n\n # build backbone features.\n features = build_backbone(features, config)\n logging.info('backbone params/flops = {:.6f}M, {:.9f}B'.format(\n *utils.num_params_flops()))\n\n # build feature network.\n fpn_feats = build_feature_network(features, config)\n logging.info('backbone+fpn params/flops = {:.6f}M, {:.9f}B'.format(\n *utils.num_params_flops()))\n\n # build class and box predictions.\n class_outputs, box_outputs = build_class_and_box_outputs(fpn_feats, config)\n logging.info('backbone+fpn+box params/flops = {:.6f}M, {:.9f}B'.format(\n *utils.num_params_flops()))\n\n return class_outputs, box_outputs\n" ]
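The 'fastattn' branch of fuse_features above avoids a softmax by passing the learned per-edge scalars through a ReLU and dividing by their sum plus a small epsilon. A numpy sketch of that normalization; fast_attention_fuse is a hypothetical standalone helper and the shapes and values are illustrative:

import numpy as np

def fast_attention_fuse(nodes, edge_weights, eps=1e-4):
    # ReLU the learned scalars, then normalize by their sum so the
    # fused weights stay positive and bounded
    w = np.maximum(np.asarray(edge_weights, dtype=np.float64), 0.0)
    w = w / (w.sum() + eps)   # eps matches the 0.0001 in fuse_features
    return sum(wi * ni for wi, ni in zip(w, nodes))

feats = [np.ones((8, 8, 64)), 2.0 * np.ones((8, 8, 64))]
fused = fast_attention_fuse(feats, [1.0, 1.0])
# equal weights give (almost) the mean of the two inputs
assert np.allclose(fused, 1.5, atol=1e-3)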
[ [ "tensorflow.io.gfile.GFile", "tensorflow.lite.TFLiteConverter.from_saved_model", "tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2_as_graph", "tensorflow.import_graph_def", "tensorflow.ones", "tensorflow.function", "tensorflow.profiler.experimental.stop", "tensorflow.profiler.experimental.start", "tensorflow.keras.mixed_precision.Policy", "tensorflow.config.run_functions_eagerly", "numpy.array", "tensorflow.saved_model.contains_saved_model", "tensorflow.profiler.experimental.ProfilerOptions", "tensorflow.lite.Interpreter", "tensorflow.nest.map_structure", "tensorflow.experimental.tensorrt.Converter", "tensorflow.saved_model.load", "tensorflow.keras.mixed_precision.set_global_policy", "tensorflow.TensorSpec", "tensorflow.Graph", "tensorflow.compat.v1.wrap_function", "tensorflow.io.write_graph" ], [ "tensorflow.stack", "tensorflow.constant", "tensorflow.test.main", "tensorflow.reshape" ], [ "tensorflow.compat.v1.cast", "tensorflow.compat.v1.transpose", "numpy.log", "tensorflow.compat.v1.add_n", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.stack", "tensorflow.compat.v1.zeros_initializer", "tensorflow.compat.v1.layers.max_pooling2d", "tensorflow.compat.v1.random_normal_initializer", "tensorflow.compat.v1.reduce_sum", "tensorflow.compat.v1.initializers.variance_scaling", "tensorflow.compat.v1.layers.conv2d", "tensorflow.compat.v1.ones", "tensorflow.compat.v1.Variable" ] ]
fossabot/onnx-mlir
[ "ed1377c26b1be69b9b0ed6942025197491ca6c7e" ]
[ "utils/gen_doc.py" ]
[ "#!/usr/bin/env python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict, OrderedDict\nfrom io import StringIO\nimport io\nimport os\nimport sys\nimport datetime\nimport argparse\n\nimport numpy as np # type: ignore\n\nfrom onnx import defs, FunctionProto, helper, OperatorStatus\nfrom onnx.defs import OpSchema, ONNX_DOMAIN, ONNX_ML_DOMAIN\nfrom onnx.backend.test.case import collect_snippets\nfrom onnx.backend.sample.ops import collect_sample_implementations\nfrom typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dry-run-onnx-ops\",\n help=\"Output ONNXOps.td.inc content to stdout.\",\n action=\"store_true\",\n default=False)\nparser.add_argument(\"--dry-run-op-build-table\",\n help=\"Output OpBuildTable.inc content to stdout.\",\n action=\"store_true\",\n default=False)\nargs = parser.parse_args()\n\n# Manual specification of attribute defaults.\nspecial_attr_defaults = dict([\n # (\"AveragePool.kernel_shape\", ('ints', '{}')),\n # (\"MaxPool.kernel_shape\", ('ints', '{}')),\n # (\"Cast.to\", ('int', '0')),\n # (\"Concat.axis\", ('int', '0')),\n # (\"Conv.group\", ('int', '1')),\n # (\"Unsqueeze.axes\", ('ints', '{}')),\n # (\"RNN.activation_alpha\", ('floats', '{}')),\n # (\"RNN.activation_beta\", ('floats', '{}')),\n])\n\n# Special operation importing handlers.\nspecial_op_handler = dict([\n (\"MaxPool\", \"ImportNodeMaxPool\"),\n (\"BatchNormalization\", \"ImportNodeBatchNormalization\"),\n (\"Pad\", \"ImportNodePad\"),\n (\"Reshape\", \"ImportNodeReshape\"),\n #(\"Transpose\", \"ImportNodeTranspose\")\n])\n\n# Operations supporting shape inference.\nOpsWithShapeInference = [\n 'Exp', 'Tanh', 'Sinh', 'Cosh', 'Sigmoid', 'Relu', 'Add', 'Mul', 'Div',\n 'Sub', 'And', 'Or', 'Xor', 'Sum', 'Max', 'Min', 'MatMul', 'Gemm',\n 'LeakyRelu', 'Elu', 'Selu', 'HardSigmoid', 'Reshape', 'Reciprocal',\n 'Identity', 'Cos', 'Log', 'Transpose', 'Softmax', 'ReduceMax', 'ReduceMin',\n 'ReduceProd', 'ReduceSum', 'Softplus', 'Softsign', 'Sqrt', 'Unsqueeze',\n 'Sign', 'Constant', 'AveragePool', 'Abs', 'Conv', 'Concat', 'Neg'\n]\n\n# Operations supporting canonicalization.\nOpsWithCanonicalizer = ['Add', 'Identity', 'Gemm', 'Conv']\n\n# Operations who have operands that, if produced by constant operations, should\n# be promoted to become an attribute (via attribute promotion).\n#\n# For each operation, a key/value pair is used to specify how attribute promotion\n# should proceed. The key is the operation's name and the value is a list of\n# tuples, whose first item is the attribute/operand name, and the second item is\n# the index at which such operand occurs in the list of the operation's inputs.\nOpsWithPromotableConstOperands = {\"Reshape\": [(\"shape\", 1)]}\n\n# Add an Op in this list if the Op needs result type deduction which is required\n# when writing declarative rewriting rules. 
Deduced type is always\n# an UnrankedTensorType whose element type is the same as the first operand's\n# element type.\n#\n# Currenlty, there are only two build methods generated:\n# - one with operands and attributes having a separate parameter, and\n# - one with operands and attributes having aggregated parameters.\ncustom_builder_ops_list = ['Abs', 'Mul', 'Exp', 'ReduceSum', 'ReduceSumSquare']\n\nSNIPPETS = collect_snippets()\nSAMPLE_IMPLEMENTATIONS = collect_sample_implementations()\nONNX_ML = not bool(os.getenv('ONNX_ML') == '0')\n\nONNX_ML = False\nsys.stderr.write(\"ONNX_ML {}\\n\".format(ONNX_ML))\n\nif ONNX_ML:\n ext = '-ml.md'\nelse:\n ext = '.md'\n\n\ndef should_render_domain(domain): # type: (Text) -> bool\n if domain == ONNX_ML_DOMAIN and not ONNX_ML:\n return False\n elif ONNX_ML and domain != ONNX_ML_DOMAIN:\n return False\n return True\n\n\ndef display_attr_type(v): # type: (OpSchema.AttrType) -> Text\n assert isinstance(v, OpSchema.AttrType)\n s = Text(v)\n s = s[s.rfind('.') + 1:].lower()\n if s[-1] == 's':\n s = 'list of ' + s\n return s\n\n\ndef get_unique_output_name(schema, name):\n for input in schema.inputs:\n if input.name == name:\n return 'out_' + name\n return name\n\n\ndef onnx_attr_type_to_mlir_attr_type(t):\n onnx_attr_type = Text(t)\n onnx_attr_type = onnx_attr_type[onnx_attr_type.rfind('.') + 1:].lower()\n\n if onnx_attr_type == 'int':\n mlir_attr_type = 'I64Attr'\n elif onnx_attr_type == 'float':\n mlir_attr_type = 'F32Attr'\n elif onnx_attr_type == 'ints':\n mlir_attr_type = 'I64ArrayAttr'\n elif onnx_attr_type == 'floats':\n mlir_attr_type = 'F32ArrayAttr'\n elif onnx_attr_type == \"string\":\n mlir_attr_type = 'StrAttr'\n elif onnx_attr_type == \"strings\":\n mlir_attr_type = 'StrArrayAttr'\n else:\n mlir_attr_type = 'AnyAttr'\n #TODO: tensor and sparse tensor\n return mlir_attr_type\n\n\n#TODO: any better way to do this.\ndef tblgen_attr_type_to_cpp_type(t):\n if 'I64Attr' in t:\n cpp_type = 'IntegerAttr'\n elif 'F32Attr' in t:\n cpp_type = 'FloatAttr'\n elif 'I64ArrayAttr' in t or 'F32ArrayAttr' in t:\n cpp_type = 'ArrayAttr'\n elif 'StrAttr' in t:\n cpp_type = 'StringAttr'\n elif 'strings' in t:\n cpp_type = 'ArrayAttr'\n else:\n cpp_type = 'Attribute'\n return cpp_type\n\n\ndef tblgen_operand_type_to_cpp_type(op_type):\n if op_type.startswith('Variadic'):\n mytype = 'ValueRange'\n else:\n mytype = 'Value'\n return mytype\n\n\ndef np_type_to_tblgen_attr_type(tstr):\n tfrom = np.array([\n 'bool', 'int8', 'int16', 'int32', 'int64', 'unkown', 'float16',\n 'float', 'double'\n ])\n tto = np.array(\n ['I1', 'I8', 'I16', 'I32', 'I64', 'BF16', 'F16', 'F32', 'F64'])\n index = -1\n for i in range(len(tfrom)):\n if tfrom[i] in tstr:\n index = i\n break\n if index == -1:\n print(\"error\", tstr)\n return ''\n else:\n return tto[i]\n\n\ndef get_allowed_elem_types(schema, input):\n allowed_types_str = None\n return allowed_types_str\n # TODO: enable type constraints.\n # if input.typeStr :\n # tstr = input.typeStr\n # else :\n # return allwedTypeStr\n # if schema.type_constraints:\n # for type_constraint in schema.type_constraints:\n # if type_constraint.type_param_str != tstr :\n # continue\n # allowedTypes = type_constraint.allowed_type_strs\n # allowedTypeStr=''\n # if (len(allowedTypes) > 0):\n # t = convert_type(allowedTypes[0])\n # if t == '' :\n # return ''\n # allowedTypeStr += t\n # for allowedType in allowedTypes[1:]:\n # t = convert_type(allowedType)\n # if t == '' :\n # return ''\n # if not t in allowedTypeStr :\n # allowedTypeStr += ', '+t\n #\n # 
return allowedTypeStr\n #\n # return allowedTypeStr\n\n\ndef inc_indent(indent=None):\n return \"\" if indent is None else indent + ' ' * 2\n\n\ndef dec_indent(indent):\n return indent[:-2]\n\n\ndef join_args(args):\n return \", \".join(args)\n\n\ndef get_operands_or_results(schema, is_input):\n value_list = schema.inputs if is_input else schema.outputs\n if not value_list:\n return OrderedDict()\n\n def any_type_of(types):\n assert isinstance(types, list)\n if len(types) == 1:\n return types[0]\n else:\n return \"AnyTypeOf<[{}]>\".format(\", \".join(types))\n\n name_to_types = OrderedDict()\n for i, value in enumerate(value_list):\n elem_types = get_allowed_elem_types(schema, value)\n\n if elem_types is None:\n types = [\"AnyMemRef\", \"AnyTensor\"]\n else:\n types = [\"TensorOf<[{}]>\", \"MemRefOf<[{}]>\"]\n types = list(map(lambda x: x.format(elem_types), types))\n\n # If operand is promotable to an attribute, then it must be\n # nullable in case it migrates to be an attribute.\n if schema.name in OpsWithPromotableConstOperands:\n idxs = dict(OpsWithPromotableConstOperands[schema.name]).values()\n if i in idxs:\n types.append(\"NoneType\")\n\n if OpSchema.FormalParameterOption.Optional == value.option:\n types.append(\"NoneType\")\n elif OpSchema.FormalParameterOption.Variadic == value.option:\n if value.isHomogeneous:\n types = [\"Variadic<{}>\".format(any_type_of(types))]\n else:\n #TODO handle(variadic, heterogeneous) \"\n sys.stderr.write(\"warning: (variadic, heterogeneous) for\" + schema.name +\n ' ' + value.name + \"\\n\")\n\n # Since output name can coincide with that of an input, we explicitly\n # append a suffix \"_out\" to such names for disambiguation.\n if is_input:\n value_name = value.name\n else:\n value_name = get_unique_output_name(schema, value.name)\n\n name_to_types[value_name] = any_type_of(types)\n return name_to_types\n\n\ndef get_attrs(schema):\n def get_attr_type_optional(attr_type):\n return 'OptionalAttr<{}>'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type))\n\n def get_attr_type_with_default(attr_type, attr_default):\n return 'DefaultValuedAttr<{}, \"{}\">'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)\n\n if not schema.attributes:\n return OrderedDict()\n\n name_to_type = OrderedDict()\n for _, attr in sorted(schema.attributes.items()):\n qualified_attr_name = \"{}.{}\".format(schema.name, attr.name)\n if qualified_attr_name in special_attr_defaults:\n name_to_type[attr.name] = get_attr_type_with_default(\n *special_attr_defaults[qualified_attr_name])\n\n # option holds either required or default value\n elif attr.required:\n name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(\n attr.type)\n elif attr.default_value.name:\n\n def format_value(value): # type: (Any) -> Text\n if isinstance(value, float):\n formatted = str(np.round(value, 5))\n # use default formatting, unless too long.\n if (len(formatted) > 10):\n formatted = str(\"({:e})\".format(value))\n return formatted\n elif isinstance(\n value,\n (bytes, bytearray)) and sys.version_info[0] == 3:\n return str(value.decode('utf-8'))\n return str(value)\n\n default_value = helper.get_attribute_value(attr.default_value)\n if isinstance(default_value, list):\n default_value = [format_value(val) for val in default_value]\n default_value_str = '{}'.format(default_value)\n default_value_str = default_value_str.replace('[', '{', 1)\n default_value_str = default_value_str.replace(']', '}', 1)\n if Text(attr.type) == \"AttrType.STRINGS\":\n default_value_str = 
default_value_str.replace(\"'\", '\\\\\"')\n else:\n default_value_str = default_value_str.replace(\"'\", '')\n else:\n default_value = format_value(default_value)\n default_value_str = default_value\n\n name_to_type[attr.name] = get_attr_type_with_default(\n attr.type, default_value_str)\n else:\n name_to_type[attr.name] = get_attr_type_optional(attr.type)\n return name_to_type\n\n\ndef get_promotable_const_operands_func(s, indent, const_operands_name_to_idx):\n cpp_name_to_idx_literal = \"{\" + \", \".join([\n \"{{\\\"{}\\\", {}}}\".format(*name_to_idx)\n for name_to_idx in const_operands_name_to_idx\n ]) + \"}\"\n\n s += indent + \"let extraClassDeclaration = [{\\n\"\n indent = inc_indent(indent)\n s += indent + \"std::map<std::string, size_t> promotableConstOperands() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(cpp_name_to_idx_literal)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n indent = dec_indent(indent)\n s += indent + \"}];\\n\"\n\n return s\n\n\ndef gen_op_def(schema):\n indent = inc_indent()\n s = 'def ONNX{0}Op:ONNX_Op<\"{0}\",\\n'.format(schema.name)\n\n # Generate decl for op traits.\n traits = [\"NoSideEffect\"]\n if schema.name in OpsWithShapeInference:\n traits.append(\"DeclareOpInterfaceMethods<ShapeInferenceOpInterface>\")\n if schema.name in OpsWithPromotableConstOperands.keys():\n traits.append(\"OpInterface<\\\"PromotableConstOperandsOpInterface\\\">\")\n s += inc_indent(indent) + '[{}]> {{\\n'.format(join_args(traits))\n\n # Generate decl for canonicalizer.\n indent = inc_indent(indent)\n if schema.name in OpsWithCanonicalizer:\n s += indent + 'let hasCanonicalizer = 1;\\n'\n\n # Generate decl for summary.\n s += indent + 'let summary = \"ONNX {} operation\";\\n'.format(schema.name)\n\n # Generate description.\n s += indent + 'let description = [{\\n'\n if schema.doc:\n lines = schema.doc.lstrip().splitlines()\n for line in lines:\n escaped_line = line.replace('\"', '\\\\\"')\\\n .replace('}]', '\\\\}\\\\]')\n s += indent + '\"{}\"\\n'.format(escaped_line)\n s += indent + '}];\\n'\n\n # Generate ins (consisting of operands and attributes).\n ins = get_operands_or_results(schema, is_input=True)\n ins.update(get_attrs(schema))\n ins_strs = [\"{1}:${0}\".format(*i) for i in ins.items()]\n s += indent + 'let arguments = (ins {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(ins_strs))\n\n # Generate outs (operation results).\n outs = get_operands_or_results(schema, is_input=False)\n outs_strs = [\"{1}:${0}\".format(*i) for i in outs.items()]\n s += indent + 'let results = (outs {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(outs_strs))\n\n # add custom builders\n # use element type of the first operand to construct an UnrankedTensorType for the output.\n if schema.name in custom_builder_ops_list:\n if len(ins) == 0:\n raise RuntimeWarning(\n \"warning: not generate custom build methods for \" +\n schema.name + \" since it does not have operands.\")\n else:\n s += indent + 'let builders = [\\n'\n # Custom builders with operands and attributes having a seperate parameter.\n # E.g. 
OpBuilder<\"Builder *builder, OperationState &state, Value X, Value, Y, Attribute A\", [{}]>\n indent = inc_indent(indent)\n s += indent + 'OpBuilder<\"Builder *builder, OperationState &state'\n operands_dict = get_operands_or_results(schema, is_input=True)\n for name, ty in operands_dict.items():\n s += ', {} {}'.format(tblgen_operand_type_to_cpp_type(ty),\n name)\n for name, ty in get_attrs(schema).items():\n s += ', {} {}'.format(tblgen_attr_type_to_cpp_type(ty), name)\n s += '\", [{\\n'\n indent = inc_indent(indent)\n\n # Get output type from first operand's type.\n first_operand_name = list(ins.items())[0][0]\n s += indent + 'auto elementType = {}.getType().cast<TensorType>().getElementType();\\n'.format(\n first_operand_name)\n s += indent + 'build(builder, state, UnrankedTensorType::get(elementType)'\n for name, _ in ins.items():\n s += ', ' + name\n s += ');\\n'\n indent = dec_indent(indent)\n s += indent + '}]>,\\n'\n\n # Custom builders with all operands and attributes having aggregate parameters.\n # E.g. OpBuilder<\"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes\", [{}]>'\n s += indent + 'OpBuilder<\"Builder *builder, OperationState &state, ValueRange operands, ArrayRef<NamedAttribute> attributes\", [{\\n'\n indent = inc_indent(indent)\n s += indent + 'auto elementType = operands[0].getType().cast<TensorType>().getElementType();\\n'\n s += indent + 'std::vector<mlir::Type> outputTypes;\\n'\n s += indent + 'outputTypes.emplace_back(UnrankedTensorType::get(elementType));\\n'\n s += indent + 'build(builder, state, outputTypes, operands, attributes);\\n'\n indent = dec_indent(indent)\n s += indent + '}]>'\n\n s += '\\n' + indent + '];\\n'\n\n if schema.name in OpsWithPromotableConstOperands:\n s = get_promotable_const_operands_func(\n s, indent, OpsWithPromotableConstOperands[schema.name])\n s += '}\\n\\n'\n return s\n\n\n\"\"\"\nspecial cases:\n* Split: attr split default value: sizeof(output1) namely 1\n* Conv: attr dilations default value is {num_dim of first input - 2, 1}\n* Conv: attr kernel_shape type is ints\n* Transpose: attr perm default value is {} empty int list\n\"\"\"\n\n\ndef gen_op_importer(schema, file):\n indent = inc_indent()\n s = indent + 'if (opName == \"' + schema.name + '\")\\n'\n\n expected_num_operands = len(schema.inputs)\n expected_num_results = len(schema.outputs)\n for input in schema.inputs:\n if OpSchema.FormalParameterOption.Variadic == input.option:\n expected_num_operands = -1\n for output in schema.outputs:\n if OpSchema.FormalParameterOption.Variadic == output.option:\n expected_num_results = -1\n\n handler_func = special_op_handler.get(\n schema.name, \"buildOperation<mlir::ONNX{}Op>\".format(schema.name))\n\n # Special handlers currently require expected num operands/results to be specified.\n # TODO: remove special handlers.\n args = [\"node\"]\n if expected_num_operands != -1 or expected_num_results != -1 or \"buildOperation\" not in handler_func:\n args.append(\n \"/* expected_num_operands = */ {}\".format(expected_num_operands))\n args.append(\n '/* expected_num_results = */ {}'.format(expected_num_results))\n s += inc_indent(indent) + \"return {}({});\\n\".format(\n handler_func, \", \".join(args))\n\n file.write(s)\n\n\ndef build_operator_schemas():\n # domain -> support level -> name -> [schema]\n index = defaultdict(lambda: defaultdict(lambda: defaultdict(\n list))) # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]\n for schema in defs.get_all_schemas_with_history():\n 
index[schema.domain][int(\n schema.support_level)][schema.name].append(schema)\n\n # Preprocess the Operator Schemas\n # [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]\n operator_schemas = list(\n ) # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]\n exsting_ops = set() # type: Set[Text]\n for domain, _supportmap in sorted(index.items()):\n if not should_render_domain(domain):\n continue\n\n processed_supportmap = list()\n for _support, _namemap in sorted(_supportmap.items()):\n processed_namemap = list()\n for n, unsorted_versions in sorted(_namemap.items()):\n versions = sorted(unsorted_versions,\n key=lambda s: s.since_version)\n schema = versions[-1]\n if schema.name in exsting_ops:\n continue\n exsting_ops.add(schema.name)\n processed_namemap.append((n, schema, versions))\n processed_supportmap.append((_support, processed_namemap))\n operator_schemas.append((domain, processed_supportmap))\n return operator_schemas\n\n\ndef main(args): # type: (Type[Args]) -> None\n curr_utc_time = datetime.datetime.now(\n datetime.timezone.utc).strftime(\"%m/%d/%Y, %H:%M:%S\")\n autogen_warning = (\n '//********************************************************\\n'\n '// Do not modify this file directly.\\n'\n '// This file is automatically generated via script.\\n'\n '// Details can be found in docs/readonnxdefs.md .\\n'\n '//********************************************************\\n\\n')\n autogen_warning = autogen_warning.format(curr_utc_time)\n\n op_def = args.op_def\n op_def.write(autogen_warning)\n\n op_importer = args.op_importer\n op_importer.write(autogen_warning)\n\n for domain, supportmap in build_operator_schemas():\n for _, namemap in supportmap:\n for op_type, schema, versions in namemap:\n gen_op_importer(schema, op_importer)\n r = gen_op_def(schema)\n op_def.write(r)\n\nif __name__ == '__main__':\n curr_dir = os.path.dirname(os.path.realpath(__file__))\n\n class Args(object):\n if args.dry_run_onnx_ops:\n op_def = StringIO()\n else:\n op_def_file_path = os.path.join(curr_dir, 'ONNXOps.td.inc')\n op_def = io.open(op_def_file_path, 'w', newline='')\n\n if args.dry_run_op_build_table:\n op_importer = StringIO()\n else:\n op_importer_file_path = os.path.join(curr_dir, 'OpBuildTable.inc')\n op_importer = io.open(op_importer_file_path, 'w', newline='')\n main(Args)\n\n if args.dry_run_onnx_ops:\n sys.stdout.write(Args.op_def.getvalue())\n if args.dry_run_op_build_table:\n sys.stdout.write(Args.op_importer.getvalue())\n\n" ]
[ [ "numpy.round", "numpy.array" ] ]
alexhunterlang/natural_bm
[ "b2a1cb15694f4f3a80a3a1cc6f8423892563806d", "b2a1cb15694f4f3a80a3a1cc6f8423892563806d" ]
[ "natural_bm/datasets/fast.py", "tests/natural_bm/test_datasets.py" ]
[ "\"\"\"Simplified version of MNIST that is useful for demos and testing \"\"\"\n\n#%%\nimport numpy as np\n\ntry:\n import PIL.Image as Image\nexcept ImportError:\n import Image\n\nfrom natural_bm.datasets.common import Dataset, sample_data, threshold_data, convert2uint8\nfrom natural_bm.datasets import mnist\n\n\n#%%\nclass Fast(Dataset):\n def __init__(self, datatype):\n super().__init__('fast', datatype)\n\n def _create_probability(self):\n # Start from the MNIST probabilities\n prob = mnist.MNIST('probability')\n mnist_dataset = prob.dataset_dict\n\n def shrink_data(data, lbl, n_sample):\n # only keep 0's and 1's\n # subsample to 14 by 14\n # then just drop first 2, last 2 rows/cols since mainly zero\n\n new_data = np.zeros((2*n_sample, 10**2), dtype='float32')\n new_lbl = np.concatenate((np.zeros((n_sample, )),\n np.ones((n_sample, )))).astype('int32')\n\n index0 = np.where(lbl == 0)[0][0:n_sample]\n index1 = np.where(lbl == 1)[0][0:n_sample]\n index = np.concatenate((index0, index1))\n\n for i in range(new_data.shape[0]):\n img = Image.fromarray(data[index[i]].reshape((28, 28)))\n img_down = img.resize((14, 14))\n temp = np.asarray(img_down)\n temp = temp[:, 2:-2]\n temp = temp[2:-2]\n new_data[i] = temp.flatten()\n\n return new_data, new_lbl\n\n dataset = {}\n for dset in ['train', 'valid', 'test']:\n if dset == 'train':\n num_samples = 500\n else:\n num_samples = 50\n data, lbl = shrink_data(mnist_dataset[dset+'.data'],\n mnist_dataset[dset+'.lbl'],\n num_samples)\n dataset[dset+'.data'] = data\n dataset[dset+'.lbl'] = lbl\n\n # save the dataset\n np.savez_compressed(self.savename, **dataset)\n\n def _create_sampled(self):\n # Start from the probabilities\n prob = Fast('probability')\n datasets = prob.dataset_dict\n\n # do the sampling\n datasets = sample_data(datasets)\n\n # reduce precision, only need uint8\n datasets = convert2uint8(datasets)\n\n # Save the dataset\n np.savez_compressed(self.savename, **datasets)\n\n def _create_threshold(self):\n # Start from the probabilities\n prob = Fast('probability')\n datasets = prob.dataset_dict\n\n # threshold the data\n datasets = threshold_data(datasets)\n\n # reduce precision, only need uint8\n datasets = convert2uint8(datasets)\n\n # Save the dataset\n np.savez_compressed(self.savename, **datasets)\n", "#%%\nimport os\nimport numpy as np\nimport pytest\nfrom natural_bm.datasets.common import threshold_data\nfrom natural_bm.datasets import mnist, svhn, fast\nimport natural_bm.backend as B\n\n\n#%%\ndef test_treshold_data():\n\n datasets = {'train.data': 0.6*np.ones((100, 10))}\n datasets = threshold_data(datasets, threshold=None)\n assert np.all(datasets['train.data'] == 1.0)\n\n datasets = {'train.data': 0.6*np.ones((100, 10))}\n datasets = threshold_data(datasets, threshold=0.7)\n assert np.all(datasets['train.data'] == 0.0)\n\n datasets = {'train.data': 0.6*np.ones((100, 10))}\n threshold = np.concatenate((0.7*np.ones((5,)), 0.5*np.ones((5,))))\n datasets = threshold_data(datasets, threshold=threshold)\n verify = np.concatenate((np.zeros((100, 5)), np.ones((100, 5))), axis=1)\n assert np.all(datasets['train.data'] == verify)\n\n\n#%% \ndef test_mnist():\n\n name = 'mnist'\n datatype_ls = ['probability', 'sampled', 'threshold']\n\n # delete files if they exist\n filepath = os.path.dirname(os.path.abspath(__file__))\n folder = os.path.abspath(os.path.join(filepath, '..', '..', 'data'))\n print(folder)\n for datatype in datatype_ls:\n filename = os.path.join(folder, name + '_' + datatype + '.npz')\n try:\n os.remove(filename)\n 
except OSError:\n pass\n\n # this checks on creating and loading datasets\n for datatype in datatype_ls:\n data = mnist.MNIST(datatype)\n\n # this checks on loading existing\n for datatype in datatype_ls:\n data = mnist.MNIST(datatype)\n\n\n#%% \ndef test_fast():\n\n name = 'fast'\n datatype_ls = ['probability', 'sampled', 'threshold']\n\n # delete files if they exist\n filepath = os.path.dirname(os.path.abspath(__file__))\n folder = os.path.abspath(os.path.join(filepath, '..', '..', 'data'))\n for datatype in datatype_ls:\n filename = os.path.join(folder, name + '_' + datatype + '.npz')\n try:\n os.remove(filename)\n except OSError:\n pass\n\n train_samples = 1000\n other_samples = 100\n\n # this checks on creating and loading datasets\n for datatype in datatype_ls:\n data = fast.Fast(datatype)\n assert B.eval(data.train.data).shape[0] == train_samples\n assert B.eval(data.valid.data).shape[0] == other_samples\n assert B.eval(data.test.data).shape[0] == other_samples\n assert B.eval(data.train.lbl).shape[0] == train_samples\n assert B.eval(data.valid.lbl).shape[0] == other_samples\n assert B.eval(data.test.lbl).shape[0] == other_samples\n\n # this checks on loading existing\n for datatype in datatype_ls:\n data = fast.Fast(datatype)\n assert B.eval(data.train.data).shape[0] == train_samples\n assert B.eval(data.valid.data).shape[0] == other_samples\n assert B.eval(data.test.data).shape[0] == other_samples\n assert B.eval(data.train.lbl).shape[0] == train_samples\n assert B.eval(data.valid.lbl).shape[0] == other_samples\n assert B.eval(data.test.lbl).shape[0] == other_samples\n\n\n#%%\ndef longtest_svhn(__file__):\n \"\"\"\n This test is internet dependent and requires a large downloand.\n Since it is slow, I did not include it in auto pytesting.\n \"\"\"\n \n name = 'svhn'\n datatype_ls = ['probability', 'threshold']\n\n # delete files if they exist\n filepath = os.path.dirname(os.path.abspath(__file__))\n folder = os.path.abspath(os.path.join(filepath, '..', '..', 'data'))\n for datatype in datatype_ls:\n filename = os.path.join(folder, name + '_' + datatype + '.npz')\n try:\n os.remove(filename)\n except OSError:\n pass\n\n # this checks on creating and loading datasets\n for datatype in datatype_ls:\n data = svhn.SVHN(datatype)\n\n # this checks on loading existing\n for datatype in datatype_ls:\n data = svhn.SVHN(datatype)\n\n\n#%%\nif __name__ == '__main__':\n # This test will take a couple of minutes depending on your internet speed\n # longtest_svhn(__file__)\n\n pytest.main([__file__])\n" ]
[ [ "numpy.concatenate", "numpy.asarray", "numpy.zeros", "numpy.ones", "numpy.where", "numpy.savez_compressed" ], [ "numpy.all", "numpy.ones", "numpy.zeros" ] ]
emilleishida/resspect_metric
[ "92f0b5d9de9cd6a031ec67fd76f8d302be0efef8" ]
[ "posteriors/fiducial/get_cosmo_posteriors.py" ]
[ "case = 'fiducial'\n\nimport pandas as pd\nimport numpy as np\nimport pystan\nimport os\nfrom resspect.salt3_utils import get_distances\nimport pickle\nimport time\nfrom shutil import copyfile\n\n\n\nfit_lightcurves = False\nrestart_master = True\n\n# number of bins for SALT2mu\nnbins = 70\n\n# rather to re-write fitres file\nreplace_z = True\nadd_lowz = True\nbias = True\n\n###########################################################################################\n# translate ids ###################################\n###########################################################################################\nSNANA_types = {90:11, 62:{1:3, 2:13}, 42:{1:2, 2:12, 3:14},\n 67:41, 52:43, 64:51, 95:60, 994:61, 992:62,\n 993:63, 15:64, 88:70, 92:80, 65:81, 16:83,\n 53:84, 991:90, 6:{1:91, 2:93}}\n\ntypes_names = {90: 'Ia', 67: '91bg', 52:'Iax', 42:'II', 62:'Ibc', \n 95: 'SLSN', 15:'TDE', 64:'KN', 88:'AGN', 92:'RRL', 65:'M-dwarf',\n 16:'EB',53:'Mira', 6:'MicroL', 991:'MicroLB', 992:'ILOT', \n 993:'CART', 994:'PISN',995:'MLString'}\n\n\n# read plasticc test metadata\ntest_zenodo_meta = '/media/RESSPECT/data/PLAsTiCC/PLAsTiCC_zenodo/plasticc_test_metadata.csv'\ntest_metadata = pd.read_csv(test_zenodo_meta)\n\n# read sample for this case\nfname = '/media/RESSPECT/data/PLAsTiCC/for_metrics/' + case + '_samp.csv'\ndata = pd.read_csv(fname)\n\ndata_new = {}\ndata_new['id'] = data['id'].values\ndata_new['redshift'] = data['redshift'].values\ndata_new['type'] = [types_names[item] for item in data['code'].values]\ndata_new['code'] = []\ndata_new['orig_sample'] = ['test' for i in range(data.shape[0])]\ndata_new['queryable'] = [True for i in range(data.shape[0])]\ndata_new['code_zenodo'] = data['code'].values\n\nfor i in range(data.shape[0]): \n sncode = data.iloc[i]['code']\n if sncode not in [62, 42, 6]:\n data_new['code'].append(SNANA_types[sncode])\n if SNANA_types[sncode] == 60:\n print('sncode = ', sncode, ' new code=', SNANA_types[sncode])\n else:\n flag = test_metadata['object_id'].values == data.iloc[i]['id']\n submodel = test_metadata[flag]['true_submodel'].values[0]\n data_new['code'].append(SNANA_types[sncode][submodel])\n \ndata_out = pd.DataFrame(data_new)\ndata_out.to_csv('results/' + case + '_photoids_plasticc.dat', index=False)\n\n###################################################################################\n###################################################################################\n\n\nres = {}\n\nif fit_lightcurves:\n \n start_time = time.time()\n \n print('********* Fitting light curves ******************')\n\n fname = 'results/' + case + '_photoids_plasticc.dat'\n \n meta = pd.read_csv(fname, index_col=False)\n codes = np.unique(meta['code'].values)\n\n res = get_distances(fname,\n data_prefix='LSST_DDF',\n data_folder='/media/RESSPECT/data/PLAsTiCC/SNANA', \n select_modelnum=None,\n salt2mu_prefix='test_salt2mu_res',\n maxsnnum=50000,\n select_orig_sample=['test'],\n salt3_outfile='salt3pipeinput.txt',\n data_prefix_has_sntype=False,\n master_fitres_name='results/master_fitres.fitres', \n append_master_fitres=True,\n restart_master_fitres=restart_master)\n \n res['distances'].to_csv('results/mu_photoIa_plasticc_' + case + '.dat', index=False)\n res['cosmopars'].to_csv('results/cosmo_photoIa_plasticc_' + case + '.dat', index=False)\n \n \n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\n# SALT2mu input file name\nsalt2mu_fname = 'SALT2mu.input'\n\n\nif replace_z:\n if add_lowz:\n if bias:\n # path to lowz fitres\n fitres_lowz_fname = 
'/media/RESSPECT/data/temp_lowz_sim/lowz_only_fittres.fitres'\n \n else:\n raise ValueError('Low-z without bias not implemented yet.')\n \n fitres_lowz = pd.read_csv(fitres_lowz_fname, index_col=False, comment=\"#\", \n skip_blank_lines=True, delim_whitespace=True)\n \n fitres_lowz['zHD'] = fitres_lowz['SIM_ZCMB']\n\n # path to main fitres\n fitres_main_fname = 'results/master_fitres.fitres'\n \n # read fitres\n fitres_main = pd.read_csv(fitres_main_fname, index_col=False, comment=\"#\", \n skip_blank_lines=True, delim_whitespace=True)\n\n if add_lowz:\n # join samples considering only common columns\n frames = [fitres_lowz, fitres_main]\n fitres = pd.concat(frames, ignore_index=True)\n else:\n fitres = fitres_main \n \n # update redshift value\n fitres['zHD'] = fitres['SIM_ZCMB']\n\n # replace nans with number so SNANA recognizes the columns\n fitres.fillna(value=-99, inplace=True)\n\n # save combined fitres to file\n if add_lowz:\n if bias:\n fitres.to_csv('results/master_fitres_new_lowz_withbias.fitres', sep=\" \", index=False)\n else:\n fitres.to_csv('results/master_fitres_new_lowz_nobias.fitres', sep=\" \", index=False)\n else:\n fitres.to_csv('results/master_fitres_new.fitres', sep=\" \", index=False)\n \nsamples_dir = '/media/RESSPECT/data/PLAsTiCC/for_metrics/posteriors/' + case + '/'\nif not os.path.isdir(samples_dir):\n os.makedirs(samples_dir)\n\n# change parameters for SALT2mu\nop = open(salt2mu_fname, 'r')\nlin = op.readlines()\nop.close()\n\nlin[0] = 'bins=' + str(nbins) + '\\n'\n\n\nif add_lowz:\n if bias:\n lin[-3] = 'prefix=results/test_salt2mu_lowz_withbias_' + case + '\\n'\n lin[-4] = 'file=results/master_fitres_new_lowz_withbias.fitres' + '\\n'\n fitres_comb_fname = 'results/test_salt2mu_lowz_withbias_' + case + '.fitres'\n stan_input_fname = 'results/stan_input_salt2mu_lowz_withbias_' + case + '.csv'\n else:\n lin[-3] = 'prefix=results/test_salt2mu_lowz_nobias_' + case + '\\n'\n lin[-4] = 'file=results/master_fitres_new_lowz_nobias.fitres' + '\\n'\n fitres_comb_fname = 'results/test_salt2mu_lowz_nobias_' + case + '.fitres'\n stan_input_fname = 'results/stan_input_salt2mu_lowz_npbias_' + case + '.csv'\nelse:\n lin[-3] = 'prefix=results/test_salt2mu_' + case + '\\n'\n lin[-4] = 'file=results/master_fitres_new.fitres' + '\\n'\n fitres_comb_fname = 'results/test_salt2mu_' + case + '.fitres'\n stan_input_fname = 'results/stan_input_salt2mu_' + case + '.csv'\n\nop2 = open(salt2mu_fname, 'w')\nfor line in lin:\n op2.write(line)\nop2.close()\n\n# get distances from SALT2MU\nos.system('SALT2mu.exe ' + salt2mu_fname)\n\n# read data for Bayesian model\nfitres_comb = pd.read_csv(fitres_comb_fname, index_col=False, comment=\"#\", skip_blank_lines=True, \n delim_whitespace=True)\n\n# set initial conditions\nz0 = 0\nE0 = 0\nc = 3e5\nH0 = 70\n\n# remove duplicated redshift\nfitres_final = fitres_comb.drop_duplicates(subset=['SIM_ZCMB'], keep='first')\n\n# order data according to redshift \nindx = np.argsort(fitres_final['SIM_ZCMB'].values)\n\n# create input data\nstan_input = {}\nstan_input['nobs'] = fitres_final.shape[0]\nstan_input['z'] = fitres_final['SIM_ZCMB'].values[indx]\nstan_input['mu'] = fitres_final['MU'].values[indx]\nstan_input['muerr'] = fitres_final['MUERR'].values[indx]\nstan_input['z0'] = z0\nstan_input['H0'] = H0\nstan_input['c'] = c\nstan_input['E0'] = np.array([E0])\n\n# save only stan input to file\nstan_input2 = {}\nstan_input2['z'] = stan_input['z']\nstan_input2['mu'] = stan_input['mu']\nstan_input2['muerr'] = stan_input['muerr']\n\nstan_input_tofile = 
pd.DataFrame(stan_input2)\n\nstan_input_tofile[['z', 'mu', 'muerr']].to_csv(stan_input_fname, index=False)\n\nstan_model=\"\"\"\nfunctions {\n /** \n * ODE for the inverse Hubble parameter. \n * System State E is 1 dimensional. \n * The system has 2 parameters theta = (om, w)\n * \n * where \n * \n * om: dark matter energy density \n * w: dark energy equation of state parameter\n *\n * The system redshift derivative is \n * \n * d.E[1] / d.z = \n * 1.0/sqrt(om * pow(1+z,3) + (1-om) * (1+z)^(3 * (1+w)))\n * \n * @param z redshift at which derivatives are evaluated. \n * @param E system state at which derivatives are evaluated. \n * @param params parameters for system. \n * @param x_r real constants for system (empty). \n * @param x_i integer constants for system (empty). \n */ \n real[] Ez(real z,\n real[] H,\n real[] params,\n real[] x_r,\n int[] x_i) {\n real dEdz[1];\n dEdz[1] = 1.0/sqrt(params[1]*(1+z)^3\n +(1-params[1])*(1+z)^(3*(1+params[2])));\n return dEdz;\n } \n}\ndata {\n int<lower=1> nobs; // number of data points\n real E0[1]; // integral(1/H) at z=0 \n real z0; // initial redshift, 0\n real c; // speed of light\n real H0; // hubble parameter\n real mu[nobs]; // distance modulus\n vector[nobs] muerr; // error in distance modulus\n real<lower=0> z[nobs]; // redshift\n}\ntransformed data {\n real x_r[0]; // required by ODE (empty)\n int x_i[0]; \n}\nparameters{\n real<lower=0, upper=1> om; // dark matter energy density\n real<lower=-2, upper=0> w; // dark energy equation of state parameter\n}\ntransformed parameters{\n real DC[nobs,1]; // co-moving distance \n real pars[2]; // ODE input = (om, w)\n real dl[nobs]; // luminosity distance\n real DH; // Hubble distance = c/H0\n \n \n DH = (c/H0);\n pars[1] = om;\n pars[2] = w;\n \n // Integral of 1/E(z) \n DC = integrate_ode_rk45(Ez, E0, z0, z, pars, x_r, x_i);\n for (i in 1:nobs) {\n dl[i] = 25 + 5 * log10(DH * (1 + z[i]) * DC[i, 1]);\n }\n}\nmodel{\n // priors and likelihood\n om ~ normal(0.3, 0.1);\n w ~ normal(-1, 0.2);\n\n mu ~ normal(dl, muerr);\n}\ngenerated quantities {\n vector[nobs] log_lik;\n vector[nobs] mu_hat;\n \n for (j in 1:nobs) {\n log_lik[j] = normal_lpdf(mu[j] | dl[j], muerr[j]);\n mu_hat[j] = normal_rng(dl[j], muerr[j]);\n }\n}\n\"\"\"\n\nmodel = pystan.StanModel(model_code=stan_model)\n\nfit = model.sampling(data=stan_input, iter=16000, chains=3, warmup=10000, control={'adapt_delta':0.99})\n\n# print summary\nres = fit.stansummary(pars=[\"om\", \"w\"])\ncheck = str(pystan.check_hmc_diagnostics(fit))\nprint(res)\nprint( ' ******* ')\nprint(check)\n\n\nif add_lowz and bias:\n summ_fname = samples_dir + 'stan_summary_' + case + '_lowz_withbias.dat'\n summ_fname2 = 'results/stan_summary_' + case + '_lowz_withbias.dat'\n chains_fname = samples_dir + '/chains_' + case + '_lowz_withbias.pkl'\n trace_fname = samples_dir + '/trace_plot_' + case + '_lowz_withbias.png'\n trace_fname2 = 'results/trace_plot_' + case + '_lowz_withbias.png'\nelif add_lowz and not bias:\n summ_fname = samples_dir + 'stan_summary_' + case + '_lowz_nobias.dat'\n summ_fname2 = 'results/stan_summary_' + case + '_lowz_nobias.dat'\n chains_fname = samples_dir + '/chains_' + case + '_lowz_nobias.pkl'\n trace_fname = samples_dir + '/trace_plot_' + case + '_lowz_nobias.png'\n trace_fname2 = 'results/trace_plot_' + case + '_lowz_nobias.png'\nelse:\n summ_fname = samples_dir + 'stan_summary_' + case + '.dat'\n summ_fname2 = 'results/stan_summary_' + case + '.dat'\n chains_fname = samples_dir + '/chains_' + case + '.pkl'\n trace_fname = samples_dir + 
'/trace_plot_' + case + '.png'\n trace_fname2 = 'results/trace_plot_' + case + '.png'\n\nop2 = open(summ_fname, 'w')\nop2.write(res)\nop2.write('\\n ************* \\n')\nop2.write(check)\nop2.close()\n\nsamples = fit.extract(permuted=True)\n\npickle.dump(samples, open(chains_fname, \"wb\"))\n\npystan.check_hmc_diagnostics(fit)\n\n# plot chains\nimport arviz\nimport matplotlib.pyplot as plt\n\narviz.plot_trace(fit, ['om', 'w'])\nplt.savefig(trace_fname)\n\ncopyfile(trace_fname, trace_fname2)\ncopyfile(summ_fname, summ_fname2)" ]
[ [ "numpy.array", "pandas.DataFrame", "matplotlib.pyplot.savefig", "numpy.argsort", "pandas.concat", "pandas.read_csv", "numpy.unique" ] ]
bmaranville/orsopy
[ "74083afdce8f8f1ab3866c7f1f5209942c8734db" ]
[ "tests/test_slddb/test_dbcreation.py" ]
[ "import sys\nimport unittest\n\nfrom numpy import ndarray, testing\n\nfrom orsopy.slddb import SLDDB, dbconfig, element_table\n\n\nclass TestCreateDB(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.db = SLDDB(\":memory:\")\n cls.db.create_database()\n\n @classmethod\n def tearDownClass(cls):\n del cls.db\n\n def test_tables(self):\n c = self.db.db.cursor()\n c.execute(\"SELECT name FROM sqlite_master WHERE type='table'\")\n items = c.fetchall()\n for i, tbl in enumerate([dbconfig.DB_MATERIALS_NAME]):\n with self.subTest(msg=tbl, i=i):\n self.assertTrue((tbl,) in items)\n\n def test_element_search(self):\n with self.subTest(\"database search\", i=0):\n s1 = element_table.get_element(\"Si\")\n s2 = element_table.get_element(14)\n with self.subTest(\"equality\", i=0):\n self.assertEqual(s1.Z, s2.Z)\n self.assertEqual(s1.symbol, s2.symbol)\n self.assertEqual(s1.mass, s2.mass)\n self.assertEqual(s1.b, s2.b)\n testing.assert_array_equal(s1._xdata, s2._xdata)\n\n def test_add_field(self):\n global dbconfig\n # call without changes\n self.db.update_fields()\n\n # call with appending column\n dbconfig.DB_MATERIALS_FIELDS.append(\"testadd\")\n dbconfig.DB_MATERIALS_CONVERTERS.append(dbconfig.DB_MATERIALS_CONVERTERS[-1])\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS.append(dbconfig.DB_MATERIALS_FIELD_DEFAULTS[-1])\n dbconfig.db_lookup = dict(\n [\n (field, (i, converter, default))\n for i, (field, converter, default) in enumerate(\n zip(\n dbconfig.DB_MATERIALS_FIELDS,\n dbconfig.DB_MATERIALS_CONVERTERS,\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS,\n )\n )\n ]\n )\n\n self.db.update_fields()\n\n # call with inserted column\n dbconfig.DB_MATERIALS_FIELDS.insert(5, \"testadd2\")\n dbconfig.DB_MATERIALS_CONVERTERS.insert(5, dbconfig.DB_MATERIALS_CONVERTERS[-1])\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS.insert(5, dbconfig.DB_MATERIALS_FIELD_DEFAULTS[-1])\n dbconfig.db_lookup = dict(\n [\n (field, (i, converter, default))\n for i, (field, converter, default) in enumerate(\n zip(\n dbconfig.DB_MATERIALS_FIELDS,\n dbconfig.DB_MATERIALS_CONVERTERS,\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS,\n )\n )\n ]\n )\n self.db.update_fields()\n\n # reset database\n dbconfig.DB_MATERIALS_FIELDS.pop(-1)\n dbconfig.DB_MATERIALS_FIELDS.pop(5)\n dbconfig.DB_MATERIALS_CONVERTERS.pop(-1)\n dbconfig.DB_MATERIALS_CONVERTERS.pop(5)\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS.pop(-1)\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS.pop(5)\n dbconfig.db_lookup = dict(\n [\n (field, (i, converter, default))\n for i, (field, converter, default) in enumerate(\n zip(\n dbconfig.DB_MATERIALS_FIELDS,\n dbconfig.DB_MATERIALS_CONVERTERS,\n dbconfig.DB_MATERIALS_FIELD_DEFAULTS,\n )\n )\n ]\n )\n self.db = SLDDB(\":memory:\")\n self.db.create_database()\n\n def test_backup(self):\n if sys.version_info.minor > 6:\n self.db.backup(\":memory:\")\n" ]
[ [ "numpy.testing.assert_array_equal" ] ]
Katsute/Baruch-CIS-3120-Assignments
[ "2cb470a7e3b7bf2d49da520fdff079f832624c06" ]
[ "classwork/05_01_2021/plt.py" ]
[ "import matplotlib.pyplot as plt\nimport pandas as pd\n\nx = [5, 2, 9, 4, 7]\ny = [10, 5, 8, 4, 2]\n\nplt.plot(x, y)\nplt.show()\n\nplt.bar(x, y)\nplt.show()\n\nplt.hist(x)\nplt.show()\n\ndf = pd.DataFrame({'x': x, 'y': y})\ndf.plot('x', 'y', kind=\"scatter\")\nplt.show()\n" ]
[ [ "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.hist", "matplotlib.pyplot.show", "matplotlib.pyplot.bar" ] ]
AnglinaBhambra/OpenSarToolkit
[ "b2d6562a77eea86b4c236cc14f81f73ff4e75c17" ]
[ "ost/helpers/vector.py" ]
[ "import os\nimport sys\nimport json\nfrom functools import partial\n\nimport osr\nimport ogr\nimport pyproj\nimport geopandas as gpd\n\nfrom shapely.ops import transform\nfrom shapely.wkt import loads\nfrom shapely.geometry import Point, Polygon, mapping, shape\nfrom fiona import collection\nfrom fiona.crs import from_epsg\n\n\ndef get_epsg(prjfile):\n '''Get the epsg code from a projection file of a shapefile\n\n Args:\n prjfile: a .prj file of a shapefile\n\n Returns:\n str: EPSG code\n\n '''\n\n prj_file = open(prjfile, 'r')\n prj_txt = prj_file.read()\n srs = osr.SpatialReference()\n srs.ImportFromESRI([prj_txt])\n srs.AutoIdentifyEPSG()\n # return EPSG code\n return srs.GetAuthorityCode(None)\n\n\ndef get_proj4(prjfile):\n '''Get the proj4 string from a projection file of a shapefile\n\n Args:\n prjfile: a .prj file of a shapefile\n\n Returns:\n str: PROJ4 code\n\n '''\n\n prj_file = open(prjfile, 'r')\n prj_string = prj_file.read()\n\n # Lambert error\n if '\\\"Lambert_Conformal_Conic\\\"' in prj_string:\n\n print(' ERROR: It seems you used an ESRI generated shapefile'\n ' with Lambert Conformal Conic projection. ')\n print(' This one is not compatible with Open Standard OGR/GDAL'\n ' tools used here. ')\n print(' Reproject your shapefile to a standard Lat/Long projection'\n ' and try again')\n exit(1)\n\n srs = osr.SpatialReference()\n srs.ImportFromESRI([prj_string])\n return srs.ExportToProj4()\n\n\ndef epsg_to_wkt_projection(epsg_code):\n \n spatial_ref = osr.SpatialReference()\n spatial_ref.ImportFromEPSG(epsg_code) \n \n return spatial_ref.ExpotToWkt()\n\n\ndef reproject_geometry(geom, inproj4, out_epsg):\n '''Reproject a wkt geometry based on EPSG code\n\n Args:\n geom (ogr-geom): an ogr geom objecct\n inproj4 (str): a proj4 string\n out_epsg (str): the EPSG code to which the geometry should transformed\n\n Returns\n geom (ogr-geometry object): the transformed geometry\n\n '''\n\n geom = ogr.CreateGeometryFromWkt(geom)\n # input SpatialReference\n spatial_ref_in = osr.SpatialReference()\n spatial_ref_in.ImportFromProj4(inproj4)\n\n # output SpatialReference\n spatial_ref_out = osr.SpatialReference()\n spatial_ref_out.ImportFromEPSG(int(out_epsg))\n\n # create the CoordinateTransformation\n coord_transform = osr.CoordinateTransformation(spatial_ref_in,\n spatial_ref_out)\n try:\n geom.Transform(coord_transform)\n except:\n print(' ERROR: Not able to transform the geometry')\n sys.exit()\n\n return geom\n\n\ndef geodesic_point_buffer(lat, lon, meters, envelope=False):\n\n # get WGS 84 proj\n proj_wgs84 = pyproj.Proj(init='epsg:4326')\n\n # Azimuthal equidistant projection\n aeqd_proj = '+proj=aeqd +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0'\n project = partial(\n pyproj.transform,\n pyproj.Proj(aeqd_proj.format(lat=lat, lon=lon)),\n proj_wgs84)\n\n buf = Point(0, 0).buffer(meters) # distance in metres\n\n if envelope is True:\n geom = Polygon(transform(project, buf).exterior.coords[:]).envelope\n else:\n geom = Polygon(transform(project, buf).exterior.coords[:])\n\n return geom.to_wkt()\n\n\ndef latlon_to_wkt(lat, lon, buffer_degree=None, buffer_meter=None, envelope=False):\n '''A helper function to create a WKT representation of Lat/Lon pair\n\n This function takes lat and lon vale and returns the WKT Point\n representation by default.\n\n A buffer can be set in metres, which returns a WKT POLYGON. 
If envelope\n is set to True, the buffer will be squared by the extent buffer radius.\n\n Args:\n lat (str): Latitude (deg) of a point\n lon (str): Longitude (deg) of a point\n buffer (float): optional buffer around the point\n envelope (bool): gives a square instead of a circular buffer\n (only applies if bufferis set)\n\n Returns:\n wkt (str): WKT string\n\n '''\n\n if buffer_degree is None and buffer_meter is None:\n aoi_wkt = 'POINT ({} {})'.format(lon, lat)\n\n elif buffer_degree:\n aoi_geom = loads('POINT ({} {})'.format(lon, lat)).buffer(buffer_degree)\n if envelope:\n aoi_geom = aoi_geom.envelope\n\n aoi_wkt = aoi_geom.to_wkt()\n\n elif buffer_meter:\n aoi_wkt = geodesic_point_buffer(lat, lon, buffer_meter, envelope)\n\n return aoi_wkt\n\n\ndef wkt_manipulations(wkt, buffer=None, convex=False, envelope=False):\n\n geom = ogr.CreateGeometryFromWkt(wkt)\n\n if buffer:\n geom = geom.Buffer(buffer)\n\n if convex:\n geom = geom.ConvexHull()\n\n if envelope:\n geom = geom.GetEnvelope()\n geom = ogr.CreateGeometryFromWkt(\n 'POLYGON (({} {}, {} {}, {} {}, {} {}, {} {}, {} {}))'.format(\n geom[1], geom[3], geom[0], geom[3], geom[0], geom[2],\n geom[1], geom[2], geom[1], geom[3], geom[1], geom[3]))\n\n return geom.ExportToWkt()\n\n\ndef shp_to_wkt(shapefile, buffer=None, convex=False, envelope=False):\n '''A helper function to translate a shapefile into WKT\n\n\n '''\n\n # get filepaths and proj4 string\n shpfile = os.path.abspath(shapefile)\n prjfile = shpfile[:-4] + '.prj'\n proj4 = get_proj4(prjfile)\n\n lyr_name = os.path.basename(shapefile)[:-4]\n shp = ogr.Open(os.path.abspath(shapefile))\n lyr = shp.GetLayerByName(lyr_name)\n geom = ogr.Geometry(ogr.wkbGeometryCollection)\n\n for feat in lyr:\n geom.AddGeometry(feat.GetGeometryRef())\n wkt = geom.ExportToWkt()\n\n if proj4 != '+proj=longlat +datum=WGS84 +no_defs':\n print(' INFO: Reprojecting AOI file to Lat/Long (WGS84)')\n wkt = reproject_geometry(wkt, proj4, 4326).ExportToWkt()\n\n # do manipulations if needed\n wkt = wkt_manipulations(wkt, buffer=buffer, convex=convex,\n envelope=envelope)\n\n return wkt\n\n\ndef kml_to_wkt(kmlfile):\n\n shp = ogr.Open(os.path.abspath(kmlfile))\n lyr = shp.GetLayerByName()\n for feat in lyr:\n geom = feat.GetGeometryRef()\n wkt = str(geom)\n\n return wkt\n\n\ndef latlon_to_shp(lon, lat, shapefile):\n\n shapefile = str(shapefile)\n\n schema = {'geometry': 'Point',\n 'properties': {'id': 'str'}}\n\n wkt = loads('POINT ({} {})'.format(lon, lat))\n\n with collection(shapefile, \"w\",\n crs=from_epsg(4326),\n driver=\"ESRI Shapefile\",\n schema=schema) as output:\n\n output.write({'geometry': mapping(wkt),\n 'properties': {'id': '1'}})\n\n\ndef shp_to_gdf(shapefile):\n\n gdf = gpd.GeoDataFrame.from_file(shapefile)\n\n prjfile = shapefile[:-4] + '.prj'\n proj4 = get_proj4(prjfile)\n\n if proj4 != '+proj=longlat +datum=WGS84 +no_defs':\n print(' INFO: reprojecting AOI layer to WGS84.')\n # reproject\n gdf.crs = (proj4)\n gdf = gdf.to_crs({'init': 'epsg:4326'})\n\n return gdf\n\n\ndef wkt_to_gdf(wkt):\n \n geometry = loads(wkt)\n # point wkt\n if geometry.geom_type == 'Point':\n data = {'id': ['1'],\n 'geometry': loads(wkt).buffer(0.05).envelope}\n gdf = gpd.GeoDataFrame(data)\n \n # polygon wkt\n elif geometry.geom_type == 'Polygon':\n data = {'id': ['1'],\n 'geometry': loads(wkt)}\n gdf = gpd.GeoDataFrame(data)\n\n # geometry collection of single multiploygon\n elif geometry.geom_type == 'GeometryCollection' and len(geometry) == 1 and 'MULTIPOLYGON' in str(geometry):\n\n data = {'id': ['1'],\n 
'geometry': geometry}\n gdf = gpd.GeoDataFrame(data, crs = {'init': 'epsg:4326', 'no_defs': True})\n \n ids, feats =[], []\n for i, feat in enumerate(gdf.geometry.values[0]):\n ids.append(i)\n feats.append(feat)\n\n gdf = gpd.GeoDataFrame({'id': ids,\n 'geometry': feats}, \n geometry='geometry', \n crs = gdf.crs\n )\n \n # geometry collection of single polygon\n elif geometry.geom_type == 'GeometryCollection' and len(geometry) == 1:\n \n data = {'id': ['1'],\n 'geometry': geometry}\n gdf = gpd.GeoDataFrame(data, crs = {'init': 'epsg:4326', 'no_defs': True})\n\n # everything else (hopefully)\n else:\n\n i, ids, geoms = 1, [], []\n for geom in geometry:\n ids.append(i)\n geoms.append(geom)\n i += 1\n\n gdf = gpd.GeoDataFrame({'id': ids,\n 'geometry': geoms},\n crs = {'init': 'epsg:4326', 'no_defs': True}\n )\n \n return gdf\n\n\ndef wkt_to_shp(wkt, outfile):\n\n gdf = wkt_to_gdf(wkt)\n gdf.to_file(outfile)\n\n\ndef gdf_to_json_geometry(gdf):\n \"\"\"Function to parse features from GeoDataFrame in such a manner \n that rasterio wants them\"\"\"\n# \n# try:\n# gdf.geometry.values[0].type\n# features = [json.loads(gdf.to_json())['features'][0]['geometry']]\n# except AttributeError:\n# ids, feats =[], []\n# for i, feat in enumerate(gdf.geometry.values[0]):\n# ids.append(i)\n# feats.append(feat)\n#\n# gdf = gpd.GeoDataFrame({'id': ids,\n# 'geometry': feats}, \n# geometry='geometry', \n# crs = gdf.crs\n# )\n geojson = json.loads(gdf.to_json())\n return [feature['geometry'] for feature in geojson['features'] \n if feature['geometry']]\n\n\ndef inventory_to_shp(inventory_df, outfile):\n\n # change datetime datatypes\n inventory_df['acquisitiondate'] = inventory_df[\n 'acquisitiondate'].astype(str)\n inventory_df['ingestiondate'] = inventory_df['ingestiondate'].astype(str)\n inventory_df['beginposition'] = inventory_df['beginposition'].astype(str)\n inventory_df['endposition'] = inventory_df['endposition'].astype(str)\n\n # write to shapefile\n inventory_df.to_file(outfile)\n\n\ndef exterior(infile, outfile, buffer=None):\n\n gdf = gpd.read_file(infile, crs={'init': 'EPSG:4326'})\n gdf.geometry = gdf.geometry.apply(lambda row: Polygon(row.exterior))\n gdf_clean = gdf[gdf.geometry.area >= 1.0e-6]\n gdf_clean.geometry = gdf_clean.geometry.buffer(-0.0018)\n #if buffer:\n # gdf.geometry = gdf.geometry.apply(\n # lambda row: Polygon(row.buffer(-0.0018)))\n gdf_clean.to_file(outfile)\n\n\ndef difference(infile1, infile2, outfile):\n\n gdf1 = gpd.read_file(infile1)\n gdf2 = gpd.read_file(infile2)\n\n gdf3 = gpd.overlay(gdf1, gdf2, how='symmetric_difference')\n\n gdf3.to_file(outfile)\n\n\ndef buffer_shape(infile, outfile, buffer=None):\n\n with collection(infile, \"r\") as in_shape:\n # schema = in_shape.schema.copy()\n schema = {'geometry': 'Polygon', 'properties': {'id': 'int'}}\n crs = in_shape.crs\n with collection(\n outfile, \"w\", \"ESRI Shapefile\", schema, crs=crs) as output:\n\n for i, point in enumerate(in_shape):\n output.write({\n 'properties': {\n 'id': i\n },\n 'geometry': mapping(\n shape(point['geometry']).buffer(buffer))\n })\n\n\ndef plot_inventory(aoi, inventory_df, transparency=0.05, annotate = False):\n\n import matplotlib.pyplot as plt\n\n # load world borders for background\n world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))\n\n # import aoi as gdf\n aoi_gdf = wkt_to_gdf(aoi)\n\n # get bounds of AOI\n bounds = inventory_df.geometry.bounds\n\n # get world map as base\n base = world.plot(color='lightgrey', edgecolor='white')\n\n # plot aoi\n aoi_gdf.plot(ax=base, 
color='None', edgecolor='black')\n\n # plot footprints\n inventory_df.plot(ax=base, alpha=transparency)\n\n # set bounds\n plt.xlim([bounds.minx.min()-2, bounds.maxx.max()+2])\n plt.ylim([bounds.miny.min()-2, bounds.maxy.max()+2])\n plt.grid(color='grey', linestyle='-', linewidth=0.2)\n if annotate:\n import math\n for idx, row in inventory_df.iterrows():\n # print([row['geometry'].bounds[0],row['geometry'].bounds[3]])\n coord = [row['geometry'].centroid.x, row['geometry'].centroid.y]\n x1, y2, x2, y1 = row['geometry'].bounds\n angle = math.degrees(math.atan2((y2 - y1), (x2 - x1)))\n # rint(angle)\n plt.annotate(s=row['bid'], xy=coord, rotation=angle + 5, size=10, color='red', horizontalalignment='center')\n" ]
[ [ "matplotlib.pyplot.grid", "matplotlib.pyplot.annotate" ] ]
tawatts1/chess
[ "cb2917ec689bb8db1dc2436ed2ef6463319876a7" ]
[ "analyze_results.py" ]
[ "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n \ndef get_data(fname):\n out = []\n with open(fname, 'r') as f:\n for line in f:\n datum = []\n for entry in line.split(','):\n datum.append(float(entry))\n out.append(datum)\n return np.array(out)\n\nfiles = os.listdir(\"game_results\")\n\n\n\nfor file in files:\n if file[0] != '.':\n print(file)\n data = get_data(f\"game_results/{file}\")\n fig, (ax1, ax2) = plt.subplots(1,2)\n fig.suptitle(file)\n mean = np.mean(data[:,0])\n print(np.shape(data))\n # deviation for 95 pct confidence interval:\n dev = 1.96*np.std(data[:,0])/ np.sqrt( np.shape(data)[0] )\n c0,c1 = mean-dev, mean+dev\n \n ax1.hist(data[:,0])\n ax1.set_title(\"White performance\")\n #ax1.figtext(.5,.01,f\"{file} and such and such\")\n ax2.hist(data[:,1])\n ax2.set_title(\"Game length\")\n #plt.figtext(.5,.01,f\"{file} and such and such\")\n plt.figtext(.5,.03,f\"The mean of white's performance is {mean:.3f}, with CI ({c0:.3f}, {c1:.3f}). \", wrap=True, ha=\"center\")\n plt.savefig(\"images/\" + file+\".png\", dpi = 300)\n #plt.show()\n\n " ]
[ [ "numpy.array", "matplotlib.pyplot.savefig", "numpy.mean", "matplotlib.pyplot.subplots", "numpy.shape", "matplotlib.pyplot.figtext", "numpy.std" ] ]
jasonrobwebster/sampling-importance-resampling-example
[ "250e54815f73ccf071a4dad8d62a2bd7ec38c0c2" ]
[ "linear.py" ]
[ "import numpy as np\nfrom scipy.special import softmax\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n data_size = 100\n true_grad = 3\n true_intercept = 1\n true_sig = 1\n\n x = np.linspace(0, 10, data_size)\n # y = m x + c\n y_obs = true_grad * x + true_intercept + np.random.normal(loc=0, scale=true_sig, size=data_size)\n\n M = 2000000\n m = M // 20 # M/m is usually around 20\n print(f'Generating {M} initial samples, and {m} re-samples')\n\n # sample M params from initial prior\n grad_prior = np.random.uniform(-10, 10, M) # m ~ U(-10, 10)\n intercept_prior = np.random.uniform(-10, 10, M) # c ~ U(-10, 10)\n sig_prior = np.random.uniform(0.1, 20, M) # sig ~ U(0.1, 10)\n\n # calculate importance weights, assuming that we model y ~ N(mu, sig)\n exponent = 1 / (2 * sig_prior ** 2) \\\n * np.sum([(y_obs[i] - (grad_prior * x[i] + intercept_prior)) ** 2 for i in range(data_size)], axis=0)\n\n log_weights = - data_size * np.log(sig_prior * np.sqrt(2 * np.pi)) - exponent\n\n weights = softmax(log_weights)\n\n # resample params using the above weights to get posterior\n grad_posterior = np.random.choice(grad_prior, m, p=weights)\n intercept_posterior = np.random.choice(intercept_prior, m, p=weights)\n sig_posterior = np.random.choice(sig_prior, m, p=weights)\n\n # report summary stats\n print(f'True gradient: {true_grad}')\n print(f'True intercept: {true_intercept}')\n print(f'True sigma: {true_sig}')\n print(f'Gradient posterior: mean={np.mean(grad_posterior):.3} - sd={np.std(grad_posterior):.3}')\n print(f'Intercept posterior: mean={np.mean(intercept_posterior):.3} - sd={np.std(intercept_posterior):.3}')\n print(f'Sigma posterior: mean={np.mean(sig_posterior):.3} - sd={np.std(sig_posterior):.3}')\n\n # plot the new samples\n fig, axes = plt.subplots(1, 4, figsize=(12, 3))\n\n axes[0].set_title('Data')\n axes[1].set_title('Gradient Posterior')\n axes[2].set_title('Intercept Posterior')\n axes[3].set_title('Sigma Posterior')\n\n axes[0].plot(x, y_obs, 'x')\n sns.distplot(grad_posterior, ax=axes[1])\n sns.distplot(intercept_posterior, ax=axes[2])\n sns.distplot(sig_posterior, ax=axes[3])\n plt.show()\n\n fig, ax = plt.subplots(figsize=(5, 5))\n ax.set_xlabel('Gradient')\n ax.set_ylabel('Intercept')\n ax.set_title('Joint distribution p(m, c)')\n sns.kdeplot(grad_posterior, intercept_posterior, shade=True, ax=ax)\n plt.show()\n" ]
[ [ "numpy.random.normal", "scipy.special.softmax", "numpy.random.choice", "matplotlib.pyplot.subplots", "numpy.mean", "numpy.std", "numpy.random.uniform", "numpy.sqrt", "matplotlib.pyplot.show", "numpy.linspace" ] ]
RichardLeeK/CNM
[ "a3c15cb0a0373d6ad03c5a815a7e020f90ab8522" ]
[ "Plateau/preprocess_image.py" ]
[ "import numpy as np\n\nfrom env import Env\n\ndef fill(image,x_idx,y_idx,bound,value):\n if (x_idx<0) or (x_idx>=900):\n return image\n elif (y_idx<0) or (y_idx>=110):\n return image\n elif image[x_idx][y_idx]>=bound:\n return image\n else:\n image[x_idx][y_idx]=value\n return image\n \ndef fill_edge(image,x_idx,y_idx,value,bound,dist=1):\n fill(image,x_idx-dist,y_idx,bound,value)\n fill(image,x_idx-dist,y_idx-dist,bound,value)\n fill(image,x_idx-dist,y_idx+dist,bound,value)\n \n fill(image,x_idx+dist,y_idx,bound,value)\n fill(image,x_idx+dist,y_idx-dist,bound,value)\n fill(image,x_idx+dist,y_idx+dist,bound,value)\n \n fill(image,x_idx,y_idx-dist,bound,value)\n fill(image,x_idx,y_idx+dist,bound,value)\n\ndef transform_img(data,window=900,y_range=110,step=60):\n icps=np.int64(data[1])\n # icps=np.array([icp for icp in icps if 0<icp<=y_range])\n image_set=[]\n start_time=0\n while start_time<(len(icps)-window):\n image=np.zeros((window,y_range), dtype=np.uint8)\n for time_idx in range(0,window):\n time=start_time+time_idx\n y_idx=icps[time]-1\n if y_idx<y_range:\n image[time_idx][y_idx]=255\n fill_edge(image,time_idx,y_idx,value=128,bound=255,dist=1)\n image_set.append(image.T)\n start_time=start_time+step\n return np.array(image_set)\n\ndef transform_imgdict(dataset,window=900,y_range=110,step=60):\n imgdict=dict()\n for i in range(len(dataset)):\n imgset=transform_img(dataset[i],window=window,y_range=y_range,step=step)\n imgdict[i]=imgset\n return imgdict" ]
[ [ "numpy.int64", "numpy.array", "numpy.zeros" ] ]
NVlabs/sionna
[ "527d0f7866b379afffad34a6bef7ed3bf6f33ad2", "488e6c3ff6ff2b3313d0ca0f94e4247b8dd6ff35" ]
[ "test/test_conv_encoding.py", "sionna/signal/upsampling.py" ]
[ "#\n# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\ntry:\n import sionna\nexcept ImportError as e:\n import sys\n sys.path.append(\"../\")\nimport unittest\nimport numpy as np\nimport tensorflow as tf\ngpus = tf.config.list_physical_devices('GPU')\nprint('Number of GPUs available :', len(gpus))\nif gpus:\n gpu_num = 0 # Number of the GPU to be used\n try:\n tf.config.set_visible_devices(gpus[gpu_num], 'GPU')\n print('Only GPU number', gpu_num, 'used.')\n tf.config.experimental.set_memory_growth(gpus[gpu_num], True)\n except RuntimeError as e:\n print(e)\nfrom sionna.fec.conv import ConvEncoder\nfrom sionna.utils import BinarySource\n\nclass TestConvEncoding(unittest.TestCase):\n\n def test_output_dim(self):\n r\"\"\"Test with allzero codeword that output dims are correct (=n) and output also equals all-zero.\"\"\"\n\n bs = 10\n coderates = [1/2, 1/3]\n ks = [10, 20, 50, 100]\n\n for rate in coderates:\n for k in ks:\n n = int(k/rate) # calculate coderate\n enc = ConvEncoder(rate=rate, constraint_length=5)\n u = np.zeros([bs, k])\n c = enc(u).numpy()\n self.assertTrue(c.shape[-1]==n)\n # also check that all-zero input yields all-zero output\n c_hat = np.zeros([bs, n])\n self.assertTrue(np.array_equal(c, c_hat))\n\n # test that output dim can change (in eager mode)\n k = k+1 # increase length\n n = int(k/rate) # calculate coderate\n u = np.zeros([bs, k])\n c = enc(u).numpy()\n self.assertTrue(c.shape[-1]==n)\n # also check that all-zero input yields all-zero output\n c_hat = np.zeros([bs, n])\n self.assertTrue(np.array_equal(c, c_hat))\n\n def test_invalid_inputs(self):\n r\"\"\"Test with invalid rate values and invalid constraint lengths as input.\n Only rates [1/2, 1/3] and constraint lengths [3, 4, 5, 6, 7, 8] are accepted currently.\"\"\"\n rate_invalid = [0.2, 0.45, 0.01]\n rate_valid = [1/3, 1/2]\n\n constraint_length_invalid = [2, 9, 0]\n constraint_length_valid = [3, 4, 5, 6, 7, 8]\n for rate in rate_valid:\n for mu in constraint_length_invalid:\n with self.assertRaises(AssertionError):\n enc = ConvEncoder(rate=rate, constraint_length=mu)\n\n for rate in rate_invalid:\n for mu in constraint_length_valid:\n with self.assertRaises(AssertionError):\n enc = ConvEncoder(rate=rate, constraint_length= mu)\n \n gmat = [['101', '111', '000'], ['000', '010', '011']]\n with self.assertRaises(AssertionError):\n enc = ConvEncoder(gen_poly=gmat)\n \n def test_polynomial_input(self):\n r\"\"\"Test that different formats of input polynomials are accepted and raises exceptions when the generator polynomials fail assertions.\"\"\"\n\n bs = 10\n k = 100\n rate = 1/2\n n = int(k/rate) # calculate coderate\n u = np.zeros([bs, k])\n\n g1 = ['101', '111']\n g2 = np.array(g1)\n\n g = [g1, g2]\n for gen_poly in g:\n enc = ConvEncoder(gen_poly=gen_poly)\n c = enc(u).numpy()\n self.assertTrue(c.shape[-1]==n)\n # also check that all-zero input yields all-zero output\n c_hat = np.zeros([bs, n])\n self.assertTrue(np.array_equal(c, c_hat))\n\n def util_check_assertion_err(gen_poly_, msg_):\n with self.assertRaises(AssertionError) as exception_context:\n enc = ConvEncoder(gen_poly=gen_poly_)\n self.assertEqual(str(exception_context.exception), msg_)\n\n gs = [\n ['1001', '111'],\n ['1001', 111],\n ('1211', '1101')]\n msg_s = [\n \"Each polynomial must be of same length.\",\n \"Each polynomial must be a string.\",\n \"Each Polynomial must be a string of 0/1 s.\"\n ]\n for idx, g in enumerate(gs):\n 
util_check_assertion_err(g,msg_s[idx])\n\n def test_keras(self):\n \"\"\"Test that Keras model can be compiled (+supports dynamic shapes).\"\"\"\n bs = 10\n k = 100\n\n source = BinarySource()\n inputs = tf.keras.Input(shape=(k), dtype=tf.float32)\n x = ConvEncoder(rate=0.5, constraint_length=4)(inputs)\n model = tf.keras.Model(inputs=inputs, outputs=x)\n\n b = source([bs, k])\n model(b)\n # call twice to see that bs can change\n b2 = source([bs+1, k])\n model(b2)\n \n model.summary()\n\n source = BinarySource()\n enc = ConvEncoder(rate=0.5, constraint_length=8)\n u = source([1, 32])\n x = enc(u)\n print(x.shape)\n u = source([2, 30])\n x = enc(u)\n print(x.shape)\n\n def test_multi_dimensional(self):\n \"\"\"Test against arbitrary shapes\n \"\"\"\n k = 120\n n = 240 # rate must be 1/2 or 1/3\n\n source = BinarySource()\n enc = ConvEncoder(rate=k/n, constraint_length=5)\n\n b = source([100, k])\n b_res = tf.reshape(b, [4, 5, 5, k])\n\n # encode 2D Tensor\n c = enc(b).numpy()\n # encode 4D Tensor\n c_res = enc(b_res).numpy()\n\n # test that shape was preserved\n self.assertTrue(c_res.shape[:-1]==b_res.shape[:-1])\n\n\n # and reshape to 2D shape\n c_res = tf.reshape(c_res, [100,n])\n # both version should yield same result\n self.assertTrue(np.array_equal(c, c_res))\n\n def test_ref_implementation(self):\n r\"\"\"Test against pre-encoded codewords from reference implementation.\n \"\"\"\n ref_path = 'codes/conv/'\n gs = [\n ['101', '111'],\n ['1101', '1111'],\n ['101', '111', '111'],\n ['101', '111', '111', '111']]\n gen_strs = [\n 'conv_rate_half_57_', \n 'conv_rate_half_6474_',\n 'conv_rate_onethird_577_',\n 'conv_rate_onefourth_5777_'] \n rs=[1/2, 1/2, 1/3, 1/4] \n mus = [3, 4, 3, 3]\n for idx, gen_poly in enumerate(gs):\n enc = ConvEncoder(gen_poly=gen_poly)\n gen_str = gen_strs[idx]\n u = np.load(ref_path + gen_str + 'ref_u.npy')\n cref = np.load(ref_path + gen_str + 'ref_x.npy')\n c = enc(u).numpy()\n self.assertTrue(np.array_equal(c, cref))\n \n if idx in [0, 2]:\n enc = ConvEncoder(rate=rs[idx], constraint_length=mus[idx]) \n c = enc(u).numpy()\n self.assertTrue(np.array_equal(c, cref)) \n \n def test_batch(self):\n \"\"\"Test that all samples in batch yield same output (for same input).\n \"\"\"\n bs = 100\n k = 120\n\n source = BinarySource()\n enc = ConvEncoder(rate=0.5, constraint_length=7)\n\n b = source([1, 15, k])\n b_rep = tf.tile(b, [bs, 1, 1])\n\n # and run tf version (to be tested)\n c = enc(b_rep).numpy()\n\n for i in range(bs):\n self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))\n\n def test_dtypes_flexible(self):\n \"\"\"Test that encoder supports variable dtypes and\n yields same result.\"\"\"\n\n dt_supported = (tf.float16, tf.float32, tf.float64, tf.int8,\n tf.int32, tf.int64, tf.uint8, tf.uint16, tf.uint32)\n\n bs = 10\n k = 32\n\n source = BinarySource()\n\n enc_ref = ConvEncoder(rate=0.5,\n constraint_length=7,\n output_dtype=tf.float32)\n\n u = source([bs, k])\n c_ref = enc_ref(u)\n\n for dt in dt_supported:\n enc = ConvEncoder(rate=0.5,\n constraint_length=7,\n output_dtype=dt)\n u_dt = tf.cast(u, dt)\n c = enc(u_dt)\n\n c_32 = tf.cast(c, tf.float32)\n\n self.assertTrue(np.array_equal(c_ref.numpy(), c_32.numpy()))\n\n def test_tf_fun(self):\n \"\"\"Test that tf.function decorator works and XLA is supported\"\"\"\n\n @tf.function\n def run_graph(u):\n return enc(u)\n\n @tf.function(jit_compile=True)\n def run_graph_xla(u):\n return enc(u)\n\n bs = 10\n k = 100\n\n source = BinarySource()\n enc = ConvEncoder(rate=0.5, constraint_length=7)\n\n # test that for 
arbitrary input the compiled graph can be executed\n u = source([bs, k])\n x = run_graph(u).numpy()\n\n # execute the graph twice\n x = run_graph(u).numpy()\n\n # and change batch_size\n u = source([bs+1, k])\n x = run_graph(u).numpy()\n\n # check XLA\n x = run_graph_xla(u).numpy()\n u = source([bs, k])\n x = run_graph_xla(u).numpy()\n\n", "#\n# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n\"\"\"Layers implementing upsampling\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Layer\nfrom tensorflow.experimental.numpy import swapaxes\nfrom sionna.utils.tensors import flatten_last_dims\n\nclass Upsampling(Layer):\n \"\"\"Upsampling(samples_per_symbol, axis=-1, **kwargs)\n\n Upsamples a tensor along a specified axis by inserting zeros\n between samples.\n\n Parameters\n ----------\n samples_per_symbol: int\n The upsampling factor. If ``samples_per_symbol`` is equal to `n`,\n then the upsampled axis will be `n`-times longer.\n\n axis: int\n The dimension to be upsampled. Must not be the first dimension.\n\n Input\n -----\n x : [...,n,...], tf.DType\n The tensor to be upsampled. `n` is the size of the `axis` dimension.\n\n Output\n ------\n y : [...,n*samples_per_symbol,...], same dtype as ``x``\n The upsampled tensor.\n \"\"\"\n def __init__(self, samples_per_symbol, axis=-1, **kwargs):\n super().__init__(**kwargs)\n self._samples_per_symbol = samples_per_symbol\n self._axis = axis\n\n def build(self, input_shape):\n paddings = []\n for _ in range(len(input_shape)):\n paddings.append([0, 0])\n paddings.append([0, self._samples_per_symbol-1])\n self._paddings = paddings\n\n def call(self, inputs):\n x = swapaxes(inputs, self._axis, -1)\n x = tf.expand_dims(x, -1)\n x = tf.pad(x,\n self._paddings,\n constant_values=tf.cast(0, dtype=x.dtype))\n x = flatten_last_dims(x, 2)\n x = swapaxes(x, -1, self._axis)\n return x\n" ]
[ [ "numpy.array", "numpy.array_equal", "numpy.zeros", "tensorflow.config.experimental.set_memory_growth", "numpy.load", "tensorflow.reshape", "tensorflow.function", "tensorflow.keras.Model", "tensorflow.tile", "tensorflow.config.list_physical_devices", "tensorflow.keras.Input", "tensorflow.config.set_visible_devices", "tensorflow.cast" ], [ "tensorflow.cast", "tensorflow.expand_dims", "tensorflow.experimental.numpy.swapaxes" ] ]
hphphp123321/DeepKE
[ "f7efd3fc87d3bf88783a41efc3c09dca7a986013" ]
[ "example/ner/few-shot/run.py" ]
[ "import os\n\nimport hydra\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]='1'\nimport logging\nimport sys\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), \"../\")))\n\nfrom hydra import utils\nfrom torch.utils.data import DataLoader\nfrom deepke.name_entity_re.few_shot.models.model import PromptBartModel, PromptGeneratorModel\nfrom deepke.name_entity_re.few_shot.module.datasets import ConllNERProcessor, ConllNERDataset\nfrom deepke.name_entity_re.few_shot.module.train import Trainer\nfrom deepke.name_entity_re.few_shot.module.metrics import Seq2SeqSpanMetric\nfrom deepke.name_entity_re.few_shot.utils.util import get_loss, set_seed\nfrom deepke.name_entity_re.few_shot.module.mapping_type import mit_movie_mapping, mit_restaurant_mapping, atis_mapping\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\nfrom tensorboardX import SummaryWriter\nwriter = SummaryWriter(log_dir='logs')\n\nlogging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nDATASET_CLASS = {\n 'conll2003': ConllNERDataset,\n 'mit-movie': ConllNERDataset,\n 'mit-restaurant': ConllNERDataset,\n 'atis': ConllNERDataset\n}\n\nDATA_PROCESS = {\n 'conll2003': ConllNERProcessor,\n 'mit-movie': ConllNERProcessor,\n 'mit-restaurant': ConllNERProcessor,\n 'atis': ConllNERProcessor\n}\n\nDATA_PATH = {\n 'conll2003': {'train': 'data/conll2003/train.txt',\n 'dev': 'data/conll2003/dev.txt',\n 'test': 'data/conll2003/test.txt'},\n 'mit-movie': {'train': 'data/mit-movie/20-shot-train.txt',\n 'dev': 'data/mit-movie/test.txt'},\n 'mit-restaurant': {'train': 'data/mit-restaurant/10-shot-train.txt',\n 'dev': 'data/mit-restaurant/test.txt'},\n 'atis': {'train': 'data/atis/20-shot-train.txt',\n 'dev': 'data/atis/test.txt'}\n}\n\nMAPPING = {\n 'conll2003': {'loc': '<<location>>',\n 'per': '<<person>>',\n 'org': '<<organization>>',\n 'misc': '<<others>>'},\n 'mit-movie': mit_movie_mapping,\n 'mit-restaurant': mit_restaurant_mapping,\n 'atis': atis_mapping\n}\n\n@hydra.main(config_path=\"conf/config.yaml\")\ndef main(cfg):\n cwd = utils.get_original_cwd()\n cfg.cwd = cwd\n print(cfg)\n \n data_path = DATA_PATH[cfg.dataset_name]\n for mode, path in data_path.items():\n data_path[mode] = os.path.join(cfg.cwd, path)\n dataset_class, data_process = DATASET_CLASS[cfg.dataset_name], DATA_PROCESS[cfg.dataset_name]\n mapping = MAPPING[cfg.dataset_name]\n\n set_seed(cfg.seed) # set seed, default is 1\n if cfg.save_path is not None: # make save_path dir\n cfg.save_path = os.path.join(cfg.save_path, cfg.dataset_name+\"_\"+str(cfg.batch_size)+\"_\"+str(cfg.learning_rate)+cfg.notes)\n if not os.path.exists(cfg.save_path):\n os.makedirs(cfg.save_path, exist_ok=True)\n \n process = data_process(data_path=data_path, mapping=mapping, bart_name=cfg.bart_name, learn_weights=cfg.learn_weights)\n train_dataset = dataset_class(data_processor=process, mode='train')\n train_dataloader = DataLoader(train_dataset, collate_fn=train_dataset.collate_fn, batch_size=cfg.batch_size, num_workers=4)\n \n dev_dataset = dataset_class(data_processor=process, mode='dev')\n dev_dataloader = DataLoader(dev_dataset, collate_fn=dev_dataset.collate_fn, batch_size=cfg.batch_size, num_workers=4)\n\n label_ids = list(process.mapping2id.values())\n\n prompt_model = PromptBartModel(tokenizer=process.tokenizer, label_ids=label_ids, args=cfg)\n model = 
PromptGeneratorModel(prompt_model=prompt_model, bos_token_id=0,\n eos_token_id=1,\n max_length=cfg.tgt_max_len, max_len_a=cfg.src_seq_ratio, num_beams=cfg.num_beams, do_sample=False,\n repetition_penalty=1, length_penalty=cfg.length_penalty, pad_token_id=1,\n restricter=None)\n metrics = Seq2SeqSpanMetric(eos_token_id=1, num_labels=len(label_ids), target_type='word')\n loss = get_loss\n\n trainer = Trainer(train_data=train_dataloader, dev_data=dev_dataloader, test_data=None, model=model, args=cfg, logger=logger, loss=loss,\n metrics=metrics, writer=writer)\n trainer.train()\n\n writer.close()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.utils.data.DataLoader" ] ]