repo_name: string (6-130 chars) | hexsha: list | file_path: list | code: list | apis: list
brandontrabucco/im2txt_attend
[ "b1381574139a5dc54e4a6f6635bf6cf676e1dad8" ]
[ "im2txt_attend/data/build_mscoco_data.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Converts MSCOCO data to TFRecord file format with SequenceExample protos.\n\nThe MSCOCO images are expected to reside in JPEG files located in the following\ndirectory structure:\n\n train_image_dir/COCO_train2014_000000000151.jpg\n train_image_dir/COCO_train2014_000000000260.jpg\n ...\n\nand\n\n val_image_dir/COCO_val2014_000000000042.jpg\n val_image_dir/COCO_val2014_000000000073.jpg\n ...\n\nThe MSCOCO annotations JSON files are expected to reside in train_captions_file\nand val_captions_file respectively.\n\nThis script converts the combined MSCOCO data into sharded data files consisting\nof 256, 4 and 8 TFRecord files, respectively:\n\n output_dir/train-00000-of-00256\n output_dir/train-00001-of-00256\n ...\n output_dir/train-00255-of-00256\n\nand\n\n output_dir/val-00000-of-00004\n ...\n output_dir/val-00003-of-00004\n\nand\n\n output_dir/test-00000-of-00008\n ...\n output_dir/test-00007-of-00008\n\nEach TFRecord file contains ~2300 records. Each record within the TFRecord file\nis a serialized SequenceExample proto consisting of precisely one image-caption\npair. Note that each image has multiple captions (usually 5) and therefore each\nimage is replicated multiple times in the TFRecord files.\n\nThe SequenceExample proto contains the following fields:\n\n context:\n image/image_id: integer MSCOCO image identifier\n image/data: string containing JPEG encoded image in RGB colorspace\n\n feature_lists:\n image/caption: list of strings containing the (tokenized) caption words\n image/caption_ids: list of integer ids corresponding to the caption words\n\nThe captions are tokenized using the NLTK (http://www.nltk.org/) word tokenizer.\nThe vocabulary of word identifiers is constructed from the sorted list (by\ndescending frequency) of word tokens in the training set. Only tokens appearing\nat least 4 times are considered; all other words get the \"unknown\" word id.\n\nNOTE: This script will consume around 100GB of disk space because each image\nin the MSCOCO dataset is replicated ~5 times (once per caption) in the output.\nThis is done for two reasons:\n 1. In order to better shuffle the training data.\n 2. 
It makes it easier to perform asynchronous preprocessing of each image in\n TensorFlow.\n\nRunning this script using 16 threads may take around 1 hour on a HP Z420.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import Counter\nfrom collections import namedtuple\nfrom datetime import datetime\nimport json\nimport os.path\nimport random\nimport sys\nimport threading\n\n\n\nimport nltk.tokenize\nimport numpy as np\nfrom six.moves import xrange\nimport tensorflow as tf\n\ntf.flags.DEFINE_string(\"train_image_dir\", \"/tmp/train2014/\",\n \"Training image directory.\")\ntf.flags.DEFINE_string(\"val_image_dir\", \"/tmp/val2014\",\n \"Validation image directory.\")\n\ntf.flags.DEFINE_string(\"train_captions_file\", \"/tmp/captions_train2014.json\",\n \"Training captions JSON file.\")\ntf.flags.DEFINE_string(\"val_captions_file\", \"/tmp/captions_val2014.json\",\n \"Validation captions JSON file.\")\n\ntf.flags.DEFINE_string(\"output_dir\", \"/tmp/\", \"Output data directory.\")\n\ntf.flags.DEFINE_integer(\"train_shards\", 256,\n \"Number of shards in training TFRecord files.\")\ntf.flags.DEFINE_integer(\"val_shards\", 4,\n \"Number of shards in validation TFRecord files.\")\ntf.flags.DEFINE_integer(\"test_shards\", 8,\n \"Number of shards in testing TFRecord files.\")\n\ntf.flags.DEFINE_string(\"start_word\", \"<S>\",\n \"Special word added to the beginning of each sentence.\")\ntf.flags.DEFINE_string(\"end_word\", \"</S>\",\n \"Special word added to the end of each sentence.\")\ntf.flags.DEFINE_string(\"unknown_word\", \"<UNK>\",\n \"Special word meaning 'unknown'.\")\ntf.flags.DEFINE_integer(\"min_word_count\", 4,\n \"The minimum number of occurrences of each word in the \"\n \"training set for inclusion in the vocabulary.\")\ntf.flags.DEFINE_string(\"word_counts_output_file\", \"/tmp/word_counts.txt\",\n \"Output vocabulary file of word counts.\")\n\ntf.flags.DEFINE_integer(\"num_threads\", 8,\n \"Number of threads to preprocess the images.\")\n\nFLAGS = tf.flags.FLAGS\n\nImageMetadata = namedtuple(\"ImageMetadata\",\n [\"image_id\", \"filename\", \"captions\"])\n\n\nclass Vocabulary(object):\n \"\"\"Simple vocabulary wrapper.\"\"\"\n\n def __init__(self, vocab, unk_id):\n \"\"\"Initializes the vocabulary.\n\n Args:\n vocab: A dictionary of word to word_id.\n unk_id: Id of the special 'unknown' word.\n \"\"\"\n self._vocab = vocab\n self._unk_id = unk_id\n\n def word_to_id(self, word):\n \"\"\"Returns the integer id of a word string.\"\"\"\n if word in self._vocab:\n return self._vocab[word]\n else:\n return self._unk_id\n\n\nclass ImageDecoder(object):\n \"\"\"Helper class for decoding images in TensorFlow.\"\"\"\n\n def __init__(self):\n # Create a single TensorFlow Session for all image decoding calls.\n self._sess = tf.Session()\n\n # TensorFlow ops for JPEG decoding.\n self._encoded_jpeg = tf.placeholder(dtype=tf.string)\n self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)\n\n def decode_jpeg(self, encoded_jpeg):\n image = self._sess.run(self._decode_jpeg,\n feed_dict={self._encoded_jpeg: encoded_jpeg})\n assert len(image.shape) == 3\n assert image.shape[2] == 3\n return image\n\n\ndef _int64_feature(value):\n \"\"\"Wrapper for inserting an int64 Feature into a SequenceExample proto.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n \"\"\"Wrapper for inserting a bytes Feature into a SequenceExample 
proto.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))\n\n\ndef _int64_feature_list(values):\n \"\"\"Wrapper for inserting an int64 FeatureList into a SequenceExample proto.\"\"\"\n return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])\n\n\ndef _bytes_feature_list(values):\n \"\"\"Wrapper for inserting a bytes FeatureList into a SequenceExample proto.\"\"\"\n return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])\n\n\ndef _to_sequence_example(image, decoder, vocab):\n \"\"\"Builds a SequenceExample proto for an image-caption pair.\n\n Args:\n image: An ImageMetadata object.\n decoder: An ImageDecoder object.\n vocab: A Vocabulary object.\n\n Returns:\n A SequenceExample proto.\n \"\"\"\n with tf.gfile.FastGFile(image.filename, \"r\") as f:\n encoded_image = f.read()\n\n try:\n decoder.decode_jpeg(encoded_image)\n except (tf.errors.InvalidArgumentError, AssertionError):\n print(\"Skipping file with invalid JPEG data: %s\" % image.filename)\n return\n\n context = tf.train.Features(feature={\n \"image/image_id\": _int64_feature(image.image_id),\n \"image/data\": _bytes_feature(encoded_image),\n })\n\n assert len(image.captions) == 1\n caption = image.captions[0]\n caption_ids = [vocab.word_to_id(word) for word in caption]\n feature_lists = tf.train.FeatureLists(feature_list={\n \"image/caption\": _bytes_feature_list(caption),\n \"image/caption_ids\": _int64_feature_list(caption_ids)\n })\n sequence_example = tf.train.SequenceExample(\n context=context, feature_lists=feature_lists)\n\n return sequence_example\n\n\ndef _process_image_files(thread_index, ranges, name, images, decoder, vocab,\n num_shards):\n \"\"\"Processes and saves a subset of images as TFRecord files in one thread.\n\n Args:\n thread_index: Integer thread identifier within [0, len(ranges)].\n ranges: A list of pairs of integers specifying the ranges of the dataset to\n process in parallel.\n name: Unique identifier specifying the dataset.\n images: List of ImageMetadata.\n decoder: An ImageDecoder object.\n vocab: A Vocabulary object.\n num_shards: Integer number of shards for the output files.\n \"\"\"\n # Each thread produces N shards where N = num_shards / num_threads. For\n # instance, if num_shards = 128, and num_threads = 2, then the first thread\n # would produce shards [0, 64).\n num_threads = len(ranges)\n assert not num_shards % num_threads\n num_shards_per_batch = int(num_shards / num_threads)\n\n shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],\n num_shards_per_batch + 1).astype(int)\n num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]\n\n counter = 0\n for s in xrange(num_shards_per_batch):\n # Generate a sharded version of the file name, e.g. 
'train-00002-of-00010'\n shard = thread_index * num_shards_per_batch + s\n output_filename = \"%s-%.5d-of-%.5d\" % (name, shard, num_shards)\n output_file = os.path.join(FLAGS.output_dir, output_filename)\n writer = tf.python_io.TFRecordWriter(output_file)\n\n shard_counter = 0\n images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n for i in images_in_shard:\n image = images[i]\n\n sequence_example = _to_sequence_example(image, decoder, vocab)\n if sequence_example is not None:\n writer.write(sequence_example.SerializeToString())\n shard_counter += 1\n counter += 1\n\n if not counter % 1000:\n print(\"%s [thread %d]: Processed %d of %d items in thread batch.\" %\n (datetime.now(), thread_index, counter, num_images_in_thread))\n sys.stdout.flush()\n\n writer.close()\n print(\"%s [thread %d]: Wrote %d image-caption pairs to %s\" %\n (datetime.now(), thread_index, shard_counter, output_file))\n sys.stdout.flush()\n shard_counter = 0\n print(\"%s [thread %d]: Wrote %d image-caption pairs to %d shards.\" %\n (datetime.now(), thread_index, counter, num_shards_per_batch))\n sys.stdout.flush()\n\n\ndef _process_dataset(name, images, vocab, num_shards):\n \"\"\"Processes a complete data set and saves it as a TFRecord.\n\n Args:\n name: Unique identifier specifying the dataset.\n images: List of ImageMetadata.\n vocab: A Vocabulary object.\n num_shards: Integer number of shards for the output files.\n \"\"\"\n # Break up each image into a separate entity for each caption.\n images = [ImageMetadata(image.image_id, image.filename, [caption])\n for image in images for caption in image.captions]\n\n # Shuffle the ordering of images. Make the randomization repeatable.\n random.seed(12345)\n random.shuffle(images)\n\n # Break the images into num_threads batches. Batch i is defined as\n # images[ranges[i][0]:ranges[i][1]].\n num_threads = min(num_shards, FLAGS.num_threads)\n spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)\n ranges = []\n threads = []\n for i in xrange(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n # Create a utility for decoding JPEG images to run sanity checks.\n decoder = ImageDecoder()\n\n # Launch a thread for each batch.\n print(\"Launching %d threads for spacings: %s\" % (num_threads, ranges))\n for thread_index in xrange(len(ranges)):\n args = (thread_index, ranges, name, images, decoder, vocab, num_shards)\n t = threading.Thread(target=_process_image_files, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print(\"%s: Finished processing all %d image-caption pairs in data set '%s'.\" %\n (datetime.now(), len(images), name))\n\n\ndef _create_vocab(captions):\n \"\"\"Creates the vocabulary of word to word_id.\n\n The vocabulary is saved to disk in a text file of word counts. 
The id of each\n word in the file is its corresponding 0-based line number.\n\n Args:\n captions: A list of lists of strings.\n\n Returns:\n A Vocabulary object.\n \"\"\"\n print(\"Creating vocabulary.\")\n counter = Counter()\n for c in captions:\n counter.update(c)\n print(\"Total words:\", len(counter))\n\n # Filter uncommon words and sort by descending count.\n word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]\n word_counts.sort(key=lambda x: x[1], reverse=True)\n print(\"Words in vocabulary:\", len(word_counts))\n\n # Write out the word counts file.\n with tf.gfile.FastGFile(FLAGS.word_counts_output_file, \"w\") as f:\n f.write(\"\\n\".join([\"%s %d\" % (w, c) for w, c in word_counts]))\n print(\"Wrote vocabulary file:\", FLAGS.word_counts_output_file)\n\n # Create the vocabulary dictionary.\n reverse_vocab = [x[0] for x in word_counts]\n unk_id = len(reverse_vocab)\n vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])\n vocab = Vocabulary(vocab_dict, unk_id)\n\n return vocab\n\n\ndef _process_caption(caption):\n \"\"\"Processes a caption string into a list of tokenized words.\n\n Args:\n caption: A string caption.\n\n Returns:\n A list of strings; the tokenized caption.\n \"\"\"\n tokenized_caption = [FLAGS.start_word]\n tokenized_caption.extend(nltk.tokenize.word_tokenize(caption.lower()))\n tokenized_caption.append(FLAGS.end_word)\n return tokenized_caption\n\n\ndef _load_and_process_metadata(captions_file, image_dir):\n \"\"\"Loads image metadata from a JSON file and processes the captions.\n\n Args:\n captions_file: JSON file containing caption annotations.\n image_dir: Directory containing the image files.\n\n Returns:\n A list of ImageMetadata.\n \"\"\"\n with tf.gfile.FastGFile(captions_file, \"r\") as f:\n caption_data = json.load(f)\n\n # Extract the filenames.\n id_to_filename = [(x[\"id\"], x[\"file_name\"]) for x in caption_data[\"images\"]]\n\n # Extract the captions. 
Each image_id is associated with multiple captions.\n id_to_captions = {}\n for annotation in caption_data[\"annotations\"]:\n image_id = annotation[\"image_id\"]\n caption = annotation[\"caption\"]\n id_to_captions.setdefault(image_id, [])\n id_to_captions[image_id].append(caption)\n\n assert len(id_to_filename) == len(id_to_captions)\n assert set([x[0] for x in id_to_filename]) == set(id_to_captions.keys())\n print(\"Loaded caption metadata for %d images from %s\" %\n (len(id_to_filename), captions_file))\n\n # Process the captions and combine the data into a list of ImageMetadata.\n print(\"Processing captions.\")\n image_metadata = []\n num_captions = 0\n for image_id, base_filename in id_to_filename:\n filename = os.path.join(image_dir, base_filename)\n captions = [_process_caption(c) for c in id_to_captions[image_id]]\n image_metadata.append(ImageMetadata(image_id, filename, captions))\n num_captions += len(captions)\n print(\"Finished processing %d captions for %d images in %s\" %\n (num_captions, len(id_to_filename), captions_file))\n\n return image_metadata\n\n\ndef main(unused_argv):\n def _is_valid_num_shards(num_shards):\n \"\"\"Returns True if num_shards is compatible with FLAGS.num_threads.\"\"\"\n return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads\n\n assert _is_valid_num_shards(FLAGS.train_shards), (\n \"Please make the FLAGS.num_threads commensurate with FLAGS.train_shards\")\n assert _is_valid_num_shards(FLAGS.val_shards), (\n \"Please make the FLAGS.num_threads commensurate with FLAGS.val_shards\")\n assert _is_valid_num_shards(FLAGS.test_shards), (\n \"Please make the FLAGS.num_threads commensurate with FLAGS.test_shards\")\n\n if not tf.gfile.IsDirectory(FLAGS.output_dir):\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n # Load image metadata from caption files.\n mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file,\n FLAGS.train_image_dir)\n mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file,\n FLAGS.val_image_dir)\n\n # Redistribute the MSCOCO data as follows:\n # train_dataset = 100% of mscoco_train_dataset + 85% of mscoco_val_dataset.\n # val_dataset = 5% of mscoco_val_dataset (for validation during training).\n # test_dataset = 10% of mscoco_val_dataset (for final evaluation).\n train_cutoff = int(0.85 * len(mscoco_val_dataset))\n val_cutoff = int(0.90 * len(mscoco_val_dataset))\n\n #train_dataset = mscoco_train_dataset + mscoco_val_dataset[0:train_cutoff]\n #val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff]\n #test_dataset = mscoco_val_dataset[val_cutoff:]\n \n train_dataset = mscoco_train_dataset[:100]\n val_dataset = mscoco_val_dataset[0:20]\n test_dataset = mscoco_val_dataset[20:40]\n\n # Create vocabulary from the training captions.\n train_captions = [c for image in train_dataset for c in image.captions]\n vocab = _create_vocab(train_captions)\n\n _process_dataset(\"train\", train_dataset, vocab, FLAGS.train_shards)\n _process_dataset(\"val\", val_dataset, vocab, FLAGS.val_shards)\n _process_dataset(\"test\", test_dataset, vocab, FLAGS.test_shards)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.train.Int64List", "tensorflow.image.decode_jpeg", "tensorflow.gfile.FastGFile", "numpy.linspace", "tensorflow.flags.DEFINE_string", "numpy.arange", "tensorflow.train.Coordinator", "tensorflow.train.SequenceExample", "tensorflow.placeholder", "tensorflow.python_io.TFRecordWriter", "tensorflow.gfile.MakeDirs", "tensorflow.Session", "tensorflow.gfile.IsDirectory", "tensorflow.flags.DEFINE_integer", "tensorflow.app.run" ] ]
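The module docstring above fixes the SequenceExample layout (context: image/image_id and image/data; feature_lists: image/caption and image/caption_ids). A minimal reader sketch using the same TensorFlow 1.x API the script itself uses; the feature keys come from the docstring, while the helper name parse_record is hypothetical:

import tensorflow as tf

def parse_record(serialized):
    # Context features carry per-image data; sequence features carry the
    # variable-length caption written by _to_sequence_example above.
    context, sequence = tf.parse_single_sequence_example(
        serialized,
        context_features={
            "image/image_id": tf.FixedLenFeature([], dtype=tf.int64),
            "image/data": tf.FixedLenFeature([], dtype=tf.string),
        },
        sequence_features={
            "image/caption": tf.FixedLenSequenceFeature([], dtype=tf.string),
            "image/caption_ids": tf.FixedLenSequenceFeature([], dtype=tf.int64),
        })
    image = tf.image.decode_jpeg(context["image/data"], channels=3)
    return context["image/image_id"], image, sequence["image/caption_ids"]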
ismetshn/qiskit-terra
[ "691ffdf7fe50214cf944783357fdcb5c5054e9c2" ]
[ "qiskit/extensions/standard/u3.py" ]
[ "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nU3 Gate, three-parameter single-qubit gate.\n\"\"\"\n\nimport numpy\nfrom qiskit.circuit import ControlledGate\nfrom qiskit.circuit import Gate\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit import QuantumRegister\nfrom qiskit.util import deprecate_arguments\n\n\n# pylint: disable=cyclic-import\nclass U3Gate(Gate):\n r\"\"\"Generic single-qubit rotation gate with 3 Euler angles.\n\n Implemented using two X90 pulses on IBM Quantum systems:\n\n .. math::\n U2(\\phi, \\lambda) = RZ(\\phi+\\pi/2) RX(\\frac{\\pi}{2}) RZ(\\lambda-\\pi/2)\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n ┌───────────┐\n q_0: ┤ U3(ϴ,φ,λ) ├\n └───────────┘\n\n **Matrix Representation:**\n\n .. math::\n\n \\newcommand{\\th}{\\frac{\\theta}{2}}\n\n U3(\\theta, \\phi, \\lambda) =\n \\begin{pmatrix}\n \\cos(\\th) & -e^{i\\lambda}\\sin(\\th) \\\\\n e^{i\\phi}\\sin(\\th) & e^{i(\\phi+\\lambda)}\\cos(\\th)\n \\end{pmatrix}\n\n **Examples:**\n\n .. math::\n\n U3(\\theta, -\\frac{\\pi}{2}, \\frac{\\pi}{2}) = RX(\\theta)\n\n .. math::\n\n U3(\\theta, 0, 0) = RY(\\theta)\n \"\"\"\n\n def __init__(self, theta, phi, lam, label=None):\n \"\"\"Create new U3 gate.\"\"\"\n super().__init__('u3', 1, [theta, phi, lam], label=label)\n\n def inverse(self):\n r\"\"\"Return inverted U3 gate.\n\n :math:`U3(\\theta,\\phi,\\lambda)^{\\dagger} = U3(-\\theta,-\\lambda,-\\phi)`\n \"\"\"\n return U3Gate(-self.params[0], -self.params[2], -self.params[1])\n\n def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None):\n \"\"\"Return a (multi-)controlled-U3 gate.\n\n Args:\n num_ctrl_qubits (int): number of control qubits.\n label (str or None): An optional label for the gate [Default: None]\n ctrl_state (int or str or None): control state expressed as integer,\n string (e.g. '110'), or None. 
If None, use all 1s.\n\n Returns:\n ControlledGate: controlled version of this gate.\n \"\"\"\n if ctrl_state is None:\n if num_ctrl_qubits == 1:\n return CU3Gate(*self.params)\n return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label,\n ctrl_state=ctrl_state)\n\n def to_matrix(self):\n \"\"\"Return a Numpy.array for the U3 gate.\"\"\"\n theta, phi, lam = self.params\n theta, phi, lam = float(theta), float(phi), float(lam)\n return numpy.array([\n [\n numpy.cos(theta / 2),\n -numpy.exp(1j * lam) * numpy.sin(theta / 2)\n ],\n [\n numpy.exp(1j * phi) * numpy.sin(theta / 2),\n numpy.exp(1j * (phi + lam)) * numpy.cos(theta / 2)\n ]\n ], dtype=complex)\n\n\n@deprecate_arguments({'q': 'qubit'})\ndef u3(self, theta, phi, lam, qubit, *, q=None): # pylint: disable=invalid-name,unused-argument\n \"\"\"Apply :class:`~qiskit.extensions.standard.U3Gate`.\"\"\"\n return self.append(U3Gate(theta, phi, lam), [qubit], [])\n\n\nQuantumCircuit.u3 = u3\n\n\nclass CU3Meta(type):\n \"\"\"A metaclass to ensure that Cu3Gate and CU3Gate are of the same type.\n\n Can be removed when Cu3Gate gets removed.\n \"\"\"\n @classmethod\n def __instancecheck__(mcs, inst):\n return type(inst) in {CU3Gate, Cu3Gate} # pylint: disable=unidiomatic-typecheck\n\n\nclass CU3Gate(ControlledGate, metaclass=CU3Meta):\n r\"\"\"Controlled-U3 gate (3-parameter two-qubit gate).\n\n This is a controlled version of the U3 gate (generic single qubit rotation).\n It is restricted to 3 parameters, and so cannot cover generic two-qubit\n controlled gates.\n\n **Circuit symbol:**\n\n .. parsed-literal::\n\n ┌───────────┐\n q_0: ┤ U3(ϴ,φ,λ) ├\n └─────┬─────┘\n q_1: ──────■──────\n\n\n **Matrix representation:**\n\n .. math::\n\n \\newcommand{\\th}{\\frac{\\theta}{2}}\n\n CU3(\\theta, \\phi, \\lambda)\\ q_1, q_0=\n |0\\rangle\\langle 0| \\otimes I + |1\\rangle\\langle 1| \\otimes U3(\\theta,\\phi,\\lambda) =\n \\begin{pmatrix}\n 1 & 0 & 0 & 0 \\\\\n 0 & 1 & 0 & 0 \\\\\n 0 & 0 & \\cos(\\th) & -e^{i\\lambda}\\sin(\\th) \\\\\n 0 & 0 & e^{i\\phi}\\sin(\\th) & e^{i(\\phi+\\lambda)}\\cos(\\th)\n \\end{pmatrix}\n\n\n .. note::\n\n In Qiskit's convention, higher qubit indices are more significant\n (little endian convention). In many textbooks, controlled gates are\n presented with the assumption of more significant qubits as control,\n which is how we present the gate above as well, resulting in textbook\n matrices. Instead, if we use q_0 as control, the matrix will be:\n\n .. 
math::\n\n CU3(\\theta, \\phi, \\lambda)\\ q_0, q_1 =\n I \\otimes |0\\rangle\\langle 0| +\n U3(\\theta,\\phi,\\lambda) \\otimes |1\\rangle\\langle 1| =\n \\begin{pmatrix}\n 1 & 0 & 0 & 0 \\\\\n 0 & \\cos(\\th) & 0 & -e^{i\\lambda}\\sin(\\th) \\\\\n 0 & 0 & 1 & 0 \\\\\n 0 & e^{i\\phi}\\sin(\\th) & 0 & e^{i(\\phi+\\lambda)}\\cos(\\th)\n \\end{pmatrix}\n \"\"\"\n\n def __init__(self, theta, phi, lam):\n \"\"\"Create new CU3 gate.\"\"\"\n super().__init__('cu3', 2, [theta, phi, lam], num_ctrl_qubits=1)\n self.base_gate = U3Gate(theta, phi, lam)\n\n def _define(self):\n \"\"\"\n gate cu3(theta,phi,lambda) c, t\n { u1((lambda+phi)/2) c;\n u1((lambda-phi)/2) t;\n cx c,t;\n u3(-theta/2,0,-(phi+lambda)/2) t;\n cx c,t;\n u3(theta/2,phi,0) t;\n }\n \"\"\"\n from qiskit.extensions.standard.u1 import U1Gate\n from qiskit.extensions.standard.x import CXGate\n definition = []\n q = QuantumRegister(2, 'q')\n rule = [\n (U1Gate((self.params[2] + self.params[1]) / 2), [q[0]], []),\n (U1Gate((self.params[2] - self.params[1]) / 2), [q[1]], []),\n (CXGate(), [q[0], q[1]], []),\n (U3Gate(-self.params[0] / 2, 0, -(self.params[1] + self.params[2]) / 2), [q[1]], []),\n (CXGate(), [q[0], q[1]], []),\n (U3Gate(self.params[0] / 2, self.params[1], 0), [q[1]], [])\n ]\n for inst in rule:\n definition.append(inst)\n self.definition = definition\n\n def inverse(self):\n r\"\"\"Return inverted CU3 gate.\n\n :math:`CU3(\\theta,\\phi,\\lambda)^{\\dagger} = CU3(-\\theta,-\\lambda,-\\phi)`\n \"\"\"\n return CU3Gate(-self.params[0], -self.params[2], -self.params[1])\n\n\nclass Cu3Gate(CU3Gate, metaclass=CU3Meta):\n \"\"\"The deprecated CU3Gate class.\"\"\"\n\n def __init__(self, theta, phi, lam):\n import warnings\n warnings.warn('The class Cu3Gate is deprecated as of 0.14.0, and '\n 'will be removed no earlier than 3 months after that release date. '\n 'You should use the class CU3Gate instead.',\n DeprecationWarning, stacklevel=2)\n super().__init__(theta, phi, lam)\n\n\n@deprecate_arguments({'ctl': 'control_qubit',\n 'tgt': 'target_qubit'})\ndef cu3(self, theta, phi, lam, control_qubit, target_qubit,\n *, ctl=None, tgt=None): # pylint: disable=unused-argument\n \"\"\"Apply :class:`~qiskit.extensions.standard.CU3Gate`.\"\"\"\n return self.append(CU3Gate(theta, phi, lam), [control_qubit, target_qubit], [])\n\n\nQuantumCircuit.cu3 = cu3\n" ]
[ [ "numpy.exp", "numpy.cos", "numpy.sin" ] ]
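The corrected U3 matrix in the docstring can be checked numerically against the RY example it quotes. A small standalone sketch in plain numpy, mirroring the to_matrix method above:

import numpy as np

def u3_matrix(theta, phi, lam):
    # Same matrix that U3Gate.to_matrix builds above.
    return np.array([
        [np.cos(theta / 2), -np.exp(1j * lam) * np.sin(theta / 2)],
        [np.exp(1j * phi) * np.sin(theta / 2),
         np.exp(1j * (phi + lam)) * np.cos(theta / 2)]])

theta = 0.3
# U3(theta, 0, 0) reduces to the real rotation RY(theta).
ry = np.array([[np.cos(theta / 2), -np.sin(theta / 2)],
               [np.sin(theta / 2), np.cos(theta / 2)]])
assert np.allclose(u3_matrix(theta, 0.0, 0.0), ry)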
rosocz/CarND-Capstone
[ "39831a307ca71e9d5fb7a7deb7a7a1a58cabfb44" ]
[ "train_nn/train.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\nimport random\nimport cv2\nimport csv\nimport math\nimport glob\nfrom sklearn import model_selection\nfrom skimage.transform import rescale\n\nfrom keras import backend as K\nfrom keras import models, optimizers\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, Dense, Dropout, Flatten, Lambda, Activation, MaxPooling2D, Reshape, Input, concatenate\nfrom keras.optimizers import Adam\nfrom keras.utils.np_utils import to_categorical\nfrom keras.callbacks import ModelCheckpoint\n\nROOT_PATH = './'\nBATCH_SIZE = 64\nEPOCHS = 100\nNUM_CLASSES = 4\n\nIMAGE_HEIGHT = 256\nIMAGE_WIDTH = 128\nIMAGE_CHANNEL = 3\n\nAUGMENT_INDEX = 6\n\nMODEL_FILE_NAME = './tl_classifier.h5'\n\n# check for GPU\nfrom tensorflow.python.client import device_lib\nprint(device_lib.list_local_devices())\n\ndef create_labeled_list():\n with open('traffic_light_train.csv', 'w') as csvfile:\n mywriter = csv.writer(csvfile)\n mywriter.writerow(['path', 'class', 'color'])\n\n classes_distribution = [0, 0, 0, 0]\n for myclass, directory in enumerate(['NoTrafficLight', 'Red', 'Yellow', 'Green']):\n for filename in glob.glob('./training_data/real/{}/*.png'.format(directory)):\n filename = '/'.join(filename.split('\\\\'))\n mywriter.writerow([filename, myclass, directory])\n if (directory == 'Red'):\n classes_distribution[0] += 1\n if (directory == 'Green'):\n classes_distribution[1] += 1\n if (directory ==\"Yellow\"):\n classes_distribution[2] += 1\n if (directory == 'NoTrafficLight'):\n classes_distribution[3] += 1\n\ndef analyse_data_distribution(data):\n distribution = data['color'].value_counts()\n # max_count = distribution.max()\n # print(max_count - distribution['Red'])\n\n return distribution\n\n\ndef random_brightness(image):\n # Convert 2 HSV colorspace from RGB colorspace\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n # Generate new random brightness\n rand = random.uniform(0.3, 1.0)\n hsv[:, :, 2] = rand*hsv[:, :, 2]\n # Convert back to RGB colorspace\n new_img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n return new_img\n\n\ndef zoom(image):\n zoom_pix = random.randint(0, 10)\n zoom_factor = 1 + (2*zoom_pix)/IMAGE_HEIGHT\n image = cv2.resize(image, None, fx=zoom_factor,\n fy=zoom_factor, interpolation=cv2.INTER_LINEAR)\n top_crop = (image.shape[0] - IMAGE_HEIGHT)//2\n left_crop = (image.shape[1] - IMAGE_WIDTH)//2\n image = image[top_crop: top_crop+IMAGE_HEIGHT,\n left_crop: left_crop+IMAGE_WIDTH]\n return image\n\ndef noise(image):\n row,col,ch = image.shape\n s_vs_p = 0.5\n amount = 0.004\n out = np.copy(image)\n # Salt mode\n num_salt = np.ceil(amount * image.size * s_vs_p)\n coords = [np.random.randint(0, i - 1, int(num_salt))\n for i in image.shape]\n out[coords] = 1\n\n # Pepper mode\n num_pepper = np.ceil(amount* image.size * (1. 
- s_vs_p))\n coords = [np.random.randint(0, i - 1, int(num_pepper))\n for i in image.shape]\n out[coords] = 0\n\n return out\n\n\n# loads image\ndef get_image(index, data):\n # pair image and color classification\n image = cv2.imread(os.path.join(data['path'].values[index].strip()))\n color = data['class'].values[index]\n\n return [image, color]\n\ndef augment(image):\n\n if (random.randint(0, 1)):\n image = random_brightness(image)\n\n if (random.randint(0, 1)):\n image = cv2.flip(image, 1)\n\n if (random.randint(0, 1)):\n image = zoom(image)\n\n if (random.randint(0, 1)):\n image = noise(image)\n\n return image\n\n#normalize image canvas\ndef normalize_canvas_size(image):\n normalized_canvas = np.ndarray((IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL), dtype=np.uint8)\n if (image.shape[0] > IMAGE_HEIGHT):\n image = rescale(image, 1.0 / math.ceil(float(image.shape[0]) / float(IMAGE_HEIGHT)), mode='constant', multichannel=True, anti_aliasing=True)\n\n if (image.shape[1] > IMAGE_WIDTH):\n image = rescale(image, 1.0 / math.ceil(float(image.shape[1]) / float(IMAGE_WIDTH)), mode='constant', multichannel=True, anti_aliasing=True)\n\n h, w = image.shape[:2]\n normalized_canvas[:h, :w] = image\n\n return normalized_canvas\n\n# generator function to return images batchwise\ndef generator(data, has_augment=False):\n while True:\n # Randomize the indices to make an array\n indices_arr = np.random.permutation(data.count()[0])\n for batch in range(0, len(indices_arr), BATCH_SIZE):\n # slice out the current batch according to batch-size\n current_batch = indices_arr[batch:(batch + BATCH_SIZE)]\n\n # initializing the arrays, x_train and y_train\n x_train = np.empty(\n [0, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL], dtype=np.float32)\n y_train = np.empty([0], dtype=np.int32)\n\n for i in current_batch:\n [image, color] = get_image(i, data)\n\n image = normalize_canvas_size(image)\n\n x_train = np.append(x_train, [image], axis=0)\n y_train = np.append(y_train, [color])\n\n if (has_augment):\n distribution = analyse_data_distribution(data)\n for i in range(0, int((((distribution.max() * AUGMENT_INDEX)-distribution[color])/distribution[color])/BATCH_SIZE)):\n augmented_image = augment(image)\n x_train = np.append(x_train, [augmented_image], axis=0)\n y_train = np.append(y_train, [color])\n y_train = to_categorical(y_train, num_classes=NUM_CLASSES)\n\n yield (x_train, y_train)\n\n\ndef get_model():\n\n model = Sequential()\n\n input_shape=(IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL)\n model.add(Conv2D(32, 16, strides=2, input_shape=input_shape,padding='same', activation='relu'))\n model.add(Lambda(lambda x: x/127.5 - 1.))\n model.add(Conv2D(32, 8, strides=2, padding=\"same\", activation='relu'))\n model.add(MaxPooling2D(2, 2))\n model.add(Conv2D(16, 8, strides=2, padding=\"same\", activation='relu'))\n model.add(MaxPooling2D(2, 2))\n model.add(Flatten())\n model.add(Dropout(.35))\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(.5))\n model.add(Dense(NUM_CLASSES))\n # model.add(Lambda(lambda x: (K.exp(x) + 1e-4) / (K.sum(K.exp(x)) + 1e-4)))\n model.add(Lambda(lambda x: K.tf.nn.softmax(x)))\n\n\n model.compile(optimizer=Adam(lr=5e-4),\n loss='categorical_crossentropy', metrics=['accuracy'])\n\n model.summary()\n\n return model\n\nif __name__ == \"__main__\":\n\n if not os.path.exists('./traffic_light_train.csv'):\n create_labeled_list()\n print('CSV file created successfully')\n else:\n print('CSV already present')\n\n data = pd.read_csv(os.path.join('./traffic_light_train.csv'))\n\n # Split data 
into random training and validation sets\n d_train, d_valid = model_selection.train_test_split(data, test_size=.2)\n\n train_gen = generator(d_train, True)\n validation_gen = generator(d_valid, False)\n\n model = get_model()\n\n # checkpoint to save best weights after each epoch based on the improvement in val_loss\n checkpoint = ModelCheckpoint(MODEL_FILE_NAME, monitor='val_loss', verbose=1,save_best_only=True, mode='min',save_weights_only=False)\n callbacks_list = [checkpoint] #,callback_each_epoch]\n\n print('Training started....')\n\n history = model.fit_generator(\n train_gen,\n steps_per_epoch=len(d_train)//BATCH_SIZE,\n epochs=EPOCHS,\n validation_data=validation_gen,\n validation_steps=len(d_valid)//BATCH_SIZE,\n verbose=1,\n callbacks=callbacks_list\n )\n\n # print(\"Saving model..\")\n # model.save(\"./tl_classifier_keras.h5\")\n # print(\"Model Saved successfully!!\")\n\n # Destroying the current TF graph to avoid clutter from old models / layers\n K.clear_session()\n" ]
[ [ "tensorflow.python.client.device_lib.list_local_devices", "sklearn.model_selection.train_test_split", "numpy.ndarray", "numpy.ceil", "numpy.copy", "numpy.append", "numpy.empty" ] ]
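A quick way to sanity-check the batch generator above is to pull a single batch and confirm the shapes the model expects. A sketch, assuming the functions from train.py are importable and that traffic_light_train.csv plus the PNGs it references exist:

import pandas as pd

data = pd.read_csv('./traffic_light_train.csv')
x_batch, y_batch = next(generator(data, has_augment=False))
# Images are stacked to (batch, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNEL)
# and labels are one-hot over the NUM_CLASSES = 4 light states.
print(x_batch.shape, y_batch.shape)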
kuperov/distrax
[ "dd3363a64017c5eafb3241bb2a3884de50f21427" ]
[ "distrax/_src/bijectors/scalar_affine_test.py" ]
[ "# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `scalar_affine.py`.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport chex\nfrom distrax._src.bijectors import scalar_affine\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\n\nclass ScalarAffineTest(parameterized.TestCase):\n\n def test_properties(self):\n bij = scalar_affine.ScalarAffine(shift=0., scale=1.)\n self.assertTrue(bij.is_constant_jacobian)\n self.assertTrue(bij.is_constant_log_det)\n np.testing.assert_allclose(bij.shift, 0.)\n np.testing.assert_allclose(bij.scale, 1.)\n np.testing.assert_allclose(bij.log_scale, 0.)\n\n def test_raises_if_both_scale_and_log_scale_are_specified(self):\n with self.assertRaises(ValueError):\n scalar_affine.ScalarAffine(shift=0., scale=1., log_scale=0.)\n\n @chex.all_variants\n def test_shapes_are_correct(self):\n k1, k2, k3, k4 = jax.random.split(jax.random.PRNGKey(42), 4)\n x = jax.random.normal(k1, (2, 3, 4, 5))\n shift = jax.random.normal(k2, (4, 5))\n scale = jax.random.uniform(k3, (3, 4, 5)) + 0.1\n log_scale = jax.random.normal(k4, (3, 4, 5))\n bij_no_scale = scalar_affine.ScalarAffine(shift)\n bij_with_scale = scalar_affine.ScalarAffine(shift, scale=scale)\n bij_with_log_scale = scalar_affine.ScalarAffine(shift, log_scale=log_scale)\n for bij in [bij_no_scale, bij_with_scale, bij_with_log_scale]:\n # Forward methods.\n y, logdet = self.variant(bij.forward_and_log_det)(x)\n self.assertEqual(y.shape, (2, 3, 4, 5))\n self.assertEqual(logdet.shape, (2, 3, 4, 5))\n # Inverse methods.\n x, logdet = self.variant(bij.inverse_and_log_det)(y)\n self.assertEqual(x.shape, (2, 3, 4, 5))\n self.assertEqual(logdet.shape, (2, 3, 4, 5))\n\n @chex.all_variants\n def test_forward_methods_are_correct(self):\n key = jax.random.PRNGKey(42)\n x = jax.random.normal(key, (2, 3, 4, 5))\n bij_no_scale = scalar_affine.ScalarAffine(shift=3.)\n bij_with_scale = scalar_affine.ScalarAffine(shift=3., scale=1.)\n bij_with_log_scale = scalar_affine.ScalarAffine(shift=3., log_scale=0.)\n for bij in [bij_no_scale, bij_with_scale, bij_with_log_scale]:\n y, logdet = self.variant(bij.forward_and_log_det)(x)\n np.testing.assert_allclose(y, x + 3., atol=1e-8)\n np.testing.assert_allclose(logdet, 0., atol=1e-8)\n\n @chex.all_variants\n def test_inverse_methods_are_correct(self):\n k1, k2, k3, k4 = jax.random.split(jax.random.PRNGKey(42), 4)\n x = jax.random.normal(k1, (2, 3, 4, 5))\n shift = jax.random.normal(k2, (4, 5))\n scale = jax.random.uniform(k3, (3, 4, 5)) + 0.1\n log_scale = jax.random.normal(k4, (3, 4, 5))\n bij_no_scale = scalar_affine.ScalarAffine(shift)\n bij_with_scale = scalar_affine.ScalarAffine(shift, scale=scale)\n bij_with_log_scale = scalar_affine.ScalarAffine(shift, log_scale=log_scale)\n for bij in [bij_no_scale, bij_with_scale, bij_with_log_scale]:\n y, logdet_fwd = 
self.variant(bij.forward_and_log_det)(x)\n x_rec, logdet_inv = self.variant(bij.inverse_and_log_det)(y)\n np.testing.assert_allclose(x_rec, x, atol=1e-5)\n np.testing.assert_allclose(logdet_fwd, -logdet_inv, atol=3e-6)\n\n @chex.all_variants\n def test_composite_methods_are_consistent(self):\n k1, k2, k3, k4 = jax.random.split(jax.random.PRNGKey(42), 4)\n bij = scalar_affine.ScalarAffine(\n shift=jax.random.normal(k1, (4, 5)),\n log_scale=jax.random.normal(k2, (4, 5)))\n # Forward methods.\n x = jax.random.normal(k3, (2, 3, 4, 5))\n y1 = self.variant(bij.forward)(x)\n logdet1 = self.variant(bij.forward_log_det_jacobian)(x)\n y2, logdet2 = self.variant(bij.forward_and_log_det)(x)\n np.testing.assert_allclose(y1, y2, atol=1e-12)\n np.testing.assert_allclose(logdet1, logdet2, atol=1e-12)\n # Inverse methods.\n y = jax.random.normal(k4, (2, 3, 4, 5))\n x1 = self.variant(bij.inverse)(y)\n logdet1 = self.variant(bij.inverse_log_det_jacobian)(y)\n x2, logdet2 = self.variant(bij.inverse_and_log_det)(y)\n np.testing.assert_allclose(x1, x2, atol=1e-12)\n np.testing.assert_allclose(logdet1, logdet2, atol=1e-12)\n\n @chex.all_variants\n @parameterized.parameters(\n ((5,), (5,), (5,)),\n ((5,), (5,), ()),\n ((5,), (), (5,)),\n ((), (5,), (5,)),\n ((), (), (5,)),\n ((), (5,), ()),\n ((5,), (), ()),\n )\n def test_batched_parameters(self, scale_batch_shape, shift_batch_shape,\n input_batch_shape):\n k1, k2, k3 = jax.random.split(jax.random.PRNGKey(42), 3)\n log_scale = jax.random.normal(k1, scale_batch_shape)\n shift = jax.random.normal(k2, shift_batch_shape)\n bijector = scalar_affine.ScalarAffine(shift, log_scale=log_scale)\n\n x = jax.random.normal(k3, input_batch_shape)\n y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)\n z, logdet_inv = self.variant(bijector.inverse_and_log_det)(x)\n\n output_batch_shape = jnp.broadcast_arrays(log_scale, shift, x)[0].shape\n\n self.assertEqual(y.shape, output_batch_shape)\n self.assertEqual(z.shape, output_batch_shape)\n self.assertEqual(logdet_fwd.shape, output_batch_shape)\n self.assertEqual(logdet_inv.shape, output_batch_shape)\n\n log_scale = jnp.broadcast_to(log_scale, output_batch_shape).flatten()\n shift = jnp.broadcast_to(shift, output_batch_shape).flatten()\n x = jnp.broadcast_to(x, output_batch_shape).flatten()\n y = y.flatten()\n z = z.flatten()\n logdet_fwd = logdet_fwd.flatten()\n logdet_inv = logdet_inv.flatten()\n\n for i in range(np.prod(output_batch_shape)):\n bijector = scalar_affine.ScalarAffine(shift[i], jnp.exp(log_scale[i]))\n this_y, this_logdet_fwd = self.variant(bijector.forward_and_log_det)(x[i])\n this_z, this_logdet_inv = self.variant(bijector.inverse_and_log_det)(x[i])\n np.testing.assert_allclose(this_y, y[i], atol=1e-7)\n np.testing.assert_allclose(this_z, z[i], atol=1e-5)\n np.testing.assert_allclose(this_logdet_fwd, logdet_fwd[i], atol=1e-4)\n np.testing.assert_allclose(this_logdet_inv, logdet_inv[i], atol=1e-4)\n\n def test_jittable(self):\n @jax.jit\n def f(x, b):\n return b.forward(x)\n\n bijector = scalar_affine.ScalarAffine(0, 1)\n x = np.zeros(())\n f(x, bijector)\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.zeros", "numpy.prod", "numpy.testing.assert_allclose" ] ]
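The identities these tests exercise are easy to restate outside distrax: for the scalar affine map y = scale * x + shift, the inverse is (y - shift) / scale and the forward log-det Jacobian is the constant log|scale|, so forward and inverse log-dets cancel. A plain-numpy sketch of that bookkeeping:

import numpy as np

scale, shift = 2.5, -1.0
x = np.linspace(-3.0, 3.0, 7)

y = scale * x + shift                 # forward
x_rec = (y - shift) / scale           # inverse recovers x
logdet_fwd = np.log(np.abs(scale))    # constant Jacobian log-determinant
logdet_inv = -logdet_fwd              # inverse log-det is the negation

assert np.allclose(x_rec, x)
assert np.isclose(logdet_fwd + logdet_inv, 0.0)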
Warmshawn/CaliCompari
[ "34cb5204a1b11a799f94b233189ebcd87816cbc1" ]
[ "Python/calicompari.py" ]
[ "#!/usr/bin/env python\n# Copyright 2013 Jonathan Whitmore\n# Distributed under the Boost Software License, Version 1.0.\n#\n# Permission is hereby granted, free of charge, to any person or organization\n# obtaining a copy of the software and accompanying documentation covered by\n# this license (the \"Software\") to use, reproduce, display, distribute,\n# execute, and transmit the Software, and to prepare derivative works of the\n# Software, and to permit third-parties to whom the Software is furnished to\n# do so, all subject to the following:\n#\n# The copyright notices in the Software and this entire statement, including\n# the above license grant, this restriction and the following disclaimer,\n# must be included in all copies of the Software, in whole or in part, and\n# all derivative works of the Software, unless such copies or derivative\n# works are solely in the form of machine-executable object code generated\n# by a source language processor.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND\n# NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE\n# DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY,\n# WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\nimport sys\nimport os\nimport glob\nimport barak\nfrom barak.fitcont import spline_continuum\nfrom barak import convolve\nfrom barak import interp\n\nimport numpy as np\nimport scipy\nimport scipy as sp\nimport scipy.interpolate as si\nimport scipy.signal as ss\nimport scipy.constants as spc\nimport time\nimport json\nimport datetime\nimport argparse\nfrom configparser import RawConfigParser, SafeConfigParser\nimport random as ra\nimport itertools\nimport pyfits as pf\nimport pickle as pickle\nimport gzip\nimport matplotlib.pylab as pl\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom functools import reduce\n\npl.rcParams['figure.figsize'] = 16, 8 # that's default image size for this interactive session\n\nimport iminuit as mi\n\nc_light = spc.c\n# np.seterr(divide='ignore', invalid='ignore')\n\nhelp_message = '''\nVarious limitations:\nMust have an FTS spectrum w/o gaps\nMust have a telescope spectrum w/ monotonically increasing wavelength per order (gaps are OK)\n'''\n\nclass AutoVivification(dict):\n \"\"\"Implementation of perl's autovivification feature.\n from: http://stackoverflow.com/questions/651794/whats-the-best-way-to-initialize-a-dict-of-dicts-in-python\n Testing:\n\n >>> a = AutoVivification()\n >>> a[1][2][3] = 4\n >>> a[1][3][3] = 5\n >>> a[1][2]['test'] = 6\n >>> print a\n Output:\n\n {1: {2: {'test': 6, 3: 4}, 3: {3: 5}}}\n \"\"\"\n def __getitem__(self, item):\n try:\n return dict.__getitem__(self, item)\n except KeyError:\n value = self[item] = type(self)()\n return value\n\ndef slope_to_array(slope, wavelength_array, flux_array):\n \"\"\"Return a flux array that is modified by a slope\n across the array.\"\"\"\n import numpy as np\n midpoint = np.average([wavelength_array[0], wavelength_array[-1]])\n factor = 1.0 + slope * (wavelength_array - midpoint)\n return factor * flux_array\n\ndef load_exposure(filename='big.gz'):\n \"\"\"load_exposure recreates the final fit \"\"\"\n with gzip.open(filename, 'rb') as file_handle:\n loadexposure = pickle.load(file_handle)\n return loadexposure\n\ndef 
save_exposure(object, filename='big.gz'):\n \"\"\"Save to a large file all the information from the supercalibration\n fitting process. Includes data, and headers.\"\"\"\n with gzip.open(filename, 'wb') as file_handle:\n pickle.dump(object, file_handle, pickle.HIGHEST_PROTOCOL)\n pass\n\ndef save_small_exposure(expo, filename=\"small.gz\"):\n \"\"\"docstring\"\"\"\n small_dictionary = {}\n small_dictionary['Results'] = expo.Results\n try:\n small_dictionary['arc_header'] = expo.arc_header\n except:\n small_dictionary['arc_header'] = \"\"\n try:\n small_dictionary['flux_header'] = expo.flux_header\n except:\n small_dictionary['flux_header'] = \"\"\n try:\n small_dictionary['header'] = expo.header\n except:\n small_dictionary['header'] = \"\"\n try:\n small_dictionary['exposure_file'] = expo.exposure_file\n except:\n small_dictionary['exposure_file'] = expo.exposureFile\n small_dictionary['safe_orders'] = expo.safe_orders\n with gzip.open(filename, 'wb') as file_handle:\n pickle.dump(small_dictionary, file_handle, pickle.HIGHEST_PROTOCOL)\n pass\n\ndef modify_small_exposure(small_dictionary, filename=\"small.gz\"):\n \"\"\"after modifying a small_dictionary file, save the resulting output.\"\"\"\n with gzip.open(filename, 'wb') as file_handle:\n pickle.dump(small_dictionary, file_handle, pickle.HIGHEST_PROTOCOL)\n pass\n\ndef cleaned_data(filename_expo):\n \"\"\"Modifies hand_tweak-ed file to contain only best data.\"\"\"\n infile, expo = filename_expo\n upper_error_bound = expo[\"hand_tweak\"][\"upper_error_bound\"]\n upper_wavelength_cutoff = expo[\"hand_tweak\"][\"upper_wavelength_cutoff\"]\n badorders = expo[\"hand_tweak\"][\"badorders\"]\n orderbegin = expo[\"hand_tweak\"][\"orderbegin\"]\n orderend = expo[\"hand_tweak\"][\"orderend\"]\n offset = expo[\"hand_tweak\"][\"offset\"]\n minimum_number_of_chunks = expo[\"hand_tweak\"][\"minimum_number_of_chunks\"]\n expo['cleaned'] = {}\n expo['cleaned'][500] = {}\n for order in [x for x in expo['safe_orders'] if x not in badorders]:\n if order in expo['Results'][500]:\n mask = (expo['Results'][500][order]['calerr'] < upper_error_bound) & (expo['Results'][500][order]['avwav'] < upper_wavelength_cutoff)\n expo['cleaned'][500][order] = {}\n if np.sum(mask) > minimum_number_of_chunks:\n expo['cleaned'][500][order]['avwav'] = expo['Results'][500][order]['avwav'][mask][orderbegin:orderend]\n expo['cleaned'][500][order]['cal'] = expo['Results'][500][order]['cal'][mask][orderbegin:orderend]\n expo['cleaned'][500][order]['calerr'] = expo['Results'][500][order]['calerr'][mask][orderbegin:orderend]\n expo['cleaned'][500][order]['avpix'] = expo['Results'][500][order]['avpix'][mask][orderbegin:orderend]\n expo['cleaned'][500][order]['R'] = expo['Results'][500][order]['R'][mask][orderbegin:orderend]\n expo['cleaned'][500][order]['Rerr'] = expo['Results'][500][order]['Rerr'][mask][orderbegin:orderend]\n pass\n\ndef hand_tweak( filename_expo,\n help=False,\n color=\"blue\",\n linewidth=2.0,\n clobber=False,\n plot=True,\n vbuffer=800.0,\n *args,\n **kwargs):\n \"\"\"Organized way of hand-tweaking the final calicompari results.\n The input is a already final fit with a dictionary that defines the modifications\n that allows for a reasonable fit.\"\"\"\n tempx = []\n tempy = []\n infile, expo = filename_expo\n if (\"hand_tweak\" in list(expo.keys())) and (clobber == False):\n print(\"Turn clobber=True to modify this.\")\n return\n if \"hand_tweak\" not in list(expo.keys()):\n print(\"Must first create hand_tweak dictionary. 
Some defaults.\")\n print(\"\"\"expo[\"hand_tweak\"] = {}\n expo[\"hand_tweak\"][\"upper_error_bound\"] = 200.0\n expo[\"hand_tweak\"][\"upper_wavelength_cutoff\"] = 7600.0\n expo[\"hand_tweak\"][\"badorders\"] = []\n expo[\"hand_tweak\"][\"orderbegin\"] = 0\n expo[\"hand_tweak\"][\"orderend\"] = -1\n expo[\"hand_tweak\"][\"offset\"] = 0.\n expo[\"hand_tweak\"][\"minimum_number_of_chunks\"] = 5\"\"\")\n\n upper_error_bound = expo[\"hand_tweak\"][\"upper_error_bound\"]\n upper_wavelength_cutoff = expo[\"hand_tweak\"][\"upper_wavelength_cutoff\"]\n badorders = expo[\"hand_tweak\"][\"badorders\"]\n orderbegin = expo[\"hand_tweak\"][\"orderbegin\"]\n orderend = expo[\"hand_tweak\"][\"orderend\"]\n offset = expo[\"hand_tweak\"][\"offset\"]\n minimum_number_of_chunks = expo[\"hand_tweak\"][\"minimum_number_of_chunks\"]\n\n for order in [x for x in expo['safe_orders'] if x not in badorders]:\n if order in expo['Results'][500]:\n mask = (expo['Results'][500][order]['calerr'] < upper_error_bound) & (expo['Results'][500][order]['avwav'] < upper_wavelength_cutoff)\n if np.sum(mask) > minimum_number_of_chunks:\n if plot==True:\n pl.errorbar(expo['Results'][500][order]['avwav'][mask][orderbegin:orderend],\n expo['Results'][500][order]['cal'][mask][orderbegin:orderend] + offset,\n expo['Results'][500][order]['calerr'][mask][orderbegin:orderend], color=color, linewidth=linewidth)\n tempx.append(np.average(expo['Results'][500][order]['avwav'][mask][orderbegin:orderend]))\n tempy.append(np.average(expo['Results'][500][order]['cal'][mask][orderbegin:orderend] + offset,\n weights=1.0/(expo['Results'][500][order]['calerr'][mask][orderbegin:orderend])**2))\n tempx = np.hstack(tempx)\n tempy = np.hstack(tempy)\n # vbuffer = 800.0\n p, res, _, _, _ = np.polyfit(tempx, tempy, 1, full=True)\n wbuffer = 50.0\n pwav = np.arange(np.min(tempx) - wbuffer, np.max(tempx) + wbuffer)\n slope = p[0]\n intercept = p[1]\n expo[\"hand_tweak\"][\"calc_slope\"] = slope\n expo[\"hand_tweak\"][\"calc_intercept\"] = intercept\n if plot==True:\n pl.plot(pwav, slope * pwav + intercept, color=\"black\", label=\"slope: \" + str(round(slope * 1000,2)) + \" m/s/1000 A\")\n pl.ylim(np.average(tempy) - vbuffer, np.average(tempy) + vbuffer)\n pl.legend()\n pl.title(infile)\n pl.xlabel(\"Wavelength (Angstroms)\", fontsize=20.0)\n pl.xticks(fontsize=20.0)\n pl.ylabel(\"v_shift (m/s)\", fontsize=20.0)\n pl.yticks(fontsize=20.0)\n print(\"Filename: \", infile)\n print(\"Slope: \", str(round(slope * 1000, 2)) + \" m/s/1000 A\")\n print(\"Current setup: \")\n print(\" Discard data points with error larger than: \", upper_error_bound)\n print(\" Exclude wavelenths greater than: \", upper_wavelength_cutoff)\n print(\" Chunks each order between indices: \", orderbegin, orderend)\n print(\" Removed orders: \", [order for order in badorders])\n print(\" Offset: \", offset)\n print(\" Min # chunks required / order: \", minimum_number_of_chunks)\n try:\n instrument = expo['flux_header']['INSTRUME']\n if instrument == \"UVES\":\n key = [key for key in list(expo['flux_header'].keys()) if \"WLEN\" in str(key)][0]\n center_wavelength = expo['flux_header'][key]\n expo[\"hand_tweak\"][\"center_wavelength\"] = center_wavelength\n expo[\"hand_tweak\"][\"center_offset\"] = slope * center_wavelength + intercept\n except:\n pass\n if help == True:\n print(\"\"\"Some help:\n expo[\"hand_tweak\"][\"upper_error_bound\"] = 200.0\n expo[\"hand_tweak\"][\"upper_wavelength_cutoff\"] = 7600.0\n expo[\"hand_tweak\"][\"badorders\"] = []\n 
expo[\"hand_tweak\"][\"orderbegin\"] = 0\n expo[\"hand_tweak\"][\"orderend\"] = -1\n expo[\"hand_tweak\"][\"offset\"] = 0.\n expo[\"hand_tweak\"][\"minimum_number_of_chunks\"] = 5\"\"\")\n pass\n\nclass Exposure(object):\n \"\"\"An object class that contains the data for a quasar absorption spectroscopy study.\n\n An exposure has:\n orders which have\n pixels with corresponding\n wavelength\n flux\n error values.\n\n fit.\n \"\"\"\n def __init__(self, arcFile='', reduction_program='', calibration_type='',\n calibration_file='', exposure_file='', header_file='', first_guess_file=''):\n \"\"\"Set up file references, fit starting values, and load the exposure data.\"\"\"\n super(Exposure, self).__init__()\n self.arcFile = arcFile # a calibration Arc File\n self.exposure_file = exposure_file # the science exposure file\n self.reduction_program = reduction_program # reduction software used\n self.calibration_type = calibration_type # Calibration type: iodine, asteroid, none\n self.calibration_file = calibration_file # Calibration File\n self.first_guess_file = first_guess_file # First guesses file\n\n self.header_file = header_file # science and arc file headers\n if self.header_file:\n with gzip.open(self.header_file, 'rb') as file_handle:\n loadheader = pickle.load(file_handle)\n self.arc_header, self.flux_header = loadheader[0], loadheader[1]\n\n self.fitGuess = AutoVivification()\n self.fit_starting = AutoVivification()\n self.fit_starting['initial'] = {}\n self.fit_starting['initial'].update({'shift':-0.003, 'fix_shift':False, 'limit_shift':(-1.5, 1.5), 'error_shift':0.03})\n self.fit_starting['initial'].update({'slope':-0.002, 'fix_slope':False, 'limit_slope':(-2.0, 2.0), 'error_slope':0.04})\n self.fit_starting['initial'].update({'sigma':3.102, 'fix_sigma':False, 'limit_sigma':(1.0, 10.0), 'error_sigma':0.2})\n self.fit_starting['initial'].update({'multiple':1.37, 'fix_multiple':False, 'limit_multiple':(0.1, 20.0), 'error_multiple':0.03})\n self.fit_starting['initial'].update({'offset':0.002, 'fix_offset':False, 'limit_offset':(-2.0, 2.0), 'error_offset':0.03})\n self.fit_starting['initial'].update({'minuit':0, 'fix_minuit':True})\n if self.first_guess_file:\n with open(self.first_guess_file, 'r') as file_handle:\n self.first_guesses = json.load(file_handle)\n else:\n self.first_guesses = self.fit_starting['initial']\n\n self.fitResults = AutoVivification()\n if self.exposure_file.split('.')[-1] == 'fits':\n print(\"A fits exposure file.\")\n self.Orders = {}\n hdu = pf.open(self.exposure_file)\n self.header = hdu[0].header\n for index, table in enumerate(hdu):\n try:\n type(hdu[index].data)\n self.Orders[index] = {}\n self.Orders[index]['wav'] = table.data[0]\n self.Orders[index]['flx'] = table.data[1]\n self.Orders[index]['err'] = table.data[2]\n self.Orders[index]['pix'] = np.arange(len(self.Orders[index]['wav']))\n except:\n self.exposureHeader = hdu[-1].header\n for field in list(self.Orders.keys()):\n if len(self.Orders[field]) < 1:\n print(\"deleting\", field)\n del(self.Orders[field])\n else:\n print(\"Not a fits file.\", self.exposure_file)\n pass\n\n def scangrid(self, *args, **kwargs):\n \"\"\"\n Generator function which returns lists of parameter (name, value)\n 2-tuples via yields (i.e. 
in a for-expression).\n\n The arguments are 4-tuples of (name, numpts, low, high) for each\n parameter; the generator iterations will then visit each of the\n lattice points in a grid with num points between low..high in each\n param.\n \"\"\"\n vec = kwargs.get(\"vec\", [])\n if args:\n var, npts, low, high = args[0]\n for val in np.linspace(low, high, npts):\n newargs = args[1:]\n newvec = vec + [(var, val)]\n newkwargs = kwargs\n newkwargs[\"vec\"] = newvec\n for i in self.scangrid(*newargs, **newkwargs):\n yield i\n else:\n yield vec\n\n def load_reference_spectra(self):\n \"\"\"Load the reference FTS spectrum and attach the overlapping region to each order.\"\"\"\n try:\n iow, iof = np.loadtxt(self.calibration_file)\n except:\n print(\"Consider saving a faster-loading calibration file.\")\n iow, iof = np.loadtxt(self.calibration_file, unpack=True)\n print(\"Reference FTS wavelength range:\", iow[0], iow[-1])\n self.safe_orders = []\n for order in self.Orders:\n self.safe_orders.append(order)\n if (self.Orders[order]['wav'][0] > iow[0] + 40.0) & (self.Orders[order]['wav'][-1] < iow[-1] - 150.0):\n try:\n ok = (self.Orders[order]['wav'][0] - 10 < iow) & (self.Orders[order]['wav'][-1] + 10 > iow)\n if len(iow[ok]) > 200:\n self.Orders[order]['iow'] = iow[ok].copy()\n self.Orders[order]['iof'] = iof[ok].copy()\n except:\n print(\"Order\", order, \"is outside overlap with reference FTS.\")\n for order in self.Orders:\n \"\"\"removes orders\"\"\"\n try:\n len(self.Orders[order]['iow'])\n print(order, \"is safe.\")\n except:\n self.safe_orders.remove(order)\n print(order, \"was removed.\")\n pass\n\n def cleanup(self, verbose=False):\n \"\"\"mask out bad regions of the spectra\n Example config file setup.\n [skylines]\n remove:\n 5589.128 5589.132\n 5865.454 5865.459\n \"\"\"\n parser = SafeConfigParser()\n candidates = glob.glob('config*')\n found = parser.read(candidates)\n try:\n wavekill = parser.get('skylines','remove')\n except:\n print(\"Warning: not removing skylines (if you want this create a config.wavekill file).\")\n if verbose==True:\n print(\"Beginning cleanup of data...\", datetime.datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\"))\n errorcutoff = 0.0\n flxcutoff = 0.0\n sncutoff = 10.0\n for order in self.safe_orders:\n masks = []\n masks.append(self.Orders[order]['err'] > errorcutoff)\n masks.append(self.Orders[order]['flx'] > flxcutoff)\n # print self.Orders[order]['err']\n # print self.Orders[order]['flx']\n masks.append(np.select([self.Orders[order]['err'] > 0],\n [np.nan_to_num(self.Orders[order]['flx']/self.Orders[order]['err']) >= sncutoff]))\n try:\n for killLine in wavekill.splitlines():\n if len(killLine) > 1:\n masks.append(reduce(np.logical_or, \\\n [self.Orders[order]['wav'] < float(killLine.split()[0]), \\\n self.Orders[order]['wav'] > float(killLine.split()[1])]))\n except:\n pass\n self.Orders[order]['mask'] = reduce(np.logical_and, masks)\n pass\n\n def continuum_fit(self, knots=10, plot=False, verbose=False):\n \"\"\"fits a continuum via a spline through the flux values.\"\"\"\n edgeTolerance = 0.1\n remove_orders = []\n for order in self.safe_orders:\n mask = self.Orders[order]['mask']\n if np.sum(mask) < 100:\n remove_orders.append(order)\n for order in remove_orders:\n self.safe_orders.remove(order)\n print(\"Removing from safe_orders (under 100 useable pixels): \", order)\n remove_orders = []\n for order in self.safe_orders:\n mask = self.Orders[order]['mask']\n self.Orders[order]['con'] = np.zeros_like(self.Orders[order]['wav'])\n try:\n s = 
si.LSQUnivariateSpline(self.Orders[order]['wav'][mask],\\\n self.Orders[order]['flx'][mask],\\\n np.linspace(self.Orders[order]['wav'][mask][0]+edgeTolerance,\\\n self.Orders[order]['wav'][mask][-1]-edgeTolerance, knots),\\\n w=self.Orders[order]['err'][mask])\n self.Orders[order]['con'][mask] = s(self.Orders[order]['wav'][mask])\n except:\n remove_orders.append(order)\n for order in remove_orders:\n self.safe_orders.remove(order)\n print(\"Removing from safe_orders (sparse-ness): \", order)\n pass\n\n def continuum_fit_2(self, knots=10, nsig=4.0):\n \"\"\"barak implementation\"\"\"\n knots = knots\n for order in self.safe_orders:\n self.Orders[order]['con'] = np.zeros_like(self.Orders[order]['wav'])\n mask = self.Orders[order]['mask']\n self.Orders[order]['con'][mask] = spline_continuum(self.Orders[order]['wav'][mask],\n self.Orders[order]['flx'][mask],\n self.Orders[order]['err'][mask],\n np.linspace(self.Orders[order]['wav'][mask][0], self.Orders[order]['wav'][mask][-1], knots,),\n nsig=nsig)[0]\n pass\n\n\n def oversample(self):\n \"\"\"sets the minimum spacing in the telescope spectra (mindel)\n for each order over the whole exposure.\n Rename. \"\"\"\n for order in self.safe_orders:\n mask = self.Orders[order]['mask']\n self.Orders[order]['mindel'] = self.Orders[order]['wav'][mask][-1] - self.Orders[order]['wav'][mask][0]\n for i in range(len(self.Orders[order]['wav'][mask]) - 1):\n adjacent_difference = self.Orders[order]['wav'][mask][i+1] - self.Orders[order]['wav'][mask][i]\n if self.Orders[order]['mindel'] > adjacent_difference:\n self.Orders[order]['mindel'] = adjacent_difference\n pass\n\n\n def full_order_shift_scale(self, order=7, verbose=False, veryVerbose=False,\n robustSearch=False,\n first_guesses=None):\n \"\"\"docstring for dictionaryShift\n first_guesses needs to look something like:\n first_guesses = {}\n first_guesses.update({'shift':-0.003,\n 'fix_shift':False,\n 'limit_shift':(-1.5, 1.5),\n 'error_shift':0.03})\n first_guesses.update({'slope':-0.002,\n 'fix_slope':False,\n 'limit_slope':(-2.0, 2.0),\n 'error_slope':0.04})\n first_guesses.update({'sigma':3.102,\n 'fix_sigma':False,\n 'limit_sigma':(1.0, 10.0),\n 'error_sigma':0.2})\n first_guesses.update({'multiple':1.37,\n 'fix_multiple':False,\n 'limit_multiple':(0.1, 20.0),\n 'error_multiple':0.03})\n first_guesses.update({'offset':0.002,\n 'fix_offset':False,\n 'limit_offset':(-2.0, 2.0),\n 'error_offset':0.03})\n first_guesses.update({'minuit':0, 'fix_minuit':True})\n \"\"\"\n if first_guesses is None:\n first_guesses = self.fit_starting['initial']\n try:\n m = mi.Minuit(self.order_shift_and_scale_Akima, order=order, fix_order=True, **first_guesses)\n if veryVerbose==True:\n m.printMode=1\n if robustSearch==True:\n print(\"Robust search. 
Beginning initial scan...\")\n m.scan((\"fshift\", 20, -0.5, 0.5))\n print(\"done.\")\n print(\"Finding initial full order shift/fit\", '\\n', datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n m.set_strategy(2)\n m.migrad()\n self.fitResults['order'][order]['values'] = m.values\n try:\n del self.fitResults['order'][order]['values']['order']\n except:\n pass\n self.fitResults['order'][order]['errors'] = m.errors\n except:\n print(\"Serious problem with order:\", order)\n pass\n\n\n def order_shift_and_scale_Akima(self, order, multiple, shift, sigma, slope, offset, minuit, **kwargs):\n \"\"\"trying to smooth, interpolate, and integrate the fit.\"\"\"\n mask = self.Orders[order]['mask']\n iow = self.Orders[order]['iow']\n iof = self.Orders[order]['iof']\n wav = self.Orders[order]['wav'][mask]\n flx = self.Orders[order]['flx'][mask]\n err = self.Orders[order]['err'][mask]\n con = self.Orders[order]['con'][mask]\n pix = self.Orders[order]['pix'][mask]\n overflx = multiple * slope_to_array(slope,\n wav + shift,\n interp.interp_Akima(wav + shift,\n iow,\n convolve.convolve_constant_dv(iow,\n iof,\n vfwhm=sigma)\n )\n ) + offset\n chi_square = np.sum((overflx - flx/con)**2 / (err/con)**2)\n if minuit == 0:\n return chi_square\n else:\n return chi_square, wav, flx/con, err/con, pix, overflx\n\n def order_shift_and_scale_spline(self, order, multiple, shift, sigma, slope, offset, minuit, **kwargs):\n \"\"\"trying to smooth, interpolate, and integrate the fit.\"\"\"\n mask = self.Orders[order]['mask']\n iow = self.Orders[order]['iow']\n iof = self.Orders[order]['iof']\n wav = self.Orders[order]['wav'][mask]\n flx = self.Orders[order]['flx'][mask]\n err = self.Orders[order]['err'][mask]\n con = self.Orders[order]['con'][mask]\n pix = self.Orders[order]['pix'][mask]\n overflx = multiple * slope_to_array(slope,\n wav + shift,\n interp.interp_spline(wav + shift,\n iow,\n convolve.convolve_constant_dv(iow,\n iof,\n vfwhm=sigma)\n )\n ) + offset\n chi_square = np.sum((overflx - flx/con)**2 / (err/con)**2)\n if minuit == 0:\n return chi_square\n else:\n return chi_square, wav, flx/con, err/con, pix, overflx\n\n def create_bin_arrays(self, order=7, binSize=350, overlap=0.5, iowTolerance=2.0, minPixelsPerBin=100):\n \"\"\"overlap is the fractional overlap or how much the bin is shifted relative to the binSize.\n so overlapping by .5 shifts by half binSize; .33 by .33 binSize. 
\"\"\"\n mask = self.Orders[order]['mask']\n lamb = np.average(self.Orders[order]['wav'][mask])\n try:\n type(self.fitResults[binSize])\n except:\n self.fitResults[binSize] = {}\n try:\n type(self.Orders[order][binSize])\n return\n except:\n self.Orders[order][binSize] = {}\n binAngstroms = lamb * binSize * 1000 / c_light\n temp = []\n mask = self.Orders[order]['mask']\n for x in range(int(1.0/overlap)):\n temp.append(np.arange(self.Orders[order]['wav'][mask][0] + overlap * x * binAngstroms, self.Orders[order]['wav'][mask][-1] + overlap * x * binAngstroms, binAngstroms))\n np.append(temp[0], self.Orders[order]['wav'][mask][-1]) # add last wavelength point to first bin edges array\n iowTolerance = iowTolerance\n minPixelsPerBin = minPixelsPerBin\n COUNTER = 0\n for edgearray in temp:\n for i in range(len(edgearray) - 1):\n if len(self.Orders[order]['wav'][(self.Orders[order]['wav'] > edgearray[i]) & (self.Orders[order]['wav'] <= edgearray[i + 1])]) > minPixelsPerBin:\n self.Orders[order][binSize][COUNTER] = {}\n self.Orders[order][binSize][COUNTER]['ok'] = (self.Orders[order]['wav'] > edgearray[i]) & (self.Orders[order]['wav'] <= edgearray[i + 1])\n self.Orders[order][binSize][COUNTER]['iok'] = (self.Orders[order]['iow'] > edgearray[i] - iowTolerance) & (self.Orders[order]['iow'] <= edgearray[i + 1] + iowTolerance)\n COUNTER += 1\n else:\n print(\"Bin \", i, \" would have had less than \", minPixelsPerBin, \" -- not creating a bin for it.\")\n pass\n\n def full_order_bin_shift_and_scale(self, order=7, binSize=350, override_order_fit=False):\n self.fit_starting['order'][order] = self.fit_starting['initial']\n self.fit_starting['order'][order].update(self.fitResults['order'][order]['values'])\n for singlebin in self.Orders[order][binSize]:\n self.fitResults[binSize][order][singlebin] = {}\n if override_order_fit:\n self.small_bin_shift(order, binSize, singlebin, override_order_fit=True)\n else:\n self.small_bin_shift(order, binSize, singlebin)\n pass\n\n def small_bin_shift(self, order=7, binSize=350, singlebin=2, veryVerbose=False,\n robustSearch=False, override_order_fit=False):\n \"\"\"docstring for smallBinShift\"\"\"\n # TODO check that the full order solution has run.\n try:\n type(self.fitResults['order'][order]['values'])\n except:\n print(\"It doesn't look like the full order was run... \")\n if override_order_fit:\n starting_values = self.first_guesses\n else:\n starting_values = self.fit_starting['order'][order]\n m = mi.Minuit(self.bin_shift_and_tilt_Akima, order=order, binSize=binSize, singlebin=singlebin, fix_order=True, fix_binSize=True, fix_singlebin=True, **starting_values)\n if veryVerbose==True:\n m.printMode=1\n if robustSearch==True:\n print(\"Robust search. 
Beginning initial scan...\")\n m.scan((\"fshift\", 20, -0.5, 0.5))\n print(\"done.\")\n try:\n print(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), \"Finding initial shift/fit for order:\", order, \"and bin:\", singlebin)\n m.set_strategy(2)\n m.migrad()\n self.fitResults[binSize][order][singlebin]['values'] = m.values\n self.fitResults[binSize][order][singlebin]['errors'] = m.errors\n mask = self.Orders[order]['mask']\n ok = reduce(np.logical_and, [self.Orders[order][binSize][singlebin]['ok'], mask])\n iok = self.Orders[order][binSize][singlebin]['iok']\n wav = self.Orders[order]['wav'][ok]\n pix = self.Orders[order]['pix'][ok]\n lamb = np.average(wav)\n avpix = np.average(pix)\n cal = m.values['shift'] * c_light / lamb\n calerr = m.errors['shift'] * c_light / lamb\n R = c_light / m.values['sigma'] / 1000.\n Rerr = c_light / m.errors['sigma'] / 1000.\n self.fitResults[binSize][order][singlebin]['avwav'] = lamb\n self.fitResults[binSize][order][singlebin]['cal'] = cal\n self.fitResults[binSize][order][singlebin]['calerr'] = calerr\n self.fitResults[binSize][order][singlebin]['R'] = R\n self.fitResults[binSize][order][singlebin]['Rerr'] = Rerr\n self.fitResults[binSize][order][singlebin]['avpix'] = avpix\n self.fitResults[binSize][order][singlebin]['converged'] = True\n self.fitResults[binSize][order][singlebin]['values']['minuit'] = 1\n chisq, wav, nflx, nerr, pix, overflx = self.bin_shift_and_tilt_Akima(**self.fitResults[binSize][order][singlebin]['values'])\n self.fitResults[binSize][order][singlebin]['chisq'] = chisq\n self.fitResults[binSize][order][singlebin]['wav'] = wav\n self.fitResults[binSize][order][singlebin]['nflx'] = nflx\n self.fitResults[binSize][order][singlebin]['nerr'] = nerr\n self.fitResults[binSize][order][singlebin]['pix'] = pix\n self.fitResults[binSize][order][singlebin]['overflx'] = overflx\n print(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), \"finished.\")\n except:\n self.fitResults[binSize][order][singlebin]['converged'] = False\n print(\"Serious problem with bin:\", singlebin)\n pass\n\n def bin_shift_and_tilt_Akima(self, order, singlebin, binSize, multiple, shift, sigma, slope, offset, minuit, **kwargs):\n \"\"\"trying to smooth, interpolate, and integrate the fit.\"\"\"\n mask = self.Orders[order]['mask']\n ok = reduce(np.logical_and, [self.Orders[order][binSize][singlebin]['ok'], mask])\n iok = self.Orders[order][binSize][singlebin]['iok']\n iow = self.Orders[order]['iow'][iok]\n iof = self.Orders[order]['iof'][iok]\n wav = self.Orders[order]['wav'][ok]\n flx = self.Orders[order]['flx'][ok]\n err = self.Orders[order]['err'][ok]\n con = self.Orders[order]['con'][ok]\n pix = self.Orders[order]['pix'][ok]\n overflx = multiple * slope_to_array(slope,\n wav + shift,\n interp.interp_Akima(wav + shift,\n iow,\n convolve.convolve_constant_dv(iow,\n iof,\n vfwhm=sigma)\n )\n ) + offset\n chi_square = np.sum((overflx - flx/con)**2 / (err/con)**2)\n if minuit == 0:\n return chi_square\n else:\n return chi_square, wav, flx/con, err/con, pix, overflx\n\n def bin_shift_and_tilt_spline(self, order, singlebin, binSize, multiple, shift, sigma, slope, offset, minuit, **kwargs):\n \"\"\"trying to smooth, interpolate, and integrate the fit.\"\"\"\n mask = self.Orders[order]['mask']\n ok = reduce(np.logical_and, [self.Orders[order][binSize][singlebin]['ok'], mask])\n iok = self.Orders[order][binSize][singlebin]['iok']\n iow = self.Orders[order]['iow'][iok]\n iof = self.Orders[order]['iof'][iok]\n wav = self.Orders[order]['wav'][ok]\n flx = 
self.Orders[order]['flx'][ok]\n err = self.Orders[order]['err'][ok]\n con = self.Orders[order]['con'][ok]\n pix = self.Orders[order]['pix'][ok]\n overflx = multiple * slope_to_array(slope,\n wav + shift,\n interp.interp_spline(wav + shift,\n iow,\n convolve.convolve_constant_dv(iow,\n iof,\n vfwhm=sigma)\n )\n ) + offset\n chi_square = np.sum((overflx - flx/con)**2 / (err/con)**2)\n if minuit == 0:\n return chi_square\n else:\n return chi_square, wav, flx/con, err/con, pix, overflx\n\n def make_pretty_results(self):\n self.Results = {}\n for binSizeKey in list(self.fitResults.keys()):\n if binSizeKey == 'order':\n continue\n else:\n self.Results[binSizeKey] = {}\n for order in list(self.fitResults[binSizeKey].keys()):\n self.Results[binSizeKey][order] = {}\n self.Results[binSizeKey][order]['avwav'] = []\n self.Results[binSizeKey][order]['cal'] = []\n self.Results[binSizeKey][order]['calerr'] = []\n self.Results[binSizeKey][order]['R'] = []\n self.Results[binSizeKey][order]['Rerr'] = []\n self.Results[binSizeKey][order]['avpix'] = []\n self.Results[binSizeKey][order]['converged'] = []\n for bin in list(self.fitResults[binSizeKey][order].keys()):\n if len(self.fitResults[binSizeKey][order][bin]) > 2:\n self.Results[binSizeKey][order]['avwav'].append(self.fitResults[binSizeKey][order][bin]['avwav'])\n self.Results[binSizeKey][order]['cal'].append(self.fitResults[binSizeKey][order][bin]['cal'])\n self.Results[binSizeKey][order]['calerr'].append(self.fitResults[binSizeKey][order][bin]['calerr'])\n self.Results[binSizeKey][order]['R'].append(self.fitResults[binSizeKey][order][bin]['R'])\n self.Results[binSizeKey][order]['Rerr'].append(self.fitResults[binSizeKey][order][bin]['Rerr'])\n self.Results[binSizeKey][order]['avpix'].append(self.fitResults[binSizeKey][order][bin]['avpix'])\n self.Results[binSizeKey][order]['converged'].append(self.fitResults[binSizeKey][order][bin]['converged'])\n shuffle = np.argsort(self.Results[binSizeKey][order]['avwav'])\n self.Results[binSizeKey][order]['avwav'] = np.array(self.Results[binSizeKey][order]['avwav'])[shuffle]\n self.Results[binSizeKey][order]['cal'] = np.array(self.Results[binSizeKey][order]['cal'])[shuffle]\n self.Results[binSizeKey][order]['calerr'] = np.array(self.Results[binSizeKey][order]['calerr'])[shuffle]\n self.Results[binSizeKey][order]['R'] = np.array(self.Results[binSizeKey][order]['R'])[shuffle]\n self.Results[binSizeKey][order]['Rerr'] = np.array(self.Results[binSizeKey][order]['Rerr'])[shuffle]\n self.Results[binSizeKey][order]['avpix'] = np.array(self.Results[binSizeKey][order]['avpix'])[shuffle]\n self.Results[binSizeKey][order]['converged'] = np.array(self.Results[binSizeKey][order]['converged'])[shuffle]\n pass\n\n" ]
[ [ "numpy.polyfit", "numpy.linspace", "matplotlib.pylab.errorbar", "numpy.nan_to_num", "numpy.max", "numpy.zeros_like", "numpy.average", "matplotlib.pylab.legend", "numpy.hstack", "matplotlib.pylab.yticks", "numpy.arange", "numpy.min", "numpy.append", "matplotlib.pylab.xticks", "numpy.argsort", "numpy.array", "numpy.sum", "matplotlib.pylab.title", "matplotlib.pylab.ylabel", "matplotlib.pylab.xlabel", "numpy.loadtxt" ] ]
fcco/SkySol
[ "78a0a35139f51f56a8c32d75908d0203dfb19eea" ]
[ "skysol/lib/classification.py" ]
[ "\"\"\"\nModule for cloud classification training and application\n\nIt uses sklearn for machine learning\n\"\"\"\n\nimport numpy as np\nimport csv\nimport os\nimport time\nimport h5py\nfrom operator import itemgetter\n\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, ShuffleSplit, train_test_split\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn import tree\n\nimport pickle\nimport lzma\n\ndef pickle_to_array(obj):\n return np.fromstring(lzma.compress(pickle.dumps(obj)), dtype=np.uint8)\n\ndef unpickle_from_array(array):\n return pickle.loads(lzma.decompress(array.data))\n\n\n\nclass classificator:\n\n def __init__(self,ini):\n\n self.names = \"\" # Feature names\n self.x = [] # Feature data\n self.y = [] # Label data\n self.cloud_class = []\n self.class_prob = []\n self.indices = None\n self.time = []\n self.modelfile = ini.cloud_class_model\n self.label_names = ['Cu','Ci & Cs','Cc & Ac','Clear','Sc','St & As','Cb & Ns']\n\n # Collect training data\n def get_training(self, ini, contour_flag=False, exclude=[]):\n\n if contour_flag:\n self.feats = np.arange(6,33)\n else:\n self.feats = np.arange(6,26)\n\n for i in range(1,8):\n filename = ini.path_training + os.sep + str(i) + os.sep + \"features.dat\"\n print((\"read file: %s\" % filename))\n reader = csv.reader(open(filename, \"r\"),delimiter=\" \")\n names = next(reader)\n self.names = np.array(names)[self.feats]\n find = []\n for j in range(0, len(self.names)):\n if self.names[j] not in exclude:\n find.append(j)\n else:\n print(self.names[j])\n find = np.array(find)\n self.names = self.names[find]\n cnt = 0\n for row in reader:\n if contour_flag:\n tmp = np.array([float(x) for x in row[6:]])\n self.x.append(tmp[find])\n else:\n tmp = np.array([ float(row[x]) for x in self.feats ])\n self.x.append(tmp[find])\n self.y.append(i)\n cnt += 1\n print(\"Number of images for class %d: %d\" % (i, cnt))\n print(\"Number of images for classification: %d\" % len(self.x))\n\n self.x = np.array(self.x)\n self.y = np.float16(self.y)\n\n # Preprocess feature \"overall RBR\"\n q = np.where(self.names == \"Overall_RB_ratio\")[0]\n if len(q) > 0:\n q = q[0]\n ind = np.where(self.x[:,q] > 1)\n if len(ind) > 0:\n for h in ind:\n self.x[h,:] = np.nan\n self.y[h] = np.nan\n\n ind = np.isfinite(self.y) & ( np.all(np.isfinite(self.x),axis=1) )\n\n self.x = self.x[ind,:]\n self.y = np.int32(self.y[ind])\n\n self.max_feat = self.x.shape[1]\n\n\n # Learn model ( with grid search and cross validation )\n def fit(self, modelfile, model=\"SVC\", rank_feat=False, print_feat=False,\n plot_feat=False, grid_search=False):\n\n st = time.time()\n\n # train normalizer\n self.normalize()\n\n # defaults\n params = {}\n params['gamma'] = 'auto'\n params['C'] = 1\n params['n_neighbors'] = 100\n\n # define initial model\n if model == \"SVC\":\n self.predictor = SVC(gamma=params['gamma'], C=params['C'],probability=True)\n elif model == \"kNN\":\n self.predictor = KNeighborsClassifier(n_neighbors=params['n_neighbors'])\n elif model == \"RandomForest\":\n self.predictor = RandomForestClassifier(n_estimators=10, max_depth=None, \\\n min_samples_split=10, random_state=0)\n elif model == \"Tree\":\n self.predictor = tree.DecisionTreeClassifier()\n\n\n # Apply feature ranking\n if rank_feat:\n self.indices = 
self.feature_selection(self.x,self.y,self.max_feat,\n print_flag=print_feat,plot_flag=plot_feat)\n self.max_feat = self.test_feature_ranking(params)\n self.indices = self.indices[:self.max_feat]\n\n self.x = self.reduce_features(self.x, self.indices, max_feat=self.max_feat)\n self.normalize() # repeat normalization on reduced set\n else:\n self.indices = np.arange(0,self.x.shape[1])\n self.max_feat = self.x.shape[1]\n\n\n # normalize feature data\n self.x = self.scaler.transform(self.x)\n\n if grid_search:\n\n # specify parameters for parameter grid search\n if model == \"SVC\":\n param_grid = {\"gamma\": [2**-10,2**-9,2**-8,2**-7,2**-6,2**-5,2**-4,2**-3,2**-2,2**-1,2**0,2**1,2**2], \\\n \"C\": [2**6,2**7,2**8,2**9,2**10]}\n elif model == \"kNN\":\n param_grid = {\"n_neighbors\": [1, 10, 50, 100, 200, 500, 1000]}\n\n grid_search = GridSearchCV(self.predictor, param_grid=param_grid,cv=4)\n grid_search.fit(self.x, self.y)\n\n # get best parameters\n params, performance = self.report(grid_search.grid_scores_,print_flag=True)\n print(\"Grid search...finished with a performance of %.2f and parameter %s in %.2f seconds.\" % \\\n ( performance, params, round(time.time() - st,1)))\n\n\n # define new model\n if model == \"SVC\":\n self.predictor = SVC(gamma=params['gamma'], C=params['C'],probability=True)\n elif model == \"kNN\":\n self.predictor = KNeighborsClassifier(n_neighbors=params['n_neighbors'])\n elif model == \"RandomForest\":\n self.predictor = RandomForestClassifier(n_estimators=10, max_depth=None, \\\n min_samples_split=1, random_state=0)\n elif model == \"Tree\":\n self.predictor = tree.DecisionTreeClassifier()\n\n\n X_train, X_test, y_train, y_test = train_test_split(\n self.x[:,:], self.y, test_size=0.33, random_state=42)\n scores = self.crossval(self.predictor, X_train, y_train)\n self.predictor.fit(X_train, y_train)\n y_pred = self.predictor.predict(X_test)\n r = accuracy_score(y_test, y_pred)\n\n print(r, scores)\n\n self.predictor.fit(self.x[:,:],self.y)\n\n\n # Export model\n with h5py.File(self.modelfile,'w') as h:\n h.create_dataset('scaler', data=pickle_to_array(self.scaler))\n h.create_dataset('model', data=pickle_to_array(self.predictor))\n h.create_dataset('names', data=pickle_to_array(self.names[self.indices]) )\n h.create_dataset('accuracy', data=r)\n\n\n\n # Normalize training data set and store transformation function\n def normalize(self):\n self.scaler = preprocessing.StandardScaler().fit(self.x)\n\n def crossval(self, model, x, y):\n cv = ShuffleSplit(n_splits=3, test_size=0.7, random_state=0)\n scores = cross_val_score(model, x, y, cv=cv)\n return scores\n\n # imgeature Selection\n def feature_selection(self,x,y,nfeatures,plot_flag=False,print_flag=False):\n\n # Build a forest and compute the feature importances\n forest = ExtraTreesClassifier(n_estimators=250, \\\n random_state=0)\n\n forest.fit(self.scaler.transform(x) ,y )\n importances = forest.feature_importances_\n std = np.std([tree.feature_importances_ for tree in forest.estimators_], \\\n axis=0)\n indices = np.argsort(importances)[::-1]\n indices = indices[0:nfeatures]\n\n if print_flag == True:\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(nfeatures):\n print((\"%d. 
feature %s (%f)\" % (f + 1, self.names[indices[f]], importances[indices[f]])))\n\n if plot_flag == True:\n # Plot the feature importances of the forest\n from matplotlib import pyplot as plt\n plt.figure()\n plt.title(\"Feature importances\")\n plt.bar(list(range(nfeatures)), importances[indices], \\\n color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(list(range(nfeatures)), indices)\n plt.xlim([-1,nfeatures])\n plt.show()\n\n return indices\n\n\n def reduce_features(self,x,indices,max_feat=10):\n x_sel = np.empty([x.shape[0],max_feat])\n for f in range(max_feat):\n x_sel[:,f] = x[:,indices[f]]\n\n return x_sel\n\n\n\n def test_feature_ranking(self, params):\n\n from matplotlib import pyplot as plt\n\n acc = []\n\n X_train, X_test, y_train, y_test = train_test_split(\n self.x, self.y, test_size=0.7, random_state=0)\n\n # Start feature selection\n for i in range(1, self.x.shape[1]+1, 1):\n\n # train model\n self.predictor.fit(X_train[:,self.indices][:,:i], y_train)\n y_pred = self.predictor.predict(X_test[:,self.indices][:,:i])\n r = accuracy_score(y_test, y_pred)\n acc.append(r)\n print('Features = %d - Extra feature: %s - Accuracy = %f ' % (i, \\\n self.names[self.indices][i-1], r))\n\n acc = np.array(acc)\n return np.argwhere(acc>0.99)[0][0] + 1\n\n\n # Utility function to report best scores\n def report(self, grid_scores, n_top=10, print_flag=True):\n top_scores = sorted(grid_scores, key=itemgetter(1),reverse=True)[:n_top]\n if print_flag:\n for i, score in enumerate(top_scores):\n print((\"Model with rank: {0}\".format(i + 1)))\n print((\"Mean validation score: {0:.3f} (std: {1:.3f})\".format( \\\n score.mean_validation_score, \\\n np.std(score.cv_validation_scores))))\n print((\"Parameters: {0}\".format(score.parameters)))\n print(\"\")\n return top_scores[0].parameters, top_scores[0].mean_validation_score\n\n\n\n # Apply classification model\n def apply_model(self,features,modelfile,rank_feat=False,contour_flag=False):\n # Load model, scaler and featurelist\n with h5py.File(self.modelfile, 'r') as infile:\n self.predictor = unpickle_from_array(infile['model'][:])\n self.scaler = unpickle_from_array(infile['scaler'][:])\n names = unpickle_from_array(infile['names'][:])\n vec = []\n for key in names:\n vec.append(features[key])\n vec = np.array(vec)\n if np.any(np.isnan(vec)):\n return -1, np.nan\n else:\n # normalization\n vec = np.array(vec).reshape(1,-1)\n vec = self.scaler.transform(vec)\n # predicted class\n cloudClass = self.predictor.predict(vec)[0]\n # predicted probabilities for each class\n prob = self.predictor.predict_proba(vec)[0]\n return cloudClass, np.round(np.array(prob),2)\n" ]
[ [ "sklearn.tree.DecisionTreeClassifier", "numpy.where", "sklearn.model_selection.ShuffleSplit", "sklearn.ensemble.RandomForestClassifier", "numpy.arange", "numpy.float16", "sklearn.neighbors.KNeighborsClassifier", "numpy.std", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.isnan", "sklearn.ensemble.ExtraTreesClassifier", "sklearn.model_selection.train_test_split", "sklearn.svm.SVC", "numpy.argsort", "numpy.array", "matplotlib.pyplot.show", "sklearn.model_selection.GridSearchCV", "sklearn.model_selection.cross_val_score", "numpy.isfinite", "numpy.int32", "numpy.argwhere", "matplotlib.pyplot.xlim", "sklearn.preprocessing.StandardScaler", "numpy.empty", "sklearn.metrics.accuracy_score" ] ]
Mofetoluwa/text2text
[ "a1b7868854449cd8b0f874cc1aeb02bae97a05a6" ]
[ "text2text/biunilm/seq2seq_loader.py" ]
[ "from random import randint, shuffle, choice\nfrom random import random as rand\nimport math\nimport torch\n\nfrom .loader_utils import get_random_word, batch_list_to_batch_tensors, Pipeline\n\n# Input file format :\n# 1. One sentence per line. These should ideally be actual sentences,\n# not entire paragraphs or arbitrary spans of text. (Because we use\n# the sentence boundaries for the \"next sentence prediction\" task).\n# 2. Blank lines between documents. Document boundaries are needed\n# so that the \"next sentence prediction\" task doesn't span between documents.\n\n\ndef truncate_tokens_pair(tokens_a, tokens_b, max_len, max_len_a=0, max_len_b=0, trunc_seg=None, always_truncate_tail=False):\n num_truncated_a = [0, 0]\n num_truncated_b = [0, 0]\n while True:\n if len(tokens_a) + len(tokens_b) <= max_len:\n break\n if (max_len_a > 0) and len(tokens_a) > max_len_a:\n trunc_tokens = tokens_a\n num_truncated = num_truncated_a\n elif (max_len_b > 0) and len(tokens_b) > max_len_b:\n trunc_tokens = tokens_b\n num_truncated = num_truncated_b\n elif trunc_seg:\n # truncate the specified segment\n if trunc_seg == 'a':\n trunc_tokens = tokens_a\n num_truncated = num_truncated_a\n else:\n trunc_tokens = tokens_b\n num_truncated = num_truncated_b\n else:\n # truncate the longer segment\n if len(tokens_a) > len(tokens_b):\n trunc_tokens = tokens_a\n num_truncated = num_truncated_a\n else:\n trunc_tokens = tokens_b\n num_truncated = num_truncated_b\n # whether always truncate source sequences\n if (not always_truncate_tail) and (rand() < 0.5):\n del trunc_tokens[0]\n num_truncated[0] += 1\n else:\n trunc_tokens.pop()\n num_truncated[1] += 1\n return num_truncated_a, num_truncated_b\n\n\nclass Seq2SeqDataset(torch.utils.data.Dataset):\n \"\"\" Load sentence pair (sequential or random order) from corpus \"\"\"\n\n def __init__(self, file_src, file_tgt, batch_size, tokenizer, max_len, file_oracle=None, short_sampling_prob=0.1, sent_reverse_order=False, bi_uni_pipeline=[]):\n super().__init__()\n self.tokenizer = tokenizer # tokenize function\n self.max_len = max_len # maximum length of tokens\n self.short_sampling_prob = short_sampling_prob\n self.bi_uni_pipeline = bi_uni_pipeline\n self.batch_size = batch_size\n self.sent_reverse_order = sent_reverse_order\n\n # read the file into memory\n self.ex_list = []\n if file_oracle is None:\n with open(file_src, \"r\", encoding='utf-8') as f_src, open(file_tgt, \"r\", encoding='utf-8') as f_tgt:\n for src, tgt in zip(f_src, f_tgt):\n src_tk = tokenizer.tokenize(src.strip())\n tgt_tk = tokenizer.tokenize(tgt.strip())\n assert len(src_tk) > 0\n assert len(tgt_tk) > 0\n self.ex_list.append((src_tk, tgt_tk))\n else:\n with open(file_src, \"r\", encoding='utf-8') as f_src, \\\n open(file_tgt, \"r\", encoding='utf-8') as f_tgt, \\\n open(file_oracle, \"r\", encoding='utf-8') as f_orc:\n for src, tgt, orc in zip(f_src, f_tgt, f_orc):\n src_tk = tokenizer.tokenize(src.strip())\n tgt_tk = tokenizer.tokenize(tgt.strip())\n s_st, labl = orc.split('\\t')\n s_st = [int(x) for x in s_st.split()]\n labl = [int(x) for x in labl.split()]\n self.ex_list.append((src_tk, tgt_tk, s_st, labl))\n print('Load {0} documents'.format(len(self.ex_list)))\n\n def __len__(self):\n return len(self.ex_list)\n\n def __getitem__(self, idx):\n instance = self.ex_list[idx]\n proc = choice(self.bi_uni_pipeline)\n instance = proc(instance)\n return instance\n\n def __iter__(self): # iterator to load data\n for __ in range(math.ceil(len(self.ex_list) / float(self.batch_size))):\n batch = []\n 
for __ in range(self.batch_size):\n idx = randint(0, len(self.ex_list)-1)\n batch.append(self.__getitem__(idx))\n # To Tensor\n yield batch_list_to_batch_tensors(batch)\n\n\nclass Preprocess4Seq2seq(Pipeline):\n \"\"\" Pre-processing steps for pretraining transformer \"\"\"\n\n def __init__(self, max_pred, mask_prob, vocab_words, indexer, max_len=512, skipgram_prb=0, skipgram_size=0, block_mask=False, mask_whole_word=False, new_segment_ids=False, truncate_config={}, mask_source_words=False, mode=\"s2s\", has_oracle=False, num_qkv=0, s2s_special_token=False, s2s_add_segment=False, s2s_share_segment=False, pos_shift=False, **kwargs):\n super().__init__()\n self.max_len = max_len\n self.max_pred = max_pred # max tokens of prediction\n self.mask_prob = mask_prob # masking probability\n self.vocab_words = vocab_words # vocabulary (sub)words\n self.indexer = indexer # function from token to token index\n self.max_len = max_len\n self._tril_matrix = torch.tril(torch.ones(\n (max_len, max_len), dtype=torch.long))\n self.skipgram_prb = skipgram_prb\n self.skipgram_size = skipgram_size\n self.mask_whole_word = mask_whole_word\n self.new_segment_ids = new_segment_ids\n self.always_truncate_tail = truncate_config.get(\n 'always_truncate_tail', False)\n self.max_len_a = truncate_config.get('max_len_a', None)\n self.max_len_b = truncate_config.get('max_len_b', None)\n self.trunc_seg = truncate_config.get('trunc_seg', None)\n self.task_idx = 3 # relax projection layer for different tasks\n self.mask_source_words = mask_source_words\n assert mode in (\"s2s\", \"l2r\")\n self.mode = mode\n self.has_oracle = has_oracle\n self.num_qkv = num_qkv\n self.s2s_special_token = s2s_special_token\n self.s2s_add_segment = s2s_add_segment\n self.s2s_share_segment = s2s_share_segment\n self.pos_shift = pos_shift\n\n def __call__(self, instance):\n tokens_a, tokens_b = instance[:2]\n\n if self.pos_shift:\n tokens_b = ['[S2S_SOS]'] + tokens_b\n\n # -3 for special tokens [CLS], [SEP], [SEP]\n num_truncated_a, _ = truncate_tokens_pair(tokens_a, tokens_b, self.max_len - 3, max_len_a=self.max_len_a,\n max_len_b=self.max_len_b, trunc_seg=self.trunc_seg, always_truncate_tail=self.always_truncate_tail)\n\n # Add Special Tokens\n if self.s2s_special_token:\n tokens = ['[S2S_CLS]'] + tokens_a + \\\n ['[S2S_SEP]'] + tokens_b + ['[SEP]']\n else:\n tokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]']\n\n if self.new_segment_ids:\n if self.mode == \"s2s\":\n if self.s2s_add_segment:\n if self.s2s_share_segment:\n segment_ids = [0] + [1] * \\\n (len(tokens_a)+1) + [5]*(len(tokens_b)+1)\n else:\n segment_ids = [4] + [6] * \\\n (len(tokens_a)+1) + [5]*(len(tokens_b)+1)\n else:\n segment_ids = [4] * (len(tokens_a)+2) + \\\n [5]*(len(tokens_b)+1)\n else:\n segment_ids = [2] * (len(tokens))\n else:\n segment_ids = [0]*(len(tokens_a)+2) + [1]*(len(tokens_b)+1)\n\n if self.pos_shift:\n n_pred = min(self.max_pred, len(tokens_b))\n masked_pos = [len(tokens_a)+2+i for i in range(len(tokens_b))]\n masked_weights = [1]*n_pred\n masked_ids = self.indexer(tokens_b[1:]+['[SEP]'])\n else:\n # For masked Language Models\n # the number of prediction is sometimes less than max_pred when sequence is short\n effective_length = len(tokens_b)\n if self.mask_source_words:\n effective_length += len(tokens_a)\n n_pred = min(self.max_pred, max(\n 1, int(round(effective_length*self.mask_prob))))\n # candidate positions of masked tokens\n cand_pos = []\n special_pos = set()\n for i, tk in enumerate(tokens):\n # only mask tokens_b (target sequence)\n # 
we will mask [SEP] as an ending symbol\n if (i >= len(tokens_a)+2) and (tk != '[CLS]'):\n cand_pos.append(i)\n elif self.mask_source_words and (i < len(tokens_a)+2) and (tk != '[CLS]') and (not tk.startswith('[SEP')):\n cand_pos.append(i)\n else:\n special_pos.add(i)\n shuffle(cand_pos)\n\n masked_pos = set()\n max_cand_pos = max(cand_pos)\n for pos in cand_pos:\n if len(masked_pos) >= n_pred:\n break\n if pos in masked_pos:\n continue\n\n def _expand_whole_word(st, end):\n new_st, new_end = st, end\n while (new_st >= 0) and tokens[new_st].startswith('##'):\n new_st -= 1\n while (new_end < len(tokens)) and tokens[new_end].startswith('##'):\n new_end += 1\n return new_st, new_end\n\n if (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb):\n # ngram\n cur_skipgram_size = randint(2, self.skipgram_size)\n if self.mask_whole_word:\n st_pos, end_pos = _expand_whole_word(\n pos, pos + cur_skipgram_size)\n else:\n st_pos, end_pos = pos, pos + cur_skipgram_size\n else:\n # directly mask\n if self.mask_whole_word:\n st_pos, end_pos = _expand_whole_word(pos, pos + 1)\n else:\n st_pos, end_pos = pos, pos + 1\n\n for mp in range(st_pos, end_pos):\n if (0 < mp <= max_cand_pos) and (mp not in special_pos):\n masked_pos.add(mp)\n else:\n break\n\n masked_pos = list(masked_pos)\n if len(masked_pos) > n_pred:\n shuffle(masked_pos)\n masked_pos = masked_pos[:n_pred]\n\n masked_tokens = [tokens[pos] for pos in masked_pos]\n for pos in masked_pos:\n if rand() < 0.8: # 80%\n tokens[pos] = '[MASK]'\n elif rand() < 0.5: # 10%\n tokens[pos] = get_random_word(self.vocab_words)\n # when n_pred < max_pred, we only calculate loss within n_pred\n masked_weights = [1]*len(masked_tokens)\n\n # Token Indexing\n masked_ids = self.indexer(masked_tokens)\n # Token Indexing\n input_ids = self.indexer(tokens)\n\n # Zero Padding\n n_pad = self.max_len - len(input_ids)\n input_ids.extend([0]*n_pad)\n segment_ids.extend([0]*n_pad)\n\n if self.num_qkv > 1:\n mask_qkv = [0]*(len(tokens_a)+2) + [1] * (len(tokens_b)+1)\n mask_qkv.extend([0]*n_pad)\n else:\n mask_qkv = None\n\n input_mask = torch.zeros(self.max_len, self.max_len, dtype=torch.long)\n if self.mode == \"s2s\":\n input_mask[:, :len(tokens_a)+2].fill_(1)\n second_st, second_end = len(\n tokens_a)+2, len(tokens_a)+len(tokens_b)+3\n input_mask[second_st:second_end, second_st:second_end].copy_(\n self._tril_matrix[:second_end-second_st, :second_end-second_st])\n else:\n st, end = 0, len(tokens_a) + len(tokens_b) + 3\n input_mask[st:end, st:end].copy_(self._tril_matrix[:end, :end])\n\n # Zero Padding for masked target\n if self.max_pred > n_pred:\n n_pad = self.max_pred - n_pred\n if masked_ids is not None:\n masked_ids.extend([0]*n_pad)\n if masked_pos is not None:\n masked_pos.extend([0]*n_pad)\n if masked_weights is not None:\n masked_weights.extend([0]*n_pad)\n\n oracle_pos = None\n oracle_weights = None\n oracle_labels = None\n if self.has_oracle:\n s_st, labls = instance[2:]\n oracle_pos = []\n oracle_labels = []\n for st, lb in zip(s_st, labls):\n st = st - num_truncated_a[0]\n if st > 0 and st < len(tokens_a):\n oracle_pos.append(st)\n oracle_labels.append(lb)\n oracle_pos = oracle_pos[:20]\n oracle_labels = oracle_labels[:20]\n oracle_weights = [1] * len(oracle_pos)\n if len(oracle_pos) < 20:\n x_pad = 20 - len(oracle_pos)\n oracle_pos.extend([0] * x_pad)\n oracle_labels.extend([0] * x_pad)\n oracle_weights.extend([0] * x_pad)\n\n return (input_ids, segment_ids, input_mask, mask_qkv, masked_ids,\n masked_pos, masked_weights, -1, 
self.task_idx,\n oracle_pos, oracle_weights, oracle_labels)\n\n return (input_ids, segment_ids, input_mask, mask_qkv, masked_ids, masked_pos, masked_weights, -1, self.task_idx)\n\n\nclass Preprocess4Seq2seqDecoder(Pipeline):\n \"\"\" Pre-processing steps for pretraining transformer \"\"\"\n\n def __init__(self, vocab_words, indexer, max_len=512, max_tgt_length=128, new_segment_ids=False, mode=\"s2s\", num_qkv=0, s2s_special_token=False, s2s_add_segment=False, s2s_share_segment=False, pos_shift=False, **kwargs):\n super().__init__()\n self.max_len = max_len\n self.vocab_words = vocab_words # vocabulary (sub)words\n self.indexer = indexer # function from token to token index\n self.max_len = max_len\n self._tril_matrix = torch.tril(torch.ones(\n (max_len, max_len), dtype=torch.long))\n self.new_segment_ids = new_segment_ids\n self.task_idx = 3 # relax projection layer for different tasks\n assert mode in (\"s2s\", \"l2r\")\n self.mode = mode\n self.max_tgt_length = max_tgt_length\n self.num_qkv = num_qkv\n self.s2s_special_token = s2s_special_token\n self.s2s_add_segment = s2s_add_segment\n self.s2s_share_segment = s2s_share_segment\n self.pos_shift = pos_shift\n\n def __call__(self, instance):\n tokens_a, max_a_len = instance\n\n # Add Special Tokens\n if self.s2s_special_token:\n padded_tokens_a = ['[S2S_CLS]'] + tokens_a + ['[S2S_SEP]']\n else:\n padded_tokens_a = ['[CLS]'] + tokens_a + ['[SEP]']\n assert len(padded_tokens_a) <= max_a_len + 2\n if max_a_len + 2 > len(padded_tokens_a):\n padded_tokens_a += ['[PAD]'] * \\\n (max_a_len + 2 - len(padded_tokens_a))\n assert len(padded_tokens_a) == max_a_len + 2\n max_len_in_batch = min(self.max_tgt_length +\n max_a_len + 2, self.max_len)\n tokens = padded_tokens_a\n if self.new_segment_ids:\n if self.mode == \"s2s\":\n _enc_seg1 = 0 if self.s2s_share_segment else 4\n if self.s2s_add_segment:\n if self.s2s_share_segment:\n segment_ids = [\n 0] + [1]*(len(padded_tokens_a)-1) + [5]*(max_len_in_batch - len(padded_tokens_a))\n else:\n segment_ids = [\n 4] + [6]*(len(padded_tokens_a)-1) + [5]*(max_len_in_batch - len(padded_tokens_a))\n else:\n segment_ids = [4]*(len(padded_tokens_a)) + \\\n [5]*(max_len_in_batch - len(padded_tokens_a))\n else:\n segment_ids = [2]*max_len_in_batch\n else:\n segment_ids = [0]*(len(padded_tokens_a)) \\\n + [1]*(max_len_in_batch - len(padded_tokens_a))\n\n if self.num_qkv > 1:\n mask_qkv = [0]*(len(padded_tokens_a)) + [1] * \\\n (max_len_in_batch - len(padded_tokens_a))\n else:\n mask_qkv = None\n\n position_ids = []\n for i in range(len(tokens_a) + 2):\n position_ids.append(i)\n for i in range(len(tokens_a) + 2, max_a_len + 2):\n position_ids.append(0)\n for i in range(max_a_len + 2, max_len_in_batch):\n position_ids.append(i - (max_a_len + 2) + len(tokens_a) + 2)\n\n # Token Indexing\n input_ids = self.indexer(tokens)\n\n # Zero Padding\n input_mask = torch.zeros(\n max_len_in_batch, max_len_in_batch, dtype=torch.long)\n if self.mode == \"s2s\":\n input_mask[:, :len(tokens_a)+2].fill_(1)\n else:\n st, end = 0, len(tokens_a) + 2\n input_mask[st:end, st:end].copy_(\n self._tril_matrix[:end, :end])\n input_mask[end:, :len(tokens_a)+2].fill_(1)\n second_st, second_end = len(padded_tokens_a), max_len_in_batch\n\n input_mask[second_st:second_end, second_st:second_end].copy_(\n self._tril_matrix[:second_end-second_st, :second_end-second_st])\n\n return (input_ids, segment_ids, position_ids, input_mask, mask_qkv, self.task_idx)" ]
[ [ "torch.ones", "torch.zeros" ] ]
zalanborsos/coresets
[ "47896a68c79666496cf1ef1d2683bd76875fe013" ]
[ "tests/test_sensitivity.py" ]
[ "from __future__ import division, absolute_import\n\nimport pytest\nfrom sklearn.datasets import load_iris\nimport numpy as np\n\nfrom coresets import *\n\n\nclass TestSensitivity(object):\n\n @pytest.fixture\n def gen_data(self):\n X, _ = load_iris(return_X_y=True)\n centers = X[np.random.choice(np.arange(X.shape[0]), size=5)]\n return X, centers\n\n def test_kmeans_sensitivity(self, gen_data):\n # test kmeans sensitivity\n # make sure that the efficient C++ implementation gives the same results as Python\n X, centers = gen_data\n alpha = 1\n sensitivity = kmeans_sensitivity(X, np.ones(X.shape[0]), centers, alpha)\n\n # calc sensitivity in Python to check Cython impl\n dists = np.zeros((X.shape[0], centers.shape[0]))\n for i, x in enumerate(X):\n for j, c in enumerate(centers):\n dists[i, j] = np.sum((x - c) ** 2)\n assigns = np.argmin(dists, axis=1)\n dists = np.min(dists, axis=1)\n cnts = np.bincount(assigns)\n total = np.mean(dists)\n cluster_tot = np.zeros(centers.shape[0])\n for i, c in enumerate(centers):\n cluster_tot[i] = np.sum(dists[np.where(assigns == i)[0]])\n sensitivity2 = 2 * alpha * dists / total + 4 * alpha / (total * cnts[assigns]) * cluster_tot[assigns] + 4. * \\\n X.shape[0] / cnts[assigns]\n assert np.allclose(sensitivity, sensitivity2)\n" ]
[ [ "numpy.allclose", "numpy.min", "numpy.arange", "sklearn.datasets.load_iris", "numpy.ones", "numpy.argmin", "numpy.bincount", "numpy.mean", "numpy.where", "numpy.zeros", "numpy.sum" ] ]
zygmuntz/time-series-classification
[ "31026cd3448cb2a1807267cfa5b460aa8386830b" ]
[ "train_and_evaluate.py" ]
[ "#!/usr/bin/env python\n\n\"train a binary classifier on extracted features, predict, evaluate\"\n\nimport pandas as pd\n\nfrom pprint import pprint\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.linear_model import LogisticRegression as LR\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nfrom sklearn.ensemble import RandomForestClassifier as RF\nfrom sklearn.metrics import roc_auc_score as AUC, accuracy_score as accuracy\n\n#\n\nfrom config import train_file, test_file\n\ntrain = pd.read_csv( train_file )\ntest = pd.read_csv( test_file )\n\nx_train = train.drop( 'y', axis = 1 ).values\ny_train = train.y.values\n\nx_test = test.drop( 'y', axis = 1 ).values\ny_test = test.y.values\n\nclassifiers = [\n\t#LR( C = 10 ),\n\t#LR( C = 1 ),\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t#LR( C = 0.1 ),\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\n\tmake_pipeline( StandardScaler(), LR()),\t\n\t#make_pipeline( StandardScaler(), LR( C = 10 )),\n\t#make_pipeline( StandardScaler(), LR( C = 30 )),\n\n\tmake_pipeline( MinMaxScaler(), LR()),\t\t\t\t\t\n\t#make_pipeline( MinMaxScaler(), LR( C = 10 )),\t\n\t#make_pipeline( MinMaxScaler(), LR( C = 30 )),\n\n\t#LDA(),\t\t\t\t\t\t\t\t\t\t\n\tRF( n_estimators = 100, min_samples_leaf = 5 )\n]\n\nfor clf in classifiers:\n\n\tclf.fit( x_train, y_train )\n\tp = clf.predict_proba( x_test )[:,1]\n\tp_bin = clf.predict( x_test )\n\n\tauc = AUC( y_test, p )\n\tacc = accuracy( y_test, p_bin )\n\tprint( \"AUC: {:.2%}, accuracy: {:.2%} \\n\\n{}\\n\\n\".format( auc, acc, clf ))\n\n" ]
[ [ "sklearn.metrics.roc_auc_score", "pandas.read_csv", "sklearn.linear_model.LogisticRegression", "sklearn.ensemble.RandomForestClassifier", "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.MinMaxScaler", "sklearn.metrics.accuracy_score" ] ]
rejane-paulino/exercicos
[ "c8844dd7beb8eecdf92b2852e1f5356b15d2de06" ]
[ "exercicio-02.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[26]:\n\n\n#Exercicio 02\n\nimport matplotlib.pyplot as plt\n\n#Grupos de Idade\ng_idade = ['0 a 4','5 a 9','10 a 14','15 a 19','20 a 24','25 a 29','30 a 34','35 a 39','40 a 44','45 a 49','50 a 54','55 a 59','60 a 64','65 a 69','70 a 74','75 a 79','80 a 84','85 a 89','90 a 94','95 a 99','≥ 100']\n \n#População Femenina\np_femenina = [6779171, 7345231, 8441348, 8432004, 8614963, 8643419, 8026854, 7121915, 6688796, 6141338, 5305407, 4373877, 3468085, 2616745, 2074264, 1472930, 998349, 508724, 211594, 66806, 16989]\n\nx = [ x for x in range(len(g_idade))]\n\nplt.figure(figsize=(10, 8))\n\nplt.bar(x, p_femenina, align='center',\n color='orange', linewidth=1, edgecolor='black')\n\nplt.yticks([ 0, 1000000, 2000000, 3000000, 4000000, 5000000, 6000000, 7000000, 8000000, 9000000],\n [ \"0\", \"1.0 milhão\", \"2.0 milhões\", \"3.0 milhões\", \"4.0 milhões\", \"5.0 milhões\", \"6.0 milhões\", \"7.0 milhões\", \"8.0 milhões\", \"9.0 milhões\"])\n\nplt.xticks(x, g_idade, rotation=45)\n \nplt.title('População Femenina - IBGE/2010')\n\nplt.xlabel('Grupos de Idade (anos)')\nplt.ylabel('População Femenina');\n\nplt.show()\n \n \n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.bar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure" ] ]
aysedemirel/udacity-pytorch-scholarship
[ "48823e9f97c2bbb7f2df0b2b6363623dd11587fa" ]
[ "perceptron.py" ]
[ "import numpy as np\n# Setting the random seed, feel free to change it and see different solutions.\nnp.random.seed(42)\n\ndef stepFunction(t):\n if t >= 0:\n return 1\n return 0\n\ndef prediction(X, W, b):\n return stepFunction((np.matmul(X,W)+b)[0])\n\n# TODO: Fill in the code below to implement the perceptron trick.\n# The function should receive as inputs the data X, the labels y,\n# the weights W (as an array), and the bias b,\n# update the weights and bias W, b, according to the perceptron algorithm,\n# and return W and b.\ndef perceptronStep(X, y, W, b, learn_rate = 0.01):\n # Fill in code\n for i in range(len(X)):\n y_hat = prediction(X[i],W,b)\n if y[i]-y_hat == 1:\n W[0] += X[i][0]*learn_rate\n W[1] += X[i][1]*learn_rate\n b += learn_rate\n elif y[i]-y_hat == -1:\n W[0] -= X[i][0]*learn_rate\n W[1] -= X[i][1]*learn_rate\n b -= learn_rate\n return W, b\n \n# This function runs the perceptron algorithm repeatedly on the dataset,\n# and returns a few of the boundary lines obtained in the iterations,\n# for plotting purposes.\n# Feel free to play with the learning rate and the num_epochs,\n# and see your results plotted below.\ndef trainPerceptronAlgorithm(X, y, learn_rate = 0.01, num_epochs = 25):\n x_min, x_max = min(X.T[0]), max(X.T[0])\n y_min, y_max = min(X.T[1]), max(X.T[1])\n W = np.array(np.random.rand(2,1))\n b = np.random.rand(1)[0] + x_max\n # These are the solution lines that get plotted below.\n boundary_lines = []\n for i in range(num_epochs):\n # In each epoch, we apply the perceptron step.\n W, b = perceptronStep(X, y, W, b, learn_rate)\n boundary_lines.append((-W[0]/W[1], -b/W[1]))\n return boundary_lines\n\n\n# data.csv\n# 0.78051,-0.063669,1\n# 0.28774,0.29139,1\n# 0.40714,0.17878,1\n# 0.2923,0.4217,1\n# 0.50922,0.35256,1\n# 0.27785,0.10802,1\n# 0.27527,0.33223,1\n# 0.43999,0.31245,1\n# 0.33557,0.42984,1\n# 0.23448,0.24986,1\n# 0.0084492,0.13658,1\n# 0.12419,0.33595,1\n# 0.25644,0.42624,1\n# 0.4591,0.40426,1\n# 0.44547,0.45117,1\n# 0.42218,0.20118,1\n# 0.49563,0.21445,1\n# 0.30848,0.24306,1\n# 0.39707,0.44438,1\n# 0.32945,0.39217,1\n# 0.40739,0.40271,1\n# 0.3106,0.50702,1\n# 0.49638,0.45384,1\n# 0.10073,0.32053,1\n# 0.69907,0.37307,1\n# 0.29767,0.69648,1\n# 0.15099,0.57341,1\n# 0.16427,0.27759,1\n# 0.33259,0.055964,1\n# 0.53741,0.28637,1\n# 0.19503,0.36879,1\n# 0.40278,0.035148,1\n# 0.21296,0.55169,1\n# 0.48447,0.56991,1\n# 0.25476,0.34596,1\n# 0.21726,0.28641,1\n# 0.67078,0.46538,1\n# 0.3815,0.4622,1\n# 0.53838,0.32774,1\n# 0.4849,0.26071,1\n# 0.37095,0.38809,1\n# 0.54527,0.63911,1\n# 0.32149,0.12007,1\n# 0.42216,0.61666,1\n# 0.10194,0.060408,1\n# 0.15254,0.2168,1\n# 0.45558,0.43769,1\n# 0.28488,0.52142,1\n# 0.27633,0.21264,1\n# 0.39748,0.31902,1\n# 0.5533,1,0\n# 0.44274,0.59205,0\n# 0.85176,0.6612,0\n# 0.60436,0.86605,0\n# 0.68243,0.48301,0\n# 1,0.76815,0\n# 0.72989,0.8107,0\n# 0.67377,0.77975,0\n# 0.78761,0.58177,0\n# 0.71442,0.7668,0\n# 0.49379,0.54226,0\n# 0.78974,0.74233,0\n# 0.67905,0.60921,0\n# 0.6642,0.72519,0\n# 0.79396,0.56789,0\n# 0.70758,0.76022,0\n# 0.59421,0.61857,0\n# 0.49364,0.56224,0\n# 0.77707,0.35025,0\n# 0.79785,0.76921,0\n# 0.70876,0.96764,0\n# 0.69176,0.60865,0\n# 0.66408,0.92075,0\n# 0.65973,0.66666,0\n# 0.64574,0.56845,0\n# 0.89639,0.7085,0\n# 0.85476,0.63167,0\n# 0.62091,0.80424,0\n# 0.79057,0.56108,0\n# 0.58935,0.71582,0\n# 0.56846,0.7406,0\n# 0.65912,0.71548,0\n# 0.70938,0.74041,0\n# 0.59154,0.62927,0\n# 0.45829,0.4641,0\n# 0.79982,0.74847,0\n# 0.60974,0.54757,0\n# 0.68127,0.86985,0\n# 0.76694,0.64736,0\n# 0.69048,0.83058,0\n# 
0.68122,0.96541,0\n# 0.73229,0.64245,0\n# 0.76145,0.60138,0\n# 0.58985,0.86955,0\n# 0.73145,0.74516,0\n# 0.77029,0.7014,0\n# 0.73156,0.71782,0\n# 0.44556,0.57991,0\n# 0.85275,0.85987,0\n# 0.51912,0.62359,0\n\n" ]
[ [ "numpy.matmul", "numpy.random.rand", "numpy.random.seed" ] ]
himanshu007-creator/jina
[ "129c7d9db4b0e3077a621f58570e4848b46d5740" ]
[ "jina/types/document/__init__.py" ]
[ "import base64\nimport json\nimport mimetypes\nfrom collections import Counter\nfrom hashlib import blake2b\nfrom typing import (\n Any,\n Dict,\n Iterable,\n List,\n Optional,\n Tuple,\n Type,\n TypeVar,\n Union,\n overload,\n)\n\nimport numpy as np\nfrom google.protobuf import json_format\nfrom google.protobuf.field_mask_pb2 import FieldMask\n\nfrom ...excepts import BadDocType\nfrom ...helper import download_mermaid_url, dunder_get, random_identity, typename\nfrom ...importer import ImportExtensions\nfrom ...logging.predefined import default_logger\nfrom ...proto import jina_pb2\nfrom ..mixin import ProtoTypeMixin\nfrom ..ndarray.generic import BaseSparseNdArray, NdArray\nfrom ..score import NamedScore\nfrom ..score.map import NamedScoreMapping\nfrom ..struct import StructView\nfrom .converters import ContentConversionMixin, _text_to_word_sequence\nfrom .helper import VersionedMixin, versioned\n\nif False:\n # fix type-hint complain for sphinx and flake\n import scipy\n import tensorflow as tf\n import torch\n from scipy.sparse import coo_matrix\n\n from ..arrays.chunk import ChunkArray\n from ..arrays.match import MatchArray\n\n ArrayType = TypeVar(\n 'ArrayType',\n np.ndarray,\n scipy.sparse.csr_matrix,\n scipy.sparse.coo_matrix,\n scipy.sparse.bsr_matrix,\n scipy.sparse.csc_matrix,\n torch.sparse_coo_tensor,\n tf.SparseTensor,\n )\n\n SparseArrayType = TypeVar(\n 'SparseArrayType',\n np.ndarray,\n scipy.sparse.csr_matrix,\n scipy.sparse.coo_matrix,\n scipy.sparse.bsr_matrix,\n scipy.sparse.csc_matrix,\n torch.sparse_coo_tensor,\n tf.SparseTensor,\n )\n\n__all__ = ['Document', 'DocumentContentType', 'DocumentSourceType']\nDIGEST_SIZE = 8\n\n# This list is not exhaustive because we cannot add the `sparse` types without adding the `dependencies`\nDocumentContentType = TypeVar('DocumentContentType', bytes, str, 'ArrayType')\nDocumentSourceType = TypeVar(\n 'DocumentSourceType', jina_pb2.DocumentProto, bytes, str, Dict, 'Document'\n)\n\n_all_mime_types = set(mimetypes.types_map.values())\n\n_all_doc_content_keys = {'content', 'blob', 'text', 'buffer', 'graph'}\n_all_doc_array_keys = ('blob', 'embedding')\n_special_mapped_keys = ('scores', 'evaluations')\n\n\nclass Document(ProtoTypeMixin, VersionedMixin, ContentConversionMixin):\n \"\"\"\n :class:`Document` is one of the **primitive data type** in Jina.\n\n It offers a Pythonic interface to allow users access and manipulate\n :class:`jina.jina_pb2.DocumentProto` object without working with Protobuf itself.\n\n To create a :class:`Document` object, simply:\n\n .. highlight:: python\n .. code-block:: python\n\n from jina import Document\n d = Document()\n d.text = 'abc'\n\n Jina requires each Document to have a string id. You can set a custom one,\n or if non has been set a random one will be assigned.\n\n To access and modify the content of the document, you can use :attr:`text`, :attr:`blob`, and :attr:`buffer`.\n Each property is implemented with proper setter, to improve the integrity and user experience. For example,\n assigning ``doc.blob`` or ``doc.embedding`` can be simply done via:\n\n .. highlight:: python\n .. code-block:: python\n\n import numpy as np\n\n # to set as content\n d.content = np.random.random([10, 5])\n\n # to set as embedding\n d.embedding = np.random.random([10, 5])\n\n MIME type is auto set/guessed when setting :attr:`content` and :attr:`uri`\n\n :class:`Document` also provides multiple way to build from existing Document. 
You can build :class:`Document`\n from ``jina_pb2.DocumentProto``, ``bytes``, ``str``, and ``Dict``. You can also use it as view (i.e.\n weak reference when building from an existing ``jina_pb2.DocumentProto``). For example,\n\n .. highlight:: python\n .. code-block:: python\n\n a = DocumentProto()\n b = Document(a, copy=False)\n a.text = 'hello'\n assert b.text == 'hello'\n\n You can leverage the :meth:`convert_a_to_b` interface to convert between content forms.\n\n \"\"\"\n\n ON_GETATTR = ['matches', 'chunks']\n\n # overload_inject_start_document\n @overload\n def __init__(\n self,\n *,\n adjacency: Optional[int] = None,\n blob: Optional[Union['ArrayType', 'jina_pb2.NdArrayProto', 'NdArray']] = None,\n buffer: Optional[bytes] = None,\n chunks: Optional[Iterable['Document']] = None,\n content: Optional[DocumentContentType] = None,\n embedding: Optional[\n Union['ArrayType', 'jina_pb2.NdArrayProto', 'NdArray']\n ] = None,\n granularity: Optional[int] = None,\n id: Optional[str] = None,\n matches: Optional[Iterable['Document']] = None,\n mime_type: Optional[str] = None,\n modality: Optional[str] = None,\n parent_id: Optional[str] = None,\n tags: Optional[Union[Dict, StructView]] = None,\n text: Optional[str] = None,\n uri: Optional[str] = None,\n weight: Optional[float] = None,\n **kwargs,\n ):\n \"\"\"\n :param adjacency: the adjacency of this Document\n :param blob: the blob content of thi Document\n :param buffer: the buffer bytes from this document\n :param chunks: the array of chunks of this document\n :param content: the value of the content depending on `:meth:`content_type`\n :param embedding: the embedding of this Document\n :param granularity: the granularity of this Document\n :param id: the id of this Document\n :param matches: the array of matches attached to this document\n :param mime_type: the mime_type of this Document\n :param modality: the modality of the document.\n :param parent_id: the parent id of this Document\n :param tags: a Python dict view of the tags.\n :param text: the text from this document content\n :param uri: the uri of this Document\n :param weight: the weight of the document\n :param kwargs: other parameters to be set _after_ the document is constructed\n \"\"\"\n\n # overload_inject_end_document\n\n def __init__(\n self,\n document: Optional[DocumentSourceType] = None,\n field_resolver: Dict[str, str] = None,\n copy: bool = False,\n **kwargs,\n ):\n \"\"\"\n :param document: the document to construct from. If ``bytes`` is given\n then deserialize a :class:`DocumentProto`; ``dict`` is given then\n parse a :class:`DocumentProto` from it; ``str`` is given, then consider\n it as a JSON string and parse a :class:`DocumentProto` from it; finally,\n one can also give `DocumentProto` directly, then depending on the ``copy``,\n it builds a view or a copy from it.\n :param copy: when ``document`` is given as a :class:`DocumentProto` object, build a\n view (i.e. weak reference) from it or a deep copy from it.\n :param field_resolver: a map from field names defined in JSON, dict to the field\n names defined in Document.\n :param kwargs: other parameters to be set _after_ the document is constructed\n\n .. note::\n\n When ``document`` is a JSON string or Python dictionary object, the constructor will only map the values\n from known fields defined in Protobuf, all unknown fields are mapped to ``document.tags``. For example,\n\n .. highlight:: python\n .. 
code-block:: python\n\n d = Document({'id': '123', 'hello': 'world', 'tags': {'good': 'bye'}})\n\n assert d.id == '123' # true\n assert d.tags['hello'] == 'world' # true\n assert d.tags['good'] == 'bye' # true\n \"\"\"\n try:\n if isinstance(document, jina_pb2.DocumentProto):\n if copy:\n self._pb_body = jina_pb2.DocumentProto()\n self._pb_body.CopyFrom(document)\n else:\n self._pb_body = document\n elif isinstance(document, bytes):\n self._pb_body = jina_pb2.DocumentProto()\n self._pb_body.ParseFromString(document)\n elif isinstance(document, (dict, str)):\n if isinstance(document, str):\n document = json.loads(document)\n\n def _update_doc(d: Dict):\n for key in _all_doc_array_keys:\n if key in d:\n value = d[key]\n if isinstance(value, list):\n d[key] = NdArray(np.array(d[key])).dict()\n if 'chunks' in d:\n for chunk in d['chunks']:\n _update_doc(chunk)\n if 'matches' in d:\n for match in d['matches']:\n _update_doc(match)\n\n _update_doc(document)\n\n if field_resolver:\n document = {\n field_resolver.get(k, k): v for k, v in document.items()\n }\n\n user_fields = set(document)\n support_fields = set(\n self.attributes(\n include_proto_fields_camelcase=True,\n include_properties=False,\n )\n )\n\n self._pb_body = jina_pb2.DocumentProto()\n if support_fields.issuperset(user_fields):\n json_format.ParseDict(document, self._pb_body)\n else:\n _intersect = support_fields.intersection(user_fields)\n _remainder = user_fields.difference(_intersect)\n if _intersect:\n json_format.ParseDict(\n {k: document[k] for k in _intersect}, self._pb_body\n )\n if _remainder:\n support_prop = set(\n self.attributes(\n include_proto_fields=False, include_properties=True\n )\n )\n _intersect2 = support_prop.intersection(_remainder)\n _remainder2 = _remainder.difference(_intersect2)\n\n if _intersect2:\n self.set_attributes(**{p: document[p] for p in _intersect2})\n\n if _remainder2:\n self._pb_body.tags.update(\n {k: document[k] for k in _remainder}\n )\n elif isinstance(document, Document):\n if copy:\n self._pb_body = jina_pb2.DocumentProto()\n self._pb_body.CopyFrom(document.proto)\n else:\n self._pb_body = document.proto\n elif document is not None:\n # note ``None`` is not considered as a bad type\n raise ValueError(f'{typename(document)} is not recognizable')\n else:\n # create an empty document\n self._pb_body = jina_pb2.DocumentProto()\n except Exception as ex:\n raise BadDocType(\n f'fail to construct a document from {document}, '\n f'if you are trying to set the content '\n f'you may use \"Document(content=your_content)\"'\n ) from ex\n\n if self._pb_body.id is None or not self._pb_body.id:\n self.id = random_identity(use_uuid1=True)\n\n if kwargs:\n # check if there are mutually exclusive content fields\n if len(_all_doc_content_keys.intersection(kwargs.keys())) > 1:\n raise ValueError(\n f'Document content fields are mutually exclusive, please provide only one of {_all_doc_content_keys}'\n )\n self.set_attributes(**kwargs)\n self.__mermaid_id = None\n\n @property\n def _mermaid_id(self):\n if self.__mermaid_id is None:\n self.__mermaid_id = random_identity()\n return self.__mermaid_id\n\n @_mermaid_id.setter\n def _mermaid_id(self, m_id):\n self.__mermaid_id = m_id\n\n def pop(self, *fields) -> None:\n \"\"\"Remove the values from the given fields of this Document.\n\n :param fields: field names\n \"\"\"\n for k in fields:\n self._pb_body.ClearField(k)\n\n def clear(self) -> None:\n \"\"\"Remove all values from all fields of this Document.\"\"\"\n self._pb_body.Clear()\n\n @property\n def 
weight(self) -> float:\n \"\"\"\n :return: the weight of the document\n \"\"\"\n return self._pb_body.weight\n\n @weight.setter\n def weight(self, value: float):\n \"\"\"\n Set the weight of the document.\n\n :param value: the float weight of the document.\n \"\"\"\n self._pb_body.weight = value\n\n @property\n def modality(self) -> str:\n \"\"\"\n :return: the modality of the document.\"\"\"\n return self._pb_body.modality\n\n @modality.setter\n def modality(self, value: str):\n \"\"\"Set the modality of the document.\n\n :param value: The modality of the document\n \"\"\"\n self._pb_body.modality = value\n\n @property\n def tags(self) -> StructView:\n \"\"\"Return the `tags` field of this Document as a Python dict\n\n :return: a Python dict view of the tags.\n \"\"\"\n return StructView(self._pb_body.tags)\n\n @tags.setter\n def tags(self, value: Union[Dict, StructView]):\n \"\"\"Set the `tags` field of this Document to a Python dict\n\n :param value: a Python dict or a StructView\n \"\"\"\n if isinstance(value, StructView):\n self._pb_body.tags.Clear()\n self._pb_body.tags.update(value._pb_body)\n elif isinstance(value, dict):\n self._pb_body.tags.Clear()\n self._pb_body.tags.update(value)\n else:\n raise TypeError(f'{value!r} is not supported.')\n\n def update(\n self,\n source: 'Document',\n fields: Optional[List[str]] = None,\n ) -> None:\n \"\"\"Updates fields specified in ``fields`` from the source to current Document.\n\n :param source: The :class:`Document` we want to update from as source. The current\n :class:`Document` is referred to as the destination.\n :param fields: a list of field names that we want to update, if not specified,\n use all present fields in source.\n\n .. note::\n * if ``fields`` are empty, then all present fields in source will be merged into current document.\n * `tags` will be updated like a python :attr:`dict`.\n * the current :class:`Document` will be modified in place, ``source`` will be unchanged.\n * 
if current document has more fields than :attr:`source`, these extra fields will be preserved.\n \"\"\"\n # We do a safe update: only update existent (value being set) fields from source.\n present_fields = [\n field_descriptor.name\n for field_descriptor, _ in source._pb_body.ListFields()\n ]\n if not fields:\n fields = present_fields # if `fields` empty, update all present fields.\n for field in fields:\n if (\n field == 'tags'\n ): # For the tags, stay consistent with the python update method.\n self.tags.update(source.tags)\n else:\n self._pb_body.ClearField(field)\n try:\n setattr(self, field, getattr(source, field))\n except AttributeError:\n setattr(self._pb_body, field, getattr(source, field))\n\n @property\n def content_hash(self) -> str:\n \"\"\"Get the document hash according to its content.\n\n :return: the unique hash code to represent this Document\n \"\"\"\n # a tuple of field names that are included when computing the content hash.\n fields = (\n 'text',\n 'blob',\n 'buffer',\n 'embedding',\n 'uri',\n 'tags',\n 'mime_type',\n 'granularity',\n 'adjacency',\n )\n masked_d = jina_pb2.DocumentProto()\n present_fields = {\n field_descriptor.name for field_descriptor, _ in self._pb_body.ListFields()\n }\n fields_to_hash = present_fields.intersection(fields)\n FieldMask(paths=fields_to_hash).MergeMessage(self._pb_body, masked_d)\n return blake2b(\n masked_d.SerializePartialToString(), digest_size=DIGEST_SIZE\n ).hexdigest()\n\n @property\n def id(self) -> str:\n \"\"\"The document id in string.\n\n :return: the id of this Document\n \"\"\"\n return self._pb_body.id\n\n @property\n def parent_id(self) -> str:\n \"\"\"The document's parent id in string.\n\n :return: the parent id of this Document\n \"\"\"\n return self._pb_body.parent_id\n\n @id.setter\n def id(self, value: str):\n \"\"\"Set document id to a string value.\n\n :param value: id as string\n \"\"\"\n self._pb_body.id = str(value)\n\n @parent_id.setter\n def parent_id(self, value: str):\n \"\"\"Set document's parent id to a string value.\n\n :param value: id as string\n \"\"\"\n self._pb_body.parent_id = str(value)\n\n @property\n def blob(self) -> 'ArrayType':\n \"\"\"Return ``blob``, one of the content forms of a Document.\n\n .. note::\n Use :attr:`content` to return the content of a Document\n\n This property will return the `blob` of the `Document` as a `Dense` or `Sparse` array depending on the actual\n proto instance stored. 
In the case where the `blob` stored is sparse, it will return them as a `coo` matrix.\n If any other `sparse` type is desired, use :meth:`get_sparse_blob`.\n\n :return: the blob content of this Document\n \"\"\"\n return NdArray(self._pb_body.blob).value\n\n def get_sparse_blob(\n self, sparse_ndarray_cls_type: Type[BaseSparseNdArray], **kwargs\n ) -> 'SparseArrayType':\n \"\"\"Return ``blob`` of the content of a Document as a sparse array.\n\n :param sparse_ndarray_cls_type: Sparse class type, such as `SparseNdArray`.\n :param kwargs: Additional keyword arguments; for the `scipy` backend, we need to set\n the keyword `sp_format` as one of the scipy supported sparse formats, such as `coo`\n or `csr`.\n :return: the blob of this Document but as a sparse array\n \"\"\"\n return NdArray(\n self._pb_body.blob,\n sparse_cls=sparse_ndarray_cls_type,\n is_sparse=True,\n **kwargs,\n ).value\n\n @blob.setter\n def blob(self, value: Union['ArrayType', 'jina_pb2.NdArrayProto', 'NdArray']):\n \"\"\"Set the `blob` to :param:`value`.\n\n :param value: the array value to set the blob\n \"\"\"\n self._update_ndarray('blob', value)\n\n @property\n def embedding(self) -> 'SparseArrayType':\n \"\"\"Return ``embedding`` of the content of a Document.\n\n .. note::\n This property will return the `embedding` of the `Document` as a `Dense` or `Sparse` array depending on the actual\n proto instance stored. In the case where the `embedding` stored is sparse, it will return them as a `coo` matrix.\n If any other `sparse` type is desired, use :meth:`get_sparse_embedding`.\n\n :return: the embedding of this Document\n \"\"\"\n return NdArray(self._pb_body.embedding).value\n\n def get_sparse_embedding(\n self, sparse_ndarray_cls_type: Type[BaseSparseNdArray], **kwargs\n ) -> 'SparseArrayType':\n \"\"\"Return ``embedding`` of the content of a Document as a sparse array.\n\n :param sparse_ndarray_cls_type: Sparse class type, such as `SparseNdArray`.\n :param kwargs: Additional keyword arguments; for the `scipy` backend, we need to set\n the keyword `sp_format` as one of the scipy supported sparse formats, such as `coo`\n or `csr`.\n :return: the embedding of this Document but as a sparse array\n \"\"\"\n return NdArray(\n self._pb_body.embedding,\n sparse_cls=sparse_ndarray_cls_type,\n is_sparse=True,\n **kwargs,\n ).value\n\n @embedding.setter\n def embedding(self, value: Union['ArrayType', 'jina_pb2.NdArrayProto', 'NdArray']):\n \"\"\"Set the ``embedding`` of the content of a Document.\n\n :param value: the array value to set the embedding\n \"\"\"\n self._update_ndarray('embedding', value)\n\n def _update_sparse_ndarray(self, k, v, sparse_cls):\n NdArray(\n is_sparse=True,\n sparse_cls=sparse_cls,\n proto=getattr(self._pb_body, k),\n ).value = v\n\n @staticmethod\n def _check_installed_array_packages():\n from ... import JINA_GLOBAL\n\n if JINA_GLOBAL.scipy_installed is None:\n JINA_GLOBAL.scipy_installed = False\n with ImportExtensions(required=False, pkg_name='scipy'):\n import scipy\n\n JINA_GLOBAL.scipy_installed = True\n\n if JINA_GLOBAL.tensorflow_installed is None:\n JINA_GLOBAL.tensorflow_installed = False\n with ImportExtensions(required=False, pkg_name='tensorflow'):\n import tensorflow\n\n JINA_GLOBAL.tensorflow_installed = True\n\n if JINA_GLOBAL.torch_installed is None:\n JINA_GLOBAL.torch_installed = False\n with ImportExtensions(required=False, pkg_name='torch'):\n import torch\n\n JINA_GLOBAL.torch_installed = True\n\n def _update_if_sparse(self, k, v):\n\n from ... 
import JINA_GLOBAL\n\n v_valid_sparse_type = False\n Document._check_installed_array_packages()\n\n if JINA_GLOBAL.scipy_installed:\n import scipy\n import scipy.sparse\n\n if scipy.sparse.issparse(v):\n from ..ndarray.sparse.scipy import SparseNdArray\n\n self._update_sparse_ndarray(k=k, v=v, sparse_cls=SparseNdArray)\n v_valid_sparse_type = True\n\n if JINA_GLOBAL.tensorflow_installed:\n import tensorflow\n\n if isinstance(v, tensorflow.SparseTensor):\n from ..ndarray.sparse.tensorflow import SparseNdArray\n\n self._update_sparse_ndarray(k=k, v=v, sparse_cls=SparseNdArray)\n v_valid_sparse_type = True\n\n if JINA_GLOBAL.torch_installed:\n import torch\n\n if isinstance(v, torch.Tensor) and v.is_sparse:\n from ..ndarray.sparse.pytorch import SparseNdArray\n\n self._update_sparse_ndarray(k=k, v=v, sparse_cls=SparseNdArray)\n v_valid_sparse_type = True\n\n return v_valid_sparse_type\n\n def _update_ndarray(self, k, v):\n if isinstance(v, jina_pb2.NdArrayProto):\n getattr(self._pb_body, k).CopyFrom(v)\n elif isinstance(v, np.ndarray):\n NdArray(getattr(self._pb_body, k)).value = v\n elif isinstance(v, NdArray):\n NdArray(getattr(self._pb_body, k)).is_sparse = v.is_sparse\n NdArray(getattr(self._pb_body, k)).value = v.value\n else:\n v_valid_sparse_type = self._update_if_sparse(k, v)\n\n if not v_valid_sparse_type:\n raise TypeError(f'{k} is of unsupported type {typename(v)}')\n\n @property\n @versioned\n def matches(self) -> 'MatchArray':\n \"\"\"Get all matches of the current document.\n\n :return: the array of matches attached to this document\n \"\"\"\n # Problem with cyclic dependency\n from ..arrays.match import MatchArray\n\n return MatchArray(self._pb_body.matches, reference_doc=self)\n\n @matches.setter\n def matches(self, value: Iterable['Document']):\n \"\"\"Set the matches of the current document.\n\n :param value: value to set\n \"\"\"\n self.pop('matches')\n self.matches.extend(value)\n\n @property\n @versioned\n def chunks(self) -> 'ChunkArray':\n \"\"\"Get all chunks of the current document.\n\n :return: the array of chunks of this document\n \"\"\"\n # Problem with cyclic dependency\n from ..arrays.chunk import ChunkArray\n\n return ChunkArray(self._pb_body.chunks, reference_doc=self)\n\n @chunks.setter\n def chunks(self, value: Iterable['Document']):\n \"\"\"Set the chunks of the current document.\n\n :param value: the array of chunks of this document\n \"\"\"\n self.pop('chunks')\n self.chunks.extend(value)\n\n def set_attributes(self, **kwargs):\n \"\"\"Bulk update Document fields with key-value specified in kwargs\n\n .. 
seealso::\n :meth:`get_attributes` for bulk get attributes\n\n :param kwargs: the keyword arguments to set the values, where the keys are the fields to set\n \"\"\"\n for k, v in kwargs.items():\n if isinstance(v, (list, tuple)):\n if k == 'chunks':\n self.chunks.extend(v)\n elif k == 'matches':\n self.matches.extend(v)\n else:\n self._pb_body.ClearField(k)\n getattr(self._pb_body, k).extend(v)\n elif isinstance(v, dict) and k not in _special_mapped_keys:\n self._pb_body.ClearField(k)\n getattr(self._pb_body, k).update(v)\n else:\n if (\n hasattr(Document, k)\n and isinstance(getattr(Document, k), property)\n and getattr(Document, k).fset\n ):\n # if class property has a setter\n setattr(self, k, v)\n elif hasattr(self._pb_body, k):\n # no property setter, but proto has this attribute so fallback to proto\n setattr(self._pb_body, k, v)\n else:\n raise AttributeError(f'{k} is not recognized')\n\n def get_attributes(self, *fields: str) -> Union[Any, List[Any]]:\n \"\"\"Bulk fetch Document fields and return a list of the values of these fields\n\n .. note::\n Arguments will be extracted using `dunder_get`\n .. highlight:: python\n .. code-block:: python\n\n d = Document({'id': '123', 'hello': 'world', 'tags': {'id': 'external_id', 'good': 'bye'}})\n\n assert d.id == '123' # true\n assert d.tags['hello'] == 'world' # true\n assert d.tags['good'] == 'bye' # true\n assert d.tags['id'] == 'external_id' # true\n\n res = d.get_attributes(*['id', 'tags__hello', 'tags__good', 'tags__id'])\n\n assert res == ['123', 'world', 'bye', 'external_id']\n\n :param fields: the variable length values to extract from the document\n :return: a list with the attributes of this document ordered as the args\n \"\"\"\n\n ret = []\n for k in fields:\n try:\n value = getattr(self, k)\n\n if value is None:\n raise ValueError\n\n ret.append(value)\n except (AttributeError, ValueError):\n default_logger.warning(\n f'Could not get attribute `{typename(self)}.{k}`, returning `None`'\n )\n ret.append(None)\n\n # unboxing if args is single\n if len(fields) == 1:\n ret = ret[0]\n\n return ret\n\n @property\n def buffer(self) -> bytes:\n \"\"\"Return ``buffer``, one of the content forms of a Document.\n\n .. note::\n Use :attr:`content` to return the content of a Document\n\n :return: the buffer bytes from this document\n \"\"\"\n return self._pb_body.buffer\n\n @buffer.setter\n def buffer(self, value: bytes):\n \"\"\"Set the ``buffer`` to :param:`value`.\n\n :param value: the bytes value to set the buffer\n \"\"\"\n self._pb_body.buffer = value\n\n @property\n def text(self):\n \"\"\"Return ``text``, one of the content forms of a Document.\n\n .. note::\n Use :attr:`content` to return the content of a Document\n\n :return: the text from this document content\n \"\"\"\n return self._pb_body.text\n\n @text.setter\n def text(self, value: str):\n \"\"\"Set the `text` to :param:`value`\n\n :param value: the text value to set as content\n \"\"\"\n self._pb_body.text = value\n self.mime_type = 'text/plain'\n\n @property\n def uri(self) -> str:\n \"\"\"Return the URI of the document.\n\n :return: the uri of this Document\n \"\"\"\n return self._pb_body.uri\n\n @uri.setter\n def uri(self, value: str):\n \"\"\"Set the URI of the document.\n\n .. 
note::\n :attr:`mime_type` will be updated accordingly\n\n :param value: acceptable URI/URL, raise ``ValueError`` when it is not a valid URI\n \"\"\"\n self._pb_body.uri = value\n mime_type = mimetypes.guess_type(value)[0]\n if mime_type:\n self.mime_type = mime_type # Remote http/https contents mime_type will not be recognized.\n\n @property\n def mime_type(self) -> str:\n \"\"\"Get MIME type of the document\n\n :return: the mime_type of this Document\n \"\"\"\n return self._pb_body.mime_type\n\n @mime_type.setter\n def mime_type(self, value: str):\n \"\"\"Set MIME type of the document\n\n :param value: the acceptable MIME type, raise ``ValueError`` when MIME type is not\n recognizable.\n \"\"\"\n if value in _all_mime_types:\n self._pb_body.mime_type = value\n elif value:\n # given but not recognizable, do best guess\n r = mimetypes.guess_type(f'*.{value}')[0]\n if r:\n self._pb_body.mime_type = r\n else:\n self._pb_body.mime_type = value\n\n def __eq__(self, other):\n return self.proto == other.proto\n\n @property\n def content_type(self) -> str:\n \"\"\"Return the content type of the document, possible values: text, blob, buffer\n\n :return: the type of content of this Document\n \"\"\"\n return self._pb_body.WhichOneof('content')\n\n @property\n def content(self) -> DocumentContentType:\n \"\"\"Return the content of the document. It checks whichever field among :attr:`blob`, :attr:`text`,\n :attr:`buffer` has a value and returns it.\n\n .. seealso::\n :attr:`blob`, :attr:`buffer`, :attr:`text`\n\n :return: the value of the content depending on :meth:`content_type`\n \"\"\"\n attr = self.content_type\n if attr:\n return getattr(self, attr)\n\n @content.setter\n def content(self, value: DocumentContentType):\n \"\"\"Set the content of the document. It assigns the value to the field with the right type.\n\n .. seealso::\n :attr:`blob`, :attr:`buffer`, :attr:`text`\n\n :param value: the value from which to set the content of the Document\n \"\"\"\n if isinstance(value, bytes):\n self.buffer = value\n elif isinstance(value, str):\n self.text = value\n elif isinstance(value, np.ndarray):\n self.blob = value\n else:\n try:\n # try to set blob to `sparse` without needing to import all the `scipy` sparse requirements\n self.blob = value\n except:\n # ``None`` is also considered as a bad type\n raise TypeError(f'{typename(value)} is not recognizable')\n\n @property\n def granularity(self):\n \"\"\"Return the granularity of the document.\n\n :return: the granularity of this Document\n \"\"\"\n return self._pb_body.granularity\n\n @granularity.setter\n def granularity(self, value: int):\n \"\"\"Set the granularity of the document.\n\n :param value: the value of the granularity to be set\n \"\"\"\n self._pb_body.granularity = value\n\n @property\n def adjacency(self):\n \"\"\"Return the adjacency of the document.\n\n :return: the adjacency of this Document\n \"\"\"\n return self._pb_body.adjacency\n\n @adjacency.setter\n def adjacency(self, value: int):\n \"\"\"Set the adjacency of the document.\n\n :param value: the value of the adjacency to be set\n \"\"\"\n self._pb_body.adjacency = value\n\n @property\n def scores(self):\n \"\"\"Return the scores of the document.\n\n :return: the scores attached to this document as :class:`NamedScoreMapping`\n \"\"\"\n return NamedScoreMapping(self._pb_body.scores)\n\n @scores.setter\n def scores(\n self,\n value: Dict[\n str, Union[NamedScore, jina_pb2.NamedScoreProto, float, np.generic]\n ],\n ):\n \"\"\"Sets the scores of the `Document`. 
Especially important to provide the ability to set `scores` as:\n\n .. highlight:: python\n .. code-block:: python\n\n from jina import Document\n from jina.types.score import NamedScore\n d = Document(scores={'euclidean': 5, 'cosine': NamedScore(value=0.5)})\n\n :param value: the dictionary to set the scores\n \"\"\"\n scores = NamedScoreMapping(self._pb_body.scores)\n for k, v in value.items():\n scores[k] = v\n\n @property\n def evaluations(self):\n \"\"\"Return the evaluations of the document.\n\n :return: the evaluations attached to this document as :class:`NamedScoreMapping`\n \"\"\"\n return NamedScoreMapping(self._pb_body.evaluations)\n\n @evaluations.setter\n def evaluations(\n self,\n value: Dict[\n str, Union[NamedScore, jina_pb2.NamedScoreProto, float, np.generic]\n ],\n ):\n \"\"\"Sets the evaluations of the `Document`. Especially important to provide the ability to set `evaluations` as:\n\n .. highlight:: python\n .. code-block:: python\n\n from jina import Document\n from jina.types.score import NamedScore\n d = Document(evaluations={'precision': 0.9, 'recall': NamedScore(value=0.5)})\n\n :param value: the dictionary to set the evaluations\n \"\"\"\n scores = NamedScoreMapping(self._pb_body.evaluations)\n for k, v in value.items():\n scores[k] = v\n\n def MergeFrom(self, doc: 'Document'):\n \"\"\"Merge the content of the target document into this one.\n\n :param doc: the document to merge from\n \"\"\"\n self._pb_body.MergeFrom(doc.proto)\n\n def CopyFrom(self, doc: 'Document'):\n \"\"\"Copy the content of the target document into this one.\n\n :param doc: the document to copy from\n \"\"\"\n self._pb_body.CopyFrom(doc.proto)\n\n def __mermaid_str__(self):\n results = []\n from google.protobuf.json_format import MessageToDict\n\n content = MessageToDict(self._pb_body, preserving_proto_field_name=True)\n\n _id = f'{self._mermaid_id[:3]}~Document~'\n\n for idx, c in enumerate(self.chunks):\n results.append(\n f'{_id} --> \"{idx + 1}/{len(self.chunks)}\" {c._mermaid_id[:3]}~Document~: chunks'\n )\n results.append(c.__mermaid_str__())\n\n for idx, c in enumerate(self.matches):\n results.append(\n f'{_id} ..> \"{idx + 1}/{len(self.matches)}\" {c._mermaid_id[:3]}~Document~: matches'\n )\n results.append(c.__mermaid_str__())\n if 'chunks' in content:\n content.pop('chunks')\n if 'matches' in content:\n content.pop('matches')\n if content:\n results.append(f'class {_id}{{')\n for k, v in content.items():\n if isinstance(v, (str, int, float, bytes)):\n results.append(f'+{k} {str(v)[:10]}')\n else:\n results.append(f'+{k}({type(getattr(self, k, v))})')\n results.append('}')\n\n return '\\n'.join(results)\n\n def _mermaid_to_url(self, img_type: str) -> str:\n \"\"\"\n Render the current document as a URL pointing to an SVG; this requires an internet connection\n\n :param img_type: the type of image to be generated\n :return: the URL pointing to an SVG\n \"\"\"\n if img_type == 'jpg':\n img_type = 'img'\n\n mermaid_str = (\n \"\"\"\n %%{init: {'theme': 'base', 'themeVariables': { 'primaryColor': '#FFC666'}}}%%\n classDiagram\n\n \"\"\"\n + self.__mermaid_str__()\n )\n\n encoded_str = base64.b64encode(bytes(mermaid_str.strip(), 'utf-8')).decode(\n 'utf-8'\n )\n\n return f'https://mermaid.ink/{img_type}/{encoded_str}'\n\n def _ipython_display_(self):\n \"\"\"Displays the object in IPython as a side effect\"\"\"\n self.plot(inline_display=True)\n\n def plot(self, output: Optional[str] = None, inline_display: bool = False) -> None:\n \"\"\"\n Visualize the Document recursively.\n\n :param output: a filename specifying the name of the image to be created,\n 
the suffix svg/jpg determines the file type of the output image\n :param inline_display: show image directly inside the Jupyter Notebook\n \"\"\"\n image_type = 'svg'\n if output and output.endswith('jpg'):\n image_type = 'jpg'\n\n url = self._mermaid_to_url(image_type)\n showed = False\n if inline_display:\n try:\n from IPython.display import Image, display\n\n display(Image(url=url))\n showed = True\n except:\n # no need to panic users\n pass\n\n if output:\n download_mermaid_url(url, output)\n elif not showed:\n from jina.logging.predefined import default_logger\n\n default_logger.info(f'Document visualization: {url}')\n\n def _prettify_doc_dict(self, d: Dict):\n \"\"\"Changes recursively a dictionary to show nd array fields as lists of values\n\n :param d: the dictionary to prettify\n \"\"\"\n for key in _all_doc_array_keys:\n if key in d:\n value = getattr(self, key)\n if isinstance(value, np.ndarray):\n d[key] = value.tolist()\n if 'chunks' in d:\n for chunk_doc, chunk_dict in zip(self.chunks, d['chunks']):\n chunk_doc._prettify_doc_dict(chunk_dict)\n if 'matches' in d:\n for match_doc, match_dict in zip(self.matches, d['matches']):\n match_doc._prettify_doc_dict(match_dict)\n\n def dict(self, prettify_ndarrays=False, *args, **kwargs):\n \"\"\"Return the object in Python dictionary\n\n :param prettify_ndarrays: boolean indicating if the ndarrays need to be prettified to be shown as lists of values\n :param args: Extra positional arguments\n :param kwargs: Extra keyword arguments\n :return: dict representation of the object\n \"\"\"\n d = super().dict(*args, **kwargs)\n if prettify_ndarrays:\n self._prettify_doc_dict(d)\n return d\n\n def json(self, prettify_ndarrays=False, *args, **kwargs):\n \"\"\"Return the object in JSON string\n\n :param prettify_ndarrays: boolean indicating if the ndarrays need to be prettified to be shown as lists of values\n :param args: Extra positional arguments\n :param kwargs: Extra keyword arguments\n :return: JSON string of the object\n \"\"\"\n if prettify_ndarrays:\n import json\n\n d = super().dict(*args, **kwargs)\n self._prettify_doc_dict(d)\n return json.dumps(d, sort_keys=True, **kwargs)\n else:\n return super().json(*args, **kwargs)\n\n @property\n def non_empty_fields(self) -> Tuple[str]:\n \"\"\"Return the set fields of the current document that are not empty\n\n :return: the tuple of non-empty fields\n \"\"\"\n return tuple(field[0].name for field in self.ListFields())\n\n @staticmethod\n def attributes(\n include_proto_fields: bool = True,\n include_proto_fields_camelcase: bool = False,\n include_properties: bool = False,\n ) -> List[str]:\n \"\"\"Return all attributes supported by the Document, which can be accessed by ``doc.attribute``\n\n :param include_proto_fields: if set, then include all protobuf fields\n :param include_proto_fields_camelcase: if set, then include all protobuf fields in CamelCase\n :param include_properties: if set, then include all properties defined for Document class\n :return: a list of attributes in string.\n \"\"\"\n import inspect\n\n support_keys = []\n\n if include_proto_fields:\n support_keys = list(jina_pb2.DocumentProto().DESCRIPTOR.fields_by_name)\n if include_proto_fields_camelcase:\n support_keys += list(\n jina_pb2.DocumentProto().DESCRIPTOR.fields_by_camelcase_name\n )\n\n if include_properties:\n support_keys += [\n name\n for (name, value) in inspect.getmembers(\n Document, lambda x: isinstance(x, property)\n )\n ]\n return list(set(support_keys))\n\n def __getattr__(self, item):\n if item in 
self.ON_GETATTR:\n self._increaseVersion()\n if hasattr(self._pb_body, item):\n value = getattr(self._pb_body, item)\n elif '__' in item:\n value = dunder_get(self._pb_body, item)\n else:\n raise AttributeError(f'no attribute named `{item}`')\n return value\n\n def get_vocabulary(self, text_attrs: Tuple[str, ...] = ('text',)) -> Dict[str, int]:\n \"\"\"Get the text vocabulary in a counter dict that maps from the word to its frequency from all :attr:`text_attrs`.\n\n :param text_attrs: the textual attributes from which the vocabulary will be derived\n :return: a vocabulary in dictionary where key is the word, value is the frequency of that word in all text fields.\n \"\"\"\n all_tokens = Counter()\n\n for f in text_attrs:\n all_tokens.update(_text_to_word_sequence(getattr(self, f)))\n\n return all_tokens\n" ]
[ [ "numpy.array", "scipy.sparse.issparse" ] ]
ProkopHapala/ProbeParticleModel
[ "1afbd32cbf68440d71c2ee53f2066c898a00ae23" ]
[ "pyProbeParticle/HighLevel.py" ]
[ "#!/usr/bin/python\n\nimport numpy as np\nimport os\nfrom . import GridUtils as GU\nfrom . import basUtils as bU\nfrom . import fieldFFT\nfrom . import common as PPU\n\nfrom . import core\nfrom . import cpp_utils\n\n# overall procedure for importing the sample geometry:\n\ndef importGeometries( fname ):\n if (fname.lower().endswith(\".xyz\") or fname.lower().endswith(\".bas\")): \n atoms, nDim, lvec = bU.loadAtoms( fname )\n elif fname.lower().endswith(\".xsf\"):\n atoms, nDim, lvec = bU.loadXSFGeom( fname )\n elif fname.lower().endswith(\".cube\"):\n atoms, nDim, lvec = bU.loadAtomsCUBE( fname )\n elif fname.lower().endswith(\".in\"):\n atoms, nDim, lvec = bU.loadGeometryIN( fname )\n else:\n sys.exit(\"ERROR!!! Unknown format of geometry system. Supported \"\n \"formats are: .xyz, .bas., .xsf, .cube, .in \\n\\n\")\n if (nDim != []):\n PPU.params['gridN'] = nDim\n if (lvec != []):\n PPU.params['gridA'] = lvec[1]\n PPU.params['gridB'] = lvec[2]\n PPU.params['gridC'] = lvec[3]\n else:\n lvec=np.zeros((4,3))\n lvec[ 1,: ] = PPU.params['gridA'].copy() \n lvec[ 2,: ] = PPU.params['gridB'].copy()\n lvec[ 3,: ] = PPU.params['gridC'].copy()\n return atoms, lvec;\n\ndef parseAtoms( atoms, autogeom = False, PBC = True, FFparams=None ):\n if FFparams is None:\n raise ValueError(\"You should provide a list of LJ parameters!\")\n Rs = np.array([atoms[1],atoms[2],atoms[3]]); \n Natoms=[]\n elem_dict={}\n for i,ff in enumerate(FFparams):\n elem_dict[ff[3]] = i+1\n new_dict={ key.decode() : val for key, val in elem_dict.items() }\n elem_dict = new_dict\n for atm in atoms[0]:\n try:\n Natoms.append(int(atm))\n except:\n try:\n Natoms.append(elem_dict[atm])\n except:\n raise ValueError(\"Did not find atomkind: \"\n \"{}\".format(atm))\n iZs=np.array( Natoms )\n if autogeom:\n print(\" autoGeom \")\n PPU.autoGeom( Rs, shiftXY=True, fitCell=True, border=3.0 )\n Rs = np.transpose( Rs, (1,0) ).copy()\n Qs = np.array( atoms[4] )\n if PBC:\n iZs,Rs,Qs = PPU.PBCAtoms( iZs, Rs, Qs, avec=PPU.params['gridA'], bvec=PPU.params['gridB'] )\n return iZs,Rs,Qs\n\n\ndef perpareArrays( FF, Vpot ):\n if ( FF is None ):\n gridN = PPU.params['gridN']\n FF = np.zeros( (gridN[2],gridN[1],gridN[0],3) )\n else:\n PPU.params['gridN'] = np.shape( FF ) \n if ( Vpot ):\n V = np.zeros( (gridN[2],gridN[1],gridN[0]) )\n else:\n V=None\n core.setFF( gridF=FF, gridE=V )\n return FF, V \n\ndef computeLJ( Rs, iZs, FFLJ=None, FFparams=None, Vpot=False ):\n if FFparams is None:\n raise ValueError(\"You should provide a list of LJ parameters!\")\n FFLJ,VLJ = perpareArrays( FFLJ, Vpot )\n C6,C12 = PPU.getAtomsLJ( PPU.params['probeType'], iZs, FFparams )\n #core.setFF( gridF=FFLJ, gridE=VLJ )\n core.getLenardJonesFF( Rs, C6, C12 )\n return FFLJ, VLJ\n\ndef computeCoulomb( Rs, Qs, FFel=None , Vpot=False ):\n FFel,Vel = perpareArrays( FFel, Vpot )\n #core.setFF( gridF=FFel, gridE=Vel )\n core.getCoulombFF ( Rs, Qs * PPU.CoulombConst )\n return FFel, Vel\n\n\"\"\"\ndef prepareForceFields( store = True, storeXsf = False, autogeom = False, FFparams=None ):\n newEl = False\n newLJ = False\n head = None\n # --- try to load FFel or compute it from LOCPOT.xsf\n if ( os.path.isfile('FFel_x.xsf') ):\n print \" FFel_x.xsf found \"\n FFel, lvecEl, nDim, head = GU.loadVecField('FFel', FFel)\n PPU.lvec2params( lvecEl )\n else:\n print \"F Fel_x.xsf not found \"\n if ( xsfLJ and os.path.isfile('LOCPOT.xsf') ):\n print \" LOCPOT.xsf found \"\n V, lvecEl, nDim, head = GU.loadXSF('LOCPOT.xsf')\n PPU.lvec2params( lvecEl )\n FFel_x,FFel_y,FFel_z = 
fieldFFT.potential2forces( V, lvecEl, nDim, sigma = 1.0 )\n FFel = GU.packVecGrid( FFel_x,FFel_y,FFel_z )\n del FFel_x,FFel_y,FFel_z\n GU.saveVecFieldXsf( 'FFel', FFel, lvecEl, head = head )\n else:\n print \" LOCPOT.xsf not found \"\n newEl = True\n # --- try to load FFLJ \n if ( os.path.isfile('FFLJ_x.xsf') ):\n print \" FFLJ_x.xsf found \"\n FFLJ, lvecLJ, nDim, head = GU.loadVecFieldXsf( 'FFLJ' )\n PPU.lvec2params( lvecLJ )\n else: \n newLJ = True\n # --- compute Forcefield by atom-wise interactions \n if ( newEl or newLJ ):\n atoms = bU.loadAtoms('geom.bas')\n iZs,Rs,Qs = parseAtoms( atoms, autogeom = autogeom, PBC =\n PPU.params['PBC'], FFparams = FFparams )\n lvec = PPU.params2lvec( )\n if head is None:\n head = GU.XSF_HEAD_DEFAULT\n if newLJ:\n FFLJ = computeLJ ( Rs, iZs, FFparams=FFparams )\n GU.saveVecFieldXsf( 'FFLJ', FFLJ, lvec, head = head )\n if newEl:\n FFel = computeCoulomb( Rs, Qs, FFel )\n GU.saveVecFieldXsf( 'FFel', FFel, lvec, head = head )\n return FFLJ, FFel\n\"\"\" \n\ndef relaxedScan3D( xTips, yTips, zTips ):\n nstroke = len(zTips); \n rTip_ = np.zeros((nstroke,3))\n rPP_ = np.zeros((nstroke,3))\n F_ = np.zeros((nstroke,3))\n rTip_[:,2] = zTips[::-1] \n nz = len(zTips); ny = len(yTips ); nx = len(xTips);\n Fs = np.zeros( ( nz,ny,nx,3 ) );\n rPPs = np.zeros( ( nz,ny,nx,3 ) );\n rTips = np.zeros( ( nz,ny,nx,3 ) );\n for ix,x in enumerate( xTips ):\n print(\"relax ix:\", ix)\n rTip_[:,0] = x\n for iy,y in enumerate( yTips ):\n rTip_[:,1] = y\n itrav = core.relaxTipStroke( rTip_, rPP_, F_ ) / float( nstroke )\n Fs [:,iy,ix,:] = F_ [::-1,:]\n rPPs [:,iy,ix,:] = rPP_ [::-1,:] \n rTips[:,iy,ix,:] = rTip_[::-1,:] \n return Fs,rPPs,rTips\n\ndef Gauss(Evib, E0, w):\n return np.exp( -0.5*((Evib - E0)/w)**2);\n\ndef symGauss( Evib, E0, w):\n return Gauss(Evib, E0, w) - Gauss(Evib, -E0, w);\n \n\n" ]
[ [ "numpy.shape", "numpy.transpose", "numpy.array", "numpy.exp", "numpy.zeros" ] ]
pn11/anchor-stats
[ "51c4195ac65261b59c25991fc3e64dba085791ba" ]
[ "analyze.py" ]
[ "import glob\nimport os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\ndef make_label(filename):\n filename = filename.split('/')[-1]\n filename = filename.split('_')[0]\n # sometimes unexpected '%'\n filename = filename.split('%')[0]\n filename = filename[:10]\n return filename\n\n\ndef parse_time(series):\n \"\"\"Convert to Datetime to get timedelta\"\"\"\n # ignore 24:00:00 (cannot parse with strptime)\n series = series.apply(lambda x: x.split(' ')[0])\n series = pd.to_datetime(series, format='%m/%d/%Y')\n return series\n\n\ndef create_timedelta(df):\n df_ = df[:]\n df_['Time (UTC)'] = parse_time(df_['Time (UTC)'])\n df_diff = pd.DataFrame.from_dict({\"diff\": df_[\"Time (UTC)\"][i]-df_[\n \"Time (UTC)\"][0], df_.columns.values[1]: df_.iat[i, 1]} for i in range(len(df_)))\n print(df_diff)\n return df_diff\n\n\ndef load_data():\n csvs = [csv for csv in glob.glob('./data/*.csv')]\n csvs = sorted(csvs, key=lambda x: make_label(x))\n df = pd.read_csv(f\"{csvs[0]}\")\n\n df = df.rename(columns={\"Time (UTC)\": \"Time (UTC)\",\n \"Plays\": f\"{make_label(csvs[0])}\"})\n df_diff = create_timedelta(df)\n\n for csv in csvs[1:]:\n df_ = pd.read_csv(f\"{csv}\")\n df_ = df_.rename(\n columns={\"Time (UTC)\": \"Time (UTC)\", \"Plays\": f\"{make_label(csv)}\"})\n df_diff = pd.merge(df_diff, create_timedelta(df_))\n df = pd.merge(df, df_, on='Time (UTC)', how='outer')\n df['Time (UTC)'] = parse_time(df['Time (UTC)'])\n df = df.fillna(0)\n df = df.sort_values('Time (UTC)')\n print(df)\n print(df_diff)\n df.to_csv(\"merge.csv\")\n\n return df, df_diff\n\n\ndef plot_data(df, df_diff):\n x = df['Time (UTC)']\n y = df.iloc[:, 1:]\n x_diff = df_diff['diff']\n x_diff = [a.days for a in x_diff]\n y_diff = df_diff.iloc[:, 1:]\n\n # plot\n\n fig = plt.figure(figsize=(15, 8))\n ax1 = fig.add_subplot(221)\n ax1.set_title('Plays / day')\n ax2 = fig.add_subplot(223)\n ax2.set_title('Plays (cumulative)')\n ax3 = fig.add_subplot(222)\n ax3.set_title('Plays / day (in first few days)')\n ax4 = fig.add_subplot(224)\n ax3.set_title('Plays / day (cumulative, in first few days)')\n for i, col in enumerate(y.columns.to_list()):\n ax1.plot(x, y.iloc[:, i], label=col)\n ax2.plot(x, np.cumsum(y.iloc[:, i]), label=col)\n ax3.plot(x_diff, y_diff.iloc[:, i], label=col)\n ax4.plot(x_diff, np.cumsum(y_diff.iloc[:, i]), label=col)\n\n ax1.legend()\n\n total = df.sum(axis=1)\n\n # Total\n fig2 = plt.figure(figsize=(15, 8))\n ax5 = fig2.add_subplot(121)\n ax5.plot(df['Time (UTC)'], total)\n ax5.set_title('Total Plays / day')\n\n ax6 = fig2.add_subplot(122)\n ax6.plot(df['Time (UTC)'], np.cumsum(total))\n ax6.set_title('Total Plays (cumulative)')\n # ax6.set_yscale('log')\n\n plt.show()\n\n\ndef main():\n df, df_diff = load_data()\n plot_data(df, df_diff)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.merge", "pandas.to_datetime", "pandas.read_csv", "numpy.cumsum", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
Amanuel4/Finger-Counter-project
[ "37987e9efc28f27adb11a1b0f32847a507217c54" ]
[ "Finger Counter.py" ]
[ "import cv2\r\nimport time\r\nimport pygame\r\nimport numpy as np\r\nimport mediapipe as mp\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\n\r\n# Initialize the mediapipe hands class.\r\nmp_hands = mp.solutions.hands\r\n\r\n# Set up the Hands functions for images and videos.\r\n#hands = mp_hands.Hands(static_image_mode=True, max_num_hands=2, min_detection_confidence=0.5)\r\nhands_videos = mp_hands.Hands(static_image_mode=False, max_num_hands=2, min_detection_confidence=0.5)\r\n\r\n# Initialize the mediapipe drawing class.\r\nmp_drawing = mp.solutions.drawing_utils\r\n\r\ndef detectHandsLandmarks(image, hands, draw=True, display = True):\r\n '''\r\n This function performs hands landmarks detection on an image.\r\n Args:\r\n image: The input image with prominent hand(s) whose landmarks needs to be detected.\r\n hands: The Hands function required to perform the hands landmarks detection.\r\n draw: A boolean value that is if set to true the function draws hands landmarks on the output image. \r\n display: A boolean value that is if set to true the function displays the original input image, and the output \r\n image with hands landmarks drawn if it was specified and returns nothing.\r\n Returns:\r\n output_image: A copy of input image with the detected hands landmarks drawn if it was specified.\r\n results: The output of the hands landmarks detection on the input image.\r\n '''\r\n \r\n # Create a copy of the input image to draw landmarks on.\r\n output_image = image.copy()\r\n \r\n # Convert the image from BGR into RGB format.\r\n imgRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n \r\n # Perform the Hands Landmarks Detection.\r\n results = hands.process(imgRGB)\r\n \r\n # Check if landmarks are found and are specified to be drawn.\r\n if results.multi_hand_landmarks and draw:\r\n \r\n # Iterate over the found hands.\r\n for hand_landmarks in results.multi_hand_landmarks:\r\n \r\n # Draw the hand landmarks on the copy of the input image.\r\n mp_drawing.draw_landmarks(image = output_image, landmark_list = hand_landmarks,\r\n connections = mp_hands.HAND_CONNECTIONS,\r\n landmark_drawing_spec=mp_drawing.DrawingSpec(color=(255,255,255),\r\n thickness=2, circle_radius=2),\r\n connection_drawing_spec=mp_drawing.DrawingSpec(color=(255,0,255),\r\n thickness=2, circle_radius=2))\r\n \r\n # Check if the original input image and the output image are specified to be displayed.\r\n if display:\r\n \r\n # Display the original input image and the output image.\r\n plt.figure(figsize=[15,15])\r\n plt.subplot(121);plt.imshow(image[:,:,::-1]);plt.title(\"Original Image\");plt.axis('off');\r\n plt.subplot(122);plt.imshow(output_image[:,:,::-1]);plt.title(\"Output\");plt.axis('off');\r\n \r\n # Otherwise\r\n else:\r\n \r\n # Return the output image and results of hands landmarks detection.\r\n return output_image, results \r\n\r\ndef countFingers(image, results, draw=True, display=True):\r\n '''\r\n This function will count the number of fingers up for each hand in the image.\r\n Args:\r\n image: The image of the hands on which the fingers counting is required to be performed.\r\n results: The output of the hands landmarks detection performed on the image of the hands.\r\n draw: A boolean value that is if set to true the function writes the total count of fingers of the hands on the\r\n output image.\r\n display: A boolean value that is if set to true the function displays the resultant image and returns nothing.\r\n Returns:\r\n output_image: A copy of the input image with the fingers count 
written, if it was specified.\r\n        fingers_statuses: A dictionary containing the status (i.e., open or close) of each finger of both hands.\r\n        count:            A dictionary containing the count of the fingers that are up, of both hands.\r\n    '''\r\n    \r\n    # Get the height and width of the input image.\r\n    height, width, _ = image.shape\r\n    \r\n    # Create a copy of the input image to write the count of fingers on.\r\n    output_image = image.copy()\r\n    \r\n    # Initialize a dictionary to store the count of fingers of both hands.\r\n    count = {'RIGHT': 0, 'LEFT': 0}\r\n    \r\n    # Store the indexes of the tips landmarks of each finger of a hand in a list.\r\n    fingers_tips_ids = [mp_hands.HandLandmark.INDEX_FINGER_TIP, mp_hands.HandLandmark.MIDDLE_FINGER_TIP,\r\n                        mp_hands.HandLandmark.RING_FINGER_TIP, mp_hands.HandLandmark.PINKY_TIP]\r\n    \r\n    # Initialize a dictionary to store the status (i.e., True for open and False for close) of each finger of both hands.\r\n    fingers_statuses = {'RIGHT_THUMB': False, 'RIGHT_INDEX': False, 'RIGHT_MIDDLE': False, 'RIGHT_RING': False,\r\n                        'RIGHT_PINKY': False, 'LEFT_THUMB': False, 'LEFT_INDEX': False, 'LEFT_MIDDLE': False,\r\n                        'LEFT_RING': False, 'LEFT_PINKY': False}\r\n    \r\n    \r\n    # Iterate over the found hands in the image.\r\n    for hand_index, hand_info in enumerate(results.multi_handedness):\r\n        \r\n        # Retrieve the label of the found hand.\r\n        hand_label = hand_info.classification[0].label\r\n        \r\n        # Retrieve the landmarks of the found hand.\r\n        hand_landmarks =  results.multi_hand_landmarks[hand_index]\r\n        \r\n        # Iterate over the indexes of the tips landmarks of each finger of the hand.\r\n        for tip_index in fingers_tips_ids:\r\n            \r\n            # Retrieve the label (i.e., index, middle, etc.) of the finger on which we are iterating upon.\r\n            finger_name = tip_index.name.split(\"_\")[0]\r\n            \r\n            # Check if the finger is up by comparing the y-coordinates of the tip and pip landmarks.\r\n            if (hand_landmarks.landmark[tip_index].y < hand_landmarks.landmark[tip_index - 2].y):\r\n                \r\n                # Update the status of the finger in the dictionary to true.\r\n                fingers_statuses[hand_label.upper()+\"_\"+finger_name] = True\r\n                \r\n                # Increment the count of the fingers up of the hand by 1.\r\n                count[hand_label.upper()] += 1\r\n        \r\n        # Retrieve the x-coordinates of the tip and MCP landmarks of the thumb of the hand.\r\n        thumb_tip_x = hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP].x\r\n        thumb_mcp_x = hand_landmarks.landmark[mp_hands.HandLandmark.THUMB_TIP - 2].x\r\n        \r\n        # Check if the thumb is up by comparing the hand label and the x-coordinates of the retrieved landmarks.\r\n        if (hand_label=='Right' and (thumb_tip_x < thumb_mcp_x)) or (hand_label=='Left' and (thumb_tip_x > thumb_mcp_x)):\r\n            \r\n            # Update the status of the thumb in the dictionary to true.\r\n            fingers_statuses[hand_label.upper()+\"_THUMB\"] = True\r\n            \r\n            # Increment the count of the fingers up of the hand by 1.\r\n            count[hand_label.upper()] += 1\r\n     \r\n    # Check if the total count of the fingers of both hands are specified to be written on the output image.\r\n    if draw:\r\n\r\n        # Write the total count of the fingers of both hands on the output image.\r\n        cv2.putText(output_image, \" Total Fingers: \", (10, 25),cv2.FONT_HERSHEY_COMPLEX, 1, (20,255,155), 2)\r\n        cv2.putText(output_image, str(sum(count.values())), (width//2-150,240), cv2.FONT_HERSHEY_SIMPLEX,\r\n                    8.9, (20,0,255), 10, 10)\r\n\r\n    # Check if the output image is specified to be displayed.\r\n    if display:\r\n        \r\n        # Display the output image.\r\n        
plt.figure(figsize=[10,10])\r\n        plt.imshow(output_image[:,:,::-1]);plt.title(\"Output Image\");plt.axis('off');\r\n        \r\n    # Otherwise\r\n    else:\r\n\r\n        # Return the output image, the status of each finger and the count of the fingers up of both hands.\r\n        return output_image, fingers_statuses, count\r\n\r\n\r\ndef annotate(image, results, fingers_statuses, count, display=True):\r\n    '''\r\n    This function will draw an appealing visualization of each finger up of both hands in the image.\r\n    Args:\r\n        image:            The image of the hands on which the counted fingers are required to be visualized.\r\n        results:          The output of the hands landmarks detection performed on the image of the hands.\r\n        fingers_statuses: A dictionary containing the status (i.e., open or close) of each finger of both hands. \r\n        count:            A dictionary containing the count of the fingers that are up, of both hands.\r\n        display:          A boolean value that is if set to true the function displays the resultant image and \r\n                          returns nothing.\r\n    Returns:\r\n        output_image: A copy of the input image with the visualization of counted fingers.\r\n    '''\r\n    \r\n    # Get the height and width of the input image.\r\n    height, width, _ = image.shape\r\n    \r\n    # Create a copy of the input image.\r\n    output_image = image.copy()\r\n    \r\n    # Select the images of the hand prints that are required to be overlayed.\r\n    ########################################################################################################################\r\n    \r\n    # Initialize a dictionary to store the image paths of both hands.\r\n    # Initially it contains red hands images paths. The red image represents that the hand is not present in the image. \r\n    HANDS_IMGS_PATHS = {'LEFT': ['media/left_hand_not_detected.png'], 'RIGHT': ['media/right_hand_not_detected.png']}\r\n    \r\n    # Check if there are hand(s) in the image.\r\n    if results.multi_hand_landmarks:\r\n        \r\n        # Iterate over the detected hands in the image.\r\n        for hand_index, hand_info in enumerate(results.multi_handedness):\r\n            \r\n            # Retrieve the label of the hand.\r\n            hand_label = hand_info.classification[0].label\r\n            \r\n            # Update the image path of the hand to a green color hand image.\r\n            # This green image represents that the hand is present in the image. 
\r\n            HANDS_IMGS_PATHS[hand_label.upper()] = ['media/'+hand_label.lower()+'_hand_detected.png']\r\n            \r\n            # Check if all the fingers of the hand are up/open.\r\n            if count[hand_label.upper()] == 5:\r\n                \r\n                # Update the image path of the hand to a hand image with green color palm and orange color fingers image.\r\n                # The orange color of a finger represents that the finger is up.\r\n                HANDS_IMGS_PATHS[hand_label.upper()] = ['media/'+hand_label.lower()+'_all_fingers.png']\r\n            \r\n            # Otherwise if all the fingers of the hand are not up/open.\r\n            else:\r\n                \r\n                # Iterate over the fingers statuses of the hands.\r\n                for finger, status in fingers_statuses.items():\r\n                    \r\n                    # Check if the finger is up and belongs to the hand that we are iterating upon.\r\n                    if status == True and finger.split(\"_\")[0] == hand_label.upper():\r\n                        \r\n                        # Append another image of the hand in the list inside the dictionary.\r\n                        # This image only contains the finger we are iterating upon of the hand in orange color.\r\n                        # As the orange color represents that the finger is up.\r\n                        HANDS_IMGS_PATHS[hand_label.upper()].append('media/'+finger.lower()+'.png')\r\n    \r\n    ########################################################################################################################\r\n    \r\n    # Overlay the selected hand prints on the input image.\r\n    ########################################################################################################################\r\n    \r\n    # Iterate over the left and right hand.\r\n    for hand_index, hand_imgs_paths in enumerate(HANDS_IMGS_PATHS.values()):\r\n        \r\n        # Iterate over the images paths of the hand.\r\n        for img_path in hand_imgs_paths:\r\n            \r\n            # Read the image including its alpha channel. The alpha channel (0-255) determines the level of visibility. \r\n            # In the alpha channel, 0 represents the transparent area and 255 represents the visible area.\r\n            hand_imageBGRA = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)\r\n            \r\n            # Retrieve all the alpha channel values of the hand image. \r\n            alpha_channel = hand_imageBGRA[:,:,-1]\r\n            \r\n            # Retrieve all the blue, green, and red channel values of the hand image.\r\n            # As we also need the three-channel version of the hand image. 
\r\n            hand_imageBGR = hand_imageBGRA[:,:,:-1]\r\n            \r\n            # Retrieve the height and width of the hand image.\r\n            hand_height, hand_width, _ = hand_imageBGR.shape\r\n\r\n            # Retrieve the region of interest of the output image where the handprint image will be placed.\r\n            ROI = output_image[30 : 30 + hand_height,\r\n                               (hand_index * width//2) + width//12 : ((hand_index * width//2) + width//12 + hand_width)]\r\n            \r\n            # Overlay the handprint image by updating the pixel values of the ROI of the output image at the \r\n            # indexes where the alpha channel has the value 255.\r\n            ROI[alpha_channel==255] = hand_imageBGR[alpha_channel==255]\r\n\r\n            # Update the ROI of the output image with resultant image pixel values after overlaying the handprint.\r\n            output_image[30 : 30 + hand_height,\r\n                         (hand_index * width//2) + width//12 : ((hand_index * width//2) + width//12 + hand_width)] = ROI\r\n    \r\n    ########################################################################################################################\r\n    \r\n    # Check if the output image is specified to be displayed.\r\n    if display:\r\n\r\n        # Display the output image.\r\n        plt.figure(figsize=[10,10])\r\n        plt.imshow(output_image[:,:,::-1]);plt.title(\"Output Image\");plt.axis('off');\r\n        \r\n    # Otherwise\r\n    else:\r\n\r\n        # Return the output image\r\n        return output_image\r\n\r\n# Initialize the VideoCapture object to read from the webcam.\r\ncamera_video = cv2.VideoCapture(0)\r\ncamera_video.set(3,1280)\r\ncamera_video.set(4,960)\r\n\r\n# Create named window for resizing purposes.\r\ncv2.namedWindow('Fingers Counter', cv2.WINDOW_NORMAL)\r\n\r\n# Iterate until the webcam is accessed successfully.\r\nwhile camera_video.isOpened():\r\n    \r\n    # Read a frame.\r\n    ok, frame = camera_video.read()\r\n    \r\n    # Check if frame is not read properly then continue to the next iteration to read the next frame.\r\n    if not ok:\r\n        continue\r\n    \r\n    # Flip the frame horizontally for natural (selfie-view) visualization.\r\n    frame = cv2.flip(frame, 1)\r\n    \r\n    # Perform Hands landmarks detection on the frame.\r\n    frame, results = detectHandsLandmarks(frame, hands_videos, display=False)\r\n    \r\n    # Check if the hands landmarks in the frame are detected.\r\n    if results.multi_hand_landmarks:\r\n            \r\n        # Count the number of fingers up of each hand in the frame.\r\n        frame, fingers_statuses, count = countFingers(frame, results, display=False)\r\n    \r\n    # Display the frame.\r\n    cv2.imshow('Fingers Counter', frame)\r\n    \r\n    # Wait for 1ms. If a key is pressed, retrieve the ASCII code of the key.\r\n    k = cv2.waitKey(1) & 0xFF\r\n    \r\n    # Check if 'ESC' is pressed and break the loop.\r\n    if(k == 27):\r\n        break\r\n\r\n# Release the VideoCapture Object and close the windows.\r\ncamera_video.release()\r\ncv2.destroyAllWindows() " ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure" ] ]
n-zhang/mmf
[ "9d76995ac76e70544315701a5057f4269949514f" ]
[ "tests/test_utils.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\nimport argparse\nimport platform\nimport random\nimport socket\nimport unittest\n\nimport torch\n\n\ndef compare_tensors(a, b):\n return torch.equal(a, b)\n\n\ndef dummy_args(model=\"cnn_lstm\", dataset=\"clevr\"):\n args = argparse.Namespace()\n args.opts = [f\"model={model}\", f\"dataset={dataset}\"]\n args.config_override = None\n return args\n\n\ndef is_network_reachable():\n try:\n # check if host name can be resolved\n host = socket.gethostbyname(\"one.one.one.one\")\n # check if host is actually reachable\n s = socket.create_connection((host, 80), 2)\n s.close()\n return True\n except OSError as e:\n if e.errno == 101:\n pass\n return False\n\n\nNETWORK_AVAILABLE = is_network_reachable()\nCUDA_AVAILBLE = torch.cuda.is_available()\n\n\ndef skip_if_no_network(testfn, reason=\"Network is not available\"):\n return unittest.skipUnless(NETWORK_AVAILABLE, reason)(testfn)\n\n\ndef skip_if_no_cuda(testfn, reason=\"Cuda is not available\"):\n return unittest.skipUnless(CUDA_AVAILBLE, reason)(testfn)\n\n\ndef skip_if_windows(testfn, reason=\"Doesn't run on Windows\"):\n return unittest.skipIf(\"Windows\" in platform.system(), reason)(testfn)\n\n\ndef skip_if_macos(testfn, reason=\"Doesn't run on MacOS\"):\n return unittest.skipIf(\"Darwin\" in platform.system(), reason)(testfn)\n\n\ndef compare_state_dicts(a, b):\n same = True\n same = same and (list(a.keys()) == list(b.keys()))\n if not same:\n return same\n\n for val1, val2 in zip(a.values(), b.values()):\n if isinstance(val1, torch.Tensor):\n same = same and compare_tensors(val1, val2)\n elif not isinstance(val2, torch.Tensor):\n same = same and val1 == val2\n else:\n same = False\n if not same:\n return same\n\n return same\n\n\ndef build_random_sample_list():\n from mmf.common.sample import Sample, SampleList\n\n first = Sample()\n first.x = random.randint(0, 100)\n first.y = torch.rand((5, 4))\n first.z = Sample()\n first.z.x = random.randint(0, 100)\n first.z.y = torch.rand((6, 4))\n\n second = Sample()\n second.x = random.randint(0, 100)\n second.y = torch.rand((5, 4))\n second.z = Sample()\n second.z.x = random.randint(0, 100)\n second.z.y = torch.rand((6, 4))\n\n return SampleList([first, second])\n\n\nDATA_ITEM_KEY = \"test\"\n\n\nclass NumbersDataset(torch.utils.data.Dataset):\n def __init__(self, num_examples):\n self.num_examples = num_examples\n\n def __getitem__(self, idx):\n return {DATA_ITEM_KEY: torch.tensor(idx, dtype=torch.float32)}\n\n def __len__(self):\n return self.num_examples\n\n\nclass SimpleModel(torch.nn.Module):\n def __init__(self, size):\n super().__init__()\n self.linear = torch.nn.Linear(size, 4)\n\n def forward(self, prepared_batch):\n batch = prepared_batch[DATA_ITEM_KEY]\n model_output = {\"losses\": {\"loss\": torch.sum(self.linear(batch))}}\n return model_output\n" ]
[ [ "torch.equal", "torch.tensor", "torch.nn.Linear", "torch.rand", "torch.cuda.is_available" ] ]
rparak/Bezier_Curve_Simple
[ "06531e17601a52c65aef36c38d61673fee676751" ]
[ "src/Python/Animation/animation_2D.py" ]
[ "\"\"\"\n## =========================================================================== ## \nMIT License\nCopyright (c) 2021 Roman Parak\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n## =========================================================================== ## \nAuthor : Roman Parak\nEmail : Roman.Parak@outlook.com\nGithub : https://github.com/rparak\nFile Name: animation_2D.py\n## =========================================================================== ## \n\"\"\"\n\n# System (Default)\nimport sys\n# Numpy (Array computing) [pip3 install numpy]\nimport numpy as np\n# Matplotlib (Visualization) [pip3 install matplotlib]\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\n# Own library for Bézier curve calculation\nimport Bezier\n\n# Input Data: \n# Visible name of title\ntitle_visibility = True\n# Name of GIF file: Quadratic, Cubic, N-Degree\ngif_file_name = 'Quadratic' \n\n# Create figure\nfig, ax = plt.subplots()\nfig.set_size_inches(18.5, 10.5, forward=True)\n\n# Initialization of lines:\nLine, = ax.plot([], [], '-', linewidth=2.25, color=[1.0,0.75,0.5,1.0], label=f'{gif_file_name} Bézier Curve')\n\n# Data initialization for animation:\nx_data = [] \ny_data = []\n\n# Input Points:\nif gif_file_name == 'Quadratic':\n points = [[1.0, 1.0], [1.25, 2.0], [1.75, 1.75]]\nelif gif_file_name == 'Cubic':\n points = [[1.0, 1.0], [1.25, 2.0], [1.75, 2.0], [2.0, 1.0]]\nelif gif_file_name == 'N-Degree':\n points = [[1.0, 1.0], [1.25, 2.0], [1.75, 2.0], [2.0, 1.0], [1.0, -1.0], [1.25, -2.0], [1.75, -2.0], [2.0, -1.0]]\n\ndef init_animation():\n \"\"\"\n Description: \n Initialize the individual animated lines that will move in the animation.\n\n Returns:\n (1) parameter [Float Vector]: Array of line.\n \"\"\"\n\n # Display points.\n for _, point in enumerate(points):\n ax.plot(point[0], point[1], marker = 'o', ms = 10, mfc = [0.2,0.4,0.6,0.5], markeredgecolor = [0.2,0.4,0.6,1.0], mew = 2.5)\n\n # Calculation and display the Linear Bézier Curve p(t).\n for i in range(len(points) - 1):\n result = Bezier.Linear(100, [points[i], points[i + 1]])\n ax.plot(result[0], result[1], '--', linewidth=0.75, color=[0.2,0.4,0.6,1.0])\n\n Line.set_data(np.array([]), np.array([]))\n\n return Line,\n\ndef update_animation(idx, Data):\n \"\"\"\n Description:\n Line graph animation update.\n\n Args:\n (1) i [INT]: Iteration of the graph. 
\n        (2) Data [Float Vector, Float Vector]: Calculated Bézier curve data.\n\n    Returns:\n        (1) parameter [Float Vector]: Array of line.\n    \"\"\"\n\n    Line.set_data(np.array(Data[0][:idx]), np.array(Data[1][:idx]))\n\n    print(f'[INFO] Progress in GIF creation: {np.round(100 * float(idx)/float(len(Data[0])),1)} %')\n\n    return Line,\n\ndef main():\n    # Initialization of the data\n    # Number of samples to generate. Must be non-negative.\n    num_of_samples = 100\n    # Result Curve Data\n    result_data = [[], []]\n\n    if gif_file_name == 'Quadratic' and len(points) == 3:\n        # Calculation of the Quadratic Bézier Curve p(t).\n        for i in range(len(points) - 2):\n            result = Bezier.Quadratic(num_of_samples, [points[i], points[i + 1], points[i + 2]])\n\n            # Assign data to variables\n            for i in range(len(result[0])):\n                result_data[0].append(result[0][i])\n                result_data[1].append(result[1][i])\n    elif gif_file_name == 'Cubic' and len(points) == 4:\n        # Calculation of the Cubic Bézier Curve p(t).\n        for i in range(len(points) - 3):\n            result = Bezier.Cubic(num_of_samples, [points[i], points[i + 1], points[i + 2], points[i + 3]])\n\n            # Assign data to variables\n            for i in range(len(result[0])):\n                result_data[0].append(result[0][i])\n                result_data[1].append(result[1][i])\n    elif gif_file_name == 'N-Degree':\n        \"\"\"\n        Initialization Bezier Class\n        \n        Input:\n            (1) num_of_samples [INT]: Number of samples to generate. Must be non-negative.\n        \"\"\"\n        num_of_samples = 100\n        Bezier_Ndeg = Bezier.N_Degree(num_of_samples)\n\n        # Automatic Curve Calculation (Result of the calculation) -> depends on the simplification factor\n        result = Bezier_Ndeg.Solve(points, 1)\n\n        # Assign data to variables\n        result_data[0] = result[0]\n        result_data[1] = result[1]\n\n    print(f'[INFO] Number of input data: {len(result_data[0])}')\n\n    if title_visibility == True:\n        fig.suptitle('Bézier Curve', fontsize = 20)\n\n    # Axis Parameters:\n    # Label\n    ax.set_xlabel(r'X - Position (Units)')\n    ax.set_ylabel(r'Y - Position (Units)')\n    # Other dependencies\n    ax.grid(linewidth = 0.75, linestyle = '--')\n    ax.legend(fontsize=10.0)\n\n    # Start Animation\n    anim = animation.FuncAnimation(fig, update_animation, init_func=init_animation, frames=len(result_data[0]), interval=10, \n                                   fargs=(result_data, ), blit=True, repeat=False)\n    # Save Animation \n    anim.save(f'{gif_file_name}.gif', fps=30, bitrate=1000)\n    print(f'[INFO] Progress in GIF creation: 100.0 %')\n    print(f'[INFO] The GIF animation is successfully saved.')\n\nif __name__ == '__main__':\n    sys.exit(main())\n" ]
[ [ "numpy.array", "matplotlib.pyplot.subplots", "numpy.float" ] ]
spenczar/thor
[ "d0f3d0f07ffb069d1ca94701ad32f3c0c013aa61" ]
[ "submitTHORJob.py" ]
[ "import logging\nimport argparse\nimport pandas as pd\nimport os\n\nimport pika\nfrom google.cloud.storage import Client as GCSClient\nimport google.cloud.exceptions\n\nlogger = logging.getLogger(\"thor\")\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Run Tracklet-less Heliocentric Orbit Recovery through a queue\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"preprocessed_observations\", type=str, help=\"Preprocessed observations.\"\n )\n parser.add_argument(\"test_orbits\", type=str, help=\"Path to test orbits.\")\n parser.add_argument(\n \"bucket\",\n type=str,\n help=\"name of the Google Cloud Storage bucket to use to hold inputs and outputs\",\n )\n parser.add_argument(\n \"queue\", type=str, help=\"name of the queue to submit the job to\"\n )\n parser.add_argument(\"out_dir\", type=str, help=\"destination path for results\")\n parser.add_argument(\n \"--config\", type=str, default=None,\n )\n parser.add_argument(\n \"--create-bucket\",\n action=\"store_true\",\n help=\"create the bucket if it does not exist already\",\n )\n parser.add_argument(\n \"--rabbit-host\",\n type=str,\n default=\"rabbit.c.moeyens-thor-dev.internal\",\n help=\"hostname of the rabbit broker\",\n )\n parser.add_argument(\n \"--rabbit-port\", type=int, default=5672, help=\"port of the rabbit broker\"\n )\n parser.add_argument(\n \"--rabbit-username\",\n type=str,\n default=\"thor\",\n help=\"username to connect with to the rabbit broker\",\n )\n parser.add_argument(\n \"--rabbit-password\",\n type=str,\n default=\"$RABBIT_PASSWORD env var\",\n help=\"password to connect with to the rabbit broker\",\n )\n parser.add_argument(\n \"--poll-interval\",\n type=float,\n default=5.0,\n help=\"time in seconds between checking whether there are more tasks available\",\n )\n args = parser.parse_args()\n if args.rabbit_password == \"$RABBIT_PASSWORD env var\":\n args.rabbit_password = os.environ[\"RABBIT_PASSWORD\"]\n\n return args\n\n\ndef main():\n args = parse_args()\n\n # Imports of thor modules are deferred until after argument parsing to avoid\n # numba JIT time if the arguments are invalid or the user asked for --help.\n import thor.utils.logging\n\n thor.utils.logging.setupLogger(\"thor\")\n\n from thor.taskqueue.client import Client as TaskQueueClient\n from thor.taskqueue.queue import TaskQueueConnection\n from thor.orbits import Orbits\n from thor.config import Config\n\n if not isinstance(args.config, str):\n config = Config\n else:\n config = Config.fromYaml(args.config)\n\n # Read observations\n preprocessed_observations = pd.read_csv(\n args.preprocessed_observations, index_col=False, dtype={\"obs_id\": str}\n )\n\n # Read test orbits\n test_orbits = Orbits.from_csv(args.test_orbits)\n\n # Connect to Rabbit\n queue = TaskQueueConnection(\n pika.ConnectionParameters(\n host=args.rabbit_host,\n port=args.rabbit_port,\n credentials=pika.PlainCredentials(\n username=args.rabbit_username, password=args.rabbit_password,\n ),\n ),\n args.queue,\n )\n queue.connect()\n\n # Connect to GCS bucket\n gcs = GCSClient()\n if args.create_bucket:\n try:\n gcs.create_bucket(args.bucket)\n except google.cloud.exceptions.Conflict:\n # Bucket already exists.\n pass\n bucket = gcs.bucket(args.bucket)\n taskqueue_client = TaskQueueClient(bucket, queue)\n\n manifest = taskqueue_client.launch_job(\n config, preprocessed_observations, test_orbits\n )\n taskqueue_client.monitor_job_status(manifest.job_id)\n taskqueue_client.download_results(manifest, 
args.out_dir)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_csv" ] ]
dropoutlabs/PySyft-TensorFlow
[ "70c80872babb4d7926d2b57242850751903bff31" ]
[ "syft_tensorflow/serde/serde_test.py" ]
[ "import tensorflow as tf\nimport syft\n\ndef test_serde_constant():\n z = tf.constant([1.0, 2.0])\n z.id = 123456\n\n ser = syft.serde.serialize(z)\n x = syft.serde.deserialize(ser)\n\n assert all(tf.math.equal(x, z))\n assert x.id == z.id\n assert x.dtype == z.dtype\n\ndef test_serde_tensorshape():\n hook = syft.TensorFlowHook(tf)\n syft.tensorflow.hook = hook\n\n z = tf.TensorShape([1, 2])\n\n ser = syft.serde.serialize(z)\n x = syft.serde.deserialize(ser)\n\n assert all(tf.math.equal(x, z))\n\n" ]
[ [ "tensorflow.math.equal", "tensorflow.TensorShape", "tensorflow.constant" ] ]
saman-codes/dldojo
[ "9fd828f1902ba3d46e9bb5f554ef37d07335b29e" ]
[ "src/network.py" ]
[ "# Standard Python\nimport os\nimport copy\nimport pickle\nimport logging\nfrom collections import OrderedDict\n\n# Local\nfrom layers import Layer\nfrom losses import BinaryCrossEntropy\n\n# Thirdparty\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nlogging.basicConfig(format='%(message)s', level=logging.INFO)\n\n\nclass Network():\n '''\n Base class for a neural network model\n '''\n def __init__(\n self,\n load_weights=False,\n ):\n self.__name__ = 'GenericNetwork'\n self.layers = list()\n self.load_weights = load_weights\n return\n\n def set_name(self, name):\n self.__name__ = name\n return self\n\n def train(\n self,\n x,\n y,\n loss,\n batch_size=1,\n epochs=100,\n learning_rate=5e-4,\n optimizer='minibatch_sgd',\n regularizer=None,\n verbose=False,\n plot_loss=False,\n shuffle_data=True,\n gradient_check=False,\n save_weights=False,\n ):\n '''\n First implement a forward pass and store the weighted products and activations\n Then implement a backward pass, computing the gradient for each weight matrix\n and adjusting the weights using gradient descent\n '''\n self.loss = loss\n self.training_loss = []\n self.data_size = x.shape[1]\n self.batch_size = batch_size\n self.set_optimizer(optimizer)\n\n if regularizer is not None:\n assert (isinstance(regularizer, tuple))\n self.regularizer, self.reg_lambda = regularizer\n else:\n self.regularizer = regularizer\n if shuffle_data:\n # Shuffle the original data\n s = np.random.permutation(self.data_size)\n x = x[:, s]\n y = y[:, s]\n if self.load_weights and os.path.exists(load_weights):\n try:\n weights_list = pickle.load(load_weights)\n for idx, l in enumerate(self.layers):\n l.weights = weights_list[idx][0]\n l.bias = weights_list[idx][1]\n except Exception as ex:\n logging.exception('Failed to load weights')\n\n logging.info(f'Begin training for network: {self.__name__}')\n\n for epoch in tqdm(range(epochs)):\n self.idx = 0\n while self.idx < self.data_size:\n minibatch_x, minibatch_y = self.get_minibatch(x, y)\n # Forward pass\n output = self.train_predict(minibatch_x)\n loss = self.loss(output, minibatch_y).mean()\n\n if verbose:\n logging.info(f'Training loss: {loss}')\n\n # Training step\n backward_gradient = self.loss.output_gradient(\n output, minibatch_y)\n for layer in reversed(self.layers):\n backward_gradient = layer.backward(backward_gradient)\n if self.regularizer:\n self._add_regularization_term(layer)\n if gradient_check:\n self._check_gradient(minibatch_x, minibatch_y, layer)\n else:\n # Do not update the weights if checking the gradients\n layer.update_weights(learning_rate, self.batch_size)\n\n # Save training loss at end of epoch for plotting\n self.training_loss.append((epoch, loss))\n\n if plot_loss:\n plt.title(f'Training loss for network: {self.__name__}')\n plt.plot([i[0] for i in self.training_loss],\n [i[1] for i in self.training_loss])\n plt.show()\n\n if save_weights:\n weights_list = [(l.weights, l.bias) for l in self.layers]\n net_name = '_'.join(self.__name__.split(' '))\n fname = os.path.join(os.getcwd(), f'{net_name}_{epoch}')\n with open(fname, 'w+') as f:\n os.chmod(fname, 777)\n pickle.dump(weights_list, fname)\n logging.info(f'Weights saved on file {fname}')\n\n return self\n\n def _add_regularization_term(self, layer):\n if self.regularizer == 'L2':\n # Derivative of the squared weights, so we lose the power of 2\n d_reg_term = layer.weights\n layer.gradient += self.reg_lambda * d_reg_term / d_reg_term.size\n return\n\n def test_predict(self, x):\n return 
self.train_predict(x, runtime='test')\n\n def train_predict(self, x, **kwargs):\n runtime = kwargs.get('runtime', 'train')\n # Get output shape from last layer\n os = self.layers[-1].shape\n input_layer = self.layers[0]\n output = input_layer.forward(x, runtime)\n for layer in self.layers[1:]:\n output = layer.forward(output, runtime)\n return output\n\n def add(self, layer):\n if isinstance(layer, Layer):\n self.layers.append(layer)\n else:\n raise Exception\n return self\n\n def set_optimizer(self, optimizer):\n for layer in self.layers:\n layer.set_optimizer(optimizer)\n return self\n\n def get_minibatch(self, x, y):\n if self.idx + self.batch_size <= self.data_size:\n minibatch_x = x[:, self.idx:self.idx + self.batch_size]\n minibatch_y = y[:, self.idx:self.idx + self.batch_size]\n else:\n # If remaining data is less than size of minibatch, take all remaining data\n minibatch_x = x[:, self.idx:]\n minibatch_y = y[:, self.idx:]\n\n self.idx += self.batch_size\n return minibatch_x, minibatch_y\n\n\n########################################################################\n\n### Utils ###\n\n def _check_gradient(self, x, y, layer):\n epsilon = 1e-6\n for i in tqdm(range(layer.shape[0])):\n for j in range(layer.shape[1]):\n layer.weights[i, j] += epsilon\n output_plus = self.test_predict(x)\n loss_plus = self.loss(output_plus, y)\n layer.weights[i, j] -= 2 * epsilon\n output_minus = self.test_predict(x)\n loss_minus = self.loss(output_minus, y)\n raw_gradient = ((loss_plus - loss_minus) / (2 * epsilon))\n gradient = ((loss_plus - loss_minus) / (2 * epsilon)).sum()\n backprop_gradient = layer.gradient[i, j]\n grad_rel_diff = (gradient - backprop_gradient) / (\n np.absolute(gradient) + np.absolute(backprop_gradient) + 1)\n if not grad_rel_diff < 1e-5:\n raise Exception(\n f\"Computed gradient is not correct for layer {layer}\")\n # Reset weights\n layer.weights[i, j] += epsilon\n if layer.use_bias:\n for i in tqdm(range(len(layer.bias))):\n layer.bias[i, 0] += epsilon\n output_plus = self.test_predict(x)\n loss_plus = self.loss(output_plus, y)\n layer.bias[i, 0] -= 2 * epsilon\n output_minus = self.test_predict(x)\n loss_minus = self.loss(output_minus, y)\n raw_gradient = ((loss_plus - loss_minus) / (2 * epsilon))\n gradient = ((loss_plus - loss_minus) / (2 * epsilon)).sum()\n backprop_gradient = layer.bias_gradient[i].sum()\n grad_rel_diff = (gradient - backprop_gradient) / (\n np.absolute(gradient) + np.absolute(backprop_gradient) + 1)\n if not grad_rel_diff < 1e-5:\n raise Exception(\n f\"Computed gradient is not correct for bias in layer {layer}\"\n )\n layer.bias[i, 0] += epsilon\n logging.info(f'All computed gradients are correct for layer {layer}')\n" ]
[ [ "numpy.absolute", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "numpy.random.permutation", "matplotlib.pyplot.show" ] ]
vprzybylo/IPAS
[ "9c9268097b9d7d02be1b14671b8fbfc1818e02c0" ]
[ "ipas/collection_no_db/iceagg_collection_nodask.py" ]
[ "\"\"\"\nMain function for running ice particle simulations\nICE-AGG collection\nincludes looping over ncrystals instead of doing that outside in a dask computation\n\"\"\"\n\nfrom ipas.collection_no_db.crystal import Crystal\nfrom ipas.collection_no_db.calculations import ClusterCalculations\nimport copy as cp\nimport numpy as np\n\n\ndef collect_clusters_iceagg(phio, r, nclusters, ncrystals, rand_orient):\n\n # NEW AGGREGATE PROPERTIES\n cplxs = np.empty((nclusters, ncrystals-1))\n agg_as = np.empty((nclusters, ncrystals-1))\n agg_bs = np.empty((nclusters, ncrystals-1))\n agg_cs = np.empty((nclusters, ncrystals-1))\n phi2Ds = np.empty((nclusters, ncrystals-1)) \n dds = np.empty((nclusters, ncrystals-1))\n\n # get a and c axes of monomer using phi and r\n a = (r ** 3 / phio) ** (1. / 3.)\n c = phio * a\n if c < a:\n plates = True\n else:\n plates = False\n\n # how many aggregates to create\n for n in range(nclusters):\n # create Crystal\n crystal1 = Crystal(a, c)\n crystal1.hold_clus = crystal1.points\n crystal1.orient_crystal(rand_orient)\n crystal1.recenter()\n\n # create cluster from initialized crystal\n # same orientation if crystal was reoriented\n cluster = ClusterCalculations(crystal1)\n\n l=0\n # number of monomers/crystals per aggregate/cluster\n while cluster.ncrystals < ncrystals: \n # initialize a new crystal\n crystal2 = Crystal(a,c)\n crystal2.hold_clus = crystal2.points\n crystal2.orient_crystal(rand_orient)\n crystal2.recenter()\n\n # move monomer on top of cluster\n agg_pt, new_pt = cluster.generate_random_point_fast(crystal2, 1)\n movediffx = new_pt.x - agg_pt.x\n movediffy = new_pt.y - agg_pt.y\n crystal2.move([-movediffx, -movediffy, 0])\n\n # move cluster and monomer together\n cluster.closest_points(crystal2)\n\n # ----------- DENSITY CHANGE ----------\n # get cluster ellipsoid axes before aggregation\n rx,ry,rz = cluster.ellipsoid_axes() \n # volume of ellipsoid around cluster before aggregation\n Ve_clus = 4./3.*np.pi*rx*ry*rz \n\n # a and c of monomers in cluster (all identical)\n a_clus=np.power((np.power(cluster.mono_r,3)/cluster.mono_phi),(1./3.))\n c_clus = cluster.mono_phi*a_clus\n # volume of all monomers in cluster\n Va_clus = 3*(np.sqrt(3)/2) * np.power(a_clus,2) * c_clus * cluster.ncrystals\n # density ratio of aggregate and ellipsoid\n d1 = Va_clus/Ve_clus\n\n # -------------------\n # add monomer points to original cluster (i.e., aggregate)\n cluster.add_crystal(crystal2)\n # save original points before reorienting for max area\n cluster.add_points = cp.deepcopy(cluster.points)\n # -------------------\n\n # monomer a and c axes\n a_mono = np.power((np.power(crystal2.r,3)/crystal2.phi),(1./3.))\n c_mono = crystal2.phi*a_mono\n # volume of monomer to collect\n Va_mono = 3*(np.sqrt(3)/2) * np.power(a_mono,2) * c_mono\n\n # get fit-ellipsoid radii (a-major, c-minor) after aggregation\n agg_a, agg_b, agg_c = cluster.ellipsoid_axes() \n agg_as[n,l] = agg_a\n agg_bs[n,l] = agg_b\n agg_cs[n,l] = agg_c\n\n # volume of ellipsoid around cluster after aggregation\n Ve3 = 4./3.*np.pi*agg_a*agg_b*agg_c #volume of ellipsoid for new agg\n d2 = (Va_clus+Va_mono)/Ve3\n # append relative change in density (after - before adding monomer)\n dds[n,l] = (d2-d1)/d1\n\n # ----------------------------\n # orient cluster after adding monomer\n if a>c and rand_orient== False:\n cluster.orient_cluster() \n else:\n cluster.orient_cluster(rand_orient) \n cluster.recenter()\n\n # ------other calculations------\n # save points before reorienting to calculate phi_2D_rotate\n 
cluster.orient_points = cp.deepcopy(cluster.points)\n cluster.complexity()\n cplxs[n,l], circle= cluster.complexity()\n phi2Ds[n,l] = cluster.phi_2D()\n # reset points back to how they were before phi_2D_rotate\n cluster.points = cluster.orient_points\n\n # -------- PLOTTING --------\n# print('w')\n# cluster.plot_ellipsoid_aggs([cluster, crystal2], view='w', circle=None, agg_agg=False)\n# print('x')\n# cluster.plot_ellipsoid_aggs([cluster, crystal2], view='x', circle=None, agg_agg=False)\n# print('y')\n# cluster.plot_ellipsoid_aggs([cluster, crystal2], view='y', circle=None, agg_agg=False)\n# print('z')\n# cluster.plot_ellipsoid_aggs([cluster, crystal2], view='z', circle=None, agg_agg=False)\n\n cluster_cp = cp.deepcopy(cluster)\n l+=1\n\n # characteristic values determined in postprocessing\n return agg_as, agg_bs, agg_cs, phi2Ds, cplxs, dds" ]
[ [ "numpy.sqrt", "numpy.empty", "numpy.power" ] ]
flodorner/safety-starter-agents
[ "d140ce69c817a7939232d2c7848c0dfa55de8e90" ]
[ "safe_rl/sac/sac.py" ]
[ "#!/usr/bin/env python\n\nfrom functools import partial\nimport numpy as np\nimport tensorflow as tf\nimport gym\nimport time\nfrom safe_rl.utils.logx import EpochLogger\nfrom safe_rl.utils.mpi_tf import sync_all_params, MpiAdamOptimizer\nfrom safe_rl.utils.mpi_tools import mpi_fork, mpi_sum, proc_id, mpi_statistics_scalar, num_procs\n\nEPS = 1e-8\n\ndef placeholder(dim=None):\n return tf.placeholder(dtype=tf.float32, shape=(None,dim) if dim else (None,))\n\ndef placeholders(*args):\n return [placeholder(dim) for dim in args]\n\ndef mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):\n for h in hidden_sizes[:-1]:\n x = tf.layers.dense(x, units=h, activation=activation)\n return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)\n\ndef get_vars(scope):\n return [x for x in tf.global_variables() if scope in x.name]\n\ndef count_vars(scope):\n v = get_vars(scope)\n return sum([np.prod(var.shape.as_list()) for var in v])\n\ndef gaussian_likelihood(x, mu, log_std):\n pre_sum = -0.5 * (((x-mu)/(tf.exp(log_std)+EPS))**2 + 2*log_std + np.log(2*np.pi))\n return tf.reduce_sum(pre_sum, axis=1)\n\ndef get_target_update(main_name, target_name, polyak):\n ''' Get a tensorflow op to update target variables based on main variables '''\n main_vars = {x.name: x for x in get_vars(main_name)}\n targ_vars = {x.name: x for x in get_vars(target_name)}\n assign_ops = []\n for v_targ in targ_vars:\n assert v_targ.startswith(target_name), f'bad var name {v_targ} for {target_name}'\n v_main = v_targ.replace(target_name, main_name, 1)\n assert v_main in main_vars, f'missing var name {v_main}'\n assign_op = tf.assign(targ_vars[v_targ], polyak*targ_vars[v_targ] + (1-polyak)*main_vars[v_main])\n assign_ops.append(assign_op)\n return tf.group(assign_ops)\n\n\n\"\"\"\nPolicies\n\"\"\"\n\nLOG_STD_MAX = 2\nLOG_STD_MIN = -20\n\ndef mlp_gaussian_policy(x, a, hidden_sizes, activation, output_activation):\n act_dim = a.shape.as_list()[-1]\n net = mlp(x, list(hidden_sizes), activation, activation)\n mu = tf.layers.dense(net, act_dim, activation=output_activation)\n log_std = tf.layers.dense(net, act_dim, activation=None)\n log_std = tf.clip_by_value(log_std, LOG_STD_MIN, LOG_STD_MAX)\n\n std = tf.exp(log_std)\n pi = mu + tf.random_normal(tf.shape(mu)) * std\n logp_pi = gaussian_likelihood(pi, mu, log_std)\n return mu, pi, logp_pi\n\ndef apply_squashing_func(mu, pi, logp_pi):\n # Adjustment to log prob\n logp_pi -= tf.reduce_sum(2*(np.log(2) - pi - tf.nn.softplus(-2*pi)), axis=1)\n\n # Squash those unbounded actions!\n mu = tf.tanh(mu)\n pi = tf.tanh(pi)\n return mu, pi, logp_pi\n\n\n\"\"\"\nActors and Critics\n\"\"\"\ndef mlp_actor(x, a, name='pi', hidden_sizes=(256,256), activation=tf.nn.relu,\n output_activation=None, policy=mlp_gaussian_policy, action_space=None):\n # policy\n with tf.variable_scope(name):\n mu, pi, logp_pi = policy(x, a, hidden_sizes, activation, output_activation)\n mu, pi, logp_pi = apply_squashing_func(mu, pi, logp_pi)\n\n # make sure actions are in correct range\n action_scale = action_space.high[0]\n mu *= action_scale\n pi *= action_scale\n\n return mu, pi, logp_pi\n\n\ndef mlp_critic(x, a, pi, name, hidden_sizes=(256,256), activation=tf.nn.relu,\n output_activation=None, policy=mlp_gaussian_policy, action_space=None):\n\n fn_mlp = lambda x : tf.squeeze(mlp(x=x,\n hidden_sizes=list(hidden_sizes)+[1],\n activation=activation,\n output_activation=None),\n axis=1)\n with tf.variable_scope(name):\n critic = fn_mlp(tf.concat([x,a], axis=-1))\n\n with 
tf.variable_scope(name, reuse=True):\n critic_pi = fn_mlp(tf.concat([x,pi], axis=-1))\n\n return critic, critic_pi\n\ndef mlp_critic_act_transform(x, a, pi, name, hidden_sizes=(256,256), activation=tf.nn.relu,\n output_activation=None, policy=mlp_gaussian_policy, action_space=None):\n\n fn_mlp = lambda x : tf.squeeze(mlp(x=x,\n hidden_sizes=list(hidden_sizes)+[1],\n activation=activation,\n output_activation=None),\n axis=1)\n fn_mlp_2 = lambda x : mlp(x=x,\n hidden_sizes=[64,64,1],\n activation=activation,\n output_activation=None)\n\n with tf.variable_scope(name):\n critic = fn_mlp(tf.concat([x,fn_mlp_2(a)], axis=-1))\n\n with tf.variable_scope(name, reuse=True):\n critic_pi = fn_mlp(tf.concat([x,fn_mlp_2(pi)], axis=-1))\n\n return critic, critic_pi\n\nclass ReplayBuffer:\n \"\"\"\n A simple FIFO experience replay buffer for SAC agents.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size):\n self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)\n self.rews_buf = np.zeros(size, dtype=np.float32)\n self.costs_buf = np.zeros(size, dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.float32)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done, cost):\n self.obs1_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.costs_buf[self.ptr] = cost\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr+1) % self.max_size\n self.size = min(self.size+1, self.max_size)\n\n def sample_batch(self, batch_size=32):\n idxs = np.random.randint(0, self.size, size=batch_size)\n return dict(obs1=self.obs1_buf[idxs],\n obs2=self.obs2_buf[idxs],\n acts=self.acts_buf[idxs],\n rews=self.rews_buf[idxs],\n costs=self.costs_buf[idxs],\n done=self.done_buf[idxs])\n\n\n\"\"\"\nSoft Actor-Critic\n\"\"\"\ndef sac(env_fn, actor_fn=mlp_actor, critic_fn=mlp_critic , ac_kwargs=dict(), seed=0,\n steps_per_epoch=1000, epochs=100, replay_size=int(1e6), gamma=0.99,\n polyak=0.995, lr=1e-4, batch_size=1024, local_start_steps=int(1e3),\n max_ep_len=1000, logger_kwargs=dict(), save_freq=10, local_update_after=int(1e3),\n update_freq=1, render=False, \n fixed_entropy_bonus=None, entropy_constraint=-1.0,\n fixed_cost_penalty=None, cost_constraint=None, cost_lim=None,\n reward_scale=1,penalty_lr=5e-2,use_discor=False,cost_maxq=True\n ):\n \"\"\"\n\n Args:\n env_fn : A function which creates a copy of the environment.\n The environment must satisfy the OpenAI Gym API.\n\n actor_fn: A function which takes in placeholder symbols\n for state, ``x_ph``, and action, ``a_ph``, and returns the actor\n outputs from the agent's Tensorflow computation graph:\n\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``mu`` (batch, act_dim) | Computes mean actions from policy\n | given states.\n ``pi`` (batch, act_dim) | Samples actions from policy given\n | states.\n ``logp_pi`` (batch,) | Gives log probability, according to\n | the policy, of the action sampled by\n | ``pi``. 
Critical: must be differentiable\n | with respect to policy parameters all\n | the way through action sampling.\n =========== ================ ======================================\n\n critic_fn: A function which takes in placeholder symbols\n for state, ``x_ph``, action, ``a_ph``, and policy ``pi``,\n and returns the critic outputs from the agent's Tensorflow computation graph:\n\n =========== ================ ======================================\n Symbol Shape Description\n =========== ================ ======================================\n ``critic`` (batch,) | Gives one estimate of Q* for\n | states in ``x_ph`` and actions in\n | ``a_ph``.\n ``critic_pi`` (batch,) | Gives another estimate of Q* for\n | states in ``x_ph`` and actions in\n | ``a_ph``.\n =========== ================ ======================================\n\n ac_kwargs (dict): Any kwargs appropriate for the actor_fn / critic_fn\n function you provided to SAC.\n\n seed (int): Seed for random number generators.\n\n steps_per_epoch (int): Number of steps of interaction (state-action pairs)\n for the agent and the environment in each epoch.\n\n epochs (int): Number of epochs to run and train agent.\n\n replay_size (int): Maximum length of replay buffer.\n\n gamma (float): Discount factor. (Always between 0 and 1.)\n\n polyak (float): Interpolation factor in polyak averaging for target\n networks. Target networks are updated towards main networks\n according to:\n\n .. math:: \\\\theta_{\\\\text{targ}} \\\\leftarrow\n \\\\rho \\\\theta_{\\\\text{targ}} + (1-\\\\rho) \\\\theta\n\n where :math:`\\\\rho` is polyak. (Always between 0 and 1, usually\n close to 1.)\n\n lr (float): Learning rate (used for both policy and value learning).\n\n batch_size (int): Minibatch size for SGD.\n\n local_start_steps (int): Number of steps for uniform-random action selection,\n before running real policy. 
Helps exploration.\n\n max_ep_len (int): Maximum length of trajectory / episode / rollout.\n\n logger_kwargs (dict): Keyword args for EpochLogger.\n\n save_freq (int): How often (in terms of gap between epochs) to save\n the current policy and value function.\n\n fixed_entropy_bonus (float or None): Fixed bonus to reward for entropy.\n Units are (points of discounted sum of future reward) / (nats of policy entropy).\n If None, use ``entropy_constraint`` to set bonus value instead.\n\n entropy_constraint (float): If ``fixed_entropy_bonus`` is None,\n Adjust entropy bonus to maintain at least this much entropy.\n Actual constraint value is multiplied by the dimensions of the action space.\n Units are (nats of policy entropy) / (action dimenson).\n\n fixed_cost_penalty (float or None): Fixed penalty to reward for cost.\n Units are (points of discounted sum of future reward) / (points of discounted sum of future costs).\n If None, use ``cost_constraint`` to set penalty value instead.\n\n cost_constraint (float or None): If ``fixed_cost_penalty`` is None,\n Adjust cost penalty to maintain at most this much cost.\n Units are (points of discounted sum of future costs).\n Note: to get an approximate cost_constraint from a cost_lim (undiscounted sum of costs),\n multiply cost_lim by (1 - gamma ** episode_len) / (1 - gamma).\n If None, use cost_lim to calculate constraint.\n\n cost_lim (float or None): If ``cost_constraint`` is None,\n calculate an approximate constraint cost from this cost limit.\n Units are (expectation of undiscounted sum of costs in a single episode).\n If None, cost_lim is not used, and if no cost constraints are used, do naive optimization.\n \"\"\"\n use_costs = fixed_cost_penalty or cost_constraint or cost_lim\n\n logger = EpochLogger(**logger_kwargs)\n logger.save_config(locals())\n\n # Env instantiation\n env, test_env = env_fn(), env_fn()\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n # Setting seeds\n tf.set_random_seed(seed)\n np.random.seed(seed)\n env.seed(seed)\n test_env.seed(seed)\n\n # Action limit for clamping: critically, assumes all dimensions share the same bound!\n act_limit = env.action_space.high[0]\n\n # Share information about action space with policy architecture\n ac_kwargs['action_space'] = env.action_space\n\n # Inputs to computation graph\n x_ph, a_ph, x2_ph, r_ph, d_ph, c_ph = placeholders(obs_dim, act_dim, obs_dim, None, None, None)\n\n # Main outputs from computation graph\n with tf.variable_scope('main'):\n mu, pi, logp_pi = actor_fn(x_ph, a_ph, **ac_kwargs)\n qr1, qr1_pi = critic_fn(x_ph, a_ph, pi, name='qr1', **ac_kwargs)\n qr2, qr2_pi = critic_fn(x_ph, a_ph, pi, name='qr2', **ac_kwargs)\n qc1, qc1_pi = critic_fn(x_ph, a_ph, pi, name='qc1', **ac_kwargs)\n if cost_maxq:\n qc2, qc2_pi = critic_fn(x_ph, a_ph, pi, name='qc2', **ac_kwargs)\n if use_discor:\n er1, er1_targ = critic_fn(x_ph, a_ph, pi, name='er1', **ac_kwargs)\n er2, er2_targ = critic_fn(x_ph, a_ph, pi, name='er2', **ac_kwargs)\n ec1, ec1_targ = critic_fn(x_ph, a_ph, pi, name='ec1', **ac_kwargs)\n if cost_maxq:\n ec2, ec2_targ = critic_fn(x_ph, a_ph, pi, name='ec2', **ac_kwargs)\n\n\n with tf.variable_scope('main', reuse=True):\n # Additional policy output from a different observation placeholder\n # This lets us do separate optimization updates (actor, critics, etc)\n # in a single tensorflow op.\n _, pi2, logp_pi2 = actor_fn(x2_ph, a_ph, **ac_kwargs)\n\n # Target value network\n with tf.variable_scope('target'):\n _, qr1_pi_targ = 
critic_fn(x2_ph, a_ph, pi2, name='qr1', **ac_kwargs)\n _, qr2_pi_targ = critic_fn(x2_ph, a_ph, pi2, name='qr2', **ac_kwargs)\n _, qc1_pi_targ = critic_fn(x2_ph, a_ph, pi2, name='qc1', **ac_kwargs)\n if cost_maxq:\n _, qc2_pi_targ = critic_fn(x2_ph, a_ph, pi2, name='qc2', **ac_kwargs)\n if use_discor:\n _, er1_pi_targ = critic_fn(x_ph, a_ph, pi, name='er1', **ac_kwargs)\n _, er2_pi_targ = critic_fn(x_ph, a_ph, pi, name='er2', **ac_kwargs)\n _, ec1_pi_targ = critic_fn(x_ph, a_ph, pi, name='ec1', **ac_kwargs)\n if cost_maxq:\n _, ec2_pi_targ = critic_fn(x_ph, a_ph, pi, name='ec2', **ac_kwargs)\n\n # Entropy bonus\n if fixed_entropy_bonus is None:\n with tf.variable_scope('entreg'):\n soft_alpha = tf.get_variable('soft_alpha',\n initializer=0.0,\n trainable=True,\n dtype=tf.float32)\n alpha = tf.nn.softplus(soft_alpha)\n else:\n alpha = tf.constant(fixed_entropy_bonus)\n log_alpha = tf.log(alpha)\n\n # Cost penalty\n if use_costs:\n if fixed_cost_penalty is None:\n with tf.variable_scope('costpen'):\n soft_beta = tf.get_variable('soft_beta',\n initializer=0.0,\n trainable=True,\n dtype=tf.float32)\n beta = tf.nn.softplus(soft_beta)\n log_beta = tf.log(beta)\n else:\n beta = tf.constant(fixed_cost_penalty)\n log_beta = tf.log(beta)\n else:\n beta = 0.0 # costs do not contribute to policy optimization\n print('Not using costs')\n\n if use_discor:\n with tf.variable_scope('discor'):\n tr1 = tf.get_variable('tr1',\n initializer=10.0,\n trainable=False,\n dtype=tf.float32)\n tr2 = tf.get_variable('tr2',\n initializer=10.0,\n trainable=False,\n dtype=tf.float32)\n tc1 = tf.get_variable('tc1',\n initializer=10.0,\n trainable=False,\n dtype=tf.float32)\n if cost_maxq:\n tc2 = tf.get_variable('tc2',\n initializer=10.0,\n trainable=False,\n dtype=tf.float32)\n\n\n # Experience buffer\n replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)\n\n # Count variables\n if proc_id()==0:\n var_counts = tuple(count_vars(scope) for scope in \n ['main/pi', 'main/qr1', 'main/qr2', 'main/qc1', 'main'])\n print(('\\nNumber of parameters: \\t pi: %d, \\t qr1: %d, \\t qr2: %d, \\t qc1: %d, \\t total: %d\\n')%var_counts)\n\n # Min Double-Q:\n min_q_pi = tf.minimum(qr1_pi, qr2_pi)\n min_q_pi_targ = tf.minimum(qr1_pi_targ, qr2_pi_targ)\n\n if cost_maxq:\n max_qc_pi = tf.maximum(qc1_pi, qc2_pi)\n max_qc_pi_targ = tf.maximum(qc1_pi_targ, qc2_pi_targ)\n else:\n max_qc_pi = qc1_pi\n max_qc_pi_targ = qc1_pi_targ\n\n # Targets for Q and V regression\n q_backup = tf.stop_gradient(r_ph + gamma*(1-d_ph)*(min_q_pi_targ - alpha * logp_pi2))\n qc_backup = tf.stop_gradient(c_ph + gamma*(1-d_ph)*max_qc_pi_targ)\n\n if use_discor:\n er1_backup = tf.stop_gradient(tf.abs(qr1 - q_backup) + gamma*(1-d_ph)* er1_pi_targ)\n er2_backup = tf.stop_gradient(tf.abs(qr2 - q_backup) + gamma*(1-d_ph)* er2_pi_targ)\n ec1_backup = tf.stop_gradient(tf.abs(qc1 - qc_backup) + gamma*(1-d_ph)* ec1_pi_targ)\n if cost_maxq:\n ec2_backup = tf.stop_gradient(tf.abs(qc2 - qc_backup) + gamma*(1-d_ph)* ec2_pi_targ)\n\n qr1_loss = 0.5 * tf.reduce_sum(tf.nn.softmax(er1_backup / tr1,axis=0) * (q_backup - qr1) ** 2)\n qr2_loss = 0.5 * tf.reduce_sum(tf.nn.softmax(er2_backup / tr2,axis=0) * (q_backup - qr2) ** 2)\n qc1_loss = 0.5 * tf.reduce_sum(tf.nn.softmax(ec1_backup / tc1,axis=0) * (qc_backup - qc1) ** 2)\n if cost_maxq:\n qc2_loss = 0.5 * tf.reduce_sum(tf.nn.softmax(ec2_backup / tc2,axis=0) * (qc_backup - qc2) ** 2)\n else:\n qr1_loss = 0.5 * tf.reduce_mean((q_backup - qr1) ** 2)\n qr2_loss = 0.5 * tf.reduce_mean((q_backup - qr2) ** 2)\n 
qc1_loss = 0.5 * tf.reduce_mean((qc_backup - qc1) ** 2)\n if cost_maxq:\n qc2_loss = 0.5 * tf.reduce_mean((qc_backup - qc2) ** 2)\n # Soft actor-critic losses\n q_loss = qr1_loss + qr2_loss + qc1_loss\n if cost_maxq:\n q_loss += qc2_loss\n pi_loss = tf.reduce_mean(alpha * logp_pi - min_q_pi + beta * max_qc_pi)/(1+beta)\n\n if use_discor:\n er1_loss = 0.5 * tf.reduce_mean((er1_backup - er1) ** 2)\n er2_loss = 0.5 * tf.reduce_mean((er2_backup - er2) ** 2)\n ec1_loss = 0.5 * tf.reduce_mean((ec1_backup - ec1) ** 2)\n error_loss = er1_loss + er2_loss + ec1_loss\n if cost_maxq:\n ec2_loss = 0.5 * tf.reduce_mean((ec2_backup - ec2) ** 2)\n error_loss += + ec2_loss\n ec2_mean = tf.reduce_mean(ec2)\n er1_mean = tf.reduce_mean(er1)\n er2_mean = tf.reduce_mean(er2)\n ec1_mean = tf.reduce_mean(ec1)\n\n\n\n # Loss for alpha\n entropy_constraint *= act_dim\n pi_entropy = -tf.reduce_mean(logp_pi)\n # alpha_loss = - soft_alpha * (entropy_constraint - pi_entropy)\n alpha_loss = - alpha * (entropy_constraint - pi_entropy)\n print('using entropy constraint', entropy_constraint)\n\n # Loss for beta\n if use_costs and not fixed_cost_penalty:\n if cost_constraint is None:\n # Convert assuming equal cost accumulated each step\n # Note this isn't the case, since the early in episode doesn't usually have cost,\n # but since our algorithm optimizes the discounted infinite horizon from each entry\n # in the replay buffer, we should be approximately correct here.\n # It's worth checking empirical total undiscounted costs to see if they match.\n cost_constraint = cost_lim * (1 - gamma ** max_ep_len) / (1 - gamma) / max_ep_len\n print('using cost constraint', cost_constraint)\n beta_loss = beta * (cost_constraint - qc1)\n #TODO: What is the correct target here?\n # Policy train op\n # (has to be separate from value train op, because qr1_pi appears in pi_loss)\n train_pi_op = MpiAdamOptimizer(learning_rate=lr).minimize(pi_loss, var_list=get_vars('main/pi'), name='train_pi')\n\n # Value train op\n with tf.control_dependencies([train_pi_op]):\n train_q_op = MpiAdamOptimizer(learning_rate=lr).minimize(q_loss, var_list=get_vars('main/q'), name='train_q')\n with tf.control_dependencies([train_q_op]):\n if use_discor:\n train_e_op = MpiAdamOptimizer(learning_rate=lr).minimize(error_loss, var_list=get_vars('main/e'),name='train_e')\n with tf.control_dependencies([train_e_op]):\n if cost_maxq:\n train_e_out_op = tf.group([tf.assign(tr1, (1-polyak)*er1_mean+polyak*tr1),tf.assign(tr2, (1-polyak)*er2_mean+polyak*tr2),\n tf.assign(tc1, (1-polyak)*ec1_mean+polyak*tc1),tf.assign(tc2, (1-polyak)*ec2_mean+polyak*tc2)])\n else:\n train_e_out_op = tf.group([tf.assign(tr1, (1 - polyak) * er1_mean + polyak * tr1),\n tf.assign(tr2, (1 - polyak) * er2_mean + polyak * tr2),\n tf.assign(tc1, (1 - polyak) * ec1_mean + polyak * tc1)])\n else:\n train_e_out_op=tf.no_op()\n if fixed_entropy_bonus is None:\n entreg_optimizer = MpiAdamOptimizer(learning_rate=lr)\n with tf.control_dependencies([train_e_out_op]):\n train_entreg_op = entreg_optimizer.minimize(alpha_loss, var_list=get_vars('entreg'))\n if use_costs and fixed_cost_penalty is None:\n costpen_optimizer = MpiAdamOptimizer(learning_rate=penalty_lr)\n with tf.control_dependencies([train_entreg_op]):\n train_costpen_op = costpen_optimizer.minimize(beta_loss, var_list=get_vars('costpen'))\n\n\n # Polyak averaging for target variables\n target_update = get_target_update('main', 'target', polyak)\n\n # Single monolithic update with explicit control dependencies\n with 
tf.control_dependencies([train_pi_op]):\n with tf.control_dependencies([train_q_op]):\n if use_discor:\n with tf.control_dependencies([train_e_op]):\n with tf.control_dependencies([train_e_out_op]):\n grouped_update = tf.group([target_update])\n else:\n grouped_update = tf.group([target_update])\n\n if fixed_entropy_bonus is None:\n grouped_update = tf.group([grouped_update, train_entreg_op])\n if use_costs and fixed_cost_penalty is None:\n grouped_update_a = tf.group([grouped_update, train_costpen_op])\n\n\n\n # Initializing targets to match main variables\n # As a shortcut, use our exponential moving average update w/ coefficient zero\n target_init = get_target_update('main', 'target', 0.0)\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n sess.run(target_init)\n\n # Sync params across processes\n sess.run(sync_all_params())\n\n # Setup model saving\n logger.setup_tf_saver(sess, inputs={'x': x_ph, 'a': a_ph},\n outputs={'mu': mu, 'pi': pi, 'qr1': qr1, 'qr2': qr2, 'qc1': qc1})\n\n def get_action(o, deterministic=False):\n act_op = mu if deterministic else pi\n return sess.run(act_op, feed_dict={x_ph: o.reshape(1,-1)})[0]\n\n def test_agent(n=10):\n for j in range(n):\n o, r, d, ep_ret, ep_cost, ep_len, ep_goals, = test_env.reset(), 0, False, 0, 0, 0, 0\n while not(d or (ep_len == max_ep_len)):\n # Take deterministic actions at test time\n o, r, d, info = test_env.step(get_action(o, True))\n if render and proc_id() == 0 and j == 0:\n test_env.render()\n ep_ret += r\n ep_cost += info.get('cost', 0)\n ep_len += 1\n ep_goals += 1 if info.get('goal_met', False) else 0\n logger.store(TestEpRet=ep_ret, TestEpCost=ep_cost, TestEpLen=ep_len, TestEpGoals=ep_goals)\n\n start_time = time.time()\n o, r, d, ep_ret, ep_cost, ep_len, ep_goals = env.reset(), 0, False, 0, 0, 0, 0\n total_steps = steps_per_epoch * epochs\n\n # variables to measure in an update\n vars_to_get = dict(LossPi=pi_loss, LossQR1=qr1_loss, LossQR2=qr2_loss, LossQC1=qc1_loss,\n QR1Vals=qr1, QR2Vals=qr2, QC1Vals=qc1, LogPi=logp_pi, PiEntropy=pi_entropy,\n Alpha=alpha, LogAlpha=log_alpha, LossAlpha=alpha_loss)\n if use_costs and not fixed_cost_penalty:\n vars_to_get.update(dict(Beta=beta, LogBeta=log_beta, LossBeta=beta_loss))\n if use_discor:\n vars_to_get.update(dict(TR1=tr1))\n\n print('starting training', proc_id())\n\n # Main loop: collect experience in env and update/log each epoch\n local_steps = 0\n local_steps_per_epoch = steps_per_epoch // num_procs()\n local_batch_size = batch_size // num_procs()\n epoch_start_time = time.time()\n for t in range(total_steps // num_procs()):\n \"\"\"\n Until local_start_steps have elapsed, randomly sample actions\n from a uniform distribution for better exploration. 
Afterwards,\n use the learned policy.\n \"\"\"\n if t > local_start_steps:\n a = get_action(o)\n else:\n a = env.action_space.sample()\n\n # Step the env\n o2, r, d, info = env.step(a)\n r *= reward_scale # yee-haw\n c = info.get('cost', 0)\n ep_ret += r\n ep_cost += c\n ep_len += 1\n ep_goals += 1 if info.get('goal_met', False) else 0\n local_steps += 1\n\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state)\n d = False if ep_len==max_ep_len else d\n\n # Store experience to replay buffer\n replay_buffer.store(o, a, r, o2, d, c)\n\n # Super critical, easy to overlook step: make sure to update\n # most recent observation!\n o = o2\n\n if d or (ep_len == max_ep_len):\n logger.store(EpRet=ep_ret, EpCost=ep_cost, EpLen=ep_len, EpGoals=ep_goals)\n o, r, d, ep_ret, ep_cost, ep_len, ep_goals = env.reset(), 0, False, 0, 0, 0, 0\n\n if t > 0 and t % update_freq == 0:\n for j in range(update_freq):\n batch = replay_buffer.sample_batch(local_batch_size)\n feed_dict = {x_ph: batch['obs1'],\n x2_ph: batch['obs2'],\n a_ph: batch['acts'],\n r_ph: batch['rews'],\n c_ph: batch['costs'],\n d_ph: batch['done'],\n }\n if t < local_update_after:\n logger.store(**sess.run(vars_to_get, feed_dict))\n else:\n if (not j==update_freq-1) or not (use_costs and not fixed_cost_penalty):\n values, _ = sess.run([vars_to_get, grouped_update], feed_dict)\n logger.store(**values)\n else:\n values, _ = sess.run([vars_to_get, grouped_update_a], feed_dict)\n logger.store(**values)\n\n # End of epoch wrap-up\n if t > 0 and t % local_steps_per_epoch == 0:\n epoch = t // local_steps_per_epoch\n\n # Save model\n if (epoch % save_freq == 0) or (epoch == epochs-1):\n logger.save_state({'env': env}, None)\n\n # Test the performance of the deterministic version of the agent.\n test_start_time = time.time()\n test_agent()\n logger.store(TestTime=time.time() - test_start_time)\n\n logger.store(EpochTime=time.time() - epoch_start_time)\n epoch_start_time = time.time()\n\n # Log info about epoch\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('TestEpRet', with_min_and_max=True)\n logger.log_tabular('EpCost', with_min_and_max=True)\n logger.log_tabular('TestEpCost', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('TestEpLen', average_only=True)\n logger.log_tabular('EpGoals', average_only=True)\n logger.log_tabular('TestEpGoals', average_only=True)\n logger.log_tabular('TotalEnvInteracts', mpi_sum(local_steps))\n logger.log_tabular('QR1Vals', with_min_and_max=True)\n logger.log_tabular('QR2Vals', with_min_and_max=True)\n logger.log_tabular('QC1Vals', with_min_and_max=True)\n logger.log_tabular('LogPi', with_min_and_max=True)\n logger.log_tabular('LossPi', average_only=True)\n logger.log_tabular('LossQR1', average_only=True)\n logger.log_tabular('LossQR2', average_only=True)\n logger.log_tabular('LossQC1', average_only=True)\n logger.log_tabular('LossAlpha', average_only=True)\n logger.log_tabular('LogAlpha', average_only=True)\n logger.log_tabular('Alpha', average_only=True)\n if use_costs and not fixed_cost_penalty:\n logger.log_tabular('LossBeta', average_only=True)\n logger.log_tabular('LogBeta', average_only=True)\n logger.log_tabular('Beta', average_only=True)\n if use_discor:\n logger.log_tabular('TR1', average_only=True)\n logger.log_tabular('PiEntropy', average_only=True)\n logger.log_tabular('TestTime', 
average_only=True)\n logger.log_tabular('EpochTime', average_only=True)\n logger.log_tabular('TotalTime', time.time()-start_time)\n logger.dump_tabular()\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='Safexp-PointGoal1-v0')\n parser.add_argument('--hid', type=int, default=256)\n parser.add_argument('--l', type=int, default=2)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--lr', type=float, default=1e-3)\n parser.add_argument('--seed', '-s', type=int, default=0)\n parser.add_argument('--epochs', type=int, default=100)\n parser.add_argument('--exp_name', type=str, default='sac')\n parser.add_argument('--steps_per_epoch', type=int, default=4000)\n parser.add_argument('--update_freq', type=int, default=100)\n parser.add_argument('--cpu', type=int, default=4)\n parser.add_argument('--render', default=False, action='store_true')\n parser.add_argument('--local_start_steps', default=500, type=int)\n parser.add_argument('--local_update_after', default=500, type=int)\n parser.add_argument('--batch_size', default=256, type=int)\n parser.add_argument('--fixed_entropy_bonus', default=None, type=float)\n parser.add_argument('--entropy_constraint', type=float, default=-1.0)\n parser.add_argument('--fixed_cost_penalty', default=None, type=float)\n parser.add_argument('--cost_constraint', type=float, default=None)\n parser.add_argument('--cost_lim', type=float, default=None)\n parser.add_argument('--penalty_lr', type=float, default=5e-2)\n parser.add_argument('--use_discor', default=False, action='store_true')\n parser.add_argument('--cost_maxq', default=False, action='store_true')\n parser.add_argument('--use_act_trans', default=False, action='store_true')\n args = parser.parse_args()\n\n try:\n import safety_gym\n except:\n print('Make sure to install Safety Gym to use constrained RL environments.')\n\n mpi_fork(args.cpu)\n\n from safe_rl.utils.run_utils import setup_logger_kwargs\n logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)\n\n if not args.use_act_trans:\n critic=mlp_critic\n else:\n critic=mlp_critic_act_transform\n\n sac(lambda : gym.make(args.env), actor_fn=mlp_actor, critic_fn=critic,\n ac_kwargs=dict(hidden_sizes=[args.hid]*args.l),\n gamma=args.gamma, seed=args.seed, epochs=args.epochs, batch_size=args.batch_size,\n logger_kwargs=logger_kwargs, steps_per_epoch=args.steps_per_epoch,\n update_freq=args.update_freq, lr=args.lr, render=args.render,\n local_start_steps=args.local_start_steps, local_update_after=args.local_update_after,\n fixed_entropy_bonus=args.fixed_entropy_bonus, entropy_constraint=args.entropy_constraint,\n fixed_cost_penalty=args.fixed_cost_penalty, cost_constraint=args.cost_constraint,cost_lim=args.cost_lim,\n penalty_lr=args.penalty_lr,use_discor=args.use_discor,cost_maxq=args.cost_maxq\n )\n" ]
[ [ "tensorflow.get_variable", "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.reduce_sum", "tensorflow.minimum", "tensorflow.global_variables", "tensorflow.abs", "tensorflow.tanh", "tensorflow.group", "numpy.random.randint", "tensorflow.layers.dense", "tensorflow.stop_gradient", "tensorflow.Session", "numpy.zeros", "numpy.log", "tensorflow.shape", "tensorflow.placeholder", "tensorflow.exp", "tensorflow.global_variables_initializer", "tensorflow.no_op", "tensorflow.set_random_seed", "tensorflow.clip_by_value", "tensorflow.nn.softmax", "tensorflow.constant", "numpy.random.seed", "tensorflow.reduce_mean", "tensorflow.maximum", "tensorflow.assign", "tensorflow.log", "tensorflow.variable_scope", "tensorflow.nn.softplus" ] ]
smnmnkr/geneticNLP
[ "f053cde54f90a3c213e355dc0c4038b0f7583bd0", "f053cde54f90a3c213e355dc0c4038b0f7583bd0" ]
[ "beyondGD/data/loader.py", "tests/test__nn.py" ]
[ "from typing import Union\n\n\nfrom torch.utils.data import (\n Dataset,\n IterableDataset,\n DataLoader,\n)\n\n#\n#\n# -------- batch_loader -----------\n#\ndef batch_loader(\n data_set: Union[IterableDataset, Dataset],\n batch_size: int = 32,\n shuffle: bool = False,\n num_workers: int = 0,\n) -> DataLoader:\n \"\"\"\n Create a batch data loader from the given data set.\n \"\"\"\n return DataLoader(\n data_set,\n batch_size=batch_size,\n collate_fn=lambda x: x,\n shuffle=shuffle,\n num_workers=num_workers,\n )\n", "import torch\n\nfrom beyondGD.nn import MLP, BILSTM\n\n# config\nin_size: int = 16\nhid_size: int = 8\n\nsent_len: int = 32\nbatch_len: int = 48\n\n# inputs\nx_vec: torch.Tensor = torch.randn(in_size)\nx_mat: torch.Tensor = torch.randn(sent_len, in_size)\nx_bat: list = [\n torch.randn(sent_len, in_size) for _ in range(batch_len)\n]\n\n\ndef test_BILSTM():\n\n net = BILSTM(\n in_size=in_size,\n hid_size=hid_size,\n depth=1,\n dropout=0.0,\n )\n\n # forward batch:\n assert len(net.forward(x_bat)) == 2\n assert net.forward(x_bat)[0].size()[0] == batch_len\n assert net.forward(x_bat)[0].size()[1] == sent_len\n assert net.forward(x_bat)[0].size()[2] == hid_size * 2\n\n\ndef test_mlp():\n\n net = MLP(\n in_size=in_size,\n hid_size=hid_size,\n dropout=0.0,\n )\n\n # forward vector:\n assert isinstance(net.forward(x_vec), torch.FloatTensor)\n assert net.forward(x_vec).size()[0] == hid_size\n\n # forward matrix:\n assert isinstance(net.forward(x_mat), torch.FloatTensor)\n assert net.forward(x_mat).size()[0] == sent_len\n assert net.forward(x_mat).size()[1] == hid_size\n" ]
[ [ "torch.utils.data.DataLoader" ], [ "torch.randn" ] ]
khansaadbinhasan/Low-Cost-Autonomous-Vehicle-for-Inventory-Movement-in-Warehouses
[ "3c24390a49a76f893675e606ca24fccdbcce43e2" ]
[ "src/Workstation/Misc/path planning/process_image.py" ]
[ "import cv2\nimport numpy as np\nimport time\nimport astarsearch\nimport traversal\nfrom sklearn.cluster import KMeans\nfrom skimage import io\nimport matplotlib.pyplot as plt\n\ndef main(source , dest, cap, grid_size,frame_width, frame_height,decision):\n\n\toccupied_grids = []\t\t# List to store coordinates of occupied grid \n\tplanned_path = {}\t\t# Dictionary to store information regarding path planning \t\n\t# print('aewfoineoif')\n\t# cap = cv2.VideoCapture(0)\n\n\t# image = cv2.imread(im)\n\n\t_,image = cap.read()\n\n\timage = cv2.resize(image , (frame_width, frame_height))\n\t# load the image and define the window width and height\n\t# image = cv2.imread(frame)\n\t(winW, winH) = (grid_size, grid_size)\t\t# Size of individual cropped images \n\n\tobstacles = []\t\t\t# List to store obstacles (black tiles) \n\tindex = [1,1]\n\tblank_image = np.zeros((grid_size,grid_size,3), np.uint8)\n\tlist_images = [[blank_image for i in range(frame_height//grid_size)] for i in range(frame_width//grid_size)] \t#array of list of images \n\tmaze = [[0 for i in range(frame_height//grid_size)] for i in range(frame_width//grid_size)] \t\t\t#matrix to represent the grids of individual cropped images\n\n\n\tkernel_open = np.ones((2,2))\n\tkernel_close = np.ones((5,5))\n\t\n\tyellow_lower = np.array([20, 100, 100])\n\tyellow_upper = np.array([30, 255, 255])\n\n\tfor (x, y, window) in traversal.sliding_window(image, stepSize=grid_size, windowSize=(winW, winH)):\n\t\t# if the window does not meet our desired window size, ignore it\n\t\tif window.shape[0] != winH or window.shape[1] != winW:\n\t\t\tcontinue\n\n\t#\tprint index\n\t\tclone = image.copy()\n\t\tcrop_img = clone[x:x + winW, y:y + winH] \t\t\t\t#crop the image\n\t\tlist_images[index[0]-1][index[1]-1] = crop_img.copy()\t\t\n\t\tcv2.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)\n\t\t\n\n\t\timg = crop_img\n\n\t\tcv2.imshow(\"second_window\",img)\n\t\tctl = img\n\t\n\n\t\t# hsv = cv2.cvtColor(ctl, cv2.COLOR_BGR2HSV)\n\n\t\t# hue,sat,val,ret = cv2.mean(hsv)\n\t\t# print(cv2.mean(hsv))\n\n\t\t# t_val = 160\n\t\t# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\t# lower = np.array([0, 0, 0]) #black color mask\n\t\t# upper = np.array([t_val, t_val, t_val])\n\t\t# mask = cv2.inRange(img, lower, upper)\n\n\t\t# ret,thresh = cv2.threshold(mask,127,255,cv2.THRESH_BINARY)\n\t\t# contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\t\t# cv2.drawContours(image,contours,-1,(0,255,0),3)\n\n\t\t# biggest = 0\n\t\t# max_area = 0\n\t\t# min_size = thresh.size/4\n\t\t# index1 = 0\n\t\t# peri = 0\n\t\t# for i in contours:\n\t\t# \tarea = cv2.contourArea(i)\n\t\t# \tif area > 10000:\n\t\t# \t\tperi = cv2.arcLength(i,True)\n\n\t\t# \tif area > max_area:\n\t\t# \t\tbiggest = index1\n\t\t# \t\tmax_area = area\n\t\t# \t\tindex1 = index1 + 1\n\t\t\n\n\t\t# approx = cv2.approxPolyDP(contours[biggest],0.05,True)\n\n\t\t# cv2.polylines(image, [approx], True, (0,255,0), 3)\n\n\t\t# mask0 = cv2.inRange(hsv, yellow_lower, yellow_upper)\n\n\t\t# mask = mask0\n\n\t\t# mask_op = cv2.morphologyEx(mask , cv2.MORPH_OPEN, kernel_open)\n\n\t\t# mask_cl = cv2.morphologyEx(mask_op, cv2.MORPH_CLOSE, kernel_close)\n\n\t\t# Z = mask_cl.reshape((-1,1))\n\n\t\t# Z = np.float32(Z)\n\n\t\t# n_colors = 2\n\n\t\t# criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1)\n\n\t\t# flags = cv2.KMEANS_RANDOM_CENTERS\n\n\t\t# _, labels, palette = cv2.kmeans(Z, n_colors, None, criteria, 10, flags)\n\n\t\t# _, counts = np.unique(labels, 
return_counts=True)\n\n\t\t# dominant = palette[np.argmax(counts)]\n\n\t\t# center = np.uint8(palette)\n\n\t\t# res = center[labels.flatten()]\n\n\t\t# res2 = res.reshape((mask_cl.shape))\n\n\t\t# cv2.imshow('res2',res2)\n\n\t\t\n\t\tif decision==1:\n\t\t\t# flag = 0\n\t\t\t# # print(palette)\n\t\t\t# for i in palette:\n\t\t\t# \tif i[0]>=250:\n\t\t\t# \t\t# print(i)\n\t\t\t# \t\tif (flag==0):\n\t\t\t#if(hue==0 and sat==255 and val==255):\n\n\t\t\t# elif(hue==60 and sat==255 and val==255):\n\n\t\t\tif (val==0):\n\t\t\t\tmaze[index[1]-1][index[0]-1] = 1\t\t\n\t\t\t\tcv2.rectangle(image, (x, y),(x + winW, y + winH), (255, 0, 0),-1)\t\t\n\t\t\t\toccupied_grids.append(tuple(index))\t\n\t\t\t\tflag = 1\n\t\t\t\n\t\t\tcv2.putText(clone,str(maze[index[1]-1][index[0]-1]),(x, y),\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX ,1\n\t\t\t\t,(255,0,0),2, cv2.LINE_AA)\n\n\t\t\t# cv2.putText(clone,str(maze[index[1]-1][index[0]-1]),(x, y),\n\t\t\t# \tcv2.FONT_HERSHEY_SIMPLEX ,1\n\t\t\t# \t,(255,0,0),2, cv2.LINE_AA)\n\n\n\t\t# cv2.imshow(\"hist\", hist)\n\t\t# cv2.imshow(\"bar\", bar)\n\t\tcv2.imshow(\"display_Window\", clone)\n\t\tcv2.waitKey(1)\n\t\ttime.sleep(0.02)\n\t\n\t\t#Iterate\n\t\tindex[1] = index[1] + 1\t\t\t\t\t\t\t\n\t\tif(index[1]>(frame_width//grid_size)):\n\t\t\tindex[0] = index[0] + 1\n\t\t\tindex[1] = 1\n\t\n\t# Apply astar algorithm on the give maze image \n\tres = [[maze[j][i] for j in range(len(maze))] for i in range(len(maze[0]))]\n\tresult = astarsearch.astar(res,(source[0],source[1]),(dest[0],dest[1]), frame_width//grid_size, frame_height//grid_size)\n\t\n\t# printing the maze for checking\n\t# for i in range(len(maze)):\n\t# \tfor j in range(len(maze[0])):\n\t# \t\tprint(res[i][j],end=\" \")\n\t# \tprint(\" \")\t\n\t\n\tlist2=[]\n\t# print(result)\n\tfor t in result:\n\t\tx,y = t[0],t[1]\n\t\tlist2.append(tuple((x+1,y+1)))\t\t\t#Contains min path + startimage + endimage\n\tresult = list(list2[1:-1]) \t\t\t#Result contains the minimum path required \n\n\t# print(maze)\n\t# cv2.destroyAllWindows()\n\t# cap.release()\n\tkey = cv2.waitKey(1)\n\tif key==27:\n\t\tcv2.destroyAllWindows()\n\t\tcap.release()\n\n\treturn occupied_grids, list2\n\n\n\nif __name__ == '__main__':\n\n # change filename to check for other images\n image_filename = \"test_images/test_image3.jpg\"\n\n main(image_filename)\n\n cv2.waitKey(0)\n \n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.ones" ] ]
Jack-alope/tempers
[ "c5b019d2f5f14a55b6a9656e32a110c1d5c6c088" ]
[ "tests/test_tissue_class.py" ]
[ "\"\"\"\nTesting for the point finding class\n\"\"\"\n\n# import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef test_find_peaks(tissue_object):\n \"\"\"Testing peaks are where they schould be\"\"\"\n assert np.all([round(time % np.pi, 3) == 1.571 for time in tissue_object.peaks[0]])\n assert np.all(\n [round(amplitude, 3) == 3.000 for amplitude in tissue_object.peaks[1]]\n )\n\n\ndef test_find_basepoints_frontpoints(tissue_object):\n \"\"\"Testing that basepoints and peaks are as expected\"\"\"\n assert np.all([round(point, 3) == 1.000 for point in tissue_object.basepoints[1]])\n assert np.all([round(point, 3) == 1.000 for point in tissue_object.frontpoints[1]])\n\n\ndef test_find_analysispoints(tissue_object):\n \"\"\"Testing annalysis points disp is as expected\"\"\"\n assert np.all(\n [round(point, 2) == 1.200 for point in tissue_object.contract_points[0][1]]\n )\n assert np.all(\n [round(point, 2) == 1.400 for point in tissue_object.contract_points[1][1]]\n )\n assert np.all(\n [round(point, 2) == 2.000 for point in tissue_object.contract_points[2][1]]\n )\n assert np.all(\n [round(point, 2) == 2.600 for point in tissue_object.contract_points[3][1]]\n )\n assert np.all(\n [round(point, 2) == 2.800 for point in tissue_object.contract_points[4][1]]\n )\n\n assert np.all(\n [round(point, 2) == 1.200 for point in tissue_object.relax_points[0][1]]\n )\n assert np.all(\n [round(point, 2) == 1.400 for point in tissue_object.relax_points[1][1]]\n )\n assert np.all(\n [round(point, 2) == 2.000 for point in tissue_object.relax_points[2][1]]\n )\n assert np.all(\n [round(point, 2) == 2.600 for point in tissue_object.relax_points[3][1]]\n )\n assert np.all(\n [round(point, 2) == 2.800 for point in tissue_object.relax_points[4][1]]\n )\n\n\n# TODO: Test fringe conditions for basepoints and frontpoints\n\n\ndef test_beating_freq(tissue_object):\n \"\"\"Testing that the beatinf frequency is 1/period\"\"\"\n assert (\n (1 / (2 * np.pi) - 0.002)\n <= tissue_object.calculated_values[\"beating_freq\"]\n <= (1 / (2 * np.pi) + 0.002)\n )\n\n\ndef test_t2rel_50(tissue_object):\n \"\"\"Test that the tissue object properly uses calculation function\"\"\"\n assert (\n (np.pi - 0.002) / 2\n <= tissue_object.calculated_values[\"t2rel50\"]\n <= (np.pi + 0.002) / 2\n )\n\n\ndef test_t50(tissue_object):\n \"\"\"test that t50 is half period for sin\"\"\"\n assert np.pi - 0.002 <= tissue_object.calculated_values[\"t50\"] <= np.pi + 0.002\n\n t50 = (\n tissue_object.calculated_values[\"c50\"] + tissue_object.calculated_values[\"r50\"]\n )\n assert -0.002 <= np.absolute(t50 - tissue_object.calculated_values[\"t50\"]) <= 0.002\n assert (\n (np.pi - 0.002) / 2\n <= tissue_object.calculated_values[\"c50\"]\n <= (np.pi + 0.002) / 2\n )\n assert (\n (np.pi - 0.002) / 2\n <= tissue_object.calculated_values[\"r50\"]\n <= (np.pi + 0.002) / 2\n )\n\n\n# def test_dfdt(tissue_object):\n# \"\"\"Checks that dfdt is functioning properly with supplied values\"\"\"\n# assert tissue_object.calculated_values['negdfdt'] < 0\n\n# slope_diff = tissue_object.calculated_values['negdfdt'] + \\\n# tissue_object.calculated_values['dfdt']\n# assert -.001 <= slope_diff <= .001\n" ]
[ [ "numpy.absolute" ] ]
raimamathew/Brain-Tumor-Segmentation
[ "748bc37b61a2e89637a2ddf1da9029c0c820f400" ]
[ "train_script.py" ]
[ "import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport os\nfrom skimage import measure\nimport re\nimport nibabel as nib\nimport tensorflow as tf\nimport time\nfrom scipy.ndimage import zoom\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model, load_model\nfrom tensorflow.keras.layers import Input, Dropout, concatenate, Flatten, Dense, Reshape, BatchNormalization, Activation\nfrom tensorflow.keras.layers import Lambda\nfrom tensorflow.keras.layers import Conv3D, Conv3DTranspose, UpSampling3D\nfrom tensorflow.keras.layers import MaxPooling3D\nfrom tensorflow.keras.layers import concatenate\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\nfrom tensorflow.keras import backend as K\nimport seaborn as sns\n\n\n\ndef H_brain(scan, tumour):\n \"\"\"\n Returns healthy brain voxels\n Args:\n scan - full scan\n tumor - segmented tumour\n \"\"\"\n return np.logical_and(scan, np.logical_not(tumour))\n\n\n\ndef get_obj(tumor_array, fname='out.obj'):\n \"\"\"\n Returns .obj file\n Args:\n tumour_array - np array\n fname - file name[OPTIONAL]\n \"\"\"\n verts, faces, normals, values = measure.marching_cubes_lewiner(tumor_array, 0)\n faces = faces + 1\n thefile = open(fname, 'w')\n for item in verts:\n thefile.write(\"v {0} {1} {2}\\n\".format(item[0],item[1],item[2]))\n\n for item in normals:\n thefile.write(\"vn {0} {1} {2}\\n\".format(item[0],item[1],item[2]))\n\n for item in faces:\n thefile.write(\"f {0}//{0} {1}//{1} {2}//{2}\\n\".format(item[0],item[1],item[2])) \n\n thefile.close()\n\nimport subprocess\nfolders = subprocess.check_output(\"ls ./HGG/\", shell=True)\nfolders = folders.decode().split(\"\\n\")\nfolders.pop()\n\nscan_list = []\n\nfor folder in folders:\n sc = subprocess.check_output(\"ls ./HGG/\" + str(folder), shell=True)\n sc = sc.decode().split(\"\\n\")\n sc.pop()\n sc = [\"./HGG/\"+str(folder)+\"/\" +i for i in sc]\n scan_list.append(tuple(sc))\n\n\n# In[17]:\n\n\nflair_scans = []\nfor i in scan_list:\n for _ in i:\n if \"flair\" in _:\n scan = _\n if \"seg\" in _:\n seg = _\n flair_scans.append((scan, seg))\nflair_scans[0]\n\n\n# In[18]:\n\n\nt1ce_scans = []\nfor i in scan_list:\n for _ in i:\n if \"t1ce\" in _:\n scan = _\n if \"seg\" in _:\n seg = _\n t1ce_scans.append((scan, seg))\nt1ce_scans[-1]\n\n\n# In[19]:\n\n\nt2_scans = []\nfor i in scan_list:\n for _ in i:\n if \"t2\" in _:\n scan = _\n if \"seg\" in _:\n seg = _\n t2_scans.append((scan, seg))\nt2_scans[0]\n\n\n# In[38]:\n\n\ndef get_scan(scan_path='HGG/Brats18_CBICA_AAB_1/Brats18_CBICA_AAB_1_seg.nii.gz'):\n \"\"\"\n Returns np array\n scan_path - path to .nib file\n \"\"\"\n x = nib.load(scan_path).get_fdata()[:,:,:]\n return np.expand_dims(np.append(x, np.zeros((240,240,5)), axis=-1), axis=-1)\ndef get_seg(scan_path='HGG/Brats18_CBICA_AAB_1/Brats18_CBICA_AAB_1_seg.nii.gz', contrast=1):\n \"\"\"\n Returns np array with true segmentation\n scan_path - path to .nib file\n \"\"\"\n x = nib.load(scan_path).get_fdata()==contrast\n return np.expand_dims(np.append(x, np.zeros((240,240,5)), axis=-1), axis=-1)\n\n\n# In[21]:\n\n\ndef show_scan(scan='HGG/Brats18_CBICA_AAB_1/Brats18_CBICA_AAB_1_seg.nii.gz'):\n \"\"\"\n plots the scan\n scan_path - path to .nib file\n \"\"\"\n plt.imshow(get_scan(scan)[:,:,76])\ndef show_seg(scan='HGG/Brats18_CBICA_AAB_1/Brats18_CBICA_AAB_1_seg.nii.gz', contrast=1):\n \"\"\"\n plots scan with true segmentation\n scan_path - path to .nib file\n \"\"\"\n 
plt.imshow(get_seg(scan, contrast)[:,:,76,0])\n\n\n# In[22]:\n\n\ndef generate_set(scans, contrast=1, batch_size=1):\n \"\"\"\n Train/Test set Generator\n scans - list of (scan, segmentation) path pairs\n contrast - ground truth label\n \"\"\"\n # loop forever so the generator keeps feeding fit_generator across epochs,\n # and cycle through every scan instead of repeating the first one\n batch_x = []\n batch_y = []\n count = 0\n while True:\n for scan in scans:\n (x, y) = scan\n x = get_scan(x)\n y = get_seg(y, contrast)\n batch_x.append(x)\n batch_y.append(y)\n count += 1\n if count == batch_size:\n count = 0\n yield np.array(batch_x), np.array(batch_y)\n batch_x = []\n batch_y = []\n\n\n\n\n# In[25]:\n\n\ndef BatchActivate(x):\n x = Activation('relu')(x)\n return x\n\ndef conv_block(x, filters, size, strides=(1,1,1), padding='same', activation=True):\n x = Conv3D(filters, (size,size,size), strides=strides, padding=padding)(x)\n if activation:\n x = BatchActivate(x)\n return x\n\ndef pool_block(x, size):\n return MaxPooling3D((size, size, size))(x)\n\ndef upsam_block(x, size):\n return UpSampling3D((size, size, size))(x)\n\n# Add is used by res_block below but was missing from the imports at the top\nfrom tensorflow.keras.layers import Add\n\ndef res_block(blockInput, num_filters, size, batch_activate = False):\n x = BatchActivate(blockInput)\n x = conv_block(x, num_filters, size)\n x = conv_block(x, num_filters, size, activation=True)\n x = Add()([x, blockInput])\n if batch_activate:\n x = BatchActivate(x)\n return x\n\n\ndef build_model(inputs, start_filters=8, filter_size=2, pool_size=2):\n #240 -> 120\n #152 -> 76\n conv1 = conv_block(inputs, start_filters, filter_size)\n conv1 = conv_block(conv1, start_filters, filter_size)\n pool1 = pool_block(conv1, pool_size)\n \n #120 -> 60\n #76 -> 38\n conv2 = conv_block(pool1, start_filters*2, filter_size)\n conv2 = conv_block(conv2, start_filters*2, filter_size)\n pool2 = pool_block(conv2, pool_size)\n \n #60 -> 30\n #38 -> 19\n conv3 = conv_block(pool2, start_filters*4, filter_size)\n conv3 = conv_block(conv3, start_filters*4, filter_size)\n pool3 = pool_block(conv3, pool_size)\n \n conv4 = conv_block(pool3, start_filters*8, filter_size)\n conv4 = conv_block(conv4, start_filters*8, filter_size)\n\n conv5 = upsam_block(conv4, pool_size)\n conv5 = concatenate([conv3, conv5])\n conv5 = conv_block(conv5, start_filters*4, filter_size)\n conv5 = conv_block(conv5, start_filters*4, filter_size)\n \n conv6 = upsam_block(conv5, pool_size)\n conv6 = concatenate([conv2, conv6])\n conv6 = conv_block(conv6, start_filters*2, filter_size)\n conv6 = conv_block(conv6, start_filters*2, filter_size)\n \n conv7 = upsam_block(conv6, pool_size)\n conv7 = concatenate([conv1, conv7])\n conv7 = conv_block(conv7, start_filters, filter_size)\n conv7 = conv_block(conv7, start_filters, filter_size)\n \n # binary_crossentropy expects probabilities in [0, 1]; the original\n # relu-activated conv_block produced unbounded outputs\n output = conv_block(conv7, 1, filter_size, activation=False)\n output = Activation('sigmoid')(output)\n \n return output\n\ninputs = Input((240,240,160,1))\noutputs = build_model(inputs, 16)\nmodel = Model(inputs=[inputs], outputs=[outputs])\nmodel.compile(optimizer='adam', loss='binary_crossentropy')\nmodel.summary()\n\n#sets = generate_set(flair_scans, 2)\n\nearlystopper = EarlyStopping(patience=5, verbose=1)\ncheckpointer = ModelCheckpoint('model-tgs-salt-1.h5', verbose=1, save_best_only=True)\nresults = model.fit_generator(generate_set(flair_scans, 2), steps_per_epoch=len(flair_scans), epochs=30, \n callbacks=[earlystopper, checkpointer])\n" ]
[ [ "tensorflow.keras.callbacks.ModelCheckpoint", "numpy.logical_not", "tensorflow.keras.layers.Activation", "tensorflow.keras.models.Model", "tensorflow.keras.layers.MaxPooling3D", "tensorflow.keras.layers.UpSampling3D", "tensorflow.keras.layers.Conv3D", "tensorflow.keras.layers.concatenate", "numpy.array", "tensorflow.keras.callbacks.EarlyStopping", "numpy.zeros", "tensorflow.keras.layers.Input" ] ]
labsyspharm/deeptile
[ "b79a52f9613d5f8861b31e68a407cf507a90bcbe" ]
[ "sampling/run_tile_mcmc.py" ]
[ "import numpy as np\nimport pandas as pd\nimport time\nimport argparse\nimport os\nimport copy\nimport typing\n\nimport tqdm\nimport tensorflow as tf\n\n'''\n# turn on memory growth so GPU memory allocation becomes as-needed\n# for cases when training takes too much memory.\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n'''\n\nimport deeptile_model\nimport deeptile_dataset\nimport deeptile_sampling\n\nif __name__ == '__main__':\n # define tile shape\n # note: shape is based on image coordinate\n displacement_cells = 10 # width/2, in unit of cells\n cell_size = 10 # in unit of micro-meter\n pixel_size = 0.65 # in unit of micro-meter\n tile_width = 2*int(displacement_cells * cell_size / pixel_size) # in unit of pixel\n tile_shape = (tile_width, tile_width)\n # paths\n image_filepath = '/n/scratch2/hungyiwu/deeptile_data/26531POST/'\\\n 'input_data/26531POST.ome.tif'\n channel_info_filepath = '/n/scratch2/hungyiwu/deeptile_data/26531POST/'\\\n 'input_data/channel_info.csv'\n workspace_folderpath = '/n/scratch2/hungyiwu/deeptile_data/26531POST/'\\\n 'output/workspace'\n record_filepath = './training_history.csv'\n # target ROI obtained from PathViewer on OMERO server\n ROI = {\n 'image_x':23969,\n 'image_y':9398,\n 'image_width':5932, # delta-y\n 'image_height':5170, # delta-x\n }\n ROI_x_low = int(ROI['image_x']-ROI['image_height']/2)\n ROI_x_high = int(ROI['image_x']+ROI['image_height']/2)\n ROI_y_low = int(ROI['image_y']-ROI['image_width']/2)\n ROI_y_high = int(ROI['image_y']+ROI['image_width']/2)\n ROI_support_range = [(ROI_x_low, ROI_x_high), (ROI_y_low, ROI_y_high)]\n # parse arguments\n parser = argparse.ArgumentParser(description='Get verbosity.')\n parser.add_argument('--verbose', action='store_true', # default is False\n help='Turn on tqdm progress bar.')\n args = parser.parse_args()\n verbose = args.verbose\n # data\n loader = deeptile_dataset.tile_loader(\n workspace_folderpath=workspace_folderpath,\n warm_start=True,\n image_filepath=image_filepath,\n channel_filepath=channel_info_filepath,\n )\n # model\n cvae_model = deeptile_model.CVAE(\n latent_dim=20, \n input_shape=tile_shape+(loader.count_channel,),\n optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),\n )\n # stepping function with normal prior\n def __random_step(current_point):\n next_x = np.random.normal(loc=0, scale=tile_shape[0])\n next_y = np.random.normal(loc=0, scale=tile_shape[1])\n next_point = (current_point[0]+int(next_x), current_point[1]+int(next_y))\n next_tile = loader.get_tile(\n tile_shape=tile_shape,\n center=next_point,\n need_validate=True,\n )\n return next_point, next_tile\n def random_step(current_point):\n next_point, next_tile = __random_step(current_point)\n while next_tile is None:\n next_point, next_tile = __random_step(current_point)\n next_tile = next_tile[np.newaxis, ...]\n return next_point, next_tile\n # MCMC loop\n batch_size = 1000\n ts_start = time.time()\n total_step = int(4e4)\n current_point = (ROI['image_x'], ROI['image_y'])\n record = []\n for step in tqdm.tqdm(\n iterable=range(total_step),\n desc='MCMC',\n disable=not verbose):\n # phase 1: train on current point\n current_tile = loader.get_tile(\n tile_shape=tile_shape,\n center=current_point,\n need_validate=True,\n )\n current_tile = current_tile[np.newaxis, ...]\n 
cvae_model.compute_apply_gradients(current_tile)\n # phase 2: pick next point based on prior\n next_point, next_tile = random_step(current_point)\n # phase 3: compare loss and make step decision\n current_loss = cvae_model.compute_loss(current_tile).numpy()[0]\n next_loss = cvae_model.compute_loss(next_tile).numpy()[0]\n ratio = min(1, next_loss/current_loss)\n if np.random.rand() < ratio:\n current_point = next_point\n if step % batch_size == 0:\n ts_end = time.time()\n print('step {} current_point {} current_loss {} runtime {:.3f}/step'.format(\n step, current_point, current_loss, (ts_end-ts_start)/batch_size))\n ts_start = time.time()\n print('Done.')\n\n" ]
[ [ "numpy.random.normal", "tensorflow.keras.optimizers.Adam", "numpy.random.rand" ] ]
Jeffyrao/translate
[ "ab928e0b692f476c0a43ee7f9d0fbd3ecbada2b4" ]
[ "pytorch_translate/utils.py" ]
[ "#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport signal\nimport threading\nimport time\nfrom typing import List, Optional\n\nimport torch\nfrom fairseq import distributed_utils, tasks, utils\n\n\n# Helper type for argparse to enable flippable boolean flags. For example,\n# group.add_argument(\"--foo\", type=utils.bool_flag, nargs=\"?\", const=True,\n# default=False)\n# creates a --foo flag that defaults to False and can be set to True by\n# specifying `--foo True` or just `--foo`. Some other --bar flag that defaults\n# to True can be set to False by specifying `--bar False`.\ndef bool_flag(value):\n if value.lower() in (\"true\", \"t\", \"1\"):\n return True\n elif value.lower() in (\"false\", \"f\", \"0\"):\n return False\n else:\n raise argparse.ArgumentTypeError(\n f\"Expected boolean string such as 'true'/'false' instead of {value}.\"\n )\n\n\n# Variation on the fairseq StopwatchMeter that separates statistics by number\n# of tokens. Sentences longer than max_length are stored in the last bucket.\nclass BucketStopwatchMeter(object):\n def __init__(self, increment, max_length, sentences_per_batch):\n self.increment = increment\n self.n_buckets = max_length // increment + 1\n self.sentences_per_batch = sentences_per_batch\n self.reset()\n\n def start(self):\n self.start_time = time.time()\n\n def stop(self, n=1):\n if self.start_time is not None:\n delta = time.time() - self.start_time\n bucket_id = min(self.n_buckets - 1, n // self.increment)\n self.sum[bucket_id] += delta\n self.n[bucket_id] += n\n self.count[bucket_id] += 1\n self.start_time = None\n\n def reset(self):\n self.sum = [0] * self.n_buckets\n self.n = [0] * self.n_buckets\n self.count = [0] * self.n_buckets\n self.start_time = None\n\n def reset_bucket(self, bucket_id):\n if self.start_time is None:\n self.sum[bucket_id] = 0\n self.n[bucket_id] = 0\n self.count[bucket_id] = 0\n\n @property\n def avg(self):\n return sum(self.sum) / sum(self.n)\n\n @property\n def avgs(self):\n result = [0] * self.n_buckets\n for i in range(self.n_buckets):\n if self.n[i] != 0:\n result[i] = self.sum[i] / self.n[i]\n else:\n result[i] = 0\n return result\n\n\ndef load_diverse_ensemble_for_inference(\n filenames: List[str], task: Optional[tasks.FairseqTask] = None\n):\n \"\"\"Load an ensemble of diverse models for inference.\n\n This method is similar to fairseq.utils.load_ensemble_for_inference\n but allows to load diverse models with non-uniform args.\n\n Args:\n filenames: List of file names to checkpoints\n task: Optional[FairseqTask]. If this isn't provided, we setup the task\n using the first checkpoint's model args loaded from the saved state.\n\n Return:\n models, args: Tuple of lists. 
models contains the loaded models, args\n the corresponding configurations.\n task: Either the input task or the task created within this function\n using args\n \"\"\"\n\n # load model architectures and weights\n checkpoints_data = []\n for filename in filenames:\n if not os.path.exists(filename):\n raise IOError(\"Model file not found: {}\".format(filename))\n checkpoints_data.append(\n torch.load(\n filename,\n map_location=lambda s, l: torch.serialization.default_restore_location(\n s, \"cpu\"\n ),\n )\n )\n # build ensemble\n ensemble = []\n if task is None:\n task = tasks.setup_task(checkpoints_data[0][\"args\"])\n for checkpoint_data in checkpoints_data:\n model = task.build_model(checkpoint_data[\"args\"])\n model.load_state_dict(checkpoint_data[\"model\"])\n ensemble.append(model)\n args_list = [s[\"args\"] for s in checkpoints_data]\n return ensemble, args_list, task\n\n\ndef densify(t):\n \"\"\"Removes holes in an array.\n\n This function converts a 1-dimensional tensor of length n without duplicates\n to a 1-dimensional tensor of the same length with all elements less than n\n while preserving the order. For example,\n\n [1, 0, 4, 5, 10, 9] -> [1, 0, 2, 3, 5, 4]\n \"\"\"\n _, sorted_indices = torch.sort(t)\n _, dense_t = torch.sort(sorted_indices)\n return dense_t\n\n\ndef maybe_cat(tensors, dim, nullable=None):\n \"\"\"Like torch.cat, but skips elements in `tensors` which are None.\n\n Args:\n tensors: List of tensors (compare torch.cat())\n dim: Dimension along which to concatenate (compare to torch.cat())\n nullable: List of the same length as `tensors`. If specified, throw\n a RuntimeError if the i-th element in `tensors` is None and the\n i-th element in nullable is False.\n\n Returns:\n Concatenation of all tensors in `tensors` along `dim` which are not\n None.\n\n Throws:\n RuntimeError if the `nullable` constraint is violated or all elements in\n `tensors` are None.\n \"\"\"\n if nullable is not None and any(\n (t is None) and not n for t, n in zip(tensors, nullable)\n ):\n raise RuntimeError(\"Unexpected element in tensors is None.\")\n filtered = [t for t in tensors if t is not None]\n if len(filtered) == 1:\n return filtered[0]\n return torch.cat(filtered, dim=dim)\n\n\ndef maybe_cuda(t):\n \"\"\"Calls `cuda()` on `t` if cuda is available.\"\"\"\n if torch.cuda.is_available():\n return t.cuda()\n return t\n\n\ndef average_tensors(tensor_list, norm_fn=None, weights=None):\n \"\"\"Averages a list of tensors.\n\n Average the elements in tensor_list as follows:\n w1*norm_fn(t1) + w2*norm_fn(t2) + ...\n The default behavior corresponds to a [weighted] mean. You can set norm_fn\n to F.softmax or F.log_softmax to average in probability or logprob space.\n\n Note: This implementation favours memory efficiency over numerical\n stability, and iterates through `tensor_list` in a Python for-loop rather\n than stacking it to a PyTorch tensor.\n\n Arguments:\n tensor_list (list): Python list of tensors of the same size and same type\n norm_fn (function): If set, apply norm_fn() to elements in `tensor_list`\n before averaging. If list of functions, apply n-th function to\n n-th tensor.\n weights (list): List of tensors or floats to use to weight models. Must\n be of the same length as `tensor_list`. 
If none, use uniform weights.\n\n Returns:\n Average of the tensors in `tensor_list`\n \"\"\"\n n_tensors = len(tensor_list)\n if weights is None:\n weights = [1.0 / float(n_tensors)] * n_tensors\n if not isinstance(norm_fn, list):\n norm_fn = [norm_fn] * n_tensors\n assert n_tensors == len(weights)\n assert n_tensors == len(norm_fn)\n\n def id_fn(x, dim):\n return x\n\n norm_fn = [id_fn if f is None else f for f in norm_fn]\n acc = torch.zeros_like(tensor_list[0])\n for f, w, t in zip(norm_fn, weights, tensor_list):\n acc += w * f(t, dim=-1)\n return acc\n\n\ndef load_embedding(embedding, dictionary, pretrained_embed):\n \"\"\"Loads pretrained embeddings.\n\n Loads pretrained embeddings into a nn.Embedding layer. pretrained_embed\n can either be a nn.Embedding layer, in which case the embedding is set\n to the pretrained_embed argument, or a path to an embedding file.\n\n Arguments:\n embedding (pytorch_translate.common_layers.Embedding):\n Embedding layer whose weights are to be set.\n dictionary (fairseq.data.dictionary.Dictionary): dictionary with the\n same vocabulary size as the embedding argument.\n pretrained_embed (Union(string, nn.Embedding)): source of the\n weights to be loaded.\n \"\"\"\n if pretrained_embed is None:\n return\n\n if isinstance(pretrained_embed, torch.nn.Embedding):\n embedding.weight = pretrained_embed.weight\n else:\n embed_dict = utils.parse_embedding(pretrained_embed)\n utils.load_embedding(embed_dict, dictionary, embedding)\n\n embedding.init_normalization_if_needed()\n\n\ndef torch_find(index, query, vocab_size):\n \"\"\"\n Finds elements of query from index, outputting the last (max) index for each\n query.\n preconditions: (1) index and query are flat arrays (can be different sizes)\n (2) all tokens in index and query have values < vocab_size\n \"\"\"\n full_to_index = maybe_cuda(torch.zeros(vocab_size).long())\n index_shape_range = maybe_cuda(torch.arange(index.shape[0]).long())\n full_to_index[index] = index_shape_range\n result = full_to_index[query]\n return result\n\n\ndef all_gather_from_master(args, data: List) -> List:\n if args.distributed_world_size == 1:\n return data\n\n gathered_data = distributed_utils.all_gather_list(data)\n # Converts [[x0, y0, z0, ...], [x1, y1, z1, ...], [x2, y2, z2, ...], ...]\n # to [[x0, x1, x2, ...], [y0, y1, y2, ...], [z0, z1, z2, ...], ...]\n gathered_data_list = list(zip(*gathered_data))\n\n output_data = []\n for data_index, all_data in enumerate(gathered_data_list):\n # The master's (process 0) data is guaranteed to be in position 0.\n master_data = all_data[0]\n # Sanity check that only the master returned any result.\n if master_data is None:\n raise RuntimeError(\n f\"Input data element {data_index} of all_gather_from_master \"\n f\"returned None from master. Results from all processes: {all_data}\"\n )\n for i in range(1, len(all_data)):\n if all_data[i] is not None:\n raise RuntimeError(\n f\"Input data element {data_index} of all_gather_from_master \"\n f\"should have returned None from non-master process {i}. \"\n f\"Results from all processes: {all_data}\"\n )\n output_data.append(master_data)\n return output_data\n\n\ndef get_source_tokens_tensor(src_tokens):\n \"\"\"\n To enable integration with PyText, src_tokens should be able to support\n more features than just token embeddings. Hence when dictionary features are\n passed from PyText it will be passed as a tuple\n (token_embeddings, dict_feat, ..). 
Thus, in this case where we need the source\n tokens tensor (eg to calculate batch size = source_tokens_tensor.size(0)),\n we get the first element on the tuple which is always guaranteed\n to be source tokens and do the necessary operation.\n eg : bsz, _ = get_source_tokens_tensor(source_tokens)[0].size(0)\n \"\"\"\n if type(src_tokens) is tuple:\n return src_tokens[0]\n else:\n return src_tokens\n" ]
[ [ "torch.zeros", "torch.cat", "torch.zeros_like", "torch.serialization.default_restore_location", "torch.sort", "torch.cuda.is_available", "torch.arange" ] ]
Koopakiller/Edu-NLA
[ "8376557cab9f74cedd19ee1573a8c71d7e415dd4", "8376557cab9f74cedd19ee1573a8c71d7e415dd4" ]
[ "serie4/plot.py", "serie2/Sum.py" ]
[ "import matplotlib\nmatplotlib.use(\"TkAgg\")\nimport matplotlib.pyplot as plt\nimport numpy\nimport math\n\n\ndef plot(parameter_list, data_points_list):\n e = math.e\n\n x = numpy.arange(0, 25, 0.01)\n for entry in parameter_list:\n a, b, c, d, k, n, _, _, _ = entry\n plt.plot(x, a * (e ** (d*x)) + b * (e ** (-d * x)) + c, \"-\", label=\"k={0}; n={1}\".format(k, n))\n\n plt.plot(map(lambda pair: pair[0], data_points_list),\n map(lambda pair: pair[1], data_points_list),\n \"ro\", label=\"points\")\n\n plt.legend()\n\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n\n plt.show()\n", "# Authors: Tom Lambert (lambertt) and Yuuma Odaka-Falush (odafaluy)\n\n\nimport numpy\n\n\ndef sum_indices(addends, problems):\n \"\"\"\n Sums the given addends, starting by the first number and following the list\n \"\"\"\n result = numpy.float16(0)\n for addend in addends:\n if numpy.isnan(addend):\n problems.append(\"NaN\")\n elif numpy.isposinf(addend):\n problems.append(\"infinity\")\n elif numpy.isneginf(addend):\n problems.append(\"-infinity\")\n else:\n result += addend\n\n return result\n\n\ndef sum_ordered(addends, problems):\n \"\"\"\n Sums the given addends, starting by the smallest and following the natural order.\n \"\"\"\n result = numpy.float16(0)\n for addend in sorted(addends):\n if numpy.isnan(addend):\n problems.append(\"NaN\")\n elif numpy.isposinf(addend):\n problems.append(\"infinity\")\n elif numpy.isneginf(addend):\n problems.append(\"-infinity\")\n else:\n result += addend\n return result\n\n\ndef sum_ordered_grouped_by_sign(addends, problems):\n \"\"\"\n Sums the given addends; All negative and all positive values are added separately\n and will be added together in the last step.\n \"\"\"\n pos = numpy.float16(0)\n neg = numpy.float16(0)\n for addend in sorted(addends):\n if numpy.isnan(addend):\n problems.append(\"NaN\")\n elif numpy.isposinf(addend):\n problems.append(\"infinity\")\n elif numpy.isneginf(addend):\n problems.append(\"-infinity\")\n else:\n if addend > 0:\n pos += addend\n if addend < 0:\n neg += addend\n return pos + neg\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.use", "numpy.arange", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.isnan", "numpy.float16", "numpy.isposinf", "numpy.isneginf" ] ]
Shivam-Miglani/contextual_drl
[ "0f9930ff2b90522c3bd3319daa4b02167ab8190d" ]
[ "main_easdrl.py" ]
[ "# coding:utf-8\nimport time\nimport argparse\nimport tensorflow as tf\nfrom utils import get_time, plot_results\nfrom Agent import Agent\nfrom EADQN import DeepQLearner\nfrom Environment import Environment\nfrom ReplayMemory import ReplayMemory\nfrom gensim.models import KeyedVectors\nfrom tqdm import tqdm\nfrom keras.backend import set_image_data_format\nfrom keras.backend.tensorflow_backend import set_session\nfrom flair.embeddings import WordEmbeddings, FlairEmbeddings, StackedEmbeddings, ELMoEmbeddings, BertEmbeddings, BPEmb, CharacterEmbeddings\n\n\ndef preset_args():\n parser = argparse.ArgumentParser()\n\n envarg = parser.add_argument_group('Environment')\n envarg.add_argument(\"--domain\", type=str, default='cooking', help=\"\")\n envarg.add_argument(\"--contextual_embedding\", type=str, default='elmo', help=\"\")\n envarg.add_argument(\"--model_dim\", type=str, default=50, help=\"embedding dimension\") # word2vec 50.\n envarg.add_argument(\"--num_words\", type=int, default=500, help=\"number of words to consider for act model is 500. Arg model is 100\") # 100 if arguments.\n envarg.add_argument(\"--context_len\", type=int, default=100, help=\"\")\n envarg.add_argument(\"--word_dim\", type=int, default=868, help=\"dim of word embedding\")\n envarg.add_argument(\"--tag_dim\", type=int, default=868, help=\"\")\n envarg.add_argument(\"--dis_dim\", type=int, default=868, help=\"\")\n envarg.add_argument(\"--reward_assign\", type=list, default=[1, 2, 3], help='')\n envarg.add_argument(\"--reward_base\", type=float, default=50.0, help=\"\")\n envarg.add_argument(\"--object_rate\", type=float, default=0.07, help='')\n envarg.add_argument(\"--action_rate\", type=float, default=0.10, help=\"\")\n envarg.add_argument(\"--use_act_rate\", type=int, default=1, help='')\n\n memarg = parser.add_argument_group('Replay memory')\n memarg.add_argument(\"--positive_rate\", type=float, default=0.9, help=\"\")\n memarg.add_argument(\"--priority\", type=int, default=1, help=\"\")\n memarg.add_argument(\"--save_replay\", type=int, default=0, help=\"\")\n memarg.add_argument(\"--load_replay\", type=int, default=0, help=\"\")\n memarg.add_argument(\"--replay_size\", type=int, default=50000, help=\"\")\n memarg.add_argument(\"--save_replay_size\", type=int, default=1000, help=\"\")\n memarg.add_argument(\"--save_replay_name\", type=str, default='data/saved_replay_memory.pkl', help=\"\")\n\n netarg = parser.add_argument_group('Deep Q-learning network')\n netarg.add_argument(\"--batch_size\", type=int, default=32, help=\"\")\n netarg.add_argument(\"--num_filters\", type=int, default=32, help=\"\")\n netarg.add_argument(\"--dense_dim\", type=int, default=256, help=\"\")\n netarg.add_argument(\"--num_actions\", type=int, default=2, help=\"\")\n netarg.add_argument(\"--optimizer\", type=str, default='adam', help=\"\")\n netarg.add_argument(\"--learning_rate\", type=float, default=0.001, help=\"\")\n netarg.add_argument(\"--dropout\", type=float, default=0.5, help=\"\")\n netarg.add_argument(\"--gamma\", type=float, default=0.9, help=\"\")\n\n antarg = parser.add_argument_group('Agent')\n antarg.add_argument(\"--exploration_rate_start\", type=float, default=1, help=\"\")\n antarg.add_argument(\"--exploration_rate_end\", type=float, default=0.1, help=\"\")\n antarg.add_argument(\"--exploration_rate_test\", type=float, default=0.0, help=\"\")\n antarg.add_argument(\"--exploration_decay_steps\", type=int, default=1000, help=\"\")\n antarg.add_argument(\"--train_frequency\", type=int, default=1, help=\"\")\n 
antarg.add_argument(\"--train_repeat\", type=int, default=1, help=\"\")\n antarg.add_argument(\"--target_steps\", type=int, default=5, help=\"\")\n antarg.add_argument(\"--random_play\", type=int, default=0, help=\"\")\n antarg.add_argument(\"--display_training_result\", type=int, default=1, help='')\n antarg.add_argument(\"--filter_act_ind\", type=int, default=1, help='')\n\n mainarg = parser.add_argument_group('Main loop')\n mainarg.add_argument(\"--gui_mode\", type=bool, default=False, help='')\n mainarg.add_argument(\"--epochs\", type=int, default=1, help=\"\")\n mainarg.add_argument(\"--start_epoch\", type=int, default=0, help=\"\")\n mainarg.add_argument(\"--stop_epoch_gap\", type=int, default=5, help=\"\")\n mainarg.add_argument(\"--train_episodes\", type=int, default=50, help=\"\")\n mainarg.add_argument(\"--load_weights\", type=bool, default=False, help=\"\")\n mainarg.add_argument(\"--save_weights\", type=bool, default=True, help=\"\")\n mainarg.add_argument(\"--agent_mode\", type=str, default='act', help='action dqn or argument dqn')\n\n\n return parser.parse_args()\n\ndef args_init(args):\n # initialize word2vec\n args.word2vec = KeyedVectors.load_word2vec_format('data/mymodel-new-5-%d' % args.model_dim, binary=True)\n\n # initialize contextual embedding dimensions\n if args.contextual_embedding == 'word2vec':\n args.word_dim = args.tag_dim = args.dis_dim = 50\n args.stacked_embeddings = 'word2vec'\n elif args.contextual_embedding == 'elmo': #glove + elmo\n args.word_dim = args.tag_dim = args.dis_dim = 868\n ## stacked embeddings\n # create a StackedEmbedding object that combines glove and forward/backward flair embeddings\n args.stacked_embeddings = StackedEmbeddings([\n WordEmbeddings('glove'),\n ELMoEmbeddings('small')\n ])\n\n elif args.contextual_embedding == 'bert': #glove + bert\n args.word_dim = args.tag_dim = args.dis_dim = 3172\n args.stacked_embeddings = StackedEmbeddings([\n WordEmbeddings('glove'),\n BertEmbeddings('bert-base-uncased')\n ])\n args.batch_size = 8\n\n elif args.contextual_embedding == 'flair': #glove + flair-forward + flair-backward\n args.word_dim = args.tag_dim = args.dis_dim = 4196\n args.stacked_embeddings = StackedEmbeddings([\n WordEmbeddings('glove'),\n FlairEmbeddings('mix-forward', chars_per_chunk=128),\n FlairEmbeddings('mix-backward', chars_per_chunk=128)\n ])\n if args.agent_mode == 'act':\n args.batch_size = 8\n else:\n args.batch_size = 8\n\n elif args.contextual_embedding == 'glove': # not tested\n args.word_dim = args.tag_dim = args.dis_dim = 100\n args.stacked_embeddings = StackedEmbeddings([\n WordEmbeddings('glove'),\n ])\n\n # weights loaded, set exploration rate to minimum\n if args.load_weights: # 1 to 0.1. 
decayed to minimum.\n args.exploration_rate_start = args.exploration_rate_end\n\n # agent mode arguments, set number of words to 100\n if args.agent_mode == 'arg':\n args.num_words = args.context_len\n args.display_training_result = 0\n\n args.result_dir = 'results/%s_%s_%s' % (args.domain, args.agent_mode, args.contextual_embedding)\n\n return args\n\ndef main(args):\n print('Current time is: %s' % get_time())\n print('Starting at main...')\n result = {'rec': [], 'pre': [], 'f1': [], 'rw': []}\n\n start = time.time()\n\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n set_session(tf.Session(config=config)) # global Keras session\n\n env_act = Environment(args, args.agent_mode)\n net_act = DeepQLearner(args, args.agent_mode, 'channels_last')\n mem_act = ReplayMemory(args, args.agent_mode)\n agent = Agent(env_act, mem_act, net_act, args) # agent takes in environment, memory, model and agent_mode\n\n # loop over epochs\n epoch_result = {'rec': [0.0], 'pre': [0.0], 'f1': [0.0], 'rw': [0.0]}\n training_result = {'rec': [], 'pre': [], 'f1': [], 'loss': [], 'rw': []}\n test_result = {'rec': [], 'pre': [], 'f1': [], 'loss': [], 'rw': []}\n log_epoch = 0\n\n\n # if we are loading weights, we don't need to train [no exploration is required. We have exploration rate start = end = 0.1], just test on test set.\n if args.load_weights:\n print('Loading weights ...')\n filename = 'weights/%s_%s_%s.h5' % (args.domain, args.agent_mode, args.contextual_embedding)\n net_act.load_weights(filename)\n #accuracy on test set\n with open(\"%s.txt\" % (args.result_dir + 'testset'), 'w') as outfile:\n rec, pre, f1, rw = agent.test(args.test_steps, outfile, test_flag=True)\n outfile.write('\\n\\n Test f1 value: {}, recall : {}, precision : {}, reward: {} \\n'.format(f1, rec,pre,rw ))\n print('\\n\\n Test f1 value: {}, recall : {}, precision : {}, reward: {} \\n'.format(f1, rec,pre,rw ))\n\n if not args.load_weights:\n with open(\"%s.txt\" % (args.result_dir), 'w') as outfile:\n print('\\n Arguments:')\n outfile.write('\\n Arguments:\\n')\n for k, v in sorted(args.__dict__.items(), key=lambda x: x[0]):\n print('{}: {}'.format(k, v))\n outfile.write('{}: {}\\n'.format(k, v))\n print('\\n')\n outfile.write('\\n')\n\n # do training\n\n for epoch in tqdm(range(args.start_epoch, args.start_epoch + args.epochs)):\n num_test = -1\n env_act.train_epoch_end_flag = False\n while not env_act.train_epoch_end_flag: #unless all documents are covered\n # training\n num_test += 1\n restart_init = False if num_test > 0 else True\n tmp_result = agent.train(args.train_steps, args.train_episodes, restart_init) #Train episodes = 50 , max episodes.\n for k in training_result:\n training_result[k].extend(tmp_result[k])\n\n rec, pre, f1, rw = agent.test(args.valid_steps, outfile) # not testing; actually validation\n\n if f1 > max(epoch_result['f1']):\n if args.save_weights:\n filename = 'weights/%s_%s_%s.h5' % (args.domain, args.agent_mode, args.contextual_embedding)\n net_act.save_weights(filename)\n\n epoch_result['f1'].append(f1)\n epoch_result['rec'].append(rec)\n epoch_result['pre'].append(pre)\n epoch_result['rw'].append(rw)\n log_epoch = epoch\n outfile.write('\\n\\n Best f1 value: {} best epoch: {}\\n'.format(epoch_result, log_epoch))\n print('\\n\\n Best f1 value: {} best epoch: {}\\n'.format(epoch_result, log_epoch))\n\n # if no improvement after args.stop_epoch_gap, break\n # EARLY STOPPING\n if epoch - log_epoch >= args.stop_epoch_gap:\n outfile.write('\\n\\nBest f1 value: {} best epoch: 
{}\\n'.format(epoch_result, log_epoch))\n print('\\nepoch: %d result_dir: %s' % (epoch, args.result_dir))\n print('-----Early stopping, no improvement after %d epochs-----\\n' % args.stop_epoch_gap)\n break\n\n # if args.save_replay: #0 by default\n # mem_act.save(args.save_replay_name, args.save_replay_size)\n\n filename = '%s_training_process.pdf' % (args.result_dir)\n plot_results(epoch_result, args.domain, filename)\n outfile.write('\\n\\n training process:\\n{}\\n\\n'.format(epoch_result))\n\n best_ind = epoch_result['f1'].index(max(epoch_result['f1']))\n for k in epoch_result:\n result[k].append(epoch_result[k][best_ind])\n outfile.write('{}: {}\\n'.format(k, result[k]))\n print(('{}: {}\\n'.format(k, result[k])))\n avg_f1 = sum(result['f1']) / len(result['f1'])\n avg_rw = sum(result['rw']) / len(result['rw'])\n outfile.write('\\nAvg f1: {} Avg reward: {}\\n'.format(avg_f1, avg_rw))\n print('\\nAvg f1: {} Avg reward: {}\\n'.format(avg_f1, avg_rw))\n\n tf.compat.v1.reset_default_graph()\n end = time.time()\n print('Total time cost: %ds' % (end - start))\n print('Current time is: %s\\n' % get_time())\n\nif __name__ == '__main__':\n args = args_init(preset_args())\n set_image_data_format('channels_last')\n main(args)\n\n" ]
[ [ "tensorflow.compat.v1.reset_default_graph", "tensorflow.compat.v1.ConfigProto", "tensorflow.Session" ] ]
Liuhongzhi2018/Car_detection
[ "f32fea9c348c691ccc30b9804a4f3fa32732bbae" ]
[ "video_demo_car.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 12 14:47:25 2020\r\n@author: NAT\r\n\"\"\"\r\nimport sys\r\nsys.path.append(\"./model/\")\r\nimport numpy as np\r\nimport cv2\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nfrom utils import *\r\nfrom torchvision import transforms\r\nimport torch\r\nfrom model.SSD300 import SSD300\r\nfrom model.vgg import VGG16BaseNet, AuxiliaryNet, PredictionNet\r\nimport argparse\r\nimport time\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"--trained_model\", default= \"model_state_ssd300.pth.tar\", type= str,\r\n help = \"Trained state_dict file path to open\")\r\nap.add_argument(\"--input\", type= str, help = \"Path to the input video\")\r\nap.add_argument(\"--output\", type= str, help= \"Path to the save video\")\r\nap.add_argument(\"--min_score\", default= 0.4, type= float, help = \"Min score for NMS\")\r\nap.add_argument(\"--max_overlap\", default= 0.45, type= float, help = \"Max overlap for NMS\")\r\nap.add_argument(\"--top_k\", default= 5, type= int, help = \"Top k for NMS\")\r\nap.add_argument(\"--save_fps\", default = 24, type= int, help = \"FPS for save output\")\r\nargs = ap.parse_args()\r\n\r\nimg_path = args.input\r\nout_path = args.output\r\ntrained_model = torch.load(args.trained_model)\r\nouput_path = args.output\r\nstart_epoch = trained_model[\"epoch\"] + 1\r\nprint('\\nLoaded model trained with epoch %d.\\n' % start_epoch)\r\n\r\nprint(\"input: \",img_path)\r\nprint(\"output: \",out_path)\r\n\r\nmodel = trained_model['model']\r\nmodel = model.to(device)\r\nmodel.eval()\r\n\r\n#Init video stream, writer to out video\r\nvs = cv2.VideoCapture(img_path)\r\nwriter = None\r\n(W, H) = (None, None)\r\n\r\nresize = transforms.Resize((300, 300))\r\nto_tensor = transforms.ToTensor()\r\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225])\r\n\r\nfont = ImageFont.truetype('./Font/Arial.ttf',15)\r\n\r\ndef detect(original_image, min_score, max_overlap, top_k, suppress = None):\r\n image = normalize(to_tensor(resize(original_image)))\r\n \r\n image = image.to(device)\r\n \r\n locs_pred, cls_pred = model(image.unsqueeze(0))\r\n \r\n detect_boxes, detect_labels, detect_scores = model.detect(locs_pred, cls_pred, \r\n min_score, max_overlap, top_k)\r\n \r\n detect_boxes = detect_boxes[0].to('cpu')\r\n \r\n original_dims = torch.FloatTensor(\r\n [W, H, W, H]).unsqueeze(0)\r\n \r\n detect_boxes = detect_boxes * original_dims\r\n \r\n detect_labels = [rev_label_map[l] for l in detect_labels[0].to('cpu').tolist()]\r\n print(\"detect_labels: \",detect_labels)\r\n \r\n if detect_labels == [\"background\"]:\r\n return original_image\r\n \r\n annotated_image = original_image\r\n draw = ImageDraw.Draw(annotated_image)\r\n #font = ImageFont.truetype(\"arial.ttf\", 15)\r\n \r\n for i in range(detect_boxes.size(0)):\r\n if suppress is not None:\r\n if detect_labels[i] in suppress:\r\n continue\r\n\r\n box_location = detect_boxes[i].tolist()\r\n print(\"detect_labels[i]: \",detect_labels[i])\r\n if detect_labels[i] == 'car':\r\n color = (255,0,0)\r\n draw.rectangle(xy=box_location, outline=color)\r\n \r\n draw.rectangle(xy=[l + 1. 
for l in box_location], outline=color)\r\n\r\n text_size = font.getsize(detect_labels[i].upper())\r\n text_location = [box_location[0] + 2., box_location[1] - text_size[1]]\r\n textbox_location = [box_location[0], box_location[1] - text_size[1], box_location[0] + text_size[0] + 4.,\r\n box_location[1]]\r\n draw.rectangle(xy=textbox_location, fill=color)\r\n draw.text(xy=text_location, text=detect_labels[i].upper(), fill='white',font=font)\r\n\r\n return annotated_image\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # if writer is None:\r\n # writer = cv2.VideoWriter(out_path, 0x7634706d, args.save_fps ,(W, H), True)\r\n if not vs.isOpened():\r\n raise IOError(\"Couldn't open webcam or video\")\r\n # video_fps = vs.get(cv2.CAP_PROP_FPS)\r\n video_FourCC = int(vs.get(cv2.CAP_PROP_FOURCC))\r\n video_fps = vs.get(cv2.CAP_PROP_FPS)\r\n video_size = (int(vs.get(cv2.CAP_PROP_FRAME_WIDTH)),\r\n int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT)))\r\n video_FourCC = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\r\n writer = cv2.VideoWriter(out_path, fourcc=video_FourCC, fps=video_fps, frameSize=video_size)\r\n\r\n while True:\r\n (grabbed, frame) = vs.read()\r\n if not grabbed:\r\n break\r\n if W is None or H is None:\r\n (H, W) = frame.shape[:2]\r\n start = time.time()\r\n converted = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n #Convert image format cv2 to pil\r\n pil_img = Image.fromarray(converted)\r\n annotated_image = detect(pil_img, args.min_score, args.max_overlap, args.top_k)\r\n \r\n #Convert image format pil to cv2\r\n cv_annotated_image = np.array(annotated_image)\r\n cv_annotated_image = cv_annotated_image[:, :, ::-1].copy()\r\n writer.write(cv_annotated_image)\r\n print(\"FPS: {0:.2f}\".format(1/(time.time() - start)))\r\n\r\n print(\"Done! ^_^\")\r\n writer.release()\r\n vs.release()" ]
[ [ "numpy.array", "torch.FloatTensor", "torch.cuda.is_available", "torch.load" ] ]
john-hawkins/Minimum_Required_MLModel_Estimator
[ "dbcb7622b8fced2db8bfdbf0f608936534925971" ]
[ "minvime/generator.py" ]
[ "\"\"\" \n Distribution Generators for use in Performance Estimation for Regression Models\n\"\"\"\nimport random\nimport numpy as np\nimport pandas as pd\n\n######################################################################\ndef extract_distribution_from_sample(filepath):\n \"\"\" Extract a sample of target values from a file on the given path \"\"\"\n try:\n df = pd.read_csv(filepath)\n indecies = [ np.issubdtype(x, np.number) for x in df.dtypes]\n only_numeric = df.loc[:,indecies]\n if len(only_numeric.columns)==0:\n return [], \" your sample file: Please provide a CSV with a column of numeric values.\"\n else:\n return list(only_numeric.iloc[:,0]), \"\"\n #try:\n # df = pd.read_csv('evaluate.py')\n except pd.errors.ParserError:\n return [], \"Problem Parsing your sample file: Please provide a CSV with a column of numeric values.\"\n except:\n return [], \"There was an unanticipated problem with your file. Please provide a CSV with a column of numeric values.\"\n\n######################################################################\ndef produce_distribution_sample(mean, max, min):\n \"\"\" Given some simple parameters we generate a sample of target values. TODO: This needs work \"\"\"\n # START WITH SAMPLES BETWEEN MIN AND MAX\n baseline = generate_min_max_baseline(min, max)\n threshold = (max-min)/200\n enhanced = resample_toward_mean(baseline, mean, threshold)\n return enhanced, \"\"\n\n######################################################################\ndef resample_toward_mean(baseline, mean, threshold):\n current_mean = np.mean(baseline)\n rez = baseline.copy()\n print(\"Target Mean:\", mean, \" baseline sample: \", len(baseline) )\n while abs(mean - current_mean) > threshold:\n temp = rez.copy()\n new_sample = random.sample(rez, 1)[0]\n temp.append(new_sample)\n temp_mean = np.mean(temp)\n if abs(mean-temp_mean)<abs(mean - current_mean):\n current_mean = temp_mean\n rez = temp\n print(\"Sample accepted. New Mean:\", current_mean)\n else:\n print(\"Sample rejected.\")\n return rez\n\n######################################################################\ndef generate_min_max_baseline(min, max, sample_size=1000):\n difference = max-min\n return [min + (difference * x/(sample_size-1)) for x in range(sample_size)]\n\n\n########################################################################\ndef generate_candidate_predictions(dist):\n sigma = np.std(dist)\n factors = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n candidates = []\n for index, factor in enumerate(factors):\n for variant in range(10):\n candidates.append( copy_with_noise(dist, factor*sigma) )\n return candidates\n\n########################################################################\ndef copy_with_noise(dist, scale):\n rez = list(map(lambda x: x + scale*np.random.normal(), dist))\n return rez\n\n" ]
[ [ "pandas.read_csv", "numpy.issubdtype", "numpy.std", "numpy.random.normal", "numpy.mean" ] ]
CactusBall/MobaAnalysis
[ "efe6c58cbffaf94220db79e7fda579dea97cb3ac" ]
[ "analysis/test.py" ]
[ "import pandas as pd\n\ncsv = pd.read_csv('/Users/emrys/Documents/BattleList.csv')\nopenids = csv.get('openid')\ngames = csv.get('game_seq')\nfor o in openids:\n # print(o)\n if 'owanlsn0eMVK8aKyiWf0sYWxW5VU' == o:\n\n print('yes')\n\n# print('owanlsoStRgq-MP1HX3ueUHLE73U' is 'owanlsoStRgq-MP1HX3ueUHLE73U')" ]
[ [ "pandas.read_csv" ] ]
S-ayanide/Human-Action-Classifier-And-Pedestrian-Detection
[ "553ad687a677f155e4c0bb4f59d4949e70fd6cfc" ]
[ "posenet-py/webcam_demo.py" ]
[ "import tensorflow as tf\nimport cv2\nimport time\nimport argparse\n\nimport posenet\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model', type=int, default=101)\nparser.add_argument('--cam_id', type=int, default=0)\nparser.add_argument('--cam_width', type=int, default=1280)\nparser.add_argument('--cam_height', type=int, default=720)\nparser.add_argument('--scale_factor', type=float, default=0.7125)\nparser.add_argument('--file', type=str, default=None, help=\"Optionally use a video file instead of a live camera\")\nargs = parser.parse_args()\n\n\ndef main():\n with tf.compat.v1.Session() as sess:\n model_cfg, model_outputs = posenet.load_model(args.model, sess)\n output_stride = model_cfg['output_stride']\n\n if args.file is not None:\n cap = cv2.VideoCapture(args.file)\n else:\n cap = cv2.VideoCapture(args.cam_id)\n\n # cap=cv2.VideoCapture(\"video.mp4\")\n # cap.set(cv2.CAP_PROP_POS_MSEC,12000)\n cap.set(3, args.cam_width)\n cap.set(4, args.cam_height)\n\n start = time.time()\n frame_count = 0\n while True:\n input_image, display_image, output_scale = posenet.read_cap(\n cap, scale_factor=args.scale_factor, output_stride=output_stride)\n\n heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(\n model_outputs,\n feed_dict={'image:0': input_image}\n )\n\n pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(\n heatmaps_result.squeeze(axis=0),\n offsets_result.squeeze(axis=0),\n displacement_fwd_result.squeeze(axis=0),\n displacement_bwd_result.squeeze(axis=0),\n output_stride=output_stride,\n max_pose_detections=10,\n min_pose_score=0.15)\n\n keypoint_coords *= output_scale\n\n # TODO this isn't particularly fast, use GL for drawing and display someday...\n overlay_image = posenet.draw_skel_and_kp(\n display_image, pose_scores, keypoint_scores, keypoint_coords,\n min_pose_score=0.15, min_part_score=0.1)\n\n cv2.imshow('posenet', overlay_image)\n frame_count += 1\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n print('Average FPS: ', frame_count / (time.time() - start))\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "tensorflow.compat.v1.Session" ] ]
aluquecerp/U-Time
[ "a9ed4892da77d165a71dbfef1d069d782c909757" ]
[ "utime/preprocessing/scaling.py" ]
[ "\"\"\"\nFunctions for channel-wise scaling of PSG data\n\nImplements the MultiChannelScaler, which fits and applies scalers from the\nsklearn.preprocessing module individually to channels of a PSG ndarray\n\"\"\"\n\nimport sklearn.preprocessing as preprocessing\nimport numpy as np\n\n\ndef assert_scaler(scaler):\n \"\"\"\n Checks whether a scaler (string) exists in the sklearn.preprocessing\n module.\n\n Args:\n scaler: String class name representation of a potential\n sklearn.preprocessing Scaler object.\n\n Returns:\n True if the scaler exists in the module, otherwise False\n \"\"\"\n if isinstance(scaler, str):\n scaler = [scaler]\n ok = []\n for sc in scaler:\n ok.append(sc in preprocessing.__dict__)\n return all(ok)\n\n\ndef get_scaler(scaler):\n \"\"\"\n Returns a MultiChannelScaler initialized to perform scaling according to\n the 'scaler' argument\n\n Uses default parameters for all scalers except QuantileTransformer, which\n uses output data according to a normal distribution instead of the default\n uniform.\n\n Args:\n scaler: Either a string specifying a single scaler from the\n sklearn.preprocessing module to apply to all PSG channels\n (one such fit to each channel separately), or\n a list of strings specifying what scaler to fit and apply in\n each channel. E.g. ['StandardScaler', 'RobustScaler'] will\n apply the StandardScaler to channel 0 and RobustScaler to\n channel 1.\n\n Returns:\n A MultiChannelScaler object.\n \"\"\"\n if isinstance(scaler, str):\n scaler = [scaler]\n scalers = []\n for sc in scaler:\n kwargs = {} # Currently kwargs cannot be passed to the scalers\n if sc == \"QuantileTransformer\":\n kwargs[\"output_distribution\"] = \"normal\"\n scalers.append((preprocessing.__dict__[sc], kwargs))\n return MultiChannelScaler(scalers=scalers)\n\n\ndef apply_scaling(X, scaler):\n \"\"\"\n Initializes a MultiChannelScaler object and applies it immediately to the\n same data. Also returns the fit scaler\n\n Args:\n X: A ndarray, PSG data, shape [N, C]\n scaler: A string or list of strings, see 'get_scaler'\n\n Returns:\n Transformed X data\n The fit MultiChannelScaler object\n \"\"\"\n # Get scaler\n multi_scaler = get_scaler(scaler).fit(X)\n\n # Fit and apply transformation\n return multi_scaler.transform(X), multi_scaler\n\n\nclass MultiChannelScaler(object):\n \"\"\"\n Wraps around Scaler objects from the sklearn.preprocessing module,\n initializing, fitting, storing and applying such scalers to/for individual\n channels of a [N, C] shaped ndarray (e.g. PSG data with C channels)\n \"\"\"\n def __init__(self, scalers, with_centering=True):\n \"\"\"\n Initializes the scaler object with a set of scaler strings. Does not\n fit or transform any data yet.\n\n Args:\n scalers: A list of 2-tuples/lists each of format\n (scaler class name, kwargs). The outer list should\n be of length equal to the number of channels of the\n PSG passed to self.fit, self.transform,\n self.fit_transform.\n with_centering: Apply centering to the data. 
If False, only\n scaling of the data is applied.\n \"\"\"\n err = \"'scalers' should be a list of 2-tuples/lists (each of format \" \\\n \"(scaler class, kwargs to scaler init), got {}\".format(scalers)\n if not isinstance(scalers, (tuple, list, np.ndarray)):\n raise ValueError(err)\n if any([len(s) != 2 for s in scalers]):\n raise ValueError(err)\n # Store scaler class and passed parameters\n self.scaler_tuples = scalers\n self.with_centering = with_centering\n\n # Store list of initialized scalers fit to each channel\n self.scalers = []\n\n # Store number of channels\n self.n_channels = None\n\n def fit(self, X):\n \"\"\"\n Fit all scalers specified in self.scalers to individual channels of X\n\n Args:\n X: ndarray of shape [N, C], where C == len(self.scaler_tuples)\n\n Returns:\n self\n \"\"\"\n if X.ndim != 2:\n raise ValueError(\"Invalid shape for X (%s)\" % X.shape)\n # Set number of channels\n self.n_channels = X.shape[-1]\n if len(self.scaler_tuples) == 1:\n scaler_tups = self.scaler_tuples * self.n_channels\n else:\n if len(self.scaler_tuples) != self.n_channels:\n raise ValueError(\"Number of passed scalers ({}) does not \"\n \"match the number of channels in X \"\n \"({})\".format(len(self.scaler_tuples),\n self.n_channels))\n scaler_tups = self.scaler_tuples\n\n # Fit the scalers to each channel of X\n fit_scalers = []\n for i, (scaler_cls, scaler_kwargs) in enumerate(scaler_tups):\n try:\n scaler_cls = scaler_cls(**scaler_kwargs,\n with_centering=self.with_centering)\n except TypeError:\n scaler_cls = scaler_cls(**scaler_kwargs,\n with_mean=self.with_centering)\n xs = X[:, i]\n scaler_cls.fit(xs.reshape(-1, 1))\n fit_scalers.append(scaler_cls)\n self.scalers = fit_scalers\n return self\n\n def transform(self, X):\n \"\"\"\n Transform each channel in X according to the (fitted) scalers in\n self.scalers\n\n Args:\n X: ndarray of shape [N, C], where C == len(self.scalers)\n\n Returns:\n X, transformed data, shape [N, C]\n \"\"\"\n if X.shape[-1] != self.n_channels:\n raise ValueError(\"Invalid input of dimension %i, expected \"\n \"last axis with %i channels\" % (X.ndim,\n self.n_channels))\n # Prepare volume like X to store results\n transformed = np.empty_like(X)\n for i in range(self.n_channels):\n scl = self.scalers[i]\n s = scl.transform(X[:, i].reshape(-1, 1))\n transformed[:, i] = s.reshape(X.shape[:-1])\n return transformed\n\n def fit_transform(self, X):\n \"\"\" Fits scalers to and immediately transform data in X \"\"\"\n self.fit(X)\n return self.transform(X)\n" ]
[ [ "numpy.empty_like" ] ]
Tinusf/imbalanced-learn
[ "67ea479fc907f52d7d7776b0d722654bfd699fa2" ]
[ "imblearn/under_sampling/_prototype_selection/_random_under_sampler.py" ]
[ "\"\"\"Class to perform random under-sampling.\"\"\"\n\n# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>\n# Christos Aridas\n# License: MIT\n\nimport numpy as np\n\nfrom sklearn.utils import check_array\nfrom sklearn.utils import check_consistent_length\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils import _safe_indexing\n\nfrom ..base import BaseUnderSampler\nfrom ...utils import check_target_type\nfrom ...utils import Substitution\nfrom ...utils._docstring import _random_state_docstring\n\n\n@Substitution(\n sampling_strategy=BaseUnderSampler._sampling_strategy_docstring,\n random_state=_random_state_docstring,\n)\nclass RandomUnderSampler(BaseUnderSampler):\n \"\"\"Class to perform random under-sampling.\n\n Under-sample the majority class(es) by randomly picking samples\n with or without replacement.\n\n Read more in the :ref:`User Guide <controlled_under_sampling>`.\n\n Parameters\n ----------\n {sampling_strategy}\n\n {random_state}\n\n replacement : bool, default=False\n Whether the sample is with or without replacement.\n\n Attributes\n ----------\n sample_indices_ : ndarray of shape (n_new_samples)\n Indices of the samples selected.\n\n .. versionadded:: 0.4\n\n See Also\n --------\n NearMiss : Undersample using near-miss samples.\n\n Notes\n -----\n Supports multi-class resampling by sampling each class independently.\n Supports heterogeneous data as object array containing string and numeric\n data.\n\n Examples\n --------\n\n >>> from collections import Counter\n >>> from sklearn.datasets import make_classification\n >>> from imblearn.under_sampling import \\\nRandomUnderSampler # doctest: +NORMALIZE_WHITESPACE\n >>> X, y = make_classification(n_classes=2, class_sep=2,\n ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,\n ... 
n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)\n >>> print('Original dataset shape %s' % Counter(y))\n Original dataset shape Counter({{1: 900, 0: 100}})\n >>> rus = RandomUnderSampler(random_state=42)\n >>> X_res, y_res = rus.fit_resample(X, y)\n >>> print('Resampled dataset shape %s' % Counter(y_res))\n Resampled dataset shape Counter({{0: 100, 1: 100}})\n \"\"\"\n\n def __init__(\n self, sampling_strategy=\"auto\", random_state=None, replacement=False\n ):\n super().__init__(sampling_strategy=sampling_strategy)\n self.random_state = random_state\n self.replacement = replacement\n\n def _check_X_y(self, X, y):\n y, binarize_y = check_target_type(y, indicate_one_vs_all=True)\n X = check_array(X, accept_sparse=[\"csr\", \"csc\"], dtype=None,\n force_all_finite=False, allow_nd=True)\n y = check_array(\n y, accept_sparse=[\"csr\", \"csc\"], dtype=None, ensure_2d=False\n )\n check_consistent_length(X, y)\n return X, y, binarize_y\n\n def _fit_resample(self, X, y):\n random_state = check_random_state(self.random_state)\n\n idx_under = np.empty((0,), dtype=int)\n\n for target_class in np.unique(y):\n if target_class in self.sampling_strategy_.keys():\n n_samples = self.sampling_strategy_[target_class]\n index_target_class = random_state.choice(\n range(np.count_nonzero(y == target_class)),\n size=n_samples,\n replace=self.replacement,\n )\n else:\n index_target_class = slice(None)\n\n idx_under = np.concatenate(\n (\n idx_under,\n np.flatnonzero(y == target_class)[index_target_class],\n ),\n axis=0,\n )\n\n self.sample_indices_ = idx_under\n\n return _safe_indexing(X, idx_under), _safe_indexing(y, idx_under)\n\n def _more_tags(self):\n return {\n \"X_types\": [\"2darray\", \"string\"],\n \"sample_indices\": True,\n \"allow_nan\": True,\n }\n" ]
[ [ "sklearn.utils._safe_indexing", "numpy.unique", "sklearn.utils.check_consistent_length", "sklearn.utils.check_array", "numpy.flatnonzero", "numpy.count_nonzero", "sklearn.utils.check_random_state", "numpy.empty" ] ]
kitkatkandybar/RFSoC-Spectrum-Monitoring
[ "11eb7b1d1e27f35aa2161bbc7c8531eed1c8cca5" ]
[ "front_end/stream_callbacks.py" ]
[ "import dash\r\nfrom dash import dcc\r\nfrom dash import html\r\nimport dash_bootstrap_components as dbc\r\nimport orjson\r\nimport time\r\nimport shutil\r\nimport digital_rf\r\n\r\nimport os.path\r\nimport numpy as np\r\n\r\nimport config as cfg\r\n\r\n\r\n###################################################################################################\r\n#\r\n# Live Streaming Callbacks\r\n#\r\n###################################################################################################\r\n\r\n\r\n@dash.callback(\r\n dash.Output({'type': 'stream-picker', 'index': dash.ALL,}, 'options'),\r\n dash.Input('stream-picker-div', 'n_clicks'),\r\n dash.State(\"content-tabs\", 'value'),\r\n)\r\ndef get_active_streams(n, tab):\r\n \"\"\"\r\n gets the list of boards available for live streaming when the streams tab is clicked\r\n \"\"\"\r\n if n and tab == 'content-tab-2':\r\n streams = cfg.redis_instance.smembers(\"active_streams\")\r\n print(f\"Got current active streams: {streams}\")\r\n picker_options = [\r\n {'label': s.decode(), 'value': s.decode()} for s in streams\r\n ]\r\n\r\n return [picker_options]\r\n\r\n raise dash.exceptions.PreventUpdate\r\n\r\n\r\n@dash.callback(\r\n dash.Output({'type': 'stream-metadata-accordion', 'index': 0,}, 'children'),\r\n dash.Input({'type': 'stream-picker', 'index': dash.ALL,}, 'value'),\r\n)\r\ndef update_stream_metadata(stream_names):\r\n \"\"\"\r\n Updates the sidebar with metadata from a board live stream\r\n \"\"\"\r\n if not stream_names[0]:\r\n return html.P(\"Metadata will appear here when you pick a stream\"),\r\n\r\n print(f\"Getting metadata for {stream_names[0]}\")\r\n metadata = cfg.redis_instance.hgetall(f\"metadata:{stream_names[0]}\")\r\n\r\n\r\n if not metadata:\r\n raise dash.exceptions.PreventUpdate\r\n\r\n metadata = {k.decode(): v.decode() for k,v in metadata.items()}\r\n metadata['y_max'] = float(metadata['y_max'] )\r\n metadata['y_min'] = float(metadata['y_min'] )\r\n\r\n cfg.spec_datas = {}\r\n cfg.spec_datas['metadata'] = metadata\r\n\r\n print(\"Got stream metadata:\\n%s\", metadata)\r\n\r\n sfreq = float(metadata['sfreq'])\r\n n_samples = int(metadata['n_samples'])\r\n cfreq = float(metadata['cfreq'])\r\n\r\n cfg.sa.spec.sample_frequency = sfreq\r\n cfg.sa.spectrogram.sample_frequency = sfreq\r\n\r\n # TODO: This is likely not the best way to set center frequency for the graphs\r\n cfg.sa.spec.centre_frequency = sfreq/4\r\n cfg.sa.spectrogram.centre_frequency = sfreq/4\r\n cfg.sa.spec.number_samples = n_samples\r\n cfg.sa.spectrogram.number_samples = n_samples\r\n\r\n # TODO: Set the decimation factor some other way?\r\n cfg.sa.spectrogram.decimation_factor = 2\r\n cfg.sa.spec.decimation_factor = 2\r\n\r\n\r\n if sfreq > 1e9:\r\n sfreq = f\"{sfreq / 1e9} GHz\"\r\n elif sfreq > 1e6:\r\n sfreq = f\"{sfreq / 1e6} MHz\"\r\n elif sfreq > 1e3:\r\n sfreq = f\"{sfreq / 1e3} kHz\"\r\n else:\r\n sfreq = f\"{sfreq } Hz\"\r\n\r\n\r\n if cfreq > 1e9:\r\n cfreq = f\"{cfreq / 1e9} GHz\"\r\n elif cfreq > 1e6:\r\n cfreq = f\"{cfreq / 1e6} MHz\"\r\n elif sfreq > 1e3:\r\n cfreq = f\"{cfreq / 1e3} kHz\"\r\n else:\r\n cfreq = f\"{cfreq } Hz\"\r\n\r\n children = [\r\n html.Table([\r\n html.Tr([\r\n html.Th(\"Name:\"), \r\n html.Td(stream_names[0]),\r\n ]),\r\n html.Tr([\r\n html.Th(\"Sample Rate:\"), \r\n html.Td(sfreq),\r\n ]),\r\n html.Tr([\r\n html.Th([\"Center Frequency:\"]), \r\n html.Td([cfreq]),\r\n ]),\r\n html.Tr([\r\n html.Th([\"Channel:\"]), \r\n html.Td([cfg.spec_datas['metadata']['channel']]),\r\n ]),\r\n ], \r\n style={'width': 
'100%'}),\r\n ]\r\n return children\r\n\r\n\r\n\r\n@dash.callback(\r\n dash.Output('stream-data', 'data'),\r\n dash.Input('stream-graph-interval', 'n_intervals'),\r\n dash.State({'type': 'stream-picker', 'index': dash.ALL,}, 'value'),\r\n prevent_initial_call=True\r\n)\r\ndef get_next_data(n, stream_name):\r\n \"\"\"\r\n Gets the newest point of data in a live stream and set its to be displayed on the dashboard graphs.\r\n\r\n This function gets called at every tick of 'stream-graph-interval'\r\n \"\"\"\r\n name = stream_name[0]\r\n\r\n # TODO: Make sure we don't get duplicates!\r\n # get newest data point\r\n rstrm = cfg.redis_instance.xrevrange(f'stream:{name}', count=1) \r\n\r\n if (len(rstrm) == 0):\r\n print(f\"Got no new stream data from stream:{name}\")\r\n raise dash.exceptions.PreventUpdate\r\n\r\n d = orjson.loads(rstrm[0][1][b'data'])\r\n return d\r\n \r\n@dash.callback(\r\n dash.Output('stream-graph-interval', 'disabled'),\r\n dash.Input({'type': 'play-stream-data', 'index': dash.ALL,}, 'n_clicks'),\r\n dash.Input({'type': 'pause-stream-data', 'index': dash.ALL,}, 'n_clicks'),\r\n dash.Input(\"content-tabs\", 'value'),\r\n prevent_initial_call=True\r\n )\r\ndef handle_graph_stream_interval(play_n, pause_n, tab):\r\n \"\"\"\r\n Handles enabling/disabling the Interval which controls when to fetch live stream data\r\n \"\"\"\r\n ctx = dash.callback_context\r\n\r\n prop_id = ctx.triggered[0]['prop_id'].split('.')[0]\r\n if 'play' in prop_id and play_n[0] > 0 and tab == 'content-tab-2':\r\n cfg.sa.spectrogram.clear_data()\r\n return False\r\n\r\n return True\r\n\r\n\r\n@dash.callback(\r\n dash.Output({'type': 'play-stream-data', 'index': 0,}, 'disabled'),\r\n dash.Input({'type': 'stream-picker', 'index': dash.ALL,}, 'value'),\r\n)\r\ndef handle_disable_play_stream_button(stream_val):\r\n if not stream_val[0]:\r\n return True\r\n\r\n return False\r\n\r\n@dash.callback(\r\n dash.Output({'type': 'pause-stream-data', 'index': 0,}, 'disabled'),\r\n dash.Input({'type': 'play-stream-data', 'index': dash.ALL,}, 'disabled'),\r\n dash.Input({'type': 'stream-picker', 'index': dash.ALL,}, 'value'),\r\n)\r\ndef handle_disable_pause_stream_button(play_disabled, stream_val):\r\n if not stream_val[0]:\r\n return True\r\n\r\n return False\r\n\r\n\r\n###################################################################################################\r\n#\r\n# Data Download Callbacks\r\n#\r\n###################################################################################################\r\n\r\n\r\n@dash.callback(\r\n dash.Output(\"download-modal\", \"is_open\"),\r\n dash.Input(\"open-download-modal-button\", \"n_clicks\"), \r\n dash.Input(\"close-download-modal-button\", \"n_clicks\"),\r\n dash.Input({'type': 'download-button', 'index': dash.ALL,}, 'n_clicks'),\r\n dash.State(\"download-modal\", \"is_open\"),\r\n)\r\ndef toggle_download_modal(n_open, n_close, n_load, is_open):\r\n if n_open or n_close or n_load[0]:\r\n return not is_open\r\n return is_open\r\n\r\n\r\n@dash.callback(\r\n dash.Output({'type': 'download-board-picker', 'index': dash.ALL,}, 'options'),\r\n dash.Input('download-board-picker-div', 'n_clicks'),\r\n dash.State(\"content-tabs\", 'value'),\r\n)\r\ndef get_active_download_boards(n, tab):\r\n \"\"\"\r\n Gets the list of boards available for downloading data from Redis \r\n \"\"\"\r\n if n and tab == 'content-tab-2':\r\n boards = cfg.redis_instance.smembers(\"active_command_boards\")\r\n print(\"Got list of boards from active_command_boards: {boards}\")\r\n\r\n picker_options = 
[\r\n {'label': s.decode(), 'value': s.decode()} for s in boards\r\n ]\r\n\r\n return [picker_options]\r\n\r\n\r\n raise dash.exceptions.PreventUpdate\r\n\r\n\r\ndef write_drf_file(rstrm_real, rstrm_imag, metadata):\r\n \"\"\"\r\n Writes raw IQ to a digitalRF file\r\n\r\n Note: This was done very last minute. It needs heavy reworking\r\n \"\"\"\r\n n_points = len(rstrm_real)\r\n print(f\"n points: {n_points}\")\r\n\r\n datadir = os.path.join(os.path.dirname(__file__), \"drf_data\")\r\n chdir = os.path.join(datadir, \"channel0\")\r\n\r\n # writing parameters\r\n sample_rate_numerator = metadata['sample_rate_numerator']\r\n sample_rate_denominator = metadata['sample_rate_denominator']\r\n sample_rate = np.longdouble(sample_rate_numerator) / sample_rate_denominator\r\n dtype_str = metadata['dtype_str']\r\n sub_cadence_secs = metadata['sub_cadence_secs'] \r\n file_cadence_seconds = metadata['file_cadence_seconds'] \r\n compression_level = metadata['compression_level'] \r\n checksum = metadata['checksum'] \r\n is_complex = metadata['is_complex'] \r\n is_continuous = metadata['is_continuous'] \r\n num_subchannels = metadata['num_subchannels'] \r\n marching_periods = metadata['marching_periods'] \r\n uuid = metadata['uuid'] \r\n vector_length = metadata['number_samples'] # number of samples written for each call - typically MUCH longer\r\n \r\n # create short data in r/i to test using that to write\r\n arr_data = np.ones(\r\n (vector_length*n_points,), dtype=[(\"r\", np.float64), (\"i\", np.float64)]\r\n )\r\n\r\n start_global_index = int(np.uint64(metadata['start_time']* sample_rate)) + 1\r\n\r\n # set up top level directory\r\n shutil.rmtree(chdir, ignore_errors=True)\r\n os.makedirs(chdir)\r\n\r\n\r\n # init\r\n dwo = digital_rf.DigitalRFWriter(\r\n chdir,\r\n dtype_str,\r\n sub_cadence_secs,\r\n file_cadence_seconds*1000, # it's in milliseconds\r\n start_global_index,\r\n sample_rate_numerator,\r\n sample_rate_denominator,\r\n uuid,\r\n compression_level,\r\n checksum,\r\n is_complex,\r\n num_subchannels,\r\n is_continuous,\r\n marching_periods,\r\n )\r\n\r\n # write\r\n for i in range(n_points):\r\n arr_data[\"r\"][i*vector_length:i*vector_length+vector_length] = orjson.loads(rstrm_real[i][1][b'data'])\r\n arr_data[\"i\"][i*vector_length:i*vector_length+vector_length] = orjson.loads(rstrm_imag[i][1][b'data'])\r\n result = dwo.rf_write(arr_data) \r\n\r\n # close\r\n dwo.close()\r\n\r\n\r\n # METADATA\r\n # metadata parameters\r\n file_name = \"metadata\"\r\n metadata_dir = os.path.join(chdir, \"metadata\")\r\n\r\n shutil.rmtree(metadata_dir, ignore_errors=True)\r\n os.makedirs(metadata_dir)\r\n\r\n dmw = digital_rf.DigitalMetadataWriter(\r\n metadata_dir,\r\n sub_cadence_secs,\r\n file_cadence_seconds,\r\n sample_rate_numerator,\r\n sample_rate_denominator,\r\n file_name,\r\n )\r\n print(\"first create okay\")\r\n\r\n data_dict = {}\r\n # To save an array of data, make sure the first axis has the same length\r\n # as the samples index\r\n idx_arr = np.arange(10, dtype=np.int64) + start_global_index\r\n\r\n # TODO: This may not be the best way of setting the center frequency\r\n # but this is the only way we were able to get accurate bounds from the board's data\r\n data_dict[\"center_frequencies\"] = [metadata['sfreq'] /4 ] # [metadata['cfreq'] ]\r\n\r\n\r\n sub_dict_processing = metadata['processing']\r\n data_dict[\"processing\"] = sub_dict_processing\r\n\r\n # Not real values for the receiver; copied from a Digital RF recording we already had\r\n sub_dict_receiver = metadata['receiver']\r\n data_dict[\"receiver\"] 
= sub_dict_receiver\r\n\r\n data_dict[\"sample_rate_denominator\"] = metadata['sample_rate_denominator']\r\n data_dict[\"sample_rate_numerator\"] = metadata['sample_rate_numerator']\r\n data_dict[\"uuid_str\"] = metadata['uuid_str']\r\n\r\n dmw.write(idx_arr, data_dict)\r\n\r\n return datadir\r\n\r\n\r\n@dash.callback(\r\n dash.Output('download-board-data', 'data'),\r\n dash.Input({'type': 'download-button', 'index': dash.ALL,}, 'n_clicks'),\r\n dash.State({'type': 'download-board-picker', 'index': dash.ALL,}, 'value'),\r\n dash.State({'type': 'duration-download-input', 'index': dash.ALL,}, 'value'),\r\n dash.State({'type': 'download-time-unit-dropdown', 'index': dash.ALL,}, 'value'),\r\n dash.State({'type': 'download-name-input', 'index': dash.ALL,}, 'value'),\r\n)\r\ndef handle_download_request(n, board, duration, time_unit, name):\r\n \"\"\"\r\n Handles a board data request once the user hits submit on the board request form. \r\n\r\n\r\n NOTE: This function was put together last minute. It's very sloppy and needs heavy reworking. \r\n \"\"\"\r\n if not n or n[0] < 1:\r\n raise dash.exceptions.PreventUpdate\r\n\r\n if time_unit[0] == 's':\r\n dur = duration[0]\r\n elif time_unit[0] == \"ms\":\r\n dur = duration[0] / 1e3\r\n else: # usec\r\n dur = duration[0] / 1e6\r\n\r\n board_name = board[0]\r\n\r\n req = {\r\n 'duration' : dur\r\n }\r\n\r\n req_id = cfg.redis_instance.incr(f'board-request-id:{board_name}')\r\n\r\n res_prefix = f'board-responses:{board_name}:{req_id}'\r\n\r\n\r\n print(f\"DOWNLOAD REQ ID: {req_id} for BOARD NAME: {board_name}\")\r\n cfg.redis_instance.publish(f'board-requests:{board_name}:{req_id}', orjson.dumps(req))\r\n\r\n time.sleep(0.3)\r\n\r\n\r\n # get data\r\n # poll data status\r\n status = cfg.redis_instance.get(f'{res_prefix}:complete').decode()\r\n print(f\"Got status: {status}\")\r\n while status == \"False\":\r\n time.sleep(0.5)\r\n status = cfg.redis_instance.get(f'{res_prefix}:complete').decode()\r\n print(f\"Got status: {status}\")\r\n print(\"Status is complete\")\r\n\r\n\r\n # get entire stream\r\n # TODO: if the data is large, this should likely be broken into multiple steps\r\n rstrm_real = cfg.redis_instance.xrange(f'{res_prefix}:real') \r\n rstrm_imag = cfg.redis_instance.xrange(f'{res_prefix}:imag')\r\n\r\n # get metadata\r\n rstrm = cfg.redis_instance.xread({f'{res_prefix}:metadata'.encode(): '0-0'.encode()}, block=10000, count=1) \r\n metadata = orjson.loads(rstrm[0][1][0][1][b'data'])\r\n print(f\"received download metadata:\\n{metadata}\")\r\n\r\n\r\n # create the Digital RF file using this data\r\n datadir = write_drf_file(rstrm_real, rstrm_imag, metadata)\r\n print(\"Done writing\")\r\n\r\n\r\n # Delete the data from Redis\r\n cfg.redis_instance.delete(f'{res_prefix}:real')\r\n cfg.redis_instance.delete(f'{res_prefix}:imag')\r\n cfg.redis_instance.delete(f'{res_prefix}:metadata')\r\n\r\n print(\"Making zip file...\")\r\n\r\n # Zip up the DigitalRF directory and send to the user's browser\r\n zip_file_name = name[0]\r\n # TODO: Delete the zip file from the web server after some amount of time???\r\n zip_path = shutil.make_archive(zip_file_name, 'zip', datadir)\r\n shutil.rmtree(datadir, ignore_errors=True)\r\n\r\n\r\n print(\"Sending file to user...\")\r\n return dcc.send_file(zip_path)" ]
[ [ "numpy.longdouble", "numpy.arange", "numpy.uint64", "numpy.ones" ] ]
tbennun/pytorch
[ "39a2acbd8e7b50b77a728f48f0a3ebb49fd4e982" ]
[ "torch/fx/experimental/fx2trt/converters/acc_ops_converters.py" ]
[ "import math\nimport operator\n\nimport torch.fx.experimental.fx_acc.acc_ops as acc_ops\nimport torch.fx.experimental.fx_acc.acc_utils as acc_utils\nimport numpy as np\nimport tensorrt as trt\nimport torch\nfrom torch.fx.experimental.fx2trt.fx2trt import (\n tensorrt_converter,\n torch_dtype_from_trt,\n get_dynamic_dims,\n)\nfrom typing import Optional\n\n\ndef to_numpy(tensor: Optional[torch.Tensor]):\n \"\"\"\n Convert a PyTorch Tensor to a Numpy Array.\n \"\"\"\n if tensor is None:\n return tensor\n\n if tensor.is_quantized:\n tensor = tensor.dequantize()\n\n assert isinstance(tensor, torch.Tensor), f\"to_numpy can't be called on None or a torch.Tensor, got: {tensor}\"\n\n return tensor.cpu().detach().contiguous().numpy()\n\n\ndef has_dynamic_shape(shape):\n return any(s == -1 for s in shape)\n\n\ndef get_axes_for_reduce_op(dim, has_implicit_batch_dimension):\n if isinstance(dim, int):\n dim = (dim,)\n\n if has_implicit_batch_dimension:\n assert 0 not in dim, \"Can't reduce over batch dimension when it's implicit.\"\n\n axes = 0\n for d in dim:\n axes |= 1 << (d - (1 if has_implicit_batch_dimension else 0))\n\n return axes\n\n\ndef create_constant(network, tensor, name, dtype):\n if isinstance(tensor, int):\n tensor = torch.IntTensor([tensor])\n\n if isinstance(tensor, float):\n tensor = torch.Tensor([tensor])\n\n if dtype:\n tensor = tensor.to(dtype)\n\n constant = network.add_constant(tensor.shape, to_numpy(tensor))\n constant.name = name\n return constant.get_output(0)\n\n\ndef get_trt_tensor(network, input_val, name, dtype=None) -> \"trt.tensorrt.ITensor\":\n if isinstance(input_val, (torch.Tensor, int, float)):\n return create_constant(network, input_val, name, dtype)\n elif not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"Received input {input_val} of name {name} that \"\n \"is not part of the TensorRT region!\"\n )\n else:\n return input_val\n\n\ndef append_ones(network, input, name, num_prepend_ones):\n layer = network.add_shuffle(input)\n\n if has_dynamic_shape(input.shape):\n input_shape_layer = network.add_shape(input)\n input_shape_layer.name = f\"{name}_broadcast_orig_shape\"\n prepend_shape_layer = network.add_constant(\n (num_prepend_ones,), np.ones((num_prepend_ones,), dtype=np.int32)\n )\n prepend_shape_layer.name = f\"{name}_broadcast_prepend_ones\"\n reshape_dim_layer = network.add_concatenation(\n [prepend_shape_layer.get_output(0), input_shape_layer.get_output(0)]\n )\n reshape_dim_layer.axis = 0\n reshape_dim_layer.name = f\"{name}_broadcast_final_shape\"\n layer.set_input(1, reshape_dim_layer.get_output(0))\n else:\n layer.reshape_dims = (1,) * num_prepend_ones + tuple(input.shape)\n\n layer.name = name\n return layer.get_output(0)\n\n\ndef broadcast(network, a, b, a_name, b_name, preset_diff=0):\n \"\"\"\n Broadcast two TensorRT tensors to the same number of dimensions by\n prepending 1s to the tensor with less number of dimensions.\n\n Args:\n network: TensorRT network object.\n a: A TensorRT tensor.\n b: A TensorRT tensor.\n a_name: Name of tensor a.\n b_name: Name of tensor b.\n preset_diff: The difference of number of dimensions after broadcast.\n A positive number means after broadcast, tensor `a` would have\n `preset_diff` more dimensions than `b`. This is used in matmul,\n since we need to broadcast tensors but not always to the same\n number of dimension. 
The reason is that matmul supports Matrix\n x Vector and in this case broadcasted vector should have 1 less\n number of dimensions than the matrix tensor.\n \"\"\"\n a_shape = tuple(a.shape)\n b_shape = tuple(b.shape)\n\n diff = len(a_shape) - len(b_shape) - preset_diff\n if diff > 0:\n b = append_ones(network, b, f\"{b_name}_broadcast\", diff)\n elif diff < 0:\n a = append_ones(network, a, f\"{a_name}_broadcast\", -diff)\n\n return a, b\n\ndef add_binary_elementwise_layer(network, lhs_val, rhs_val, op_type, name):\n \"\"\"\n This function adds a TensorRT elementwise layer. We only allow at most one\n operand to not be a trt tensor, otherwise, we should const fold it first.\n If any operand is not a trt tensor, we make it a trt constant layer which\n has the same type as the other trt tensor. Then we broadcast these two inputs\n to have the same number of dimensions.\n\n Limitation:\n If we are using implicit batch dim mode, the operand that is not a trt\n tensor are not allowed to have larger ranks than the trt tensor operand.\n\n Args:\n network: TensorRT network object.\n lhs_val: Left operand of the binary operation. Could be a TensorRT tensor,\n a PyTorch tensor or a simple value.\n rhs_val: Right operand of the binary operation. Similar to lhs_val.\n op_type: Type of the TensorRT elementwise binary operation.\n name: The name we want to assign to the created TensorRT layer.\n\n Returns:\n The output of TensorRT elementwise layer.\n \"\"\"\n dtype = None\n is_lhs_trt_tensor = False\n is_rhs_trt_tensor = False\n if isinstance(lhs_val, trt.tensorrt.ITensor):\n dtype = torch_dtype_from_trt(lhs_val.dtype)\n is_lhs_trt_tensor = True\n if isinstance(rhs_val, trt.tensorrt.ITensor):\n dtype = torch_dtype_from_trt(rhs_val.dtype)\n is_rhs_trt_tensor = True\n if not is_lhs_trt_tensor and not is_rhs_trt_tensor:\n raise RuntimeError(f\"Both operands of the binary elementwise op {name}\"\n \"are constant. 
In this case, please consider constant folding the model first.\")\n\n lhs_val = get_trt_tensor(network, lhs_val, f\"{name}_lhs\", dtype)\n rhs_val = get_trt_tensor(network, rhs_val, f\"{name}_rhs\", dtype)\n\n # Check the limitation in the doc string.\n if network.has_implicit_batch_dimension:\n if is_lhs_trt_tensor and not is_rhs_trt_tensor:\n assert len(lhs_val.shape) >= len(rhs_val.shape)\n elif not is_lhs_trt_tensor and is_rhs_trt_tensor:\n assert len(rhs_val.shape) >= len(lhs_val.shape)\n\n lhs_val, rhs_val = broadcast(\n network, lhs_val, rhs_val, f\"{name}_lhs\", f\"{name}_rhs\"\n )\n layer = network.add_elementwise(lhs_val, rhs_val, op_type)\n layer.name = name\n return layer.get_output(0)\n\n\ndef add_unary_layer(network, input_val, operation_type, name):\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"{operation_type} received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n layer = network.add_unary(input_val, operation_type)\n layer.name = name\n return layer.get_output(0)\n\n\ndef add_activation_layer(network, input_val, operation_type, name):\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"{operation_type} received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n layer = network.add_activation(input_val, operation_type)\n layer.name = name\n return layer.get_output(0)\n\n\ndef add_transpose_layer(\n network, input_val, dim_0, dim_1, name, ignore_implicit_batch=False\n):\n \"\"\"Adds a transpose layer to the TensorRT network\n Args:\n network: TensorRT Network object\n input_val: tensorrt.ITensor\n dim_0, dim_1: dimensions for transpose, e.g. dim_0=1, dim_1=0 means transpose\n the first two dimensions\n name: Name of the layer\n ignore_implicit_batch: activations might have implicit batch, but weights do\n not, when this is True, we'll ignore the implicit batch and use the dimension\n argument as is\n Returns:\n output TensorRT ITensor from the transpose layer\n \"\"\"\n if not ignore_implicit_batch and network.has_implicit_batch_dimension:\n assert (\n dim_0 != 0 and dim_1 != 0\n ), \"It's not allowed to call transpose on non-constant when batch dim is implicit!\"\n dim_0 -= 1\n dim_1 -= 1\n\n permutation = list(range(len(input_val.shape)))\n permutation[dim_0] = dim_1\n permutation[dim_1] = dim_0\n\n layer = network.add_shuffle(input_val)\n layer.second_transpose = tuple(permutation)\n layer.name = name\n return layer.get_output(0)\n\ndef add_matrix_multiply_layer(network, input_val, other_val, name, transpose_input=False, transpose_other=False):\n \"\"\" Adds a matrix multiply layer to the TensorRT network\n Args:\n network: TensorRT Network\n input_val: input matrix/vector TensorRT ITensor\n other_val: another input matrix/vector TensorRT ITensor\n name: Name of the matrix multiply layer\n transpose_input: boolean indicating whether to transpose the input_val Tensor or not\n transpose_other: boolean indicating whether to transpose the other_val Tensor or not\n Returns:\n output TensorRT ITensor from the matrix multiply layer\n \"\"\"\n input_matrix_op = other_matrix_op = trt.MatrixOperation.NONE\n preset_diff = 0\n\n if len(input_val.shape) == 1:\n assert not transpose_input, \"can't transpose input vector\"\n preset_diff -= 1\n input_matrix_op = trt.MatrixOperation.VECTOR\n elif transpose_input:\n input_matrix_op = trt.MatrixOperation.TRANSPOSE\n\n if len(other_val.shape) == 1:\n assert not transpose_other, \"can't transpose other vector\"\n preset_diff += 1\n 
other_matrix_op = trt.MatrixOperation.VECTOR\n elif transpose_other:\n other_matrix_op = trt.MatrixOperation.TRANSPOSE\n\n\n input_val, other_val = broadcast(network, input_val, other_val, f\"{name}_input\", f\"{name}_other\", preset_diff)\n layer = network.add_matrix_multiply(input_val, input_matrix_op, other_val, other_matrix_op)\n layer.name = name\n return layer.get_output(0)\n\ndef process_attr(val, num_elem):\n if not isinstance(val, tuple):\n val = (val,) * num_elem\n return val\n\n\n@tensorrt_converter(acc_ops.conv2d)\ndef acc_ops_conv2d(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"Conv2d received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n if has_dynamic_shape(input_val.shape):\n assert input_val.shape[1] != -1, \"Channel dim can't be dynamic for convolution.\"\n\n # for now we'll assume bias is constant Tensor or None,\n # and bias being ITensor is not supported in TensorRT api\n # right now\n bias = to_numpy(kwargs[\"bias\"])\n\n if network.has_explicit_precision:\n weight = get_trt_tensor(network, kwargs[\"weight\"], f\"{name}_weight\")\n weight_shape = tuple(kwargs[\"weight\"].shape)\n # will need to use uninitialized weight and set it later to support\n # ITensor weights\n dummy_weight = trt.Weights()\n\n layer = network.add_convolution(\n input=input_val,\n num_output_maps=weight.shape[0],\n kernel_shape=weight.shape[2:],\n kernel=dummy_weight,\n bias=bias,\n )\n\n layer.set_input(1, weight)\n else:\n weight = to_numpy(kwargs[\"weight\"])\n layer = network.add_convolution(\n input=input_val,\n num_output_maps=weight.shape[0],\n kernel_shape=weight.shape[2:],\n kernel=weight,\n bias=bias,\n )\n\n layer.name = name\n layer.stride = kwargs[\"stride\"]\n layer.padding = kwargs[\"padding\"]\n layer.dilation = kwargs[\"dilation\"]\n if kwargs[\"groups\"] is not None:\n layer.num_groups = kwargs[\"groups\"]\n\n return layer.get_output(0)\n\n\n@tensorrt_converter(acc_ops.flatten)\ndef acc_ops_flatten(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"flatten received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n num_dims = len(input_val.shape) + (1 if network.has_implicit_batch_dimension else 0)\n start_dim = (kwargs[\"start_dim\"] if \"start_dim\" in kwargs else 0) % num_dims\n end_dim = (kwargs[\"end_dim\"] if \"end_dim\" in kwargs else -1) % num_dims\n\n if network.has_implicit_batch_dimension:\n assert start_dim != 0, \"Can't flatten batch dimension when it's implicit.\"\n start_dim -= 1\n end_dim -= 1\n\n layer = network.add_shuffle(input_val)\n layer.name = name\n\n # If there're dynamic shapes then we need to use shape layers\n # to figure out the final shape after flatten. We first slice\n # the input shape to three parts:\n # 1. dimensions before start_dim\n # 2. dimensions between start_dim and end_dim\n # 3. dimensions after end_dim\n # Part 1 and 3 might not exist if start_dim is 0 or end_dim is\n # last dim. Then we do a reduced multiplication over part 2 to\n # get flattened dim. 
Finally, we concatenate the three parts to\n # get the final shape.\n if has_dynamic_shape(input_val.shape):\n input_shape_layer = network.add_shape(input_val)\n input_shape_layer.name = f\"{name}_orig_shape\"\n\n final_shapes = []\n\n # Shapes before start_dim\n if start_dim > 0:\n prefix_shape_layer = network.add_slice(\n input_shape_layer.get_output(0),\n start=(0,),\n shape=(start_dim,),\n stride=(1,),\n )\n prefix_shape_layer.name = f\"{name}_pre_shape\"\n final_shapes.append(prefix_shape_layer.get_output(0))\n\n flatten_shape_layer = network.add_slice(\n input_shape_layer.get_output(0),\n start=(start_dim,),\n shape=(end_dim - start_dim + 1,),\n stride=(1,),\n )\n flatten_shape_layer.name = f\"{name}_need_flatten\"\n flatten_shape_layer = network.add_reduce(\n flatten_shape_layer.get_output(0),\n trt.ReduceOperation.PROD,\n axes=get_axes_for_reduce_op(0, False),\n keep_dims=True,\n )\n flatten_shape_layer.name = f\"{name}_flatten_dim\"\n final_shapes.append(flatten_shape_layer.get_output(0))\n\n # Shapes after start_dim\n if end_dim < len(input_val.shape) - 1:\n suffix_shape_layer = network.add_slice(\n input_shape_layer.get_output(0),\n start=(end_dim + 1,),\n shape=(len(input_val.shape) - end_dim - 1,),\n stride=(1,),\n )\n suffix_shape_layer.name = f\"{name}_suffix_shape\"\n final_shapes.append(suffix_shape_layer.get_output(0))\n\n final_shape_layer = network.add_concatenation(final_shapes)\n final_shape_layer.axis = 0\n final_shape_layer.name = f\"{name}_final_shape\"\n layer.set_input(1, final_shape_layer.get_output(0))\n else:\n final_shape = []\n flatten_dim = 1\n for i, s in enumerate(input_val.shape):\n if i >= start_dim and i <= end_dim:\n flatten_dim *= s\n elif i == end_dim + 1:\n final_shape.append(flatten_dim)\n final_shape.append(s)\n else:\n final_shape.append(s)\n if end_dim == len(input_val.shape) - 1:\n final_shape.append(flatten_dim)\n\n layer.reshape_dims = tuple(final_shape)\n\n return layer.get_output(0)\n\n\n# For implicit batch dim mode, we use this to represent batch dim if we\n# ever trying to retrieve it via size() and we hope it will fail hard if\n# it's used somewhere else.\nIMPLICIT_BATCH_DIM = -999\n\n\n@tensorrt_converter(acc_ops.size)\ndef acc_ops_size(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"size received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n if not has_dynamic_shape(input_val.shape):\n if network.has_implicit_batch_dimension:\n return torch.Size((IMPLICIT_BATCH_DIM,) + tuple(input_val.shape))\n return torch.Size(input_val.shape)\n\n layer = network.add_shape(input_val)\n layer.name = name\n return layer.get_output(0)\n\n\n@tensorrt_converter(acc_ops.batch_norm)\ndef acc_ops_batch_norm(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"BatchNorm2d received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n if has_dynamic_shape(input_val.shape):\n assert input_val.shape[1] != -1, \"Channel dim can't be dynamic for batch norm.\"\n\n scale = to_numpy(kwargs[\"weight\"]) / np.sqrt(\n to_numpy(kwargs[\"running_var\"]) + kwargs[\"eps\"]\n )\n bias = (\n to_numpy(kwargs[\"bias\"])\n - to_numpy(kwargs[\"running_mean\"]) * scale\n )\n power = np.ones_like(scale)\n\n layer = network.add_scale(input_val, trt.ScaleMode.CHANNEL, bias, scale, power)\n layer.name = name\n\n return 
layer.get_output(0)\n\n@tensorrt_converter(acc_ops.layer_norm)\ndef acc_ops_layer_norm(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(f\"LayerNorm received input {input_val} that is not part \"\n \"of the TensorRT region!\")\n\n shape = kwargs[\"weight\"].shape\n broadcasted_shape = (1,) * (len(input_val.shape) - len(shape)) + shape\n gamma = to_numpy(kwargs[\"weight\"].reshape(*shape))\n beta = to_numpy(kwargs[\"bias\"].reshape(*shape))\n eps = kwargs[\"eps\"]\n normalized_shape = kwargs[\"normalized_shape\"]\n\n axes = 0\n for d in range(len(normalized_shape)):\n axes |= 1 << (len(input_val.shape) - d - 1)\n\n # E[x]\n mean_expected_layer = network.add_reduce(input_val, trt.ReduceOperation.AVG, axes, keep_dims=True)\n mean_expected_layer.name = f\"{name}_mean_expected\"\n # X-E[x]\n sub_trt = add_binary_elementwise_layer(\n network, input_val, mean_expected_layer.get_output(0), trt.ElementWiseOperation.SUB, f\"{name}_sub\"\n )\n # Variance = mean(pow(x_sub_mean,2))\n pow_tensor = network.add_constant(\n (1,) * len(input_val.shape), trt.Weights(np.ascontiguousarray([2.0], dtype=np.float32))\n )\n pow_tensor.name = f\"{name}_power\"\n pow_var = add_binary_elementwise_layer(\n network, sub_trt, pow_tensor.get_output(0), trt.ElementWiseOperation.POW, f\"{name}_pow_var\"\n )\n mean_trt_layer = network.add_reduce(pow_var, trt.ReduceOperation.AVG, axes, keep_dims=True)\n mean_trt_layer.name = f\"{name}_mean\"\n # Variance + eps\n eps_tensor = network.add_constant(\n (1,) * len(input_val.shape), trt.Weights(np.ascontiguousarray([eps], dtype=np.float32))\n )\n eps_tensor.name = f\"{name}_eps\"\n add_trt = add_binary_elementwise_layer(\n network, mean_trt_layer.get_output(0), eps_tensor.get_output(0), trt.ElementWiseOperation.SUM, f\"{name}_add\"\n )\n # SQRT((Var + eps))\n sqrt_trt = add_unary_layer(network, add_trt, trt.UnaryOperation.SQRT, f\"{name}_sqrt\")\n # (x - E[x]) / sqrt((var + eps))\n div_trt = add_binary_elementwise_layer(network, sub_trt, sqrt_trt, trt.ElementWiseOperation.DIV, f\"{name}_div_trt\")\n\n gamma_tensor = network.add_constant(gamma.shape, trt.Weights(np.ascontiguousarray(gamma)))\n gamma_tensor.name = f\"{name}_gamma\"\n beta_tensor = network.add_constant(gamma.shape, trt.Weights(np.ascontiguousarray(beta)))\n beta_tensor.name = f\"{name}_beta\"\n # y * gamma + beta\n scale_layer = add_binary_elementwise_layer(\n network, div_trt, gamma_tensor.get_output(0), trt.ElementWiseOperation.PROD, f\"{name}_scale\"\n )\n return add_binary_elementwise_layer(\n network, scale_layer, beta_tensor.get_output(0), trt.ElementWiseOperation.SUM, name\n )\n\n@tensorrt_converter(acc_ops.softmax)\ndef acc_ops_softmax(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n dim = kwargs[\"dim\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"softmax received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n # Used to get dim when dim is None. 
Copied from PyTorch softmax implementation.\n def get_softmax_dim(ndim):\n if ndim == 0 or ndim == 1 or ndim == 3:\n ret = 0\n else:\n ret = 1\n return ret\n\n if dim is None:\n dim = get_softmax_dim(\n len(input_val.shape)\n if not network.has_implicit_batch_dimension\n else len(input_val.shape) + 1\n )\n\n if network.has_implicit_batch_dimension:\n assert dim != 0, \"Can't apply softmax on batch dimension when it's implicit.\"\n dim = (dim % (len(input_val.shape) + 1)) - 1\n\n layer = network.add_softmax(input_val)\n layer.axes = 1 << dim\n layer.name = name\n return layer.get_output(0)\n\n@tensorrt_converter(acc_ops.tile)\ndef acc_ops_tile(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"tile received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n dims = kwargs[\"dims\"]\n n_input_dims = len(input_val.shape) + (1 if network.has_implicit_batch_dimension else 0)\n\n if len(dims) > n_input_dims:\n assert not network.has_implicit_batch_dimension\n layer = network.add_shuffle(input_val)\n layer.name = f\"{name}_reshape\"\n num_preceding_ones = len(dims) - n_input_dims\n\n if len(get_dynamic_dims(input_val.shape)) > 1:\n input_shape_layer = network.add_shape(input_val)\n input_shape_layer.name = f\"{name}_input_shape\"\n preceding_ones = network.add_constant(\n (num_preceding_ones,), np.ascontiguousarray([1] * num_preceding_ones, np.int32)\n ).get_output(0)\n reshape_layer = network.add_concatenation([preceding_ones, input_shape_layer.get_output(0)])\n reshape_layer.axis = 0\n reshape_layer.name = f\"{name}_reshape_dims\"\n layer.set_input(1, reshape_layer.get_output(0))\n else:\n layer.reshape_dims = (1,) * (len(dims) - n_input_dims) + tuple(input_val.shape)\n input_val = layer.get_output(0)\n else:\n dims = (1,) * (n_input_dims - len(dims)) + dims\n\n if network.has_implicit_batch_dimension:\n assert dims[0] == 1, \"Can't tile the batch dim when it's implicit.\"\n dims = dims[1:]\n\n starts = [0] * len(dims)\n shapes = [i * j for i, j in zip(input_val.shape, dims)]\n # If there's dynmaic dim then there would be negative dims in shapes which is not allowed.\n # Here we build a dummy shapes array.\n if has_dynamic_shape(input_val.shape):\n shapes = [1] * len(dims)\n strides = [1] * len(dims)\n layer = network.add_slice(input_val, starts, shapes, strides)\n layer.mode = trt.SliceMode.WRAP\n layer.name = name\n\n if has_dynamic_shape(input_val.shape):\n starts_tensor = network.add_constant(\n (len(dims),), np.ascontiguousarray([0] * len(dims), np.int32)\n ).get_output(0)\n dims_tensor = network.add_constant(\n (len(dims),), np.ascontiguousarray(dims, np.int32)\n ).get_output(0)\n input_shape_layer = network.add_shape(input_val)\n input_shape_layer.name = f\"{name}_slice_input_shape\"\n slice_shapes_tensor = add_binary_elementwise_layer(\n network,\n input_shape_layer.get_output(0),\n dims_tensor,\n trt.ElementWiseOperation.PROD,\n f\"{name}_slice_shapes\",\n )\n layer.set_input(1, starts_tensor)\n layer.set_input(2, slice_shapes_tensor)\n\n return layer.get_output(0)\n\n@tensorrt_converter(acc_ops.relu)\ndef acc_ops_relu(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.ActivationType.RELU\n return add_activation_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.sin)\ndef acc_ops_sin(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = 
trt.UnaryOperation.SIN\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.cos)\ndef acc_ops_cos(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.COS\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.tan)\ndef acc_ops_tan(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.TAN\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.sinh)\ndef acc_ops_sinh(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.SINH\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.cosh)\ndef acc_ops_cosh(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.COSH\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.tanh)\ndef acc_ops_tanh(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.ActivationType.TANH\n return add_activation_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.asin)\ndef acc_ops_asin(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.ASIN\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.acos)\ndef acc_ops_acos(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.ACOS\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.atan)\ndef acc_ops_atan(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.ATAN\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.exp)\ndef acc_ops_exp(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.EXP\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.log)\ndef acc_ops_log(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.LOG\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.sqrt)\ndef acc_ops_sqrt(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.SQRT\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.reciprocal)\ndef acc_ops_reciprocal(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.RECIP\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.abs)\ndef acc_ops_abs(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.ABS\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.neg)\ndef acc_ops_neg(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.NEG\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.floor)\ndef acc_ops_floor(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type 
= trt.UnaryOperation.FLOOR\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.ceil)\ndef acc_ops_ceil(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n operation_type = trt.UnaryOperation.CEIL\n return add_unary_layer(network, input_val, operation_type, name)\n\n\n@tensorrt_converter(acc_ops.sum)\ndef acc_ops_sum(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"sum received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n # If dim is specified, then we are computing reduced sum over certain dimensions.\n # Otherwise, we are dong summation over all elements, which is only supported in\n # explicit batch dimension.\n if \"dim\" not in kwargs:\n assert (\n not network.has_implicit_batch_dimension\n ), \"Do not support sum all the elements for implicit batch.\"\n dim = range(0, len(input_val.shape))\n else:\n dim = kwargs[\"dim\"]\n\n keepdim = False if \"keepdim\" not in kwargs else kwargs[\"keepdim\"]\n layer = network.add_reduce(\n input_val,\n trt.ReduceOperation.SUM,\n get_axes_for_reduce_op(dim, network.has_implicit_batch_dimension),\n keepdim,\n )\n layer.name = name\n return layer.get_output(0)\n\n\ndef add_acc_ops_full_reduce(network, target, args, kwargs, name, reduce_op):\n input_val = kwargs[\"input\"]\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"max received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n assert (\n not network.has_implicit_batch_dimension\n ), \"Do not support max over all the elements for implicit batch.\"\n\n dim = range(len(input_val.shape))\n\n layer = network.add_reduce(\n input_val,\n reduce_op,\n get_axes_for_reduce_op(dim, network.has_implicit_batch_dimension),\n False,\n )\n layer.name = name\n return layer.get_output(0)\n\ndef add_acc_ops_dim_reduce(network, target, args, kwargs, name, reduce_op):\n new_kwargs = kwargs.copy()\n new_kwargs['k'] = 1\n\n\n if reduce_op == trt.ReduceOperation.MAX:\n new_kwargs['largest'] = True\n elif reduce_op == trt.ReduceOperation.MIN:\n new_kwargs['largest'] = False\n new_kwargs['sorted'] = False\n\n\n (topk_out0, topk_out1) = acc_ops_topk(network, target, args, new_kwargs, name + \"_topk\")\n\n topk_out0.name = f\"{name}_topk0\"\n topk_out1.name = f\"{name}_topk1\"\n\n if 'keepdim' in new_kwargs and new_kwargs['keepdim']:\n return (topk_out0, topk_out1)\n\n dim = new_kwargs['dim']\n if network.has_implicit_batch_dimension:\n assert dim != 0, \"can't reduce on dim == 0 when network has implicit batch dimension\"\n # we remove the first dim in the shape tuple when it is implicit\n dim -= 1\n input_val = topk_out0\n shape = input_val.shape\n\n output_shape = []\n for i, s in enumerate(shape):\n if i == dim and s == 1:\n continue\n output_shape.append(s)\n\n shuffle_layer0 = network.add_shuffle(input_val)\n shuffle_layer0.reshape_dims = tuple(output_shape)\n shuffle_layer0.name = name + '_shuffle0'\n\n input_val = topk_out1\n shape = input_val.shape\n\n shuffle_layer1 = network.add_shuffle(input_val)\n shuffle_layer1.reshape_dims = tuple(output_shape)\n shuffle_layer1.name = name + '_shuffle1'\n\n\n return (shuffle_layer0.get_output(0), shuffle_layer1.get_output(0))\n\n@tensorrt_converter(acc_ops.max_full_reduce)\ndef acc_ops_max_full_reduce(network, target, args, kwargs, name):\n return add_acc_ops_full_reduce(network, target, args, kwargs, name, 
trt.ReduceOperation.MAX)\n\n@tensorrt_converter(acc_ops.min_full_reduce)\ndef acc_ops_min_full_reduce(network, target, args, kwargs, name):\n return add_acc_ops_full_reduce(network, target, args, kwargs, name, trt.ReduceOperation.MIN)\n\n@tensorrt_converter(acc_ops.max_dim_reduce)\ndef acc_ops_max_dim_reduce(network, target, args, kwargs, name):\n return add_acc_ops_dim_reduce(network, target, args, kwargs, name, trt.ReduceOperation.MAX)\n\n@tensorrt_converter(acc_ops.min_dim_reduce)\ndef acc_ops_min_dim_reduce(network, target, args, kwargs, name):\n return add_acc_ops_dim_reduce(network, target, args, kwargs, name, trt.ReduceOperation.MIN)\n\n@tensorrt_converter(acc_ops.maximum)\ndef acc_ops_maximum(network, target, args, kwargs, name):\n return add_binary_elementwise_layer(\n network, kwargs[\"input\"], kwargs[\"other\"], trt.ElementWiseOperation.MAX, name\n )\n\n@tensorrt_converter(acc_ops.minimum)\ndef acc_ops_minimum(network, target, args, kwargs, name):\n return add_binary_elementwise_layer(\n network, kwargs[\"input\"], kwargs[\"other\"], trt.ElementWiseOperation.MIN, name\n )\n\n@tensorrt_converter(acc_ops.max_pool2d)\ndef acc_ops_max_pool2d(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"MaxPool2d received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n kernel_size = process_attr(kwargs[\"kernel_size\"], 2)\n stride = process_attr(kwargs[\"stride\"], 2)\n padding = process_attr(kwargs[\"padding\"], 2)\n dilation = process_attr(kwargs[\"dilation\"], 2)\n ceil_mode = kwargs[\"ceil_mode\"]\n\n if dilation != (1, 1):\n raise RuntimeError(\n f\"Only support dilation=(1, 1) for maxpool, but got {dilation}\"\n )\n\n layer = network.add_pooling(\n input=input_val, type=trt.PoolingType.MAX, window_size=kernel_size\n )\n layer.stride = stride\n layer.padding = padding\n layer.name = name\n\n if ceil_mode:\n layer.padding_mode = trt.PaddingMode.EXPLICIT_ROUND_UP\n\n return layer.get_output(0)\n\n\n@tensorrt_converter(acc_ops.squeeze)\ndef acc_ops_squeeze(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"squeeze received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n dim = kwargs[\"dim\"] if \"dim\" in kwargs else None\n # Squeeze with dim=None would only work in explicit batch dim mode without any dynamic\n # dim, which is a very rare case. 
For now we just claim not supporting dim=None.\n assert dim is not None, \"We don't support dim=None right now.\"\n\n dim = dim % (len(input_val.shape) + (1 if network.has_implicit_batch_dimension else 0))\n if network.has_implicit_batch_dimension:\n assert dim != 0, \"We don't support squeeze batch dim when it's implicit.\"\n dim -= 1\n\n assert input_val.shape[dim] != -1, \"We don't support squeeze dynamic dim.\"\n assert (\n len(get_dynamic_dims(input_val.shape)) <= 1\n ), \"Currently more than one dynamic dim for input to squeeze is not supported.\"\n\n output_shape = []\n for i, s in enumerate(input_val.shape):\n if i == dim and s == 1:\n continue\n output_shape.append(s)\n layer = network.add_shuffle(input_val)\n layer.reshape_dims = tuple(output_shape)\n layer.name = name\n return layer.get_output(0)\n\n\n@tensorrt_converter(acc_ops.add)\ndef acc_ops_add(network, target, args, kwargs, name):\n return add_binary_elementwise_layer(\n network, kwargs[\"input\"], kwargs[\"other\"], trt.ElementWiseOperation.SUM, name\n )\n\n\n@tensorrt_converter(acc_ops.sub)\ndef acc_ops_sub(network, target, args, kwargs, name):\n return add_binary_elementwise_layer(\n network, kwargs[\"input\"], kwargs[\"other\"], trt.ElementWiseOperation.SUB, name\n )\n\n\n@tensorrt_converter(acc_ops.div)\ndef acc_ops_div(network, target, args, kwargs, name):\n return add_binary_elementwise_layer(\n network, kwargs[\"input\"], kwargs[\"other\"], trt.ElementWiseOperation.DIV, name\n )\n\n\n@tensorrt_converter(acc_ops.mul)\ndef acc_ops_mul(network, target, args, kwargs, name):\n return add_binary_elementwise_layer(\n network, kwargs[\"input\"], kwargs[\"other\"], trt.ElementWiseOperation.PROD, name\n )\n\n@tensorrt_converter(acc_ops.pow)\ndef acc_ops_pow(network, target, args, kwargs, name):\n return add_binary_elementwise_layer(\n network, kwargs[\"input\"], kwargs[\"exponent\"], trt.ElementWiseOperation.POW, name\n )\n\n@tensorrt_converter(acc_ops.unsqueeze)\ndef acc_ops_unsqueeze(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(f\"unsqueeze received input {input_val} that is not part \"\n \"of the TensorRT region!\")\n\n dim = kwargs[\"dim\"]\n if network.has_implicit_batch_dimension:\n assert dim != 0\n dim -= 1\n\n assert len(get_dynamic_dims(input_val.shape)) <= 1, \"Currently we don't support unsqueeze with more than one dynamic dims.\"\n layer = network.add_shuffle(input_val)\n layer.reshape_dims = tuple(input_val.shape)[:dim] + (1,) + tuple(input_val.shape)[dim:]\n layer.name = name\n return layer.get_output(0)\n\n@tensorrt_converter(acc_ops.topk)\ndef acc_ops_topk(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(f\"topk received input {input_val} that is not part \"\n \"of the TensorRT region!\")\n\n if kwargs[\"sorted\"] and kwargs[\"k\"] != 1:\n raise RuntimeError(\"Currently we don't support sorted=True in topk.\")\n\n if not network.has_implicit_batch_dimension and len(input_val.shape) <= 1:\n raise RuntimeError(\"At least 2 dimensions are required for input to topk.\")\n\n num_dims = len(input_val.shape) + (1 if network.has_implicit_batch_dimension else 0)\n k = kwargs[\"k\"]\n dim = (kwargs[\"dim\"] if kwargs[\"dim\"] is not None else -1) % num_dims\n operation = trt.TopKOperation.MAX if kwargs[\"largest\"] else trt.TopKOperation.MIN\n layer = network.add_topk(\n input_val, operation, k, 
get_axes_for_reduce_op(dim, network.has_implicit_batch_dimension)\n )\n layer.name = name\n return layer.get_output(0), layer.get_output(1)\n\n@tensorrt_converter(acc_ops.adaptive_avg_pool2d)\ndef acc_ops_adaptive_avg_pool2d(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"AdaptiveAvgPool2d received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n assert (\n input_val.shape[-2] != -1 and input_val.shape[-1] != -1\n ), \"AdaptiveAvgPool2d currently doesn't support dynamic shapes for last two dims.\"\n output_size = kwargs[\"output_size\"]\n\n for input_dim, output_dim in zip(input_val.shape[-2:], output_size):\n if input_dim % output_dim != 0:\n raise RuntimeError(\n \"For AdaptiveAvgPool, input dim has to be integer multiple of output dim. \"\n f\"Got input dim {input_dim}, output dim {output_dim}\"\n )\n\n stride = (\n input_val.shape[-2] // output_size[0],\n input_val.shape[-1] // output_size[1],\n )\n kernel_size = (\n input_val.shape[-2] - (output_size[0] - 1) * stride[0],\n input_val.shape[-1] - (output_size[1] - 1) * stride[1],\n )\n layer = network.add_pooling(\n input=input_val, type=trt.PoolingType.AVERAGE, window_size=kernel_size\n )\n layer.stride = stride\n layer.name = name\n\n return layer.get_output(0)\n\n\n@tensorrt_converter(acc_ops.avg_pool2d)\ndef acc_ops_avg_pool2d(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"AvgPool2d received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n kernel_size = process_attr(kwargs[\"kernel_size\"], 2)\n stride = process_attr(kwargs[\"stride\"], 2)\n padding = process_attr(kwargs[\"padding\"], 2)\n ceil_mode = kwargs[\"ceil_mode\"]\n count_include_pad = kwargs[\"count_include_pad\"]\n divisor_override = kwargs[\"divisor_override\"]\n\n if divisor_override:\n raise RuntimeError(\"TensorRT does not support divisor_override.\")\n\n layer = network.add_pooling(\n input=input_val, type=trt.PoolingType.AVERAGE, window_size=kernel_size\n )\n layer.stride = stride\n layer.padding = padding\n layer.name = name\n layer.average_count_excludes_padding = False if count_include_pad else True\n\n if ceil_mode:\n layer.padding_mode = trt.PaddingMode.EXPLICIT_ROUND_UP\n\n return layer.get_output(0)\n\n\n@tensorrt_converter(acc_ops.reshape)\ndef acc_ops_reshape(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"Reshape received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n shape = acc_utils.get_field_from_acc_out_ty(kwargs[\"acc_out_ty\"], \"shape\")\n if network.has_implicit_batch_dimension:\n shape = shape[1:]\n\n layer = network.add_shuffle(input_val)\n\n if all(isinstance(s, int) for s in shape):\n layer.reshape_dims = tuple(shape)\n else:\n # Convert all the dimensions to trt Tensors.\n trt_shape = []\n\n for i, s in enumerate(shape):\n if isinstance(s, trt.tensorrt.ITensor):\n if len(s.shape) == 0:\n s = append_ones(network, s, f\"{name}_{i}\", 1)\n trt_shape.append(s)\n else:\n trt_shape.append(\n get_trt_tensor(network, s, f\"{name}_{i}\")\n )\n\n shape_layer = network.add_concatenation(inputs=trt_shape)\n shape_layer.axis = 0\n shape_layer.name = f\"{name}_output_shape\"\n layer.set_input(1, shape_layer.get_output(0))\n\n layer.name = name\n return 
layer.get_output(0)\n\n@tensorrt_converter(acc_ops.slice_tensor)\ndef acc_ops_slice_tensor(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(f\"slice_tensor received input {input_val} that is not part \"\n \"of the TensorRT region!\")\n\n ranks = len(input_val.shape) + (1 if network.has_implicit_batch_dimension else 0)\n dims = [dim % ranks for dim in kwargs[\"dims\"]]\n\n if network.has_implicit_batch_dimension:\n if not len(dims):\n raise RuntimeError(\"dim argument cannot be empty!\")\n if any([dim == 0 for dim in dims]):\n raise RuntimeError(\n f\"We do not support slice_tensor at batch dim when it's implicit, got {dims}!\"\n )\n dims = [d - 1 for d in dims]\n else:\n raise RuntimeError(\"We don't support slice_tensor with explicit batch dimension yet!\")\n\n start = [0] * len(input_val.shape)\n stride = [1] * len(start)\n output_shape = list(input_val.shape)\n starts = kwargs[\"starts\"]\n stops = kwargs[\"stops\"]\n steps = kwargs[\"steps\"]\n\n for i, dim in enumerate(dims):\n start[dim] = starts[i]\n stride[dim] = steps[i]\n output_shape[dim] = (stops[i] - starts[i]) // steps[i]\n\n layer = network.add_slice(input_val, start=start, shape=output_shape, stride=stride)\n layer.name = name\n return layer.get_output(0)\n\n@tensorrt_converter(acc_ops.split)\ndef acc_ops_split(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(f\"split received input {input_val} that is not part \"\n \"of the TensorRT region!\")\n\n dim = kwargs[\"dim\"]\n if network.has_implicit_batch_dimension:\n assert dim != 0, \"Can't split on batch dim when it's implicit!\"\n dim -= 1\n else:\n raise RuntimeError(\"We don't support split with explicit batch dimension yet!\")\n\n split_size = kwargs[\"split_size\"]\n start = [0] * len(input_val.shape)\n stride = [1] * len(start)\n offset = 0\n num_splits = (input_val.shape[dim] + split_size - 1) // split_size\n if num_splits < 1:\n raise RuntimeError(f\"Invalid split: {input_val.shape[dim]} with split_size={split_size}\")\n\n max_offset = input_val.shape[dim]\n # add slice layers\n output = []\n for i in range(num_splits):\n shape = list(input_val.shape)\n shape[dim] = min(split_size, max_offset - offset)\n start[dim] = offset\n layer = network.add_slice(input_val, start=start, shape=shape, stride=stride)\n offset += split_size\n layer.name = f\"{name}_{i}\"\n output.append(layer.get_output(0))\n return output\n\n@tensorrt_converter(acc_ops.linear)\ndef acc_ops_linear(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"Linear received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n dynamic_dims = get_dynamic_dims(input_val.shape)\n assert len(dynamic_dims) < 2 and input_val.shape[-1] != -1, (\n \"Currently we only support one dynamic \"\n \"dim for linear and it can't be the last dim.\"\n )\n\n # TODO: Need to benchmark the performance of lowering linear as fully_connected versus\n # lowering as matmul + add. 
TensorRT documentation suggests to always lower it as\n # matmul + add but we found in some cases this results in performance regression compared\n # with lowering to fully_connected layer.\n layer = network.add_shuffle(input_val)\n layer.reshape_dims = tuple(input_val.shape) + (1, 1)\n layer.name = f\"{name}_pre_shuffle\"\n bias = to_numpy(kwargs[\"bias\"])\n\n if network.has_explicit_precision:\n weight = get_trt_tensor(network, kwargs[\"weight\"], f\"{name}_weight\")\n # will need to use uninitialized weight and set it later to support\n # ITensor weights\n dummy_weight = trt.Weights()\n\n # add fully connected\n layer = network.add_fully_connected(\n input=layer.get_output(0),\n num_outputs=weight.shape[0],\n kernel=dummy_weight,\n bias=bias,\n )\n layer.set_input(1, weight)\n else:\n weight = to_numpy(kwargs[\"weight\"])\n layer = network.add_fully_connected(\n input=layer.get_output(0),\n num_outputs=weight.shape[0],\n kernel=weight,\n bias=bias,\n )\n layer.name = f\"{name}_linear\"\n\n # reshape back\n layer = network.add_shuffle(layer.get_output(0))\n layer.reshape_dims = tuple(input_val.shape[:-1]) + (kwargs[\"weight\"].shape[0],)\n layer.name = f\"{name}_post_shuffle\"\n\n return layer.get_output(0)\n\ndef add_clamp(network, input, val, op):\n acc_ops_clamp_shape = (1,) * len(input.shape) # broadcast all dimensions\n acc_ops_clamp_tensor = (\n val\n * torch.ones(acc_ops_clamp_shape, dtype=torch_dtype_from_trt(input.dtype))\n .cpu()\n .numpy()\n )\n acc_ops_clamp_trt = network.add_constant(acc_ops_clamp_shape, acc_ops_clamp_tensor)\n layer = network.add_elementwise(input, acc_ops_clamp_trt.get_output(0), op)\n\n return layer\n\n\n@tensorrt_converter(acc_ops.clamp)\ndef acc_ops_clamp(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n min_val = kwargs[\"min\"]\n max_val = kwargs[\"max\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"Clamp received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n if min_val is not None:\n clamp_min_layer = add_clamp(\n network, input_val, min_val, trt.ElementWiseOperation.MAX\n )\n clamp_min_layer.name = f\"{name}_clamp_min\"\n input_val = clamp_min_layer.get_output(0)\n if max_val is not None:\n clamp_max_layer = add_clamp(\n network, input_val, max_val, trt.ElementWiseOperation.MIN\n )\n clamp_max_layer.name = f\"{name}_clamp_max\"\n input_val = clamp_max_layer.get_output(0)\n\n return input_val\n\n@tensorrt_converter(acc_ops.tuple_construct)\ndef acc_ops_tuple_construct(network, target, args, kwargs, name):\n return kwargs[\"tensors\"]\n\n\n@tensorrt_converter(acc_ops.contiguous)\ndef acc_ops_contiguous(network, target, args, kwargs, name):\n return kwargs[\"input\"]\n\n\n@tensorrt_converter(acc_ops.getitem)\ndef acc_ops_getitem(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n slices = kwargs[\"idx\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n return operator.getitem(input_val, slices)\n\n assert not has_dynamic_shape(\n input_val.shape\n ), \"Currently we don't support slicing tensor if it has dynamic shape.\"\n\n def num_slice_types(slices):\n \"\"\"\n Gather the number of slice in getitem slices.\n \"\"\"\n num_slice = 0\n for s in slices:\n if isinstance(s, slice) or isinstance(s, int):\n num_slice += 1\n return num_slice\n\n def slice_to_trt_params(py_slice, dim_size):\n \"\"\"\n Convert python slice to TensorRT slice layer parameters.\n \"\"\"\n start = (py_slice.start % dim_size) if py_slice.start else 0\n stride = 
py_slice.step if py_slice.step else 1\n stop = (py_slice.stop % dim_size) if py_slice.stop else dim_size\n size = math.ceil((stop - start) * 1.0 / stride)\n return start, size, stride\n\n if not isinstance(slices, tuple):\n slices = (slices,)\n\n if network.has_implicit_batch_dimension:\n # Raise an error if it's trying to subscript batch dimension unless it's\n # slice(None, None, None).\n batch_subscript = slices[0]\n if batch_subscript != slice(None, None, None):\n raise RuntimeError(\n f\"{name}: Can't subscript batch dimension when it's implicit. Got {slices}\"\n )\n\n # Remove batch_dim subscript\n slices = slices[1:]\n\n # Replace ellipsis with expanded slices.\n # Compute the number of dim ellipsis represent.\n num_ellipsis = len(input_val.shape) - num_slice_types(slices)\n new_slices = []\n for s in slices:\n if s == Ellipsis:\n while num_ellipsis > 0:\n new_slices.append(slice(None, None, None))\n num_ellipsis -= 1\n else:\n new_slices.append(s)\n slices = new_slices\n\n # Build trt slice layer params\n start = []\n size = []\n stride = []\n\n i = 0\n for s in slices:\n if s is None:\n continue\n\n if isinstance(s, slice):\n params = slice_to_trt_params(s, input_val.shape[i])\n start.append(params[0])\n size.append(params[1])\n stride.append(params[2])\n else:\n start.append(s % input_val.shape[i])\n size.append(1)\n stride.append(1)\n i += 1\n\n while i < len(input_val.shape):\n start.append(0)\n size.append(input_val.shape[i])\n stride.append(1)\n i += 1\n\n layer = network.add_slice(\n input=input_val,\n start=start,\n shape=size,\n stride=stride,\n )\n layer.name = name\n\n # Add shuffle layer to insert dimensions for 'None' and remove dimensions for 'int'.\n if any(not isinstance(s, slice) for s in slices):\n slice_out = layer.get_output(0)\n layer = network.add_shuffle(slice_out)\n final_shape = []\n original_idx = 0\n for s in slices:\n # If it's a slice, keep the dim.\n if isinstance(s, slice):\n final_shape.append(slice_out.shape[original_idx])\n original_idx += 1\n # If it's None, extend the dim.\n elif s is None:\n final_shape.append(1)\n # If it's a int, remove the dim.\n else:\n original_idx += 1\n layer.reshape_dims = tuple(final_shape) + tuple(slice_out.shape)[original_idx:]\n\n return layer.get_output(0)\n\n\n@tensorrt_converter(acc_ops.cat)\ndef acc_ops_cat(network, target, args, kwargs, name):\n tensors = kwargs[\"tensors\"]\n\n if any(not isinstance(t, trt.tensorrt.ITensor) for t in tensors):\n raise RuntimeError(\n f\"cat received inputs {tensors} that is not part \" \"of the TensorRT region!\"\n )\n\n layer = network.add_concatenation(inputs=tensors)\n layer.axis = kwargs[\"dim\"] - (1 if network.has_implicit_batch_dimension else 0)\n layer.name = name\n return layer.get_output(0)\n\n\n@tensorrt_converter(acc_ops.matmul)\ndef acc_ops_matmul(network, target, args, kwargs, name):\n input_val = get_trt_tensor(network, kwargs[\"input\"], f\"{name}_input\")\n other_val = get_trt_tensor(network, kwargs[\"other\"], f\"{name}_other\")\n\n for i in [input_val, other_val]:\n if not isinstance(i, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"matmul received input {i} that is not part \" \"of the TensorRT region!\"\n )\n\n return add_matrix_multiply_layer(network, input_val, other_val, name)\n\n@tensorrt_converter(acc_ops.sigmoid)\ndef acc_ops_sigmoid(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"Sigmoid received input {input_val} that is not part \"\n \"of the 
TensorRT region!\"\n )\n\n layer = network.add_activation(input=input_val, type=trt.ActivationType.SIGMOID)\n layer.name = name\n return layer.get_output(0)\n\n\n@tensorrt_converter(acc_ops.permute)\ndef acc_ops_permute(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n ranks = len(input_val.shape) + (1 if network.has_implicit_batch_dimension else 0)\n permutation = [i % ranks for i in kwargs[\"permutation\"]]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(\n f\"permute received input {input_val} that is not part \"\n \"of the TensorRT region!\"\n )\n\n if network.has_implicit_batch_dimension:\n assert permutation[0] == 0, \"Can't permute batch dimension when it's implicit.\"\n permutation = [i - 1 for i in permutation[1:]]\n\n layer = network.add_shuffle(input_val)\n layer.second_transpose = tuple(permutation)\n layer.name = name\n return layer.get_output(0)\n\n@tensorrt_converter(acc_ops.quantize_per_tensor)\ndef acc_ops_quantize_per_tensor(network, target, args, kwargs, name):\n input_val = get_trt_tensor(network, kwargs[\"input\"], f\"{name}_input\")\n\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(f\"{name} received input {input_val} that is not part \"\n \"of the TensorRT region!\")\n\n qparams = acc_utils.get_field_from_acc_out_ty(kwargs[\"acc_out_ty\"], \"qparams\")\n q_scale = qparams[\"scale\"]\n q_zero_point = qparams[\"zero_point\"]\n dtype = acc_utils.get_field_from_acc_out_ty(kwargs[\"acc_out_ty\"], \"dtype\")\n if dtype not in (torch.quint8, torch.qint8, torch.qint32):\n raise RuntimeError(\"Only support (torch.quint8, torch.qint8, torch.qint32) \"\n f\"quantized type in quantize_per_tensor, get {dtype}.\")\n\n if q_zero_point != 0:\n raise RuntimeError(f\"Only support zero_point == 0, get {q_zero_point}\")\n\n scale_layer = network.add_constant((1,), trt.Weights(np.ascontiguousarray([float(q_scale)], dtype=np.float32)))\n scale_layer.name = input_val.name + \".per_tensor_quant.scale\"\n scale = scale_layer.get_output(0)\n # assert trt.__version__ > \"8.0\", \"Explicit quantize op is only supported in \"\n # \"TensorRT 8.0 or above, current TensorRT version:\" + trt.__version__\n layer = network.add_quantize(input=input_val, scale=scale)\n layer.axis = 0\n layer.name = input_val.name + \".per_tensor_quant\"\n return layer.get_output(0)\n\n\n@tensorrt_converter(acc_ops.quantize_per_channel)\ndef acc_ops_quantize_per_channel(network, target, args, kwargs, name):\n input_val = get_trt_tensor(network, kwargs[\"input\"], f\"{name}_input\")\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(f\"{name} received input {input_val} that is not part \"\n \"of the TensorRT region!\")\n\n qparams = acc_utils.get_field_from_acc_out_ty(kwargs[\"acc_out_ty\"], \"qparams\")\n q_per_channel_scales = qparams[\"scale\"]\n q_per_channel_zero_points = qparams[\"zero_point\"]\n q_per_channel_axis = qparams[\"axis\"]\n dtype = acc_utils.get_field_from_acc_out_ty(kwargs[\"acc_out_ty\"], \"dtype\")\n if dtype not in (torch.quint8, torch.qint8, torch.qint32):\n raise RuntimeError(\"Only support (torch.quint8, torch.qint8, torch.qint32) \"\n f\"quantized type in quantize_per_tensor, get {dtype}.\")\n\n # Make sure zero_points are all 0 because only symmetric quantization\n # is supported in TensorRT\n if not torch.equal(\n q_per_channel_zero_points,\n torch.zeros(q_per_channel_zero_points.shape, dtype=q_per_channel_zero_points.dtype)):\n raise RuntimeError(f\"Only support zero_point == 0, get 
{q_per_channel_zero_points}\")\n\n if not torch.all(torch.ge(q_per_channel_scales, 0)):\n raise RuntimeError(f\"All scale values must be >= 0, get {q_per_channel_scales}\")\n\n scale_layer = network.add_constant(\n q_per_channel_scales.shape,\n trt.Weights(np.ascontiguousarray(q_per_channel_scales, dtype=np.float32)))\n scale_layer.name = input_val.name + \".per_channel_quant.scale\"\n scale = scale_layer.get_output(0)\n # assert trt.__version__ > \"8.0\", \"Explicit quantize op is only supported in \"\n # \"TensorRT 8.0 or above, current TensorRT version:\" + trt.__version__\n layer = network.add_quantize(input=input_val, scale=scale)\n layer.axis = q_per_channel_axis\n layer.name = input_val.name + \".per_channel_quant\"\n return layer.get_output(0)\n\n\n@tensorrt_converter(acc_ops.dequantize)\ndef acc_ops_dequantize(network, target, args, kwargs, name):\n input_val = kwargs[\"input\"]\n input_val_tensor_meta = kwargs[\"_itensor_to_tensor_meta\"][input_val]\n\n if not isinstance(input_val, trt.tensorrt.ITensor):\n raise RuntimeError(f\"{name} received input {input_val} that is not part \"\n \"of the TensorRT region!\")\n\n qparams = acc_utils.get_field_from_acc_out_ty(input_val_tensor_meta, \"qparams\")\n qscheme = qparams[\"qscheme\"]\n if qscheme == torch.per_tensor_affine:\n q_scale = qparams[\"scale\"]\n q_zero_point = qparams[\"zero_point\"]\n q_axis = 0\n scale_shape = (1,)\n if q_zero_point != 0:\n raise RuntimeError(f\"Only support zero_point == 0, get {q_zero_point}\")\n elif qscheme == torch.per_channel_affine:\n q_scale = qparams[\"scale\"]\n q_zero_point = qparams[\"zero_point\"]\n q_axis = qparams[\"axis\"]\n scale_shape = q_scale.shape\n if not torch.equal(q_zero_point, torch.zeros(q_zero_point.shape, dtype=q_zero_point.dtype)):\n raise RuntimeError(f\"Only support zero_point == 0, get {q_zero_point}\")\n else:\n raise RuntimeError(\"Unsupported qscheme in dequantize: {qscheme}\")\n\n dtype = acc_utils.get_field_from_acc_out_ty(input_val_tensor_meta, \"dtype\")\n\n if dtype not in (torch.quint8, torch.qint8, torch.qint32):\n raise RuntimeError(\"Only support (torch.quint8, torch.qint8, torch.qint32) \"\n f\"quantized type in dequantize, get {dtype}.\")\n\n scale_layer = network.add_constant(scale_shape, trt.Weights(np.ascontiguousarray(q_scale, dtype=np.float32)))\n scale_layer.name = input_val.name + \".dequant.scale\"\n scale = scale_layer.get_output(0)\n # assert trt.__version__ > \"8.0\", \"Explicit dequantize op is only supported in \"\n # \"TensorRT 8.0 or above, current TensorRT version:\" + trt.__version__\n layer = network.add_dequantize(input=input_val, scale=scale)\n layer.name = input_val.name + \".dequant\"\n layer.axis = q_axis\n return layer.get_output(0)\n" ]
[ [ "torch.fx.experimental.fx2trt.fx2trt.get_dynamic_dims", "torch.Size", "torch.fx.experimental.fx_acc.acc_utils.get_field_from_acc_out_ty", "numpy.ones_like", "torch.ge", "torch.Tensor", "torch.zeros", "numpy.ascontiguousarray", "torch.fx.experimental.fx2trt.fx2trt.tensorrt_converter", "numpy.ones", "torch.fx.experimental.fx2trt.fx2trt.torch_dtype_from_trt", "torch.IntTensor" ] ]
ssabhijith/active-learning
[ "02cd16bbefb23e7a77c80c3d1987bf353e60b5f7" ]
[ "utils/utils.py" ]
[ "# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions for run_experiment.py.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport os\nimport pickle\nimport sys\n\nimport numpy as np\nimport scipy\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import SVC\n\nfrom tensorflow.compat.v1 import gfile\n\n\nfrom kernel_block_solver import BlockKernelSolver\nfrom small_cnn import SmallCNN\nfrom allconv import AllConv\n\n\nclass Logger(object):\n \"\"\"Logging object to write to file and stdout.\"\"\"\n\n def __init__(self, filename):\n self.terminal = sys.stdout\n self.log = gfile.GFile(filename, \"w\")\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n self.terminal.flush()\n\n def flush_file(self):\n self.log.flush()\n\n\ndef create_checker_unbalanced(split, n, grid_size):\n \"\"\"Creates a dataset with two classes that occupy one color of checkboard.\n\n Args:\n split: splits to use for class imbalance.\n n: number of datapoints to sample.\n grid_size: checkerboard size.\n Returns:\n X: 2d features.\n y: binary class.\n \"\"\"\n y = np.zeros(0)\n X = np.zeros((0, 2))\n for i in range(grid_size):\n for j in range(grid_size):\n label = 0\n n_0 = int(n/(grid_size*grid_size) * split[0] * 2)\n if (i-j) % 2 == 0:\n label = 1\n n_0 = int(n/(grid_size*grid_size) * split[1] * 2)\n x_1 = np.random.uniform(i, i+1, n_0)\n x_2 = np.random.uniform(j, j+1, n_0)\n x = np.vstack((x_1, x_2))\n x = x.T\n X = np.concatenate((X, x))\n y_0 = label * np.ones(n_0)\n y = np.concatenate((y, y_0))\n return X, y\n\n\ndef flatten_X(X):\n shape = X.shape\n flat_X = X\n if len(shape) > 2:\n flat_X = np.reshape(X, (shape[0], np.product(shape[1:])))\n return flat_X\n\n\ndef get_mldata(data_dir, name):\n \"\"\"Loads data from data_dir.\n\n Looks for the file in data_dir.\n Assumes that data is in pickle format with dictionary fields data and target.\n\n\n Args:\n data_dir: directory to look in\n name: dataset name, assumes data is saved in the save_dir with filename\n <name>.pkl\n Returns:\n data and targets\n Raises:\n NameError: dataset not found in data folder.\n \"\"\"\n dataname = name\n if dataname == \"checkerboard\":\n X, y = create_checker_unbalanced(split=[1./5, 4./5], n=10000, grid_size=4)\n else:\n filename = os.path.join(data_dir, dataname + \".pkl\")\n if not gfile.Exists(filename):\n raise NameError(\"ERROR: dataset not available\")\n data = pickle.load(gfile.GFile(filename, \"r\"))\n X = data[\"data\"]\n y = data[\"target\"]\n if \"keras\" in dataname:\n X = X / 255\n y = y.flatten()\n return X, y\n\n\ndef filter_data(X, y, keep=None):\n \"\"\"Filters data by class indicated in keep.\n\n Args:\n X: train data\n y: train targets\n keep: defaults to None which will keep everything, otherwise takes a list\n of 
classes to keep\n\n Returns:\n filtered data and targets\n \"\"\"\n if keep is None:\n return X, y\n keep_ind = [i for i in range(len(y)) if y[i] in keep]\n return X[keep_ind], y[keep_ind]\n\n\ndef get_class_counts(y_full, y):\n \"\"\"Gets the count of all classes in a sample.\n\n Args:\n y_full: full target vector containing all classes\n y: sample vector for which to perform the count\n Returns:\n count of classes for the sample vector y, the class order for count will\n be the same as long as same y_full is fed in\n \"\"\"\n classes = np.unique(y_full)\n classes = np.sort(classes)\n unique, counts = np.unique(y, return_counts=True)\n complete_counts = []\n for c in classes:\n if c not in unique:\n complete_counts.append(0)\n else:\n index = np.where(unique == c)[0][0]\n complete_counts.append(counts[index])\n return np.array(complete_counts)\n\n\ndef flip_label(y, percent_random):\n \"\"\"Flips a percentage of labels for one class to the other.\n\n Randomly sample a percent of points and randomly label the sampled points as\n one of the other classes.\n Does not introduce bias.\n\n Args:\n y: labels of all datapoints\n percent_random: percent of datapoints to corrupt the labels\n\n Returns:\n new labels with noisy labels for indicated percent of data\n \"\"\"\n classes = np.unique(y)\n y_orig = copy.copy(y)\n indices = range(y_orig.shape[0])\n np.random.shuffle(indices)\n sample = indices[0:int(len(indices) * 1.0 * percent_random)]\n fake_labels = []\n for s in sample:\n label = y[s]\n class_ind = np.where(classes == label)[0][0]\n other_classes = np.delete(classes, class_ind)\n np.random.shuffle(other_classes)\n fake_label = other_classes[0]\n assert fake_label != label\n fake_labels.append(fake_label)\n y[sample] = np.array(fake_labels)\n assert all(y[indices[len(sample):]] == y_orig[indices[len(sample):]])\n return y\n\n\ndef get_model(method, seed=13):\n \"\"\"Construct sklearn model using either logistic regression or linear svm.\n\n Wraps grid search on regularization parameter over either logistic regression\n or svm, returns constructed model\n\n Args:\n method: string indicating scikit method to use, currently accepts logistic\n and linear svm.\n seed: int or rng to use for random state fed to scikit method\n\n Returns:\n scikit learn model\n \"\"\"\n # TODO(lishal): extend to include any scikit model that implements\n # a decision function.\n # TODO(lishal): for kernel methods, currently using default value for gamma\n # but should probably tune.\n if method == \"logistic\":\n model = LogisticRegression(random_state=seed, multi_class=\"multinomial\",\n solver=\"lbfgs\", max_iter=200)\n params = {\"C\": [10.0**(i) for i in range(-4, 5)]}\n elif method == \"logistic_ovr\":\n model = LogisticRegression(random_state=seed)\n params = {\"C\": [10.0**(i) for i in range(-5, 4)]}\n elif method == \"linear_svm\":\n model = LinearSVC(random_state=seed)\n params = {\"C\": [10.0**(i) for i in range(-4, 5)]}\n elif method == \"kernel_svm\":\n model = SVC(random_state=seed)\n params = {\"C\": [10.0**(i) for i in range(-4, 5)]}\n elif method == \"kernel_ls\":\n model = BlockKernelSolver(random_state=seed)\n params = {\"C\": [10.0**(i) for i in range(-6, 1)]}\n elif method == \"small_cnn\":\n # Model does not work with weighted_expert or simulate_batch\n model = SmallCNN(random_state=seed)\n return model\n elif method == \"allconv\":\n # Model does not work with weighted_expert or simulate_batch\n model = AllConv(random_state=seed)\n return model\n\n else:\n raise NotImplementedError(\"ERROR: 
\" + method + \" not implemented\")\n\n model = GridSearchCV(model, params, cv=3)\n return model\n\n\ndef calculate_entropy(batch_size, y_s):\n \"\"\"Calculates KL div between training targets and targets selected by AL.\n\n Args:\n batch_size: batch size of datapoints selected by AL\n y_s: vector of datapoints selected by AL. Assumes that the order of the\n data is the order in which points were labeled by AL. Also assumes\n that in the offline setting y_s will eventually overlap completely with\n original training targets.\n Returns:\n entropy between actual distribution of classes and distribution of\n samples selected by AL\n \"\"\"\n n_batches = int(np.ceil(len(y_s) * 1.0 / batch_size))\n counts = get_class_counts(y_s, y_s)\n true_dist = counts / (len(y_s) * 1.0)\n entropy = []\n for b in range(n_batches):\n sample = y_s[b * batch_size:(b + 1) * batch_size]\n counts = get_class_counts(y_s, sample)\n sample_dist = counts / (1.0 * len(sample))\n entropy.append(scipy.stats.entropy(true_dist, sample_dist))\n return entropy\n\n\ndef get_train_val_test_splits(X, y, max_points, seed, confusion, seed_batch,\n split=(2./3, 1./6, 1./6)):\n \"\"\"Return training, validation, and test splits for X and y.\n\n Args:\n X: features\n y: targets\n max_points: # of points to use when creating splits.\n seed: seed for shuffling.\n confusion: labeling noise to introduce. 0.1 means randomize 10% of labels.\n seed_batch: # of initial datapoints to ensure sufficient class membership.\n split: percent splits for train, val, and test.\n Returns:\n indices: shuffled indices to recreate splits given original input data X.\n y_noise: y with noise injected, needed to reproduce results outside of\n run_experiments using original data.\n \"\"\"\n np.random.seed(seed)\n X_copy = copy.copy(X)\n y_copy = copy.copy(y)\n\n # Introduce labeling noise\n y_noise = flip_label(y_copy, confusion)\n\n indices = np.arange(len(y))\n\n if max_points is None:\n max_points = len(y_noise)\n else:\n max_points = min(len(y_noise), max_points)\n train_split = int(max_points * split[0])\n val_split = train_split + int(max_points * split[1])\n assert seed_batch <= train_split\n\n # Do this to make sure that the initial batch has examples from all classes\n min_shuffle = 3\n n_shuffle = 0\n y_tmp = y_noise\n\n # Need at least 4 obs of each class for 2 fold CV to work in grid search step\n while (any(get_class_counts(y_tmp, y_tmp[0:seed_batch]) < 4)\n or n_shuffle < min_shuffle):\n np.random.shuffle(indices)\n y_tmp = y_noise[indices]\n n_shuffle += 1\n\n X_train = X_copy[indices[0:train_split]]\n X_val = X_copy[indices[train_split:val_split]]\n X_test = X_copy[indices[val_split:max_points]]\n y_train = y_noise[indices[0:train_split]]\n y_val = y_noise[indices[train_split:val_split]]\n y_test = y_noise[indices[val_split:max_points]]\n # Make sure that we have enough observations of each class for 2-fold cv\n assert all(get_class_counts(y_noise, y_train[0:seed_batch]) >= 4)\n # Make sure that returned shuffled indices are correct\n assert all(y_noise[indices[0:max_points]] ==\n np.concatenate((y_train, y_val, y_test), axis=0))\n return (indices[0:max_points], X_train, y_train,\n X_val, y_val, X_test, y_test, y_noise)\n" ]
[ [ "numpy.product", "numpy.concatenate", "sklearn.svm.LinearSVC", "tensorflow.compat.v1.gfile.GFile", "numpy.where", "numpy.unique", "numpy.zeros", "numpy.delete", "sklearn.svm.SVC", "numpy.array", "sklearn.model_selection.GridSearchCV", "sklearn.linear_model.LogisticRegression", "numpy.random.seed", "tensorflow.compat.v1.gfile.Exists", "numpy.random.shuffle", "numpy.sort", "numpy.ones", "scipy.stats.entropy", "numpy.random.uniform", "numpy.vstack" ] ]
aelerojas/masksRecognition
[ "001348e608fc4edf7f40ea7e5d55898e150d7eaa" ]
[ "export_tfserving.py" ]
[ "import time\nfrom absl import app, flags, logging\nfrom absl.flags import FLAGS\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom yolov3_tf2.models import (\n YoloV3, YoloV3Tiny\n)\nfrom yolov3_tf2.dataset import transform_images\n\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.util import nest\n\nflags.DEFINE_string('weights', './checkpoints/yolov3_train_44.tf',\n 'path to weights file')\nflags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')\nflags.DEFINE_string('output', './serving/yolov3/1', 'path to saved_model')\nflags.DEFINE_string('classes', './data/voc2012.names', 'path to classes file')\nflags.DEFINE_string('image', './data/00002_Mask.jpg', 'path to input image')\nflags.DEFINE_integer('num_classes', 2, 'number of classes in the model')\n\n\ndef main(_argv):\n if FLAGS.tiny:\n yolo = YoloV3Tiny(classes=FLAGS.num_classes)\n else:\n yolo = YoloV3(classes=FLAGS.num_classes)\n\n yolo.load_weights(FLAGS.weights)\n logging.info('weights loaded')\n\n tf.saved_model.save(yolo, FLAGS.output)\n logging.info(\"model saved to: {}\".format(FLAGS.output))\n\n model = tf.saved_model.load(FLAGS.output)\n infer = model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n logging.info(infer.structured_outputs)\n\n class_names = [c.strip() for c in open(FLAGS.classes).readlines()]\n logging.info('classes loaded')\n\n img = tf.image.decode_image(open(FLAGS.image, 'rb').read(), channels=3)\n img = tf.expand_dims(img, 0)\n img = transform_images(img, 416)\n\n t1 = time.time()\n outputs = infer(img)\n boxes, scores, classes, nums = outputs[\"yolo_nms\"], outputs[\n \"yolo_nms_1\"], outputs[\"yolo_nms_2\"], outputs[\"yolo_nms_3\"]\n t2 = time.time()\n logging.info('time: {}'.format(t2 - t1))\n\n logging.info('detections:')\n for i in range(nums[0]):\n logging.info('\\t{}, {}, {}'.format(class_names[int(classes[0][i])],\n scores[0][i].numpy(),\n boxes[0][i].numpy()))\n\n\nif __name__ == '__main__':\n try:\n app.run(main)\n except SystemExit:\n pass\n" ]
[ [ "tensorflow.saved_model.save", "tensorflow.expand_dims", "tensorflow.saved_model.load" ] ]
SamH3pn3r/bitcoin-prediction
[ "90c7f18f3257ad6f2a8fcaa91f36ea99f77b3f0d" ]
[ "pages/insights.py" ]
[ "import dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\nfrom app import app\n\nfrom joblib import load\npipeline = load('assets/pipeline.joblib')\n\nimport plotly.graph_objects as go\nimport pandas as pd\nbitcoin = pd.read_csv('bitcoin.csv')\n\nbitcoin['string_date'] = bitcoin['Date'].copy()\nbitcoin['Date'] = pd.to_datetime(bitcoin['Date'])\nbitcoin['Year'] = bitcoin['Date'].dt.year\nbitcoin['Month'] = bitcoin['Date'].dt.month\nbitcoin['Day'] = bitcoin['Date'].dt.day\n\ndrop_columns = ['Date','Close**', 'Volume', 'Market Cap', 'High', 'Low']\nnew_bitcoin = bitcoin.drop(columns=drop_columns)\n\ntrain = new_bitcoin[new_bitcoin.Year < 2016]\nval = new_bitcoin[(new_bitcoin.Year > 2015) & (new_bitcoin.Year < 2018)]\ntest = new_bitcoin[new_bitcoin.Year >= 2013]\n\ny_pred = pipeline.predict(test)\ntest['pred_price'] = y_pred\n\ncolumn1 = dbc.Col(\n [\n dcc.Markdown(\n \"\"\"\n\n ## Insights\n\n To the right, you'll see a Partial Dependence Plot(PDP).\n\n Essentially what it's saying is, as the opening price increases you'll expect \n either higher prices, lower prices, or no change in prices.\n (depending on what opening price you're looking at)\n\n If you look at the graph below the PDP,\n you'll notice that my model fits really well for years 2013 - 2017.\n However, for years 2018 - 2019, the model is not as tight.\n This is likely due to the fall of bitcoin from Dec. 2017 - February 2018.\n\n \"\"\"\n ),\n\n ],\n md=4,\n)\n\nfig = go.Figure()\nfig.add_trace(go.Scatter(\n x=bitcoin.Date,\n y=bitcoin['Close**'],\n name='Closing Price',\n line_color = 'green',\n opacity=0.8)\n )\n\nfig.add_trace(go.Scatter(\n x=bitcoin.Date,\n y=test['pred_price'],\n name='Predicted close',\n line_color = 'red',\n opacity=0.8)\n )\n\nfig.update_layout(xaxis_range=['2013-04-28','2019-08-12'],\n title_text='Bitcoin Historical Data')\n\ncolumn2 = dbc.Col(\n [\n html.Img(src='assets/PDP.png', className='img-fluid'),\n dcc.Graph(figure=fig)\n ]\n)\n\nlayout = dbc.Row([column1, column2])" ]
[ [ "pandas.read_csv", "pandas.to_datetime" ] ]
hunto/mmrazor
[ "e1483b4f4ca69de98130b01a27a6dd7ed44bda4f" ]
[ "mmrazor/datasets/utils.py" ]
[ "from torch.utils.data import random_split\n\n\ndef split_dataset(dataset):\n dset_length = len(dataset)\n\n first_dset_length = dset_length // 2\n second_dset_length = dset_length - first_dset_length\n split_tuple = (first_dset_length, second_dset_length)\n first_dset, second_dset = random_split(dataset, split_tuple)\n\n first_dset.CLASSES = dataset.CLASSES\n second_dset.CLASSES = dataset.CLASSES\n\n return [first_dset, second_dset]\n" ]
[ [ "torch.utils.data.random_split" ] ]
firiceguo/my-pycuda
[ "5c9691bcc380a1e1ff55615bf3135fcdacb54691" ]
[ "filter.py" ]
[ "'''\nDescription:\n This is the separable filter algorithm of image convolution using CUDA.\n\nUsage:\n $python filter.py\n\nNote:\n When changing the scale of tile, and the width of image,\n please ensure that IMAGE_W >= TILE_W and IMAGE_W % TILE_W == 0,\n or there will be an AssertionError.\n Also, I haven't tried the situation of TILE_W != TILE_H.\n\nCorrectness test:\n Use the FilTest function in utils.py to test the correctness of the GPU output.\n'''\n\nimport numpy as np\nimport pycuda.autoinit\nimport pycuda.driver as drv\nfrom pycuda.compiler import SourceModule\nimport string\n\nKERNEL_R = 10\nKERNEL_L = 2 * KERNEL_R + 1\nIMAGE_W = 16\nTILE_W = TILE_H = 16\n\n\ndef conv_filter(pic, filterx, filtery, IMAGE_W):\n '''\n Get the runtime of separable filter convolution algorithm.\n input:\n np.ndarray pic[IMAGE_W][IMAGE_W] -- the input image matrix\n np.ndarray filterx[KERNEL_L] -- the input row vector\n np.ndarray filtery[KERNEL_L] -- the input column vector\n int IMAGE_W -- the width of image\n output:\n float secgpu -- the runtime usage (microseconds) \n '''\n\n # test the input parameters\n assert pic.dtype == 'float32', 'source image must be float32'\n assert filterx.shape == filtery.shape == (KERNEL_L, ), 'Try changing KERNEL_L'\n assert IMAGE_W >= TILE_W and IMAGE_W % TILE_W == 0, 'Ensure that IMAGE_W >= TILE_W and IMAGE_W % TILE_W == 0'\n assert TILE_W == TILE_H, 'Ensure that TILE_W == TILE_H'\n\n # convert the scale of inputs, from 2D to 1D\n pic_vector = np.reshape(pic, (-1))\n filterx = np.reshape(filterx, (-1))\n filtery = np.reshape(filtery, (-1))\n\n # init the intermediate image and output image\n intermediateImage = np.zeros_like(pic_vector)\n destImage = np.zeros_like(pic_vector)\n\n # init the GPU memory and load the data from CPU\n sourceImage_gpu = drv.mem_alloc(pic_vector.nbytes)\n intermediateImage_gpu = drv.mem_alloc(intermediateImage.nbytes)\n destImage_gpu = drv.mem_alloc(destImage.nbytes)\n filterx_gpu = drv.mem_alloc(filterx.nbytes)\n filtery_gpu = drv.mem_alloc(filtery.nbytes)\n\n drv.memcpy_htod(sourceImage_gpu, pic_vector)\n drv.memcpy_htod(intermediateImage_gpu, intermediateImage)\n drv.memcpy_htod(destImage_gpu, destImage)\n drv.memcpy_htod(filterx_gpu, filterx)\n drv.memcpy_htod(filtery_gpu, filtery)\n\n # calculate the grid and block scale according to the width of image and scale of tile\n grids = (IMAGE_W / TILE_W * IMAGE_W / TILE_H, 1)\n blocks = (TILE_W * (TILE_H + 2 * KERNEL_R), 1, 1)\n\n # run the kernel function and compute the runtime\n start = drv.Event()\n end = drv.Event()\n start.record()\n convolutionRowGPU(intermediateImage_gpu, sourceImage_gpu, filterx_gpu,\n block=blocks, grid=grids)\n convolutionColGPU(destImage_gpu, intermediateImage_gpu, filtery_gpu,\n block=blocks, grid=grids)\n end.record()\n end.synchronize()\n secgpu = start.time_till(end)\n\n # load the output from GPU memory\n out = np.zeros_like(pic_vector)\n drv.memcpy_dtoh(out, destImage_gpu)\n out = np.reshape(out, (IMAGE_W, IMAGE_W))\n\n sourceImage_gpu.free()\n intermediateImage_gpu.free()\n destImage_gpu.free()\n filterx_gpu.free()\n filtery_gpu.free()\n\n return(secgpu)\n\n\nif __name__ == '__main__':\n template = '''\n# define IDIVUP(a, b) ( ((a)+1)/(b) + int(((a)+1) %(MOD)s (b) > 0) -1 ) // a is index, b is width, get line index\n# define IMUL(a,b) __mul24((a),(b))\n\n// Row convolution filter\n__global__ void convolutionRowGPU(\n float out[%(IMAGE_W)s * %(IMAGE_W)s],\n float pic[%(IMAGE_W)s * %(IMAGE_W)s],\n float kernel[%(KERNEL_L)s]\n)\n{\n /* Load data as:\n 
x123x\n x123x\n */\n __shared__ float data[ %(TILE_H)s * (%(TILE_W)s + %(KERNEL_R)s * 2) ];\n\n // global thread index\n int idx = threadIdx.x + IMUL(blockIdx.x, blockDim.x);\n\n // tile index - block base\n int tx = IDIVUP(blockIdx.x, (%(IMAGE_W)s / %(TILE_W)s) );\n int ty = blockIdx.x - IMUL(tx, (%(IMAGE_W)s / %(TILE_W)s));\n\n // global picture index\n int px = tx * %(TILE_H)s + IDIVUP(threadIdx.x, (%(TILE_W)s + 2 * %(KERNEL_R)s));\n int py = ty * %(TILE_W)s + threadIdx.x - IDIVUP(threadIdx.x, %(TILE_W)s + 2 * %(KERNEL_R)s) * (%(TILE_W)s + 2 * %(KERNEL_R)s) - (%(KERNEL_R)s);\n\n // data index\n int dx = IDIVUP(threadIdx.x, (%(TILE_W)s + %(KERNEL_R)s * 2));\n int dy = threadIdx.x - IMUL(dx, (%(TILE_W)s + %(KERNEL_R)s * 2));\n\n // load the pic block into shared memory\n if ((py < 0) || (py > %(IMAGE_W)s - 1)) {\n data[threadIdx.x] = 0;\n }\n else {\n data[threadIdx.x] = pic[px * %(IMAGE_W)s + py];\n }\n __syncthreads();\n\n // convolution\n float sum = 0;\n if (dy < %(KERNEL_R)s || dy > %(TILE_W)s + %(KERNEL_R)s - 1) {;}\n else {\n# pragma unroll\n for (int i = -%(KERNEL_R)s; i <= %(KERNEL_R)s; i++){\n sum += data[dx * (%(TILE_W)s + %(KERNEL_R)s * 2) + dy + i] * kernel[%(KERNEL_R)s + i];\n }\n out[px * %(IMAGE_W)s + py] = sum;\n }\n __syncthreads();\n}\n\n\n// Column convolution filter\n__global__ void convolutionColGPU(\n float out[%(IMAGE_W)s * %(IMAGE_W)s],\n float pic[%(IMAGE_W)s * %(IMAGE_W)s],\n float kernel[%(KERNEL_L)s]\n)\n{\n /* Load data as:\n xxx\n 123\n 123\n xxx\n */\n __shared__ float data[ %(TILE_W)s * (%(TILE_H)s + %(KERNEL_R)s * 2) ];\n\n // global thread index\n int idx = threadIdx.x + IMUL(blockIdx.x, blockDim.x);\n\n // tile index - block base\n int tx = IDIVUP(blockIdx.x, (%(IMAGE_W)s / %(TILE_W)s) );\n int ty = blockIdx.x - IMUL(tx, (%(IMAGE_W)s / %(TILE_W)s));\n\n // global picture index\n int px = tx * %(TILE_H)s + IDIVUP(threadIdx.x, %(TILE_W)s) - %(KERNEL_R)s;\n int py = ty * %(TILE_W)s + threadIdx.x - IDIVUP(threadIdx.x, %(TILE_W)s) * (%(TILE_W)s);\n\n // data index\n int dx = IDIVUP(threadIdx.x, %(TILE_W)s);\n int dy = threadIdx.x - IMUL(dx, %(TILE_W)s);\n\n // load the pic block into shared memory\n if ((px < 0) || (px > %(IMAGE_W)s - 1)) {\n data[threadIdx.x] = 0;\n }\n else {\n data[threadIdx.x] = pic[px * %(IMAGE_W)s + py];\n }\n\n __syncthreads();\n\n // convolution\n float sum = 0;\n if (dx < %(KERNEL_R)s || dx > %(TILE_H)s + %(KERNEL_R)s - 1) {;}\n else {\n# pragma unroll\n for (int i = -%(KERNEL_R)s; i <= %(KERNEL_R)s; i++){\n sum += data[(dx + i) * %(TILE_W)s + dy] * kernel[%(KERNEL_R)s + i];\n }\n out[px * %(IMAGE_W)s + py] = sum;\n }\n __syncthreads();\n}\n''' % {\n 'KERNEL_R': KERNEL_R, 'KERNEL_L': KERNEL_L, 'MOD': '%',\n 'IMAGE_W': IMAGE_W, 'TILE_W': TILE_W, 'TILE_H': TILE_H\n }\n module = SourceModule(template)\n convolutionRowGPU = module.get_function('convolutionRowGPU')\n convolutionColGPU = module.get_function('convolutionColGPU')\n\n pic = np.random.randn(IMAGE_W, IMAGE_W).astype(np.float32)\n filterx = np.random.randn(KERNEL_L).astype(np.float32)\n secs = conv_filter(pic, filterx, filterx, IMAGE_W)\n print(\"IMAGE_W:%d:Time:%f:ms\" % (IMAGE_W, secs))\n" ]
[ [ "numpy.reshape", "numpy.random.randn", "numpy.zeros_like" ] ]
tonyreina/neon
[ "bab09ddb4bafd00d8415b831ba6da676a2fd178e", "bab09ddb4bafd00d8415b831ba6da676a2fd178e" ]
[ "luna16/old_code/LUNA16_inferenceTestingVGG_noBatch.py", "luna16/old_code/LUNA16_extract_patches.py" ]
[ "#!/usr/bin/env python\n# ----------------------------------------------------------------------------\n# Copyright 2015-2017 Nervana Systems Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\n\"\"\"\nLoad and test a pre-trained model against the entire data subset.\n\npython LUNA16_inferenceTesting.py -b gpu -i 0 -z 16\n\n\"\"\"\n\nfrom neon import logger as neon_logger\nfrom neon.transforms import Misclassification, PrecisionRecall\nfrom neon.models import Model\nfrom aeon import DataLoader\nfrom neon.util.argparser import NeonArgparser, extract_valid_args\nfrom neon.backends import gen_backend\nfrom neon.data.dataloader_transformers import TypeCast, OneHot\nimport numpy as np\n\n# parse the command line arguments\nparser = NeonArgparser(__doc__)\n\nargs = parser.parse_args()\n\ntestFileName = 'manifest_subset7_SMALL.csv'\n\n# hyperparameters\nnum_epochs = args.epochs\n\n# Next line gets rid of the deterministic warning\nargs.deterministic = None\n\nif (args.rng_seed is None):\n args.rng_seed = 16\n\nprint('Batch size = {}'.format(args.batch_size))\n\n# setup backend\nbe = gen_backend(**extract_valid_args(args, gen_backend))\n\n# Set up the testset to load via aeon\nimage_config = dict(height=64, width=64, channels=1)\nlabel_config = dict(binary=False)\nconfig = dict(type=\"image,label\",\n image=image_config,\n label=label_config,\n manifest_filename=testFileName,\n minibatch_size=args.batch_size,\n subset_fraction=1)\ntest_set = DataLoader(config, be)\ntest_set = TypeCast(test_set, index=0, dtype=np.float32) # cast image to float\ntest_set = OneHot(test_set, index=1, nclasses=2)\n\nlunaModel = Model('LUNA16_VGG_model_no_batch.prm')\n\npred, target = lunaModel.get_outputs(test_set, return_targets=True) # Reshape to a single prediction vector\npred = pred.T\ntarget = target.T\n\nnp.set_printoptions(precision=3, suppress=True)\nprint(' ')\nprint(pred)\nprint(target)\n\n", "\n#!/usr/bin/env python\n# ----------------------------------------------------------------------------\n# Copyright 2015-2017 Nervana Systems Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\n\n\"\"\"\n This loads the LUNA16 mhd files (3D images), extracts the transverse patches (64x64)\n around the candidate positions, and then saves those patches to a subdirectory.\n In another script we'll take those patches and run them through a modified\n VGG model to 
see if we can correctly classify nodule (class 1) from\n non-nodule (class 0).\n\"\"\"\n\nimport SimpleITK as sitk\nimport numpy as np\nimport pandas as pd\nimport os\nimport ntpath\nfrom neon.util.argparser import NeonArgparser\n\nimport logging\n\n# parse the command line arguments\nparser = NeonArgparser(__doc__)\n\nparser.add_argument(\"--subset\", default='subset0',\n help='LUNA16 subset directory to process')\n\nargs = parser.parse_args()\n\n# To get the original LUNA16 MHD data:\n# wget https://www.dropbox.com/sh/mtip9dx6zt9nb3z/AAAs2wbJxbNM44-uafZyoMVca/subset5.zip\n# The files are 7-zipped. Regular linux unzip won't work to uncompress them. Use 7za instead.\n# 7za e subset5.zip\n\nDATA_DIR = '/mnt/data/medical/luna16/'\nSUBSET = args.subset\ncand_path = 'CSVFILES/candidates_with_annotations.csv' # Candidates file tells us the centers of the ROI for candidate nodules\n\n# Set up logging\nlogger = logging.getLogger(__name__)\nhdlr = logging.FileHandler('all_'+SUBSET+'.log')\nformatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\nhdlr.setFormatter(formatter)\nlogger.addHandler(hdlr) \nlogger.setLevel(logging.INFO)\n\ndef extractCandidates(img_file):\n \n # Get the name of the file\n subjectName = ntpath.splitext(ntpath.basename(img_file))[0] # Strip off the .mhd extension\n \n # Read the list of candidate ROI\n dfCandidates = pd.read_csv(DATA_DIR+cand_path)\n \n \n numCandidates = dfCandidates[dfCandidates['seriesuid']==subjectName].shape[0]\n print('Subject {}: There are {} candidate nodules in this file.'.format(subjectName, numCandidates))\n \n numNonNodules = sum(dfCandidates[dfCandidates['seriesuid']==subjectName]['class'] == 0)\n numNodules = sum(dfCandidates[dfCandidates['seriesuid']==subjectName]['class'] == 1)\n print('{} are true nodules (class 1) and {} are non-nodules (class 0)'.format(numNodules, numNonNodules))\n \n # Read if the candidate ROI is a nodule (1) or non-nodule (0)\n candidateValues = dfCandidates[dfCandidates['seriesuid']==subjectName]['class'].values\n \n # Get the world coordinates (mm) of the candidate ROI center\n worldCoords = dfCandidates[dfCandidates['seriesuid']==subjectName][['coordX', 'coordY', 'coordZ']].values\n \n # Use SimpleITK to read the mhd image\n itkimage = sitk.ReadImage(img_file)\n\n # Normalize the image to be 1.0 x 1.0 x 1.0 mm voxel size\n itkimage = normalize_img(itkimage)\n \n # Get the real world origin (mm) for this image\n originMatrix = np.tile(itkimage.GetOrigin(), (numCandidates,1)) # Real world origin for this image (0,0)\n \n # Subtract the real world origin and scale by the real world (mm per pixel)\n # This should give us the X,Y,Z coordinates for the candidates\n candidatesPixels = (np.round(np.absolute(worldCoords - originMatrix) / itkimage.GetSpacing())).astype(int)\n \n candidatePatches = []\n \n imgAll = sitk.GetArrayFromImage(itkimage) # Read the image volume\n\n valueArray = []\n\n for candNum in range(numCandidates):\n \n #print('Extracting candidate patch #{}'.format(candNum))\n candidateVoxel = candidatesPixels[candNum,:]\n xpos = int(candidateVoxel[0])\n ypos = int(candidateVoxel[1])\n zpos = int(candidateVoxel[2])\n \n # Need to handle the candidates where the window would extend beyond the image boundaries\n windowSize = 64 # Center a 64 pixel by 64 pixel patch around the candidate position\n x_lower = np.max([0, xpos - windowSize//2]) # Return 0 if position off image\n x_upper = np.min([xpos + windowSize//2, itkimage.GetWidth()]) # Return maxWidth if position off image\n \n y_lower = 
np.max([0, ypos - windowSize//2]) # Return 0 if position off image\n y_upper = np.min([ypos + windowSize//2, itkimage.GetHeight()]) # Return maxHeight if position off image\n \n z_lower = np.max([0, zpos - windowSize//2]) # Return 0 if position off image\n z_upper = np.min([zpos + windowSize//2, itkimage.GetDepth()]) # Return maxHeight if position off image\n \n skipPatch = False\n if ((xpos - windowSize//2) < 0) | ((xpos + windowSize//2) > itkimage.GetWidth()):\n logger.info('img file {} off x for candidate {}, label {}'.format(img_file, candNum, candidateValues[candNum]))\n skipPatch = True\n\n if ((ypos - windowSize//2) < 0) | ((ypos + windowSize//2) > itkimage.GetHeight()):\n logger.info('img file {} off y for candidate {}, label {}'.format(img_file, candNum, candidateValues[candNum]))\n skipPatch = True\n\n # SimpleITK is x,y,z. Numpy is z, y, x.\n imgPatch = imgAll[zpos, y_lower:y_upper, x_lower:x_upper]\n \n #imgPatch = imgAll[zpos, :, :]\n \n # Normalize to the Hounsfield units\n imgPatchNorm = normalizePlanes(imgPatch)\n \n if not skipPatch:\n candidatePatches.append(imgPatchNorm) # Append the candidate image patches to a python list\n valueArray.append(candidateValues[candNum])\n\n return candidatePatches, valueArray\n\n\"\"\"\nNormalize pixel depth into Hounsfield units (HU)\n\nThis tries to get all pixels between -1000 and 400 HU.\nAll other HU will be masked.\nThen we normalize pixel values between 0 and 1.\n\n\"\"\"\ndef normalizePlanes(npzarray):\n \n maxHU = 400.\n minHU = -1000.\n \n npzarray = (npzarray - minHU) / (maxHU - minHU)\n npzarray[npzarray>1] = 1.\n npzarray[npzarray<0] = 0.\n return npzarray\n\ndef normalize_img(img):\n \n '''\n Sets the MHD image to be approximately 1.0 mm voxel size\n \n https://itk.org/ITKExamples/src/Filtering/ImageGrid/ResampleAnImage/Documentation.html\n '''\n new_x_size = int(img.GetSpacing()[0]*img.GetWidth()) # Number of pixels you want for x dimension\n new_y_size = int(img.GetSpacing()[1]*img.GetHeight()) # Number of pixels you want for y dimension\n new_z_size = int(img.GetSpacing()[2]*img.GetDepth()) # Number of pixels you want for z dimesion\n new_size = [new_x_size, new_y_size, new_z_size]\n \n# new_spacing = [old_sz*old_spc/new_sz for old_sz, old_spc, new_sz in zip(img.GetSize(), img.GetSpacing(), new_size)]\n\n new_spacing = [1,1,1] # New spacing to be 1.0 x 1.0 x 1.0 mm voxel size\n interpolator_type = sitk.sitkLinear\n\n return sitk.Resample(img, new_size, sitk.Transform(), interpolator_type, img.GetOrigin(), new_spacing, img.GetDirection(), 0.0, img.GetPixelIDValue())\n \n\nfrom scipy.misc import toimage\n\n\"\"\"\nSave the image patches for a given data file\n\"\"\"\n# We need to save the array as an image.\n# This is the easiest way. 
Matplotlib seems to like adding a white border that is hard to kill.\ndef SavePatches(manifestFilename, img_file, patchesArray, valuesArray):\n \n saveDir = ntpath.dirname(img_file) + '/patches_ALL'\n\n try:\n os.stat(saveDir)\n except:\n os.mkdir(saveDir) \n\n with open(manifestFilename, 'a') as f: # Write to the manifest file for aeon loader\n\n subjectName = ntpath.splitext(ntpath.basename(img_file))[0]\n \n\n print('Saving image patches for file {}/{}.'.format(SUBSET, subjectName))\n for i in range(len(valuesArray)):\n\n \n #print('\\r{} of {}'.format(i+1, len(valuesArray))),\n im = toimage(patchesArray[i])\n\n pngName = saveDir + '/{}_{}_{}.png'.format(subjectName, i, valuesArray[i])\n im.save(pngName)\n\n f.write('{},label_{}.txt\\n'.format(pngName, valuesArray[i]))\n\n f.close()\n\n print('{}: Finished {}\\n'.format(SUBSET, subjectName))\n\n\n\"\"\"\nLoop through all .mhd files within the data directory and process them.\n\"\"\"\n\n# Reset the manifest file to empty\nmanifestFilename = 'manifest_{}_ALL.csv'.format(SUBSET)\nf = open(manifestFilename, 'w')\nf.close()\n\nfor root, dirs, files in os.walk(DATA_DIR+SUBSET):\n \n for file in files:\n \n if (file.endswith('.mhd')) & ('__MACOSX' not in root): # Don't get the Macintosh directory\n \n\n img_file = os.path.join(root, file)\n\n patchesArray, valuesArray = extractCandidates(img_file) \n \n SavePatches(manifestFilename, img_file, patchesArray, valuesArray)\n \n \n\n\n\n" ]
[ [ "numpy.set_printoptions" ], [ "numpy.max", "pandas.read_csv", "numpy.absolute", "scipy.misc.toimage" ] ]
MapleNSteel/AckermannControl
[ "bca97ca0900dde416f247a3a1c30fbf7dba51aa6" ]
[ "Controllers/LinearMPC.py" ]
[ "import numpy as np\nimport cvxopt\n\ncvxopt.matrix_repr = cvxopt.printing.matrix_str_default\ncvxopt.printing.options['dformat'] = '%.4f'\ncvxopt.printing.options['width'] = -1\ncvxopt.solvers.options['show_progress'] = False\ncvxopt.solvers.options['maxiters'] = 30\ncvxopt.solvers.options['abstol'] = 1e-08\ncvxopt.solvers.options['feastol'] = 1e-08 \n\ndef getControl(A, B, C, x, r, g1, g2, h1, h2, stateLength, controlLength, N, Q, R, S, Cbar):\n\n\tA=cvxopt.matrix(A)\n\tB=cvxopt.matrix(B)\n\tC=cvxopt.matrix(C)\n\n\tx=cvxopt.matrix(x)\n\tr=cvxopt.matrix(r)\n\n\tif(not (g1==None or h1==None or g2==None or h2==None)):\n\t\tg1=cvxopt.matrix(g1)\n\t\th1=cvxopt.matrix(h1)\n\t\tg2=cvxopt.matrix(g2)\n\t\th2=cvxopt.matrix(h2)\n\n\tQ=cvxopt.matrix(Q)\n\tR=cvxopt.matrix(R)\n\tS=cvxopt.matrix(S)\n\t\n\tCbar=cvxopt.matrix(Cbar)\n\n\tRhat=cvxopt.spdiag([R for i in range(0,N)])\n\tQhat=cvxopt.spdiag([C.trans()*Q*C if i<(N-1) else C.trans()*S*C for i in range(0,N)])\n\n\tb=[]\n\ttemp=cvxopt.matrix(np.eye(stateLength))\n\tfor i in range(0,N):\n\t\tb.append(temp)\n\t\ttemp=temp+A*b[-1]\n\n\tT1=cvxopt.sparse(b)\n\tChat=T1*Cbar\n\n\tThat=cvxopt.spdiag([Q*C if i<(N-1) else C.trans()*S*C for i in range(0,N)])\n\t\n\n\tb=[]\n\tfor i in range(0,N):\n\t\ta=[]\n\t\tfor j in range(0,N):\n\t\t\tif(j<=i):\n\t\t\t\ta.append((cvxopt.matrix(np.linalg.matrix_power(A, i-j))*B).trans())\n\t\t\telse:\n\t\t\t\ta.append(cvxopt.matrix(np.zeros(B.size)).trans())\n\t\tb.append(a)\n\n\tBhat=cvxopt.sparse(b).trans()\n\tAhat=cvxopt.sparse([cvxopt.matrix(np.linalg.matrix_power(A, i+1)) for i in range(0,N)])\n\n\t#Final Matrices\n\tP1=(Bhat.trans()*Qhat*Bhat+Rhat)\n\tq=(x.trans()*Ahat.trans()*Qhat*Bhat-r.trans()*That*Bhat+Chat.trans()*Qhat*Bhat).trans()\n\n\tif(not (g1==None or h1==None or g2==None or h2==None)):\n\t\tG=cvxopt.sparse([g1, g2*Bhat])\n\t\tH=cvxopt.matrix(cvxopt.sparse([h1, h2-g2*Chat-g2*Ahat*x]), tc='d')\n\telse:\n\t\tG=None\n\t\tH=None\n\n\tsol=cvxopt.solvers.qp(P1,q,G,H)\n\n\treturn sol['x'], Ahat*x+Bhat*sol['x']+Chat\n\n" ]
[ [ "numpy.eye", "numpy.linalg.matrix_power", "numpy.zeros" ] ]
MichaelORegan/46887-COMPUTATIONAL-THINKING-WITH-ALGORITHMS-PROJECT
[ "8d699e147fadd4eeb8652195c311a13c2d942a5b" ]
[ "benchmarkmerge.py" ]
[ "# Michael O'Regan 05/May/2019\n# http://interactivepython.org/courselib/static/pythonds/SortSearch/TheMergeSort.html\n\nimport time\nimport statistics\nimport numpy as np # importing numpy as np\n\nnp.random.seed(1) # seeding random on seed 1 so that all the arrays are the same\n\na = np.random.randint(200000, size=100) # array of size 100\nb = np.random.randint(200000, size=250) # array of size 250\nc = np.random.randint(200000, size=500) # array of size 500\nd = np.random.randint(200000, size=750) # array of size 750\ne = np.random.randint(200000, size=1000) # array of size 1000\nf = np.random.randint(200000, size=2500) # array of size 2500\ng = np.random.randint(200000, size=5000) # array of size 5000\nh = np.random.randint(200000, size=7500) # array of size 7500\ni = np.random.randint(200000, size=10000) # array of size 10000\nj = np.random.randint(200000, size=15000) # array of size 15000\nk = np.random.randint(200000, size=20000) # array of size 20000\nl = np.random.randint(200000, size=30000) # array of size 30000\nm = np.random.randint(200000, size=50000) # array of size 50000\n\ndef mergeSort(alist):\n if len(alist)>1:\n mid = len(alist)//2\n lefthalf = alist[:mid]\n righthalf = alist[mid:]\n\n mergeSort(lefthalf)\n mergeSort(righthalf)\n\n i=0\n j=0\n k=0\n while i < len(lefthalf) and j < len(righthalf):\n if lefthalf[i] < righthalf[j]:\n alist[k]=lefthalf[i]\n i=i+1\n else:\n alist[k]=righthalf[j]\n j=j+1\n k=k+1\n\n while i < len(lefthalf):\n alist[k]=lefthalf[i]\n i=i+1\n k=k+1\n\n while j < len(righthalf):\n alist[k]=righthalf[j]\n j=j+1\n k=k+1\n\nnum_runs = 10 # set the benchamrk number of runs at 10\nfor r in range(num_runs): # looping the timing of the algorithm for num_runs\n np.random.seed(1) # setting the random seed (1)\n a = np.random.randint(200000, size=100) # array of size 100\n start_time = time.time_ns() # setting start time to the time now in nanoseconds, https://docs.python.org/3/library/time.html#time.time\n mergeSort(a) # calling algorithm for array a\n end_time = time.time_ns() # setting end time to the time now in nanoseconds\n time_elapsed = (end_time - start_time)*10**-6 # setting time elapsed to the difference of start and end to get the length of the running time of the algorithm\n resultsa = [] # setting results a to an empty array to fill with results\n resultsa.append(time_elapsed) # append the results array ie fill the results array with results of the runs as per num runs\nra = round(statistics.mean(resultsa),3) # setting ra to the mean of results rounded to 3 decimal places https://www.geeksforgeeks.org/python-statistics-mean-function/\nfor r in range(num_runs):\n np.random.seed(1)\n b = np.random.randint(200000, size=250) # array of size 250\n start_time = time.time_ns()\n mergeSort(b) # calling algorithm for array b\n end_time = time.time_ns()\n time_elapsed = (end_time - start_time)*10**-6\n resultsb = []\n resultsb.append(time_elapsed)\nrb = round(statistics.mean(resultsb),3) # setting rb to the mean of results rounded to 3 decimal places\nfor r in range(num_runs):\n np.random.seed(1)\n c = np.random.randint(200000, size=500) # array of size 500\n start_time = time.time_ns()\n mergeSort(c) # calling algorithm for array c\n end_time = time.time_ns()\n time_elapsed = (end_time - start_time)*10**-6\n resultsc = []\n resultsc.append(time_elapsed)\nrc = round(statistics.mean(resultsc),3) # setting rc to the mean of results rounded to 3 decimal places\nfor r in range(num_runs):\n np.random.seed(1)\n d = np.random.randint(200000, size=750) # 
array of size 750\n start_time = time.time_ns()\n mergeSort(d) # calling algorithm for array d\n end_time = time.time_ns()\n time_elapsed = (end_time - start_time)*10**-6\n resultsd = []\n resultsd.append(time_elapsed)\nrd = round(statistics.mean(resultsd),3) # setting rd to the mean of results rounded to 3 decimal places\nfor r in range(num_runs):\n np.random.seed(1)\n e = np.random.randint(200000, size=1000) # array of size 1000\n start_time = time.time_ns()\n mergeSort(e) # calling algorithm for array e\n end_time = time.time_ns()\n time_elapsed = (end_time - start_time)*10**-6\n resultse = []\n resultse.append(time_elapsed)\nre = round(statistics.mean(resultse),3) # setting re to the mean of results rounded to 3 decimal places\nfor r in range(num_runs):\n np.random.seed(1)\n f = np.random.randint(200000, size=2500) # array of size 2500\n start_time = time.time_ns()\n mergeSort(f) # calling algorithm for array f\n end_time = time.time_ns()\n resultsf = []\n time_elapsed = (end_time - start_time)*10**-6\n resultsf.append(time_elapsed)\nrf = round(statistics.mean(resultsf),3) # setting rf to the mean of results rounded to 3 decimal places\nfor r in range(num_runs):\n np.random.seed(1)\n g = np.random.randint(200000, size=5000) # array of size 5000\n start_time = time.time_ns()\n mergeSort(g) # calling algorithm for array g\n end_time = time.time_ns()\n time_elapsed = (end_time - start_time)*10**-6\n resultsg = []\n resultsg.append(time_elapsed)\nrg = round(statistics.mean(resultsg),3) # setting rg to the mean of results rounded to 3 decimal places\nfor r in range(num_runs):\n np.random.seed(1)\n h = np.random.randint(200000, size=7500) # array of size 7500\n start_time = time.time_ns()\n mergeSort(h) # calling algorithm for array h\n end_time = time.time_ns()\n time_elapsed = (end_time - start_time)*10**-6\n resultsh = []\n resultsh.append(time_elapsed)\nrh = round(statistics.mean(resultsh),3) # setting rg to the mean of results rounded to 3 decimal places\nfor r in range(num_runs):\n np.random.seed(1)\n i = np.random.randint(200000, size=10000) # array of size 10000\n start_time = time.time_ns()\n mergeSort(i) # calling algorithm for array i\n end_time = time.time_ns()\n time_elapsed = (end_time - start_time)*10**-6\n resultsi = []\n resultsi.append(time_elapsed)\nri = round(statistics.mean(resultsi),3) # setting ri to the mean of results rounded to 3 decimal places\nfor r in range(num_runs):\n np.random.seed(1)\n j = np.random.randint(200000, size=15000) # array of size 15000\n start_time = time.time_ns()\n mergeSort(j) # calling algorithm for array j\n end_time = time.time_ns()\n time_elapsed = (end_time - start_time)*10**-6\n resultsj = []\n resultsj.append(time_elapsed)\nrj = round(statistics.mean(resultsj),3) # setting rj to the mean of results rounded to 3 decimal places\nfor r in range(num_runs):\n np.random.seed(1)\n k = np.random.randint(200000, size=20000) # array of size 20000\n start_time = time.time_ns()\n mergeSort(k) # calling algorithm for array k\n end_time = time.time_ns()\n time_elapsed = (end_time - start_time)*10**-6\n resultsk = []\n resultsk.append(time_elapsed)\nrk = round(statistics.mean(resultsk),3) # setting rk to the mean of results rounded to 3 decimal places\nfor r in range(num_runs):\n np.random.seed(1)\n l = np.random.randint(200000, size=30000) # array of size 30000\n start_time = time.time_ns()\n mergeSort(l) # calling algorithm for array l\n end_time = time.time_ns()\n time_elapsed = (end_time - start_time)*10**-6\n resultsl = []\n 
resultsl.append(time_elapsed)\nrl = round(statistics.mean(resultsl),3) # setting rl to the mean of results rounded to 3 decimal places\nfor r in range(num_runs):\n np.random.seed(1)\n m = np.random.randint(200000, size=50000) # array of size 50000\n start_time = time.time_ns()\n mergeSort(m) # calling algorithm for array m\n end_time = time.time_ns()\n time_elapsed = (end_time - start_time)*10**-6\n resultsm = []\n resultsm.append(time_elapsed)\nrm = round(statistics.mean(resultsm),3) # setting rl to the mean of results rounded to 3 decimal places\n\nmergebench = [ra, rb, rc, rd, re, rf, rg, rh, ri, rj, rk, rl, rm] # setting mergebench to an array of the results of timings for sorting arrays a-m" ]
[ [ "numpy.random.seed", "numpy.random.randint" ] ]
TNTtian/Faasm
[ "377f4235063a7834724cc750697d3e0280d4a581" ]
[ "tasks/matrix_data.py" ]
[ "from os.path import join\n\nimport numpy as np\nfrom invoke import task\nfrom numpy import int32\nfrom pyfaasm.config import MatrixConf\nfrom pyfaasm.matrix import random_matrix\nfrom pyfaasm.matrix_data import subdivide_matrix_into_files\n\nfrom tasks.util.matrices import get_matrix_dir, MATRIX_CONF_STATE_KEY, SUBMATRICES_KEY_A, SUBMATRICES_KEY_B\n\n\n@task\ndef generate_all_matrix_data(ctx):\n splits = [2]\n sizes = [100, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000]\n for n_splits in splits:\n for matrix_size in sizes:\n generate_matrix_data(ctx, matrix_size, n_splits)\n\n\n@task\ndef generate_matrix_data(ctx, matrix_size, n_splits):\n matrix_size = int(matrix_size)\n n_splits = int(n_splits)\n data_dir = get_matrix_dir(matrix_size, n_splits)\n\n conf = MatrixConf(matrix_size, n_splits)\n\n params_path = join(data_dir, MATRIX_CONF_STATE_KEY)\n\n # Write params to file\n print(\"Generating {}x{} matrix with {} splits\".format(matrix_size, matrix_size, n_splits))\n params = np.array((matrix_size, n_splits), dtype=int32)\n with open(params_path, \"wb\") as fh:\n fh.write(params.tobytes())\n\n mat_a = random_matrix(matrix_size)\n mat_b = random_matrix(matrix_size)\n subdivide_matrix_into_files(conf, mat_a, data_dir, SUBMATRICES_KEY_A)\n subdivide_matrix_into_files(conf, mat_b, data_dir, SUBMATRICES_KEY_B)\n" ]
[ [ "numpy.array" ] ]
Mohamed0gad/hoggorm
[ "4debdb49a8d1d8858abb783be2ad67ffc96fd3ab" ]
[ "tests/test_pls1.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nTest whether PLS1 results are as expected.\n\"\"\"\nimport os.path as osp\nimport numpy as np\nimport pytest\nfrom hoggorm import nipalsPLS1 as PLS1\n\n\n# If the following equation is element-wise True, then allclose returns True.\n# absolute(a - b) <= (atol + rtol * absolute(b))\n# default: rtol=1e-05, atol=1e-08\nrtol = 1e-05\natol = 1e-08\n\n\nATTRS = [\n 'modelSettings',\n 'X_means',\n 'X_scores',\n 'X_loadings',\n 'X_corrLoadings',\n 'X_residuals',\n 'X_calExplVar',\n 'X_cumCalExplVar_indVar',\n 'X_cumCalExplVar',\n 'X_predCal',\n 'X_PRESSE_indVar',\n 'X_PRESSE',\n 'X_MSEE_indVar',\n 'X_MSEE',\n 'X_RMSEE_indVar',\n 'X_RMSEE',\n 'X_valExplVar',\n 'X_cumValExplVar_indVar',\n 'X_cumValExplVar',\n 'X_predVal',\n 'X_PRESSCV_indVar',\n 'X_PRESSCV',\n 'X_MSECV_indVar',\n 'X_MSECV',\n 'X_RMSECV_indVar',\n 'X_RMSECV',\n 'X_scores_predict',\n 'Y_means',\n 'Y_loadings',\n 'Y_corrLoadings',\n 'Y_residuals',\n 'Y_calExplVar',\n 'Y_cumCalExplVar',\n 'Y_predCal',\n 'Y_PRESSE',\n 'Y_MSEE',\n 'Y_RMSEE',\n 'Y_valExplVar',\n 'Y_cumValExplVar',\n 'Y_predVal',\n 'Y_PRESSCV',\n 'Y_MSECV',\n 'Y_RMSECV',\n 'cvTrainAndTestData',\n 'corrLoadingsEllipses',\n]\n\n\n@pytest.fixture(scope=\"module\")\ndef pls1cached(cfldat, csecol2dat):\n \"\"\"\n Run PLS1 from current hoggorm installation and compare results against reference results.\n \"\"\"\n return PLS1(arrX=cfldat, vecy=csecol2dat, cvType=[\"loo\"])\n\n\ntestMethods = [\"X_scores\", \"X_loadings\", \"X_corrLoadings\", \"X_cumCalExplVar_indVar\", \"X_cumCalExplVar\",\n \"Y_loadings\", \"Y_corrLoadings\", \"Y_cumCalExplVar\"] \n# testMethods = [\"Y_corrLoadings\"]\n@pytest.fixture(params=testMethods)\ndef pls1ref(request, datafolder):\n \"\"\"\n Load reference numerical results from file.\n \"\"\"\n rname = request.param\n refn = \"ref_PLS1_{}.tsv\".format(rname.lower())\n try:\n refdat = np.loadtxt(osp.join(datafolder, refn))\n except FileNotFoundError:\n refdat = None\n\n return (rname, refdat)\n\n\ndef test_compare_reference(pls1ref, pls1cached):\n \"\"\"\n Check whether numerical outputs are the same (or close enough).\n \"\"\"\n rname, refdat = pls1ref\n res = getattr(pls1cached, rname)()\n\n if refdat is None:\n dump_res(rname, res)\n assert False, \"Missing reference data for {}, data is dumped\".format(rname)\n elif rname == 'Y_corrLoadings' or rname == 'Y_loadings':\n if not np.allclose(res[:3], refdat.reshape(1, -1)[:3], rtol=rtol, atol=atol):\n dump_res(rname, res)\n assert False, \"Difference in {}, data is dumped\".format(rname) \n elif rname == 'X_cumCalExplVar' or rname == 'Y_cumCalExplVar':\n if not np.allclose(np.array(res[:3]), refdat[:3], rtol=rtol, atol=atol):\n dump_res(rname, res)\n assert False, \"Difference in {}, data is dumped\".format(rname)\n elif not np.allclose(res[:, :3], refdat[:, :3], rtol=rtol, atol=atol):\n dump_res(rname, res)\n assert False, \"Difference in {}, data is dumped\".format(rname)\n else:\n assert True\n\n\ndef dump_res(rname, dat):\n \"\"\"\n Dumps information to file if reference data is missing or difference is larger than tolerance.\n \"\"\"\n dumpfolder = osp.realpath(osp.dirname(__file__))\n dumpfn = \"dump_PLS1_{}.tsv\".format(rname.lower())\n np.savetxt(osp.join(dumpfolder, dumpfn), dat, fmt='%.9e', delimiter='\\t')\n\n\ndef test_api_verify(pls1cached, cfldat):\n \"\"\"\n Check whether all methods in list ATTR are also available in nipalsPLS1 class.\n \"\"\"\n # Loop through all methods in ATTR\n for fn in ATTRS:\n if fn == 'X_scores_predict':\n res = 
pls1cached.X_scores_predict(Xnew=cfldat)\n print('fn:', 'X_scores_predict')\n print('type(res):', type(res))\n print('shape:', res.shape, '\\n\\n')\n else: \n res = getattr(pls1cached, fn)()\n print('fn:', fn)\n print('type(res):', type(res))\n if isinstance(res, np.ndarray):\n print('shape:', res.shape, '\\n\\n')\n else:\n print('\\n')\n\n\ndef test_constructor_api_variants(cfldat, csecol2dat):\n \"\"\"\n Check whether various combinations of keyword arguments work.\n \"\"\"\n print(cfldat.shape, csecol2dat.shape)\n pls1_1 = PLS1(arrX=cfldat, vecy=csecol2dat, numComp=3, Xstand=False, cvType=[\"loo\"])\n print('pls1_1', pls1_1)\n pls1_2 = PLS1(cfldat, csecol2dat)\n print('pls1_2', pls1_2)\n pls1_3 = PLS1(cfldat, csecol2dat, numComp=300, cvType=[\"loo\"])\n print('pls1_3', pls1_3)\n pls1_4 = PLS1(arrX=cfldat, vecy=csecol2dat, cvType=[\"loo\"], numComp=5, Xstand=False)\n print('pls1_4', pls1_4)\n pls1_5 = PLS1(arrX=cfldat, vecy=csecol2dat, Xstand=True)\n print('pls1_5', pls1_5)\n pls1_6 = PLS1(arrX=cfldat, vecy=csecol2dat, numComp=2, Xstand=False, cvType=[\"KFold\", 3])\n print('pls1_6', pls1_6)\n pls1_7 = PLS1(arrX=cfldat, vecy=csecol2dat, numComp=2, Xstand=False, cvType=[\"lolo\", [1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7]])\n print('pls1_7', pls1_7)\n assert True\n" ]
[ [ "numpy.array", "numpy.allclose" ] ]
SirJakesalot/MinecraftObjectRecognition
[ "7b70c9d8cc3bfc729baa072d94b8fc861dc9e579" ]
[ "agents/RecorderAgent.py" ]
[ "import time\nimport os\n# vendors\nimport MalmoPython\nimport cv2\nimport numpy as np\n\nfrom BaseAgent import BaseAgent\n\nclass RecorderAgent(BaseAgent):\n imgCounter = 1\n imgDir = 'imgs/tmp'\n def __init__(self, config):\n BaseAgent.__init__(self, config)\n if not os.path.exists(self.imgDir):\n os.makedirs(self.imgDir)\n assert os.path.isdir(self.imgDir)\n\n def setupAgentHost(self):\n self.agent_host = MalmoPython.AgentHost()\n self.agent_host.setVideoPolicy(MalmoPython.VideoPolicy.KEEP_ALL_FRAMES)\n self.agent_host.setObservationsPolicy(MalmoPython.ObservationsPolicy.KEEP_ALL_OBSERVATIONS)\n\n def agentAction(self):\n while self.world_state.number_of_video_frames_since_last_state < 1 and self.world_state.is_mission_running:\n self.logger.info('Waiting for frames...')\n time.sleep(0.05)\n self.world_state = self.agent_host.getWorldState()\n\n self.logger.info('Got frame!')\n\n if self.world_state.is_mission_running:\n self.processFrame(self.world_state.video_frames[0].pixels)\n\n def processFrame(self, pixels):\n frame = np.asarray(pixels).reshape(480,640,3)\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n imgPath = os.path.join(self.imgDir, '{0}.jpg'.format(self.imgCounter))\n cv2.imwrite(imgPath, frame_rgb)\n self.imgCounter += 1\n\nconfig = {\n 'mission_file': r'C:\\Users\\armentrout\\Documents\\GitHub\\MinecraftObjectRecognition\\missions\\flat_world.xml',\n 'recording': {\n 'path': 'data.tgz',\n 'fps': 1,\n 'bit_rate': 400000\n },\n}\nra = RecorderAgent(config)\nra.startMission()" ]
[ [ "numpy.asarray" ] ]
hyoputer/open-unmix-pytorch
[ "ffe568592bb508fcfdf2174553efd2139e61de4c" ]
[ "openunmix/cli.py" ]
[ "from pathlib import Path\nimport torch\nimport torchaudio\nimport json\nimport numpy as np\n\n\nfrom openunmix import utils\nfrom openunmix import predict\nfrom openunmix import data\n\nimport argparse\n\n\ndef separate():\n parser = argparse.ArgumentParser(\n description=\"UMX Inference\",\n add_help=True,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n parser.add_argument(\"input\", type=str, nargs=\"+\", help=\"List of paths to wav/flac files.\")\n\n parser.add_argument(\n \"--model\",\n default=\"umxhq\",\n type=str,\n help=\"path to mode base directory of pretrained models\",\n )\n\n parser.add_argument(\n \"--targets\",\n nargs=\"+\",\n type=str,\n help=\"provide targets to be processed. \\\n If none, all available targets will be computed\",\n )\n\n parser.add_argument(\n \"--outdir\",\n type=str,\n help=\"Results path where audio evaluation results are stored\",\n )\n\n parser.add_argument(\n \"--ext\",\n type=str,\n default=\".wav\",\n help=\"Output extension which sets the audio format\",\n )\n\n parser.add_argument(\"--start\", type=float, default=0.0, help=\"Audio chunk start in seconds\")\n\n parser.add_argument(\n \"--duration\",\n type=float,\n help=\"Audio chunk duration in seconds, negative values load full track\",\n )\n\n parser.add_argument(\n \"--no-cuda\", action=\"store_true\", default=False, help=\"disables CUDA inference\"\n )\n\n parser.add_argument(\n \"--audio-backend\",\n type=str,\n default=\"sox_io\",\n help=\"Set torchaudio backend \"\n \"(`sox_io`, `sox`, `soundfile` or `stempeg`), defaults to `sox_io`\",\n )\n\n parser.add_argument(\n \"--niter\",\n type=int,\n default=1,\n help=\"number of iterations for refining results.\",\n )\n\n parser.add_argument(\n \"--wiener-win-len\",\n type=int,\n default=300,\n help=\"Number of frames on which to apply filtering independently\",\n )\n\n parser.add_argument(\n \"--residual\",\n type=str,\n default=None,\n help=\"if provided, build a source with given name\"\n \"for the mix minus all estimated targets\",\n )\n\n parser.add_argument(\n \"--aggregate\",\n type=str,\n default=None,\n help=\"if provided, must be a string containing a valid expression for \"\n \"a dictionary, with keys as output target names, and values \"\n \"a list of targets that are used to build it. For instance: \"\n '\\'{\"vocals\":[\"vocals\"], \"accompaniment\":[\"drums\",'\n '\"bass\",\"other\"]}\\'',\n )\n\n parser.add_argument(\n \"--filterbank\",\n type=str,\n default=\"torch\",\n help=\"filterbank implementation method. \"\n \"Supported: `['torch', 'asteroid']`. `torch` is ~30% faster\"\n \"compared to `asteroid` on large FFT sizes such as 4096. 
However\"\n \"asteroids stft can be exported to onnx, which makes is practical\"\n \"for deployment.\",\n )\n args = parser.parse_args()\n\n if args.audio_backend != \"stempeg\":\n torchaudio.set_audio_backend(args.audio_backend)\n\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n print(\"Using \", device)\n # parsing the output dict\n aggregate_dict = None if args.aggregate is None else json.loads(args.aggregate)\n\n # create separator only once to reduce model loading\n # when using multiple files\n separator = utils.load_separator(\n model_str_or_path=args.model,\n targets=args.targets,\n niter=args.niter,\n residual=args.residual,\n wiener_win_len=args.wiener_win_len,\n device=device,\n pretrained=True,\n filterbank=args.filterbank,\n )\n\n separator.freeze()\n separator.to(device)\n\n if args.audio_backend == \"stempeg\":\n try:\n import stempeg\n except ImportError:\n raise RuntimeError(\"Please install pip package `stempeg`\")\n\n # loop over the files\n for input_file in args.input:\n if args.audio_backend == \"stempeg\":\n audio, rate = stempeg.read_stems(\n input_file,\n start=args.start,\n duration=args.duration,\n sample_rate=separator.sample_rate,\n dtype=np.float32,\n )\n audio = torch.tensor(audio)\n else:\n audio, rate = data.load_audio(input_file, start=args.start, dur=args.duration)\n estimates = predict.separate(\n audio=audio,\n rate=rate,\n aggregate_dict=aggregate_dict,\n separator=separator,\n device=device,\n )\n if not args.outdir:\n model_path = Path(args.model)\n if not model_path.exists():\n outdir = Path(Path(input_file).stem + \"_\" + args.model)\n else:\n outdir = Path(Path(input_file).stem + \"_\" + model_path.stem)\n else:\n outdir = Path(args.outdir) / Path(input_file).stem\n outdir.mkdir(exist_ok=True, parents=True)\n\n # write out estimates\n if args.audio_backend == \"stempeg\":\n target_path = str(outdir / Path(\"target\").with_suffix(args.ext))\n # convert torch dict to numpy dict\n estimates_numpy = {}\n for target, estimate in estimates.items():\n estimates_numpy[target] = torch.squeeze(estimate).detach().cpu().numpy().T\n\n stempeg.write_stems(\n target_path,\n estimates_numpy,\n sample_rate=separator.sample_rate,\n writer=stempeg.FilesWriter(multiprocess=True, output_sample_rate=rate),\n )\n else:\n for target, estimate in estimates.items():\n target_path = str(outdir / Path(target).with_suffix(args.ext))\n torchaudio.save(\n target_path,\n torch.squeeze(estimate).to(\"cpu\"),\n sample_rate=separator.sample_rate,\n )\n" ]
[ [ "torch.device", "torch.squeeze", "torch.cuda.is_available", "torch.tensor" ] ]
jongtaeklee/dsmil-wsi
[ "e637f8295e0bc580e20569586b11ce69a75190c6" ]
[ "train_tcga.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nimport torchvision.transforms.functional as VF\nfrom torchvision import transforms\n\nimport sys, argparse, os, copy, itertools, glob, datetime\nimport pandas as pd\nimport numpy as np\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import roc_curve, roc_auc_score, precision_recall_fscore_support\nfrom sklearn.datasets import load_svmlight_file\nfrom collections import OrderedDict\n\ndef get_bag_feats(csv_file_df, args):\n if args.dataset == 'TCGA-lung-default':\n feats_csv_path = 'datasets/tcga-dataset/tcga_lung_data_feats/' + csv_file_df.iloc[0].split('/')[1] + '.csv'\n else:\n feats_csv_path = csv_file_df.iloc[0]\n df = pd.read_csv(feats_csv_path)\n feats = shuffle(df).reset_index(drop=True)\n feats = feats.to_numpy()\n label = np.zeros(args.num_classes)\n if args.num_classes==1:\n label[0] = csv_file_df.iloc[1]\n else:\n if int(csv_file_df.iloc[1])<=(len(label)-1):\n label[int(csv_file_df.iloc[1])] = 1\n \n return label, feats\n\ndef train(train_df, milnet, criterion, optimizer, args):\n csvs = shuffle(train_df).reset_index(drop=True)\n total_loss = 0\n bc = 0\n Tensor = torch.cuda.FloatTensor\n for i in range(len(train_df)):\n optimizer.zero_grad()\n label, feats = get_bag_feats(train_df.iloc[i], args)\n bag_label = Variable(Tensor([label]))\n bag_feats = Variable(Tensor([feats]))\n bag_feats = bag_feats.view(-1, args.feats_size)\n ins_prediction, bag_prediction, _, _ = milnet(bag_feats)\n max_prediction, _ = torch.max(ins_prediction, 0) \n bag_loss = criterion(bag_prediction.view(1, -1), bag_label.view(1, -1))\n max_loss = criterion(max_prediction.view(1, -1), bag_label.view(1, -1))\n loss = 0.5*bag_loss + 0.5*max_loss\n loss.backward()\n optimizer.step()\n total_loss = total_loss + loss.item()\n sys.stdout.write('\\r Training bag [%d/%d] bag loss: %.4f' % (i, len(train_df), loss.item()))\n return total_loss / len(train_df)\n\ndef test(test_df, milnet, criterion, optimizer, args):\n csvs = shuffle(test_df).reset_index(drop=True)\n total_loss = 0\n test_labels = []\n test_predictions = []\n Tensor = torch.cuda.FloatTensor\n with torch.no_grad():\n for i in range(len(test_df)):\n label, feats = get_bag_feats(test_df.iloc[i], args)\n bag_label = Variable(Tensor([label]))\n bag_feats = Variable(Tensor([feats]))\n bag_feats = bag_feats.view(-1, args.feats_size)\n ins_prediction, bag_prediction, _, _ = milnet(bag_feats)\n max_prediction, _ = torch.max(ins_prediction, 0) \n bag_loss = criterion(bag_prediction.view(1, -1), bag_label.view(1, -1))\n max_loss = criterion(max_prediction.view(1, -1), bag_label.view(1, -1))\n loss = 0.5*bag_loss + 0.5*max_loss\n total_loss = total_loss + loss.item()\n sys.stdout.write('\\r Testing bag [%d/%d] bag loss: %.4f' % (i, len(test_df), loss.item()))\n test_labels.extend([label])\n test_predictions.extend([(0.0*torch.sigmoid(max_prediction)+1.0*torch.sigmoid(bag_prediction)).squeeze().cpu().numpy()])\n test_labels = np.array(test_labels)\n test_predictions = np.array(test_predictions)\n auc_value, _, thresholds_optimal = multi_label_roc(test_labels, test_predictions, args.num_classes, pos_label=1)\n if args.num_classes==1:\n class_prediction_bag = test_predictions\n class_prediction_bag[class_prediction_bag>=thresholds_optimal[0]] = 1\n class_prediction_bag[class_prediction_bag<thresholds_optimal[0]] = 0\n test_predictions = class_prediction_bag\n test_labels = np.squeeze(test_labels)\n else: \n for i in range(args.num_classes):\n 
class_prediction_bag = test_predictions[:, i]\n class_prediction_bag[class_prediction_bag>=thresholds_optimal[i]] = 1\n class_prediction_bag[class_prediction_bag<thresholds_optimal[i]] = 0\n test_predictions[:, i] = class_prediction_bag\n bag_score = 0\n for i in range(0, len(test_df)):\n bag_score = np.array_equal(test_labels[i], test_predictions[i]) + bag_score \n avg_score = bag_score / len(test_df)\n \n return total_loss / len(test_df), avg_score, auc_value, thresholds_optimal\n\ndef multi_label_roc(labels, predictions, num_classes, pos_label=1):\n fprs = []\n tprs = []\n thresholds = []\n thresholds_optimal = []\n aucs = []\n if len(predictions.shape)==1:\n predictions = predictions[:, None]\n for c in range(0, num_classes):\n label = labels[:, c]\n prediction = predictions[:, c]\n fpr, tpr, threshold = roc_curve(label, prediction, pos_label=1)\n fpr_optimal, tpr_optimal, threshold_optimal = optimal_thresh(fpr, tpr, threshold)\n c_auc = roc_auc_score(label, prediction)\n aucs.append(c_auc)\n thresholds.append(threshold)\n thresholds_optimal.append(threshold_optimal)\n return aucs, thresholds, thresholds_optimal\n\ndef optimal_thresh(fpr, tpr, thresholds, p=0):\n loss = (fpr - tpr) - p * tpr / (fpr + tpr + 1)\n idx = np.argmin(loss, axis=0)\n return fpr[idx], tpr[idx], thresholds[idx]\n\ndef main():\n parser = argparse.ArgumentParser(description='Train DSMIL on 20x patch features learned by SimCLR')\n parser.add_argument('--num_classes', default=2, type=int, help='Number of output classes [2]')\n parser.add_argument('--feats_size', default=512, type=int, help='Dimension of the feature size [512]')\n parser.add_argument('--lr', default=0.0002, type=float, help='Initial learning rate [0.0002]')\n parser.add_argument('--num_epochs', default=200, type=int, help='Number of total training epochs [40|200]')\n parser.add_argument('--gpu_index', type=int, nargs='+', default=(0,), help='GPU ID(s) [0]')\n parser.add_argument('--weight_decay', default=5e-3, type=float, help='Weight decay [5e-3]')\n parser.add_argument('--dataset', default='TCGA-lung-default', type=str, help='Dataset folder name')\n parser.add_argument('--split', default=0.2, type=float, help='Training/Validation split [0.2]')\n parser.add_argument('--model', default='dsmil', type=str, help='MIL model [dsmil]')\n args = parser.parse_args()\n gpu_ids = tuple(args.gpu_index)\n os.environ['CUDA_VISIBLE_DEVICES']=','.join(str(x) for x in gpu_ids)\n \n if args.model == 'dsmil':\n import dsmil as mil\n elif args.model == 'abmil':\n import abmil as mil\n \n i_classifier = mil.FCLayer(in_size=args.feats_size, out_size=args.num_classes).cuda()\n b_classifier = mil.BClassifier(input_size=args.feats_size, output_class=args.num_classes).cuda()\n milnet = mil.MILNet(i_classifier, b_classifier).cuda()\n if args.model == 'dsmil':\n state_dict_weights = torch.load('init.pth')\n milnet.load_state_dict(state_dict_weights, strict=False)\n criterion = nn.BCEWithLogitsLoss()\n \n optimizer = torch.optim.Adam(milnet.parameters(), lr=args.lr, betas=(0.5, 0.9), weight_decay=args.weight_decay)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.num_epochs, 0.000005)\n \n if args.dataset == 'TCGA-lung-default':\n bags_csv = 'datasets/tcga-dataset/TCGA.csv'\n else:\n bags_csv = os.path.join('datasets', args.dataset, args.dataset+'.csv')\n \n bags_path = pd.read_csv(bags_csv)\n train_path = bags_path.iloc[0:int(len(bags_path)*(1-args.split)), :]\n test_path = bags_path.iloc[int(len(bags_path)*(1-args.split)):, :]\n best_score = 0\n 
save_path = os.path.join('weights', datetime.date.today().strftime(\"%m%d%Y\"))\n os.makedirs(save_path, exist_ok=True)\n run = len(glob.glob(os.path.join(save_path, '*.pth')))\n for epoch in range(1, args.num_epochs):\n train_path = shuffle(train_path).reset_index(drop=True)\n test_path = shuffle(test_path).reset_index(drop=True)\n train_loss_bag = train(train_path, milnet, criterion, optimizer, args) # iterate all bags\n test_loss_bag, avg_score, aucs, thresholds_optimal = test(test_path, milnet, criterion, optimizer, args)\n if args.dataset=='TCGA-lung':\n print('\\r Epoch [%d/%d] train loss: %.4f test loss: %.4f, average score: %.4f, auc_LUAD: %.4f, auc_LUSC: %.4f' % \n (epoch, args.num_epochs, train_loss_bag, test_loss_bag, avg_score, aucs[0], aucs[1]))\n else:\n print('\\r Epoch [%d/%d] train loss: %.4f test loss: %.4f, average score: %.4f, AUC: ' % \n (epoch, args.num_epochs, train_loss_bag, test_loss_bag, avg_score) + '|'.join('class-{}>>{}'.format(*k) for k in enumerate(aucs))) \n scheduler.step()\n current_score = (sum(aucs) + avg_score + 1 - test_loss_bag)/4\n if current_score >= best_score:\n best_score = current_score\n save_name = os.path.join(save_path, str(run+1)+'.pth')\n torch.save(milnet.state_dict(), save_name)\n if args.dataset=='TCGA-lung':\n print('Best model saved at: ' + save_name + ' Best thresholds: LUAD %.4f, LUSC %.4f' % (thresholds_optimal[0], thresholds_optimal[1]))\n else:\n print('Best model saved at: ' + save_name)\n print('Best thresholds ===>>> '+ '|'.join('class-{}>>{}'.format(*k) for k in enumerate(thresholds_optimal)))\n \n\nif __name__ == '__main__':\n main()" ]
[ [ "sklearn.metrics.roc_auc_score", "torch.sigmoid", "pandas.read_csv", "torch.max", "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.load", "numpy.array_equal", "sklearn.utils.shuffle", "numpy.squeeze", "sklearn.metrics.roc_curve", "torch.nn.BCEWithLogitsLoss", "numpy.argmin", "torch.no_grad", "numpy.array", "numpy.zeros" ] ]
Goschjann/ssltsc
[ "08d6b1bf711bb1c8f19f9bfb66a98d4e423e932e" ]
[ "ssltsc/models/utils.py" ]
[ "\"\"\"Utility functions for submodule 'models'\n\"\"\"\nfrom copy import deepcopy\nimport math\nimport numpy as np\nimport pandas as pd\nimport pdb\nimport torch\n\nfrom sklearn.metrics import log_loss, roc_auc_score, f1_score\nfrom uncertainty_metrics.numpy import ece\n\ndef ema_update(student, teacher, alpha=0.9, verbose=False):\n \"\"\"Update a teacher model based on the exponential moving average\n of its weights and that of the current studen model.\n\n Controlled by alpha \\\\in [0, 1] with\n * alpha -> 1: teacher = past teacher\n * alpha -> 0: teacher = student, std SGD training\n Args:\n student: the student model\n teacher: the teacher\n alpha: ema alpha rate\n verbose: {bool} for checking: with alpha = 0.0 this should print True\n only as weights from both models should be equal\n \"\"\"\n for teacher_param, student_param in zip(teacher.parameters(), student.parameters()):\n # alpha * theta'_t-1 + (1-a) * theta_t\n teacher_param.data.mul_(alpha).add_(student_param.data, alpha=1 - alpha)\n if verbose:\n print(teacher_param.data.equal(student_param.data))\n\n\nclass SigmoidScheduler:\n \"\"\"\"sigmoid rampup for learning rate as used in the\n mean teacher implement\n \"\"\"\n def __init__(self, optimizer, rampup_length):\n self.optimizer = optimizer\n self.rampup_length = rampup_length\n self.counter = 0\n self.init_lr = optimizer.param_groups[0]['lr']\n self.last_lr = 0.0\n\n def step(self):\n self.optimizer.param_groups[0]['lr'] = self.init_lr * rampup(self.counter, self.rampup_length)\n self.counter += 1\n self.last_lr = self.optimizer.param_groups[0]['lr']\n\n def get_last_lr(self):\n return [self.last_lr]\n\n\ndef rampup(current, rampup_length):\n \"\"\"sigmoid rampup\n \"\"\"\n if current < rampup_length:\n p = max(0.0, float(current)) / float(rampup_length)\n p = 1.0 - p\n return float(np.exp(-p * p * 5.0))\n else:\n return 1.0\n\n\ndef linear_rampup(step, rampup_length=10):\n \"\"\"linear rampup factor for the mixmatch model\n step = current step\n rampup_length = amount of steps till final weight\n \"\"\"\n if rampup_length == 0:\n return 1.0\n else:\n return float(np.clip(step / rampup_length, 0, 1))\n\n\ndef calculate_classification_metrics(pred_prob_y, true_y) -> dict:\n \"\"\"\n Wrapper to calculate all kinds of classification metrics\n which are then passed to the (mlflow) logger\n Args:\n pred_prob_y:\n true_y:\n Returns:\n A dictionary of metrics.\n \"\"\"\n assert pred_prob_y[:, 0].shape == true_y.shape\n idx_labelled = np.where(true_y != -1)[0]\n pred_prob_y = pred_prob_y[idx_labelled]\n true_y = true_y[idx_labelled]\n yhat_hard = pred_prob_y.argmax(axis=1)\n\n # catch the binary case\n if pred_prob_y.shape[1] == 2:\n pred_prob_y = pred_prob_y[:, 1]\n metrics = {}\n # explicitly add list of possible labels in case of too small batch sizes\n # catch binary case as well\n labels = np.arange(pred_prob_y.shape[1]) if len(pred_prob_y.shape) > 1 else np.arange(2)\n metrics['ece'] = ece(labels=true_y, probs=pred_prob_y, num_bins=30)\n metrics['accuracy'] = sum(yhat_hard == true_y) / len(true_y)\n metrics['cross_entropy'] = log_loss(y_true=true_y, y_pred=pred_prob_y, labels=labels)\n metrics['weighted_auc'] = roc_auc_score(y_true=true_y, y_score=pred_prob_y, average='weighted', multi_class='ovr', labels=labels)\n metrics['macro_auc'] = roc_auc_score(y_true=true_y, y_score=pred_prob_y, average='macro', multi_class='ovo', labels=labels)\n metrics['macro_f1'] = f1_score(y_true=true_y, y_pred=yhat_hard, average='macro', labels=labels)\n metrics['micro_f1'] = 
f1_score(y_true=true_y, y_pred=yhat_hard, average='micro', labels=labels)\n metrics['weighted_f1'] = f1_score(y_true=true_y, y_pred=yhat_hard, average='weighted', labels=labels)\n\n return metrics\n\n\ndef get_cosine_schedule_with_warmup(optimizer,\n num_warmup_steps,\n num_training_steps,\n num_cycles=7./16.,\n last_epoch=-1):\n def _lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n no_progress = float(current_step - num_warmup_steps) / \\\n float(max(1, num_training_steps - num_warmup_steps))\n return max(0., math.cos(math.pi * num_cycles * no_progress))\n\n return torch.optim.lr_scheduler.LambdaLR(optimizer, _lr_lambda, last_epoch)\n\n\nclass ModelEMA(object):\n def __init__(self, model, ema_decay, device):\n self.ema = deepcopy(model)\n self.ema.to(device)\n self.ema.eval()\n self.decay = ema_decay\n self.ema_has_module = hasattr(self.ema, 'module')\n # Fix EMA. https://github.com/valencebond/FixMatch_pytorch thank you!\n self.param_keys = [k for k, _ in self.ema.named_parameters()]\n self.buffer_keys = [k for k, _ in self.ema.named_buffers()]\n for p in self.ema.parameters():\n p.requires_grad_(False)\n\n def update(self, model):\n needs_module = hasattr(model, 'module') and not self.ema_has_module\n with torch.no_grad():\n msd = model.state_dict()\n esd = self.ema.state_dict()\n for k in self.param_keys:\n if needs_module:\n j = 'module.' + k\n else:\n j = k\n model_v = msd[j].detach()\n ema_v = esd[k]\n esd[k].copy_(ema_v * self.decay + (1. - self.decay) * model_v)\n\n for k in self.buffer_keys:\n if needs_module:\n j = 'module.' + k\n else:\n j = k\n esd[k].copy_(msd[j])\n\n\ndef interleave(x, size):\n s = list(x.shape)\n return x.reshape([-1, size] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])\n\n\ndef de_interleave(x, size):\n s = list(x.shape)\n return x.reshape([size, -1] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.reshape(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0)\n res.append(correct_k.mul_(1.0 / batch_size))\n return res\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\n Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262\n \"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n" ]
[ [ "sklearn.metrics.roc_auc_score", "torch.optim.lr_scheduler.LambdaLR", "numpy.clip", "numpy.arange", "sklearn.metrics.log_loss", "torch.no_grad", "sklearn.metrics.f1_score", "numpy.exp", "numpy.where" ] ]
evgenykurbatov/kb21-hotjup-migration-adv
[ "c346a6af8e0a79e69b619f3988db34e71efe131d" ]
[ "plot_wind.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport sys\nimport numpy as np\nfrom numpy import pi, sqrt, exp, sin, cos, tan, log, log10\n\nimport h5py\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Rectangle\n\nfrom aux import *\n\n\n\n##\n## Read simulation data\n##\n\nfrom dataclasses import dataclass\n\n@dataclass\nclass Snapshot:\n pass\n\n\ndef load(path, dotM_p_ref, alpha, beta, t_ini, a_ini):\n ss = Snapshot()\n ss.alpha = alpha\n ss.beta = beta\n ss.a_ini = a_ini\n\n fname = '%s/%.0e_%g_%g_%g_%.2f.h5' % (path, dotM_p_ref, alpha, beta, t_ini, a_ini)\n with h5py.File(fname, 'r') as f:\n ss.M_s = f['M_s'][()]\n ss.mu = f['mu'][()]\n ss.cs = f['cs'][()]\n ss.t = f['t'][:]\n ss.a = f['a'][:]\n ss.r_0 = f['r_0'][:]\n ss.Sigma_max = f['Sigma_max'][:]\n\n ss.H = ss.cs / sqrt(const.G*ss.M_s/ss.r_0**3)\n ss.N = ss.Sigma_max / (2*ss.H * ss.mu*const.m_H)\n\n return ss\n\n\nss = [ load('xray', dotM_p_ref=5e11, alpha=0.01, beta=1.5, t_ini=1e7, a_ini=a_ini)\n for a_ini in [0.2, 0.3, 0.4, 0.5] ]\n\nkappa_X = 2e-22 / const.m_p\n\n\n\n##\n## Read Parker model\n##\n\nparker_data = np.loadtxt('Parker1958.csv', delimiter=';')\nc = 12\nr = parker_data[:,c]\nN_w = parker_data[:,c+1]\nv_w = parker_data[:,c+2]\n\n\n\n##\n## Plot\n##\n\n## rc settings (see http://matplotlib.sourceforge.net/users/customizing.html#customizing-matplotlib)\nmpl.rc('font', family='serif')\nmpl.rc('font', size='6.0')\nmpl.rc('text', usetex=True)\nmpl.rc('lines', linewidth=0.75)\nmpl.rc('axes', linewidth=0.5)\nmpl.rc('legend', frameon=False)\nmpl.rc('legend', handlelength=2.5)\n\nfigwidth = 8.0 / 2.54 ## convert cm to in\nfigheight = 6.0 / 2.54 ## convert cm to in\nmpl.rc('figure', figsize=[figwidth, figheight])\n\n## [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd', u'#8c564b', u'#e377c2', u'#7f7f7f', u'#bcbd22', u'#17becf']\n\n#dashes = [ [], [6,2], [6,2,1,2], [1,2] ]\ndashes = [ [], [8,3], [8,2,1,2], [4,2] ]\n\nfig, ax = plt.subplots(nrows=1, ncols=1)\n\nfig.suptitle(r\"$\\alpha = 0.01$, $\\beta = 1.5$\")\n\n\nax_ = ax\nfor i, ss_ in enumerate(ss):\n cond = ss_.t/const.yr > 1.001e7\n #ax_.plot(ss_.r_0[cond]/const.AU, (ss_.N*cs**2)[cond], dashes=dashes[i],\n # label=(r\"$a_\\mathrm{ini} = %g$ AU\" % ss_.a_ini))\n ax_.plot(ss_.r_0[cond]/const.AU, (const.m_H*ss_.N*cs**2 / (1e-3*const.mbar))[cond])\n#ax_.plot(r/const.AU, N_w*v_w**2, '-k', label=r\"Parker wind\")\nax_.plot(r/const.AU, const.m_p*N_w*v_w**2 / (1e-3*const.mbar), '-k')\nax_.legend()\nax_.set_xscale('log')\nax_.set_xlim(0.047, 1.0)\nax_.set_xlabel(r\"$a$ [AU]\")\nax_.set_yscale('log')\n#ax_.set_ylabel(r\"[g cm$^{-1}$ s$^{-2}$]\")\nax_.set_ylabel(r\"[$\\mu$bar]\")\n\n\nplt.tight_layout()\nplt.savefig('wind.pdf')\n" ]
[ [ "matplotlib.pyplot.tight_layout", "numpy.sqrt", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "numpy.loadtxt", "matplotlib.rc" ] ]
Pratyush2703/DensityPeakCluster
[ "958b4b62cbbc36f8651211a067bb0e065c29004a" ]
[ "cluster.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport sys\nimport math\nimport logging\nimport numpy as np\n\nlogger = logging.getLogger(\"dpc_cluster\")\n\n\ndef load_paperdata(distance_f):\n '''\n Load distance from data\n\n Args:\n distance_f : distance file, the format is column1-index 1, column2-index 2, column3-distance\n\n Returns:\n distances dict, max distance, min distance, max continues id\n '''\n logger.info(\"PROGRESS: load data\")\n distances = {}\n min_dis, max_dis = sys.float_info.max, 0.0\n max_id = 0\n with open(distance_f, 'r') as fp:\n for line in fp:\n x1, x2, d = line.strip().split(' ')\n x1, x2 = int(x1), int(x2)\n max_id = max(max_id, x1, x2)\n dis = float(d)\n if dis > 0.001:\n min_dis = min(min_dis, dis)\n max_dis = max(max_dis, dis)\n distances[(x1, x2)] = float(d)\n distances[(x2, x1)] = float(d)\n for i in xrange(max_id):\n distances[(i, i)] = 0.0\n logger.info(\"PROGRESS: load end\")\n return distances, max_dis, min_dis, max_id\n\n\ndef select_dc(max_id, max_dis, min_dis, distances, auto=False):\n '''\n Select the local density threshold, default is the method used in paper, auto is `autoselect_dc`\n\n Args:\n max_id : max continues id\n max_dis : max distance for all points\n min_dis : min distance for all points\n distances : distance dict\n auto : use auto dc select or not\n\n Returns:\n dc that local density threshold\n '''\n logger.info(\"PROGRESS: select dc\")\n if auto:\n return autoselect_dc(max_id, max_dis, min_dis, distances)\n percent = 2.0\n position = int(max_id * (max_id + 1) / 2 * percent / 100)\n dc = sorted(distances.values())[position * 2 + max_id]\n if dc == 0:\n zero = np.nonzero(sorted(distances.values()))\n dc = sorted(distances.values())[position * 2 + max_id + zero[0][0]]\n logger.info(\"PROGRESS: dc - \" + str(dc))\n return dc\n\n\ndef autoselect_dc(max_id, max_dis, min_dis, distances):\n '''\n Auto select the local density threshold that let average neighbor is 1-2 percent of all nodes.\n\n Args:\n max_id : max continues id\n max_dis : max distance for all points\n min_dis : min distance for all points\n distances : distance dict\n\n Returns:\n dc that local density threshold\n '''\n dc = (max_dis + min_dis) / 2\n\n while True:\n nneighs = sum([1 for v in distances.values() if v < dc]) / max_id ** 2\n if nneighs >= 0.01 and nneighs <= 0.002:\n break\n # binary search\n if nneighs < 0.01:\n min_dis = dc\n else:\n max_dis = dc\n dc = (max_dis + min_dis) / 2\n if max_dis - min_dis < 0.0001:\n break\n return dc\n\n\ndef local_density(max_id, distances, dc, guass=True, cutoff=False):\n '''\n Compute all points' local density\n\n Args:\n max_id : max continues id\n distances : distance dict\n gauss : use guass func or not(can't use together with cutoff)\n cutoff : use cutoff func or not(can't use together with guass)\n\n Returns:\n local density vector that index is the point index that start from 1\n '''\n assert guass and cutoff == False and guass or cutoff == True\n logger.info(\"PROGRESS: compute local density\")\n def guass_func(dij, dc): return math.exp(- (dij / dc) ** 2)\n def cutoff_func(dij, dc): return 1 if dij < dc else 0\n func = guass and guass_func or cutoff_func\n rho = [-1] + [0] * max_id\n for i in xrange(1, max_id):\n for j in xrange(i + 1, max_id + 1):\n rho[i] += func(distances[(i, j)], dc)\n rho[j] += func(distances[(i, j)], dc)\n if i % (max_id / 10) == 0:\n logger.info(\"PROGRESS: at index #%i\" % (i))\n return np.array(rho, np.float32)\n\n\ndef min_distance(max_id, max_dis, distances, rho):\n '''\n Compute all 
points' min distance to the higher local density point(which is the nearest neighbor)\n\n Args:\n max_id : max continues id\n max_dis : max distance for all points\n distances : distance dict\n rho : local density vector that index is the point index that start from 1\n\n Returns:\n min_distance vector, nearest neighbor vector\n '''\n logger.info(\"PROGRESS: compute min distance to nearest higher density neigh\")\n sort_rho_idx = np.argsort(-rho)\n delta, nneigh = [0.0] + [float(max_dis)] * (len(rho) - 1), [0] * len(rho)\n delta[sort_rho_idx[0]] = -1.\n for i in xrange(1, max_id):\n for j in xrange(0, i):\n old_i, old_j = sort_rho_idx[i], sort_rho_idx[j]\n if distances[(old_i, old_j)] < delta[old_i]:\n delta[old_i] = distances[(old_i, old_j)]\n nneigh[old_i] = old_j\n if i % (max_id / 10) == 0:\n logger.info(\"PROGRESS: at index #%i\" % (i))\n delta[sort_rho_idx[0]] = max(delta)\n return np.array(delta, np.float32), np.array(nneigh, np.float32)\n\n\nclass DensityPeakCluster(object):\n def local_density(self, load_func, distance_f, dc=None, auto_select_dc=False):\n '''\n Just compute local density\n\n Args:\n load_func : load func to load data\n distance_f : distance data file\n dc : local density threshold, call select_dc if dc is None\n autoselect_dc : auto select dc or not\n\n Returns:\n distances dict, max distance, min distance, max index, local density vector\n '''\n assert not (dc != None and auto_select_dc)\n distances, max_dis, min_dis, max_id = load_func(distance_f)\n if dc == None:\n dc = select_dc(max_id, max_dis, min_dis,\n distances, auto=auto_select_dc)\n rho = local_density(max_id, distances, dc)\n return distances, max_dis, min_dis, max_id, rho, dc\n\n def cluster(self, load_func, distance_f, density_threshold, distance_threshold, dc=None, auto_select_dc=False):\n '''\n Cluster the data\n\n Args:\n load_func : load func to load data\n distance_f : distance data file\n dc : local density threshold, call select_dc if dc is None\n density_threshold : local density threshold for choosing cluster center\n distance_threshold : min distance threshold for choosing cluster center\n autoselect_dc : auto select dc or not\n\n Returns:\n local density vector, min_distance vector, nearest neighbor vector\n '''\n assert not (dc != None and auto_select_dc)\n distances, max_dis, min_dis, max_id, rho, dc = self.local_density(\n load_func, distance_f, dc=dc, auto_select_dc=auto_select_dc)\n delta, nneigh = min_distance(max_id, max_dis, distances, rho)\n logger.info(\"PROGRESS: start cluster\")\n cluster, ccenter = {}, {} # cl/icl in cluster_dp.m\n\n for idx, (ldensity, mdistance, nneigh_item) in enumerate(zip(rho, delta, nneigh)):\n if idx == 0:\n continue\n if ldensity >= density_threshold and mdistance >= distance_threshold:\n ccenter[idx] = idx\n cluster[idx] = idx\n else:\n cluster[idx] = -1\n\n # assignation\n ordrho = np.argsort(-rho)\n for i in range(ordrho.shape[0]-1):\n if ordrho[i] == 0:\n continue\n if cluster[ordrho[i]] == -1:\n cluster[ordrho[i]] = cluster[nneigh[ordrho[i]]]\n if i % (max_id / 10) == 0:\n logger.info(\"PROGRESS: at index #%i\" % (i))\n\n # halo\n halo, bord_rho = {}, {}\n for i in range(1, ordrho.shape[0]):\n halo[i] = cluster[i]\n if len(ccenter) > 0:\n for idx in ccenter.keys():\n bord_rho[idx] = 0.0\n for i in range(1, rho.shape[0]-1):\n for j in range(i + 1, rho.shape[0]):\n if cluster[i] != cluster[j] and distances[i, j] <= dc:\n rho_aver = (rho[i] + rho[j]) / 2.0\n if rho_aver > bord_rho[cluster[i]]:\n bord_rho[cluster[i]] = rho_aver\n if rho_aver > 
bord_rho[cluster[j]]:\n bord_rho[cluster[j]] = rho_aver\n for i in range(1, rho.shape[0]):\n if rho[i] < bord_rho[cluster[i]]:\n halo[i] = 0\n for i in range(1, rho.shape[0]):\n if halo[i] == 0:\n cluster[i] = - 1\n\n self.cluster, self.ccenter = cluster, ccenter\n self.distances = distances\n self.max_id = max_id\n logger.info(\"PROGRESS: ended\")\n return rho, delta, nneigh\n" ]
[ [ "numpy.argsort", "numpy.array" ] ]
cgalaz01/acur_mri
[ "3be85e62a3362f9498169edb87c233e4da35ddd8" ]
[ "functions/cataloguing/split.py" ]
[ "import os\nimport re\n\nfrom typing import Any, Dict, Union\nfrom pathlib import Path\n\nimport pydicom\n\nimport plotly.graph_objects as go\n\n\nclass MetadataSplit():\n \n __dicom_tags = ['MRAcquisitionType', 'SliceThickness', 'RepetitionTime', 'EchoTime',\n 'NumberOfAverages', 'SpacingBetweenSlices', 'EchoTrainLength',\n 'FlipAngle', 'PixelSpacing', 'AcquisitionMatrix', 'Rows', 'Columns',\n 'MagneticFieldStrength', 'CardiacNumberOfImages']\n \n \n def __init__(self):\n self.entries = {}\n \n \n @staticmethod\n def get_tag_info(dicom_directory: Union[str, Path]) -> Dict[str, Any]:\n filename = os.listdir(dicom_directory)[0]\n dataset = pydicom.dcmread(os.path.join(dicom_directory, filename))\n \n tags = {}\n for tag_key in MetadataSplit.__dicom_tags:\n try:\n tags[tag_key] = dataset.data_element(tag_key).value\n except:\n tags[tag_key] = None\n \n #if tags['SpacingBetweenSlices'] is None:\n # tags['SpacingBetweenSlices'] = tags['SliceThickness']\n \n return tags\n \n \n @staticmethod\n def decouple_tags(entry_values: Dict) -> Dict:\n expanded_values = {}\n \n for tag_key in MetadataSplit.__dicom_tags:\n if tag_key == 'MRAcquisitionType':\n expanded_values[tag_key] = re.findall(r'\\d+', entry_values[tag_key])[0]\n elif tag_key == 'PixelSpacing':\n new_key = tag_key + 'Row'\n expanded_values[new_key] = entry_values[tag_key][0]\n new_key = tag_key + 'Column'\n expanded_values[new_key] = entry_values[tag_key][1]\n elif tag_key == 'AcquisitionMatrix':\n row = entry_values['AcquisitionMatrix'][0] if entry_values['AcquisitionMatrix'][0] != 0 else entry_values['AcquisitionMatrix'][2]\n column = entry_values['AcquisitionMatrix'][1] if entry_values['AcquisitionMatrix'][1] != 0 else entry_values['AcquisitionMatrix'][3]\n pixel_size_row = entry_values['Rows'] * entry_values['PixelSpacing'][0] / row\n pixel_size_column = entry_values['Columns'] * entry_values['PixelSpacing'][1] / column\n \n expanded_values['PixelSizeRow'] = pixel_size_row \n expanded_values['PixelSizeColumn'] = pixel_size_column\n elif tag_key == 'Rows' or tag_key == 'Columns':\n continue\n else:\n expanded_values[tag_key] = entry_values[tag_key]\n \n return expanded_values\n \n \n @staticmethod\n def is_similar(entry_a: Dict[str, Any], entry_b: Dict[str, Any]) -> bool:\n def is_any_null(value_a: Any, value_b: Any) -> bool:\n if value_a is None or value_b is None:\n return True\n \n def is_both_null(value_a: Any, value_b: Any) -> bool:\n if value_a is None and value_b is None:\n return True\n \n if entry_a['MRAcquisitionType'] != entry_b['MRAcquisitionType']:\n return False\n \n if not is_both_null(entry_a['SliceThickness'], entry_b['SliceThickness']):\n if (is_any_null(entry_a['SliceThickness'], entry_b['SliceThickness']) or\n abs(entry_a['SliceThickness'] - entry_b['SliceThickness']) > 0.5):\n return False\n \n if not is_both_null(entry_a['RepetitionTime'], entry_b['RepetitionTime']):\n if (is_any_null(entry_a['RepetitionTime'], entry_b['RepetitionTime']) or\n abs(entry_a['RepetitionTime'] - entry_b['RepetitionTime']) > 0.2):\n return False\n \n if not is_both_null(entry_a['EchoTime'], entry_b['EchoTime']):\n if (is_any_null(entry_a['EchoTime'], entry_b['EchoTime']) or\n abs(entry_a['EchoTime'] - entry_b['EchoTime']) > 0.1):\n return False\n \n if entry_a['NumberOfAverages'] != entry_b['NumberOfAverages']:\n return False\n \n if not is_both_null(entry_a['SpacingBetweenSlices'], entry_b['SpacingBetweenSlices']):\n if (is_any_null(entry_a['SpacingBetweenSlices'], entry_b['SpacingBetweenSlices']) or\n 
abs(entry_a['SpacingBetweenSlices'] - entry_b['SpacingBetweenSlices']) > 0.5):\n return False\n \n if not is_both_null(entry_a['EchoTrainLength'], entry_b['EchoTrainLength']):\n if (is_any_null(entry_a['EchoTrainLength'], entry_b['EchoTrainLength']) or\n abs(entry_a['EchoTrainLength'] - entry_b['EchoTrainLength']) > 5):\n return False\n \n if not is_both_null(entry_a['FlipAngle'], entry_b['FlipAngle']):\n if (is_any_null(entry_a['FlipAngle'], entry_b['FlipAngle']) or\n abs(entry_a['FlipAngle'] - entry_b['FlipAngle']) > 10):\n return False\n \n if not is_both_null(entry_a['PixelSpacing'], entry_b['PixelSpacing']):\n if is_any_null(entry_a['PixelSpacing'], entry_b['PixelSpacing']):\n return False\n \n if abs(entry_a['PixelSpacing'][0] - entry_b['PixelSpacing'][0]) > 0.1:\n return False\n \n if abs(entry_a['PixelSpacing'][1] - entry_b['PixelSpacing'][1]) > 0.1:\n return False\n \n if not is_both_null(entry_a['AcquisitionMatrix'], entry_b['AcquisitionMatrix']):\n if is_any_null(entry_a['AcquisitionMatrix'], entry_b['AcquisitionMatrix']):\n return False\n \n row_a = entry_a['AcquisitionMatrix'][0] if entry_a['AcquisitionMatrix'][0] != 0 else entry_a['AcquisitionMatrix'][2]\n row_b = entry_b['AcquisitionMatrix'][0] if entry_b['AcquisitionMatrix'][0] != 0 else entry_b['AcquisitionMatrix'][2]\n \n pixel_size_row_a = entry_a['Rows'] * entry_a['PixelSpacing'][0] / row_a\n pixel_size_row_b = entry_b['Rows'] * entry_b['PixelSpacing'][0] / row_b\n \n if abs(pixel_size_row_a - pixel_size_row_b) > 0.5:\n return False\n \n column_a = entry_a['AcquisitionMatrix'][1] if entry_a['AcquisitionMatrix'][1] != 0 else entry_a['AcquisitionMatrix'][3]\n column_b = entry_b['AcquisitionMatrix'][1] if entry_b['AcquisitionMatrix'][1] != 0 else entry_b['AcquisitionMatrix'][3]\n \n pixel_size_column_a = entry_a['Columns'] * entry_a['PixelSpacing'][1] / column_a\n pixel_size_column_b = entry_b['Columns'] * entry_b['PixelSpacing'][1] / column_b\n \n if abs(pixel_size_column_a - pixel_size_column_b) > 0.5:\n return False\n \n if entry_a['MagneticFieldStrength'] != entry_b['MagneticFieldStrength']:\n return False\n \n if entry_a['CardiacNumberOfImages'] != entry_b['CardiacNumberOfImages']:\n return False\n \n return True\n \n \n def get_split_id(self, type_label: str, anatomy_label: Union[str, None],\n metadata: Dict[str, Any]) -> int:\n \n #key = type_label + '+' + ('' if anatomy_label is None else anatomy_label)\n key = type_label + ('' if anatomy_label is None else '_' + anatomy_label)\n \n split_id = -1\n \n if key in self.entries:\n entry_list = self.entries[key]\n for i in range(len(entry_list)):\n if self.is_similar(metadata, entry_list[i]['values']):\n entry_list[i]['count'] += 1\n split_id = i + 1\n return split_id\n \n new_entry = {'values': metadata, 'count': 1}\n entry_list.append(new_entry)\n split_id = len(entry_list)\n else:\n entry = [{'values': metadata, 'count': 1}]\n self.entries[key] = entry\n split_id = 1 \n \n return split_id\n \n \n def visualise_split(self, output_path: Union[str, Path]) -> None:\n \n if output_path is None:\n output_path = Path('.')\n new_output_path = os.path.join(output_path, 'splits')\n os.makedirs(new_output_path, exist_ok=True)\n \n for key, values in self.entries.items():\n label = [key]\n color = ['blue']\n source = []\n target = []\n value = []\n for i in range(len(values)):\n label.append(str(i + 1))\n color.append('red')\n source.append(0)\n target.append(i + 1)\n value.append(values[i]['count'])\n \n fig = go.Figure(data=[go.Sankey(\n node = dict(\n pad = 15,\n thickness = 
10,\n line = dict(color = \"black\", width = 0.25),\n label = label,\n color = color\n ),\n link = dict(\n source = source, # indices correspond to labels, eg A1, A2, A1, B1, ...\n target = target,\n value = value\n ))])\n \n fig.update_layout(title_text=key + ' Breakdown', font_size=14)\n \n fig.write_html(os.path.join(new_output_path, key + '.html'))\n \n import matplotlib.pyplot as plt\n import numpy as np\n \n key = 'CINE_LVSA'\n values = self.entries[key]\n data = []\n for i in values:\n data.append(i['count'])\n \n print(data)\n fig = plt.figure()\n plt.title('CINE LVSA Groups Split')\n plt.xlabel('Group Index')\n plt.ylabel('Total Datasets')\n #plt.hist(data, bins=range(len(data), len(data) + 1, 1))\n \n plt.bar(np.arange(len(data)), data)\n fig.savefig('catalogue_output\\\\cine_lvsa_groups.png', bbox_inches='tight')\n \n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.title", "matplotlib.pyplot.figure" ] ]
scc-usc/ReCOVER-COVID-19
[ "29d5872d5da0aa3621c520f1edbac510ae99b8df" ]
[ "results/format-covid-forecast/format_data_county_case.py" ]
[ "import math\nimport datetime\nimport pytz\nimport pandas as pd\nimport csv\nimport urllib.request\nimport io\n\nFORECAST_DATE = datetime.datetime.now(pytz.timezone('US/Pacific'))\nFORECAST_DATE = FORECAST_DATE.replace(tzinfo=None)\nfor i in range(0, 8):\n if FORECAST_DATE.weekday() == 6:\n break\n FORECAST_DATE -= datetime.timedelta(1)\n# FIRST_WEEK is the first Saturday after forecast date.\nFIRST_WEEK = FORECAST_DATE + datetime.timedelta(6)\n# for i in range(0, 8):\n# if FIRST_WEEK.weekday() == 5:\n# break\n# FIRST_WEEK += datetime.timedelta(1)\nINPUT_FILENAME = \"county_forecasts_quarantine_0.csv\"\nOUTPUT_FILENAME = FORECAST_DATE.strftime(\"%Y-%m-%d\") + \"-USC-SI_kJalpha.csv\"\nCOLUMNS = [\"forecast_date\", \"target\", \"target_end_date\", \"location\", \"type\", \"quantile\", \"value\"]\nID_REGION_MAPPING = {}\n\ndef load_id_region_mapping():\n \"\"\"\n Return a mapping of <region id, region name>.\n \"\"\"\n\n MAPPING_CSV = \"./locations.csv\"\n with open(MAPPING_CSV) as f:\n reader = csv.reader(f)\n id_region_mapping = {}\n\n # Skip the header\n next(reader)\n\n for row in reader:\n region_id = row[1]\n region_name = row[2]\n id_region_mapping[region_id] = region_name\n\n return id_region_mapping\n\n\ndef load_truth_cumulative_cases():\n dataset = {}\n with open(\"county_data.csv\") as f:\n reader = csv.reader(f)\n header = next(reader, None)\n\n for row in reader:\n region_id = row[1].strip().zfill(5)\n if region_id not in ID_REGION_MAPPING:\n continue\n date = header[-1]\n val = int(row[-1])\n if date not in dataset:\n dataset[date] = {}\n\n dataset[date][region_id] = val\n\n return dataset\n\n\ndef load_csv(input_filename):\n \"\"\"\n Read our forecast reports and return a dictionary structuring of <date_str, <region_id, value>>\n e.g.\n {\n \"2020-06-22\": {\n '10': 2000.0,\n '11': 3000.0,\n ...\n },\n\n \"2020-06-23\": {\n '10': 800.0,\n '11': 900.0,\n ...\n },\n ...\n }\n \"\"\"\n dataset = {}\n with open(input_filename) as f:\n reader = csv.reader(f)\n header = next(reader, None)\n\n for i in range(2, len(header)):\n date_str = header[i]\n # Initialize the dataset entry on each date.\n dataset[date_str] = {}\n\n for row in reader:\n region_id = row[1].strip().zfill(5)\n\n # Skip the region if it is not listed in reichlab's region list.\n if region_id not in ID_REGION_MAPPING:\n continue\n\n for i in range(2, len(header)):\n date_str = header[i]\n val = float(row[i])\n if math.isnan(val) or val < 0:\n val = 0\n dataset[date_str][region_id] = val\n\n return dataset\n\n\ndef generate_new_row(forecast_date, target, target_end_date,\n location, type, quantile, value):\n \"\"\"\n Return a new row to be added to the pandas dataframe.\n \"\"\"\n new_row = {}\n new_row[\"forecast_date\"] = forecast_date\n new_row[\"target\"] = target\n new_row[\"target_end_date\"] = target_end_date\n new_row[\"location\"] = location\n new_row[\"type\"] = type\n new_row[\"quantile\"] = quantile\n new_row[\"value\"] = value\n return new_row\n\n\n\ndef add_to_dataframe(dataframe, forecast, observed):\n \"\"\"\n Given a dataframe, forecast, and observed data,\n add county level weekly incident cases predictions to the dataframe.\n \"\"\"\n\n # Write incident forecasts.\n cum_week = 0\n forecast_date_str = FORECAST_DATE.strftime(\"%Y-%m-%d\")\n for target_end_date_str in sorted(forecast.keys()):\n target_end_date = datetime.datetime.strptime(target_end_date_str, \"%Y-%m-%d\")\n # Terminate the loop after 8 weeks of forecasts.\n if cum_week >= 8:\n break\n\n # Skip forecasts before the forecast date.\n 
if target_end_date <= FORECAST_DATE:\n continue\n\n if (target_end_date_str == FIRST_WEEK.strftime(\"%Y-%m-%d\")) or \\\n (target_end_date > FIRST_WEEK and target_end_date.weekday() == 5):\n cum_week += 1\n target = str(cum_week) + \" wk ahead inc case\"\n last_week_date = target_end_date - datetime.timedelta(7)\n last_week_date_str = last_week_date.strftime(\"%Y-%m-%d\")\n\n if last_week_date_str in observed:\n for region_id in forecast[target_end_date_str].keys():\n if region_id in observed[last_week_date_str]:\n dataframe = dataframe.append(\n generate_new_row(\n forecast_date=forecast_date_str,\n target=target,\n target_end_date=target_end_date_str,\n location=region_id,\n type=\"point\",\n quantile=\"NA\",\n value=max(forecast[target_end_date_str][region_id]-observed[last_week_date_str][region_id], 0)\n ), ignore_index=True)\n\n elif last_week_date_str in forecast:\n for region_id in forecast[target_end_date_str].keys():\n dataframe = dataframe.append(\n generate_new_row(\n forecast_date=forecast_date_str,\n target=target,\n target_end_date=target_end_date_str,\n location=region_id,\n type=\"point\",\n quantile=\"NA\",\n value=max(forecast[target_end_date_str][region_id]-forecast[last_week_date_str][region_id], 0)\n ), ignore_index=True)\n\n return dataframe\n\n\n# Main function\nif __name__ == \"__main__\":\n ID_REGION_MAPPING = load_id_region_mapping()\n print(\"loading forecast...\")\n forecast = load_csv(INPUT_FILENAME)\n observed = load_truth_cumulative_cases()\n dataframe = pd.read_csv(OUTPUT_FILENAME, na_filter=False)\n dataframe = add_to_dataframe(dataframe, forecast, observed)\n print(\"writing files...\")\n dataframe.to_csv(OUTPUT_FILENAME, index=False)\n print(\"done\")\n" ]
[ [ "pandas.read_csv" ] ]
smallporridge/Socialformer
[ "a2e26e2c4d0fc6e355e46f3a1098f0bfa23ce0d6" ]
[ "dataprocess/subgraph.py" ]
[ "import numpy as np\nfrom torch import index_put_\n\n'''\nremove subgraph_edges\n'''\ndef get_subgraph_edge(psg,graph_matrix,subgraph_num=8,max_subgraph_node=128):\n '''\n Args:\n psg: [list] a list consisting of token_ids representing the original long passage\n graph_matrix: [numpy.ndarray] the adjacency matrix to seperate\n subgraph_num: [int] the number of subgraphs you want\n max_subgraph_node:[int] the max node number of a subgraph\n Return:\n a list consisting of subgraphs, and each subgraph is represented by a list like the input psg\n '''\n node_num=graph_matrix.shape[0]\n subgraph_list = [] \n\n # the passage is too short to split into the given number of subgraphs\n if subgraph_num>node_num:\n for i in range(node_num):\n subgraph_list.append(psg[i])\n for i in range(subgraph_num-1):\n subgraph_list.append([])\n return subgraph_list\n\n # print sparsity of the original graph\n print(\"sparsity:\",np.sum(graph_matrix)/node_num/node_num)\n\n # remove self_loop\n for i in range(node_num):\n graph_matrix[i][i]=0\n \n # extract subgraphs\n for _ in range(subgraph_num): \n # no available node left\n if np.sum(graph_matrix)==0:\n subgraph_list.append([])\n continue \n\n degree=[np.sum(graph_matrix[i]) for i in range(node_num)]\n\n # get node with the biggest degree\n cur_node=degree.index(max(degree))\n\n # get all the neighboring nodes of cur_node\n subgraph=[cur_node]\n for i in range(node_num):\n if graph_matrix[cur_node][i]==1 and i!=cur_node:\n subgraph.append(i)\n if len(subgraph)>=max_subgraph_node-1: break\n\n # delete the select subgraph from original graph\n for i in range(len(subgraph)):\n for j in range(i,len(subgraph)):\n u,v=subgraph[i],subgraph[j]\n if graph_matrix[u][v]==1:\n # delete the edge from the original graph\n graph_matrix[u][v]=0\n graph_matrix[v][u]=0\n\n # convert the index of node into corresponding token_id \n subgraph_list.append([psg[idx] for idx in subgraph])\n return subgraph_list\n\n'''\nremove subgraph_node\n'''\ndef get_subgraph_node(psg,graph_matrix,subgraph_num=8,max_subgraph_node=128):\n '''\n Args:\n psg: [list] a list consisting of token_ids representing the original long passage\n graph_matrix: [numpy.ndarray] the adjacency matrix to seperate\n subgraph_num: [int] the number of subgraphs you want\n max_subgraph_node:[int] the max node number of a subgraph\n Return:\n a list consisting of subgraphs, and each subgraph is represented by a list like the input psg\n '''\n node_num=graph_matrix.shape[0]\n subgraph_list = [] \n\n # the passage is too short to split into the given number of subgraphs\n if subgraph_num>node_num:\n for i in range(node_num):\n subgraph_list.append(psg[i])\n for i in range(subgraph_num-1):\n subgraph_list.append([])\n return subgraph_list\n\n # print sparsity of the original graph\n print(\"sparsity:\",np.sum(graph_matrix)/node_num/node_num)\n\n # remove self_loop\n for i in range(node_num):\n graph_matrix[i][i]=0\n\n degree=[np.sum(graph_matrix[i]) for i in range(node_num)]\n\n # extract subgraphs\n for _ in range(subgraph_num): \n # no available node left\n if np.sum(graph_matrix)==0:\n subgraph_list.append([])\n continue \n\n degree=[np.sum(graph_matrix[i]) for i in range(node_num)]\n\n # get node with the biggest degree\n cur_node=degree.index(max(degree))\n subgraph=[cur_node]\n for i in range(node_num):\n if graph_matrix[cur_node][i]==1 and i!=cur_node: \n if len(subgraph)<max_subgraph_node: \n # delete the node from the original graph\n graph_matrix[:,i]=0\n graph_matrix[i,:]=0 \n subgraph.append(i)\n \n 
subgraph_list.append([psg[idx] for idx in subgraph])\n\n return subgraph_list\n\n\n\n" ]
[ [ "numpy.sum" ] ]
SymbiFlow/prjuray
[ "bd446a50d94498829a25170ed342c32944f1d807" ]
[ "utils/spec/rclk_int_8.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2020 The Project U-Ray Authors.\n#\n# Use of this source code is governed by a ISC-style\n# license that can be found in the LICENSE file or at\n# https://opensource.org/licenses/ISC\n#\n# SPDX-License-Identifier: ISC\n\nimport numpy as np\n\nfrom utils import util\nfrom utils.clock_utils import ClockColumns, GlobalClockBuffers\nfrom prjuray.db import Database\n\n\ndef gen_sites(grid):\n for tile_name in sorted(grid.tiles()):\n loc = grid.loc_of_tilename(tile_name)\n gridinfo = grid.gridinfo_at_loc(loc)\n\n for site, site_type in gridinfo.sites.items():\n if site_type in ['SLICEM', 'SLICEL']:\n yield site\n\n\ndef print_top(seed):\n np.random.seed(seed)\n\n db = Database(util.get_db_root(), util.get_part())\n grid = db.grid()\n\n clocks = ClockColumns(grid)\n\n disabled_columns = set()\n for key in clocks.columns():\n if np.random.choice([1, 0], p=[.25, .75]):\n disabled_columns.add(key)\n\n clocks.remove_column(disabled_columns)\n\n site_to_site_type = {}\n site_to_tile = {}\n for tile_name in sorted(grid.tiles()):\n loc = grid.loc_of_tilename(tile_name)\n gridinfo = grid.gridinfo_at_loc(loc)\n\n for site, site_type in gridinfo.sites.items():\n site_to_site_type[site] = site_type\n site_to_tile[site] = tile_name\n\n bufgs = GlobalClockBuffers('../bufg_outputs.csv')\n\n slices = sorted(gen_sites(grid))\n np.random.shuffle(slices)\n\n with open('complete_top.tcl', 'w') as f:\n #print('place_design -directive Quick', file=f)\n print(\n \"\"\"\nset_property CLOCK_DEDICATED_ROUTE FALSE [get_nets]\nplace_design\nroute_design\n\"\"\",\n file=f)\n\n print(\"\"\"\nmodule top();\n \"\"\")\n\n N_LUTS = 4\n\n ce_inputs = [0, 1]\n for idx in range(N_LUTS):\n\n print(\"\"\"\n wire lut_o_{idx};\n (* BEL=\"A6LUT\", LOC=\"{loc}\", KEEP, DONT_TOUCH *) LUT5 lut{idx} (.O(lut_o_{idx}));\n \"\"\".format(idx=idx, loc=slices.pop()))\n\n ce_inputs.append('lut_o_{}'.format(idx))\n\n N_GCLK = 1\n bufg_wires = {}\n bufg_str = {}\n for idx in range(N_GCLK):\n bufg, hroute_number = bufgs.random_bufg(np.random.choice)\n site_type = site_to_site_type[bufg]\n\n if site_type in ['BUFGCE', 'BUFGCE_HDIO']:\n s = \"\"\"\n wire bufg_o_{idx};\n (* LOC=\"{loc}\", KEEP, DONT_TOUCH *) BUFGCE #(\n .IS_CE_INVERTED({invert_ce}),\n .CE_TYPE(\"{ce_type}\")\n ) bufg_{idx} (\n .CE({ce}),\n .O(bufg_o_{idx})\n );\"\"\".format(\n loc=bufg,\n idx=idx,\n invert_ce=np.random.randint(2),\n ce_type=np.random.choice([\"SYNC\", \"ASYNC\"]),\n ce=np.random.choice(ce_inputs))\n elif site_type == 'BUFGCE_DIV':\n s = \"\"\"\n wire bufg_o_{idx};\n (* LOC=\"{loc}\", KEEP, DONT_TOUCH *) BUFGCE_DIV #(\n .IS_CE_INVERTED({invert_ce}),\n .CE_TYPE(\"{ce_type}\"),\n .BUFGCE_DIVIDE({bufce_divide})\n ) bufg_{idx} (\n .CE({ce}),\n .CLR({clr}),\n .O(bufg_o_{idx})\n );\"\"\".format(\n loc=bufg,\n idx=idx,\n invert_ce=np.random.randint(2),\n ce_type=np.random.choice([\"SYNC\", \"ASYNC\"]),\n ce=np.random.choice(ce_inputs),\n clr=np.random.choice(ce_inputs),\n bufce_divide=np.random.choice(range(1, 9)))\n elif site_type == 'BUFG_PS':\n s = \"\"\"\n wire bufg_o_{idx};\n (* LOC=\"{loc}\", KEEP, DONT_TOUCH *) BUFG_PS #(\n ) bufg_{idx} (\n .O(bufg_o_{idx})\n );\"\"\".format(\n loc=bufg, idx=idx)\n elif site_type == 'BUFGCTRL':\n preselect_i0 = np.random.randint(2)\n if not preselect_i0:\n preselect_i1 = np.random.randint(2)\n else:\n preselect_i1 = 0\n\n s0 = np.random.choice(ce_inputs)\n s1 = np.random.choice(ce_inputs)\n if s0 == '0':\n while s1 == '0':\n s1 = np.random.choice(ce_inputs)\n\n if s0 
== '0' and s1 == '1':\n invert_s0 = np.random.randint(2)\n invert_s1 = 0\n elif s0 == '1' and s1 == '0':\n invert_s1 = np.random.randint(2)\n invert_s0 = 0\n elif s0 == '1' and s1 == '1':\n invert_s0 = np.random.randint(2)\n if invert_s0:\n invert_s1 = 0\n else:\n invert_s0 = np.random.randint(2)\n invert_s1 = np.random.randint(2)\n\n s = \"\"\"\n wire bufg_o_{idx};\n (* LOC=\"{loc}\", KEEP, DONT_TOUCH *) BUFGCTRL #(\n .INIT_OUT({init_out}),\n .IS_CE0_INVERTED({invert_ce0}),\n .IS_CE1_INVERTED({invert_ce1}),\n .IS_S0_INVERTED({invert_s0}),\n .IS_S1_INVERTED({invert_s1}),\n .IS_IGNORE0_INVERTED({invert_ignore0}),\n .IS_IGNORE1_INVERTED({invert_ignore1}),\n .PRESELECT_I0({preselect_i0}),\n .PRESELECT_I1({preselect_i1})\n ) bufg_{idx} (\n .IGNORE0({ignore0}),\n .IGNORE1({ignore1}),\n .S0({s0}),\n .S1({s1}),\n .CE0({ce0}),\n .CE1({ce1}),\n .O(bufg_o_{idx})\n );\"\"\".format(\n loc=bufg,\n idx=idx,\n init_out=np.random.randint(2),\n s0=s0,\n s1=s1,\n ce0=np.random.choice(ce_inputs),\n ce1=np.random.choice(ce_inputs),\n ignore0=np.random.choice(ce_inputs),\n ignore1=np.random.choice(ce_inputs),\n invert_ce0=np.random.randint(2),\n invert_ce1=np.random.randint(2),\n invert_s0=invert_s0,\n invert_s1=invert_s1,\n invert_ignore0=np.random.randint(2),\n invert_ignore1=np.random.randint(2),\n preselect_i0=preselect_i0,\n preselect_i1=preselect_i1,\n )\n else:\n assert False, site_type\n\n bufg_wires[hroute_number] = 'bufg_o_{idx}'.format(idx=idx)\n bufg_str[hroute_number] = s\n\n bufg_used = set()\n slices = sorted(clocks.sites.keys())\n\n num_slices = np.random.randint(1, len(slices) + 1)\n\n for idx, slice_loc in enumerate(slices):\n if idx >= num_slices:\n break\n\n all_clocks = list(bufg_wires.keys())\n np.random.shuffle(all_clocks)\n while True:\n hroute_number = all_clocks.pop()\n clock_str = bufg_wires[hroute_number]\n if clocks.add_clock(slice_loc, clock_str):\n if hroute_number not in bufg_used:\n print(bufg_str[hroute_number])\n bufg_used.add(hroute_number)\n\n print(\n ' (* LOC=\"{loc}\", KEEP, DONT_TOUCH *) FDCE ff_{i}(.C({clock_str}));'\n .format(loc=slice_loc, i=idx, clock_str=clock_str))\n break\n\n print('endmodule')\n" ]
[ [ "numpy.random.shuffle", "numpy.random.randint", "numpy.random.seed", "numpy.random.choice" ] ]
boostcampaitech2/model-optimization-level3-nlp-02
[ "bcc2da9cbb7bf41f8635dba6a63af87c9f9dd181", "bcc2da9cbb7bf41f8635dba6a63af87c9f9dd181" ]
[ "train_student.py", "src/modules/mbconv.py" ]
[ "\"\"\"Baseline train\n- Author: Junghoon Kim\n- Contact: placidus36@gmail.com\n\"\"\"\n\nimport argparse\nimport os\nfrom datetime import datetime\nfrom typing import Any, Dict, Tuple, Union\n\nimport timm\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport yaml\n\nfrom src.dataloader import create_dataloader\nfrom src.loss import CustomCriterion\nfrom src.model import Model\nfrom src.trainer import TorchTrainer\nfrom src.utils.common import get_label_counts, read_yaml\nfrom src.utils.torch_utils import check_runtime, model_info\nimport random\nimport numpy as np\n\ndef seed_everything(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) # if use multi-GPU\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(seed)\n random.seed(seed)\n\ndef train(\n model_config: Dict[str, Any],\n data_config: Dict[str, Any],\n log_dir: str,\n fp16: bool,\n device: torch.device,\n flag : bool = False\n) -> Tuple[float, float, float]:\n \"\"\"Train.\"\"\"\n seed_everything(2)\n # save model_config, data_config\n with open(os.path.join(log_dir, \"data.yml\"), \"w\") as f:\n yaml.dump(data_config, f, default_flow_style=False)\n with open(os.path.join(log_dir, \"model.yml\"), \"w\") as f:\n yaml.dump(model_config, f, default_flow_style=False)\n\n # model_instance = Model(model_config, verbose=True)\n\n model_path = os.path.join(log_dir, \"best.pt\")\n print(f\"Model save path: {model_path}\")\n # if os.path.isfile(model_path):\n # model_instance.model.load_state_dict(\n # torch.load(model_path, map_location=device)\n # )\n # model_instance.model.to(device)\n student_model = timm.create_model('tf_mobilenetv3_small_minimal_100', num_classes=6, pretrained=True)\n\n # teacher_model = timm.create_model('vit_large_patch16_224', num_classes=6, pretrained=True)\n # teacher_model.load_state_dict(torch.load(\"/opt/ml/code/exp/ViT/best.pt\"))\n teacher_model = timm.create_model('tf_efficientnet_b7_ns', num_classes=6, pretrained=True)\n teacher_model.load_state_dict(torch.load(\"/opt/ml/code/exp/Teacher/best.pt\"))\n \n student_model.to(device)\n teacher_model.to(device)\n \n\n # Create dataloader\n train_dl, val_dl, test_dl = create_dataloader(data_config)\n\n # Create criterion\n criterion = CustomCriterion(\n samples_per_cls=get_label_counts(data_config[\"DATA_PATH\"])\n if data_config[\"DATASET\"] == \"TACO\"\n else None,\n device=device,\n )\n # Amp loss scaler\n scaler = (\n torch.cuda.amp.GradScaler() if fp16 and device != torch.device(\"cpu\") else None\n )\n\n # Create trainer\n trainer = TorchTrainer(\n model=student_model,\n criterion=criterion,\n hyperparams=data_config,\n scaler=scaler,\n device=device,\n model_path=model_path,\n verbose=1,\n flag = flag\n )\n best_acc, best_f1 = trainer.KD_train(\n train_dataloader=train_dl,\n n_epoch=data_config[\"EPOCHS\"],\n val_dataloader=val_dl if val_dl else test_dl,\n teacher=teacher_model\n )\n\n # evaluate model with test set\n student_model.load_state_dict(torch.load(model_path))\n test_loss, test_f1, test_acc = trainer.test(\n model=student_model, test_dataloader=val_dl if val_dl else test_dl\n )\n return test_loss, test_f1, test_acc\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Train model.\")\n parser.add_argument(\n \"--model\",\n default=\"mobilenetv3\",\n type=str,\n help=\"model config file name\",\n )\n parser.add_argument(\n \"--data\", default=\"taco\", type=str, help=\"data config file name\"\n )\n 
parser.add_argument(\n \"--sam_flag\", action='store_true',\n )\n args = parser.parse_args()\n\n model_cfg_path = os.path.join(\"configs/model\", args.model + \".yaml\")\n data_cfg_path = os.path.join(\"configs/data\", args.data + \".yaml\")\n\n model_config = read_yaml(cfg=model_cfg_path)\n data_config = read_yaml(cfg=data_cfg_path)\n\n data_config[\"DATA_PATH\"] = os.environ.get(\"SM_CHANNEL_TRAIN\", data_config[\"DATA_PATH\"])\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n log_dir = os.environ.get(\"SM_MODEL_DIR\", os.path.join(\"exp_sam\", 'latest'))\n\n if os.path.exists(log_dir): \n modified = datetime.fromtimestamp(os.path.getmtime(log_dir + '/best.pt'))\n new_log_dir = os.path.dirname(log_dir) + '/' + modified.strftime(\"%Y-%m-%d_%H-%M-%S\")\n os.rename(log_dir, new_log_dir)\n\n os.makedirs(log_dir, exist_ok=True)\n\n test_loss, test_f1, test_acc = train(\n model_config=model_config,\n data_config=data_config,\n log_dir=log_dir,\n fp16=data_config[\"FP16\"],\n device=device,\n flag=args.sam_flag\n )\n\n", "import math\n\nimport torch\nimport torch.nn as nn\n\nfrom src.modules.base_generator import GeneratorAbstract\n\n\nclass MBConv(nn.Module):\n \"\"\"MBConvBlock used in Efficientnet.\n\n Reference:\n https://github.com/narumiruna/efficientnet-pytorch/blob/master/efficientnet/models/efficientnet.py\n Note:\n Drop connect rate is disabled.\n \"\"\"\n\n def __init__(\n self,\n in_planes,\n out_planes,\n expand_ratio,\n kernel_size,\n stride,\n reduction_ratio=4,\n drop_connect_rate=0.2,\n ):\n super(MBConv, self).__init__()\n self.drop_connect_rate = drop_connect_rate\n self.use_residual = in_planes == out_planes and stride == 1\n assert stride in [1, 2]\n assert kernel_size in [3, 5]\n\n hidden_dim = in_planes * expand_ratio\n reduced_dim = max(1, in_planes // reduction_ratio)\n\n layers = []\n # pw\n if in_planes != hidden_dim:\n layers.append(ConvBNReLU(in_planes, hidden_dim, 1))\n\n layers.extend(\n [\n # dw\n ConvBNReLU(\n hidden_dim,\n hidden_dim,\n kernel_size,\n stride=stride,\n groups=hidden_dim,\n ),\n # se\n SqueezeExcitation(hidden_dim, reduced_dim),\n # pw-linear\n nn.Conv2d(hidden_dim, out_planes, 1, bias=False),\n nn.BatchNorm2d(out_planes),\n ]\n )\n self.conv = nn.Sequential(*layers)\n\n def _drop_connect(self, x):\n if not self.training:\n return x\n if self.drop_connect_rate >= 1.0:\n return x\n keep_prob = 1.0 - self.drop_connect_rate\n batch_size = x.size(0)\n random_tensor = keep_prob\n random_tensor += torch.rand(batch_size, 1, 1, 1, device=x.device)\n binary_tensor = random_tensor.floor()\n return x.div(keep_prob) * binary_tensor\n\n def forward(self, x):\n if self.use_residual:\n return x + self._drop_connect(self.conv(x))\n else:\n return self.conv(x)\n\n\nclass ConvBNReLU(nn.Sequential):\n def __init__(self, in_planes, out_planes, kernel_size, stride=1, groups=1):\n padding = (kernel_size - 1) // 2\n super(ConvBNReLU, self).__init__(\n nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size,\n stride,\n padding=padding,\n groups=groups,\n bias=False,\n ),\n nn.BatchNorm2d(out_planes),\n Swish(),\n )\n\n\nclass SwishImplementation(torch.autograd.Function):\n @staticmethod\n def forward(ctx, i):\n result = i * torch.sigmoid(i)\n ctx.save_for_backward(i)\n return result\n\n @staticmethod\n def backward(ctx, grad_output):\n i = ctx.saved_variables[0]\n sigmoid_i = torch.sigmoid(i)\n return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))\n\n\nclass Swish(nn.Module):\n def forward(self, x):\n return 
SwishImplementation.apply(x)\n\n\ndef _round_repeats(repeats, depth_mult):\n if depth_mult == 1.0:\n return repeats\n return int(math.ceil(depth_mult * repeats))\n\n\nclass SqueezeExcitation(nn.Module):\n \"\"\"Squeeze-Excitation layer used in MBConv.\"\"\"\n\n def __init__(self, in_planes, reduced_dim):\n super(SqueezeExcitation, self).__init__()\n self.se = nn.Sequential(\n nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(in_planes, reduced_dim, 1),\n Swish(),\n nn.Conv2d(reduced_dim, in_planes, 1),\n nn.Sigmoid(),\n )\n\n def forward(self, x):\n return x * self.se(x)\n\n\nclass MBConvGenerator(GeneratorAbstract):\n \"\"\"Bottleneck block generator.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @property\n def out_channel(self) -> int:\n \"\"\"Get out channel size.\"\"\"\n return self._get_divisible_channel(self.args[1] * self.width_multiply)\n\n @property\n def base_module(self) -> nn.Module:\n \"\"\"Returns module class from src.common_modules based on the class name.\"\"\"\n return getattr(__import__(\"src.modules\", fromlist=[\"\"]), self.name)\n\n def __call__(self, repeat: int = 1):\n \"\"\"call method.\n\n MBConv args consist of\n repeat(=n), [t, c, s, k] // note: the original notation from the paper is [t, c, n, s]\n \"\"\"\n module = []\n t, c, s, k = self.args # c is equivalent to self.out_channel\n inp, oup = self.in_channel, self.out_channel\n for i in range(repeat):\n stride = s if i == 0 else 1\n module.append(\n self.base_module(\n in_planes=inp,\n out_planes=oup,\n expand_ratio=t,\n stride=stride,\n kernel_size=k,\n )\n )\n inp = oup\n return self._get_module(module)\n" ]
[ [ "numpy.random.seed", "torch.load", "torch.cuda.manual_seed", "torch.manual_seed", "torch.cuda.amp.GradScaler", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device" ], [ "torch.nn.Sequential", "torch.sigmoid", "torch.nn.Conv2d", "torch.nn.Sigmoid", "torch.nn.AdaptiveAvgPool2d", "torch.rand", "torch.nn.BatchNorm2d" ] ]
riteshkumarumassedu/BERT-with-SOP
[ "5fa5d34b1fc919ca2ddac2112d74d54e0dbd4b77" ]
[ "pretrain_SOP2.py" ]
[ "\"\"\"\nPretrain transformer with Masked LM and and different SOP variants\n\"\"\"\nfrom random import randint, shuffle\nfrom random import random as rand\n\nimport fire\nimport torch\nimport torch.nn as nn\nfrom tensorboardX import SummaryWriter\n\nimport models\nimport optim\nimport tokenization\nimport train\nfrom utils import set_seeds, get_device, get_random_word, truncate_tokens_pair\n\n\n# Input file format :\n# 1. One sentence per line. These should ideally be actual sentences,\n# not entire paragraphs or arbitrary spans of text. (Because we use\n# the sentence boundaries for the \"next sentence prediction\" task).\n# 2. Blank lines between documents. Document boundaries are needed\n# so that the \"next sentence prediction\" task doesn't span between documents.\n\ndef get_random_offset(f, back_margin=2000):\n \"\"\" seek random offset of file pointer \"\"\"\n f.seek(0, 2)\n # we remain some amount of text to read\n max_offset = f.tell() - back_margin\n f.seek(randint(0, max_offset), 0)\n f.readline() # throw away an incomplete sentence\n\n\nclass data_loader_for_sentence_pair():\n \"\"\" Load sentence pair (sequential or random order) from corpus \"\"\"\n\n def __init__(self, file, batch_size, tokenize, max_len, short_sampling_prob=0.1, pipeline=[]):\n super().__init__()\n self.f_pos = open(file, \"r\", encoding='utf-8', errors='ignore') # for a positive sample\n self.f_neg = open(file, \"r\", encoding='utf-8', errors='ignore') # for a negative (random) sample\n self.tokenize = tokenize # tokenize function\n self.max_len = max_len # maximum length of tokens\n self.short_sampling_prob = short_sampling_prob\n self.pipeline = pipeline\n self.batch_size = batch_size\n\n def read_tokens(self, f, length, discard_last_and_restart=True):\n \"\"\" Read tokens from file pointer with limited length \"\"\"\n tokens = []\n while len(tokens) < length:\n line = f.readline()\n if not line:\n # end of file\n return None\n if not line.strip():\n # blank line (delimiter of documents)\n if discard_last_and_restart:\n tokens = [] # throw all and restart\n continue\n else:\n return tokens # return last tokens in the document\n tokens.extend(self.tokenize(line.strip()))\n return tokens\n\n def read_neg_tokens(self, f, discard_last_and_restart=True):\n \"\"\" Read tokens from file pointer with limited length \"\"\"\n tokens = []\n while True:\n line = f.readline()\n if not line:\n # end of file\n return None\n if not line.strip():\n # blank line (delimiter of documents)\n # if discard_last_and_restart:\n # tokens = [] # throw all and restart\n continue\n # else:\n # return tokens # return last tokens in the document\n return self.tokenize(line.strip())\n\n def read_para_tokens(self, f, discard_last_and_restart=True):\n \"\"\" Read tokens from file pointer with limited length \"\"\"\n tokens = []\n tokens_a = None\n while True:\n line = f.readline()\n if not line:\n # end of file\n return None, None\n if not line.strip():\n # blank line (delimiter of documents)\n # if discard_last_and_restart:\n # tokens = [] # throw all and restart\n break\n # else:\n # return tokens # return last tokens in the document\n if tokens_a is None:\n tokens_a = self.tokenize(line.strip())\n tokens.extend(self.tokenize(line.strip()))\n return tokens, tokens_a\n\n def read_tokens_at_pos(self, f, length, pos, discard_last_and_restart=True):\n\n # skipping lines\n for x in range(pos):\n _skipping = f.readline()\n\n tokens = []\n while len(tokens) < length:\n line = f.readline()\n if not line: # end of file\n return None\n if not 
line.strip(): # blank line (delimiter of documents)\n if discard_last_and_restart:\n tokens = [] # throw all and restart\n continue\n else:\n return tokens # return last tokens in the document\n tokens.extend(self.tokenize(line.strip()))\n return tokens\n\n def __iter__(self): # iterator to load data\n while True:\n batch = []\n print(\"-----SOP2-----\")\n for i in range(self.batch_size):\n # sampling length of each tokens_a and tokens_b\n # sometimes sample a short sentence to match between train and test sequences\n len_tokens = randint(1, int(self.max_len / 2)) \\\n if rand() < self.short_sampling_prob \\\n else int(self.max_len / 2)\n # Number of tokens in the paragraph\n # len_para_tokens = 40\n is_next = rand() < 0.5 # whether token_b is next to token_a or not\n tokens_para, tokens_a = self.read_para_tokens(self.f_pos)\n # tokens_a = self.read_tokens(self.f_pos, len_tokens, True)\n # seek_random_offset(self.f_neg)\n # f_next = self.f_pos if is_next else self.f_neg\n # tokens_b = self.read_tokens(f_next, len_tokens, False)\n #\n # if tokens_a is None or tokens_b is None: # end of file\n # self.f_pos.seek(0, 0) # reset file pointer\n # return\n #\n # instance = (is_next, tokens_a, tokens_b)\n\n \"\"\"\"\n 1. Get which line to read from the current A line (eg 1st from A, 2nd from A, 3rd from A)\n 0 means 1st\n 1 means 2nd \n 2 means 3rd\n \n 2. Get that line from the current A line\n \n 3. is_next would be the relative position of line B from line A\n \n \"\"\"\n # possible_orders = [1,2,3,4]\n # line_b_rel_pos = random.choice(possible_orders)\n # set f_next to f_pos\n f_next = self.f_pos\n\n # tokens_b = self.read_tokens_at_pos(f_next, len_tokens,line_b_rel_pos, False)\n tokens_b = self.read_neg_tokens(self.f_neg)\n if tokens_a is None or tokens_b is None: # end of file\n self.f_pos.seek(0, 0) # reset file pointer\n return\n\n # SOP, sentence-order prediction\n # instance = (line_b_rel_pos-1, tokens_a, tokens_b)\n instance = (is_next, tokens_para, tokens_a) if is_next \\\n else (is_next, tokens_para, tokens_b)\n\n for proc in self.pipeline:\n instance = proc(instance)\n\n batch.append(instance)\n\n # To Tensor\n batch_tensors = [torch.tensor(x, dtype=torch.long) for x in zip(*batch)]\n yield batch_tensors\n\n\nclass Pipeline():\n \"\"\" Pre-process Pipeline Class : callable \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def __call__(self, instance):\n raise NotImplementedError\n\n\nclass Pretrain_transformer(Pipeline):\n \"\"\"\n Pre-processing steps for pretraining transformer\n \"\"\"\n\n def __init__(self, max_pred, mask_prob, vocab_words, indexer, max_len=512):\n super().__init__()\n self.max_pred = max_pred # max tokens of prediction\n self.mask_prob = mask_prob # masking probability\n self.vocab_words = vocab_words # vocabulary (sub)words\n self.indexer = indexer # function from token to token index\n self.max_len = max_len\n\n def __call__(self, instance):\n is_next, tokens_a, tokens_b = instance\n\n # -3 for special tokens [CLS], [SEP], [SEP]\n truncate_tokens_pair(tokens_a, tokens_b, self.max_len - 3)\n\n # Add Special Tokens\n tokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]']\n segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)\n input_mask = [1] * len(tokens)\n\n # For masked Language Models\n masked_tokens, masked_pos = [], []\n # the number of prediction is sometimes less than max_pred when sequence is short\n n_pred = min(self.max_pred, max(1, int(round(len(tokens) * self.mask_prob))))\n # candidate positions of masked tokens\n 
cand_pos = [i for i, token in enumerate(tokens)\n if token != '[CLS]' and token != '[SEP]']\n shuffle(cand_pos)\n for pos in cand_pos[:n_pred]:\n masked_tokens.append(tokens[pos])\n masked_pos.append(pos)\n if rand() < 0.8: # 80%\n tokens[pos] = '[MASK]'\n elif rand() < 0.5: # 10%\n tokens[pos] = get_random_word(self.vocab_words)\n # when n_pred < max_pred, we only calculate loss within n_pred\n masked_weights = [1] * len(masked_tokens)\n\n # Token Indexing\n input_ids = self.indexer(tokens)\n masked_ids = self.indexer(masked_tokens)\n\n # Zero Padding\n n_pad = self.max_len - len(input_ids)\n input_ids.extend([0] * n_pad)\n segment_ids.extend([0] * n_pad)\n input_mask.extend([0] * n_pad)\n\n # Zero Padding for masked target\n if self.max_pred > n_pred:\n n_pad = self.max_pred - n_pred\n masked_ids.extend([0] * n_pad)\n masked_pos.extend([0] * n_pad)\n masked_weights.extend([0] * n_pad)\n\n return (input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, is_next)\n\n\nclass Bert_model_for_pretraining(nn.Module):\n \"\"\"\n Bert Model for Pretrain : Masked LM and SOP variants\n \"\"\"\n\n def __init__(self, cfg):\n super().__init__()\n self.transformer = models.Transformer(cfg)\n self.fc = nn.Linear(cfg.dim, cfg.dim)\n self.activ1 = nn.Tanh()\n self.linear = nn.Linear(cfg.dim, cfg.dim)\n self.activ2 = models.gelu\n self.norm = models.LayerNorm(cfg)\n self.classifier = nn.Linear(cfg.dim, 4)\n # decoder is shared with embedding layer\n embed_weight = self.transformer.embed.tok_embed.weight\n n_vocab, n_dim = embed_weight.size()\n self.decoder = nn.Linear(n_dim, n_vocab, bias=False)\n self.decoder.weight = embed_weight\n self.decoder_bias = nn.Parameter(torch.zeros(n_vocab))\n\n def forward(self, input_ids, segment_ids, input_mask, masked_pos):\n h = self.transformer(input_ids, segment_ids, input_mask)\n pooled_h = self.activ1(self.fc(h[:, 0]))\n masked_pos = masked_pos[:, :, None].expand(-1, -1, h.size(-1))\n h_masked = torch.gather(h, 1, masked_pos)\n h_masked = self.norm(self.activ2(self.linear(h_masked)))\n logits_lm = self.decoder(h_masked) + self.decoder_bias\n logits_clsf = self.classifier(pooled_h)\n\n return logits_lm, logits_clsf\n\n\ndef main(train_cfg='config/pretrain.json', model_cfg='config/bert_base.json',\n data_file='../tbc/books_large_all.txt', model_file=None,\n data_parallel=True, vocab='../uncased_L-12_H-768_A-12/vocab.txt',\n save_dir='../exp/bert/pretrain', log_dir='../exp/bert/pretrain/runs',\n max_len=512, max_pred=20, mask_prob=0.15):\n \"\"\"\n :param train_cfg: json file containing the pretraining params\n :param model_cfg: json file containing the BERT model details\n :param data_file: data file containing the wikitext-103 unlabelled data\n :param model_file: model file if finetuning\n :param data_parallel: whether to train with data parallelism\n :param vocab: text file containing the vocab words\n :param save_dir: directory to save model checkpoints\n :param log_dir: directory to save tensorboard logs\n :param max_len: maximum sequence length\n :param max_pred: how many words to predict\n :param mask_prob: masking probability\n \"\"\"\n\n cfg = train.Config.from_json(train_cfg)\n model_cfg = models.Config.from_json(model_cfg)\n\n set_seeds(cfg.seed)\n\n tokenizer = tokenization.FullTokenizer(vocab_file=vocab, do_lower_case=True)\n tokenize = lambda x: tokenizer.tokenize(tokenizer.convert_to_unicode(x))\n\n pipeline = [Pretrain_transformer(max_pred,\n mask_prob,\n list(tokenizer.vocab.keys()),\n tokenizer.convert_tokens_to_ids,\n max_len)]\n data_iter = 
data_loader_for_sentence_pair(data_file,\n cfg.batch_size,\n tokenize,\n max_len,\n pipeline=pipeline)\n\n model = Bert_model_for_pretraining(model_cfg)\n # checkpoint.load_model(model.transformer,\"/mnt/nfs/scratch1/riteshkumar/nlp_code/bert_with_sop/uncased_L-12_H-768_A-12/bert_model.ckpt\")\n criterion1 = nn.CrossEntropyLoss(reduction='none')\n criterion2 = nn.CrossEntropyLoss()\n\n optimizer = optim.optim4GPU(cfg, model)\n trainer = train.Trainer(cfg, model, data_iter, optimizer, save_dir, get_device())\n\n writer = SummaryWriter(log_dir=log_dir) # for tensorboardX\n\n def Compute_combined_loss(model, batch, global_step): # make sure loss is tensor\n input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, is_next = batch\n\n logits_lm, logits_clsf = model(input_ids, segment_ids, input_mask, masked_pos)\n loss_lm = criterion1(logits_lm.transpose(1, 2), masked_ids) # for masked LM\n loss_lm = (loss_lm * masked_weights.float()).mean()\n loss_clsf = criterion2(logits_clsf, is_next) # for sentence classification\n writer.add_scalars('data/scalar_group',\n {'loss_lm': loss_lm.item(),\n 'loss_clsf': loss_clsf.item(),\n 'loss_total': (loss_lm + loss_clsf).item(),\n 'lr': optimizer.get_lr()[0],\n },\n global_step)\n return loss_lm + loss_clsf\n\n # pretrain_file = '/mnt/nfs/scratch1/vjayapati/bert_sop2/from_scratch/model_steps_60000.pt'\n trainer.train(Compute_combined_loss, model_file, None, data_parallel)\n\n\nif __name__ == '__main__':\n fire.Fire(main)\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.zeros", "torch.nn.Tanh", "torch.tensor", "torch.nn.Linear", "torch.gather" ] ]
iross/blackstack
[ "4e44679f889d86626cd7cd263a0b770e1d5e9e64" ]
[ "helpers.py" ]
[ "import math\nimport numpy as np\nimport itertools\nfrom PIL import Image\nfrom shapely.geometry import Polygon\nfrom shapely.ops import cascaded_union\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\nfrom difflib import SequenceMatcher\nfrom bs4 import BeautifulSoup\n\nimport classifier\n\nclf = classifier.create()\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\ndef similar_to_keyword(d):\n keywords = ['figure', 'fig', 'table', 'appendix', 'map']\n for word in keywords:\n if similar(d, word) > 0.6:\n return True\n\n return False\n\ndef clean_range(candidates):\n good_vals = []\n for idx, val in enumerate(candidates):\n if (idx == 0) or (idx == len(candidates)) or (idx != len(candidates) and idx != 0 and val <= int(candidates[(idx - 1)])*2):\n # cool\n good_vals.append(val)\n else:\n break\n\n complete = []\n for idx, val in enumerate(good_vals):\n if len(complete) == 0:\n complete.append(1)\n\n elif val > complete[-1] + 1:\n # Fill in the blanks\n for i in range(complete[-1] + 1, val + 1):\n complete.append(i)\n else:\n complete.append(val)\n\n return complete\n\n\ndef make_polygon(area):\n return Polygon([(area['x1'], area['y1']), (area['x1'], area['y2']), (area['x2'], area['y2']), (area['x2'], area['y1']), (area['x1'], area['y1'])])\n\n\ndef polygon_to_extract(polygon):\n bounds = polygon.bounds\n return {\n 'x1': bounds[0],\n 'y1': bounds[1],\n 'x2': bounds[2],\n 'y2': bounds[3]\n }\n\n\ndef union_extracts(extracts):\n unioned = cascaded_union([ make_polygon(p) for p in extracts ])\n\n if unioned.geom_type == 'Polygon':\n return [ polygon_to_extract(unioned) ]\n else:\n return [ polygon_to_extract(geom) for geom in unioned ]\n\n\ndef extract_table(doc, page, extract):\n image = Image.open('%s/png/page_%s.png' % (doc, page))\n image.crop((extract['x1'], extract['y1'], extract['x2'], extract['y2'])).save(doc + '/extracts/page_' + str(page) + '_' + extract['name'].replace(' ', '_').replace('.', '') + '.png', 'png')\n\n\ndef enlarge_extract(extract, area):\n return {\n 'x1': min([extract['x1'], area['x1']]),\n 'y1': min([extract['y1'], area['y1']]),\n 'x2': max([extract['x2'], area['x2']]),\n 'y2': max([extract['y2'], area['y2']])\n }\n\n\ndef rectangles_intersect(a, b):\n if not 'x1' in a or not 'x1' in b:\n return False\n # Determine whether or not two rectangles intersect\n if (a['x1'] < b['x2']) and (a['x2'] > b['x1']) and (a['y1'] < b['y2']) and (a['y2'] > b['y1']):\n return True\n else:\n return False\n\n\ndef extractbbox(title):\n if not title:\n return {}\n # Given a tesseract title string, extract the bounding box coordinates\n for part in title.split(';'):\n if part.strip()[0:4] == 'bbox':\n bbox = part.replace('bbox', '').strip().split()\n return {\n 'x1': int(bbox[0]),\n 'y1': int(bbox[1]),\n 'x2': int(bbox[2]),\n 'y2': int(bbox[3])\n }\n return {}\n\n\ndef meanOfDifferences(d):\n return np.nanmean([abs(each[0] - each[1]) for each in list(itertools.combinations(d, 2))])\n\n\ndef centroid(x):\n return {\n 'x': x['x1'] + (float(x['x2'] - x['x1']) / 2),\n 'y': x['y1'] + (float(x['y2'] - x['y1']) / 2)\n }\n\n\ndef min_distance(a, b):\n # Calculate 3 different distances and return the best one\n return min([ distance(a, b), top_left_distance(a, b), bottom_right_distance(a, b) ])\n\ndef top_left_distance(a, b):\n return abs(math.sqrt(math.pow((b['x1'] - a['x1']), 2) + math.pow((b['y1'] - a['y1']), 2)))\n\ndef bottom_right_distance(a, b):\n return abs(math.sqrt(math.pow((b['x2'] - a['x2']), 2) + math.pow((b['y2'] - a['y2']), 
2)))\n\ndef distance(a, b):\n centroid_a = centroid(a)\n centroid_b = centroid(b)\n return abs(math.sqrt(math.pow((centroid_b['x'] - centroid_a['x']), 2) + math.pow((centroid_b['y'] - centroid_a['y']), 2)))\n\ndef get_gaps(x_axis):\n '''\n Presence of contiguous vertical white space is a good indicator that\n an area is a table. Given a list of 0s (white space) and 1s (content)\n returns a list of integers that correspond to contiguous pixels of\n whitespace.\n Ex: [1,1,1,1,0,0,0,0,0,0,1,1,0,0,0,0] -> [6, 4]\n '''\n gaps = []\n currentGap = 0\n for x in x_axis:\n if x == 1:\n if currentGap != 0:\n gaps.append(currentGap)\n currentGap = 0\n else:\n currentGap += 1\n\n # include a trailing run of whitespace, as in the docstring example\n if currentGap != 0:\n gaps.append(currentGap)\n\n return gaps\n\n\ndef expand_area(input_area, all_areas):\n text_blocks = [area for area in all_areas if area['type'] == 'body']\n candidate_areas = [area for area in all_areas if area['type'] != 'body' and area['type'] != 'decoration']\n\n extract = {\n 'x1': input_area['x1'],\n 'y1': input_area['y1'],\n 'x2': input_area['x2'],\n 'y2': input_area['y2']\n }\n\n for area in candidate_areas:\n # Create a geometry that is the current extract + the current area\n candidate_new_extract = enlarge_extract(extract, area)\n\n valid_extraction = True\n for block in text_blocks:\n will_intersect = rectangles_intersect(candidate_new_extract, block)\n if will_intersect:\n valid_extraction = False\n\n if valid_extraction:\n extract.update(candidate_new_extract)\n\n return extract\n\n# Translated from the C++ implementation found here - http://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/\ndef lines_intersect(l1, l2):\n\n def on_segment(p1, p2, p3):\n if (\n (p2['x'] <= max([p1['x'], p3['x']])) and\n (p2['x'] >= min([p1['x'], p3['x']])) and\n (p2['y'] <= max([p1['y'], p3['y']])) and\n (p2['y'] >= min([p1['y'], p3['y']]))\n ):\n return True\n else:\n return False\n\n def orientation(p1, p2, p3):\n val = ((p2['y'] - p1['y']) * (p3['x'] - p2['x'])) - ((p2['x'] - p1['x']) * (p3['y'] - p2['y']))\n\n # colinear\n if val == 0:\n return 0\n # clockwise\n elif val > 0:\n return 1\n # counterclockwise\n else:\n return 2\n\n o1 = orientation({\n 'x': l1['x1'],\n 'y': l1['y1']\n }, {\n 'x': l1['x2'],\n 'y': l1['y2']\n }, {\n 'x': l2['x1'],\n 'y': l2['y1']\n })\n\n o2 = orientation({\n 'x': l1['x1'],\n 'y': l1['y1']\n }, {\n 'x': l1['x2'],\n 'y': l1['y2']\n }, {\n 'x': l2['x2'],\n 'y': l2['y2']\n })\n\n o3 = orientation({\n 'x': l2['x1'],\n 'y': l2['y1']\n }, {\n 'x': l2['x2'],\n 'y': l2['y2']\n }, {\n 'x': l1['x1'],\n 'y': l1['y1']\n })\n\n o4 = orientation({\n 'x': l2['x1'],\n 'y': l2['y1']\n }, {\n 'x': l2['x2'],\n 'y': l2['y2']\n }, {\n 'x': l1['x2'],\n 'y': l1['y2']\n })\n\n if o1 != o2 and o3 != o4:\n return True\n\n # Special cases\n if o1 == 0 and on_segment({\n 'x': l1['x1'],\n 'y': l1['y1']\n }, {\n 'x': l2['x1'],\n 'y': l2['y1']\n }, {\n 'x': l1['x2'],\n 'y': l1['y2']\n }):\n return True\n\n if o2 == 0 and on_segment({\n 'x': l1['x1'],\n 'y': l1['y1']\n }, {\n 'x': l2['x2'],\n 'y': l2['y2']\n }, {\n 'x': l1['x2'],\n 'y': l1['y2']\n }):\n return True\n\n if o3 == 0 and on_segment({\n 'x': l2['x1'],\n 'y': l2['y1']\n }, {\n 'x': l1['x1'],\n 'y': l1['y1']\n }, {\n 'x': l2['x2'],\n 'y': l2['y2']\n }):\n return True\n\n if o4 == 0 and on_segment({\n 'x': l2['x1'],\n 'y': l2['y1']\n }, {\n 'x': l1['x2'],\n 'y': l1['y2']\n }, {\n 'x': l2['x2'],\n 'y': l2['y2']\n }):\n return True\n\n return False\n\ndef get_header_footer(pages, page_height, page_width):\n header = { 'x1': 0, 'y1': 0, 'x2': 0, 'y2': 0 }\n footer = { 'x1': 0, 'y1': 
0, 'x2': 0, 'y2': 0 }\n\n # Find headers and footers (skip page 1 and pages that are abnormal orientations)\n page_areas = [ page['areas'] for i, page in enumerate(pages) if i != 0 and ((page['page']['y2'] - page['page']['y1']) == page_height) ]\n\n # Flatten\n areas = [area for areas in page_areas for area in areas]\n\n # Get words in areas that are not text blocks\n words = [ area['soup'].find_all('span', 'ocrx_word') for area in areas if area['type'] != 'body' ]\n\n # Get the dimensions of all areas identified as text blocks\n text_blocks = [ {'y1': area['y1'], 'y2': area['y2'], 'x1': area['x1'], 'x2': area['x2']} for area in areas if area['type'] == 'body' ]\n # Maximum extent of text paragraphs in the document\n text_block_area = {\n 'x1': min([ area['x1'] for area in text_blocks ]),\n 'y1': min([ area['y1'] for area in text_blocks ]),\n 'x2': max([ area['x2'] for area in text_blocks ]),\n 'y2': max([ area['y2'] for area in text_blocks ])\n }\n\n # Get the bounding boxes of all words in the document that DO NOT belong to text blocks\n words_bboxes = []\n for word_a in words:\n for word in word_a:\n words_bboxes.append(extractbbox(word.get('title')))\n\n # Get the top-most coordinate of all word extents\n min_min_y1 = min([ word['y1'] for word in words_bboxes ])\n # For the words that have the top-most coordinate, get the mean of their y2\n max_min_y2 = np.nanmean([ word['y2'] for word in words_bboxes if word['y1'] == min_min_y1 ])\n\n # Get the max y1 of all word extents (looking for words in the last row of each page)\n min_max_y1 = max([ word['y1'] for word in words_bboxes ])\n # For the words that have the max y1, get the mean of their y2\n max_max_y2 = np.nanmean([ word['y2'] for word in words_bboxes if word['y1'] == min_max_y1 ])\n\n #\n # To determine if a document contains a header the following conditions must be met:\n # + The middle of the vertical extent between the words in the top row must be on the top 1/4 of the page\n # + The vertical extent of the words in the potential header must not overlap in y-space with any text block\n if (min_min_y1 + ((max_min_y2 - min_min_y1)/2)) < page_height/4 and not (text_block_area['y1'] <= max_min_y2 and min_min_y1 <= text_block_area['y2']):\n print('HAS HEADER - ', min_min_y1, max_min_y2)\n header = {\n 'x1': 0,\n 'y1': 0,\n 'x2': page_width,\n 'y2': int(max_min_y2)\n }\n\n # To determine if a footer is present, the same rules apply except it must be in the bottom 1/4 of the page\n if (min_max_y1 + ((max_max_y2 - min_max_y1)/2)) > (page_height - page_height/4) and not (text_block_area['y1'] <= max_max_y2 and min_max_y1 <= text_block_area['y2']):\n print('HAS FOOTER - ', min_max_y1, max_max_y2)\n footer = {\n 'x1': 0,\n 'y1': min_max_y1,\n 'x2': page_width,\n 'y2': page_height\n }\n\n return header, footer\n\n\ndef buffer(area, amt):\n return {\n 'x1': area['x1'] - amt,\n 'y1': area['y1'] - amt,\n 'x2': area['x2'] + amt,\n 'y2': area['y2'] + amt\n }\n\n\n# def plot_new_areas(page_no, areas):\n# fig = plt.figure()\n# ax = fig.add_subplot(111, aspect='equal')\n#\n# #areas = [ makeBox(area) for area in area ]\n# # words = [ makeBox(word) for word in words ]\n# areas = [ area['geom'] for area in areas ]\n# for area in areas:\n# ax.add_patch(patches.Rectangle(\n# (int(area['x1']), int(area['y1'])),\n# int(area['x2']) - int(area['x1']),\n# int(area['y2']) - int(area['y1']),\n# fill=False,\n# linewidth=0.5,\n# edgecolor=\"#0000FF\"\n# )\n# )\n#\n#\n# # for word in words:\n# # ax.add_patch(patches.Rectangle(\n# # (int(word['x1']), 
int(word['y1'])),\n# # int(word['x2']) - int(word['x1']),\n# # int(word['y2']) - int(word['y1']),\n# # fill=False,\n# # linewidth=0.1,\n# # edgecolor=\"#000000\"\n# # )\n# # )\n#\n# plt.ylim(0, 6600)\n# plt.xlim(0, 5100)\n# plt.axis(\"off\")\n# ax = plt.gca()\n# ax.invert_yaxis()\n# plt.axis('off')\n# fig.savefig('./' + page_no + '.png', dpi=400, bbox_inches='tight', pad_inches=0)\n\n\ndef area_summary(area):\n summary = {}\n summary.update(area)\n #summary['soup'] = area\n # Bounding box (x1, y1, x2, y2)\n #summary.update(extractbbox(area.get('title')))\n\n # Number of lines\n summary['lines'] = len(summary['soup'].find_all('span', 'ocr_line'))\n summary['line_heights'] = []\n\n for line in summary['soup'].find_all('span', 'ocr_line'):\n bbox = extractbbox(line.get('title'))\n height = bbox['y2'] - bbox['y1']\n summary['line_heights'].append(height)\n\n # Number of words (filter returns an iterator in Python 3, so materialize it)\n try:\n summary['words'] = len(list(filter(None, summary['soup'].getText().strip().replace('\\n', ' ').replace(' ', ' ').split(' '))))\n except Exception:\n summary['words'] = 0\n\n # Area\n summary['area'] = (summary['x2'] - summary['x1']) * (summary['y2'] - summary['y1'])\n\n # Get spacing of words\n summary['x_gaps'] = np.zeros(summary['x2'] - summary['x1'], dtype=int)\n\n # Words per line\n summary['words_in_line'] = []\n summary['word_distances'] = []\n summary['word_heights'] = []\n summary['word_areas'] = []\n summary['words_per_line'] = []\n\n # Record the x position of the first word in each line\n summary['first_word_x'] = []\n\n # Iterate on each line in the area\n for line in summary['soup'].find_all('span', 'ocr_line'):\n # For each line, get words\n words = line.find_all('span', 'ocrx_word')\n\n # Record the number of words in this line\n summary['words_per_line'].append(len(words))\n\n for word_idx, word in enumerate(words):\n wordbbox = extractbbox(word.get('title'))\n\n # Record the x coordinate of the first word of each line\n if word_idx == 0:\n summary['first_word_x'].append(wordbbox['x1'])\n\n summary['word_heights'].append(wordbbox['y2'] - wordbbox['y1'])\n summary['word_areas'].append((wordbbox['x2'] - wordbbox['x1']) * (wordbbox['y2'] - wordbbox['y1']))\n\n for x in range(wordbbox['x1'] - summary['x1'], wordbbox['x2'] - summary['x1']):\n summary['x_gaps'][x] = 1\n\n # If word isn't the last word in a line, get distance between word and word + 1\n if word_idx != (len(words) - 1):\n wordP1bbox = extractbbox(words[ word_idx + 1 ].get('title'))\n # Pythagorean theorem FTW\n summary['word_distances'].append(math.sqrt(math.pow((wordP1bbox['x1'] - wordbbox['x2']), 2) + math.pow((wordP1bbox['y1'] - wordbbox['y1']), 2)))\n\n # Count whitespace gaps\n summary['gaps'] = get_gaps(summary['x_gaps'])\n\n # Get the mean of the differences of the word distances (all the same == 0, difference increases away from 0)\n summary['word_separation_index'] = 0 if summary['words'] == 0 else meanOfDifferences(summary['word_distances'])\n\n # Quantify the variation in the height of words in this area\n summary['word_height_index'] = 0 if summary['words'] == 0 else meanOfDifferences(summary['word_heights'])\n\n # Get the average word height of this area\n summary['word_height_avg'] = 0 if summary['words'] == 0 else np.nanmean(summary['word_heights'])\n\n # Get word/area ratio\n summary['word_area_index'] = 0 if summary['words'] == 0 else np.sum(summary['word_areas']) / float(summary['area'])\n\n return summary\n\ndef summarize_document(area_stats):\n # Don't use areas with 1 line or no words in creating summary statistics\n return {\n 
'word_separation_mean': np.nanmean([np.nanmean(area['word_distances']) for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_separation_median': np.nanmedian([np.nanmean(area['word_distances']) for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_separation_std': np.nanstd([np.nanmean(area['word_distances']) for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_separation_index_mean': np.nanmean([area['word_separation_index'] for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_separation_index_median': np.nanmedian([area['word_separation_index'] for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_separation_index_std': np.nanstd([area['word_separation_index'] for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_height_index_mean': np.nanmean([area['word_height_index'] for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_height_index_median': np.nanmedian([area['word_height_index'] for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_height_index_std': np.nanstd([area['word_height_index'] for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_area_index_mean': np.nanmean([area['word_area_index'] for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_area_index_median': np.nanmedian([area['word_area_index'] for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_area_index_std': np.nanstd([area['word_area_index'] for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_height_avg': np.nanmean([area['word_height_avg'] for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_height_avg_median': np.nanmedian([area['word_height_avg'] for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n 'word_height_avg_std': np.nanstd([area['word_height_avg'] for area in area_stats if area['words'] > 0 and area['lines'] > 1]),\n\n 'line_height_avg': np.nanmean([a for area in area_stats for a in area['line_heights']]),\n 'line_height_std': np.nanstd([a for area in area_stats for a in area['line_heights']]),\n 'max_area': max([ area['area'] for area in area_stats ]),\n 'max_lines': max([ area['lines'] for area in area_stats ]),\n 'max_gaps': max([ len(area['gaps']) for area in area_stats ])\n }\n\ndef merge_areas(areas):\n def process(soup):\n # Given a tesseract title string, extract the bounding box coordinates\n title = soup.get('title')\n for part in title.split(';'):\n if part.strip()[0:4] == 'bbox':\n bbox = part.replace('bbox', '').strip().split()\n return {\n 'x1': int(bbox[0]),\n 'y1': int(bbox[1]),\n 'x2': int(bbox[2]),\n 'y2': int(bbox[3]),\n 'soup': soup\n }\n return {}\n\n areas = [ process(area) for area in areas ]\n merged = group_areas(areas)\n\n last_length = len(areas)\n current_length = len(merged)\n c = 0\n while current_length < last_length:\n c += 1\n # Check yo self before you wreck yoself\n if c > 20:\n break\n last_length = len(merged)\n merged = group_areas(merged)\n current_length = len(merged)\n\n return merged\n\n\n\n\ndef group_areas(areas):\n def rectangles_intersect(a, b):\n # Pad the comparison slightly without mutating the input areas\n pad = 1\n\n # Determine whether or not two rectangles intersect\n if (a['x1'] - pad < b['x2'] + pad) and (a['x2'] > b['x1']) and (a['y1'] < b['y2']) and (a['y2'] > b['y1']):\n return True\n else:\n return False\n\n grouped_areas = []\n\n for area in areas:\n found = False\n for idx, 
ga in enumerate(grouped_areas):\n if rectangles_intersect(ga, area):\n grouped_areas[idx]['soup'] = BeautifulSoup(str(area['soup']) + str(grouped_areas[idx]['soup']), 'html.parser')\n grouped_areas[idx].update(enlarge_extract(ga, area))\n found = True\n break\n\n if not found:\n grouped_areas.append(area)\n\n return grouped_areas\n" ]
[ [ "numpy.nanmedian", "numpy.nanmean", "numpy.nanstd", "numpy.zeros", "numpy.sum" ] ]
Karikari1234/Graph-Markov-Network
[ "8e29e58c8fe7879e53605485615d09356b990a85" ]
[ "Python Scripts/Exp_baseline.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n\nimport torch.utils.data as utils\nimport torch.nn.functional as F\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.parameter import Parameter\nimport math\nimport numpy as np\nimport pandas as pd\nimport time\nimport pickle\nimport os\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n# %matplotlib inline \n# plt.ioff()\n# plt.ion()\n\n\nimport sys\n\nif not sys.warnoptions:\n import warnings\n warnings.simplefilter(\"ignore\")\n \nsys.path.append('../')\n\n\nimport models\nimport utils\nimport importlib\n\nimport argparse\n\n\ndtype = torch.float32\nif torch.cuda.is_available():\n device = torch.device(\"cuda:0\")\nelse:\n device = torch.device(\"cpu\")\n\ntorch.cuda.set_device(0)\n\n\n\n# Load data\n\ndef loadDataset(dataset = None):\n if dataset == 'PEMS-BAY':\n speed_matrix = pd.read_hdf('../Data/PEMS-BAY/pems-bay.h5')\n A = pd.read_pickle('../Data/PEMS-BAY/adj_mx_bay.pkl')\n A = A[2]\n A[np.where(A != 0)] = 1\n for i in range(0, A.shape[0]):\n for j in range(i, A.shape[0]):\n if A[i,j] == 1:\n A[j,i] = 1\n elif dataset == 'METR-LA':\n speed_matrix = pd.read_hdf('../Data/METR-LA/metr-la.h5')\n A = pd.read_pickle('../Data/METR-LA/adj_mx.pkl')\n A = A[2]\n A[np.where(A != 0)] = 1\n for i in range(0, A.shape[0]):\n for j in range(i, A.shape[0]):\n if A[i,j] == 1:\n A[j,i] = 1\n elif dataset == 'LOOP-SEA':\n speed_matrix = pd.read_pickle('../Data/LOOP-SEA/speed_matrix_2015_1mph')\n A = np.load('../Data/LOOP-SEA/Loop_Seattle_2015_A.npy')\n elif dataset == 'INRIX-SEA':\n speed_matrix = pd.read_pickle('../Data/INRIX-SEA/INRIX_Seattle_Speed_Matrix__2012_v2.pkl')\n A = np.load('../Data/INRIX-SEA/INRIX_Seattle_Adjacency_matrix_2012_v2.npy')\n else:\n print('Dataset not found.')\n return None, None\n print('Dataset loaded.')\n return speed_matrix, A\n\n\ndef StoreData(result_dict, model_name, train_result, test_result, directory, model, random_seed = 1024, save_model=True):\n result_dict[model_name] = {}\n result_dict[model_name]['train_loss'] = train_result[2]\n result_dict[model_name]['valid_loss'] = train_result[3] \n result_dict[model_name]['MAE'] = test_result[3] \n result_dict[model_name]['RMSE'] = test_result[4] \n result_dict[model_name]['MAPE'] = test_result[5] \n f = open(directory + '/log_rs_' + str(random_seed) + '.pkl', \"wb\")\n pickle.dump(result_dict,f)\n f.close()\n if save_model:\n torch.save(model.state_dict(), directory + '/' + model_name)\n\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-d', '-dataset', default='PEMS-BAY', type=str, required=True, help=\"specify dataset\")\n parser.add_argument('-m', '-missingrate', default=0.2, type=float, required=True, help=\"specify missing rate\")\n parser.add_argument('-o', '-optm', default='Adam', type=str, required=False, help=\"specify training optimizer\")\n parser.add_argument('-l', '-learningrate', default=0.001, type=float, required=False, help=\"specify initial learning rate\")\n parser.add_argument('-t', '-maskingtype', default='random', type=str, required=False, help=\"specify masking type\")\n parser.add_argument('-r', '-randomseed', default=1024, type=int, required=False, help=\"specify random seed\")\n parser.add_argument('-s', '-savemodel', default=1, type=int, required=False, help=\"specify whether save model\")\n args = parser.parse_args()\n \n dataset = args.d\n missing_rate = args.m\n optm = args.o\n learning_rate = args.l\n random_seed = args.r\n masking_type 
= args.t\n save_model = args.s\n \n np.random.seed(random_seed)\n \n print('Exp: baseline models')\n print('\\tDataset:', dataset)\n print('\\tMissing rate:', missing_rate)\n print('\\tOptimizer:', optm) \n print('\\tLearning rate:', learning_rate)\n print('\\tMasking type:', masking_type)\n print('\\tRandom seed:', random_seed)\n \n if save_model==1:\n print('\\tSave model:', 'True')\n else:\n print('\\tSave model:', 'False')\n \n directory = './Masking_' + masking_type + '/' + str(dataset) + '_MR=' + str(missing_rate)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n result_dict = {}\n \n \n speed_matrix, A = loadDataset(dataset = dataset)\n\n\n importlib.reload(utils)\n from utils import PrepareDataset, TrainModel, TestModel\n\n\n importlib.reload(utils)\n from utils import PrepareDataset\n mask_ones_proportion = 1 - missing_rate\n train_dataloader, valid_dataloader, test_dataloader, max_speed, X_mean = PrepareDataset(speed_matrix, BATCH_SIZE = 64, seq_len = 10, pred_len = 1, \\\n train_propotion = 0.6, valid_propotion = 0.2, \\\n mask_ones_proportion = mask_ones_proportion, \\\n masking = True, masking_type = masking_type, delta_last_obsv = True, \\\n shuffle = True, random_seed = random_seed)\n\n\n inputs, labels = next(iter(train_dataloader))\n [batch_size, type_size, step_size, fea_size] = inputs.size()\n\n\n importlib.reload(utils)\n from utils import TrainModel, TestModel\n\n\n # LSTM\n importlib.reload(models)\n from models import LSTM\n importlib.reload(utils)\n from utils import TrainModel, TestModel\n model_name = 'LSTM'\n print(model_name)\n model = LSTM(A.shape[0])\n model, train_result = TrainModel(model, train_dataloader, valid_dataloader, learning_rate = learning_rate, patience = 5)\n test_result = TestModel(model, test_dataloader, max_speed)\n StoreData(result_dict, model_name, train_result, test_result, directory, model, random_seed, save_model)\n\n # LSTM-I\n importlib.reload(models)\n from models import LSTM\n importlib.reload(utils)\n from utils import TrainModel, TestModel\n model_name = 'LSTMI'\n print(model_name)\n model = LSTM(A.shape[0], imputation = True)\n model, train_result = TrainModel(model, train_dataloader, valid_dataloader, learning_rate = learning_rate, patience = 5)\n test_result = TestModel(model, test_dataloader, max_speed)\n StoreData(result_dict, model_name, train_result, test_result, directory, model, random_seed, save_model)\n\n # LSTM-D\n import LSTMD\n importlib.reload(LSTMD)\n from LSTMD import LSTMD\n importlib.reload(utils)\n from utils import TrainModel, TestModel\n model_name = 'LSTMD'\n print(model_name)\n model = LSTMD(fea_size, fea_size, fea_size, X_mean)\n model, train_result = TrainModel(model, train_dataloader, valid_dataloader, learning_rate = learning_rate, patience = 5)\n test_result = TestModel(model, test_dataloader, max_speed)\n StoreData(result_dict, model_name, train_result, test_result, directory, model, random_seed, save_model)\n\n # GRU\n importlib.reload(models)\n from models import GRU\n importlib.reload(utils)\n from utils import TrainModel, TestModel\n model_name = 'GRU'\n print(model_name)\n gru = GRU(A.shape[0])\n gru, train_result = TrainModel(gru, train_dataloader, valid_dataloader, learning_rate = learning_rate, patience = 5)\n test_result = TestModel(gru, test_dataloader, max_speed)\n StoreData(result_dict, model_name, train_result, test_result, directory, gru, random_seed, save_model)\n\n # GRU-I\n importlib.reload(models)\n from models import GRU\n importlib.reload(utils)\n from utils import 
TrainModel, TestModel\n model_name = 'GRUI'\n print(model_name)\n model = GRU(A.shape[0], imputation = True)\n model, train_result = TrainModel(model, train_dataloader, valid_dataloader, learning_rate = learning_rate, patience = 5)\n test_result = TestModel(model, test_dataloader, max_speed)\n StoreData(result_dict, model_name, train_result, test_result, directory, model, random_seed, save_model)\n\n # GRU-D\n import GRUD\n importlib.reload(GRUD)\n from GRUD import GRUD\n importlib.reload(utils)\n from utils import TrainModel, TestModel\n model_name = 'GRUD'\n print(model_name)\n model = GRUD(fea_size, fea_size, fea_size, X_mean)\n model, train_result = TrainModel(model, train_dataloader, valid_dataloader, learning_rate = learning_rate, patience = 5)\n test_result = TestModel(model, test_dataloader, max_speed)\n StoreData(result_dict, model_name, train_result, test_result, directory, model, random_seed, save_model)\n\n # Graph GRU-D\n import GGRUD\n importlib.reload(GGRUD)\n from GGRUD import GGRUD\n importlib.reload(utils)\n from utils import TrainModel, TestModel\n model_name = 'GGRUD'\n print(model_name)\n model = GGRUD(A, fea_size, fea_size, fea_size, X_mean)\n model, train_result = TrainModel(model, train_dataloader, valid_dataloader, learning_rate = learning_rate, patience = 5)\n test_result = TestModel(model, test_dataloader, max_speed)\n StoreData(result_dict, model_name, train_result, test_result, directory, model, random_seed, save_model)" ]
[ [ "pandas.read_hdf", "torch.cuda.set_device", "numpy.random.seed", "torch.cuda.is_available", "torch.device", "numpy.load", "pandas.read_pickle", "numpy.where" ] ]
olebole/specutils
[ "566de5cec00bd1198f1275ce74b1e261b61813af" ]
[ "specutils/analysis/uncertainty.py" ]
[ "\"\"\"\nA module for analysis tools dealing with uncertainties or error analysis in\nspectra.\n\"\"\"\n\nimport numpy as np\nfrom ..spectra import SpectralRegion\nfrom ..manipulation import extract_region\n\n__all__ = ['snr', 'snr_derived']\n\n\ndef snr(spectrum, region=None):\n \"\"\"\n Calculate the mean S/N of the spectrum based on the flux and uncertainty\n in the spectrum. This will be calculated over the regions, if they\n are specified.\n\n Parameters\n ----------\n spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`\n The spectrum object overwhich the equivalent width will be calculated.\n\n region: `~specutils.utils.SpectralRegion` or list of `~specutils.utils.SpectralRegion`\n Region within the spectrum to calculate the SNR.\n\n Returns\n -------\n snr : `~astropy.units.Quantity` or list (based on region input)\n Signal to noise ratio of the spectrum or within the regions\n\n Notes\n -----\n The spectrum will need to have the uncertainty defined in order for the SNR\n to be calculated. If the goal is instead signal to noise *per pixel*, this\n should be computed directly as ``spectrum.flux / spectrum.uncertainty``.\n\n \"\"\"\n\n if not hasattr(spectrum, 'uncertainty') or spectrum.uncertainty is None:\n raise Exception(\"Spectrum1D currently requires the uncertainty be defined.\")\n\n # No region, therefore whole spectrum.\n if region is None:\n return _snr_single_region(spectrum)\n\n # Single region\n elif isinstance(region, SpectralRegion):\n return _snr_single_region(spectrum, region=region)\n\n # List of regions\n elif isinstance(region, list):\n return [_snr_single_region(spectrum, region=reg)\n for reg in region]\n\n\ndef _snr_single_region(spectrum, region=None):\n \"\"\"\n Calculate the mean S/N of the spectrum based on the flux and uncertainty\n in the spectrum.\n\n Parameters\n ----------\n spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`\n The spectrum object overwhich the equivalent width will be calculated.\n\n region: `~specutils.utils.SpectralRegion`\n Region within the spectrum to calculate the SNR.\n\n Returns\n -------\n snr : `~astropy.units.Quantity` or list (based on region input)\n Signal to noise ratio of the spectrum or within the regions\n\n Notes\n -----\n This is a helper function for the above `snr()` method.\n\n \"\"\"\n\n if region is not None:\n calc_spectrum = extract_region(spectrum, region)\n else:\n calc_spectrum = spectrum\n\n flux = calc_spectrum.flux\n uncertainty = calc_spectrum.uncertainty.array * spectrum.uncertainty.unit\n\n # the axis=-1 will enable this to run on single-dispersion, single-flux\n # and single-dispersion, multiple-flux\n return np.mean(flux / uncertainty, axis=-1)\n\n\ndef snr_derived(spectrum, region=None):\n \"\"\"\n This function computes the signal to noise ratio DER_SNR following the\n definition set forth by the Spectral Container Working Group of ST-ECF,\n MAST and CADC.\n\n Parameters\n ----------\n spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`\n The spectrum object overwhich the equivalent width will be calculated.\n\n region: `~specutils.utils.SpectralRegion`\n Region within the spectrum to calculate the SNR.\n\n Returns\n -------\n snr : `~astropy.units.Quantity` or list (based on region input)\n Signal to noise ratio of the spectrum or within the regions\n\n Notes\n -----\n The DER_SNR algorithm is an unbiased estimator describing the spectrum\n as a whole as long as the noise is uncorrelated in wavelength bins spaced\n two pixels apart, the noise is Normal distributed, for large wavelength\n 
regions of five or more pixels, the signal can be approximated\n by a straight line.\n\n Code and some docs copied from\n ``http://www.stecf.org/software/ASTROsoft/DER_SNR/der_snr.py``\n \"\"\"\n\n # No region, therefore whole spectrum.\n if region is None:\n return _snr_derived(spectrum)\n\n # Single region\n elif isinstance(region, SpectralRegion):\n return _snr_derived(spectrum, region=region)\n\n # List of regions\n elif isinstance(region, list):\n return [_snr_derived(spectrum, region=reg)\n for reg in region]\n\n\ndef _snr_derived(spectrum, region=None):\n \"\"\"\n This function computes the signal to noise ratio DER_SNR following the\n definition set forth by the Spectral Container Working Group of ST-ECF,\n MAST and CADC\n\n Parameters\n ----------\n spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`\n The spectrum object over which the signal-to-noise ratio will be calculated.\n\n region: `~specutils.utils.SpectralRegion`\n Region within the spectrum to calculate the SNR.\n\n Returns\n -------\n snr : `~astropy.units.Quantity` or list (based on region input)\n Signal to noise ratio of the spectrum or within the regions\n\n Notes\n -----\n This is a helper function for the above `snr_derived()` method.\n\n \"\"\"\n\n if region is not None:\n calc_spectrum = extract_region(spectrum, region)\n else:\n calc_spectrum = spectrum\n\n flux = calc_spectrum.flux\n\n # Number of flux values (the reference DER_SNR implementation also skips\n # values that are exactly zero from padding; that filtering is not applied here)\n n = len(flux)\n\n # For spectra shorter than this, no value can be returned\n if n > 4:\n signal = np.median(flux)\n noise = 0.6052697 * np.median(np.abs(2.0 * flux[2:n-2] - flux[0:n-4] - flux[4:n]))\n return signal / noise\n else:\n return 0.0\n" ]
[ [ "numpy.median", "numpy.mean", "numpy.abs" ] ]
raphaelsulzer/occupancy_networks
[ "aacc9e42e663b9c9ad0352a511c38cb1f705ec51" ]
[ "external/mesh-fusion/2_fusion.py" ]
[ "import math\nimport numpy as np\nimport os\nfrom scipy import ndimage\nfrom scipy.interpolate import RegularGridInterpolator as rgi\nimport common\nimport argparse\nimport ntpath\n\n# Import shipped libraries.\nimport librender\nimport libmcubes\nfrom multiprocessing import Pool\n\nuse_gpu = False\nif use_gpu:\n import libfusiongpu as libfusion\n from libfusiongpu import tsdf_gpu as compute_tsdf\nelse:\n import libfusioncpu as libfusion\n from libfusioncpu import tsdf_cpu as compute_tsdf\n\n\nclass Fusion:\n \"\"\"\n Performs TSDF fusion.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor.\n \"\"\"\n\n parser = self.get_parser()\n self.options = parser.parse_args()\n\n self.render_intrinsics = np.array([\n self.options.focal_length_x,\n self.options.focal_length_y,\n self.options.principal_point_x,\n self.options.principal_point_x\n ], dtype=float)\n # Essentially the same as above, just a slightly different format.\n self.fusion_intrisics = np.array([\n [self.options.focal_length_x, 0, self.options.principal_point_x],\n [0, self.options.focal_length_y, self.options.principal_point_y],\n [0, 0, 1]\n ])\n self.image_size = np.array([\n self.options.image_height,\n self.options.image_width,\n ], dtype=np.int32)\n # Mesh will be centered at (0, 0, 1)!\n self.znf = np.array([\n 1 - 0.75,\n 1 + 0.75\n ], dtype=float)\n # Derive voxel size from resolution.\n self.voxel_size = 1./self.options.resolution\n self.truncation = self.options.truncation_factor*self.voxel_size\n\n def get_parser(self):\n \"\"\"\n Get parser of tool.\n\n :return: parser\n \"\"\"\n\n parser = argparse.ArgumentParser(description='Scale a set of meshes stored as OFF files.')\n parser.add_argument('--mode', type=str, default='render',\n help='Operation mode: render, fuse or sample.')\n input_group = parser.add_mutually_exclusive_group(required=True)\n input_group.add_argument('--in_dir', type=str,\n help='Path to input directory.')\n input_group.add_argument('--in_file', type=str,\n help='Path to input directory.')\n parser.add_argument('--out_dir', type=str,\n help='Path to output directory; files within are overwritten!')\n parser.add_argument('--t_dir', type=str,\n help='Path to transformation directory.')\n parser.add_argument('--n_proc', type=int, default=0,\n help='Number of processes to run in parallel'\n '(0 means sequential execution).')\n parser.add_argument('--overwrite', action='store_true',\n help='Overwrites existing files if true.')\n\n parser.add_argument('--n_points', type=int, default=100000,\n help='Number of points to sample per model.')\n parser.add_argument('--n_views', type=int, default=100,\n help='Number of views per model.')\n parser.add_argument('--image_height', type=int, default=640,\n help='Depth image height.')\n parser.add_argument('--image_width', type=int, default=640,\n help='Depth image width.')\n parser.add_argument('--focal_length_x', type=float, default=640,\n help='Focal length in x direction.')\n parser.add_argument('--focal_length_y', type=float, default=640,\n help='Focal length in y direction.')\n parser.add_argument('--principal_point_x', type=float, default=320,\n help='Principal point location in x direction.')\n parser.add_argument('--principal_point_y', type=float, default=320,\n help='Principal point location in y direction.')\n parser.add_argument('--sample_weighted', action='store_true',\n help='Whether to use weighted sampling.')\n parser.add_argument('--sample_scale', type=float, default=0.2,\n help='Scale for weighted sampling.')\n parser.add_argument(\n 
'--depth_offset_factor', type=float, default=1.5,\n help='The depth maps are offset by depth_offset_factor*voxel_size.')\n parser.add_argument('--resolution', type=float, default=256,\n help='Resolution for fusion.')\n parser.add_argument(\n '--truncation_factor', type=float, default=10,\n help='Truncation for fusion is derived as truncation_factor*voxel_size.')\n\n return parser\n\n def read_directory(self, directory):\n \"\"\"\n Read directory.\n\n :param directory: path to directory\n :return: list of files\n \"\"\"\n\n files = []\n for filename in os.listdir(directory):\n files.append(os.path.normpath(os.path.join(directory, filename)))\n\n return files\n\n def get_in_files(self):\n if self.options.in_dir is not None:\n assert os.path.exists(self.options.in_dir)\n common.makedir(self.options.out_dir)\n files = self.read_directory(self.options.in_dir)\n else:\n files = [self.options.in_file]\n\n if not self.options.overwrite:\n def file_filter(filepath):\n outpath = self.get_outpath(filepath)\n return not os.path.exists(outpath)\n files = list(filter(file_filter, files))\n\n return files\n\n def get_outpath(self, filepath):\n filename = os.path.basename(filepath)\n if self.options.mode == 'render':\n outpath = os.path.join(self.options.out_dir, filename + '.h5')\n elif self.options.mode == 'fuse':\n modelname = os.path.splitext(os.path.splitext(filename)[0])[0]\n outpath = os.path.join(self.options.out_dir, modelname + '.off')\n elif self.options.mode == 'sample':\n modelname = os.path.splitext(os.path.splitext(filename)[0])[0]\n outpath = os.path.join(self.options.out_dir, modelname + '.npz')\n\n return outpath\n \n def get_points(self):\n \"\"\"\n See https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere.\n\n :return: self.options.n_views points evenly distributed on the unit sphere\n :rtype: numpy.ndarray\n \"\"\"\n\n rnd = 1.\n points = []\n offset = 2. / self.options.n_views\n increment = math.pi * (3. 
- math.sqrt(5.))\n\n for i in range(self.options.n_views):\n y = ((i * offset) - 1) + (offset / 2)\n r = math.sqrt(1 - pow(y, 2))\n\n phi = ((i + rnd) % self.options.n_views) * increment\n\n x = math.cos(phi) * r\n z = math.sin(phi) * r\n\n points.append([x, y, z])\n\n # visualization.plot_point_cloud(np.array(points))\n return np.array(points)\n\n def get_views(self):\n \"\"\"\n Generate a set of views to generate depth maps from.\n\n :param n_views: number of views per axis\n :type n_views: int\n :return: rotation matrices\n :rtype: [numpy.ndarray]\n \"\"\"\n\n Rs = []\n points = self.get_points()\n\n for i in range(points.shape[0]):\n # https://math.stackexchange.com/questions/1465611/given-a-point-on-a-sphere-how-do-i-find-the-angles-needed-to-point-at-its-ce\n longitude = - math.atan2(points[i, 0], points[i, 1])\n latitude = math.atan2(points[i, 2], math.sqrt(points[i, 0] ** 2 + points[i, 1] ** 2))\n\n R_x = np.array([[1, 0, 0],\n [0, math.cos(latitude), -math.sin(latitude)],\n [0, math.sin(latitude), math.cos(latitude)]])\n R_y = np.array([[math.cos(longitude), 0, math.sin(longitude)],\n [0, 1, 0],\n [-math.sin(longitude), 0, math.cos(longitude)]])\n\n R = R_y.dot(R_x)\n Rs.append(R)\n\n return Rs\n\n def render(self, mesh, Rs):\n \"\"\"\n Render the given mesh using the generated views.\n\n :param base_mesh: mesh to render\n :type base_mesh: mesh.Mesh\n :param Rs: rotation matrices\n :type Rs: [numpy.ndarray]\n :return: depth maps\n :rtype: numpy.ndarray\n \"\"\"\n\n depthmaps = []\n for i in range(len(Rs)):\n np_vertices = Rs[i].dot(mesh.vertices.astype(np.float64).T)\n np_vertices[2, :] += 1\n\n np_faces = mesh.faces.astype(np.float64)\n np_faces += 1\n\n depthmap, mask, img = \\\n librender.render(np_vertices.copy(), np_faces.T.copy(),\n self.render_intrinsics, self.znf, self.image_size)\n\n # This is mainly result of experimenting.\n # The core idea is that the volume of the object is enlarged slightly\n # (by subtracting a constant from the depth map).\n # Dilation additionally enlarges thin structures (e.g. 
for chairs).\n depthmap -= self.options.depth_offset_factor * self.voxel_size\n depthmap = ndimage.morphology.grey_erosion(depthmap, size=(3, 3))\n\n depthmaps.append(depthmap)\n\n return depthmaps\n\n def fusion(self, depthmaps, Rs):\n \"\"\"\n Fuse the rendered depth maps.\n\n :param depthmaps: depth maps\n :type depthmaps: numpy.ndarray\n :param Rs: rotation matrices corresponding to views\n :type Rs: [numpy.ndarray]\n :return: (T)SDF\n :rtype: numpy.ndarray\n \"\"\"\n\n Ks = self.fusion_intrisics.reshape((1, 3, 3))\n Ks = np.repeat(Ks, len(depthmaps), axis=0).astype(np.float32)\n\n # Every view uses the same translation (0, 0, 1), matching the render setup.\n Ts = []\n for i in range(len(Rs)):\n Ts.append(np.array([0, 0, 1]))\n\n Ts = np.array(Ts).astype(np.float32)\n Rs = np.array(Rs).astype(np.float32)\n\n depthmaps = np.array(depthmaps).astype(np.float32)\n views = libfusion.PyViews(depthmaps, Ks, Rs, Ts)\n\n # Note that this is an alias defined as libfusiongpu.tsdf_gpu or libfusioncpu.tsdf_cpu!\n tsdf = compute_tsdf(views,\n self.options.resolution, self.options.resolution,\n self.options.resolution, self.voxel_size, self.truncation, False)\n\n tsdf = np.transpose(tsdf[0], [2, 1, 0])\n return tsdf\n\n def run(self):\n \"\"\"\n Run the tool.\n \"\"\"\n common.makedir(self.options.out_dir)\n files = self.get_in_files()\n\n if self.options.mode == 'render':\n method = self.run_render\n elif self.options.mode == 'fuse':\n method = self.run_fuse\n elif self.options.mode == 'sample':\n method = self.run_sample\n else:\n print('Invalid mode, choose render, fuse or sample.')\n exit()\n\n if self.options.n_proc == 0:\n for filepath in files:\n method(filepath)\n else:\n with Pool(self.options.n_proc) as p:\n p.map(method, files)\n\n def run_render(self, filepath):\n \"\"\"\n Run rendering.\n \"\"\"\n timer = common.Timer()\n Rs = self.get_views()\n\n timer.reset()\n mesh = common.Mesh.from_off(filepath)\n depths = self.render(mesh, Rs)\n\n depth_file = self.get_outpath(filepath)\n common.write_hdf5(depth_file, np.array(depths))\n print('[Data] wrote %s (%f seconds)' % (depth_file, timer.elapsed()))\n\n def run_fuse(self, filepath):\n \"\"\"\n Run fusion.\n \"\"\"\n timer = common.Timer()\n Rs = self.get_views()\n\n # As rendering might be slower, we wait for rendering to finish.\n # This allows rendering and fusing to run in parallel (more or less).\n depths = common.read_hdf5(filepath)\n\n timer.reset()\n tsdf = self.fusion(depths, Rs)\n # To ensure that the final mesh is indeed watertight\n tsdf = np.pad(tsdf, 1, 'constant', constant_values=1e6)\n vertices, triangles = libmcubes.marching_cubes(-tsdf, 0)\n # Remove padding offset\n vertices -= 1\n # Normalize to [-0.5, 0.5]^3 cube\n vertices /= self.options.resolution\n vertices -= 0.5\n\n modelname = os.path.splitext(os.path.splitext(os.path.basename(filepath))[0])[0]\n t_loc, t_scale = self.get_transform(modelname)\n vertices = t_loc + t_scale * vertices\n\n off_file = self.get_outpath(filepath)\n libmcubes.export_off(vertices, triangles, off_file)\n print('[Data] wrote %s (%f seconds)' % (off_file, timer.elapsed()))\n\n def run_sample(self, filepath):\n \"\"\"\n Run sampling.\n \"\"\"\n timer = common.Timer()\n Rs = self.get_views()\n\n # As rendering might be slower, we wait for rendering to finish.\n # This allows rendering and fusing to run in parallel (more or less).\n\n depths = common.read_hdf5(filepath)\n\n timer.reset()\n tsdf = self.fusion(depths, Rs)\n\n xs = np.linspace(-0.5, 0.5, tsdf.shape[0])\n ys = np.linspace(-0.5, 0.5, tsdf.shape[1])\n zs = np.linspace(-0.5, 0.5, tsdf.shape[2])\n 
tsdf_func = rgi((xs, ys, zs), tsdf)\n\n modelname = os.path.splitext(os.path.splitext(os.path.basename(filepath))[0])[0]\n points = self.get_random_points(tsdf)\n values = tsdf_func(points)\n t_loc, t_scale = self.get_transform(modelname)\n\n occupancy = (values <= 0.)\n out_file = self.get_outpath(filepath)\n np.savez(out_file, points=points, occupancy=occupancy, loc=t_loc, scale=t_scale)\n\n print('[Data] wrote %s (%f seconds)' % (out_file, timer.elapsed()))\n\n def get_transform(self, modelname):\n if self.options.t_dir is not None:\n t_filename = os.path.join(self.options.t_dir, modelname + '.npz')\n t_dict = np.load(t_filename)\n t_loc = t_dict['loc']\n t_scale = t_dict['scale']\n else:\n t_loc = np.zeros(3)\n t_scale = np.ones(3)\n\n return t_loc, t_scale\n\n def get_random_points(self, tsdf):\n N1, N2, N3 = tsdf.shape\n npoints = self.options.n_points\n\n if not self.options.sample_weighted:\n points = np.random.rand(npoints, 3)\n else:\n df = np.abs(tsdf)\n scale = self.options.sample_scale * df.max()\n indices = np.arange(N1*N2*N3)\n prob = np.exp(-df.flatten() / scale)\n prob = prob / prob.sum()\n indices_rnd = np.random.choice(indices, size=npoints, p=prob)\n idx1, idx2, idx3 = np.unravel_index(indices_rnd, [N1, N2, N3])\n idx1 = idx1 + np.random.rand(npoints)\n idx2 = idx2 + np.random.rand(npoints)\n idx3 = idx3 + np.random.rand(npoints)\n points = np.stack([idx1 / N1, idx2 / N2, idx3 / N3], axis=1)\n\n points -= 0.5\n\n return points\n\n\nif __name__ == '__main__':\n app = Fusion()\n app.run()\n" ]
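For reference, a minimal standalone sketch of the viewpoint generation implemented in get_points/get_views above, assuming only numpy. Here n_views is a plain argument standing in for self.options.n_views, and rnd is fixed to 1.0 since its initialization lies above this excerpt; treat both as assumptions rather than the tool's exact configuration.

import math

import numpy as np


def get_points(n_views=100, rnd=1.0):
    # Sample roughly equidistant points on the unit sphere with the
    # Fibonacci (golden-angle) spiral, mirroring get_points above.
    points = []
    offset = 2.0 / n_views
    increment = math.pi * (3.0 - math.sqrt(5.0))  # golden angle

    for i in range(n_views):
        y = ((i * offset) - 1) + (offset / 2)
        r = math.sqrt(1 - y ** 2)
        phi = ((i + rnd) % n_views) * increment
        points.append([math.cos(phi) * r, y, math.sin(phi) * r])

    return np.array(points)


def get_views(n_views=100):
    # Turn each sphere point into a rotation matrix via the same
    # longitude/latitude decomposition used in get_views above.
    Rs = []
    for p in get_points(n_views):
        longitude = -math.atan2(p[0], p[1])
        latitude = math.atan2(p[2], math.sqrt(p[0] ** 2 + p[1] ** 2))

        R_x = np.array([[1, 0, 0],
                        [0, math.cos(latitude), -math.sin(latitude)],
                        [0, math.sin(latitude), math.cos(latitude)]])
        R_y = np.array([[math.cos(longitude), 0, math.sin(longitude)],
                        [0, 1, 0],
                        [-math.sin(longitude), 0, math.cos(longitude)]])
        Rs.append(R_y.dot(R_x))
    return Rs


if __name__ == '__main__':
    # Every view matrix should be a proper rotation: R^T R = I, det(R) = 1.
    for R in get_views(10):
        assert np.allclose(R.T.dot(R), np.eye(3))
        assert np.isclose(np.linalg.det(R), 1.0)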
[ [ "numpy.unravel_index", "numpy.savez", "numpy.pad", "numpy.linspace", "numpy.abs", "numpy.random.choice", "numpy.arange", "scipy.interpolate.RegularGridInterpolator", "numpy.stack", "numpy.ones", "numpy.random.rand", "scipy.ndimage.morphology.grey_erosion", "numpy.transpose", "numpy.load", "numpy.array", "numpy.zeros" ] ]
ridicolos/featuretools
[ "bbad3f7392b203b7b9c250a93465052e7fc06bbc", "bbad3f7392b203b7b9c250a93465052e7fc06bbc", "bbad3f7392b203b7b9c250a93465052e7fc06bbc" ]
[ "featuretools/tests/primitive_tests/test_agg_feats.py", "featuretools/tests/computational_backend/test_feature_set_calculator.py", "featuretools/tests/entityset_tests/test_dask_es.py" ]
[ "from datetime import datetime\nfrom inspect import isclass\nfrom math import isnan\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom woodwork.column_schema import ColumnSchema\nfrom woodwork.logical_types import Datetime\n\nimport featuretools as ft\nfrom featuretools.entityset.relationship import RelationshipPath\nfrom featuretools.primitives import (\n Count,\n Mean,\n Median,\n NMostCommon,\n NumTrue,\n NumUnique,\n Sum,\n TimeSinceFirst,\n TimeSinceLast,\n get_aggregation_primitives\n)\nfrom featuretools.primitives.base import (\n AggregationPrimitive,\n make_agg_primitive\n)\nfrom featuretools.primitives.utils import (\n PrimitivesDeserializer,\n serialize_primitive\n)\nfrom featuretools.synthesis.deep_feature_synthesis import (\n DeepFeatureSynthesis,\n check_stacking,\n match\n)\nfrom featuretools.tests.testing_utils import (\n backward_path,\n feature_with_name,\n to_pandas\n)\nfrom featuretools.utils.gen_utils import Library\n\n\n@pytest.fixture\ndef test_primitive():\n class TestAgg(AggregationPrimitive):\n name = \"test\"\n input_types = [ColumnSchema(semantic_tags={'numeric'})]\n return_type = ColumnSchema(semantic_tags={'numeric'})\n stack_on = []\n\n def get_function(self, agg_type='pandas'):\n return None\n\n return TestAgg\n\n\ndef test_get_depth(es):\n log_id_feat = ft.IdentityFeature(es['log'].ww['id'])\n customer_id_feat = ft.IdentityFeature(es['customers'].ww['id'])\n count_logs = ft.Feature(log_id_feat, parent_dataframe_name='sessions', primitive=Count)\n sum_count_logs = ft.Feature(count_logs, parent_dataframe_name='customers', primitive=Sum)\n num_logs_greater_than_5 = sum_count_logs > 5\n count_customers = ft.Feature(customer_id_feat,\n parent_dataframe_name=u'régions',\n where=num_logs_greater_than_5,\n primitive=Count)\n num_customers_region = ft.Feature(count_customers, dataframe_name=\"customers\")\n\n depth = num_customers_region.get_depth()\n assert depth == 5\n\n\ndef test_makes_count(es):\n dfs = DeepFeatureSynthesis(target_dataframe_name='sessions',\n entityset=es,\n agg_primitives=[Count],\n trans_primitives=[])\n\n features = dfs.build_features()\n assert feature_with_name(features, 'device_type')\n assert feature_with_name(features, 'customer_id')\n assert feature_with_name(features, u'customers.région_id')\n assert feature_with_name(features, 'customers.age')\n assert feature_with_name(features, 'COUNT(log)')\n assert feature_with_name(features, 'customers.COUNT(sessions)')\n assert feature_with_name(features, u'customers.régions.language')\n assert feature_with_name(features, 'customers.COUNT(log)')\n\n\ndef test_count_null_and_make_agg_primitive(pd_es):\n def count_func(values, count_null=False):\n if len(values) == 0:\n return 0\n\n if count_null:\n values = values.fillna(0)\n\n return values.count()\n\n def count_generate_name(self, base_feature_names, relationship_path_name,\n parent_dataframe_name, where_str, use_prev_str):\n return u\"COUNT(%s%s%s)\" % (relationship_path_name,\n where_str,\n use_prev_str)\n\n Count = make_agg_primitive(\n count_func,\n [[ColumnSchema(semantic_tags={'foreign_key'})], [ColumnSchema()]],\n ColumnSchema(semantic_tags={'numeric'}),\n name=\"count\",\n stack_on_self=False,\n cls_attributes={\"generate_name\": count_generate_name}\n )\n count_null = ft.Feature(pd_es['log'].ww['value'], parent_dataframe_name='sessions', primitive=Count(count_null=True))\n feature_matrix = ft.calculate_feature_matrix([count_null], entityset=pd_es)\n values = [5, 4, 1, 2, 3, 2]\n assert (values == 
feature_matrix[count_null.get_name()]).all()\n\n\ndef test_check_input_types(es):\n count = ft.Feature(es[\"sessions\"].ww[\"id\"], parent_dataframe_name=\"customers\", primitive=Count)\n mean = ft.Feature(count, parent_dataframe_name=u\"régions\", primitive=Mean)\n assert mean._check_input_types()\n\n boolean = count > 3\n mean = ft.Feature(count, parent_dataframe_name=u\"régions\", where=boolean, primitive=Mean)\n assert mean._check_input_types()\n\n\ndef test_mean_nan(es):\n array = pd.Series([5, 5, 5, 5, 5])\n mean_func_nans_default = Mean().get_function()\n mean_func_nans_false = Mean(skipna=False).get_function()\n mean_func_nans_true = Mean(skipna=True).get_function()\n assert mean_func_nans_default(array) == 5\n assert mean_func_nans_false(array) == 5\n assert mean_func_nans_true(array) == 5\n array = pd.Series([5, np.nan, np.nan, np.nan, np.nan, 10])\n assert mean_func_nans_default(array) == 7.5\n assert isnan(mean_func_nans_false(array))\n assert mean_func_nans_true(array) == 7.5\n array_nans = pd.Series([np.nan, np.nan, np.nan, np.nan])\n assert isnan(mean_func_nans_default(array_nans))\n assert isnan(mean_func_nans_false(array_nans))\n assert isnan(mean_func_nans_true(array_nans))\n\n # test naming\n default_feat = ft.Feature(es[\"log\"].ww[\"value\"],\n parent_dataframe_name=\"customers\",\n primitive=Mean)\n assert default_feat.get_name() == \"MEAN(log.value)\"\n ignore_nan_feat = ft.Feature(es[\"log\"].ww[\"value\"],\n parent_dataframe_name=\"customers\",\n primitive=Mean(skipna=True))\n assert ignore_nan_feat.get_name() == \"MEAN(log.value)\"\n include_nan_feat = ft.Feature(es[\"log\"].ww[\"value\"],\n parent_dataframe_name=\"customers\",\n primitive=Mean(skipna=False))\n assert include_nan_feat.get_name() == \"MEAN(log.value, skipna=False)\"\n\n\ndef test_base_of_and_stack_on_heuristic(es, test_primitive):\n child = ft.Feature(es[\"sessions\"].ww[\"id\"], parent_dataframe_name=\"customers\", primitive=Count)\n test_primitive.stack_on = []\n child.primitive.base_of = []\n assert not check_stacking(test_primitive(), [child])\n\n test_primitive.stack_on = []\n child.primitive.base_of = None\n assert check_stacking(test_primitive(), [child])\n\n test_primitive.stack_on = []\n child.primitive.base_of = [test_primitive]\n assert check_stacking(test_primitive(), [child])\n\n test_primitive.stack_on = None\n child.primitive.base_of = []\n assert check_stacking(test_primitive(), [child])\n\n test_primitive.stack_on = None\n child.primitive.base_of = None\n assert check_stacking(test_primitive(), [child])\n\n test_primitive.stack_on = None\n child.primitive.base_of = [test_primitive]\n assert check_stacking(test_primitive(), [child])\n\n test_primitive.stack_on = [type(child.primitive)]\n child.primitive.base_of = []\n assert check_stacking(test_primitive(), [child])\n\n test_primitive.stack_on = [type(child.primitive)]\n child.primitive.base_of = None\n assert check_stacking(test_primitive(), [child])\n\n test_primitive.stack_on = [type(child.primitive)]\n child.primitive.base_of = [test_primitive]\n assert check_stacking(test_primitive(), [child])\n\n\ndef test_stack_on_self(es, test_primitive):\n # test stacks on self\n child = ft.Feature(es['log'].ww['value'], parent_dataframe_name=u'régions', primitive=test_primitive)\n test_primitive.stack_on = []\n child.primitive.base_of = []\n test_primitive.stack_on_self = False\n child.primitive.stack_on_self = False\n assert not check_stacking(test_primitive(), [child])\n\n test_primitive.stack_on_self = True\n assert 
check_stacking(test_primitive(), [child])\n\n test_primitive.stack_on = None\n test_primitive.stack_on_self = False\n assert not check_stacking(test_primitive(), [child])\n\n\ndef test_init_and_name(es):\n log = es['log']\n\n # Add a BooleanNullable column so primitives with that input type get tested\n boolean_nullable = log.ww['purchased']\n boolean_nullable = boolean_nullable.ww.set_logical_type('BooleanNullable')\n log.ww['boolean_nullable'] = boolean_nullable\n\n features = [ft.Feature(es['log'].ww[col]) for col in log.columns]\n\n # check all primitives have name\n for attribute_string in dir(ft.primitives):\n attr = getattr(ft.primitives, attribute_string)\n if isclass(attr):\n if issubclass(attr, AggregationPrimitive) and attr != AggregationPrimitive:\n assert getattr(attr, \"name\") is not None\n\n agg_primitives = get_aggregation_primitives().values()\n # If Dask EntitySet use only Dask compatible primitives\n if es.dataframe_type == Library.DASK.value:\n agg_primitives = [prim for prim in agg_primitives if Library.DASK in prim.compatibility]\n if es.dataframe_type == Library.KOALAS.value:\n agg_primitives = [prim for prim in agg_primitives if Library.KOALAS in prim.compatibility]\n\n for agg_prim in agg_primitives:\n input_types = agg_prim.input_types\n if type(input_types[0]) != list:\n input_types = [input_types]\n\n # test each allowed input_types for this primitive\n for it in input_types:\n # use the input_types matching function from DFS\n matching_types = match(it, features)\n if len(matching_types) == 0:\n raise Exception(\"Agg Primitive %s not tested\" % agg_prim.name)\n for t in matching_types:\n instance = ft.Feature(t, parent_dataframe_name='sessions', primitive=agg_prim)\n\n # try to get name and calculate\n instance.get_name()\n ft.calculate_feature_matrix([instance], entityset=es)\n\n\ndef test_invalid_init_args(diamond_es):\n error_text = 'parent_dataframe must match first relationship in path'\n with pytest.raises(AssertionError, match=error_text):\n path = backward_path(diamond_es, ['stores', 'transactions'])\n ft.AggregationFeature(ft.IdentityFeature(diamond_es['transactions'].ww['amount']),\n 'customers',\n ft.primitives.Mean,\n relationship_path=path)\n\n error_text = 'Base feature must be defined on the dataframe at the end of relationship_path'\n with pytest.raises(AssertionError, match=error_text):\n path = backward_path(diamond_es, ['regions', 'stores'])\n ft.AggregationFeature(ft.IdentityFeature(diamond_es['transactions'].ww['amount']),\n 'regions',\n ft.primitives.Mean,\n relationship_path=path)\n\n error_text = 'All relationships in path must be backward'\n with pytest.raises(AssertionError, match=error_text):\n backward = backward_path(diamond_es, ['customers', 'transactions'])\n forward = RelationshipPath([(True, r) for _, r in backward])\n path = RelationshipPath(list(forward) + list(backward))\n ft.AggregationFeature(ft.IdentityFeature(diamond_es['transactions'].ww['amount']),\n 'transactions',\n ft.primitives.Mean,\n relationship_path=path)\n\n\ndef test_init_with_multiple_possible_paths(diamond_es):\n error_text = \"There are multiple possible paths to the base dataframe. 
\" \\\n \"You must specify a relationship path.\"\n with pytest.raises(RuntimeError, match=error_text):\n ft.AggregationFeature(ft.IdentityFeature(diamond_es['transactions'].ww['amount']),\n 'regions',\n ft.primitives.Mean)\n\n # Does not raise if path specified.\n path = backward_path(diamond_es, ['regions', 'customers', 'transactions'])\n ft.AggregationFeature(ft.IdentityFeature(diamond_es['transactions'].ww['amount']),\n 'regions',\n ft.primitives.Mean,\n relationship_path=path)\n\n\ndef test_init_with_single_possible_path(diamond_es):\n # This uses diamond_es to test that there being a cycle somewhere in the\n # graph doesn't cause an error.\n feat = ft.AggregationFeature(ft.IdentityFeature(diamond_es['transactions'].ww['amount']),\n 'customers',\n ft.primitives.Mean)\n expected_path = backward_path(diamond_es, ['customers', 'transactions'])\n assert feat.relationship_path == expected_path\n\n\ndef test_init_with_no_path(diamond_es):\n error_text = 'No backward path from \"transactions\" to \"customers\" found.'\n with pytest.raises(RuntimeError, match=error_text):\n ft.AggregationFeature(ft.IdentityFeature(diamond_es['customers'].ww['name']),\n 'transactions',\n ft.primitives.Count)\n\n error_text = 'No backward path from \"transactions\" to \"transactions\" found.'\n with pytest.raises(RuntimeError, match=error_text):\n ft.AggregationFeature(ft.IdentityFeature(diamond_es['transactions'].ww['amount']),\n 'transactions',\n ft.primitives.Mean)\n\n\ndef test_name_with_multiple_possible_paths(diamond_es):\n path = backward_path(diamond_es, ['regions', 'customers', 'transactions'])\n feat = ft.AggregationFeature(ft.IdentityFeature(diamond_es['transactions'].ww['amount']),\n 'regions',\n ft.primitives.Mean,\n relationship_path=path)\n\n assert feat.get_name() == \"MEAN(customers.transactions.amount)\"\n assert feat.relationship_path_name() == 'customers.transactions'\n\n\ndef test_copy(games_es):\n home_games = next(r for r in games_es.relationships\n if r._child_column_name == 'home_team_id')\n path = RelationshipPath([(False, home_games)])\n feat = ft.AggregationFeature(ft.IdentityFeature(games_es['games'].ww['home_team_score']),\n 'teams',\n relationship_path=path,\n primitive=ft.primitives.Mean)\n copied = feat.copy()\n assert copied.dataframe_name == feat.dataframe_name\n assert copied.base_features == feat.base_features\n assert copied.relationship_path == feat.relationship_path\n assert copied.primitive == feat.primitive\n\n\ndef test_serialization(es):\n primitives_deserializer = PrimitivesDeserializer()\n value = ft.IdentityFeature(es['log'].ww['value'])\n primitive = ft.primitives.Max()\n max1 = ft.AggregationFeature(value, 'customers', primitive)\n\n path = next(es.find_backward_paths('customers', 'log'))\n dictionary = {\n 'name': None,\n 'base_features': [value.unique_name()],\n 'relationship_path': [r.to_dictionary() for r in path],\n 'primitive': serialize_primitive(primitive),\n 'where': None,\n 'use_previous': None,\n }\n\n assert dictionary == max1.get_arguments()\n deserialized = ft.AggregationFeature.from_dictionary(dictionary,\n es,\n {value.unique_name(): value},\n primitives_deserializer)\n _assert_agg_feats_equal(max1, deserialized)\n\n is_purchased = ft.IdentityFeature(es['log'].ww['purchased'])\n use_previous = ft.Timedelta(3, 'd')\n max2 = ft.AggregationFeature(value, 'customers', primitive,\n where=is_purchased, use_previous=use_previous)\n\n dictionary = {\n 'name': None,\n 'base_features': [value.unique_name()],\n 'relationship_path': [r.to_dictionary() for r 
in path],\n 'primitive': serialize_primitive(primitive),\n 'where': is_purchased.unique_name(),\n 'use_previous': use_previous.get_arguments(),\n }\n\n assert dictionary == max2.get_arguments()\n dependencies = {\n value.unique_name(): value,\n is_purchased.unique_name(): is_purchased\n }\n deserialized = ft.AggregationFeature.from_dictionary(dictionary,\n es,\n dependencies,\n primitives_deserializer)\n _assert_agg_feats_equal(max2, deserialized)\n\n\ndef test_time_since_last(pd_es):\n f = ft.Feature(pd_es[\"log\"].ww[\"datetime\"], parent_dataframe_name=\"customers\", primitive=TimeSinceLast)\n fm = ft.calculate_feature_matrix([f],\n entityset=pd_es,\n instance_ids=[0, 1, 2],\n cutoff_time=datetime(2015, 6, 8))\n\n correct = [131376000.0, 131289534.0, 131287797.0]\n # note: must round to nearest second\n assert all(fm[f.get_name()].round().values == correct)\n\n\ndef test_time_since_first(pd_es):\n f = ft.Feature(pd_es[\"log\"].ww[\"datetime\"], parent_dataframe_name=\"customers\", primitive=TimeSinceFirst)\n fm = ft.calculate_feature_matrix([f],\n entityset=pd_es,\n instance_ids=[0, 1, 2],\n cutoff_time=datetime(2015, 6, 8))\n\n correct = [131376600.0, 131289600.0, 131287800.0]\n # note: must round to nearest second\n assert all(fm[f.get_name()].round().values == correct)\n\n\ndef test_median(pd_es):\n f = ft.Feature(pd_es[\"log\"].ww[\"value_many_nans\"], parent_dataframe_name=\"customers\", primitive=Median)\n fm = ft.calculate_feature_matrix([f],\n entityset=pd_es,\n instance_ids=[0, 1, 2],\n cutoff_time=datetime(2015, 6, 8))\n\n correct = [1, 3, np.nan]\n np.testing.assert_equal(fm[f.get_name()].values, correct)\n\n\ndef test_agg_same_method_name(es):\n \"\"\"\n Pandas relies on the function name when calculating aggregations. This means if a two\n primitives with the same function name are applied to the same column, pandas\n can't differentiate them. 
We have a work around to this based on the name property\n that we test here.\n \"\"\"\n # TODO: Update to work with Dask and Koalas\n if es.dataframe_type != Library.PANDAS.value:\n pytest.xfail(\"Cannot use primitives made with make_agg_primitives with Dask or Koalas EntitySets\")\n # test with normally defined functions\n\n def custom_primitive(x):\n return x.sum()\n\n Sum = make_agg_primitive(custom_primitive, input_types=[ColumnSchema(semantic_tags={'numeric'})],\n return_type=ColumnSchema(semantic_tags={'numeric'}), name=\"sum\")\n\n def custom_primitive(x):\n return x.max()\n\n Max = make_agg_primitive(custom_primitive, input_types=[ColumnSchema(semantic_tags={'numeric'})],\n return_type=ColumnSchema(semantic_tags={'numeric'}), name=\"max\")\n\n f_sum = ft.Feature(es[\"log\"].ww[\"value\"], parent_dataframe_name=\"customers\", primitive=Sum)\n f_max = ft.Feature(es[\"log\"].ww[\"value\"], parent_dataframe_name=\"customers\", primitive=Max)\n\n fm = ft.calculate_feature_matrix([f_sum, f_max], entityset=es)\n assert fm.columns.tolist() == [f_sum.get_name(), f_max.get_name()]\n\n # test with lambdas\n Sum = make_agg_primitive(lambda x: x.sum(), input_types=[ColumnSchema(semantic_tags={'numeric'})],\n return_type=ColumnSchema(semantic_tags={'numeric'}), name=\"sum\")\n Max = make_agg_primitive(lambda x: x.max(), input_types=[ColumnSchema(semantic_tags={'numeric'})],\n return_type=ColumnSchema(semantic_tags={'numeric'}), name=\"max\")\n\n f_sum = ft.Feature(es[\"log\"].ww[\"value\"], parent_dataframe_name=\"customers\", primitive=Sum)\n f_max = ft.Feature(es[\"log\"].ww[\"value\"], parent_dataframe_name=\"customers\", primitive=Max)\n fm = ft.calculate_feature_matrix([f_sum, f_max], entityset=es)\n assert fm.columns.tolist() == [f_sum.get_name(), f_max.get_name()]\n\n\ndef test_time_since_last_custom(pd_es):\n def time_since_last(values, time=None):\n time_since = time - values.iloc[0]\n return time_since.total_seconds()\n\n TimeSinceLast = make_agg_primitive(time_since_last,\n [ColumnSchema(logical_type=Datetime, semantic_tags={'time_index'})],\n ColumnSchema(semantic_tags={'numeric'}),\n name=\"time_since_last\",\n uses_calc_time=True)\n f = ft.Feature(pd_es[\"log\"].ww[\"datetime\"], parent_dataframe_name=\"customers\", primitive=TimeSinceLast)\n fm = ft.calculate_feature_matrix([f],\n entityset=pd_es,\n instance_ids=[0, 1, 2],\n cutoff_time=datetime(2015, 6, 8))\n\n correct = [131376600, 131289600, 131287800]\n # note: must round to nearest second\n assert all(fm[f.get_name()].round().values == correct)\n\n error_text = \"'time' is a restricted keyword. 
Please use a different keyword.\"\n with pytest.raises(ValueError, match=error_text):\n TimeSinceLast = make_agg_primitive(time_since_last,\n [ColumnSchema(logical_type=Datetime, semantic_tags={'time_index'})],\n ColumnSchema(semantic_tags={'numeric'}),\n uses_calc_time=False)\n\n\ndef test_custom_primitive_time_as_arg(pd_es):\n def time_since_last(values, time):\n time_since = time - values.iloc[0]\n return time_since.total_seconds()\n\n TimeSinceLast = make_agg_primitive(time_since_last,\n [ColumnSchema(logical_type=Datetime, semantic_tags={'time_index'})],\n ColumnSchema(semantic_tags={'numeric'}),\n uses_calc_time=True)\n assert TimeSinceLast.name == \"time_since_last\"\n f = ft.Feature(pd_es[\"log\"].ww[\"datetime\"], parent_dataframe_name=\"customers\", primitive=TimeSinceLast)\n fm = ft.calculate_feature_matrix([f],\n entityset=pd_es,\n instance_ids=[0, 1, 2],\n cutoff_time=datetime(2015, 6, 8))\n\n correct = [131376600, 131289600, 131287800]\n # note: must round to nearest second\n assert all(fm[f.get_name()].round().values == correct)\n\n error_text = \"'time' is a restricted keyword. Please use a different keyword.\"\n with pytest.raises(ValueError, match=error_text):\n make_agg_primitive(time_since_last,\n [ColumnSchema(logical_type=Datetime, semantic_tags={'time_index'})],\n ColumnSchema(semantic_tags={'numeric'}),\n uses_calc_time=False)\n\n\ndef test_custom_primitive_multiple_inputs(pd_es):\n def mean_sunday(numeric, datetime):\n '''\n Finds the mean of non-null values of a feature that occurred on Sundays\n '''\n days = pd.DatetimeIndex(datetime).weekday.values\n df = pd.DataFrame({'numeric': numeric, 'time': days})\n return df[df['time'] == 6]['numeric'].mean()\n\n MeanSunday = make_agg_primitive(\n function=mean_sunday,\n input_types=[ColumnSchema(semantic_tags={'numeric'}), ColumnSchema(logical_type=Datetime)],\n return_type=ColumnSchema(semantic_tags={'numeric'})\n )\n\n fm, features = ft.dfs(entityset=pd_es,\n target_dataframe_name=\"sessions\",\n agg_primitives=[MeanSunday],\n trans_primitives=[])\n mean_sunday_value = pd.Series([None, None, None, 2.5, 7, None])\n iterator = zip(fm[\"MEAN_SUNDAY(log.value, datetime)\"], mean_sunday_value)\n for x, y in iterator:\n assert ((pd.isnull(x) and pd.isnull(y)) or (x == y))\n\n pd_es.add_interesting_values()\n mean_sunday_value_priority_0 = pd.Series([None, None, None, 2.5, 0, None])\n fm, features = ft.dfs(entityset=pd_es,\n target_dataframe_name=\"sessions\",\n agg_primitives=[MeanSunday],\n trans_primitives=[],\n where_primitives=[MeanSunday])\n where_feat = \"MEAN_SUNDAY(log.value, datetime WHERE priority_level = 0)\"\n for x, y in zip(fm[where_feat], mean_sunday_value_priority_0):\n assert ((pd.isnull(x) and pd.isnull(y)) or (x == y))\n\n\ndef test_custom_primitive_default_kwargs(es):\n def sum_n_times(numeric, n=1):\n return np.nan_to_num(numeric).sum(dtype=np.float) * n\n\n SumNTimes = make_agg_primitive(function=sum_n_times,\n input_types=[ColumnSchema(semantic_tags={'numeric'})],\n return_type=ColumnSchema(semantic_tags={'numeric'}))\n\n sum_n_1_n = 1\n sum_n_1_base_f = ft.Feature(es['log'].ww['value'])\n sum_n_1 = ft.Feature([sum_n_1_base_f], parent_dataframe_name='sessions', primitive=SumNTimes(n=sum_n_1_n))\n sum_n_2_n = 2\n sum_n_2_base_f = ft.Feature(es['log'].ww['value_2'])\n sum_n_2 = ft.Feature([sum_n_2_base_f], parent_dataframe_name='sessions', primitive=SumNTimes(n=sum_n_2_n))\n assert sum_n_1_base_f == sum_n_1.base_features[0]\n assert sum_n_1_n == sum_n_1.primitive.kwargs['n']\n assert sum_n_2_base_f == 
sum_n_2.base_features[0]\n assert sum_n_2_n == sum_n_2.primitive.kwargs['n']\n\n\ndef test_makes_numtrue(es):\n if es.dataframe_type == Library.KOALAS.value:\n pytest.xfail('Koalas EntitySets do not support NumTrue primitive')\n dfs = DeepFeatureSynthesis(target_dataframe_name='sessions',\n entityset=es,\n agg_primitives=[NumTrue],\n trans_primitives=[])\n features = dfs.build_features()\n assert feature_with_name(features, 'customers.NUM_TRUE(log.purchased)')\n assert feature_with_name(features, 'NUM_TRUE(log.purchased)')\n\n\ndef test_make_three_most_common(pd_es):\n def pd_top3(x):\n counts = x.value_counts()\n counts = counts[counts > 0]\n array = np.array(counts[:3].index)\n if len(array) < 3:\n filler = np.full(3 - len(array), np.nan)\n array = np.append(array, filler)\n return array\n\n NMostCommoner = make_agg_primitive(function=pd_top3,\n input_types=[ColumnSchema(semantic_tags={'category'})],\n return_type=None,\n number_output_features=3)\n\n fm, features = ft.dfs(entityset=pd_es,\n target_dataframe_name=\"customers\",\n instance_ids=[0, 1, 2],\n agg_primitives=[NMostCommoner],\n trans_primitives=[])\n\n df = fm[[\"PD_TOP3(log.product_id)[%s]\" % i for i in range(3)]]\n\n assert set(df.iloc[0].values[:2]) == set(['coke zero', 'toothpaste']) # coke zero and toothpaste have same number of occurrences\n assert df.iloc[0].values[2] in ['car', 'brown bag'] # so just check that the top two match\n\n assert df.iloc[1].reset_index(drop=True).equals(pd.Series(['coke zero', 'Haribo sugar-free gummy bears', np.nan]))\n assert df.iloc[2].reset_index(drop=True).equals(pd.Series(['taco clock', np.nan, np.nan]))\n\n\ndef test_stacking_multi(pd_es):\n threecommon = NMostCommon(3)\n tc = ft.Feature(pd_es['log'].ww['product_id'], parent_dataframe_name=\"sessions\", primitive=threecommon)\n\n stacked = []\n for i in range(3):\n stacked.append(ft.Feature(tc[i], parent_dataframe_name='customers', primitive=NumUnique))\n\n fm = ft.calculate_feature_matrix(stacked, entityset=pd_es, instance_ids=[0, 1, 2])\n\n correct_vals = [[3, 2, 1], [2, 1, 0], [0, 0, 0]]\n correct_vals1 = [[3, 1, 1], [2, 1, 0], [0, 0, 0]]\n # either of the above can be correct, and the outcome depends on the sorting of\n # two values in the initial n most common function, which changes arbitrarily.\n\n for i in range(3):\n f = 'NUM_UNIQUE(sessions.N_MOST_COMMON(log.product_id)[%d])' % i\n cols = fm.columns\n assert f in cols\n assert fm[cols[i]].tolist() == correct_vals[i] or fm[cols[i]].tolist() == correct_vals1[i]\n\n\ndef test_use_previous_pd_dateoffset(es):\n total_events_pd = ft.Feature(es[\"log\"].ww[\"id\"],\n parent_dataframe_name=\"customers\",\n use_previous=pd.DateOffset(hours=47, minutes=60),\n primitive=Count)\n\n feature_matrix = ft.calculate_feature_matrix([total_events_pd], es,\n cutoff_time=pd.Timestamp('2011-04-11 10:31:30'),\n instance_ids=[0, 1, 2])\n feature_matrix = to_pandas(feature_matrix, index='id', sort_index=True)\n col_name = list(feature_matrix.head().keys())[0]\n assert (feature_matrix[col_name] == [1, 5, 2]).all()\n\n\ndef _assert_agg_feats_equal(f1, f2):\n assert f1.unique_name() == f2.unique_name()\n assert f1.child_dataframe_name == f2.child_dataframe_name\n assert f1.parent_dataframe_name == f2.parent_dataframe_name\n assert f1.relationship_path == f2.relationship_path\n assert f1.use_previous == f2.use_previous\n\n\ndef test_override_multi_feature_names(pd_es):\n def gen_custom_names(primitive, base_feature_names, relationship_path_name,\n parent_dataframe_name, where_str, use_prev_str):\n 
base_string = 'Custom_%s({}.{})'.format(parent_dataframe_name, base_feature_names)\n return [base_string % i for i in range(primitive.number_output_features)]\n\n def pd_top3(x):\n counts = x.value_counts()\n counts = counts[counts > 0]\n array = np.array(counts[:3].index)\n if len(array) < 3:\n filler = np.full(3 - len(array), np.nan)\n array = np.append(array, filler)\n return array\n\n num_features = 3\n NMostCommoner = make_agg_primitive(function=pd_top3,\n input_types=[ColumnSchema(semantic_tags={'numeric'})],\n return_type=ColumnSchema(semantic_tags={'category'}),\n number_output_features=num_features,\n cls_attributes={\"generate_names\": gen_custom_names})\n\n fm, features = ft.dfs(entityset=pd_es,\n target_dataframe_name=\"products\",\n instance_ids=[0, 1, 2],\n agg_primitives=[NMostCommoner],\n trans_primitives=[])\n\n expected_names = []\n base_names = [['value'], ['value_2'], ['value_many_nans']]\n for name in base_names:\n expected_names += gen_custom_names(NMostCommoner, name, None, 'products', None, None)\n\n for name in expected_names:\n assert name in fm.columns\n", "from datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom dask import dataframe as dd\nfrom woodwork.column_schema import ColumnSchema\nfrom woodwork.logical_types import Categorical, Datetime, Double, Integer\n\nimport featuretools as ft\nfrom featuretools import Timedelta\nfrom featuretools.computational_backends.feature_set import FeatureSet\nfrom featuretools.computational_backends.feature_set_calculator import (\n FeatureSetCalculator\n)\nfrom featuretools.entityset.relationship import RelationshipPath\nfrom featuretools.feature_base import DirectFeature, IdentityFeature\nfrom featuretools.primitives import (\n And,\n Count,\n CumSum,\n EqualScalar,\n GreaterThanEqualToScalar,\n GreaterThanScalar,\n LessThanEqualToScalar,\n LessThanScalar,\n Mean,\n Min,\n Mode,\n Negate,\n NMostCommon,\n NotEqualScalar,\n NumTrue,\n Sum,\n TimeSinceLast,\n Trend\n)\nfrom featuretools.primitives.base import AggregationPrimitive\nfrom featuretools.tests.testing_utils import backward_path, to_pandas\nfrom featuretools.utils import Trie\nfrom featuretools.utils.gen_utils import Library\n\n\ndef test_make_identity(es):\n f = IdentityFeature(es['log'].ww['datetime'])\n\n feature_set = FeatureSet([f])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = to_pandas(calculator.run(np.array([0])))\n\n v = df[f.get_name()][0]\n assert (v == datetime(2011, 4, 9, 10, 30, 0))\n\n\ndef test_make_dfeat(es):\n f = DirectFeature(ft.Feature(es['customers'].ww['age']),\n child_dataframe_name='sessions')\n\n feature_set = FeatureSet([f])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = to_pandas(calculator.run(np.array([0])))\n\n v = df[f.get_name()][0]\n assert (v == 33)\n\n\ndef test_make_agg_feat_of_identity_column(es):\n agg_feat = ft.Feature(es['log'].ww['value'], parent_dataframe_name='sessions', primitive=Sum)\n\n feature_set = FeatureSet([agg_feat])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = to_pandas(calculator.run(np.array([0])))\n\n v = df[agg_feat.get_name()][0]\n assert (v == 50)\n\n\n# full_dataframe not supported with Dask\ndef test_full_dataframe_trans_of_agg(pd_es):\n agg_feat = ft.Feature(pd_es['log'].ww['value'], parent_dataframe_name='customers',\n primitive=Sum)\n trans_feat = ft.Feature(agg_feat, primitive=CumSum)\n\n feature_set = 
FeatureSet([trans_feat])\n calculator = FeatureSetCalculator(pd_es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([1]))\n\n v = df[trans_feat.get_name()].values[0]\n assert v == 82\n\n\ndef test_full_dataframe_error_dask(dask_es):\n agg_feat = ft.Feature(dask_es['log'].ww['value'], parent_dataframe_name='customers',\n primitive=Sum)\n trans_feat = ft.Feature(agg_feat, primitive=CumSum)\n\n feature_set = FeatureSet([trans_feat])\n calculator = FeatureSetCalculator(dask_es,\n time_last=None,\n feature_set=feature_set)\n error_text = \"Cannot use primitives that require full dataframe with Dask\"\n\n with pytest.raises(ValueError, match=error_text):\n calculator.run(np.array([1]))\n\n\ndef test_make_agg_feat_of_identity_index_column(es):\n agg_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)\n\n feature_set = FeatureSet([agg_feat])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = to_pandas(calculator.run(np.array([0])))\n\n v = df[agg_feat.get_name()][0]\n assert (v == 5)\n\n\ndef test_make_agg_feat_where_count(es):\n agg_feat = ft.Feature(es['log'].ww['id'],\n parent_dataframe_name='sessions',\n where=IdentityFeature(es['log'].ww['product_id']) == 'coke zero',\n primitive=Count)\n\n feature_set = FeatureSet([agg_feat])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = to_pandas(calculator.run(np.array([0])))\n\n v = df[agg_feat.get_name()][0]\n assert (v == 3)\n\n\ndef test_make_agg_feat_using_prev_time(es):\n agg_feat = ft.Feature(es['log'].ww['id'],\n parent_dataframe_name='sessions',\n use_previous=Timedelta(10, 's'),\n primitive=Count)\n\n feature_set = FeatureSet([agg_feat])\n calculator = FeatureSetCalculator(es,\n time_last=datetime(2011, 4, 9, 10, 30, 10),\n feature_set=feature_set)\n df = to_pandas(calculator.run(np.array([0])))\n\n v = df[agg_feat.get_name()][0]\n assert (v == 2)\n\n calculator = FeatureSetCalculator(es,\n time_last=datetime(2011, 4, 9, 10, 30, 30),\n feature_set=feature_set)\n df = to_pandas(calculator.run(np.array([0])))\n\n v = df[agg_feat.get_name()][0]\n assert (v == 1)\n\n\ndef test_make_agg_feat_using_prev_n_events(es):\n if es.dataframe_type != Library.PANDAS.value:\n pytest.xfail('Distrubuted entitysets do not support use_previous')\n agg_feat_1 = ft.Feature(es['log'].ww['value'],\n parent_dataframe_name='sessions',\n use_previous=Timedelta(1, 'observations'),\n primitive=Min)\n\n agg_feat_2 = ft.Feature(es['log'].ww['value'],\n parent_dataframe_name='sessions',\n use_previous=Timedelta(3, 'observations'),\n primitive=Min)\n\n assert agg_feat_1.get_name() != agg_feat_2.get_name(), \\\n 'Features should have different names based on use_previous'\n\n feature_set = FeatureSet([agg_feat_1, agg_feat_2])\n calculator = FeatureSetCalculator(es,\n time_last=datetime(2011, 4, 9, 10, 30, 6),\n feature_set=feature_set)\n df = calculator.run(np.array([0]))\n\n # time_last is included by default\n v1 = df[agg_feat_1.get_name()][0]\n v2 = df[agg_feat_2.get_name()][0]\n assert v1 == 5\n assert v2 == 0\n\n calculator = FeatureSetCalculator(es,\n time_last=datetime(2011, 4, 9, 10, 30, 30),\n feature_set=feature_set)\n df = calculator.run(np.array([0]))\n\n v1 = df[agg_feat_1.get_name()][0]\n v2 = df[agg_feat_2.get_name()][0]\n assert v1 == 20\n assert v2 == 10\n\n\ndef test_make_agg_feat_multiple_dtypes(es):\n if es.dataframe_type != Library.PANDAS.value:\n pytest.xfail('Currently no Dask or Koalas compatible agg prims 
that use multiple dtypes')\n compare_prod = IdentityFeature(es['log'].ww['product_id']) == 'coke zero'\n\n agg_feat = ft.Feature(es['log'].ww['id'],\n parent_dataframe_name='sessions',\n where=compare_prod,\n primitive=Count)\n\n agg_feat2 = ft.Feature(es['log'].ww['product_id'],\n parent_dataframe_name='sessions',\n where=compare_prod,\n primitive=Mode)\n\n feature_set = FeatureSet([agg_feat, agg_feat2])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([0]))\n\n v = df[agg_feat.get_name()][0]\n v2 = df[agg_feat2.get_name()][0]\n assert (v == 3)\n assert (v2 == 'coke zero')\n\n\ndef test_make_agg_feat_where_different_identity_feat(es):\n feats = []\n where_cmps = [LessThanScalar, GreaterThanScalar, LessThanEqualToScalar,\n GreaterThanEqualToScalar, EqualScalar, NotEqualScalar]\n for where_cmp in where_cmps:\n feats.append(ft.Feature(es['log'].ww['id'],\n parent_dataframe_name='sessions',\n where=ft.Feature(es['log'].ww['datetime'], primitive=where_cmp(datetime(2011, 4, 10, 10, 40, 1))),\n primitive=Count))\n\n df = ft.calculate_feature_matrix(entityset=es, features=feats, instance_ids=[0, 1, 2, 3])\n df = to_pandas(df, index='id', sort_index=True)\n\n for i, where_cmp in enumerate(where_cmps):\n name = feats[i].get_name()\n instances = df[name]\n v0, v1, v2, v3 = instances[0:4]\n if where_cmp == LessThanScalar:\n assert (v0 == 5)\n assert (v1 == 4)\n assert (v2 == 1)\n assert (v3 == 1)\n elif where_cmp == GreaterThanScalar:\n assert (v0 == 0)\n assert (v1 == 0)\n assert (v2 == 0)\n assert (v3 == 0)\n elif where_cmp == LessThanEqualToScalar:\n assert (v0 == 5)\n assert (v1 == 4)\n assert (v2 == 1)\n assert (v3 == 2)\n elif where_cmp == GreaterThanEqualToScalar:\n assert (v0 == 0)\n assert (v1 == 0)\n assert (v2 == 0)\n assert (v3 == 1)\n elif where_cmp == EqualScalar:\n assert (v0 == 0)\n assert (v1 == 0)\n assert (v2 == 0)\n assert (v3 == 1)\n elif where_cmp == NotEqualScalar:\n assert (v0 == 5)\n assert (v1 == 4)\n assert (v2 == 1)\n assert (v3 == 1)\n\n\ndef test_make_agg_feat_of_grandchild_dataframe(es):\n agg_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='customers', primitive=Count)\n\n feature_set = FeatureSet([agg_feat])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([0]))\n df = to_pandas(df, index='id')\n v = df[agg_feat.get_name()].values[0]\n assert (v == 10)\n\n\ndef test_make_agg_feat_where_count_feat(es):\n \"\"\"\n Feature we're creating is:\n Number of sessions for each customer where the\n number of logs in the session is less than 3\n \"\"\"\n log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)\n\n feat = ft.Feature(es['sessions'].ww['id'],\n parent_dataframe_name='customers',\n where=log_count_feat > 1,\n primitive=Count)\n\n feature_set = FeatureSet([feat])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([0, 1]))\n df = to_pandas(df, index='id', sort_index=True)\n\n name = feat.get_name()\n instances = df[name]\n v0, v1 = instances[0:2]\n assert (v0 == 2)\n assert (v1 == 2)\n\n\ndef test_make_compare_feat(es):\n \"\"\"\n Feature we're creating is:\n Number of sessions for each customer where the\n number of logs in the session is less than 3\n \"\"\"\n log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)\n\n mean_agg_feat = ft.Feature(log_count_feat, 
parent_dataframe_name='customers', primitive=Mean)\n\n mean_feat = DirectFeature(mean_agg_feat, child_dataframe_name='sessions')\n\n feat = log_count_feat > mean_feat\n\n feature_set = FeatureSet([feat])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([0, 1, 2]))\n df = to_pandas(df, index='id', sort_index=True)\n\n name = feat.get_name()\n instances = df[name]\n v0, v1, v2 = instances[0:3]\n assert v0\n assert v1\n assert not v2\n\n\ndef test_make_agg_feat_where_count_and_device_type_feat(es):\n \"\"\"\n Feature we're creating is:\n Number of sessions for each customer where the\n number of logs in the session is less than 3\n \"\"\"\n log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)\n\n compare_count = log_count_feat == 1\n compare_device_type = IdentityFeature(es['sessions'].ww['device_type']) == 1\n and_feat = ft.Feature([compare_count, compare_device_type], primitive=And)\n feat = ft.Feature(es['sessions'].ww['id'],\n parent_dataframe_name='customers',\n where=and_feat,\n primitive=Count)\n\n feature_set = FeatureSet([feat])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([0]))\n df = to_pandas(df, index='id')\n\n name = feat.get_name()\n instances = df[name]\n assert (instances.values[0] == 1)\n\n\ndef test_make_agg_feat_where_count_or_device_type_feat(es):\n \"\"\"\n Feature we're creating is:\n Number of sessions for each customer where the\n number of logs in the session is less than 3\n \"\"\"\n log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)\n\n compare_count = log_count_feat > 1\n compare_device_type = IdentityFeature(es['sessions'].ww['device_type']) == 1\n or_feat = compare_count.OR(compare_device_type)\n feat = ft.Feature(es['sessions'].ww['id'],\n parent_dataframe_name='customers',\n where=or_feat,\n primitive=Count)\n\n feature_set = FeatureSet([feat])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([0]))\n df = to_pandas(df, index='id', int_index=True)\n\n name = feat.get_name()\n instances = df[name]\n assert (instances.values[0] == 3)\n\n\ndef test_make_agg_feat_of_agg_feat(es):\n log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='sessions', primitive=Count)\n\n customer_sum_feat = ft.Feature(log_count_feat, parent_dataframe_name='customers', primitive=Sum)\n\n feature_set = FeatureSet([customer_sum_feat])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([0]))\n df = to_pandas(df, index='id')\n v = df[customer_sum_feat.get_name()].values[0]\n assert (v == 10)\n\n\n@pytest.fixture\ndef pd_df():\n return pd.DataFrame({\n \"id\": [\"a\", \"b\", \"c\", \"d\", \"e\"],\n \"e1\": [\"h\", \"h\", \"i\", \"i\", \"j\"],\n \"e2\": [\"x\", \"x\", \"y\", \"y\", \"x\"],\n \"e3\": [\"z\", \"z\", \"z\", \"z\", \"z\"],\n \"val\": [1, 1, 1, 1, 1]\n })\n\n\n@pytest.fixture\ndef dd_df(pd_df):\n return dd.from_pandas(pd_df, npartitions=2)\n\n\n@pytest.fixture\ndef ks_df(pd_df):\n ks = pytest.importorskip('databricks.koalas', reason=\"Koalas not installed, skipping\")\n return ks.from_pandas(pd_df)\n\n\n@pytest.fixture(params=['pd_df', 'dd_df', 'ks_df'])\ndef df(request):\n return request.getfixturevalue(request.param)\n\n\ndef test_make_3_stacked_agg_feats(df):\n \"\"\"\n Tests stacking 3 agg features.\n\n 
The test specifically uses non numeric indices to test how ancestor columns are handled\n as dataframes are merged together\n\n \"\"\"\n if isinstance(df, dd.DataFrame):\n pytest.xfail('normalize_datdataframe fails with dask DataFrame')\n es = ft.EntitySet()\n ltypes = {\n 'e1': Categorical,\n 'e2': Categorical,\n 'e3': Categorical,\n 'val': Double\n }\n es.add_dataframe(dataframe=df,\n index=\"id\",\n dataframe_name=\"e0\",\n logical_types=ltypes)\n\n es.normalize_dataframe(base_dataframe_name=\"e0\",\n new_dataframe_name=\"e1\",\n index=\"e1\",\n additional_columns=[\"e2\", \"e3\"])\n\n es.normalize_dataframe(base_dataframe_name=\"e1\",\n new_dataframe_name=\"e2\",\n index=\"e2\",\n additional_columns=[\"e3\"])\n\n es.normalize_dataframe(base_dataframe_name=\"e2\",\n new_dataframe_name=\"e3\",\n index=\"e3\")\n\n sum_1 = ft.Feature(es[\"e0\"].ww[\"val\"], parent_dataframe_name=\"e1\", primitive=Sum)\n sum_2 = ft.Feature(sum_1, parent_dataframe_name=\"e2\", primitive=Sum)\n sum_3 = ft.Feature(sum_2, parent_dataframe_name=\"e3\", primitive=Sum)\n\n feature_set = FeatureSet([sum_3])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([\"z\"]))\n v = df[sum_3.get_name()][0]\n assert (v == 5)\n\n\ndef test_make_dfeat_of_agg_feat_on_self(es):\n \"\"\"\n The graph looks like this:\n\n R R = Regions, a parent of customers\n |\n C C = Customers, the dataframe we're trying to predict on\n |\n etc.\n\n We're trying to calculate a DFeat from C to R on an agg_feat of R on C.\n \"\"\"\n customer_count_feat = ft.Feature(es['customers'].ww['id'], parent_dataframe_name=u'régions', primitive=Count)\n\n num_customers_feat = DirectFeature(customer_count_feat, child_dataframe_name='customers')\n\n feature_set = FeatureSet([num_customers_feat])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([0]))\n df = to_pandas(df, index='id')\n v = df[num_customers_feat.get_name()].values[0]\n assert (v == 3)\n\n\ndef test_make_dfeat_of_agg_feat_through_parent(es):\n \"\"\"\n The graph looks like this:\n\n R C = Customers, the dataframe we're trying to predict on\n / \\\\ R = Regions, a parent of customers\n S C S = Stores, a child of regions\n |\n etc.\n\n We're trying to calculate a DFeat from C to R on an agg_feat of R on S.\n \"\"\"\n store_id_feat = IdentityFeature(es['stores'].ww['id'])\n\n store_count_feat = ft.Feature(store_id_feat, parent_dataframe_name=u'régions', primitive=Count)\n\n num_stores_feat = DirectFeature(store_count_feat, child_dataframe_name='customers')\n\n feature_set = FeatureSet([num_stores_feat])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([0]))\n df = to_pandas(df, index='id')\n v = df[num_stores_feat.get_name()].values[0]\n assert (v == 3)\n\n\ndef test_make_deep_agg_feat_of_dfeat_of_agg_feat(es):\n \"\"\"\n The graph looks like this (higher implies parent):\n\n C C = Customers, the dataframe we're trying to predict on\n | S = Sessions, a child of Customers\n P S L = Log, a child of both Sessions and Log\n \\\\ / P = Products, a parent of Log which is not a descendent of customers\n L\n\n We're trying to calculate a DFeat from L to P on an agg_feat of P on L, and\n then aggregate it with another agg_feat of C on L.\n \"\"\"\n log_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='products', primitive=Count)\n\n product_purchases_feat = DirectFeature(log_count_feat,\n 
child_dataframe_name='log')\n\n purchase_popularity = ft.Feature(product_purchases_feat, parent_dataframe_name='customers', primitive=Mean)\n\n feature_set = FeatureSet([purchase_popularity])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([0]))\n df = to_pandas(df, index='id')\n v = df[purchase_popularity.get_name()].values[0]\n assert (v == 38.0 / 10.0)\n\n\ndef test_deep_agg_feat_chain(es):\n \"\"\"\n Agg feat of agg feat:\n region.Mean(customer.Count(Log))\n \"\"\"\n customer_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='customers', primitive=Count)\n\n region_avg_feat = ft.Feature(customer_count_feat, parent_dataframe_name=u'régions', primitive=Mean)\n\n feature_set = FeatureSet([region_avg_feat])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array(['United States']))\n df = to_pandas(df, index='id')\n\n v = df[region_avg_feat.get_name()][0]\n assert (v == 17 / 3.)\n\n\n# NMostCommon not supported with Dask or Koalas\ndef test_topn(pd_es):\n topn = ft.Feature(pd_es['log'].ww['product_id'],\n parent_dataframe_name='customers',\n primitive=NMostCommon(n=2))\n feature_set = FeatureSet([topn])\n\n calculator = FeatureSetCalculator(pd_es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([0, 1, 2]))\n true_results = pd.DataFrame([\n ['toothpaste', 'coke zero'],\n ['coke zero', 'Haribo sugar-free gummy bears'],\n ['taco clock', np.nan]\n ])\n assert ([name in df.columns for name in topn.get_feature_names()])\n\n for i in range(df.shape[0]):\n true = true_results.loc[i]\n actual = df.loc[i]\n if i == 0:\n # coke zero and toothpase have same number of occurrences\n assert set(true.values) == set(actual.values)\n else:\n for i1, i2 in zip(true, actual):\n assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)\n\n\n# Trend not supported with Dask or Koalas\ndef test_trend(pd_es):\n trend = ft.Feature([ft.Feature(pd_es['log'].ww['value']), ft.Feature(pd_es['log'].ww['datetime'])],\n parent_dataframe_name='customers',\n primitive=Trend)\n feature_set = FeatureSet([trend])\n\n calculator = FeatureSetCalculator(pd_es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([0, 1, 2]))\n\n true_results = [-0.812730, 4.870378, np.nan]\n\n np.testing.assert_almost_equal(df[trend.get_name()].tolist(), true_results, decimal=5)\n\n\ndef test_direct_squared(es):\n feature = IdentityFeature(es['log'].ww['value'])\n squared = feature * feature\n feature_set = FeatureSet([feature, squared])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = to_pandas(calculator.run(np.array([0, 1, 2])))\n for i, row in df.iterrows():\n assert (row[0] * row[0]) == row[1]\n\n\ndef test_agg_empty_child(es):\n customer_count_feat = ft.Feature(es['log'].ww['id'], parent_dataframe_name='customers', primitive=Count)\n feature_set = FeatureSet([customer_count_feat])\n\n # time last before the customer had any events, so child frame is empty\n calculator = FeatureSetCalculator(es,\n time_last=datetime(2011, 4, 8),\n feature_set=feature_set)\n df = to_pandas(calculator.run(np.array([0])), index='id')\n\n assert df[\"COUNT(log)\"].iloc[0] == 0\n\n\ndef test_diamond_entityset(diamond_es):\n es = diamond_es\n\n amount = ft.IdentityFeature(es['transactions'].ww['amount'])\n path = backward_path(es, ['regions', 'customers', 'transactions'])\n through_customers = ft.AggregationFeature(amount, 'regions',\n 
primitive=ft.primitives.Sum,\n relationship_path=path)\n path = backward_path(es, ['regions', 'stores', 'transactions'])\n through_stores = ft.AggregationFeature(amount, 'regions',\n primitive=ft.primitives.Sum,\n relationship_path=path)\n\n feature_set = FeatureSet([through_customers, through_stores])\n calculator = FeatureSetCalculator(es,\n time_last=datetime(2011, 4, 8),\n feature_set=feature_set)\n df = calculator.run(np.array([0, 1, 2]))\n df = to_pandas(df, index='id', sort_index=True)\n\n assert (df['SUM(stores.transactions.amount)'] == [94, 261, 128]).all()\n assert (df['SUM(customers.transactions.amount)'] == [72, 411, 0]).all()\n\n\ndef test_two_relationships_to_single_dataframe(games_es):\n es = games_es\n home_team, away_team = es.relationships\n path = RelationshipPath([(False, home_team)])\n mean_at_home = ft.AggregationFeature(ft.Feature(es['games'].ww['home_team_score']),\n 'teams',\n relationship_path=path,\n primitive=ft.primitives.Mean)\n path = RelationshipPath([(False, away_team)])\n mean_at_away = ft.AggregationFeature(ft.Feature(es['games'].ww['away_team_score']),\n 'teams',\n relationship_path=path,\n primitive=ft.primitives.Mean)\n home_team_mean = ft.DirectFeature(mean_at_home, 'games',\n relationship=home_team)\n away_team_mean = ft.DirectFeature(mean_at_away, 'games',\n relationship=away_team)\n\n feature_set = FeatureSet([home_team_mean, away_team_mean])\n calculator = FeatureSetCalculator(es,\n time_last=datetime(2011, 8, 28),\n feature_set=feature_set)\n df = calculator.run(np.array(range(3)))\n df = to_pandas(df, index='id', sort_index=True)\n\n assert (df[home_team_mean.get_name()] == [1.5, 1.5, 2.5]).all()\n assert (df[away_team_mean.get_name()] == [1, 0.5, 2]).all()\n\n\n@pytest.fixture\ndef pd_parent_child():\n parent_df = pd.DataFrame({\"id\": [1]})\n child_df = pd.DataFrame({\"id\": [1, 2, 3],\n \"parent_id\": [1, 1, 1],\n \"time_index\": pd.date_range(start='1/1/2018', periods=3),\n \"value\": [10, 5, 2],\n \"cat\": ['a', 'a', 'b']}).astype({'cat': 'category'})\n return (parent_df, child_df)\n\n\n@pytest.fixture\ndef dd_parent_child(pd_parent_child):\n parent_df, child_df = pd_parent_child\n parent_df = dd.from_pandas(parent_df, npartitions=2)\n child_df = dd.from_pandas(child_df, npartitions=2)\n return (parent_df, child_df)\n\n\n@pytest.fixture\ndef ks_parent_child(pd_parent_child):\n ks = pytest.importorskip('databricks.koalas', reason=\"Koalas not installed, skipping\")\n parent_df, child_df = pd_parent_child\n parent_df = ks.from_pandas(parent_df)\n child_df = ks.from_pandas(child_df)\n return (parent_df, child_df)\n\n\n@pytest.fixture(params=['pd_parent_child', 'dd_parent_child', 'ks_parent_child'])\ndef parent_child(request):\n return request.getfixturevalue(request.param)\n\n\ndef test_empty_child_dataframe(parent_child):\n parent_df, child_df = parent_child\n child_ltypes = {\n 'parent_id': Integer,\n 'time_index': Datetime,\n 'value': Double,\n 'cat': Categorical\n }\n\n es = ft.EntitySet(id=\"blah\")\n es.add_dataframe(dataframe_name=\"parent\",\n dataframe=parent_df,\n index=\"id\")\n es.add_dataframe(dataframe_name=\"child\",\n dataframe=child_df,\n index=\"id\",\n time_index=\"time_index\",\n logical_types=child_ltypes)\n es.add_relationship(\"parent\", \"id\", \"child\", \"parent_id\")\n\n # create regular agg\n count = ft.Feature(es[\"child\"].ww[\"id\"], parent_dataframe_name=\"parent\", primitive=Count)\n\n # create agg feature that requires multiple arguments\n trend = ft.Feature([ft.Feature(es[\"child\"].ww[\"value\"]), 
ft.Feature(es[\"child\"].ww['time_index'])],\n parent_dataframe_name=\"parent\",\n primitive=Trend)\n\n # create multi-output agg feature\n n_most_common = ft.Feature(es[\"child\"].ww[\"cat\"], parent_dataframe_name=\"parent\", primitive=NMostCommon)\n\n # create aggs with where\n where = ft.Feature(es[\"child\"].ww[\"value\"]) == 1\n count_where = ft.Feature(es[\"child\"].ww[\"id\"], parent_dataframe_name=\"parent\", where=where, primitive=Count)\n trend_where = ft.Feature([ft.Feature(es[\"child\"].ww[\"value\"]), ft.Feature(es[\"child\"].ww[\"time_index\"])],\n parent_dataframe_name=\"parent\",\n where=where,\n primitive=Trend)\n n_most_common_where = ft.Feature(es[\"child\"].ww[\"cat\"], parent_dataframe_name=\"parent\", where=where, primitive=NMostCommon)\n\n if isinstance(parent_df, pd.DataFrame):\n features = [count, count_where, trend, trend_where, n_most_common, n_most_common_where]\n data = {count.get_name(): pd.Series([0], dtype=\"Int64\"),\n count_where.get_name(): pd.Series([0], dtype=\"Int64\"),\n trend.get_name(): pd.Series([np.nan], dtype=\"float\"),\n trend_where.get_name(): pd.Series([np.nan], dtype=\"float\")}\n for name in n_most_common.get_feature_names():\n data[name] = pd.Series([np.nan], dtype=\"category\")\n for name in n_most_common_where.get_feature_names():\n data[name] = pd.Series([np.nan], dtype=\"category\")\n else:\n features = [count, count_where]\n data = {count.get_name(): pd.Series([0], dtype=\"Int64\"),\n count_where.get_name(): pd.Series([0], dtype=\"Int64\")}\n\n answer = pd.DataFrame(data)\n\n # cutoff time before all rows\n fm = ft.calculate_feature_matrix(entityset=es,\n features=features,\n cutoff_time=pd.Timestamp(\"12/31/2017\"))\n fm = to_pandas(fm)\n\n for column in data.keys():\n pd.testing.assert_series_equal(fm[column], answer[column], check_names=False, check_index=False)\n\n # cutoff time after all rows, but where clause filters all rows\n if isinstance(parent_df, pd.DataFrame):\n features = [count_where, trend_where, n_most_common_where]\n data = {count_where.get_name(): pd.Series([0], dtype=\"Int64\"),\n trend_where.get_name(): pd.Series([np.nan], dtype=\"float\")}\n for name in n_most_common_where.get_feature_names():\n data[name] = pd.Series([np.nan], dtype=\"category\")\n else:\n features = [count_where]\n data = {count_where.get_name(): pd.Series([0], dtype=\"Int64\")}\n answer = pd.DataFrame(data)\n\n fm2 = ft.calculate_feature_matrix(entityset=es,\n features=features,\n cutoff_time=pd.Timestamp(\"1/4/2018\"))\n fm2 = to_pandas(fm2)\n\n for column in data.keys():\n pd.testing.assert_series_equal(fm[column], answer[column], check_names=False, check_index=False)\n\n\ndef test_with_features_built_from_es_metadata(es):\n metadata = es.metadata\n\n agg_feat = ft.Feature(metadata['log'].ww['id'], parent_dataframe_name='customers', primitive=Count)\n\n feature_set = FeatureSet([agg_feat])\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n df = calculator.run(np.array([0]))\n df = to_pandas(df, index='id')\n v = df[agg_feat.get_name()].values[0]\n assert (v == 10)\n\n\n# TODO: Fails with Dask and Koalas (conflicting aggregation primitives)\ndef test_handles_primitive_function_name_uniqueness(es):\n if es.dataframe_type != Library.PANDAS.value:\n pytest.xfail(\"Fails with Dask and Koalas due conflicting aggregation primitive names\")\n\n class SumTimesN(AggregationPrimitive):\n name = \"sum_times_n\"\n input_types = [ColumnSchema(semantic_tags={'numeric'})]\n return_type = 
ColumnSchema(semantic_tags={'numeric'})\n\n def __init__(self, n):\n self.n = n\n\n def get_function(self, agg_type='pandas'):\n def my_function(values):\n return values.sum() * self.n\n\n return my_function\n\n # works as expected\n f1 = ft.Feature(es[\"log\"].ww[\"value\"],\n parent_dataframe_name=\"customers\",\n primitive=SumTimesN(n=1))\n fm = ft.calculate_feature_matrix(features=[f1], entityset=es)\n\n value_sum = pd.Series([56, 26, 0])\n assert all(fm[f1.get_name()].sort_index() == value_sum)\n\n # works as expected\n f2 = ft.Feature(es[\"log\"].ww[\"value\"],\n parent_dataframe_name=\"customers\",\n primitive=SumTimesN(n=2))\n fm = ft.calculate_feature_matrix(features=[f2], entityset=es)\n\n double_value_sum = pd.Series([112, 52, 0])\n assert all(fm[f2.get_name()].sort_index() == double_value_sum)\n\n # same primitive, same column, different args\n fm = ft.calculate_feature_matrix(features=[f1, f2], entityset=es)\n\n assert all(fm[f1.get_name()].sort_index() == value_sum)\n assert all(fm[f2.get_name()].sort_index() == double_value_sum)\n\n # different primitives, same function returned by get_function,\n # different base features\n f3 = ft.Feature(es[\"log\"].ww[\"value\"],\n parent_dataframe_name=\"customers\",\n primitive=Sum)\n f4 = ft.Feature(es[\"log\"].ww[\"purchased\"],\n parent_dataframe_name=\"customers\",\n primitive=NumTrue)\n fm = ft.calculate_feature_matrix(features=[f3, f4], entityset=es)\n\n purchased_sum = pd.Series([10, 1, 1])\n assert all(fm[f3.get_name()].sort_index() == value_sum)\n assert all(fm[f4.get_name()].sort_index() == purchased_sum)\\\n\n\n # different primitives, same function returned by get_function,\n # same base feature\n class Sum1(AggregationPrimitive):\n \"\"\"Sums elements of a numeric or boolean feature.\"\"\"\n name = \"sum1\"\n input_types = [ColumnSchema(semantic_tags={'numeric'})]\n return_type = ColumnSchema(semantic_tags={'numeric'})\n stack_on_self = False\n stack_on_exclude = [Count]\n default_value = 0\n\n def get_function(self, agg_type='pandas'):\n return np.sum\n\n class Sum2(AggregationPrimitive):\n \"\"\"Sums elements of a numeric or boolean feature.\"\"\"\n name = \"sum2\"\n input_types = [ColumnSchema(semantic_tags={'numeric'})]\n return_type = ColumnSchema(semantic_tags={'numeric'})\n stack_on_self = False\n stack_on_exclude = [Count]\n default_value = 0\n\n def get_function(self, agg_type='pandas'):\n return np.sum\n\n class Sum3(AggregationPrimitive):\n \"\"\"Sums elements of a numeric or boolean feature.\"\"\"\n name = \"sum3\"\n input_types = [ColumnSchema(semantic_tags={'numeric'})]\n return_type = ColumnSchema(semantic_tags={'numeric'})\n stack_on_self = False\n stack_on_exclude = [Count]\n default_value = 0\n\n def get_function(self, agg_type='pandas'):\n return np.sum\n\n f5 = ft.Feature(es[\"log\"].ww[\"value\"],\n parent_dataframe_name=\"customers\",\n primitive=Sum1)\n f6 = ft.Feature(es[\"log\"].ww[\"value\"],\n parent_dataframe_name=\"customers\",\n primitive=Sum2)\n f7 = ft.Feature(es[\"log\"].ww[\"value\"],\n parent_dataframe_name=\"customers\",\n primitive=Sum3)\n fm = ft.calculate_feature_matrix(features=[f5, f6, f7], entityset=es)\n assert all(fm[f5.get_name()].sort_index() == value_sum)\n assert all(fm[f6.get_name()].sort_index() == value_sum)\n assert all(fm[f7.get_name()].sort_index() == value_sum)\n\n\n# No order guarantees w/ Dask\ndef test_returns_order_of_instance_ids(pd_es):\n feature_set = FeatureSet([ft.Feature(pd_es['customers'].ww['age'])])\n calculator = FeatureSetCalculator(pd_es,\n 
time_last=None,\n feature_set=feature_set)\n\n instance_ids = [0, 1, 2]\n assert list(pd_es['customers']['id']) != instance_ids\n\n df = calculator.run(np.array(instance_ids))\n\n assert list(df.index) == instance_ids\n\n\ndef test_calls_progress_callback(es):\n # call with all feature types. make sure progress callback calls sum to 1\n identity = ft.Feature(es['customers'].ww['age'])\n direct = ft.Feature(es['cohorts'].ww['cohort_name'], 'customers')\n agg = ft.Feature(es['sessions'].ww['id'], parent_dataframe_name='customers', primitive=Count)\n agg_apply = ft.Feature(es['log'].ww['datetime'], parent_dataframe_name='customers', primitive=TimeSinceLast) # this feature is handle differently than simple features\n trans = ft.Feature(agg, primitive=Negate)\n trans_full = ft.Feature(agg, primitive=CumSum)\n groupby_trans = ft.Feature(agg, primitive=CumSum, groupby=ft.Feature(es['customers'].ww['cohort']))\n\n if es.dataframe_type != Library.PANDAS.value:\n all_features = [identity, direct, agg, trans]\n else:\n all_features = [identity, direct, agg, agg_apply, trans, trans_full, groupby_trans]\n\n feature_set = FeatureSet(all_features)\n calculator = FeatureSetCalculator(es,\n time_last=None,\n feature_set=feature_set)\n\n class MockProgressCallback:\n def __init__(self):\n self.total = 0\n\n def __call__(self, update):\n self.total += update\n\n mock_progress_callback = MockProgressCallback()\n\n instance_ids = [0, 1, 2]\n calculator.run(np.array(instance_ids), mock_progress_callback)\n\n assert np.isclose(mock_progress_callback.total, 1)\n\n # testing again with a time_last with no data\n feature_set = FeatureSet(all_features)\n calculator = FeatureSetCalculator(es,\n time_last=pd.Timestamp(\"1950\"),\n feature_set=feature_set)\n\n mock_progress_callback = MockProgressCallback()\n calculator.run(np.array(instance_ids), mock_progress_callback)\n\n assert np.isclose(mock_progress_callback.total, 1)\n\n\n# precalculated_features is only used with approximate\ndef test_precalculated_features(pd_es):\n error_msg = 'This primitive should never be used because the features are precalculated'\n\n class ErrorPrim(AggregationPrimitive):\n \"\"\"A primitive whose function raises an error.\"\"\"\n name = \"error_prim\"\n input_types = [ColumnSchema(semantic_tags={'numeric'})]\n return_type = ColumnSchema(semantic_tags={'numeric'})\n\n def get_function(self, agg_type='pandas'):\n def error(s):\n raise RuntimeError(error_msg)\n return error\n\n value = ft.Feature(pd_es['log'].ww['value'])\n agg = ft.Feature(value,\n parent_dataframe_name='sessions',\n primitive=ErrorPrim)\n agg2 = ft.Feature(agg,\n parent_dataframe_name='customers',\n primitive=ErrorPrim)\n direct = ft.Feature(agg2, dataframe_name='sessions')\n\n # Set up a FeatureSet which knows which features are precalculated.\n precalculated_feature_trie = Trie(default=set, path_constructor=RelationshipPath)\n precalculated_feature_trie.get_node(direct.relationship_path).value.add(agg2.unique_name())\n feature_set = FeatureSet([direct], approximate_feature_trie=precalculated_feature_trie)\n\n # Fake precalculated data.\n values = [0, 1, 2]\n parent_fm = pd.DataFrame({agg2.get_name(): values})\n precalculated_fm_trie = Trie(path_constructor=RelationshipPath)\n precalculated_fm_trie.get_node(direct.relationship_path).value = parent_fm\n\n calculator = FeatureSetCalculator(pd_es,\n feature_set=feature_set,\n precalculated_features=precalculated_fm_trie)\n\n instance_ids = [0, 2, 3, 5]\n fm = calculator.run(np.array(instance_ids))\n\n assert 
list(fm[direct.get_name()]) == [values[0], values[0], values[1], values[2]]\n\n # Calculating without precalculated features should error.\n with pytest.raises(RuntimeError, match=error_msg):\n FeatureSetCalculator(pd_es, feature_set=FeatureSet([direct])).run(instance_ids)\n", "import dask.dataframe as dd\nimport pandas as pd\nimport pytest\nfrom woodwork.logical_types import (\n Categorical,\n Datetime,\n Double,\n Integer,\n NaturalLanguage\n)\n\nfrom featuretools.entityset import EntitySet\nfrom featuretools.tests.testing_utils import get_df_tags\nfrom featuretools.utils.gen_utils import Library\n\n\ndef test_add_dataframe(pd_es):\n dask_es = EntitySet(id=\"dask_es\")\n log_dask = dd.from_pandas(pd_es[\"log\"], npartitions=2)\n dask_es = dask_es.add_dataframe(\n dataframe_name=\"log_dask\",\n dataframe=log_dask,\n index=\"id\",\n time_index=\"datetime\",\n logical_types=pd_es[\"log\"].ww.logical_types,\n semantic_tags=get_df_tags(pd_es[\"log\"])\n )\n pd.testing.assert_frame_equal(pd_es[\"log\"], dask_es[\"log_dask\"].compute(), check_like=True)\n\n\ndef test_add_dataframe_with_non_numeric_index(pd_es, dask_es):\n df = pd.DataFrame({\"id\": [\"A_1\", \"A_2\", \"C\", \"D\"],\n \"values\": [1, 12, -34, 27]})\n dask_df = dd.from_pandas(df, npartitions=2)\n\n pd_es.add_dataframe(\n dataframe_name=\"new_dataframe\",\n dataframe=df,\n index=\"id\",\n logical_types={\"id\": Categorical, \"values\": Integer})\n\n dask_es.add_dataframe(\n dataframe_name=\"new_dataframe\",\n dataframe=dask_df,\n index=\"id\",\n logical_types={\"id\": Categorical, \"values\": Integer})\n\n pd.testing.assert_frame_equal(pd_es['new_dataframe'].reset_index(drop=True), dask_es['new_dataframe'].compute())\n\n\ndef test_create_entityset_with_mixed_dataframe_types(pd_es, dask_es):\n df = pd.DataFrame({\"id\": [0, 1, 2, 3],\n \"values\": [1, 12, -34, 27]})\n dask_df = dd.from_pandas(df, npartitions=2)\n\n err_msg = \"All dataframes must be of the same type. 
\" \\\n \"Cannot add dataframe of type {} to an entityset with existing dataframes \" \\\n \"of type {}\"\n\n # Test error is raised when trying to add Dask dataframe to entityset with existing pandas dataframes\n with pytest.raises(ValueError, match=err_msg.format(type(dask_df), type(pd_es.dataframes[0]))):\n pd_es.add_dataframe(\n dataframe_name=\"new_dataframe\",\n dataframe=dask_df,\n index=\"id\")\n\n # Test error is raised when trying to add pandas dataframe to entityset with existing dask dataframes\n with pytest.raises(ValueError, match=err_msg.format(type(df), type(dask_es.dataframes[0]))):\n dask_es.add_dataframe(\n dataframe_name=\"new_dataframe\",\n dataframe=df,\n index=\"id\")\n\n\ndef test_add_last_time_indexes():\n pd_es = EntitySet(id=\"pd_es\")\n dask_es = EntitySet(id=\"dask_es\")\n\n sessions = pd.DataFrame({\"id\": [0, 1, 2, 3],\n \"user\": [1, 2, 1, 3],\n \"time\": [pd.to_datetime('2019-01-10'),\n pd.to_datetime('2019-02-03'),\n pd.to_datetime('2019-01-01'),\n pd.to_datetime('2017-08-25')],\n \"strings\": [\"I am a string\",\n \"23\",\n \"abcdef ghijk\",\n \"\"]})\n sessions_dask = dd.from_pandas(sessions, npartitions=2)\n sessions_logical_types = {\n \"id\": Integer,\n \"user\": Integer,\n \"time\": Datetime,\n \"strings\": NaturalLanguage\n }\n\n transactions = pd.DataFrame({\"id\": [0, 1, 2, 3, 4, 5],\n \"session_id\": [0, 0, 1, 2, 2, 3],\n \"amount\": [1.23, 5.24, 123.52, 67.93, 40.34, 50.13],\n \"time\": [pd.to_datetime('2019-01-10 03:53'),\n pd.to_datetime('2019-01-10 04:12'),\n pd.to_datetime('2019-02-03 10:34'),\n pd.to_datetime('2019-01-01 12:35'),\n pd.to_datetime('2019-01-01 12:49'),\n pd.to_datetime('2017-08-25 04:53')]})\n transactions_dask = dd.from_pandas(transactions, npartitions=2)\n\n transactions_logical_types = {\n \"id\": Integer,\n \"session_id\": Integer,\n \"time\": Datetime,\n \"amount\": Double\n }\n\n pd_es.add_dataframe(dataframe_name=\"sessions\", dataframe=sessions, index=\"id\", time_index=\"time\")\n dask_es.add_dataframe(dataframe_name=\"sessions\", dataframe=sessions_dask,\n index=\"id\", time_index=\"time\",\n logical_types=sessions_logical_types)\n\n pd_es.add_dataframe(dataframe_name=\"transactions\", dataframe=transactions, index=\"id\", time_index=\"time\")\n dask_es.add_dataframe(dataframe_name=\"transactions\", dataframe=transactions_dask,\n index=\"id\", time_index=\"time\",\n logical_types=transactions_logical_types)\n\n pd_es = pd_es.add_relationship(\"sessions\", \"id\", \"transactions\", \"session_id\")\n dask_es = dask_es.add_relationship(\"sessions\", \"id\", \"transactions\", \"session_id\")\n\n assert 'foreign_key' in pd_es['transactions'].ww.semantic_tags['session_id']\n assert 'foreign_key' in dask_es['transactions'].ww.semantic_tags['session_id']\n\n assert pd_es['sessions'].ww.metadata.get('last_time_index') is None\n assert dask_es['sessions'].ww.metadata.get('last_time_index') is None\n\n pd_es.add_last_time_indexes()\n dask_es.add_last_time_indexes()\n\n pd_lti_name = pd_es['sessions'].ww.metadata.get('last_time_index')\n ks_lti_name = dask_es['sessions'].ww.metadata.get('last_time_index')\n assert pd_lti_name == ks_lti_name\n pd.testing.assert_series_equal(pd_es['sessions'][pd_lti_name].sort_index(),\n dask_es['sessions'][ks_lti_name].compute().sort_index(), check_names=False)\n\n\ndef test_add_dataframe_with_make_index():\n values = [1, 12, -23, 27]\n df = pd.DataFrame({\"values\": values})\n dask_df = dd.from_pandas(df, npartitions=2)\n dask_es = EntitySet(id=\"dask_es\")\n logical_types = {\"values\": 
Integer}\n dask_es.add_dataframe(dataframe_name=\"new_dataframe\", dataframe=dask_df, make_index=True, index=\"new_index\", logical_types=logical_types)\n\n expected_df = pd.DataFrame({\"values\": values, \"new_index\": range(len(values))})\n pd.testing.assert_frame_equal(expected_df, dask_es['new_dataframe'].compute())\n\n\ndef test_dataframe_type_dask(dask_es):\n assert dask_es.dataframe_type == Library.DASK.value\n" ]
[ [ "pandas.DateOffset", "pandas.Series", "pandas.isnull", "pandas.Timestamp", "numpy.nan_to_num", "pandas.DataFrame", "pandas.DatetimeIndex", "numpy.append", "numpy.array" ], [ "pandas.testing.assert_series_equal", "pandas.Series", "pandas.isnull", "pandas.Timestamp", "pandas.DataFrame", "pandas.date_range", "numpy.array", "numpy.isclose" ], [ "pandas.to_datetime", "pandas.DataFrame" ] ]
adam-coogan/swyft
[ "c54bdd9f77ddf02fda857e26640df012cbe545fc" ]
[ "swyft/utils/array.py" ]
[ "from typing import Optional\n\nimport numpy as np\nimport torch\n\nfrom swyft.types import Array, Device\n\n\ndef dict_to_tensor(d, device=\"cpu\", non_blocking=False, indices=slice(0, None)):\n return {\n k: array_to_tensor(v[indices]).float().to(device, non_blocking=non_blocking)\n for k, v in d.items()\n }\n\n\ndef dict_to_tensor_unsqueeze(\n d, device=\"cpu\", non_blocking=False, indices=slice(0, None)\n):\n return {\n k: array_to_tensor(v[indices])\n .float()\n .unsqueeze(0)\n .to(device, non_blocking=non_blocking)\n for k, v in d.items()\n }\n\n\nnp_bool_types = [np.bool]\nnp_int_types = [np.int8, np.int16, np.int32, np.int64]\nnp_float_types = [np.float32, np.float64]\ntorch_bool_types = [torch.bool]\ntorch_int_types = [torch.int8, torch.int16, torch.int32, torch.int64]\ntorch_float_types = [torch.float32, torch.float64]\n\n\ndef array_to_tensor(\n array: Array, dtype: Optional[torch.dtype] = None, device: Optional[Device] = None\n) -> torch.Tensor:\n \"\"\"Converts np.ndarray and torch.Tensor to torch.Tensor with dtype and on device.\n When dtype is None, unsafe casts all float-type arrays to torch.float32 and all int-type arrays to torch.int64\n \"\"\"\n if not isinstance(array, (np.ndarray, torch.Tensor)):\n np.asarray(array)\n\n input_dtype = array.dtype\n if isinstance(input_dtype, np.dtype):\n if dtype is None:\n if input_dtype in np_float_types:\n dtype = torch.float32\n elif input_dtype in np_int_types:\n dtype = torch.int64\n elif input_dtype in np_bool_types:\n dtype = torch.bool\n else:\n raise TypeError(\n f\"{input_dtype} was not a supported numpy int, float, or bool.\"\n )\n return torch.from_numpy(array).to(dtype=dtype, device=device)\n elif isinstance(input_dtype, torch.dtype):\n if dtype is None:\n if input_dtype in torch_float_types:\n dtype = torch.float32\n elif input_dtype in torch_int_types:\n dtype = torch.int64\n elif input_dtype in torch_bool_types:\n dtype = torch.bool\n else:\n raise TypeError(\n f\"{input_dtype} was not a supported torch int, float, or bool.\"\n )\n return array.to(dtype=dtype, device=device)\n else:\n raise TypeError(\n f\"{input_dtype} was not recognized as a supported numpy.dtype or torch.dtype.\"\n )\n\n\ndef tensor_to_array(\n tensor: Array, dtype: Optional[np.dtype] = None, copy: bool = True\n) -> np.ndarray:\n if isinstance(tensor, torch.Tensor):\n out = np.asarray(tensor.detach().cpu().numpy(), dtype=dtype)\n else:\n out = np.asarray(tensor, dtype=dtype)\n\n if copy:\n return out.copy()\n else:\n return out\n\n\ndef tobytes(x: Array):\n if isinstance(x, np.ndarray):\n return x.tobytes()\n elif isinstance(x, torch.Tensor):\n return x.numpy().tobytes()\n else:\n raise TypeError(f\"{type(x)} does not support tobytes.\")\n\n\ndef _all_finite(x: Array):\n if isinstance(x, torch.Tensor):\n return torch.all(torch.isfinite(x))\n else:\n return np.all(np.isfinite(x))\n\n\ndef all_finite(x):\n if isinstance(x, dict):\n return all(_all_finite(v) for v in x.values())\n elif isinstance(x, (torch.Tensor, np.ndarray)):\n return _all_finite(x)\n elif isinstance(x, list):\n return all(_all_finite(v) for v in x)\n else:\n raise NotImplementedError(\"That type is not yet implemented.\")\n" ]
[ [ "numpy.asarray", "torch.isfinite", "torch.from_numpy", "numpy.isfinite" ] ]
gianlucacovini/opt4ds
[ "42904fd56c18a83fd5ff6f068bbd20b055a40734" ]
[ "aa2020/python/plot_shortest_path_tree.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 26 11:14:53 2020\n\n@author: Gualandi\n\"\"\"\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nLs = [('a', 'b', 5), ('a', 'c', 3), ('a', 'd', 3), ('b', 'c', 2), \n ('b', 'd', 5), ('c', 'e', 2), ('c', 'd', 3), ('d', 'e', 2), \n ('d', 'f', 3), ('e', 'g', 3), ('f', 'c', 4), ('g', 'f', 2)]\n\nCs = dict([((i,j),c) for i,j,c in Ls])\nAs = [(i,j) for i,j,_ in Ls]\n\n# NetworkX Digraph\nG = nx.DiGraph()\nG.add_edges_from(As)\n\nval_map = {'g': 0.5714285714285714,\n 'a': 0.0}\n\nvalues = [val_map.get(node, 0.2) for node in G.nodes()]\n\n# Specify the edges you want here\nred_edges = [('e', 'g'), ('b', 'c'), ('c', 'e'), ('f', 'c'), ('d', 'e'), ('a', 'c')]\nblack_edges = [edge for edge in G.edges() if edge not in red_edges]\n\n# Need to create a layout when doing\n# separate calls to draw nodes and edges\npos = nx.kamada_kawai_layout(G)\n\nnx.draw_networkx_nodes(G, pos, cmap=plt.get_cmap('coolwarm'), \n node_color = values, node_size = 400)\nnx.draw_networkx_labels(G, pos)\n\nnx.draw_networkx_edges(G, pos, edgelist=red_edges, lw=2,\n edge_color='r', arrows=True)\nnx.draw_networkx_edges(G, pos, edgelist=black_edges, arrows=True)\nnx.draw_networkx_edge_labels(G, pos, edge_labels=Cs)\n\nplt.savefig(\"ShortestPathGraph.pdf\", bbox_inches='tight')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.savefig" ] ]
jayhombal/forest-biomass-prediction
[ "f48264a7696896899c9745de1b82e85130b634ae" ]
[ "src/data/aerial_data_preprocess.py" ]
[ "import numpy as np\nimport pandas as pd\nimport uuid\n\nclass AerialDataProcessor:\n \"\"\"\n Class for reading, processing, and writing data from the UCI\n Condition monitoring of hydraulic systems` dataset.\n \"\"\"\n\n\n def __init__(self):\n \n self.aerial_data_usecols= ['left', 'bottom', 'right', 'top',\n 'score', 'height', 'area', 'geo_index']\n\n self.aerial_data = pd.DataFrame()\n\n self.aerial_data_processed_file = 'processed_forest_aerial_data.csv'\n \n\n def read_data(self, raw_data_path):\n \"\"\"Read raw data into DataProcessor.\"\"\"\n self.aerial_data = pd.read_csv(raw_data_path, usecols= self.aerial_data_usecols)\n\n\n def process_data(self, stable=True):\n \"\"\"Process raw data into useful files for model.\n Cleans the ground dataset\n \"\"\"\n ground_data_geo_index = ['747000_4308000', '747000_4309000']\n bin_labels_5 = [1,2,3,4,5]\n ## Filter Aerial data matching the ground_data geo_index\n self.aerial_data = self.aerial_data[self.aerial_data['geo_index'].isin(ground_data_geo_index)]\n \n # create uuid\n ids = [uuid.uuid4() for _ in range(len(self.aerial_data.index))]\n self.aerial_data = self.aerial_data.assign(id =ids )\n\n \n self.aerial_data['adbin'] = pd.qcut(self.aerial_data['area'],\n q=[0, .2, .4, .6, .8, 1],\n labels=bin_labels_5)\n\n self.aerial_data.reset_index()\n\n def write_data(self, processed_data_path):\n \"\"\"Write processed data to directory.\"\"\"\n plot1 = self.aerial_data[self.aerial_data['geo_index'] == '747000_4308000']\n plot2 = self.aerial_data[self.aerial_data['geo_index'] == '747000_4309000']\n plot1.to_csv(processed_data_path + 'plot1_' + self.aerial_data_processed_file , header=True, index=False)\n plot2.to_csv(processed_data_path + 'plot2_' + self.aerial_data_processed_file , header=True, index=False)\n \n \n" ]
[ [ "pandas.read_csv", "pandas.qcut", "pandas.DataFrame" ] ]
csm-kr/s2cnn
[ "09652af9811357c4bf6f7a6d3e912a06d7826f70" ]
[ "examples/sun360/sun360_dataset.py" ]
[ "from torch.utils.data import DataLoader, Dataset\nimport torch.nn as nn\nimport os\nimport glob\nimport torch\nimport numpy as np\nfrom examples.mnist.gendata import get_projection_grid, project_2d_on_sphere_sun360, rand_rotation_matrix, rotate_grid\nimport cv2\nfrom utils import rotate_map_given_R, calculate_Rmatrix_from_phi_theta, show_spheres\n\n\nclass SUN360Dataset(Dataset):\n\n # class_names = ('bathroom', 'beach', 'cave', 'church',\n # 'desert', 'field ', 'forest', 'mountain', 'theater',\n # 'train_interior')\n\n def __init__(self, root, split, vis=False, rotate=True):\n # indoor vs outdoor classification\n self.root = os.path.join(root, \"pano1024x512\")\n # self.img_indoor_path = glob.glob(os.path.join(self.root, 'indoor', '*/*.jpg'))\n # self.img_outdoor_path = glob.glob(os.path.join(self.root, 'outdoor', '*/*.jpg'))\n\n self.img_indoor_path = glob.glob(os.path.join(self.root, 'indoor_sample', '*/*.jpg'))\n self.img_outdoor_path = glob.glob(os.path.join(self.root, 'outdoor_sample', '*/*.jpg'))\n\n self.img_path = self.img_indoor_path + self.img_outdoor_path\n # self.img_others_path = glob.glob(os.path.join(self.root, 'others', '*.jpg'))\n\n ratio = 0.7\n num_train_data = int(ratio * len(self.img_path))\n np.random.seed(1)\n train_data_path = sorted(np.random.choice(self.img_path, num_train_data, replace=False))\n test_data_path = sorted(list(set(self.img_path) - set(train_data_path)))\n\n assert len(train_data_path) + len(test_data_path) == len(self.img_path)\n\n self.split = split\n if self.split == 'train':\n self.img_path = train_data_path\n elif self.split == 'test':\n self.img_path = test_data_path\n\n self.rotate = rotate\n self.vis = vis\n super().__init__()\n\n def __getitem__(self, idx):\n\n img = cv2.imread(self.img_path[idx]) # BGR\n img = cv2.imread(\"D:\\data\\SUN360_panoramas_1024x512\\pano1024x512\\outdoor\\others\\pano_aaartbimirvryq.jpg\") # BGR\n # print(self.img_path[idx])\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB\n img_np = cv2.resize(img, (224, 224))\n\n # FIXME\n bandwidth = 112\n grid = get_projection_grid(b=bandwidth)\n if self.rotate:\n rot = rand_rotation_matrix()\n rotated_grid = rotate_grid(rot, grid)\n map_x, map_y = rotate_map_given_R(rot, bandwidth * 2, bandwidth * 2)\n img_np = cv2.remap(img_np, map_x, map_y, cv2.INTER_CUBIC, borderMode=cv2.BORDER_TRANSPARENT)\n else:\n rotated_grid = grid\n\n if self.vis:\n img_np_vis = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) # RGB\n cv2.imshow('rotated_img', img_np_vis)\n cv2.waitKey(0)\n img_np_ = np.transpose(img_np, (2, 0, 1))\n show_spheres(scale=2, points=rotated_grid, rgb=img_np_)\n # R = calculate_Rmatrix_from_phi_theta(0, 0)\n\n img_np = np.transpose(img_np, (2, 0, 1)) # [3, 224, 224]\n img_torch = torch.FloatTensor(img_np) # [3, 224, 224]\n\n if \"indoor\" in self.img_path[idx]:\n label = torch.zeros(1).type(torch.long)\n elif \"outdoor\" in self.img_path[idx]:\n label = torch.ones(1).type(torch.long)\n\n return img_torch, label\n\n def __len__(self):\n return len(self.img_path)\n\n\nif __name__ == '__main__':\n root = \"D:\\data\\SUN360_panoramas_1024x512\"\n dataset = SUN360Dataset(root, 'train')\n dataloader = DataLoader(dataset=dataset,\n batch_size=4,\n shuffle=True,\n pin_memory=True,\n num_workers=4)\n\n for img, label in dataloader:\n print(label)\n" ]
[ [ "torch.ones", "numpy.random.seed", "numpy.random.choice", "torch.zeros", "torch.utils.data.DataLoader", "torch.FloatTensor", "numpy.transpose" ] ]
yanyuliren/tt
[ "a6885a4128a4992ef8a905d27790efa67023063f" ]
[ "network2.py" ]
[ "\"\"\"network2.py\r\n~~~~~~~~~~~~~~\r\n\r\nImplementing the stochastic gradient descent learning algorithm for a \r\nfeedforward neural network.\r\n\r\nImprovements include the addition of the cross-entropy cost function,\r\nregularization, and better initialization of network weights. Focused on making the code simple, \r\neasily readable, and easily modifiable. It is not optimized, and omits many desirable\r\nfeatures.\r\n\r\n\"\"\"\r\n\r\n#### Libraries\r\n# Standard library\r\nimport json\r\nimport random\r\nimport sys\r\n\r\n# Third-party libraries\r\nimport numpy as np\r\n\r\n\r\n#### Define the quadratic and cross-entropy cost functions\r\n\r\nclass QuadraticCost(object):\r\n\r\n @staticmethod\r\n def fn(a, y):\r\n \"\"\"Return the cost associated with an output ``a`` and desired output\r\n ``y``.\r\n\r\n \"\"\"\r\n return 0.5*np.linalg.norm(a-y)**2\r\n\r\n @staticmethod\r\n def delta(z, a, y):\r\n \"\"\"Return the error delta from the output layer.\"\"\"\r\n return (a-y) * sigmoid_prime(z)\r\n\r\n\r\nclass CrossEntropyCost(object):\r\n\r\n @staticmethod\r\n def fn(a, y):\r\n \"\"\"Return the cost associated with an output ``a`` and desired output\r\n ``y``. Note that np.nan_to_num is used to ensure numerical\r\n stability. In particular, if both ``a`` and ``y`` have a 1.0\r\n in the same slot, then the expression (1-y)*np.log(1-a)\r\n returns nan. The np.nan_to_num ensures that that is converted\r\n to the correct value (0.0).\r\n\r\n \"\"\"\r\n return np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))\r\n\r\n @staticmethod\r\n def delta(z, a, y):\r\n \"\"\"Return the error delta from the output layer. Note that the\r\n parameter ``z`` is not used by the method. It is included in\r\n the method's parameters in order to make the interface\r\n consistent with the delta method for other cost classes.\r\n\r\n \"\"\"\r\n return (a-y)\r\n\r\n\r\n#### Main Network class\r\nclass Network(object):\r\n\r\n def __init__(self, sizes, cost=CrossEntropyCost):\r\n \"\"\"The list ``sizes`` contains the number of neurons in the respective\r\n layers of the network. For example, if the list was [2, 3, 1]\r\n then it would be a three-layer network, with the first layer\r\n containing 2 neurons, the second layer 3 neurons, and the\r\n third layer 1 neuron. The biases and weights for the network\r\n are initialized randomly, using\r\n ``self.default_weight_initializer`` (see docstring for that\r\n method).\r\n\r\n \"\"\"\r\n self.num_layers = len(sizes)\r\n self.sizes = sizes\r\n self.default_weight_initializer()\r\n self.cost=cost\r\n\r\n def default_weight_initializer(self):\r\n \"\"\"Initialize each weight using a Gaussian distribution with mean 0\r\n and standard deviation 1 over the square root of the number of\r\n weights connecting to the same neuron. Initialize the biases\r\n using a Gaussian distribution with mean 0 and standard\r\n deviation 1.\r\n\r\n Note that the first layer is assumed to be an input layer, and\r\n by convention we won't set any biases for those neurons, since\r\n biases are only ever used in computing the outputs from later\r\n layers.\r\n\r\n \"\"\"\r\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\r\n self.weights = [np.random.randn(y, x)/np.sqrt(x)\r\n for x, y in zip(self.sizes[:-1], self.sizes[1:])]\r\n\r\n def large_weight_initializer(self):\r\n \"\"\"Initialize the weights using a Gaussian distribution with mean 0\r\n and standard deviation 1. 
Initialize the biases using a\r\n Gaussian distribution with mean 0 and standard deviation 1.\r\n\r\n Note that the first layer is assumed to be an input layer, and\r\n by convention we won't set any biases for those neurons, since\r\n biases are only ever used in computing the outputs from later\r\n layers.\r\n\r\n This weight and bias initializer uses the same approach as in\r\n Chapter 1, and is included for purposes of comparison. It\r\n will usually be better to use the default weight initializer\r\n instead.\r\n\r\n \"\"\"\r\n self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]\r\n self.weights = [np.random.randn(y, x)\r\n for x, y in zip(self.sizes[:-1], self.sizes[1:])]\r\n\r\n def feedforward(self, a, biases_saved = None, weights_saved = None):\r\n \"\"\"Return the output of the network if ``a`` is input.\"\"\"\r\n\t\t\r\n\t\t#if the biases and weights are supplied - when custom user input is given\r\n if biases_saved is not None and weights_saved is not None:\r\n self.biases = biases_saved\r\n self.weights = weights_saved\r\n\t\t# =============\r\n\t\t\r\n for b, w in zip(self.biases, self.weights):\r\n a = sigmoid(np.dot(w, a)+b)\r\n return a\r\n\r\n def SGD(self, training_data, epochs, mini_batch_size, eta,\r\n lmbda = 0.0,\r\n evaluation_data=None,\r\n monitor_evaluation_cost=False,\r\n monitor_evaluation_accuracy=False,\r\n monitor_training_cost=False,\r\n monitor_training_accuracy=False):\r\n \"\"\"Train the neural network using mini-batch stochastic gradient\r\n descent. The ``training_data`` is a list of tuples ``(x, y)``\r\n representing the training inputs and the desired outputs. The\r\n other non-optional parameters are self-explanatory, as is the\r\n regularization parameter ``lmbda``. The method also accepts\r\n ``evaluation_data``, usually either the validation or test\r\n data. We can monitor the cost and accuracy on either the\r\n evaluation data or the training data, by setting the\r\n appropriate flags. The method returns a tuple containing four\r\n lists: the (per-epoch) costs on the evaluation data, the\r\n accuracies on the evaluation data, the costs on the training\r\n data, and the accuracies on the training data. All values are\r\n evaluated at the end of each training epoch. So, for example,\r\n if we train for 30 epochs, then the first element of the tuple\r\n will be a 30-element list containing the cost on the\r\n evaluation data at the end of each epoch. 
Note that the lists\r\n are empty if the corresponding flag is not set.\r\n\r\n \"\"\"\r\n if evaluation_data: n_data = len(evaluation_data)\r\n n = len(training_data)\r\n evaluation_cost, evaluation_accuracy = [], []\r\n training_cost, training_accuracy = [], []\r\n for j in xrange(epochs):\r\n random.shuffle(training_data)\r\n mini_batches = [\r\n training_data[k:k+mini_batch_size]\r\n for k in xrange(0, n, mini_batch_size)]\r\n for mini_batch in mini_batches:\r\n self.update_mini_batch(\r\n mini_batch, eta, lmbda, len(training_data))\r\n print (\"Epoch %s training complete\" % j)\r\n if monitor_training_cost:\r\n cost = self.total_cost(training_data, lmbda)\r\n training_cost.append(cost)\r\n print (\"Cost on training data: {}\".format(cost))\r\n if monitor_training_accuracy:\r\n accuracy = self.accuracy(training_data, convert=True)\r\n training_accuracy.append(accuracy)\r\n print (\"Accuracy on training data: {} / {}\".format(\r\n accuracy, n))\r\n if monitor_evaluation_cost:\r\n cost = self.total_cost(evaluation_data, lmbda, convert=True)\r\n evaluation_cost.append(cost)\r\n print (\"Cost on evaluation data: {}\".format(cost))\r\n if monitor_evaluation_accuracy:\r\n accuracy = self.accuracy(evaluation_data)\r\n evaluation_accuracy.append(accuracy)\r\n print (\"Accuracy on evaluation data: {} / {} - {}\".format(\r\n self.accuracy(evaluation_data), n_data, self.accuracy(evaluation_data)/float(n_data)*100))\r\n print()\r\n return evaluation_cost, evaluation_accuracy, \\\r\n training_cost, training_accuracy\r\n\r\n def update_mini_batch(self, mini_batch, eta, lmbda, n):\r\n \"\"\"Update the network's weights and biases by applying gradient\r\n descent using backpropagation to a single mini batch. The\r\n ``mini_batch`` is a list of tuples ``(x, y)``, ``eta`` is the\r\n learning rate, ``lmbda`` is the regularization parameter, and\r\n ``n`` is the total size of the training data set.\r\n\r\n \"\"\"\r\n nabla_b = [np.zeros(b.shape) for b in self.biases]\r\n nabla_w = [np.zeros(w.shape) for w in self.weights]\r\n for x, y in mini_batch:\r\n delta_nabla_b, delta_nabla_w = self.backprop(x, y)\r\n nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]\r\n nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]\r\n self.weights = [(1-eta*(lmbda/n))*w-(eta/len(mini_batch))*nw\r\n for w, nw in zip(self.weights, nabla_w)]\r\n self.biases = [b-(eta/len(mini_batch))*nb\r\n for b, nb in zip(self.biases, nabla_b)]\r\n\r\n def backprop(self, x, y):\r\n \"\"\"Return a tuple ``(nabla_b, nabla_w)`` representing the\r\n gradient for the cost function C_x. ``nabla_b`` and\r\n ``nabla_w`` are layer-by-layer lists of numpy arrays, similar\r\n to ``self.biases`` and ``self.weights``.\"\"\"\r\n nabla_b = [np.zeros(b.shape) for b in self.biases]\r\n nabla_w = [np.zeros(w.shape) for w in self.weights]\r\n # feedforward\r\n activation = x\r\n activations = [x] # list to store all the activations, layer by layer\r\n zs = [] # list to store all the z vectors, layer by layer\r\n for b, w in zip(self.biases, self.weights):\r\n z = np.dot(w, activation)+b\r\n zs.append(z)\r\n activation = sigmoid(z)\r\n activations.append(activation)\r\n # backward pass\r\n delta = (self.cost).delta(zs[-1], activations[-1], y)\r\n nabla_b[-1] = delta\r\n nabla_w[-1] = np.dot(delta, activations[-2].transpose())\r\n # Note that the variable l in the loop below is used a little\r\n # differently to the notation in Chapter 2 of the book. 
Here,\r\n # l = 1 means the last layer of neurons, l = 2 is the\r\n # second-last layer, and so on. It's a renumbering of the\r\n # scheme in the book, used here to take advantage of the fact\r\n # that Python can use negative indices in lists.\r\n for l in xrange(2, self.num_layers):\r\n z = zs[-l]\r\n sp = sigmoid_prime(z)\r\n delta = np.dot(self.weights[-l+1].transpose(), delta) * sp\r\n nabla_b[-l] = delta\r\n nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())\r\n return (nabla_b, nabla_w)\r\n\r\n def accuracy(self, data, convert=False):\r\n \"\"\"Return the number of inputs in ``data`` for which the neural\r\n network outputs the correct result. The neural network's\r\n output is assumed to be the index of whichever neuron in the\r\n final layer has the highest activation.\r\n\r\n The flag ``convert`` should be set to False if the data set is\r\n validation or test data (the usual case), and to True if the\r\n data set is the training data. The need for this flag arises\r\n due to differences in the way the results ``y`` are\r\n represented in the different data sets. In particular, it\r\n flags whether we need to convert between the different\r\n representations. It may seem strange to use different\r\n representations for the different data sets. Why not use the\r\n same representation for all three data sets? It's done for\r\n efficiency reasons -- the program usually evaluates the cost\r\n on the training data and the accuracy on other data sets.\r\n These are different types of computations, and using different\r\n representations speeds things up. More details on the\r\n representations can be found in\r\n mnist_loader.load_data_wrapper.\r\n\r\n \"\"\"\r\n if convert:\r\n results = [(np.argmax(self.feedforward(x)), np.argmax(y))\r\n for (x, y) in data]\r\n else:\r\n results = [(np.argmax(self.feedforward(x)), y)\r\n for (x, y) in data]\r\n return sum(int(x == y) for (x, y) in results)\r\n\r\n def total_cost(self, data, lmbda, convert=False):\r\n \"\"\"Return the total cost for the data set ``data``. The flag\r\n ``convert`` should be set to False if the data set is the\r\n training data (the usual case), and to True if the data set is\r\n the validation or test data. See comments on the similar (but\r\n reversed) convention for the ``accuracy`` method, above.\r\n \"\"\"\r\n cost = 0.0\r\n for x, y in data:\r\n a = self.feedforward(x)\r\n if convert: y = vectorized_result(y)\r\n cost += self.cost.fn(a, y)/len(data)\r\n cost += 0.5*(lmbda/len(data))*sum(\r\n np.linalg.norm(w)**2 for w in self.weights)\r\n return cost\r\n\r\n def save(self, filename):\r\n \"\"\"Save the neural network to the file ``filename``.\"\"\"\r\n data = {\"sizes\": self.sizes,\r\n \"weights\": [w.tolist() for w in self.weights],\r\n \"biases\": [b.tolist() for b in self.biases],\r\n \"cost\": str(self.cost.__name__)}\r\n f = open(filename, \"w\")\r\n json.dump(data, f)\r\n f.close()\r\n\r\n#### Loading a Network\r\ndef load(filename):\r\n \"\"\"Load a neural network from the file ``filename``. 
Returns an\r\n instance of Network.\r\n\r\n \"\"\"\r\n f = open(filename, \"r\")\r\n data = json.load(f)\r\n f.close()\r\n cost = getattr(sys.modules[__name__], data[\"cost\"])\r\n net = Network(data[\"sizes\"], cost=cost)\r\n net.weights = [np.array(w) for w in data[\"weights\"]]\r\n net.biases = [np.array(b) for b in data[\"biases\"]]\r\n return net\r\n\r\n#### Miscellaneous functions\r\ndef vectorized_result(j):\r\n \"\"\"Return a 10-dimensional unit vector with a 1.0 in the j'th position\r\n and zeroes elsewhere. This is used to convert a digit (0...9)\r\n into a corresponding desired output from the neural network.\r\n\r\n \"\"\"\r\n e = np.zeros((10, 1))\r\n e[j] = 1.0\r\n return e\r\n\r\ndef sigmoid(z):\r\n \"\"\"The sigmoid function.\"\"\"\r\n return 1.0/(1.0+np.exp(-z))\r\n\r\ndef sigmoid_prime(z):\r\n \"\"\"Derivative of the sigmoid function.\"\"\"\r\n return sigmoid(z)*(1-sigmoid(z))\r\n" ]
[ [ "numpy.dot", "numpy.log", "numpy.sqrt", "numpy.linalg.norm", "numpy.argmax", "numpy.random.randn", "numpy.array", "numpy.exp", "numpy.zeros" ] ]
MarvinT/pyoperant
[ "f5837cfd48e279023fe5e82a60d659d7171cb4b9" ]
[ "pyoperant/queues.py" ]
[ "import random\nfrom pyoperant.utils import rand_from_log_shape_dist\nimport cPickle as pickle\nimport numpy as np\n\ndef random_queue(conditions,tr_max=100,weights=None):\n \"\"\" generator which randomly samples conditions\n\n Args:\n conditions (list): The conditions to sample from. \n weights (list of ints): Weights of each condition\n\n Kwargs:\n tr_max (int): Maximum number of trial conditions to generate. (default: 100)\n\n Returns:\n whatever the elements of 'conditions' are\n\n \"\"\"\n if weights:\n conditions_weighted = []\n for cond,w in zip(conditions,weights):\n for ww in range(w):\n conditions_weighted += cond\n conditions = conditions_weighted\n\n tr_num = 0\n while tr_num < tr_max:\n yield random.choice(conditions)\n tr_num += 1\n\ndef block_queue(conditions,reps=1,shuffle=False):\n \"\"\" generate trial conditions from a block\n\n Args:\n conditions (list): The conditions to sample from. \n\n Kwargs:\n reps (int): number of times each item in conditions will be presented (default: 1)\n shuffle (bool): Shuffles the queue (default: False)\n\n Returns:\n whatever the elements of 'conditions' are\n\n \"\"\"\n conditions_repeated = []\n for rr in range(reps):\n conditions_repeated += conditions\n conditions = conditions_repeated\n\n if shuffle:\n random.shuffle(conditions)\n \n for cond in conditions:\n yield cond\n\nclass AdaptiveBase(object):\n \"\"\"docstring for AdaptiveBase\n This is an abstract object for implementing adaptive procedures, such as\n a staircase. Importantly, any objects inheriting this need to define the\n `update()` and `next()` methods.\n \"\"\"\n def __init__(self, **kwargs):\n self.updated = True # for first trial, no update needed\n self.update_error_str = \"queue hasn't been updated since last trial\"\n\n def __iter__(self):\n return self\n\n def update(self, correct, no_resp):\n self.updated = True\n if no_resp:\n self.no_response()\n\n def next(self):\n if not self.updated: #hasn't been updated since last trial\n raise Exception(self.update_error_str)\n self.updated = False\n\n def no_response(self):\n pass\n\n def on_load(self):\n try:\n super(AdaptiveBase, self).on_load()\n except AttributeError:\n pass\n self.updated = True\n self.no_response()\n\nclass PersistentBase(object):\n \"\"\"\n A mixin that allows for the creation of an obj through a load command that\n first checks for a pickled file to load an object before generating a new one.\n \"\"\"\n def __init__(self, filename=None, **kwargs):\n assert filename != None\n super(PersistentBase, self).__init__(**kwargs)\n self.filename = filename\n self.save()\n\n @classmethod\n def load(cls, filename, *args, **kwargs):\n try:\n with open(filename, 'rb') as handle:\n ab = pickle.load(handle)\n ab.on_load()\n return ab\n except IOError:\n return cls(*args, filename=filename, **kwargs)\n\n def on_load(self):\n try:\n super(PersistentBase, self).on_load()\n except AttributeError:\n pass\n\n def save(self):\n with open(self.filename, 'wb') as handle:\n pickle.dump(self, handle)\n\n\nclass KaernbachStaircase(AdaptiveBase):\n \"\"\" generates values for a staircase procedure from Kaernbach 1991\n This procedure returns values for each trial and assumes that larger values are\n easier. Thus, after a correct trial, the next value returned will be smaller and\n after incorrect trials, the next value returned will be larger. 
The magnitudes of\n these changes are stepsize_dn and stepsize_up, respectively.\n Args:\n start_val (float/int): the starting value of the procedure (default: 100)\n Kwargs:\n stepsize_up (int): number of steps to take after incorrect trial (default: 3)\n stepsize_dn (int): number of steps to take after correct trial (default: 1)\n min_val (float): minimum parameter value to allow (default: 0)\n max_val (float): maximum parameter value to allow (default: 100)\n crit (int): minimum number of trials (default: 0)\n crit_method (int): maximum number of trials (default: 100)\n Returns:\n float\n \"\"\"\n def __init__(self, \n start_val=100,\n stepsize_up=3,\n stepsize_dn=1,\n min_val=0,\n max_val=100,\n crit=100,\n crit_method='trials'\n ):\n super(KaernbachStaircase, self).__init__()\n self.val = start_val\n self.stepsize_up = stepsize_up\n self.stepsize_dn = stepsize_dn \n self.min_val = min_val\n self.max_val = max_val\n self.crit = crit\n self.crit_method = crit_method\n self.counter = 0\n self.going_up = False\n\n def update(self, correct, no_resp):\n super(KaernbachStaircase, self).update(correct, no_resp)\n \n self.val += -1*self.stepsize_dn if correct else self.stepsize_up\n\n if self.crit_method=='reversals':\n if correct==self.going_up: # checks if last trial's perf was consistent w/ trend\n self.counter += 1\n self.going_up = not self.going_up\n\n # stop at max/min if we hit the rails\n if (self.max_val!=None) and (self.val > self.max_val):\n self.val = self.max_val\n elif (self.min_val!=None) and (self.val < self.min_val):\n self.val = self.min_val\n\n def next(self):\n super(KaernbachStaircase, self).next()\n if self.counter > self.crit:\n raise StopIteration\n self.counter += 1 if self.crit_method=='trials' else 0\n return self.val\n\nclass DoubleStaircase(AdaptiveBase):\n \"\"\"\n Generates conditions from a list of stims that monotonically vary from most \n easily left to most easily right\n i.e. 
left is low and right is high\n\n The goal of this queue is to estimate the 50% point of a psychometric curve.\n\n This will probe left and right trials, if the response is correct, it will\n move the indices closer to each other until they are adjacent.\n\n stims: an array of stimuli names ordered from most easily left to most easily right\n rate_constant: the step size is the rate_constant*(high_idx-low_idx)\n \"\"\"\n def __init__(self, stims, rate_constant=.05, **kwargs):\n super(DoubleStaircase, self).__init__(**kwargs)\n self.stims = stims\n self.rate_constant = rate_constant\n self.low_idx = 0\n self.high_idx = len(self.stims) - 1\n self.trial = {}\n self.update_error_str = \"double staircase queue %s hasn't been updated since last trial\" % (self.stims[0])\n\n def update(self, correct, no_resp):\n super(DoubleStaircase, self).update(correct, no_resp)\n if correct:\n if self.trial['low']:\n self.low_idx = self.trial['value']\n else:\n self.high_idx = self.trial['value']\n self.trial = {}\n\n def next(self):\n super(DoubleStaircase, self).next()\n if self.high_idx - self.low_idx <= 1:\n raise StopIteration\n \n delta = int(np.ceil((self.high_idx - self.low_idx) * self.rate_constant))\n if random.random() < .5: # probe low side\n self.trial['low'] = True\n self.trial['value'] = self.low_idx + delta\n return {'class': 'L', 'stim_name': self.stims[self.trial['value']]}\n else:\n self.trial['low'] = False\n self.trial['value'] = self.high_idx - delta\n return {'class': 'R', 'stim_name': self.stims[self.trial['value']]}\n\n def no_response(self):\n super(DoubleStaircase, self).no_response()\n self.trial = {}\n\nclass DoubleStaircaseReinforced(AdaptiveBase):\n \"\"\"\n Generates conditions as with DoubleStaircase, but 1-probe_rate proportion of\n the trials easier/known trials to reduce frustration.\n\n Easier trials are sampled from a log shaped distribution so that more trials \n are sampled from the edges than near the indices\n\n stims: an array of stimuli names ordered from most easily left to most easily right\n rate_constant: the step size is the rate_constant*(high_idx-low_idx)\n probe_rate: proportion of trials that are between [0, low_idx] or [high_idx, length(stims)]\n \"\"\"\n def __init__(self, stims, rate_constant=.05, probe_rate=.1, sample_log=False, **kwargs):\n super(DoubleStaircaseReinforced, self).__init__(**kwargs)\n self.dblstaircase = DoubleStaircase(stims, rate_constant)\n self.stims = stims\n self.probe_rate = probe_rate\n self.sample_log = sample_log\n self.last_probe = False\n self.update_error_str = \"reinforced double staircase queue %s hasn't been updated since last trial\" % (self.stims[0])\n\n def update(self, correct, no_resp):\n super(DoubleStaircaseReinforced, self).update(correct, no_resp)\n if self.last_probe:\n self.dblstaircase.update(correct, no_resp)\n self.last_probe = False\n\n def next(self):\n super(DoubleStaircaseReinforced, self).next()\n\n if random.random() < self.probe_rate:\n try:\n ret = self.dblstaircase.next()\n self.last_probe = True\n return ret\n except StopIteration:\n self.probe_rate = 0\n self.last_probe = False\n return self.next()\n else:\n self.last_probe = False\n if random.random() < .5: # probe left\n if self.sample_log:\n val = int((1 - rand_from_log_shape_dist()) * self.dblstaircase.low_idx)\n else:\n val = random.randrange(self.dblstaircase.low_idx)\n return {'class': 'L', 'stim_name': self.stims[val]}\n else: # probe right\n if self.sample_log:\n val = self.dblstaircase.high_idx + int(rand_from_log_shape_dist() * 
(len(self.stims) - self.dblstaircase.high_idx)) \n else:\n val = self.dblstaircase.high_idx + random.randrange(len(self.stims) - self.dblstaircase.high_idx)\n return {'class': 'R', 'stim_name': self.stims[val]}\n\n def no_response(self):\n super(DoubleStaircaseReinforced, self).no_response()\n self.last_probe = False\n\n def on_load(self):\n super(DoubleStaircaseReinforced, self).on_load()\n self.dblstaircase.on_load()\n\n\nclass MixedAdaptiveQueue(PersistentBase, AdaptiveBase):\n \"\"\"\n Generates conditions from multiple adaptive sub queues.\n\n Use the generator MixedAdaptiveQueue.load(filename, sub_queues)\n to load a previously saved MixedAdaptiveQueue or generate a new one \n if the pkl file doesn't exist.\n\n sub_queues: a list of adaptive queues\n probabilities: a list of weights with which to sample from sub_queues\n should be same length as sub_queues\n NotImplemented\n filename: filename of pickle to save itself\n \"\"\"\n def __init__(self, sub_queues, probabilities=None, **kwargs):\n super(MixedAdaptiveQueue, self).__init__(**kwargs)\n self.sub_queues = sub_queues\n self.probabilities = probabilities\n self.sub_queue_idx = -1\n self.update_error_str = \"MixedAdaptiveQueue hasn't been updated since last trial\"\n self.save()\n\n def update(self, correct, no_resp):\n super(MixedAdaptiveQueue, self).update(correct, no_resp)\n self.sub_queues[self.sub_queue_idx].update(correct, no_resp)\n self.save()\n\n def next(self):\n super(MixedAdaptiveQueue, self).next()\n if self.probabilities is None:\n try:\n self.sub_queue_idx = random.randrange(len(self.sub_queues))\n return self.sub_queues[self.sub_queue_idx].next()\n except StopIteration:\n #TODO: deal with subqueue finished, and possibility of all subqueues finishing\n raise NotImplementedError\n else:\n #TODO: support variable probabilities for each sub_queue\n raise NotImplementedError\n\n def on_load(self):\n super(MixedAdaptiveQueue, self).on_load()\n for sub_queue in self.sub_queues:\n try:\n sub_queue.on_load()\n except AttributeError:\n pass\n\n\n\n\n" ]
[ [ "numpy.ceil" ] ]
tomsaleeba/natcap-invest-docker-flask
[ "1ef3e58f6af58d1783f6c0b8c80377e645923204" ]
[ "natcap_invest_docker_flask/natcap_wrapper.py" ]
[ "import time\nimport csv\nimport os\nfrom copy import deepcopy\nimport shutil\nimport logging\nimport base64\n# Note for eventlet: DO NOT call eventlet.monkey_patch(), it doesn't\n# work with multiprocessing\nimport multiprocessing as mp\nimport uuid\n\nimport natcap.invest.pollination\nimport shapefile\nimport subprocess32 as subprocess\nimport osgeo.ogr as ogr\nimport osgeo.osr as osr\nimport numpy as np\nfrom flask.json import dumps\n\nfrom natcap_invest_docker_flask.helpers import \\\n get_records, biophys_col_count, fill_in_and_write, \\\n subtract_reveg_from_farm, append_to_2d_np, subset_of_years\nfrom natcap_invest_docker_flask.logger import logger_getter\nfrom reveg_alg.alg import get_values_for_year\n\n# ogr2ogr defaults to the filename as the layer name, we want something more\n# predictable\nKNOWN_REVEG_LAYER_NAME = 'reveg_geojson'\n\nlogger = logger_getter.get_app_logger()\nlogging.getLogger('natcap').setLevel(logging.WARN)\nlogging.getLogger('taskgraph').setLevel(logging.WARN)\nlogging.getLogger('pygeoprocessing').setLevel(logging.WARN)\n\nmetres_of_padding_for_farm = int(os.getenv('FARM_PADDING_METRES', 1500))\nlogger.info(f'Using farm padding of {metres_of_padding_for_farm} metres')\nis_purge_workspace = bool(int(os.getenv('PURGE_WORKSPACE', 1)))\nlogger.info(f'Purge workspace after run = {is_purge_workspace}')\n\ndata_dir_path = u'/data/pollination'\napp_docker_dir_path = u'/app/docker'\nworkspace_parent_dir_path = u'/workspace/'\nreveg_lucode = 1337\nfarm_lucode = 2000 # only used for generating raster for humans\nfarm_layer_and_file_name = u'farms'\nreproj_reveg_filename = u'reprojected_' + KNOWN_REVEG_LAYER_NAME + '.json'\nFAILURE_FLAG = 'BANG!!!'\nyear_key = 'year'\n\n\ndef landcover_biophys_table_path(crop_type):\n return os.path.join(app_docker_dir_path, u'landcover_biophysical_table',\n crop_type + u'.csv')\n\n\ndef farm_attribute_table_path(crop_type):\n return os.path.join(app_docker_dir_path, u'farm_attribute_table',\n crop_type + u'.csv')\n\n\ndef workspace_path(suffix):\n return os.path.join(workspace_parent_dir_path, str(suffix))\n\n\ndef now_in_ms():\n return int(round(time.time() * 1000.0))\n\n\ndef generate_unique_token():\n return '%d_%s' % (now_in_ms(), uuid.uuid4())\n\n\ndef run_subprocess(args):\n subprocess.check_call(args, stdout=subprocess.DEVNULL)\n\n\ndef get_reveg_biophysical_table_row_for_year(year, existing_bp_table):\n val = get_values_for_year(year)\n prefix = 'fr_'\n fr_cols = filter(lambda x: x.startswith(prefix), val.keys())\n for curr in list(fr_cols):\n season = curr.replace(prefix, '')\n try:\n key = f'floral_resources_{season}_index'\n existing_bp_table[key]\n fr_season_col_val = val[curr]\n # continue so we delete the other values\n except ValueError:\n # purely so we can have nice, named debug output\n del val[curr]\n continue\n if not fr_season_col_val:\n raise ValueError('Programmer error: could not find a ' +\n f'{prefix} col value')\n logger.debug(f'[year {year}] biophys table reveg row is {val}')\n return [\n reveg_lucode,\n val['nesting_cavity'],\n val['nesting_ground'],\n fr_season_col_val,\n ]\n\n\ndef run_natcap_pollination(farm_vector_path, landcover_biophysical_table_path,\n landcover_raster_path, workspace_dir_path,\n crop_type, has_varroa_mite_hit):\n \"\"\" executes the pollination model and gathers the results \"\"\"\n varroa_fragment = '_varroa' if has_varroa_mite_hit else ''\n guild_csv = f'{crop_type}{varroa_fragment}.csv'\n args = {\n u'farm_vector_path':\n farm_vector_path,\n u'guild_table_path':\n 
os.path.join(app_docker_dir_path, u'guild_table', guild_csv),\n u'landcover_biophysical_table_path':\n landcover_biophysical_table_path,\n u'landcover_raster_path':\n landcover_raster_path,\n u'results_suffix':\n varroa_fragment,\n u'workspace_dir':\n workspace_dir_path,\n }\n natcap.invest.pollination.execute(args)\n farm_results = shapefile.Reader(\n os.path.join(workspace_dir_path, f'farm_results{varroa_fragment}'))\n records = get_records(farm_results.records(), farm_results.fields)\n return records\n\n\ndef add_year_to_record(year, record):\n record[year_key] = year\n\n\ndef set_reveg_flag(flag, record):\n record['has_reveg'] = flag\n\n\ndef set_varroa_flag(flag, record):\n record['is_varroa'] = flag\n\n\ndef read_biophys_table_from_file(file_path):\n return np.genfromtxt(file_path,\n delimiter=',',\n names=True,\n usecols=range(biophys_col_count))\n\n\ndef debug_dump_bp_table(bp_table, year_num):\n if not logger.isEnabledFor(logging.DEBUG):\n return\n # thanks https://stackoverflow.com/a/2891805/1410035\n with np.printoptions(precision=5, suppress=True):\n header = bp_table.dtype.names\n logger.debug(f'[year {year_num}] biophys table:\\n{header}\\n{bp_table}')\n\n\ndef do_no_reveg_runs(farm_vector_path, landcover_raster_path, workspace_dir,\n output_queue, crop_type, varroa_mite_year, total_years):\n try:\n logger.debug('processing year 0')\n year0_workspace_dir_path = os.path.join(workspace_dir, 'year0')\n os.mkdir(year0_workspace_dir_path)\n bp_table = build_no_reveg_biophys_table(crop_type)\n debug_dump_bp_table(bp_table, 0)\n year0_biophys_table_path = os.path.join(\n year0_workspace_dir_path, 'landcover_biophysical_table.csv')\n fill_in_and_write(bp_table, year0_biophys_table_path)\n\n def run_and_set_varroa_as(is_varroa):\n return run_natcap_pollination(farm_vector_path,\n year0_biophys_table_path,\n landcover_raster_path,\n year0_workspace_dir_path, crop_type,\n is_varroa)\n\n def yv_mapper(year, is_varroa):\n def result(e):\n add_year_to_record(year, e)\n set_reveg_flag(False, e) # these runs are the \"no reveg\" ones\n set_varroa_flag(is_varroa, e)\n return e\n return result\n\n # year 0 records\n year0_records = run_and_set_varroa_as(False)\n year0_no_varroa_recs = map(yv_mapper(0, False), year0_records)\n # note: we make copies of lots of records to make the client's life\n # easier when it comes to charting. 
There's no out-of-band knowledge\n # required to be able to chart the results.\n # note: we assume varroa won't hit in year 0\n year0_varroa_recs = map(\n yv_mapper(0, True), deepcopy(year0_records))\n\n year_before_varroa = varroa_mite_year - 1\n if year_before_varroa > 0:\n # we need this so the chart has the sudden drop for varroa\n year_before_varroa_recs = map(\n yv_mapper(year_before_varroa, True), deepcopy(year0_records))\n else:\n year_before_varroa_recs = []\n\n # varroa year records\n raw_year_varroa_records = run_and_set_varroa_as(True)\n year_varroa_recs = map(\n yv_mapper(varroa_mite_year, True), raw_year_varroa_records)\n\n # final year records\n year_final_no_varroa_recs = map(\n yv_mapper(total_years, False), deepcopy(year0_records))\n year_final_varroa_recs = map(\n yv_mapper(total_years, True), deepcopy(raw_year_varroa_records))\n result = list(year0_no_varroa_recs) + \\\n list(year0_varroa_recs) + \\\n list(year_before_varroa_recs) + \\\n list(year_varroa_recs) + \\\n list(year_final_no_varroa_recs) + \\\n list(year_final_varroa_recs)\n output_queue.put(result)\n except Exception:\n logger.exception(\n 'Failed while processing year 0') # stack trace will be included\n output_queue.put(FAILURE_FLAG)\n\n\ndef build_no_reveg_biophys_table(crop_type):\n \"\"\" Build biophysical table for run with no reveg.\n We need to chomp the reveg vector out of the farm vector so allow the reveg\n lucode to show through during reveg runs. However, during no reveg runs, we\n would just have the underlying lucode show through, which is not correct.\n Here we set the biophys table values for the reveg vector to what the farm\n is so the run is as-if the farm covers everything. The only caveat is that\n the model won't calculate yield for that chomped out piece.\"\"\"\n base_landcover_bp_table_path = landcover_biophys_table_path(crop_type)\n bp_table = read_biophys_table_from_file(base_landcover_bp_table_path)\n fat_row = load_farm_attributes(crop_type)\n prefix = 'fr_'\n fr_col_keys = list(filter(lambda x: x.startswith(prefix), fat_row.keys()))\n if len(fr_col_keys) != 1:\n raise ValueError(f'Data error: expecting exactly 1 {prefix} column')\n fr_season_col_val = fat_row[fr_col_keys[0]]\n reveg_row = [reveg_lucode, fat_row['n_cavity'], fat_row['n_ground'],\n fr_season_col_val]\n return append_to_2d_np(bp_table, reveg_row)\n\n\ndef run_future_year(farm_vector_path, landcover_raster_path, workspace_dir,\n year_number, output_queue, crop_type, varroa_mite_year):\n try:\n logger.debug(f'processing year {year_number}')\n year_workspace_dir_path = os.path.join(workspace_dir,\n f'year{year_number}')\n os.mkdir(year_workspace_dir_path)\n bp_table = build_biophys_table(crop_type, year_number)\n debug_dump_bp_table(bp_table, year_number)\n curr_year_landcover_bp_table_path = os.path.join(\n year_workspace_dir_path, 'landcover_biophysical_table.csv')\n fill_in_and_write(bp_table, curr_year_landcover_bp_table_path)\n\n def run_and_set_varroa_as(is_varroa):\n return run_natcap_pollination(farm_vector_path,\n curr_year_landcover_bp_table_path,\n landcover_raster_path,\n year_workspace_dir_path, crop_type,\n is_varroa)\n\n records = []\n\n def no_varroa_mapper(e):\n set_varroa_flag(False, e)\n set_reveg_flag(True, e)\n add_year_to_record(year_number, e)\n return e\n\n records += map(no_varroa_mapper, run_and_set_varroa_as(False))\n\n def varroa_mapper(e):\n set_varroa_flag(True, e)\n set_reveg_flag(True, e)\n add_year_to_record(year_number, e)\n return e\n\n has_varroa_mite_hit = year_number >= 
varroa_mite_year\n if has_varroa_mite_hit:\n varroa_records = run_and_set_varroa_as(True)\n records += map(varroa_mapper, varroa_records)\n else:\n # duplicate the non-varroa result for client's benefit\n records += map(varroa_mapper, deepcopy(records))\n output_queue.put(records)\n except Exception:\n logger.exception('Failed while processing year %d' %\n year_number) # stack trace will be included\n output_queue.put(FAILURE_FLAG)\n\n\ndef build_biophys_table(crop_type, year_number):\n base_landcover_bp_table_path = landcover_biophys_table_path(crop_type)\n bp_table = read_biophys_table_from_file(base_landcover_bp_table_path)\n reveg_row = get_reveg_biophysical_table_row_for_year(year_number, bp_table)\n return append_to_2d_np(bp_table, reveg_row)\n\n\ndef burn_reveg_on_raster(year0_raster_path, reveg_vector,\n year_workspace_dir_path):\n \"\"\" clones the raster and burns the reveg landuse code into the clone using\n the vector \"\"\"\n data = None\n result_path = os.path.join(year_workspace_dir_path,\n 'landcover_raster_with_reveg.tif')\n with open(year0_raster_path, 'rb') as f:\n data = f.read()\n with open(result_path, 'wb') as f:\n f.write(data)\n reprojected_reveg_vector_path = reproject_geojson_to_epsg3107(\n year_workspace_dir_path, reveg_vector)\n run_subprocess([\n '/usr/bin/gdal_rasterize', '-burn',\n str(reveg_lucode), '-l', KNOWN_REVEG_LAYER_NAME,\n reprojected_reveg_vector_path, result_path\n ])\n return result_path\n\n\ndef reproject_geojson_to_epsg3107(workspace_dir_path, reveg_geojson):\n reveg_vector_path = os.path.join(workspace_dir_path,\n KNOWN_REVEG_LAYER_NAME + '.json')\n with open(reveg_vector_path, 'w') as f:\n f.write(dumps(reveg_geojson))\n result_path = os.path.join(workspace_dir_path, reproj_reveg_filename)\n crs = get_crs_from_geojson(reveg_geojson)\n run_subprocess([\n '/usr/bin/ogr2ogr', '-s_srs', crs, '-t_srs', 'EPSG:3107', '-f',\n 'GeoJSON', result_path, reveg_vector_path\n ])\n return result_path\n\n\ndef generate_images(workspace_dir, landcover_raster_path, farm_vector_path):\n \"\"\" generates the images and reads in the bytes of each image \"\"\"\n result = {}\n farm_on_raster_path = os.path.join(workspace_dir,\n 'landcover_and_farm.png')\n burn_vector_script_path = os.path.join(app_docker_dir_path,\n 'burn-vector-to-raster-png.sh')\n # TODO we're using the chomped farm raster here. 
Perhaps we should burn the\n # reveg over the top, but colour it like the farm, which shows how we treat\n # the \"no reveg\" runs.\n run_subprocess([\n burn_vector_script_path, landcover_raster_path,\n farm_on_raster_path, farm_vector_path, farm_layer_and_file_name,\n str(farm_lucode)\n ])\n with open(farm_on_raster_path, 'rb') as f1:\n result['base'] = base64.b64encode(f1.read()).decode('utf-8')\n reveg_vector_path = os.path.join(workspace_dir, reproj_reveg_filename)\n is_only_year0_run = not os.path.isfile(reveg_vector_path)\n if is_only_year0_run:\n return result\n reveg_and_farm_on_raster_path = os.path.join(\n workspace_dir, 'landcover_and_farm_and_reveg.png')\n run_subprocess([\n burn_vector_script_path,\n farm_on_raster_path.replace('.png', '.tif'),\n reveg_and_farm_on_raster_path, reveg_vector_path,\n KNOWN_REVEG_LAYER_NAME, str(reveg_lucode)\n ])\n with open(reveg_and_farm_on_raster_path, 'rb') as f2:\n result['reveg'] = base64.b64encode(f2.read()).decode('utf-8')\n return result\n\n\ndef create_cropped_raster(farm_vector_path, workspace_dir):\n vector_extent = get_extent(farm_vector_path)\n full_raster_path = os.path.join(data_dir_path,\n u'south_australia_landcover.tif.gz')\n cropped_raster_path = os.path.join(workspace_dir, 'landcover_cropped.tif')\n run_subprocess(\n [\n '/usr/bin/gdal_translate',\n '-projwin',\n # probably specific to southern hemisphere and Australia's side of\n # 0 degree longitude.\n vector_extent['x_min'],\n vector_extent['y_max'],\n vector_extent['x_max'],\n vector_extent['y_min'],\n '-of',\n 'GTiff',\n u'/vsigzip/' + full_raster_path,\n cropped_raster_path\n ])\n return cropped_raster_path\n\n\ndef get_extent(farm_vector_path):\n sa_lambert_epsg_code = 3107\n target = osr.SpatialReference()\n target.ImportFromEPSG(sa_lambert_epsg_code)\n vector_ds = ogr.Open(farm_vector_path)\n vector_layer = vector_ds.GetLayer()\n geometry_collection = ogr.Geometry(ogr.wkbGeometryCollection)\n for curr_feature in vector_layer:\n geometry = curr_feature.GetGeometryRef()\n geometry.TransformTo(target)\n geometry_collection.AddGeometry(geometry)\n x_min, x_max, y_min, y_max = geometry_collection.GetEnvelope()\n return {\n 'x_min': str(x_min - metres_of_padding_for_farm),\n 'y_min': str(y_min - metres_of_padding_for_farm),\n 'y_max': str(y_max + metres_of_padding_for_farm),\n 'x_max': str(x_max + metres_of_padding_for_farm),\n }\n\n\ndef load_farm_attributes(crop_type):\n \"\"\" Loads the attributes for specified crop to be used in the farm vector\n attribute table \"\"\"\n with open(farm_attribute_table_path(crop_type), 'r') as f:\n reader = csv.DictReader(f)\n rows = list(reader)\n row_count = len(rows)\n if row_count != 1:\n logger.warn('Data error: Incorrect number of farm attribute rows' +\n f'found={row_count} for crop {crop_type}. Must be ' +\n 'exactly 1!')\n return rows[0]\n\n\ndef transform_geojson_to_shapefile(geojson_vector_from_user, filename_fragment,\n workspace_dir, crop_type):\n \"\"\" Writes the supplied GeoJSON to a file, then transforms it\n to a shapefile and returns the path to that shapefile \"\"\"\n shapefile_path = os.path.join(workspace_dir, filename_fragment + u'.shp')\n geojson_path = os.path.join(workspace_dir, filename_fragment + u'.json')\n row = load_farm_attributes(crop_type)\n row['crop_type'] = crop_type\n baked_geojson_vector = deepcopy(geojson_vector_from_user)\n # We only support a single polygon from the user. 
To support multiple\n # features/MultiPolygons we have to add logic that can draw the reveg in\n # the middle of *each* vector.\n baked_geojson_vector['features'][0]['properties'] = row\n crs = get_crs_from_geojson(geojson_vector_from_user)\n with open(geojson_path, 'w') as f:\n f.write(dumps(baked_geojson_vector))\n run_subprocess([\n '/usr/bin/ogr2ogr', '-s_srs', crs, '-t_srs', 'EPSG:3107', '-f',\n 'ESRI Shapefile', shapefile_path, geojson_path\n ])\n return shapefile_path\n\n\ndef get_crs_from_geojson(the_geojson):\n try:\n result = the_geojson['crs']['properties']['name']\n logger.debug('Using CRS from user input: %s' % result)\n return result\n except KeyError:\n return 'EPSG:4326'\n\n\nclass NatcapModelRunner(object):\n def execute_model(self, *args, **kwargs):\n return self._execute_model(create_cropped_raster, *args, **kwargs)\n\n def execute_model_for_sample_data(self, *args, **kwargs):\n def raster_fn(_, _2):\n # only works in the docker container, because the data comes from\n # the base image.\n return u'/data/pollination-sample/landcover.tif'\n\n return self._execute_model(raster_fn, *args, **kwargs)\n\n def _execute_model(self, landcover_raster_cropper_fn, geojson_farm_vector,\n years_to_simulate, geojson_reveg_vector, crop_type,\n mark_year_as_done_fn, sim_count_fn, varroa_mite_year):\n start_ms = now_in_ms()\n workspace_dir = workspace_path(generate_unique_token())\n logger.debug(f'using workspace dir \"{workspace_dir}\"')\n os.mkdir(workspace_dir)\n farm_vector_minus_reveg_geojson = subtract_reveg_from_farm(\n geojson_farm_vector, geojson_reveg_vector)\n farm_vector_minus_reveg_path = transform_geojson_to_shapefile(\n farm_vector_minus_reveg_geojson,\n farm_layer_and_file_name, workspace_dir,\n crop_type)\n farm_vector_path = farm_vector_minus_reveg_path\n cropped_landcover_raster_path = landcover_raster_cropper_fn(\n farm_vector_path, workspace_dir)\n landcover_raster_path = burn_reveg_on_raster(\n cropped_landcover_raster_path, geojson_reveg_vector,\n workspace_dir)\n\n # we use a pool so we can limit the number of concurrent processes. If\n # we just create processes we would either need to manage what's\n # running ourselves or have them all run at the same time\n pool = mp.Pool(mp.cpu_count())\n # TODO ideally we'd have one Pool that is used by all clients, not one\n # pool per HTTP request\n manager = mp.Manager()\n output = manager.Queue()\n processes = []\n\n processes.append(\n pool.apply_async(do_no_reveg_runs,\n (farm_vector_path, landcover_raster_path,\n workspace_dir, output, crop_type,\n varroa_mite_year, years_to_simulate)))\n for curr_year in subset_of_years(years_to_simulate, varroa_mite_year):\n processes.append(\n pool.apply_async(run_future_year,\n (farm_vector_minus_reveg_path,\n landcover_raster_path, workspace_dir,\n curr_year, output, crop_type,\n varroa_mite_year)))\n sim_count_fn(len(processes) + 1)\n records = []\n pool.close()\n # we don't pool.join(), instead we block on the results in the queue so\n # we can fire notifications as processes finish. 
We trust that once we\n # have all the results, all the processes are finished.\n for p in processes:\n process_result = output.get()\n if process_result == FAILURE_FLAG:\n raise RuntimeError('Failed while executing the model')\n mark_year_as_done_fn()\n records += process_result\n records.sort(key=lambda x: x[year_key])\n result = {\n 'images':\n generate_images(workspace_dir, cropped_landcover_raster_path,\n farm_vector_path),\n 'records':\n records,\n 'elapsed_ms':\n now_in_ms() - start_ms\n }\n if is_purge_workspace:\n shutil.rmtree(workspace_dir)\n logger.debug('execution time %dms' % result['elapsed_ms'])\n return result\n" ]
[ [ "numpy.printoptions" ] ]
piroshi303/s2d-bot
[ "7a8889ce6b88e7d3004e61459a43749381ab58c9" ]
[ "s2d-bot.py" ]
[ "import discord\nimport asyncio\nimport yaml\nimport pandas as pd\nimport urllib.request, urllib.error\nfrom xml.sax.saxutils import unescape\nfrom bs4 import BeautifulSoup\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n print('Logged in as' + client.user.name)\n print(client.user.id)\n print('------')\n\nasync def background_loop(channel_id):\n '''\n definition name:\n background_loop(channel_id)\n description:\n clinet起動時に実行されるBackground Loop\n argument:\n 'channel_id' -- Discordの書き込み先Text Channel Id\n '''\n\n await client.wait_until_ready()\n\n # Const\n YML_SHITARABA = 'shitaraba'\n YML_SHITARABA_CATEGORY = 'category'\n YML_SHITARABA_SEQUENCE = 'sequence'\n YML_SHITARABA_THREAD_STOP = 'thread_stop'\n YML_SHITARABA_NONAME = 'noname'\n\n ''' ==============================================\n 設定値の取得\n ============================================== '''\n with open('setting.yml', 'r') as rf:\n file = rf.read()\n\n ymlBaseValue = yaml.safe_load(file)\n ymlBbsValue = ymlBaseValue[YML_SHITARABA]\n\n bbsInfo = BbsInfo(ymlBbsValue[YML_SHITARABA_CATEGORY], ymlBbsValue[YML_SHITARABA_SEQUENCE], ymlBbsValue[YML_SHITARABA_THREAD_STOP])\n\n ''' ==============================================\n したらば掲示板情報の取得\n ============================================== '''\n bbsInfo.checkBbs()\n\n currentThreadUrlResponse = bbsInfo.currentThreadUrlResponse\n num = bbsInfo.currentThreadNum + 1\n\n beforeThreadName = bbsInfo.currentThreadName\n\n while not client.is_closed:\n channel = client.get_channel(channel_id)\n\n bbsResponse = BbsResponse(currentThreadUrlResponse + str(num))\n\n if bbsResponse.isGetResponse == True:\n ''' ==============================================\n レスのヘッダー情報作成\n ============================================== '''\n name = '【' + bbsInfo.currentThreadName + ': ' + bbsResponse.response_no\n\n # 名無しではない場合、名称を追加\n if ymlBbsValue[YML_SHITARABA_NONAME] != bbsResponse.name:\n name = name + ' - ' + bbsResponse.name\n\n name = name + '】'\n\n await client.send_message(channel, name + '\\n' + bbsResponse.response)\n\n ''' ==============================================\n スレッドが書き込み上限に達した場合は掲示板情報を更新\n ============================================== '''\n if num == bbsInfo.thread_stop:\n await asyncio.sleep(10)\n\n # したらば掲示板情報の更新\n bbsInfo.checkBbs()\n\n currentThreadName = bbsInfo.currentThreadName\n currentThreadUrlResponse = bbsInfo.currentThreadUrlResponse\n\n # レスカウンタの初期化\n num = 2\n\n # スレッドの変更通知\n await client.send_message(channel, beforeThreadName + 'が' + str(bbsInfo.thread_stop) + 'まで埋まりました。' + '\\n'\n + '次スレは' + currentThreadName + 'です。')\n\n # 次回利用するため\n beforeThreadName = bbsInfo.currentThreadName\n\n else:\n # 次スレに移動\n num += 1\n\n await asyncio.sleep(10)\n\nclass BbsInfo:\n '''\n class Name:\n BbsInfo\n description:\n したらば掲示板の情報を取得します。\n '''\n\n # private variable\n __category = ''\n __sequence = ''\n\n # public variable\n thread_stop = 0\n currentThreadId = ''\n currentThreadName = ''\n currentThreadNum = 0\n currentThreadUrlRead = ''\n currentThreadUrlResponse = ''\n\n # Construct\n def __init__(self, category, sequence, thread_stop):\n self.__category = category\n self.__sequence = sequence\n self.thread_stop = thread_stop\n\n def checkBbs(self):\n '''\n checkBbs(self)\n 'channel_id' -- Discordの書き込み先Text Channel Id\n '''\n\n # Const\n CATEGORY = 'category'\n SEQUENCE = 'sequence'\n URL_SUB = 'http://jbbs.shitaraba.net/' + CATEGORY + '/' + SEQUENCE + '/subject.txt'\n URL_THR = 'http://jbbs.shitaraba.net/bbs/read.cgi/' + CATEGORY + '/' + SEQUENCE + '/'\n URL_RES = 
'http://jbbs.shitaraba.net/bbs/rawmode.cgi/' + CATEGORY + '/' + SEQUENCE + '/'\n EUC = 'euc_jp'\n CGI = '.cgi'\n\n # Tuple\n columns = ('id', 'name_count', 'name', 'count', 'url1', 'url2', 'flag')\n\n ''' ==============================================\n 掲示板情報の取得\n ============================================== '''\n # subject.txtから掲示板一覧を取得\n subject_url = URL_SUB.replace(CATEGORY, self.__category).replace(SEQUENCE, self.__sequence)\n dataFrame = pd.DataFrame(pd.read_csv(subject_url, names=(columns[0], columns[1]), encoding=EUC))\n\n # ソートと重複削除\n if dataFrame.duplicated().any() == True:\n dataFrame = dataFrame.drop_duplicates().sort_values(by=columns[0], ascending=False)\n dataFrame = dataFrame.reset_index(drop=True)\n\n # 掲示板ID\n dataFrame[columns[0]] = dataFrame[columns[0]].str.replace(CGI, '')\n\n # 掲示板名\n dataFrame[columns[2]] = pd.Series(dataFrame[columns[1]].str.rsplit('(', expand=True, n=1).get(0))\n\n # 書き込み数\n dataFrame[columns[3]] = pd.Series(dataFrame[columns[1]].str.rsplit('(', expand=True, n=1).get(1).str.replace('(', '').str.replace(')', '')).astype(int)\n\n # スレッドのURL(API)\n dataFrame[columns[4]] = pd.Series(URL_THR.replace(CATEGORY, self.__category).replace(SEQUENCE, self.__sequence) + dataFrame[columns[0]] + '/')\n\n # レスのURL(API)\n dataFrame[columns[5]] = pd.Series(URL_RES.replace(CATEGORY, self.__category).replace(SEQUENCE, self.__sequence) + dataFrame[columns[0]] + '/')\n\n # スレッドストップフラグ\n dataFrame[columns[6]] = dataFrame[columns[3]].where(dataFrame[columns[3]] != self.thread_stop, False).where(dataFrame[columns[3]] == self.thread_stop, True)\n\n # 書き込み可能なスレッドのうち最も古い掲示板(チェック対象)を取得\n idSeries = pd.Series(dataFrame[columns[0]].where(dataFrame[columns[6]] == True)).dropna().sort_values(ascending=True).reset_index(drop=True)\n currentDataFrame = dataFrame.where(dataFrame[columns[0]] == idSeries[0]).dropna()\n\n # 書き込み可能なスレッドで最も古い掲示板情報(=カレントスレッド)の取得\n self.currentThreadId = currentDataFrame[columns[0]].values[0]\n self.currentThreadName = currentDataFrame[columns[2]].values[0]\n self.currentThreadNum = int(currentDataFrame[columns[3]].values[0])\n self.currentThreadUrlRead = currentDataFrame[columns[4]].values[0]\n self.currentThreadUrlResponse = currentDataFrame[columns[5]].values[0]\n\nclass BbsResponse:\n '''\n class Name:\n BbsResponse\n description:\n したらば掲示板の書き込みを取得します。\n '''\n\n # public variable\n response_no = ''\n name = ''\n e_mail = ''\n data_time = ''\n response = ''\n title = ''\n author_id = ''\n isGetResponse = False # レス取得フラグ: True = 取得 / False = 未取得\n\n def __init__(self, url):\n # Const\n EUC = 'euc_jp'\n SPLIT_TEXT = '<>'\n\n # Tuple\n keys = ('response_no', 'name', 'e_mail', 'date_time', 'response', 'title', 'author_id')\n\n opener = urllib.request.build_opener()\n\n try:\n r = opener.open(url)\n content = r.read().decode(EUC)\n contents = content.split('\\n')\n\n if content != '':\n self.isGetResponse = True\n\n buf = contents[0].split(SPLIT_TEXT)\n\n if len(buf) > 7:\n while len(buf) > 7:\n buf[4] = buf[4] + SPLIT_TEXT + buf[5]\n del buf[5]\n\n dic = dict(zip(keys, buf))\n\n # Remove <font>tag\n soup_name = BeautifulSoup(dic[keys[1]], 'html.parser')\n while (soup_name.font):\n soup_name.font.unwrap()\n\n dic[keys[1]] = soup_name.prettify()\n\n # Remove <a>tag\n soup_comment = BeautifulSoup(dic[keys[4]], 'html.parser')\n while (soup_comment.a):\n soup_comment.a.unwrap()\n\n # Remove <br>tag\n while (soup_comment.br):\n soup_comment.br.unwrap()\n\n dic[keys[4]] = soup_comment.prettify()\n\n self.response_no = dic[keys[0]]\n self.name = 
dic[keys[1]].replace('\\n', '')\n self.e_mail = dic[keys[2]]\n self.data_time = dic[keys[3]]\n self.response = unescape(dic[keys[4]])\n self.title = dic[keys[5]]\n self.author_id = dic[keys[6]]\n\n else:\n self.isGetResponse = False\n\n except urllib.error.HTTPError:\n self.isGetResponse = False\n\nif __name__ == '__main__':\n with open('setting.yml', 'r') as fp:\n file = fp.read()\n\n ymlBaseValue = yaml.safe_load(file)\n ymlBbsValue = ymlBaseValue['shitaraba']\n\n token = ymlBaseValue['token']\n channel_id = ymlBaseValue['channel_id']\n\n\n client.loop.create_task(background_loop(channel_id))\n client.run(token)\n" ]
[ [ "pandas.read_csv" ] ]
innat/vit-tensorflow
[ "2f0f009651295c054aa84ed45f4dace35e7ea442", "2f0f009651295c054aa84ed45f4dace35e7ea442" ]
[ "vit_tensorflow/mobile_vit.py", "vit_tensorflow/vit.py" ]
[ "import tensorflow as tf\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Layer\nfrom tensorflow.keras import Sequential\nimport tensorflow.keras.layers as nn\n\nfrom einops import rearrange\nfrom einops.layers.tensorflow import Reduce\n\n\ndef gelu(x, approximate=False):\n if approximate:\n coeff = tf.cast(0.044715, x.dtype)\n return 0.5 * x * (1.0 + tf.tanh(0.7978845608028654 * (x + coeff * tf.pow(x, 3))))\n else:\n return 0.5 * x * (1.0 + tf.math.erf(x / tf.cast(1.4142135623730951, x.dtype)))\n\n\nclass GELU(Layer):\n def __init__(self, approximate=False):\n super(GELU, self).__init__()\n self.approximate = approximate\n\n def call(self, x, training=True):\n return gelu(x, self.approximate)\n\n\nclass Swish(Layer):\n def __init__(self):\n super(Swish, self).__init__()\n\n def call(self, x, training=True):\n x = tf.keras.activations.swish(x)\n return x\n\n\nclass Conv_NxN_BN(Layer):\n def __init__(self, dim, kernel_size=1, stride=1):\n super(Conv_NxN_BN, self).__init__()\n\n self.layers = Sequential([\n nn.Conv2D(filters=dim, kernel_size=kernel_size, strides=stride, padding='SAME', use_bias=False),\n nn.BatchNormalization(momentum=0.9, epsilon=1e-5),\n Swish()\n ])\n\n def call(self, x, training=True):\n x = self.layers(x, training=training)\n return x\n\n\nclass PreNorm(Layer):\n def __init__(self, fn):\n super(PreNorm, self).__init__()\n\n self.norm = nn.LayerNormalization()\n self.fn = fn\n\n def call(self, x, training=True):\n return self.fn(self.norm(x), training=training)\n\n\nclass MLP(Layer):\n def __init__(self, dim, hidden_dim, dropout=0.0):\n super(MLP, self).__init__()\n\n self.net = Sequential([\n nn.Dense(units=hidden_dim),\n Swish(),\n nn.Dropout(rate=dropout),\n nn.Dense(units=dim),\n nn.Dropout(rate=dropout)\n ])\n\n def call(self, x, training=True):\n return self.net(x, training=training)\n\n\nclass Attention(Layer):\n def __init__(self, dim, heads=8, dim_head=64, dropout=0.0):\n super(Attention, self).__init__()\n\n inner_dim = dim_head * heads\n self.heads = heads\n self.scale = dim_head ** -0.5\n\n self.attend = nn.Softmax()\n self.to_qkv = nn.Dense(units=inner_dim * 3, use_bias=False)\n\n self.to_out = Sequential([\n nn.Dense(units=dim),\n nn.Dropout(rate=dropout)\n ])\n\n def call(self, x, training=True):\n qkv = self.to_qkv(x)\n qkv = tf.split(qkv, num_or_size_splits=3, axis=-1)\n\n q, k, v = map(lambda t: rearrange(t, 'b p n (h d) -> b p h n d', h=self.heads), qkv)\n\n dots = tf.matmul(q, tf.transpose(k, perm=[0, 1, 3, 2])) * self.scale\n attn = self.attend(dots)\n out = tf.matmul(attn, v)\n out = rearrange(out, 'b p h n d -> b p n (h d)')\n out = self.to_out(out, training=training)\n\n return out\n\n\nclass Transformer(Layer):\n def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0.0):\n super(Transformer, self).__init__()\n\n self.layers = []\n\n for _ in range(depth):\n self.layers.append([\n PreNorm(Attention(dim, heads, dim_head, dropout)),\n PreNorm(MLP(dim, mlp_dim, dropout))\n ])\n\n def call(self, x, training=True):\n for attn, ff in self.layers:\n x = attn(x, training=training) + x\n x = ff(x, training=training) + x\n\n return x\n\n\nclass MV2Block(Layer):\n def __init__(self, dim_in, dim_out, stride=1, expansion=4):\n super(MV2Block, self).__init__()\n\n assert stride in [1, 2]\n\n hidden_dim = int(dim_in * expansion)\n self.use_res_connect = stride == 1 and dim_in == dim_out\n\n if expansion == 1:\n self.conv = Sequential([\n # dw\n nn.Conv2D(filters=hidden_dim, kernel_size=3, strides=stride, padding='SAME', 
groups=hidden_dim,\n use_bias=False),\n nn.BatchNormalization(momentum=0.9, epsilon=1e-5),\n Swish(),\n # pw-linear\n nn.Conv2D(filters=dim_out, kernel_size=1, strides=1, use_bias=False),\n nn.BatchNormalization(momentum=0.9, epsilon=1e-5)\n ])\n else:\n self.conv = Sequential([\n # pw\n nn.Conv2D(filters=hidden_dim, kernel_size=1, strides=1, use_bias=False),\n nn.BatchNormalization(momentum=0.9, epsilon=1e-5),\n Swish(),\n # dw\n nn.Conv2D(filters=hidden_dim, kernel_size=3, strides=stride, padding='SAME', groups=hidden_dim,\n use_bias=False),\n nn.BatchNormalization(momentum=0.9, epsilon=1e-5),\n Swish(),\n # pw-linear\n nn.Conv2D(filters=dim_out, kernel_size=1, strides=1, use_bias=False),\n nn.BatchNormalization(momentum=0.9, epsilon=1e-5)\n ])\n\n def call(self, x, training=True):\n out = self.conv(x, training=training)\n if self.use_res_connect:\n out = out + x\n return out\n\n\nclass MobileViTBlock(Layer):\n def __init__(self, dim, depth, channel, kernel_size, patch_size, mlp_dim, dropout=0.0):\n super(MobileViTBlock, self).__init__()\n\n self.ph, self.pw = patch_size\n\n self.conv1 = Conv_NxN_BN(channel, kernel_size=kernel_size, stride=1)\n self.conv2 = Conv_NxN_BN(dim, kernel_size=1, stride=1)\n\n self.transformer = Transformer(dim=dim, depth=depth, heads=4, dim_head=8, mlp_dim=mlp_dim, dropout=dropout)\n\n self.conv3 = Conv_NxN_BN(channel, kernel_size=1, stride=1)\n self.conv4 = Conv_NxN_BN(channel, kernel_size=kernel_size, stride=1)\n\n def call(self, x, training=True):\n y = tf.identity(x)\n\n # Local representations\n x = self.conv1(x, training=training)\n x = self.conv2(x, training=training)\n\n # Global representations\n _, h, w, c = x.shape\n x = rearrange(x, 'b (h ph) (w pw) d -> b (ph pw) (h w) d', ph=self.ph, pw=self.pw)\n x = self.transformer(x, training=training)\n x = rearrange(x, 'b (ph pw) (h w) d -> b (h ph) (w pw) d', h=h // self.ph, w=w // self.pw, ph=self.ph,\n pw=self.pw)\n\n # Fusion\n x = self.conv3(x, training=training)\n x = tf.concat([x, y], axis=-1)\n x = self.conv4(x, training=training)\n\n return x\n\n\nclass MobileViT(Model):\n def __init__(self,\n image_size,\n dims,\n channels,\n num_classes,\n expansion=4,\n kernel_size=3,\n patch_size=(2, 2),\n depths=(2, 4, 3)\n ):\n super(MobileViT, self).__init__()\n assert len(dims) == 3, 'dims must be a tuple of 3'\n assert len(depths) == 3, 'depths must be a tuple of 3'\n\n ih, iw = image_size\n ph, pw = patch_size\n assert ih % ph == 0 and iw % pw == 0\n\n init_dim, *_, last_dim = channels\n\n self.conv1 = Conv_NxN_BN(init_dim, kernel_size=3, stride=2)\n\n self.stem = Sequential()\n self.stem.add(MV2Block(channels[0], channels[1], stride=1, expansion=expansion))\n self.stem.add(MV2Block(channels[1], channels[2], stride=2, expansion=expansion))\n self.stem.add(MV2Block(channels[2], channels[3], stride=1, expansion=expansion))\n self.stem.add(MV2Block(channels[2], channels[3], stride=1, expansion=expansion))\n\n self.trunk = []\n self.trunk.append([\n MV2Block(channels[3], channels[4], stride=2, expansion=expansion),\n MobileViTBlock(dims[0], depths[0], channels[5], kernel_size, patch_size, mlp_dim=int(dims[0] * 2))\n ])\n\n self.trunk.append([\n MV2Block(channels[5], channels[6], stride=2, expansion=expansion),\n MobileViTBlock(dims[1], depths[1], channels[7], kernel_size, patch_size, mlp_dim=int(dims[1] * 4))\n ])\n\n self.trunk.append([\n MV2Block(channels[7], channels[8], stride=2, expansion=expansion),\n MobileViTBlock(dims[2], depths[2], channels[9], kernel_size, patch_size, mlp_dim=int(dims[2] * 4))\n 
])\n\n self.to_logits = Sequential([\n Conv_NxN_BN(last_dim, kernel_size=1, stride=1),\n Reduce('b h w c -> b c', 'mean'),\n nn.Dense(units=num_classes, use_bias=False)\n ])\n\n def call(self, x, training=True, **kwargs):\n x = self.conv1(x, training=training)\n\n x = self.stem(x, training=training)\n\n for conv, attn in self.trunk:\n x = conv(x, training=training)\n x = attn(x, training=training)\n\n x = self.to_logits(x, training=training)\n\n return x\n\n\"\"\" Usage\nv = MobileViT(\n image_size=(256, 256),\n dims=[96, 120, 144],\n channels=[16, 32, 48, 48, 64, 64, 80, 80, 96, 96, 384],\n num_classes=1000\n)\n\nimg = tf.random.normal(shape=[1, 256, 256, 3])\npreds = v(img) # (1, 1000)\n\"\"\"", "import tensorflow as tf\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Layer\nfrom tensorflow.keras import Sequential\nimport tensorflow.keras.layers as nn\n\nfrom tensorflow import einsum\nfrom einops import rearrange, repeat\nfrom einops.layers.tensorflow import Rearrange\n\ndef pair(t):\n return t if isinstance(t, tuple) else (t, t)\n\nclass PreNorm(Layer):\n def __init__(self, fn):\n super(PreNorm, self).__init__()\n\n self.norm = nn.LayerNormalization()\n self.fn = fn\n\n def call(self, x, training=True):\n return self.fn(self.norm(x), training=training)\n\nclass MLP(Layer):\n def __init__(self, dim, hidden_dim, dropout=0.0):\n super(MLP, self).__init__()\n\n def GELU():\n def gelu(x, approximate=False):\n if approximate:\n coeff = tf.cast(0.044715, x.dtype)\n return 0.5 * x * (1.0 + tf.tanh(0.7978845608028654 * (x + coeff * tf.pow(x, 3))))\n else:\n return 0.5 * x * (1.0 + tf.math.erf(x / tf.cast(1.4142135623730951, x.dtype)))\n\n return nn.Activation(gelu)\n\n self.net = Sequential([\n nn.Dense(units=hidden_dim),\n GELU(),\n nn.Dropout(rate=dropout),\n nn.Dense(units=dim),\n nn.Dropout(rate=dropout)\n ])\n\n def call(self, x, training=True):\n return self.net(x, training=training)\n\nclass Attention(Layer):\n def __init__(self, dim, heads=8, dim_head=64, dropout=0.0):\n super(Attention, self).__init__()\n inner_dim = dim_head * heads\n project_out = not (heads == 1 and dim_head == dim)\n\n self.heads = heads\n self.scale = dim_head ** -0.5\n\n self.attend = nn.Softmax()\n self.to_qkv = nn.Dense(units=inner_dim * 3, use_bias=False)\n\n if project_out:\n self.to_out = [\n nn.Dense(units=dim),\n nn.Dropout(rate=dropout)\n ]\n else:\n self.to_out = []\n\n self.to_out = Sequential(self.to_out)\n\n def call(self, x, training=True):\n qkv = self.to_qkv(x)\n qkv = tf.split(qkv, num_or_size_splits=3, axis=-1)\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.heads), qkv)\n\n # dots = tf.matmul(q, tf.transpose(k, perm=[0, 1, 3, 2])) * self.scale\n dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n attn = self.attend(dots)\n\n # x = tf.matmul(attn, v)\n x = einsum('b h i j, b h j d -> b h i d', attn, v)\n x = rearrange(x, 'b h n d -> b n (h d)')\n x = self.to_out(x, training=training)\n\n return x\n\nclass Transformer(Layer):\n def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0.0):\n super(Transformer, self).__init__()\n\n self.layers = []\n\n for _ in range(depth):\n self.layers.append([\n PreNorm(Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout)),\n PreNorm(MLP(dim, mlp_dim, dropout=dropout))\n ])\n\n def call(self, x, training=True):\n for attn, mlp in self.layers:\n x = attn(x, training=training) + x\n x = mlp(x, training=training) + x\n\n return x\n\nclass ViT(Model):\n def __init__(self, image_size, 
patch_size, num_classes, dim, depth, heads, mlp_dim,\n pool='cls', dim_head=64, dropout=0.0, emb_dropout=0.0):\n \"\"\"\n image_size: int.\n -> Image size. If you have rectangular images, make sure your image size is the maximum of the width and height\n patch_size: int.\n -> Number of patches. image_size must be divisible by patch_size.\n -> The number of patches is: n = (image_size // patch_size) ** 2 and n must be greater than 16.\n num_classes: int.\n -> Number of classes to classify.\n dim: int.\n -> Last dimension of output tensor after linear transformation nn.Linear(..., dim).\n depth: int.\n -> Number of Transformer blocks.\n heads: int.\n -> Number of heads in Multi-head Attention layer.\n mlp_dim: int.\n -> Dimension of the MLP (FeedForward) layer.\n dropout: float between [0, 1], default 0..\n -> Dropout rate.\n emb_dropout: float between [0, 1], default 0.\n -> Embedding dropout rate.\n pool: string, either cls token pooling or mean pooling\n \"\"\"\n super(ViT, self).__init__()\n\n image_height, image_width = pair(image_size)\n patch_height, patch_width = pair(patch_size)\n\n assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'\n\n num_patches = (image_height // patch_height) * (image_width // patch_width)\n assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'\n\n self.patch_embedding = Sequential([\n Rearrange('b (h p1) (w p2) c -> b (h w) (p1 p2 c)', p1=patch_height, p2=patch_width),\n nn.Dense(units=dim)\n ], name='patch_embedding')\n\n self.pos_embedding = tf.Variable(initial_value=tf.random.normal([1, num_patches + 1, dim]))\n self.cls_token = tf.Variable(initial_value=tf.random.normal([1, 1, dim]))\n self.dropout = nn.Dropout(rate=emb_dropout)\n\n self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)\n\n self.pool = pool\n\n self.mlp_head = Sequential([\n nn.LayerNormalization(),\n nn.Dense(units=num_classes)\n ], name='mlp_head')\n\n def call(self, img, training=True, **kwargs):\n x = self.patch_embedding(img)\n b, n, d = x.shape\n\n cls_tokens = repeat(self.cls_token, '() n d -> b n d', b=b)\n x = tf.concat([cls_tokens, x], axis=1)\n x += self.pos_embedding[:, :(n + 1)]\n x = self.dropout(x, training=training)\n\n x = self.transformer(x, training=training)\n\n if self.pool == 'mean':\n x = tf.reduce_mean(x, axis=1)\n else:\n x = x[:, 0]\n\n x = self.mlp_head(x)\n\n return x\n\n\"\"\" Usage\n\nv = ViT(\n image_size = 256,\n patch_size = 32,\n num_classes = 1000,\n dim = 1024,\n depth = 6,\n heads = 16,\n mlp_dim = 2048,\n dropout = 0.1,\n emb_dropout = 0.1\n)\n\nimg = tf.random.normal(shape=[1, 256, 256, 3])\npreds = v(img) # (1, 1000)\n\n\"\"\"" ]
[ [ "tensorflow.keras.layers.LayerNormalization", "tensorflow.matmul", "tensorflow.concat", "tensorflow.transpose", "tensorflow.pow", "tensorflow.keras.activations.swish", "tensorflow.keras.layers.Dense", "tensorflow.cast", "tensorflow.identity", "tensorflow.keras.Sequential", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Dropout", "tensorflow.split", "tensorflow.keras.layers.Softmax" ], [ "tensorflow.keras.layers.LayerNormalization", "tensorflow.concat", "tensorflow.keras.layers.Activation", "tensorflow.reduce_mean", "tensorflow.pow", "tensorflow.keras.layers.Dense", "tensorflow.cast", "tensorflow.keras.Sequential", "tensorflow.einsum", "tensorflow.keras.layers.Dropout", "tensorflow.split", "tensorflow.random.normal", "tensorflow.keras.layers.Softmax" ] ]
YourRoyalLinus/dataset-integration
[ "de75c409347a94de729905903ae4a2ac9daeaa31" ]
[ "dataset_integration/calculations.py" ]
[ "from _collections_abc import Sequence\nfrom numpy import ndarray\nfrom scipy.integrate import cumulative_trapezoid\n\n\ndef strictly_increasing(s: Sequence):\n \"\"\"Returns true if sequence s is monotonically increasing\"\"\"\n return all(x < y for x, y in zip(s, s[1:]))\n\ndef strictly_decreasing(s: Sequence):\n \"\"\"Returns true if sequence s is monotonically decreasing\"\"\"\n return all(x > y for x, y in zip(s, s[1:]))\n\ndef is_monotonic(values :Sequence) -> bool:\n \"\"\"Returns true if the values sequence is monotonically increasing or \n monotonically decreasing\n \"\"\"\n if not (strictly_increasing(values) or strictly_decreasing(values)):\n return False\n else:\n return True\n\ndef integrate(x : Sequence, y: Sequence) -> ndarray:\n \"\"\"Return an ndarray as the result of cumulative integration of y along the \n axis using the composite trapezoidal rule. \n\n Positional Arguments:\n x : Sequence\n - values to represent the interval over which you're\n integrating\n\n y : Sequence\n - values to integrate\n \"\"\"\n return cumulative_trapezoid(y,x, initial=0)\n" ]
[ [ "scipy.integrate.cumulative_trapezoid" ] ]
fractalphile/michi
[ "449c784929e84b9d47728b8af4db8db2e292fb67" ]
[ "michi/geocoder/geocoder.py" ]
[ "import itertools\nimport pickle\nimport re\nfrom warnings import warn\n\nimport geopandas as gp\nfrom geosupport import Geosupport, GeosupportError\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nfrom shapely.geometry import LineString, Point\nfrom shapely.ops import transform\nfrom rtree import index as rtree_index\nimport pyproj\n\nfrom ..config import MICHI_HOME\nfrom ..utils.lion import load_lion_gdf\nfrom ..utils.utils import drop_consecutive_duplicates, method_file_cache\nfrom .errors import (\n IntersectionNotFoundError,\n NotAStreetError,\n StreetNameNotFoundError,\n StreetStretchNotFoundError,\n WrongWayError\n)\nfrom .globals import BOROUGHS, BOROUGH_CODES\nfrom .network import (\n build_monodirectional_network, drop_internal_nodes,\n build_directional_network, default_cost_function, build_segment_network\n)\nfrom .street_stretch import StreetStretch\n\n\ndef _add_connector_segments(lion):\n \"\"\"\n Add attribute `connector_segment` which means that the segment connect\n roadbed segments with a generic segment on a divided street.\n\n Changes `lion` in place.\n \"\"\"\n generic_nodes = set()\n roadbed_nodes = set()\n\n # Segment IDs to exclude\n exceptions = ['0182982', '0283378']\n\n for endpoint in ['from', 'to']:\n generic_nodes.update(lion[\n (lion['node_level_%s' % endpoint] == '*') &\n (lion['segment_type'].isin(['G', 'B', 'T']))\n ][['node_id_%s' % endpoint]]['node_id_%s' % endpoint])\n\n roadbed_nodes.update(lion[\n (lion['segment_type'].isin(['R', 'T']))\n ][['node_id_%s' % endpoint]]['node_id_%s' % endpoint])\n\n lion['connector_segment'] = (\n ~lion['segment_id'].isin(exceptions) &\n ((\n lion['node_id_to'].isin(generic_nodes) &\n lion['node_id_from'].isin(roadbed_nodes)\n ) | (\n lion['node_id_to'].isin(roadbed_nodes) &\n lion['node_id_from'].isin(generic_nodes)\n ))\n )\n\ndef _handle_special_addresses(lion):\n \"\"\"\n When there are special address codes/names, ensure that there is a duplicate\n row with the special name and code as the primary.\n\n Note: Only for special address type 'P' - addressable place names\n \"\"\"\n\n special = lion[\n (lion['special_address_type'].isin(['P', 'B', 'G'])) &\n (lion['street'] != lion['special_address_street_name'])\n ].drop(columns=['street', 'street_code'])\n\n special['street'] = special['special_address_street_name']\n special['street_code'] = special['special_address_street_code']\n special['special_address_street_code'] = \"\"\n special['special_address_street_name'] = \"\"\n\n lion = pd.concat([lion, special], sort=True).reset_index(drop=True)\n\n return lion\n\ndef _clean_lion_df(lion, crs):\n \"\"\"\n Load LION from SDE and then modify it for Geocoder.\n\n Parameters\n ----------\n lion_version: str\n The version of LION in YY{ABCD} format such as 19A, 18D, etc.\n crs: str\n The coordinate reference system (CRS) to convert the geometry to as a\n EPSG code such as 'epsg:2263' or 'epsg:4326'.\n \"\"\"\n # Strip extra whitespace from strings\n for col, dtype in lion.dtypes.reset_index().values:\n if (dtype == 'O') and (col is not 'geometry'):\n lion[col] = lion[col].str.strip()\n\n _add_connector_segments(lion)\n\n lion = lion[\n ((lion['node_level_from'] != '*') | (lion['node_level_from'] != '*')) &\n ~pd.isnull(lion['physical_id'])\n ].copy()\n lion['physical_id'] = lion['physical_id'].astype(int).astype(str)\n lion['node_to'] = lion['node_id_to'] + lion['node_level_to']\n lion['node_from'] = lion['node_id_from'] + lion['node_level_from']\n lion['borough_code'] = lion['street_code'].str[0]\n lion 
= lion.fillna({'number_travel_lanes': lion['number_total_lanes']})\n lion['number_travel_lanes'] = (\n lion['number_travel_lanes'].str.replace(r'^\\s*$', '1').astype(int)\n )\n\n lion = _handle_special_addresses(lion)\n\n lion = lion.drop_duplicates(\n subset=[col for col in lion.columns if col is not \"geometry\"]\n )\n\n # Merge streets that _should_ have a single street code\n # Keep the original street code as 'original_street_code'\n lion = _create_generic_street_codes(lion)\n\n lion['len'] = lion['geometry'].length\n if crs:\n lion = lion.to_crs({'init': crs})\n\n return lion\n\ndef _get_nodes_df(lion_df):\n columns = ['segment_id', 'street', 'street_code', 'borough_code']\n streets = lion_df[\n lion_df['traffic_direction'].isin(['W', 'A', 'T'])\n ].copy()\n\n streets['geometry_from'] = streets['geometry'].apply(\n lambda g: Point(g.coords[0])\n )\n streets['geometry_to'] = streets['geometry'].apply(\n lambda g: Point(g.coords[-1])\n )\n\n return pd.concat([\n streets[columns + ['node_to', 'geometry_to']].rename(\n columns={'node_to': 'node', 'geometry_to': 'geometry'}\n ),\n streets[columns + ['node_from', 'geometry_from']].rename(\n columns={'node_from': 'node', 'geometry_from': 'geometry'}\n )\n ]).drop_duplicates(columns + ['node'])\n\ndef _get_cscl_segments_df(lion):\n \"\"\"\n Given the LION dataframe, return a dataframe where each row is a CSCL\n segment instead of a LION segment.\n\n Parameters\n ----------\n lion : pandas.DataFrame\n The geocoder's lion_df\n\n Returns\n -------\n pandas.DataFrame\n \"\"\"\n def get_traffic_direction(group):\n \"\"\"\n Get a single traffic direction (T, W or A) for a physical_id and\n handle when there is more than one traffic direction for a single\n physical ID. If there is more than one, return the most common. If\n There are two tied, return 'T' for two-way.\n \"\"\"\n # If there's more than one traffic direction and there's a tie for the\n # most segments with a traffic direction, then return 'T' for two-way.\n # i.e there are 7 lion segmets or the physical_id, three have 'A',\n # three have 'W' and one has 'T', return 'T'.\n # `group['len']` is the number of segments with a given traffic\n # direction. See lines below this function for details.\n if (len(group) > 1) and (group['len'].iloc[0] == group['len'].iloc[1]):\n return 'T'\n else:\n return group['traffic_direction'].iloc[0]\n\n # Create a dataframe which has a count of the number of each traffic\n # direction per physical_id. Then apply `get_traffic_direction` to each\n # physical_id group to handle any cases where there are multiple directions\n # on a single physical_id. 
In the end, get a Series of traffic directions\n # with physical_id as the index.\n traffic_directions = lion.drop_duplicates([\n 'segment_id', 'traffic_direction'\n ]).groupby([\n 'physical_id', 'traffic_direction'\n ]).count()['len'].sort_values(\n ascending=False\n ).reset_index().groupby(\n 'physical_id'\n ).apply(get_traffic_direction)\n\n def get_street_street_code_pairs(group):\n \"\"\"\n Get a list of tuples of (street name, street code) for each physical_id\n sorted in descending order by the number of segments on that physical_id\n with that pair.\n \"\"\"\n return group.groupby([\n 'street', 'street_code'\n ]).count().sort_values('len', ascending=False).index.tolist()\n\n def get_segment_type(group):\n \"\"\"\n Get the segment type of the physical_id.\n \"\"\"\n types = group['segment_type'].tolist()\n\n # If any of the segments are 'T' (Terminator) or 'R' (Roadbed),\n # return that value.\n if len(types) > 1:\n for t in ['T', 'R']:\n if t in types:\n return t\n\n # Otherwise, just return the first value.\n return types[0]\n\n # Create the dataframe of cscl_segments with each row summarizing the LION\n # segments that compose the physical_id.\n df = pd.concat([\n # A set of the street codes on the segment\n lion.groupby('physical_id').apply(\n lambda g: set(g['street_code'])\n ).rename('street_code'),\n\n # A set of the street names\n lion.groupby('physical_id').apply(\n lambda g: set(g['street'])\n ).rename('street'),\n\n # A list of tuples of the street/street code pairs\n lion.groupby('physical_id').apply(\n get_street_street_code_pairs\n ).rename('street_street_code_pair'),\n\n # A set of the segment_ids\n lion.groupby('physical_id').apply(\n lambda g: set(g['segment_id'])\n ).rename('segment_ids'),\n\n # The length in feet of the physical_id\n lion.drop_duplicates([\n 'segment_id', 'physical_id'\n ]).groupby('physical_id')['len'].sum(),\n\n # The maximum number of travel lanes\n lion.groupby('physical_id')['number_travel_lanes'].max(),\n\n # The traffic direction\n traffic_directions.rename('traffic_direction'),\n\n # Whether any of the segments are connector segments\n lion.groupby('physical_id')['connector_segment'].any(),\n\n # The segment type\n lion.drop_duplicates(\n subset=['physical_id', 'segment_type']\n ).groupby('physical_id').apply(get_segment_type).rename('segment_type')\n ], axis=1)\n df['physical_id'] = df.index\n\n return df\n\ndef _get_spatial_index(df, base_crs, index_crs, version, name, object_id):\n \"\"\"\n creates an rtree spatial index on nodes to aid near searches\n\n Params:\n nodes_df: nodes data frame\n base_crs: crs of geocoder\n index_crs: crs of index if different from base, else None\n cache_key: cache_key created for _load_lion_data\n\n \"\"\"\n path = str(MICHI_HOME / version / 'geocoder' / name)\n index = rtree_index.Index(path)\n\n if len(index.leaves()) <= 1:\n print('Building {}. 
Base_CRS: {}, Index_CRS: {}'.format(\n name, base_crs, index_crs\n ))\n\n if index_crs:\n transformer = pyproj.Transformer.from_proj(\n pyproj.Proj(init=base_crs), pyproj.Proj(init=index_crs)\n )\n\n def get_bounding_box(geom, index_crs):\n if index_crs:\n geom = transform(transformer.transform, geom)\n X, Y = geom.xy\n X, Y = sorted(X), sorted(Y)\n xmin, xmax, ymin, ymax = X[0], X[-1], Y[0], Y[-1]\n return xmin, ymin, xmax, ymax\n\n for i, row in df.iterrows():\n index.insert(\n i, (get_bounding_box(row['geometry'], index_crs)),\n obj=row[object_id]\n )\n\n index.close()\n index = rtree_index.Index(path)\n\n return index\n\ndef _get_multi_borough_street_codes(nodes_df):\n def get_multi_borough_streets(group):\n borough_counts = group.groupby('node')['borough_code'].nunique()\n nodes = borough_counts[borough_counts > 1].index\n\n return group[\n group['node'].isin(nodes)\n ]['street_code'].unique() if len(nodes) else None\n\n multi_borough_streets = nodes_df.sort_values(\n 'borough_code'\n ).drop_duplicates(\n subset=['segment_id', 'node']\n ).groupby('street').apply(get_multi_borough_streets).dropna()\n\n records = []\n for street_codes in multi_borough_streets:\n for street_code in street_codes:\n records.append([street_code, '+'.join(sorted(street_codes))])\n\n street_code_df = pd.DataFrame.from_records(\n records, columns=['street_code', 'new_street_code']\n )\n\n return street_code_df\n\ndef _merge_generic_street_codes(lion_df, new_df):\n lion_df = lion_df.merge(new_df, on='street_code', how='left')\n lion_df['new_street_code'] = lion_df['new_street_code'].fillna(\n lion_df['street_code']\n )\n lion_df = lion_df.drop(columns=['street_code']).rename(\n columns={'new_street_code': 'street_code'}\n )\n return lion_df\n\ndef _east_west_generic_street_code(lion_df):\n connected_streets = []\n for borough_code in ['1', '2']:\n streets = lion_df[\n (lion_df['borough_code'] == borough_code) &\n lion_df['street'].str.contains('^(?:EAST|WEST){0,1} \\d+ STREET') &\n ~lion_df['street'].str.contains('(?:PEDESTRIAN|FOOTBRIDGE|BIKE)')\n ].copy()\n streets['number'] = streets['street'].str.extract(\n '^(EAST|WEST){0,1} (\\d+) STREET'\n )[1].str.zfill(3)\n streets_dict = _get_streets_dict(streets)\n\n def connected(group):\n street_codes = group['street_code'].unique()\n if len(street_codes) < 2:\n return (None, None)\n\n a, b = street_codes\n if streets_dict[a]['nodes'].intersection(\n streets_dict[b]['nodes']\n ):\n return (a, b), group['number'].iloc[0]\n\n return None, None\n\n for street_codes, number in streets.groupby('number').apply(connected):\n if number:\n for street_code in street_codes:\n connected_streets.append((\n street_code, '%s_%s_street' % (borough_code, number)\n ))\n\n return _merge_generic_street_codes(\n lion_df,\n pd.DataFrame.from_records(\n connected_streets, columns=['street_code', 'new_street_code']\n )\n )\n\ndef _create_generic_street_codes(lion_df):\n \"\"\"\n Handle places where the street continues, sometimes under a slightly\n different name, and always under a different street_code.\n\n For example, streets going accross the brooklyn queens border change\n street codes, but should be connected.\n\n Or enable \"Park Avenue\" to also refer to \"Park Avenue South\"\n \"\"\"\n lion_df['original_street_code'] = lion_df['street_code']\n\n # MN AND BX East/West streets that connect\n lion_df = _east_west_generic_street_code(lion_df)\n\n # Manhattan Park Ave\n lion_df.loc[\n (lion_df['borough_code'] == '1') &\n lion_df['street'].str.contains('^PARK AVENUE'),\n 
'street_code'\n ] = 'mn_park_ave'\n\n # Manhattan 7th Ave\n lion_df.loc[\n (lion_df['borough_code'] == '1') &\n lion_df['street'].str.contains('^(?:7 AVENUE|ADAM CLAYTON POWELL)'),\n 'street_code'\n ] = 'mn_7_ave'\n\n # Flatbush Ave\n lion_df.loc[\n (lion_df['borough_code'] == '3') &\n lion_df['street'].isin(['FLATBUSH AVENUE', 'FLATBUSH AVENUE EXTENSION']),\n 'street_code'\n ] = 'bk_flatbush_ave'\n\n # HIGHWAYS! TOO HARD! GUH...\n\n # Henry Hudson\n henry_hudson_street_codes = lion_df[\n lion_df['street'].str.contains('HENRY HUDSON') &\n (lion_df['non_pedestrian'] == 'V')\n ]['street_code'].unique()\n\n lion_df.loc[\n lion_df['street_code'].isin(henry_hudson_street_codes),\n 'street_code'\n ] = 'west_side_highway'\n\n # Major Deegan\n lion_df.loc[\n lion_df['street'].str.contains('^(?:MAJOR DEEGAN|MDE)'),\n 'street_code'\n ] = 'major_deegan'\n\n # Merge streets that go across borough boundaries\n nodes_df = _get_nodes_df(lion_df)\n lion_df = _merge_generic_street_codes(\n lion_df, _get_multi_borough_street_codes(nodes_df)\n )\n\n return lion_df\n\ndef _get_dead_end_nodes(nodes_df):\n nodes_df = nodes_df[\n ~nodes_df['node'].str.contains('\\*') &\n (nodes_df['street'] != 'DRIVEWAY')\n ].drop_duplicates(['segment_id', 'node'])\n dead_ends = nodes_df.groupby('node').filter(lambda g: len(g) == 1)\n\n return set(dead_ends['node'])\n\ndef _get_streets_dict(lion_df):\n streets = {}\n for street_code, group in lion_df.groupby('street_code'):\n streets[street_code] = {\n 'df': group,\n 'nodes': set(pd.concat([group['node_to'], group['node_from']])),\n }\n\n return streets\n\n\ndef _get_nodes_dict(nodes_df):\n \"\"\"\n The same as `geocoder._get_nodes_dict` but also return whether the node is\n internal.\n \"\"\"\n return dict(nodes_df.groupby('node').apply(\n lambda g: {\n 'segments': set(g['segment_id']),\n 'geometry': g['geometry'].iloc[0],\n 'dead_end': g['dead_end'].iloc[0],\n 'internal': g['internal'].iloc[0]\n }\n ))\n\ndef _get_segments_dict(lion_df, columns=None):\n if columns is None:\n columns = [\n 'segment_id', 'physical_id', 'node_from', 'node_to', 'geometry',\n 'right_blockface_id', 'left_blockface_id',\n 'street', 'street_code',\n 'street_width_min', 'street_width_max', 'len',\n 'number_travel_lanes', 'traffic_direction',\n 'connector_segment', 'segment_type'\n ]\n\n columns = [col for col in columns if col in lion_df]\n\n def create_dict(group):\n d = dict([(col, group[col].iloc[0]) for col in columns])\n d['street_code'] = set(group['street_code'])\n d['street'] = set(group['street'])\n d['street_street_code_pair'] = set(\n [tuple(i) for i in group[['street', 'street_code']].values]\n )\n return d\n\n return dict(lion_df.groupby('segment_id').apply(create_dict))\n\n# Call super left to right\n# Geocoder -> SearchMixin -> NetworkMixin\n#class Geocoder(SearchMixin, NetworkMixin):\nclass Geocoder():\n \"\"\"\n A class that provides functionality for getting street stretches, etc.\n\n Exposes the following functions:\n\n normalize_street_name\n Returns a street in the format the LION and Geosupport use.\n get_street_code\n Given a street name, return the street code.\n street_code_exists\n Return whether the given street code exists in LION's streets\n autocomplete\n Given a text search, return possible street name matches.\n address\n Find the street segment and side of street of an address.\n \"\"\"\n def __init__(self, lion_version, crs='epsg:2263',\n lion_loader=load_lion_gdf, force_rebuild=False,\n network_type='cscl',\n include_spatial_index=False, spatial_index_crs=None, 
geosupport_version=None, **kwargs):\n\n self.crs = crs\n self.lion_version = lion_version.lower()\n self.lion_loader = lion_loader\n self.force_rebuild = force_rebuild\n self.cache_path = MICHI_HOME / self.lion_version / 'geocoder'\n if not self.cache_path.exists():\n self.cache_path.mkdir(parents=True)\n\n # ensure CRS parameters are parsable before instantiation\n pyproj.crs.CRS(crs)\n if spatial_index_crs:\n pyproj.crs.CRS(spatial_index_crs)\n\n # Instantiate Geosupport\n try:\n if geosupport_version is None:\n geosupport_version = self.lion_version\n self.geosupport = Geosupport(geosupport_version=geosupport_version)\n except Exception:\n # Use default geosupport\n self.geosupport = Geosupport()\n warn(\n \"Using default Geosupport. \"\n \"May not match LION %s\" % self.lion_version\n )\n\n (\n self.lion_df, self.nodes_df,\n self.lion_segments, self.streets,\n self.street_code_map\n ) = self._load_lion_data()\n\n self.street_names = self._build_search_data()\n\n self.network_type = network_type\n if network_type == 'cscl':\n self.segment_column = 'physical_id'\n self.segment_dict = 'cscl_segments'\n elif network_type == 'lion':\n self.segment_column = 'segment_id'\n self.segment_dict = 'lion_segments'\n\n # A regex to parse geometry strings, to be used by `self.parse_geometry`\n self.geometry_regex = re.compile(r'([a-z_]*):*(\\d+)([A-Z]{0,1})')\n\n (\n self.cscl_segments_df, self.cscl_segments,\n self.node_network, self.segment_network,\n self.nodes_df, self.nodes\n ) = self._build_networks(self.network_type)\n\n self.segments = getattr(self, self.segment_dict)\n\n # SearchMixin\n # TODO Don't skip this, testing!\n #super().__init__(clear_cache=clear_cache, **kwargs)\n\n # get cache key to uniquely recognize spatial indexes\n #cache_key = get_cache_key(('_load_lion_data',) + (self,) + ({},))\n\n # load spatial indexes\n self.include_spatial_index = include_spatial_index\n self.spatial_index_crs = spatial_index_crs\n\n if self.include_spatial_index:\n self.lion_index = _get_spatial_index(\n self.lion_df, self.crs, self.spatial_index_crs, lion_version,\n 'segment_index', 'segment_id'\n )\n self.node_index = _get_spatial_index(\n self.nodes_df, self.crs, self.spatial_index_crs, lion_version,\n 'node_index', 'node'\n )\n\n @method_file_cache('lion.pkl')\n def _load_lion_data(self):\n print(\"Downloading and Building Geocoder Data...\")\n #lion_df = _load_lion_df(\n # self.lion_version, self.crs\n #)\n lion_df = self.lion_loader(self.lion_version)\n\n '''return (\n lion_df,# nodes_df,\n None, None, None, None\n #segments, streets,\n #street_code_map\n )'''\n\n lion_df = _clean_lion_df(lion_df, self.crs)\n\n # Create nodes_df\n nodes_df = _get_nodes_df(lion_df)\n\n # Get dead ends\n dead_end_nodes = _get_dead_end_nodes(nodes_df)\n nodes_df['dead_end'] = nodes_df['node'].isin(dead_end_nodes)\n\n # Convert to GeoDataFrame\n nodes_df = gp.GeoDataFrame(\n nodes_df, geometry='geometry', crs={'init': self.crs}\n )\n\n # Cache stuff for quick access\n streets = _get_streets_dict(lion_df)\n segments = _get_segments_dict(lion_df)\n\n # Add dead end nodes to `streets`\n dead_end_street_codes = self._get_dead_end_street_codes()\n streets['dead_end'] = {'nodes': dead_end_nodes, 'df': None}\n\n # Create a mapping of street codes to internal street codes\n street_code_map = dict(lion_df[[\n 'original_street_code', 'street_code'\n ]].drop_duplicates().values)\n\n # Add dead ends to street_code_map\n street_code_map.update(dict([\n (i, 'dead_end') for i in dead_end_street_codes\n ]))\n\n # Add mapping from new values to themselves\n for key, value in
list(street_code_map.items()):\n if value not in street_code_map:\n street_code_map[value] = value\n\n return (\n lion_df, nodes_df,\n segments, streets,\n street_code_map\n )\n\n @method_file_cache('search.pkl')\n def _build_search_data(self):\n print(\"Building Search Data...\")\n\n # Create dataframe with street names for autocomplete\n street_names = self.lion_df[\n self.lion_df['feature_type'].isin(['0', '6', '8', 'A', 'W'])\n ].groupby([\n 'street', 'street_code', 'borough_code'\n ]).count()['segment_id'].reset_index()\n\n # Add options for a dead end for each borough to the dataframe.\n dead_end_street_codes = self._get_dead_end_street_codes()\n street_names = street_names.append([{\n 'street': 'DEAD END',\n 'street_code': 'dead_end',\n 'borough_code': street_code[0],\n 'segment_id': 100\n } for street_code in dead_end_street_codes])\n\n # A function to get the geometry of each \"street\" used for the\n # centroid distance functionality of autocomplete.\n def get_geometry(street_code):\n if street_code == 'dead_end':\n return self.nodes_df[\n self.nodes_df['node'].isin(self.streets['dead_end']['nodes'])\n ]['geometry'].unary_union\n\n return self.streets[street_code]['df']['geometry'].unary_union\n\n street_names['geometry'] = street_names['street_code'].apply(get_geometry)\n\n street_names = gp.GeoDataFrame(street_names.sort_values(\n 'segment_id', ascending=False\n ).reset_index(drop=True))\n\n return street_names\n\n def _get_street_code(self, borough, street):\n return self.geosupport.get_street_code({\n 'street_name': street, 'borough_code': borough\n })['B10SC - First Borough and Street Code']\n\n def _get_dead_end_street_codes(self):\n return [self._get_street_code(b, 'DEAD END') for b in BOROUGHS]\n\n def _get_intersection(self, street_code_1, street_code_2):\n street_code_1 = self.normalize_street_code(street_code_1)\n street_code_2 = self.normalize_street_code(street_code_2)\n\n return self.streets[street_code_1]['nodes'].intersection(\n self.streets[street_code_2]['nodes']\n )\n\n def get_street_code(self, borough, street):\n \"\"\"\n Given a borough and street name, return the street code.\n\n Parameters\n ----------\n street : str\n borough : str\n The borough code, abbreviation or name.\n\n Returns\n -------\n str\n The street code\n\n Raises\n ------\n NotAStreetError\n If the \"street\" is a valid identifier in Geosupport, but isn't a\n drivable street in LION.\n StreetNameNotFoundError\n If the street name isn't recognized raise this error. 
The error\n has an attribute `options` with up to 10 alternate street names\n that are similar to the given one.\n \"\"\"\n try:\n street_code = self.normalize_street_code(\n self._get_street_code(borough, street)\n )\n\n if self.street_code_exists(street_code):\n return street_code\n else:\n raise NotAStreetError(street, street_code)\n\n except GeosupportError as error:\n # If geosupport raised an error, include it's suggested street\n # names in the raised StreetNameNotFoundError.\n options = []\n for s in error.result['List of Street Names']:\n street_code = self.normalize_street_code(\n self._get_street_code(borough, s)\n )\n # Only return valid streets\n if self.street_code_exists(street_code):\n # Append the name and the code.\n options.append((s, street_code))\n\n raise StreetNameNotFoundError(street, options)\n\n def normalize_street_code(self, street_code):\n if street_code in self.street_code_map:\n return self.street_code_map[street_code]\n street_code = str(street_code)[:6].zfill(6)\n return self.street_code_map.get(street_code, street_code)\n\n def street_code_exists(self, street_code):\n return self.normalize_street_code(street_code) in self.streets\n\n def normalize_street_name(self, street):\n \"\"\"\n Return the street name normalized into the format used by LION and\n Geosupport.\n\n Parameters\n ----------\n street : str\n The raw street name.\n\n Returns\n -------\n str\n \"\"\"\n return re.sub(\n r'\\s+', ' ',\n self.geosupport.normalize_street_name(\n street=street\n )['First Street Name Normalized']\n )\n\n def autocomplete(self, text, borough=None, cross_street_code=None,\n return_top_n=10, centroid=None):\n \"\"\"\n Given a string and optional filter parameters, return the most likely\n streets.\n\n Parameters\n ----------\n text : str\n The street name or partial street name.\n borough : str, optional\n Optionally constrain the search to a single borough which can be\n provided as a borough code, abbreviation or full name.\n cross_street_code : str, optional\n Only return streets that intersect with the given street.\n return_top_n : int, optional\n The number of results to return. (Default 10)\n centroid : shapely.geometry.Point, optional\n A point in the same crs as geocoder.crs. 
If given, sort the results\n by distance from the centroid.\n \"\"\"\n text = text.strip().upper()\n text_normalized = self.normalize_street_name(text)\n\n df = self.street_names.copy()\n\n if borough:\n df = df[df['borough_code'] == str(BOROUGH_CODES[str(borough)])]\n\n if cross_street_code:\n cross_street_code = self.normalize_street_code(cross_street_code)\n\n # Get all the nodes that are on the given street code.\n nodes = self.nodes_df[self.nodes_df['node'].isin(\n self.streets[cross_street_code]['nodes']\n )]\n\n # Get a list of street codes that intersect with those nodes.\n street_codes = nodes['street_code'].unique().tolist()\n\n # If any of the nodes are a dead end, add that street.\n if nodes['dead_end'].any():\n street_codes.append('dead_end')\n\n boroughs = nodes['borough_code'].unique()\n\n df = df[\n df['street_code'].isin(street_codes) &\n df['borough_code'].isin(boroughs) # Handles the extra dead ends\n ]\n\n query = (\n # Starts with the text\n df['street'].str.startswith(text) |\n df['street'].str.startswith(text_normalized) |\n\n # Contains the full word\n df['street'].str.contains(r'\\b%s\\b' % text) |\n df['street'].str.contains(r'\\b%s\\b' % text_normalized)\n )\n\n # If the string is 5 or more characters, search anywhere in the string\n if len(text) >= 5:\n query = (\n query | df['street'].str.contains(text) |\n df['street'].str.contains(text_normalized)\n )\n\n df = df[query].copy()\n\n if centroid:\n # If a cross street is given, find the distance to the place\n # where the two streets intersect, and not just to anywhere along\n # the street.\n if cross_street_code:\n func = lambda s: centroid.distance(\n self.nodes[\n self._get_intersection(s, cross_street_code).pop()\n ]['geometry']\n )\n df['distance'] = df['street_code'].apply(func)\n else:\n df['distance'] = df['geometry'].distance(centroid)\n\n # By default, the options are sorted by \"segment_id\", which is the\n # number of segments with that street name in the city.\n # i.e, show common streets like \"Broadway\" before \"Broad Street\"\n # When sorting by distance, we want to take into account both\n # the distance and how common the street is.\n # The log of the count will map the count into a much smaller but\n # still increasing number. 
\"+ e - 1\" ensures that the devisor starts\n # at 1 for streets that only have 1 segment.\n # After division, if two streets are the same distance from the\n # centroid, the one with a higher count will have a lower\n # \"distance.\" But the count won't overpower the actual distance.\n # After, the options are sorted by distance in ascending order.\n df['distance'] = df['distance'] / np.log(df['segment_id'] + np.e - 1)\n df = df.sort_values('distance')\n\n df = df.head(return_top_n)\n\n # Convert the resulting dataframe into a list of dictionaries\n results = []\n for i,row in df.iterrows():\n results.append({\n 'street': row['street'],\n 'street_code': row['street_code'],\n 'borough_code': row['borough_code'],\n 'node': list(self._get_intersection(\n cross_street_code, row['street_code']\n )) if cross_street_code else None\n })\n\n return results\n\n def address(self, house_number, street, borough, drivable=True):\n \"\"\"\n Given an address as house number, street and borough, return the\n segment id, physical id, blockface id and side of street of that\n address.\n\n Parameters\n ----------\n house_number : str or int\n The house number, including hyphens for Queens addresses.\n street : str\n borough : str\n The borough code, abbreviation or name.\n drivable : bool, optional\n Whether to only return drivable segments. (Default True)\n\n Returns\n -------\n dict\n dict of segment id, physical id, blockface id and side of street\n \"\"\"\n street_code = self._get_street_code(borough, street)\n street_normalized = self.normalize_street_name(street)\n\n # Create a list of possible geographic identifiers for this address.\n # Sometimes the physical location of a building is not reflected\n # in its address, so we'll use GeoSupport to identify other options.\n # The first one is simply the house number and street code.\n lgis = [(house_number, street_code)]\n\n # Then pass the address to GeoSupport and iterate through its\n # list of geographic identifiers and add all of them.\n for lgi in self.geosupport.address(\n borough=borough, street=str(house_number) + ' ' + street_normalized\n )['LIST OF GEOGRAPHIC IDENTIFIERS']:\n lgis.append((\n lgi['High House Number'],\n lgi['Borough Code'] + lgi['5-Digit Street Code']\n ))\n\n # Eliminate generic segments and, if drivable is True, only include\n # drivable segments.\n df = self.lion_df[\n (\n self.lion_df['traffic_direction'].isin(['W', 'A', 'T'])\n if drivable else True\n ) & (~self.lion_df['segment_type'].isin(['G']))\n ]\n\n # A function to determine if an address is on the given side of the\n # street.\n def same_parity(a, b):\n return (a % 2) == (b % 2)\n\n for house, street_code in lgis:\n # First, normalize the house number.\n if type(house) == str:\n if '-' in house:\n if street_code[0] == '4':\n # If the street is in Queens and the number contains\n # a hyphen, then convert it into the format tha LION\n # uses by multiplying the first part by 1000 and adding\n # the second.\n a,b = house.split('-')\n house = 1000*int(a) + int(b)\n else:\n # If not Queens, treat it as a range and use the first.\n house = int(house.split('-')[0])\n else:\n house = int(house)\n\n # Get a dataframe of the segments that could match the given\n # street code and house number.\n segments = df[\n (df['original_street_code'] == street_code[:6]) &\n (\n (\n (df['from_left'] <= house) & (df['to_left'] >= house) &\n same_parity(df['from_left'], house)\n ) | (\n (df['from_right'] <= house) & (df['to_right'] >= house) &\n same_parity(df['from_right'], house)\n )\n 
)\n ]\n\n # If there are matches, check whether the address matches the\n # left or right side.\n for i,row in segments.iterrows():\n if (row['from_left'] != 0) and (\n (row['from_left'] % 2) == (house % 2)\n ):\n return {\n 'segment_id': row['segment_id'],\n 'physical_id': row['physical_id'],\n 'blockface_id': row['left_blockface_id'],\n 'side': 'L'\n }\n if (row['from_right'] != 0) and (\n (row['from_right'] % 2) == (house % 2)\n ):\n return {\n 'segment_id': row['segment_id'],\n 'physical_id': row['physical_id'],\n 'blockface_id': row['right_blockface_id'],\n 'side': 'R'\n }\n\n def _get_terminators(self, nodes):\n \"\"\"\n Terminator segments are segments which exist at the point when a\n multi-roadbed street becomes a single roadbed.\n\n Geocoder uses these to prevent u-turns from one roadbed onto another\n at these nodes where the terminator segments meet.\n\n This function returns a dictionary which has a key for each\n physical_id that is a terminator where the value is a set of all\n terminator physical_ids which connect to that segment.\n\n Parameters\n ----------\n nodes : dict\n The nodes dictionary from `_get_nodes_dict`\n\n Returns\n -------\n dict of physical_id -> set of physical_ids\n \"\"\"\n terminators = {}\n\n for node in nodes:\n # Get all the physical_ids that are terminator segments that\n # connect to the given nodes.\n pids = [\n self.lion_segments[sid]['physical_id']\n for sid in nodes[node]['segments']\n if self.lion_segments[sid]['segment_type'] == 'T'\n ]\n\n # Create a set of the physical_ids connected to this node and then\n # update that group with any other existing groups that overlap.\n group = set(pids)\n for p in pids:\n if p in terminators:\n group.update(terminators[p])\n\n # TODO: I think this is supposed to be\n # `for p in group:`\n # instead of in pids.\n for p in pids: # Add the group to the dictionary of terminators.\n terminators[p] = group\n\n return terminators\n\n @method_file_cache('network.pkl')\n def _build_networks(self, network_type):\n print(\"Building Routing Networks...\")\n\n # Create a basic network for both LION and CSCL\n # This is needed to create the physical_id geometry, at least.\n lion_network = build_monodirectional_network(self.lion_df, 'segment_id')\n cscl_network = build_monodirectional_network(self.lion_df, 'physical_id')\n drop_internal_nodes(cscl_network)\n\n # Update nodes with whether it's an internal node or not.\n cscl_nodes = [n.split(':')[1] for n in cscl_network.nodes if 'node' in n]\n self.nodes_df['internal'] = ~self.nodes_df['node'].isin(cscl_nodes)\n nodes = _get_nodes_dict(self.nodes_df)\n\n def merge_geometry(group):\n \"\"\"\n Merge the segments into a physical ID to get attributes that depend\n on the order of the segments.\n\n Parameters\n ----------\n group : pandas.DataFrame\n A subset of lion_df with all the segments of a single physical_id\n\n Returns\n -------\n shapely.LineString\n \"\"\"\n # Get all the segments and nodes that are part of the given physical_id\n nodes = set(\n ['segment_id:%s' % i for i in group['segment_id']] +\n ['node:%s' % i for i in group['node_from']] +\n ['node:%s' % i for i in group['node_to']]\n )\n\n # Get a subgraph of lion_network with all the nodes and segments\n # and all the edges between them.\n subnetwork = lion_network.subgraph(nodes)\n\n # Next, order all of the nodes in subnetwork so that the geometry\n # will be in the right order to be merged.\n try:\n # Most physical_ids can be sorted via topological_sort\n stretch = nx.topological_sort(subnetwork)\n 
segments = [s.split(':')[1] for s in stretch if 'segment_id' in s]\n except:\n # But a few physical ids have circles/cycles, so in that case,\n # get an order by trying all combinations of start/end point\n # and use the first one that contains all the nodes.\n for a, b in itertools.combinations(nodes, 2):\n try:\n stretch = nx.shortest_path(subnetwork, a, b)\n if len(stretch) == len(nodes):\n break\n except:\n pass\n segments = [s.split(':')[1] for s in stretch if 'segment_id' in s]\n\n # Create a LineString from the individual coordinates in the\n # ordered list of segments.\n coords = [\n c for s in segments\n for c in self.lion_segments[s]['geometry'].coords\n ]\n coords = drop_consecutive_duplicates(coords)\n return LineString(coords)\n\n geometry = self.lion_df.groupby('physical_id').apply(\n merge_geometry\n ).rename('geometry')\n\n terminators = self._get_terminators(nodes)\n terminators = self.lion_df.groupby('physical_id').apply(\n lambda g: terminators.get(g.iloc[0]['physical_id'])\n ).rename('terminator_group')\n\n # Create the DataFrame and dict for physical_ids\n cscl_segments_df = _get_cscl_segments_df(self.lion_df)\n cscl_segments_df = cscl_segments_df.join(\n geometry, how='left'\n ).join(terminators, how='left')\n cscl_segments = cscl_segments_df.to_dict('index')\n\n if self.network_type == 'cscl':\n segments = cscl_segments\n network = cscl_network\n else:\n segments = self.lion_segments\n network = lion_network\n\n # Create a directional network of the given type\n # This network still has nodes\n node_network = build_directional_network(network, segments)\n\n # Create a network where segments connect directly to segments.\n segment_network = build_segment_network(\n node_network, default_cost_function(\n segments, nodes, turn_cost=100000, intersection_cost=0\n )\n )\n\n return (\n cscl_segments_df, cscl_segments, node_network, segment_network,\n self.nodes_df, nodes\n )\n\n def get_segment(self, segment):\n \"\"\"\n Given a segment_id string, return a dictionary from `self.segments`.\n\n Parameters\n ----------\n segment : str\n A segment_id in a format accepted by `parse_geometry`.\n\n Returns\n -------\n dict\n \"\"\"\n type_, id_, side = self.geometry_regex.match(segment).groups()\n return self.segments.get(id_, None)\n\n def normalize_segment_id(self, id_):\n if self.segment_column == 'segment_id':\n return id_.zfill(7)\n else:\n return str(int(id_))\n\n def parse_geometry(self, geometry):\n \"\"\"\n Parse a \"geometry\" string and return a standardized geometry string,\n the type, id, and side of street.\n\n A geometry string is the type of geometry, followed by the id and\n optionally a side of street.\n\n For example:\n\n node:0055555M\n segment_id:0005555L\n physical_id:555R\n\n Parameters\n ----------\n geometry : str\n\n Returns\n -------\n geometry : str\n The geometry in a standardized format\n type : str\n node, segment_id or physical_id\n id : str\n The geometry ID\n side_of_street : str\n A letter for the side of street.
One of: '', 'R', 'L', 'E', 'B'\n \"\"\"\n try:\n type_, id_, letter = self.geometry_regex.match(geometry).groups()\n if not type_:\n if (id_.zfill(7) + letter) in self.nodes:\n type_ = 'node'\n elif self.normalize_segment_id(id_) in self.segments:\n type_ = self.segment_column\n\n assert type_ in ['node', self.segment_column]\n if type_ == 'node':\n id_ = id_.zfill(7) + letter\n letter = ''\n assert id_ in self.nodes\n else:\n assert letter in ['', 'R', 'L', 'E', 'B']\n id_ = self.normalize_segment_id(id_)\n assert id_ in self.segments\n\n\n return '%s:%s%s' % (type_, id_, letter), type_, id_, letter\n except:\n raise ValueError(\"Unrecognized geometry: %s\" % geometry)\n\n\n def get_street_stretch_by_geometry(self, geometry_1, geometry_2,\n on_street_code=None):\n \"\"\"\n Given two endpoint geometries, return a shortest path street stretch.\n Geometries can either be nodes or segments or a combination of the two.\n\n For nodes, if an optional on_street_code is provided, only start and\n end on streets on the given street code.\n\n This function works by adding temporary nodes to the Geocoder's\n segment_network called START and END. START connects to\n geometry_1 and END connects to geometry_2.\n\n Then find a shortest path from START to END and return it as a\n StreetStretch.\n\n This function will always return a result if there is a possible path\n from geometry_1 to geometry_2 even if it is not actually a \"stretch\"\n (not all along one on street). You can use the StreetStretch object's\n attributes to determine if the stretch is valid for your use case.\n\n Parameters\n ----------\n geometry_1, geometry_2 : str\n Start and endpoints for the stretch\n on_street_code : str, optional\n An on street code to start and end on.\n\n Returns\n -------\n StreetStretch\n \"\"\"\n geometry_1, type_1, id_1, side_1 = self.parse_geometry(geometry_1)\n geometry_2, type_2, id_2, side_2 = self.parse_geometry(geometry_2)\n\n # Add start node (START -> geometry_1)\n if type_1 == 'node':\n # Since we will be routing on the segment network, for nodes connect\n # START to all segments that the node connects to.\n for segment in self.node_network[geometry_1]:\n # If on_street_code is provided, only connect to segments\n # on the given street.\n if (\n (on_street_code is None) or\n (on_street_code in self.get_segment(segment)['street_code'])\n ):\n self.segment_network.add_edge('START', segment, weight=1)\n elif type_1 == self.segment_column:\n # For segments, connect the segment itself.\n # If side isn't given, allow both sides.\n if not side_1:\n for side in ['L', 'R']:\n self.segment_network.add_edge('START', geometry_1 + side, weight=1)\n else:\n self.segment_network.add_edge('START', geometry_1, weight=1)\n\n # Add End Node in the same fashion, but connecting geometry_2 -> END\n if type_2 == 'node':\n for segment in self.node_network.predecessors(geometry_2):\n if (\n (on_street_code is None) or\n (on_street_code in self.get_segment(segment)['street_code'])\n ):\n self.segment_network.add_edge(segment, 'END', weight=1)\n elif type_2 == self.segment_column:\n if not side_2:\n for side in ['L', 'R']:\n self.segment_network.add_edge(geometry_2 + side, 'END', weight=1)\n else:\n self.segment_network.add_edge(geometry_2, 'END', weight=1)\n\n try:\n path = nx.bidirectional_dijkstra(\n self.segment_network, 'START', 'END', weight='weight'\n )[1]\n return StreetStretch(self, path[1:-1])\n finally:\n # Even if there's an error, make sure to remove START and END from\n # the network.\n for n in ['START', 
'END']:\n self.segment_network.remove_node(n)\n\n def get_street_stretch_by_code(self, on_street_code, from_street_code,\n to_street_code):\n \"\"\"\n Given an on street code, from street code and to street code,\n return a list of possible stretches.\n\n There can be more than one because sometimes streets intersect multiple\n times.\n\n Parameters\n ----------\n on_street_code, from_street_code, to_street_code : str\n The street codes of the on, from and to streets\n\n Returns\n -------\n list of StreetStretch\n A list of stretches for all combinations of from and to\n intersections sorted from shortest to longest.\n \"\"\"\n on_street_code = self.normalize_street_code(on_street_code)\n from_street_code = self.normalize_street_code(from_street_code)\n to_street_code = self.normalize_street_code(to_street_code)\n\n nodes_from = self._get_intersection(on_street_code, from_street_code)\n nodes_to = self._get_intersection(on_street_code, to_street_code)\n\n stretches = []\n\n for node_from in nodes_from:\n for node_to in nodes_to:\n if node_from != node_to:\n try:\n stretches.append(self.get_street_stretch_by_geometry(\n 'node:' + node_from, 'node:' + node_to,\n on_street_code=on_street_code\n ))\n except:\n pass\n\n return sorted(stretches, key=len)\n\n def __str__(self):\n return \"Geocoder (lion_version=%s, crs=%s, index_crs=%s)\" % (\n self.lion_version, self.crs, self.spatial_index_crs\n )\n" ]
[ [ "pandas.DataFrame.from_records", "pandas.concat", "numpy.log", "pandas.isnull" ] ]
chrdiller/mitsuba-visualize
[ "b6dfa8b23638b6cb805aa28107f718aea3a9462a" ]
[ "math_util/util.py" ]
[ "\nimport numpy as np\n\n\ndef find_closest_orthogonal_matrix(A: np.ndarray) -> np.ndarray:\n \"\"\"\n Find closest orthogonal matrix to *A* using iterative method.\n Based on the code from REMOVE_SOURCE_LEAKAGE function from OSL Matlab package.\n Reading:\n Colclough GL et al., A symmetric multivariate leakage correction for MEG connectomes.,\n Neuroimage. 2015 Aug 15;117:439-48. doi: 10.1016/j.neuroimage.2015.03.071\n\n :param A: array shaped k, n, where k is number of channels, n - data points\n :return: Orthogonalized matrix with amplitudes preserved\n \"\"\"\n # Code from https://gist.github.com/dokato/7a997b2a94a0ec6384a5fd0e91e45f8b\n MAX_ITER = 2000\n TOLERANCE = np.max((1, np.max(A.shape) * np.linalg.svd(A.T, False, False)[0])) * np.finfo(A.dtype).eps\n reldiff = lambda a, b: 2 * abs(a - b) / (abs(a) + abs(b))\n convergence = lambda rho, prev_rho: reldiff(rho, prev_rho) <= TOLERANCE\n\n A_b = A.conj()\n d = np.sqrt(np.sum(A * A_b, axis=1))\n\n rhos = np.zeros(MAX_ITER)\n\n for i in range(MAX_ITER):\n scA = A.T * d\n u, s, vh = np.linalg.svd(scA, False)\n V = np.dot(u, vh)\n d = np.sum(A_b * V.T, axis=1)\n\n L = (V * d).T\n E = A - L\n rhos[i] = np.sqrt(np.sum(E * E.conj()))\n if i > 0 and convergence(rhos[i], rhos[i - 1]):\n break\n\n return L\n\n\nif __name__ == \"__main__\":\n raise NotImplementedError(\"Cannot call this math_util script directly\")\n" ]
[ [ "numpy.dot", "numpy.linalg.svd", "numpy.finfo", "numpy.max", "numpy.zeros", "numpy.sum" ] ]
jlauman/data_engineering_project_03
[ "722c0f5226ed29c00d6b33e64da5982fe0be69e0" ]
[ "rs_etl.py" ]
[ "import configparser, os, glob, csv, json, hashlib, time\nimport pandas as pd\nimport psycopg2\nfrom pprint import pprint\nfrom rs_sql_queries import staging_events_insert, staging_songs_insert\nfrom rs_sql_queries import insert_table_queries\n\nimport boto3\nfrom botocore import UNSIGNED\nfrom botocore.config import Config\n\n\nDEND_BUCKET='udacity-dend'\n\n# global lookup table\nNAME_TO_GENDER = {}\n\n\ndef load_gender_lookup():\n \"\"\"Load lookup dictionary to find gender given a name.\n \"\"\"\n base_path = os.getcwd() + '/data/names'\n for root, dirs, files in os.walk(base_path):\n file_paths = glob.glob(os.path.join(root,'*.txt'))\n for file_path in file_paths:\n print('names: %s' % file_path)\n with open(file_path) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n # pprint(row)\n NAME_TO_GENDER[row[0]] = row[1]\n # pprint(NAME_TO_GENDER)\n True\n\n\ndef get_object_paths(s3, bucket, prefix):\n \"\"\"List objects in S3 bucket with given prefix.\n Uses paginator to ensure a complete list of object paths is returned.\n \"\"\"\n # r1 = s3.list_objects(Bucket=DEND_BUCKET, Prefix=prefix)\n # r2 = list(map(lambda obj: obj['Key'], r1['Contents']))\n # r3 = list(filter(lambda str: str.endswith('.json'), r2))\n # s3 client does not need to be closed\n object_paths = []\n paginator = s3.get_paginator('list_objects')\n pages = paginator.paginate(Bucket=bucket, Prefix=prefix)\n for page in pages:\n # print(\"len(page['Contents'])=\" + str(len(page['Contents'])))\n r1 = list(map(lambda obj: obj['Key'], page['Contents']))\n r2 = list(filter(lambda str: str.endswith('.json'), r1))\n object_paths.extend(r2)\n print('%s/%s total object paths = %d' % (bucket, prefix, len(object_paths)))\n time.sleep(2)\n return object_paths\n\n\ndef load_staging_log_data(cur, conn):\n \"\"\"Load song-play event records into s_songplay_event table.\n \"\"\"\n # import pdb; pdb.set_trace()\n # load log_data (events) into s_event table\n s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))\n file_paths = get_object_paths(s3, DEND_BUCKET, 'log_data')\n pprint(file_paths)\n for file_path in file_paths:\n sql = str(staging_events_insert)\n print('log_data: %s' % file_path)\n obj1 = s3.get_object(Bucket='udacity-dend', Key=file_path)\n str1 = obj1['Body'].read().decode('utf-8').strip()\n df = pd.read_json(str1, lines=True)\n df = df[df.page == 'NextSong']\n df['timestamp'] = pd.to_datetime(df['ts'], unit='ms')\n df['year'] = df['timestamp'].dt.year\n df['week'] = df['timestamp'].dt.weekofyear\n df['month'] = df['timestamp'].dt.month\n df['day'] = df['timestamp'].dt.day\n df['hour'] = df['timestamp'].dt.hour\n df['weekday'] = df['timestamp'].dt.weekday\n # pprint(df)\n for index, row in df.iterrows():\n # create a sha256 hash for event's unique id\n event_id = hashlib.sha256((str(row.userId) + ' ' + str(row.sessionId) + ' ' + row.timestamp.strftime('%Y%m%d%H%M') + ' ' + row.song).encode('utf-8')).hexdigest()\n str1 = (\"(\" +\n \"'\" + event_id + \"', \" +\n \"'\" + row.artist.replace(\"'\", \"''\") + \"', \" +\n \"'\" + row.auth + \"', \" +\n \"'\" + row.firstName.replace(\"'\", \"''\") + \"', \" +\n \"\" + str(row.itemInSession) + \", \" +\n \"'\" + row.lastName.replace(\"'\", \"''\") + \"', \" +\n \"'\" + NAME_TO_GENDER[row.firstName] + \"', \" +\n \"\" + str(row.length) + \", \" +\n \"'\" + row.level + \"', \" +\n \"'\" + row.location.replace(\"'\", \"''\") + \"', \" +\n \"'\" + row.method + \"', \" +\n \"'\" + row.page + \"', \" +\n \"'\" + str(row.registration) + 
\"', \" +\n \"'\" + str(row.sessionId) + \"', \" +\n \"'\" + row.song.replace(\"'\", \"''\") + \"', \" +\n \"'\" + str(row.status) + \"', \" +\n \"'\" + row.timestamp.strftime('%Y-%m-%d %H') + \"', \" +\n \"\" + str(row.year) + \", \" +\n \"\" + str(row.week) + \", \" +\n \"\" + str(row.month) + \", \" +\n \"\" + str(row.day) + \", \" +\n \"\" + str(row.hour) + \", \" +\n \"\" + str(row.weekday) + \", \" +\n \"'\" + row.userAgent.replace(\"'\", \"''\") + \"', \" +\n \"'\" + str(row.userId) + \"'\" +\n \"),\\n\")\n sql += str1\n sql = ''.join(sql).strip()[:-1] + ';'\n # print(sql)\n # import pdb; pdb.set_trace()\n cur.execute(sql)\n conn.commit()\n\n\ndef load_staging_song_data(cur, conn):\n \"\"\"Load song records into s_song staging table.\n \"\"\"\n sql = str(staging_songs_insert)\n s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))\n file_paths = get_object_paths(s3, DEND_BUCKET, 'song_data')\n pprint(file_paths)\n for file_path in file_paths:\n print('song_data: %s' % file_path)\n obj1 = s3.get_object(Bucket='udacity-dend', Key=file_path)\n str1 = obj1['Body'].read().decode('utf-8').strip()\n data = json.loads(str1)\n if data['year'] == 0: data['year'] = None\n # fix link string...\n if str(data['artist_location']).startswith('<a'): data['artist_location'] = None\n # pprint(data)\n str2 = (\"(\" +\n \"'\" + data['artist_id'] + \"', \" +\n \"\" + (str(data['artist_latitude']) if not data['artist_latitude'] == None else 'null') + \", \" +\n \"'\" + str(data['artist_location']).replace(\"'\", \"''\") + \"', \" +\n \"\" + (str(data['artist_longitude']) if not data['artist_longitude'] == None else 'null') + \", \" +\n \"'\" + str(data['artist_name']).replace(\"'\", \"''\") + \"', \" +\n \"\" + str(data['duration']) + \", \" +\n \"\" + str(data['num_songs']) + \", \" +\n \"'\" + data['song_id'] + \"', \" +\n \"'\" + str(data['title']).replace(\"'\", \"''\") + \"', \" +\n \"\" + (str(data['year']) if not data['year'] == None else 'null') + \"\" +\n \"),\\n\")\n sql += str2\n # print(str2)\n # batch inserts at 8k character threshold\n if len(sql) > 8192:\n print(' 8k insert...')\n sql = ''.join(sql).strip()[:-1] + ';'\n cur.execute(sql)\n conn.commit()\n sql = str(staging_songs_insert)\n print('last insert...')\n sql = ''.join(sql).strip()[:-1] + ';'\n # print(sql)\n # import pdb; pdb.set_trace()\n cur.execute(sql)\n conn.commit()\n\n\ndef load_staging_tables(cur, conn):\n load_staging_song_data(cur, conn)\n load_staging_log_data(cur, conn)\n\n\ndef insert_tables(cur, conn):\n \"\"\"Populate staging, dimension and fact tables.\n The fact table must be the last item in the query list.\n \"\"\"\n for query in insert_table_queries:\n if query.strip() != \"\":\n pprint(query)\n cur.execute(query)\n conn.commit()\n\n\ndef main():\n \"\"\"Run Redshift ETL for staging, dimension and fact tables.\n \"\"\"\n config = configparser.ConfigParser()\n config.read('rs_dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n\n load_gender_lookup()\n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "pandas.to_datetime", "pandas.read_json" ] ]
fcole90/demotivational-policy-descent
[ "9193487587c03530bf5a962fda7b6ac5a4ceaae4" ]
[ "demotivational_policy_descent/agents/actor_critic_old.py" ]
[ "import logging\nimport torch.nn.functional as F\nfrom torch.distributions import Normal\nimport numpy as np\nimport torch\n\nfrom demotivational_policy_descent.agents.agent_interface import AgentInterface\nfrom demotivational_policy_descent.utils.utils import discount_rewards, softmax_sample\n\nclass Policy(torch.nn.Module):\n def __init__(self, state_space, action_space):\n super().__init__()\n # Create layers etc\n self.state_space = state_space\n self.action_space = action_space\n self.fc1 = torch.nn.Linear(state_space, 50)\n self.fc_mean = torch.nn.Linear(50, action_space)\n self.fc_s = torch.nn.Linear(50, action_space)\n self.fc_v = torch.nn.Linear(50, action_space)\n\n # Initialize neural network weights\n self.init_weights()\n\n def init_weights(self):\n for m in self.modules():\n if type(m) is torch.nn.Linear:\n torch.nn.init.uniform_(m.weight)\n torch.nn.init.zeros_(m.bias)\n\n def forward(self, x):\n x = self.fc1(x)\n x = F.relu(x)\n mean = self.fc_mean(x)\n s = self.fc_s(x)\n s = torch.sigmoid(s)\n v = self.fc_v(x)\n return mean, s, v\n\n\nclass ActorCritic(AgentInterface):\n def __init__(self, env, state_space, action_space, policy, player_id:int=1):\n super().__init__(env=env, player_id=player_id)\n\n self.train_device = \"cpu\" #if torch.cuda.is_available() else \"cpu\"\n self.policy = policy.to(self.train_device)\n self.optimizer = torch.optim.RMSprop(policy.parameters(), lr=5e-3)\n self.batch_size = 1\n self.gamma = 0.98\n\n self.reset() # Call reset here to avoid code duplication!\n\n def reset(self):\n logging.debug(\"Resetting parameters...\")\n self.observations = []\n self.actions = []\n self.rewards = []\n self.dones = []\n self.next_states = []\n logging.debug(\"Reset!\")\n\n def episode_finished(self, episode_number):\n # Stack data\n all_actions = torch.stack(self.actions, dim=0).to(self.train_device).squeeze(-1)\n all_states = torch.stack([o for o in self.observations], dim=0).to(self.train_device).squeeze(-1)\n all_next_states = torch.stack([o for o in self.next_states], dim=0).to(self.train_device).squeeze(-1)\n all_rewards = torch.stack(self.rewards, dim=0).to(self.train_device).squeeze(-1)\n all_done = torch.stack([torch.Tensor(o) for o in self.dones], dim=0).to(self.train_device).squeeze(-1)\n\n\n self.reset()\n\n # compute state value estimates\n _, _, v_old = self.policy(all_states)\n _, _, v_next_state = self.policy(all_next_states)\n\n # zero out for terminal states\n v_next_state = v_next_state.squeeze(-1) * (1 - all_done)\n\n # detach\n v_next_state = v_next_state.detach()\n v_old = v_old.squeeze(-1)\n\n # normalise rewards\n discounted_rewards = discount_rewards(all_rewards, self.gamma)\n discounted_rewards -= torch.mean(discounted_rewards)\n discounted_rewards /= torch.std(discounted_rewards)\n\n # estimate of state value and critic loss\n updated_state_values = all_rewards + self.gamma * v_next_state\n\n # delta and normalised\n delta = updated_state_values - v_old\n\n # critic_loss = F.mse_loss(delta)\n critic_loss = torch.sum(delta ** 2)\n\n delta -= torch.mean(delta)\n delta /= torch.std(delta)\n delta = delta.detach()\n\n # actor update\n weighted_probs = all_actions * delta\n actor_loss = torch.sum(weighted_probs)\n\n loss = actor_loss + 0.5 * critic_loss\n loss.backward()\n\n if (episode_number + 1) % self.batch_size == 0:\n self.update_policy()\n\n def update_policy(self):\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n def get_action(self, observation, evaluation=False, frame: np.array=None) -> int:\n observation = 
observation.flatten()\n x = torch.from_numpy(observation).float().to(self.train_device)#float().to(self.train_device)\n mean, s, v = self.policy.forward(x)\n if evaluation:\n action = np.argmax(mean)\n else:\n action = Normal(loc=mean, scale=s).sample()\n\n log_prob = Normal(loc=mean, scale=s).log_prob(action)\n chosen_action = softmax_sample(torch.exp(log_prob))\n return chosen_action, log_prob[chosen_action], v\n\n def fix_negative_strides(self, observation):\n fixed_observation = observation.copy()\n del observation\n return fixed_observation\n\n\n def store_outcome(self, observation, next_state, action_output, reward, done):\n try:\n self.observations.append(torch.Tensor(observation))\n except ValueError:\n self.observations.append(torch.Tensor(self.fix_negative_strides(observation)))\n\n try:\n self.next_states.append(torch.Tensor(next_state))\n except ValueError:\n self.next_states.append(torch.Tensor(self.fix_negative_strides(next_state)))\n\n self.actions.append(-action_output)\n self.rewards.append(torch.Tensor([reward]))\n self.dones.append([done])\n\n\nif __name__ == \"__main__\":\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n # __init__ requires state_space, action_space and a policy, so build a\n # minimal dummy policy for this smoke test.\n dummy = ActorCritic(env=None, state_space=1, action_space=1,\n policy=Policy(1, 1), player_id=1)\n dummy.test_attribute = 100\n name = \"actor_critic.mdl\"\n dummy.save_model(name)\n dummy.test_attribute = 200\n dummy.load_model(name)\n assert dummy.test_attribute == 100\n dummy.reset()\n # reset() only clears the rollout buffers; other attributes are untouched.\n assert dummy.test_attribute == 100\n print(\"Dummy action\", dummy.get_action(np.zeros(1)))\n" ]
[ [ "torch.mean", "torch.sigmoid", "torch.nn.init.uniform_", "torch.Tensor", "torch.sum", "torch.from_numpy", "torch.exp", "torch.nn.Linear", "torch.nn.functional.relu", "torch.std", "numpy.argmax", "torch.nn.init.zeros_", "torch.stack", "numpy.zeros", "torch.distributions.Normal" ] ]
ssakhavi/pytorch-lightning
[ "fd7814d287a86046bdda0367e02085a8b709fe33" ]
[ "tests/models/test_cpu.py" ]
[ "import os\nimport platform\nfrom collections import namedtuple\n\nimport pytest\nimport torch\nfrom packaging.version import parse as version_parse\n\nimport tests.base.utils as tutils\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import EarlyStopping\nfrom tests.base import EvalModelTemplate\nfrom pytorch_lightning.callbacks import ModelCheckpoint\n\n\ndef test_cpu_slurm_save_load(tmpdir):\n \"\"\"Verify model save/load/checkpoint on CPU.\"\"\"\n hparams = EvalModelTemplate.get_default_hparams()\n model = EvalModelTemplate(**hparams)\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n version = logger.version\n\n # fit model\n trainer = Trainer(\n max_epochs=1,\n logger=logger,\n train_percent_check=0.2,\n val_percent_check=0.2,\n checkpoint_callback=ModelCheckpoint(tmpdir)\n )\n result = trainer.fit(model)\n real_global_step = trainer.global_step\n\n # traning complete\n assert result == 1, 'cpu model failed to complete'\n\n # predict with trained model before saving\n # make a prediction\n dataloaders = model.test_dataloader()\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n for dataloader in dataloaders:\n for batch in dataloader:\n break\n\n x, y = batch\n x = x.view(x.size(0), -1)\n\n model.eval()\n pred_before_saving = model(x)\n\n # test HPC saving\n # simulate snapshot on slurm\n saved_filepath = trainer.hpc_save(tmpdir, logger)\n assert os.path.exists(saved_filepath)\n\n # new logger file to get meta\n logger = tutils.get_default_logger(tmpdir, version=version)\n\n trainer = Trainer(\n max_epochs=1,\n logger=logger,\n checkpoint_callback=ModelCheckpoint(tmpdir),\n )\n model = EvalModelTemplate(**hparams)\n\n # set the epoch start hook so we can predict before the model does the full training\n def assert_pred_same():\n assert trainer.global_step == real_global_step and trainer.global_step > 0\n\n # predict with loaded model to make sure answers are the same\n trainer.model.eval()\n new_pred = trainer.model(x)\n assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1\n\n model.on_epoch_start = assert_pred_same\n\n # by calling fit again, we trigger training, loading weights from the cluster\n # and our hook to predict using current model before any more weight updates\n trainer.fit(model)\n\n\ndef test_early_stopping_cpu_model(tmpdir):\n \"\"\"Test each of the trainer options.\"\"\"\n stopping = EarlyStopping(monitor='val_loss', min_delta=0.1)\n trainer_options = dict(\n default_root_dir=tmpdir,\n early_stop_callback=stopping,\n max_epochs=2,\n gradient_clip_val=1.0,\n overfit_pct=0.20,\n track_grad_norm=2,\n train_percent_check=0.1,\n val_percent_check=0.1,\n )\n\n model = EvalModelTemplate()\n tutils.run_model_test(trainer_options, model, on_gpu=False)\n\n # test freeze on cpu\n model.freeze()\n model.unfreeze()\n\n\n@pytest.mark.spawn\n@pytest.mark.skipif(platform.system() == \"Windows\",\n reason=\"Distributed training is not supported on Windows\")\n@pytest.mark.skipif((platform.system() == \"Darwin\" and\n version_parse(torch.__version__) < version_parse(\"1.3.0\")),\n reason=\"Distributed training is not supported on MacOS before Torch 1.3.0\")\ndef test_multi_cpu_model_ddp(tmpdir):\n print('in ddp test')\n \"\"\"Make sure DDP works.\"\"\"\n tutils.set_random_master_port()\n\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.2,\n gpus=None,\n num_processes=2,\n 
distributed_backend='ddp_cpu'\n )\n\n model = EvalModelTemplate()\n tutils.run_model_test(trainer_options, model, on_gpu=False)\n\n\ndef test_lbfgs_cpu_model(tmpdir):\n \"\"\"Test each of the trainer options.\"\"\"\n trainer_options = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n progress_bar_refresh_rate=0,\n weights_summary='top',\n train_percent_check=0.2,\n val_percent_check=0.2,\n )\n\n hparams = EvalModelTemplate.get_default_hparams()\n hparams.update(optimizer_name='lbfgs',\n learning_rate=0.004)\n model = EvalModelTemplate(**hparams)\n model.configure_optimizers = model.configure_optimizers__lbfgs\n tutils.run_model_test_without_loggers(trainer_options, model, min_acc=0.25)\n\n\ndef test_default_logger_callbacks_cpu_model(tmpdir):\n \"\"\"Test each of the trainer options.\"\"\"\n trainer_options = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n gradient_clip_val=1.0,\n overfit_pct=0.20,\n progress_bar_refresh_rate=0,\n train_percent_check=0.01,\n val_percent_check=0.01,\n )\n\n model = EvalModelTemplate()\n tutils.run_model_test_without_loggers(trainer_options, model)\n\n # test freeze on cpu\n model.freeze()\n model.unfreeze()\n\n\ndef test_running_test_after_fitting(tmpdir):\n \"\"\"Verify test() on fitted model.\"\"\"\n model = EvalModelTemplate()\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # logger file to get weights\n checkpoint = tutils.init_checkpoint_callback(logger)\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=2,\n train_percent_check=0.4,\n val_percent_check=0.2,\n test_percent_check=0.2,\n checkpoint_callback=checkpoint,\n logger=logger\n )\n result = trainer.fit(model)\n\n assert result == 1, 'training failed to complete'\n\n trainer.test()\n\n # test we have good test accuracy\n tutils.assert_ok_model_acc(trainer, thr=0.5)\n\n\ndef test_running_test_no_val(tmpdir):\n \"\"\"Verify `test()` works on a model with no `val_loader`.\"\"\"\n model = EvalModelTemplate()\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # logger file to get weights\n checkpoint = tutils.init_checkpoint_callback(logger)\n\n # fit model\n trainer = Trainer(\n progress_bar_refresh_rate=0,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.2,\n test_percent_check=0.2,\n checkpoint_callback=checkpoint,\n logger=logger,\n early_stop_callback=False\n )\n result = trainer.fit(model)\n\n assert result == 1, 'training failed to complete'\n\n trainer.test()\n\n # test we have good test accuracy\n tutils.assert_ok_model_acc(trainer)\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"test requires GPU machine\")\ndef test_single_gpu_batch_parse():\n trainer = Trainer()\n\n # batch is just a tensor\n batch = torch.rand(2, 3)\n batch = trainer.transfer_batch_to_gpu(batch, 0)\n assert batch.device.index == 0 and batch.type() == 'torch.cuda.FloatTensor'\n\n # tensor list\n batch = [torch.rand(2, 3), torch.rand(2, 3)]\n batch = trainer.transfer_batch_to_gpu(batch, 0)\n assert batch[0].device.index == 0 and batch[0].type() == 'torch.cuda.FloatTensor'\n assert batch[1].device.index == 0 and batch[1].type() == 'torch.cuda.FloatTensor'\n\n # tensor list of lists\n batch = [[torch.rand(2, 3), torch.rand(2, 3)]]\n batch = trainer.transfer_batch_to_gpu(batch, 0)\n assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'\n assert batch[0][1].device.index == 0 and batch[0][1].type() == 'torch.cuda.FloatTensor'\n\n # tensor dict\n 
batch = [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)}]\n batch = trainer.transfer_batch_to_gpu(batch, 0)\n assert batch[0]['a'].device.index == 0 and batch[0]['a'].type() == 'torch.cuda.FloatTensor'\n assert batch[0]['b'].device.index == 0 and batch[0]['b'].type() == 'torch.cuda.FloatTensor'\n\n # tuple of tensor list and list of tensor dict\n batch = ([torch.rand(2, 3) for _ in range(2)],\n [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)} for _ in range(2)])\n batch = trainer.transfer_batch_to_gpu(batch, 0)\n assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'\n\n assert batch[1][0]['a'].device.index == 0\n assert batch[1][0]['a'].type() == 'torch.cuda.FloatTensor'\n\n assert batch[1][0]['b'].device.index == 0\n assert batch[1][0]['b'].type() == 'torch.cuda.FloatTensor'\n\n # namedtuple of tensor\n BatchType = namedtuple('BatchType', ['a', 'b'])\n batch = [BatchType(a=torch.rand(2, 3), b=torch.rand(2, 3)) for _ in range(2)]\n batch = trainer.transfer_batch_to_gpu(batch, 0)\n assert batch[0].a.device.index == 0\n assert batch[0].a.type() == 'torch.cuda.FloatTensor'\n\n\ndef test_simple_cpu(tmpdir):\n \"\"\"Verify continue training session on CPU.\"\"\"\n model = EvalModelTemplate()\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_percent_check=0.1,\n train_percent_check=0.1,\n )\n result = trainer.fit(model)\n\n # training complete\n assert result == 1, 'training failed to complete'\n\n\ndef test_cpu_model(tmpdir):\n \"\"\"Make sure model trains on CPU.\"\"\"\n trainer_options = dict(\n default_root_dir=tmpdir,\n progress_bar_refresh_rate=0,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.4\n )\n\n model = EvalModelTemplate()\n\n tutils.run_model_test(trainer_options, model, on_gpu=False)\n\n\ndef test_all_features_cpu_model(tmpdir):\n \"\"\"Test each of the trainer options.\"\"\"\n trainer_options = dict(\n default_root_dir=tmpdir,\n gradient_clip_val=1.0,\n overfit_pct=0.20,\n track_grad_norm=2,\n progress_bar_refresh_rate=0,\n accumulate_grad_batches=2,\n max_epochs=1,\n train_percent_check=0.4,\n val_percent_check=0.4\n )\n\n model = EvalModelTemplate()\n tutils.run_model_test(trainer_options, model, on_gpu=False)\n\n\ndef test_tbptt_cpu_model(tmpdir):\n \"\"\"Test truncated back propagation through time works.\"\"\"\n truncated_bptt_steps = 2\n sequence_size = 30\n batch_size = 30\n\n x_seq = torch.rand(batch_size, sequence_size, 1)\n y_seq_list = torch.rand(batch_size, sequence_size, 1).tolist()\n\n class MockSeq2SeqDataset(torch.utils.data.Dataset):\n def __getitem__(self, i):\n return x_seq, y_seq_list\n\n def __len__(self):\n return 1\n\n class BpttTestModel(EvalModelTemplate):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.test_hidden = None\n\n def training_step(self, batch, batch_idx, hiddens):\n assert hiddens == self.test_hidden, \"Hidden state not persistent between tbptt steps\"\n self.test_hidden = torch.rand(1)\n\n x_tensor, y_list = batch\n assert x_tensor.shape[1] == truncated_bptt_steps, \"tbptt split Tensor failed\"\n\n y_tensor = torch.tensor(y_list, dtype=x_tensor.dtype)\n assert y_tensor.shape[1] == truncated_bptt_steps, \"tbptt split list failed\"\n\n pred = self(x_tensor.view(batch_size, truncated_bptt_steps))\n loss_val = torch.nn.functional.mse_loss(\n pred, y_tensor.view(batch_size, truncated_bptt_steps))\n return {\n 'loss': loss_val,\n 'hiddens': self.test_hidden,\n }\n\n def train_dataloader(self):\n return
torch.utils.data.DataLoader(\n dataset=MockSeq2SeqDataset(),\n batch_size=batch_size,\n shuffle=False,\n sampler=None,\n )\n\n hparams = EvalModelTemplate.get_default_hparams()\n hparams.update(\n batch_size=batch_size,\n in_features=truncated_bptt_steps,\n hidden_dim=truncated_bptt_steps,\n out_features=truncated_bptt_steps\n )\n\n model = BpttTestModel(**hparams)\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n truncated_bptt_steps=truncated_bptt_steps,\n val_percent_check=0,\n weights_summary=None,\n early_stop_callback=False\n )\n result = trainer.fit(model)\n\n assert result == 1, 'training failed to complete'\n" ]
[ [ "torch.eq", "torch.rand", "torch.cuda.is_available", "torch.tensor" ] ]
FitzLu/drl-snake-ai
[ "0c0f817fd91ee7061b99f01fed429598252fa54f" ]
[ "train_with_tensorflow.py" ]
[ "import collections\nimport numpy as np\nfrom env.game import SnakeGame\nfrom agent.brain import Agent\n\n\ndef main():\n last_frames_num = 4\n actions_num = 3\n exploration_rate = 1.0\n min_exploration_rate = 0.1\n episode_num = 10000\n exploration_decay = ((exploration_rate - min_exploration_rate) / episode_num)\n\n game = SnakeGame(is_tick=True)\n agent = Agent(last_frames_num, game.observation_shape, actions_num, 0)\n\n for episode in range(episode_num):\n first_step = game.new_round()\n game.render()\n game_over = False\n\n w, h = game.observation_shape\n\n last_frames = collections.deque([first_step.observation] * last_frames_num)\n state = np.array(last_frames)\n state = np.reshape(state, (-1, last_frames_num, w, h))\n\n while not game_over:\n if np.random.random() < exploration_rate:\n action = np.random.randint(actions_num)\n else:\n action = agent.predict(state)\n\n one_step = game.step(action)\n\n reward = one_step.reward\n last_frames.append(one_step.observation)\n last_frames.popleft()\n next_state = np.array(last_frames)\n next_state = np.reshape(next_state, (-1, last_frames_num, w, h))\n game_over = one_step.game_over\n\n agent.store_memory(state, action, reward, next_state)\n agent.learn()\n\n if game_over is True:\n break\n\n state = next_state\n if exploration_rate > min_exploration_rate:\n exploration_rate -= exploration_decay\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.reshape", "numpy.array", "numpy.random.random", "numpy.random.randint" ] ]
dhingratul/Data-Analysis
[ "8aa9695375b143fbbcb1355e9ade7a57ab68592d" ]
[ "Lesson-3/L3_pd_shift.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 19 19:13:02 2017\n\n@author: dhingratul\n\"\"\"\n\nimport pandas as pd\n\n# --- Quiz ---\n# Cumulative entries and exits for one station for a few hours.\nentries_and_exits = pd.DataFrame({\n 'ENTRIESn': [3144312, 3144335, 3144353, 3144424, 3144594,\n 3144808, 3144895, 3144905, 3144941, 3145094],\n 'EXITSn': [1088151, 1088159, 1088177, 1088231, 1088275,\n 1088317, 1088328, 1088331, 1088420, 1088753]\n})\n\n\ndef get_hourly_entries_and_exits(entries_and_exits):\n '''\n Fill in this function to take a DataFrame with cumulative entries\n and exits (entries in the first column, exits in the second) and\n return a DataFrame with hourly entries and exits (entries in the\n first column, exits in the second).\n '''\n df2 = entries_and_exits.shift(periods=1)\n df = entries_and_exits - df2\n df.iloc[0] = entries_and_exits.iloc[0]\n return df\n" ]
[ [ "pandas.DataFrame" ] ]
ucas-vg/PointTinyBenchmark
[ "63fc417c40ed9484f8d7bb6a2212162c9e98b711" ]
[ "TOV_mmdetection/mmdet/core/bbox/assigners/hungarian_assigner.py" ]
[ "import torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..match_costs import build_match_cost\nfrom ..transforms import bbox_cxcywh_to_xyxy\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\ntry:\n from scipy.optimize import linear_sum_assignment\nexcept ImportError:\n linear_sum_assignment = None\n\n\n@BBOX_ASSIGNERS.register_module()\nclass HungarianAssigner(BaseAssigner):\n \"\"\"Computes one-to-one matching between predictions and ground truth.\n\n This class computes an assignment between the targets and the predictions\n based on the costs. The costs are weighted sum of three components:\n classification cost, regression L1 cost and regression iou cost. The\n targets don't include the no_object, so generally there are more\n predictions than targets. After the one-to-one matching, the un-matched\n are treated as backgrounds. Thus each query prediction will be assigned\n with `0` or a positive integer indicating the ground truth index:\n\n - 0: negative sample, no assigned gt\n - positive integer: positive sample, index (1-based) of assigned gt\n\n Args:\n cls_weight (int | float, optional): The scale factor for classification\n cost. Default 1.0.\n bbox_weight (int | float, optional): The scale factor for regression\n L1 cost. Default 1.0.\n iou_weight (int | float, optional): The scale factor for regression\n iou cost. Default 1.0.\n iou_calculator (dict | optional): The config for the iou calculation.\n Default type `BboxOverlaps2D`.\n iou_mode (str | optional): \"iou\" (intersection over union), \"iof\"\n (intersection over foreground), or \"giou\" (generalized\n intersection over union). Default \"giou\".\n \"\"\"\n\n def __init__(self,\n cls_cost=dict(type='ClassificationCost', weight=1.),\n reg_cost=dict(type='BBoxL1Cost', weight=1.0),\n iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):\n self.cls_cost = build_match_cost(cls_cost)\n self.reg_cost = build_match_cost(reg_cost)\n self.iou_cost = build_match_cost(iou_cost)\n\n def assign(self,\n bbox_pred,\n cls_pred,\n gt_bboxes,\n gt_labels,\n img_meta,\n gt_bboxes_ignore=None,\n eps=1e-7):\n \"\"\"Computes one-to-one matching based on the weighted costs.\n\n This method assign each query prediction to a ground truth or\n background. The `assigned_gt_inds` with -1 means don't care,\n 0 means negative sample, and positive number is the index (1-based)\n of assigned gt.\n The assignment is done in the following steps, the order matters.\n\n 1. assign every prediction to -1\n 2. compute the weighted costs\n 3. do Hungarian matching on CPU based on the costs\n 4. assign all to 0 (background) first, then for each matched pair\n between predictions and gts, treat this prediction as foreground\n and assign the corresponding gt index (plus 1) to it.\n\n Args:\n bbox_pred (Tensor): Predicted boxes with normalized coordinates\n (cx, cy, w, h), which are all in range [0, 1]. Shape\n [num_query, 4].\n cls_pred (Tensor): Predicted classification logits, shape\n [num_query, num_class].\n gt_bboxes (Tensor): Ground truth boxes with unnormalized\n coordinates (x1, y1, x2, y2). Shape [num_gt, 4].\n gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).\n img_meta (dict): Meta information for current image.\n gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n labelled as `ignored`. Default None.\n eps (int | float, optional): A value added to the denominator for\n numerical stability. 
Default 1e-7.\n\n Returns:\n :obj:`AssignResult`: The assigned result.\n \"\"\"\n assert gt_bboxes_ignore is None, \\\n 'Only case when gt_bboxes_ignore is None is supported.'\n num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)\n\n # 1. assign -1 by default\n assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),\n -1,\n dtype=torch.long)\n assigned_labels = bbox_pred.new_full((num_bboxes, ),\n -1,\n dtype=torch.long)\n if num_gts == 0 or num_bboxes == 0:\n # No ground truth or boxes, return empty assignment\n if num_gts == 0:\n # No ground truth, assign all to background\n assigned_gt_inds[:] = 0\n return AssignResult(\n num_gts, assigned_gt_inds, None, labels=assigned_labels)\n img_h, img_w, _ = img_meta['img_shape']\n factor = gt_bboxes.new_tensor([img_w, img_h, img_w,\n img_h]).unsqueeze(0)\n\n # 2. compute the weighted costs\n # classification and bbox cost.\n cls_cost = self.cls_cost(cls_pred, gt_labels)\n # regression L1 cost\n normalize_gt_bboxes = gt_bboxes / factor\n reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)\n # regression iou cost, giou is used by default in official DETR.\n bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor\n iou_cost = self.iou_cost(bboxes, gt_bboxes)\n # weighted sum of above three costs\n cost = cls_cost + reg_cost + iou_cost\n\n # 3. do Hungarian matching on CPU using linear_sum_assignment\n cost = cost.detach().cpu()\n if linear_sum_assignment is None:\n raise ImportError('Please run \"pip install scipy\" '\n 'to install scipy first.')\n matched_row_inds, matched_col_inds = linear_sum_assignment(cost)\n matched_row_inds = torch.from_numpy(matched_row_inds).to(\n bbox_pred.device)\n matched_col_inds = torch.from_numpy(matched_col_inds).to(\n bbox_pred.device)\n\n # 4. assign backgrounds and foregrounds\n # assign all indices to backgrounds first\n assigned_gt_inds[:] = 0\n # assign foregrounds based on matching results\n assigned_gt_inds[matched_row_inds] = matched_col_inds + 1\n assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]\n return AssignResult(\n num_gts, assigned_gt_inds, None, labels=assigned_labels)\n\n\n# add by hui ###################################################################################\n@BBOX_ASSIGNERS.register_module()\nclass HungarianAssignerV2(BaseAssigner):\n \"\"\"Computes one-to-one matching between predictions and ground truth.\n \"\"\"\n\n def __init__(self,\n cls_costs=[dict(type='ClassificationCost', weight=1.)],\n reg_costs=[dict(type='BBoxL1Cost', weight=1.0, norm_with_img_size=True),\n dict(type='IoUCost', iou_mode='giou', weight=1.0)],\n topk_k=1\n ):\n cls_costs = cls_costs if isinstance(cls_costs, (tuple, list)) else [cls_costs]\n reg_costs = reg_costs if isinstance(reg_costs, (tuple, list)) else [reg_costs]\n self.cls_costs = [build_match_cost(cls_cost) for cls_cost in cls_costs]\n self.reg_costs = [build_match_cost(reg_cost) for reg_cost in reg_costs]\n self.topk_k = topk_k\n\n def assign(self,\n bbox_pred,\n cls_pred,\n gt_bboxes,\n gt_labels,\n img_meta,\n gt_bboxes_ignore=None,\n eps=1e-7):\n \"\"\"Computes one-to-one matching based on the weighted costs.\n\n This method assigns each query prediction to a ground truth or\n background. The `assigned_gt_inds` with -1 means don't care,\n 0 means negative sample, and positive number is the index (1-based)\n of assigned gt.\n The assignment is done in the following steps, the order matters.\n\n 1. assign every prediction to -1\n 2. compute the weighted costs\n 3. do Hungarian matching on CPU based on the costs\n 4.
assign all to 0 (background) first, then for each matched pair\n between predictions and gts, treat this prediction as foreground\n and assign the corresponding gt index (plus 1) to it.\n\n Args:\n bbox_pred (Tensor): Predicted boxes with unnormalized coordinates\n (x1, y1, x2, y2, ...). Shape [num_query, k*2].\n cls_pred (Tensor): Predicted classification logits, shape\n [num_query, num_class].\n gt_bboxes (Tensor): Ground truth boxes with unnormalized\n coordinates (x1, y1, x2, y2, ...). Shape [num_gt, k*2].\n gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).\n img_meta (dict): Meta information for current image.\n gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n labelled as `ignored`. Default None.\n eps (int | float, optional): A value added to the denominator for\n numerical stability. Default 1e-7.\n\n Returns:\n :obj:`AssignResult`: The assigned result.\n \"\"\"\n assert gt_bboxes_ignore is None, \\\n 'Only case when gt_bboxes_ignore is None is supported.'\n num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)\n\n # 1. assign -1 by default\n assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), -1, dtype=torch.long)\n assigned_labels = bbox_pred.new_full((num_bboxes, ), -1, dtype=torch.long)\n if num_gts == 0 or num_bboxes == 0:\n # No ground truth or boxes, return empty assignment\n if num_gts == 0:\n # No ground truth, assign all to background\n assigned_gt_inds[:] = 0\n return AssignResult(\n num_gts, assigned_gt_inds, None, labels=assigned_labels)\n\n # 2. compute the weighted costs\n # classification and bbox cost.\n cls_costs = [cls_cost(cls_pred, gt_labels) for cls_cost in self.cls_costs]\n # regression L1 cost and regression iou cost, giou is used by default in official DETR.\n reg_costs = [reg_cost(bbox_pred, gt_bboxes, img_meta) for reg_cost in self.reg_costs]\n # weighted sum of above costs\n cost = sum(cls_costs) + sum(reg_costs)\n\n # 3. do Hungarian matching on CPU using linear_sum_assignment\n cost = cost.detach().cpu()\n\n if self.topk_k == 1:\n if linear_sum_assignment is None:\n raise ImportError('Please run \"pip install scipy\" '\n 'to install scipy first.')\n matched_row_inds, matched_col_inds = linear_sum_assignment(cost)\n matched_row_inds = torch.from_numpy(matched_row_inds).to(\n bbox_pred.device)\n matched_col_inds = torch.from_numpy(matched_col_inds).to(\n bbox_pred.device)\n\n # 4.
assign backgrounds and foregrounds\n # assign all indices to backgrounds first\n assigned_gt_inds[:] = 0\n # assign foregrounds based on matching results\n assigned_gt_inds[matched_row_inds] = matched_col_inds + 1\n assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]\n else:\n cost_assign = bbox_pred.new_full((num_bboxes,), 0, dtype=torch.long)\n assigned_gt_inds[:] = 0\n\n index = torch.nonzero(cost_assign == 0).squeeze(1)\n cost_new = cost[cost_assign == 0]\n num = 0\n while cost_new.shape[0] // num_gts != 0 and num + 1 <= self.topk_k:\n num = num + 1\n matched_row_inds, matched_col_inds = linear_sum_assignment(cost_new)\n matched_row_inds = torch.from_numpy(matched_row_inds).to(\n bbox_pred.device)\n matched_col_inds = torch.from_numpy(matched_col_inds).to(\n bbox_pred.device)\n matched_row_inds = index[matched_row_inds]\n\n assigned_gt_inds[matched_row_inds] = matched_col_inds + 1\n cost_assign[matched_row_inds] = matched_col_inds + 1\n assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]\n index = torch.nonzero(cost_assign == 0).squeeze(1)\n cost_new = cost[cost_assign == 0]\n return AssignResult(\n num_gts, assigned_gt_inds, None, labels=assigned_labels)\n###################################################################################\n" ]
[ [ "scipy.optimize.linear_sum_assignment", "torch.from_numpy", "torch.nonzero" ] ]
pat4life360/LEAP-Camera-Face-Tracker-
[ "be31183f9047f432ae400888aef1e0b0c58ff72b" ]
[ "build/PersonFollower.py" ]
[ "#Python Script for Object-Face Servo Tracking\n#Import necesasry libraries\nfrom picamera.array import PiRGBArray\n\nfrom picamera import PiCamera\nimport time\nimport cv2\nimport sys\nimport numpy as np\nimport math\nfrom adafruit_servokit import ServoKit\n\n#Initialize Servos on PCA9685 driver\nkit = ServoKit(channels = 16)\nservo1 = kit.servo[4]\nservo2 = kit.servo[0]\n\n\n#Set up servo PWM width and range\nservo1.set_pulse_width_range(400,2500)\nservo1.actuation_range = 180\nservo2.set_pulse_width_range(400,2500)\nservo2.actuation_range = 180\n\n#Define pan/tilt inerface for servos\ndef pan_tilt(x_axis, y_axis):\n servo1.angle = x_axis + 90\n servo2.angle = y_axis + 90\n\n#Initialize moving average list and object positions\nrolling_list= []\nsample_size = 5\ncurrent_pos = [0, 0]\n\n#Define default method for HSV sliders\ndef nothing(x):\n pass\n\n#Initiializa PiCam with necessary parameters\ncamera = PiCamera()\ncamera.resolution = (640, 480)\ncamera.framerate = 32\nrawCapture = PiRGBArray(camera, size=(640, 480))\ntime.sleep(0.1)\n\n#Read xml file\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')\n\n# Create a window\ncv2.namedWindow('image')\ncv2.namedWindow('viewbox')\n\n\n# create trackbars for color change\ncv2.createTrackbar('HMin','image',0,179,nothing) # Hue is from 0-179 for Opencv\ncv2.createTrackbar('SMin','image',0,255,nothing)\ncv2.createTrackbar('VMin','image',0,255,nothing)\ncv2.createTrackbar('HMax','image',0,179,nothing)\ncv2.createTrackbar('SMax','image',0,255,nothing)\ncv2.createTrackbar('VMax','image',0,255,nothing)\n\n# Set default value for MAX HSV trackbars. (Optimized for bright green opaque surfaces)\ncv2.setTrackbarPos('HMin', 'image', 19)\ncv2.setTrackbarPos('SMin', 'image', 90)\ncv2.setTrackbarPos('VMin', 'image', 110)\ncv2.setTrackbarPos('HMax', 'image', 113)\ncv2.setTrackbarPos('SMax', 'image', 255)\ncv2.setTrackbarPos('VMax', 'image', 255)\n\n# Initialize to check if HSV min/max value changes\nhMin = sMin = vMin = hMax = sMax = vMax = 0\nphMin = psMin = pvMin = phMax = psMax = pvMax = 0\n\n\n#Initialize lists of faces and object coordinates in each iteration\nface_center_coordinates = []\nobject_center_coordinates = []\nface_center_coords = []\nobject_center_coords = [[0,0]]\n\n#Main script loop\nwhile(1):\n\n # Capture frame-by-frame\n for imag in camera.capture_continuous(rawCapture, format = \"bgr\", use_video_port=True):\n img = imag.array\n output = img\n frame = img\n #facial detection using the xml dataset\n faces = face_cascade.detectMultiScale(frame, scaleFactor=1.2, minSize=(20, 20))\n\n\n # get current positions of all trackbars\n hMin = cv2.getTrackbarPos('HMin','image')\n sMin = cv2.getTrackbarPos('SMin','image')\n vMin = cv2.getTrackbarPos('VMin','image')\n\n hMax = cv2.getTrackbarPos('HMax','image')\n sMax = cv2.getTrackbarPos('SMax','image')\n vMax = cv2.getTrackbarPos('VMax','image')\n\n # Set minimum and max HSV values to display\n lower = np.array([hMin, sMin, vMin])\n upper = np.array([hMax, sMax, vMax])\n\n # Create HSV Image and threshold into a range.\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, lower, upper)\n output = cv2.bitwise_and(img,img, mask= mask)\n\n # Print if there is a change in HSV value\n if( (phMin != hMin) | (psMin != sMin) | (pvMin != vMin) | (phMax != hMax) | (psMax != sMax) | (pvMax != vMax) ):\n print(\"(hMin = %d , sMin = %d, vMin = %d), (hMax = %d , sMax = %d, vMax = %d)\" % (hMin , sMin , vMin, hMax, sMax , vMax))\n phMin = hMin\n psMin = sMin\n pvMin = 
vMin\n phMax = hMax\n psMax = sMax\n pvMax = vMax\n\n\n #Set array values for HSV numbers\n boxlower = np.array([hMin, sMin, vMin], dtype = \"uint8\")\n boxupper = np.array([hMax, sMax, vMax], dtype = \"uint8\")\n \n #filters out the original video according to the hsv values adjusted above or the defaults.\n #It is not shown in the final screen because it is used to create bounding boxes for\n #the view box\n boxmask = cv2.inRange(hsv, boxlower, boxupper)\n viewbox = img\n \n #cnts are all the contours needed to create a box fit around certain colors left\n #visible by the boxmask\n cnts = cv2.findContours(boxmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if len(cnts) == 2 else cnts[1]\n \n #drawing a yellow rectangle around a face and a red dot in the center. Calculates which\n #faces are the closest to the object and adds them to the list of such faces.\n for (x, y, w, h) in faces:\n face_center_coordinates = (x+int(w/2), y+int(h/2))\n cv2.rectangle(viewbox, (x, y), (x + w, y + h), (36,255,255), 2)\n cv2.ellipse(viewbox, face_center_coordinates, (0,0), 0, 0, 360, (0, 0, 255), 5)\n face_x = face_center_coordinates[0]\n face_y = face_center_coordinates[1]\n object_x = object_center_coords[-1][0]\n object_y = object_center_coords[-1][1]\n dist_thresh = 100\n curr_dist = int(np.sqrt((face_x-object_x)**2 + (face_y-object_y)**2))\n if curr_dist < dist_thresh:\n face_center_coords.append(face_center_coordinates)\n \n #draws a box around desired object (likely matte in color and highly saturated with few\n #other colors of similar value in the room)\n for c in cnts:\n x,y,w,h = cv2.boundingRect(c)\n area = w*h\n width = w\n height = h\n if (area > 2000):\n cv2.rectangle(viewbox, (x, y), (x + w, y + h), (36,255,12), 2)\n object_center_coordinates = (x+int(width/2), y+int(height/2))\n print(\"object center coordinates x = %d y = %d\" % (x+int(width/2), y+int(height/2)))\n cv2.ellipse(viewbox, object_center_coordinates, (0,0), 0, 0, 360, (0, 0, 255), 5)\n object_center_coords.append(object_center_coordinates)\n\n cv2.imshow('image', output)\n cv2.imshow('viewbox', viewbox)\n \n \n \n direction = [0, 0]\n \n #Conditions to keep the tracked object within the defined pixel bounds of the frame\n if object_center_coordinates:\n if object_center_coordinates[0] < 220:\n direction[0] = 1\n elif object_center_coordinates[0] > 420:\n direction[0] = -1\n \n if object_center_coordinates[1] < 140:\n direction[1] = 1\n elif object_center_coordinates[1] > 340:\n direction[1] = -1\n \n if face_center_coords:\n closest_face = face_center_coords[-1]\n \n if closest_face[0] < 300:\n direction[0] = 1\n elif closest_face[0] > 340:\n direction[0] = -1\n \n if closest_face[1] < 220:\n direction[1] = 1\n elif closest_face[1] > 260:\n direction[1] = -1\n \n #Instantiate Rolling Average Values\n average = 0\n x_avg = 0\n y_avg = 0\n \n #Get next coordinate for servo to move\n next_pos = [current_pos[0] + direction[0], current_pos[1] + direction[1]]\n rolling_list.append(next_pos)\n \n #Ensure the values used for the average stay within the rolling window\n if len(rolling_list) >= sample_size:\n x_sum = sum(i[0] for i in rolling_list)\n y_sum = sum(i[1] for i in rolling_list)\n average = [x_sum/len(rolling_list),y_sum/len(rolling_list)]\n rolling_list.pop(0)\n x_avg = average[0]\n y_avg = average[1]\n \n #Move servos by calculated degrees\n pan_tilt(x_avg,y_avg)\n current_pos = next_pos\n \n # Wait longer to prevent freeze for videos.\n k = cv2.waitKey(5) & 0xFF\n if k == 27:\n break\n rawCapture.truncate(0)\n \n# Release
resources\ncamera.close()\ncv2.destroyAllWindows()" ]
[ [ "numpy.array", "numpy.sqrt" ] ]
CMURoboTouch/Taxim
[ "d067cc63892fab8de736a1d3f449d01368b32205" ]
[ "MarkerMotionSimulation/compose/dataLoader.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom os import path as osp\nimport cv2\nfrom scipy import interpolate\n\nimport sys\nsys.path.append(\"../..\")\nimport Basics.sensorParams as psp\n\ndef fill_blank(img):\n \"\"\"\n fill the zero value holes with interpolation\n \"\"\"\n if np.max(img) == np.min(img):\n return img\n x = np.arange(0, img.shape[1])\n y = np.arange(0, img.shape[0])\n #mask invalid values\n array = np.ma.masked_where(img == 0, img)\n xx, yy = np.meshgrid(x, y)\n #get only the valid values\n x1 = xx[~array.mask]\n y1 = yy[~array.mask]\n newarr = img[~array.mask]\n\n GD1 = interpolate.griddata((x1, y1), newarr.ravel(),\n (xx, yy),\n method='linear', fill_value = 0) # cubic # nearest # linear\n return GD1\n\nclass dataLoader:\n\n def __init__(self, x_path, y_path, z_path):\n self.x_list = []\n self.y_list = []\n self.z_list = []\n self.dx_list = []\n self.dy_list = []\n self.dz_list = []\n with open(z_path) as f:\n lines = f.readlines()\n num_lines = len(lines)\n for i in range(1,num_lines):\n ll = lines[i].split()\n # meter -> millimeter\n self.x_list.append(float(ll[1])*1000.0)\n self.y_list.append(float(ll[2])*1000.0)\n self.z_list.append(float(ll[3])*1000.0)\n self.dz_list.append(float(ll[4])*1000.0)\n\n with open(x_path) as f:\n lines = f.readlines()\n num_lines = len(lines)\n for i in range(1,num_lines):\n ll = lines[i].split()\n self.dx_list.append(float(ll[4])*1000.0)\n\n with open(y_path) as f:\n lines = f.readlines()\n num_lines = len(lines)\n for i in range(1,num_lines):\n ll = lines[i].split()\n self.dy_list.append(float(ll[4])*1000.0)\n self.num_points = num_lines-1\n # centralize the points\n self.x_list = self.x_list - np.mean(self.x_list)\n self.y_list = self.y_list - np.mean(self.y_list)\n self.z_list = self.z_list - np.min(self.z_list)\n\n def generateDeformMap(self, dx=0, dy=0):\n \"\"\"\n from the raw txt data to the deformatiom map\n return:\n deformMap (4, d, d): height map, dx, dy, dz\n mask: sparse nodes\n \"\"\"\n # origin height, dx, dy, dz\n deformMap = np.zeros((4, psp.d, psp.d)) # here 640 * 640, easy to rotate\n mask = np.zeros((psp.d, psp.d))\n y_shift = psp.d//2\n x_shift = psp.d//2\n\n for i in range(self.num_points):\n x_pix = int(self.x_list[i]/psp.pixmm)\n y_pix = int(self.y_list[i]/psp.pixmm)\n\n # XY coordinate in pixel\n x_local = int(-1*x_pix + x_shift +dx/psp.pixmm)\n y_local = int(-1*y_pix + y_shift +dy/psp.pixmm)\n # check boundary\n if x_local < 0 or x_local >= psp.d:\n continue\n if y_local < 0 or y_local >= psp.d:\n continue\n z_pix = self.z_list[i]/psp.pixmm\n deformMap[0, y_local,x_local] = z_pix\n\n dx_pix = self.dx_list[i]/psp.pixmm\n dy_pix = self.dy_list[i]/psp.pixmm\n dz_pix = self.dz_list[i]/psp.pixmm\n deformMap[1, y_local,x_local] = -1*dx_pix\n deformMap[2, y_local,x_local] = -1*dy_pix\n deformMap[3, y_local,x_local] = dz_pix\n mask[y_local,x_local] = 1\n return deformMap, mask\n\n def correctSym_dz(self, deformMap):\n # this is used to force symmetric\n z = fill_blank(deformMap[0,:,:])\n dx = fill_blank(deformMap[1,:,:])\n dy = fill_blank(deformMap[2,:,:])\n dz = fill_blank(deformMap[3,:,:])\n idx_x = np.arange(psp.d//2)\n idx_vx = psp.d-1 - idx_x\n idx_y = np.arange(psp.d//2)\n idx_vy = psp.d-1 - idx_y\n error_x = dx[:,idx_x] + dx[:,idx_vx]\n error_y = dy[idx_y,:] + dy[idx_vy,:]\n # correct the error\n dx[:,idx_vx] -= error_x/2.0\n dx[:,idx_x] -= error_x/2.0\n dy[idx_vy,:] -= error_y/2.0\n dy[idx_y,:] -= error_y/2.0\n #\n error_y = dy[:,idx_x] - dy[:,idx_vx]\n error_x = dx[idx_y,:] - 
dx[idx_vy,:]\n\n #\n dy[:,idx_x] -= error_y/2.0\n dy[:,idx_vx] += error_y/2.0\n dx[idx_y,:] -= error_x/2.0\n dx[idx_vy,:] += error_x/2.0\n\n # z\n error_zy = dz[:,idx_x] - dz[:,idx_vx]\n dz[:,idx_x] -= error_zy/2.0\n dz[:,idx_vx] += error_zy/2.0\n error_zx = dz[idx_y,:] - dz[idx_vy,:]\n dz[idx_y,:] -= error_zx/2.0\n dz[idx_vy,:] += error_zx/2.0\n\n filledMap = np.zeros((4, psp.d, psp.d))\n filledMap[0,:,:] = z\n filledMap[1,:,:] = dx\n filledMap[2,:,:] = dy\n filledMap[3,:,:] = dz\n\n return filledMap\n\n def correctSym_dxdz(self, deformMap):\n # input a [dx, 0, dz]\n # output 4 symmetric deformMap\n # [dx, 0, dz], [0, dy, dz], [-dx, 0, dz], [0, -dy, dz]\n z = fill_blank(deformMap[0,:,:])\n dx = fill_blank(deformMap[1,:,:])\n dy = fill_blank(deformMap[2,:,:])\n dz = fill_blank(deformMap[3,:,:])\n idx_x = np.arange(psp.d//2)\n idx_vx = psp.d-1 - idx_x\n idx_y = np.arange(psp.d//2)\n idx_vy = psp.d-1 - idx_y\n error_x = dx[:,idx_x] + dx[:,idx_vx]\n error_y = dy[idx_y,:] + dy[idx_vy,:]\n\n # correct the error\n dy[idx_vy,:] -= error_y/2.0\n dy[idx_y,:] -= error_y/2.0\n\n #\n error_y = dy[:,idx_x] - dy[:,idx_vx]\n error_x = dx[idx_y,:] - dx[idx_vy,:]\n\n #\n dx[idx_y,:] -= error_x/2.0\n dx[idx_vy,:] += error_x/2.0\n\n # z\n error_zx = dz[idx_y,:] - dz[idx_vy,:]\n dz[idx_y,:] -= error_zx/2.0\n dz[idx_vy,:] += error_zx/2.0\n\n filledMap = np.zeros((4, psp.d, psp.d))\n filledMap[0,:,:] = z\n filledMap[1,:,:] = dx\n filledMap[2,:,:] = dy\n filledMap[3,:,:] = dz\n\n deform_list = []\n deform_list.append(filledMap)\n\n fullMap = np.zeros((4, psp.d, psp.d))\n fullMap[0,:,:] = z\n fullMap[1,:,:] = dx\n fullMap[2,:,:] = dy\n fullMap[3,:,:] = dz\n\n # rotate 3 times\n filledMap = np.zeros((4, psp.d, psp.d))\n filledMap[0,:,:] = np.rot90(fullMap[0,:,:])\n filledMap[2,:,:] = -1*np.rot90(fullMap[1,:,:])\n filledMap[1,:,:] = np.rot90(fullMap[2,:,:])\n filledMap[3,:,:] = np.rot90(fullMap[3,:,:])\n deform_list.append(filledMap)\n\n filledMap = np.zeros((4, psp.d, psp.d))\n filledMap[0,:,:] = np.rot90(fullMap[0,:,:],2)\n filledMap[1,:,:] = -1*np.rot90(fullMap[1,:,:],2)\n filledMap[2,:,:] = -1*np.rot90(fullMap[2,:,:],2)\n filledMap[3,:,:] = np.rot90(fullMap[3,:,:],2)\n deform_list.append(filledMap)\n\n filledMap = np.zeros((4, psp.d, psp.d))\n filledMap[0,:,:] = np.rot90(fullMap[0,:,:],3)\n filledMap[2,:,:] = np.rot90(fullMap[1,:,:],3)\n filledMap[1,:,:] = -1*np.rot90(fullMap[2,:,:],3)\n filledMap[3,:,:] = np.rot90(fullMap[3,:,:],3)\n deform_list.append(filledMap)\n\n return deform_list\n" ]
[ [ "numpy.rot90", "numpy.meshgrid", "numpy.min", "numpy.arange", "numpy.max", "numpy.mean", "numpy.ma.masked_where", "numpy.zeros" ] ]
gvdeynde/deepburn
[ "1af3d62ec0e70b82250bce31342326adcf561002" ]
[ "tests/tools/test_burnup_problem.py" ]
[ "import pytest\n\nfrom numpy import allclose\nfrom scipy.sparse import dok_matrix\nfrom deepburn.isotope import Isotope\nfrom deepburn.burnup_problem import IsotopicComposition, Transitions, BUP, Polonium\n\n\ndef test_isotopic_composition_init():\n ics = IsotopicComposition()\n assert ics._ics == {}\n\n\ndef test_isotopic_composition_add():\n ics = IsotopicComposition()\n iso1 = Isotope(\"Bi209\")\n iso2 = Isotope(\"Bi210\")\n iso3 = Isotope(\"Po210\")\n\n isolist = [iso1, iso2, iso3]\n\n ics.add_value(iso1, 1e17)\n\n assert allclose(ics(isolist), [1e17, 0, 0])\n\n\ndef test_isotopic_composition_add_two():\n ics = IsotopicComposition()\n iso1 = Isotope(\"Bi209\")\n iso2 = Isotope(\"Bi210\")\n iso3 = Isotope(\"Po210\")\n\n isolist = [iso1, iso2, iso3]\n\n ics.add_value(iso1, 1e17)\n ics.add_value(iso3, 1e5)\n\n assert allclose(ics(isolist), [1e17, 0, 1e5])\n\n\ndef test_isotopic_composition_overwrite():\n ics = IsotopicComposition()\n iso1 = Isotope(\"Bi209\")\n iso2 = Isotope(\"Bi210\")\n iso3 = Isotope(\"Po210\")\n\n isolist = [iso1, iso2, iso3]\n\n ics.add_value(iso1, 1e17)\n ics.add_value(iso1, 1e5)\n\n assert allclose(ics(isolist), [1e5, 0, 0])\n\n\ndef test_isotopic_composition_reorder():\n ics = IsotopicComposition()\n iso1 = Isotope(\"Bi209\")\n iso2 = Isotope(\"Bi210\")\n iso3 = Isotope(\"Po210\")\n\n isolist = [iso3, iso2, iso1]\n\n ics.add_value(iso1, 1e17)\n\n assert allclose(ics(isolist), [0, 0, 1e17])\n\n\ndef test_transitions_init():\n t = Transitions()\n assert isinstance(t, Transitions)\n assert isinstance(t._trans, list) and not t._trans\n\n\n@pytest.fixture\ndef Po210_example():\n iso = []\n iso.append(Isotope(\"Bi209\"))\n iso.append(Isotope(\"Bi210\"))\n iso.append(Isotope(\"Po210\"))\n\n trans = []\n trans.append(1.83163e-12)\n trans.append(1.60035e-6)\n trans.append(5.79764e-8)\n\n return (iso, trans)\n\n\ndef test_transition_iso(Po210_example):\n t = Transitions()\n iso, trans = Po210_example\n t.add_transition(iso[0], trans[0], iso[1])\n\n assert set(t.isotopes) == set([iso[0], iso[1]])\n\n\ndef test_transition(Po210_example):\n t = Transitions()\n iso, trans = Po210_example\n t.add_transition(iso[0], trans[0], iso[1])\n\n assert t._trans == [(iso[0], trans[0], iso[1])]\n\n\ndef test_transition_null(Po210_example):\n t = Transitions()\n iso, trans = Po210_example\n t.add_transition(iso[0], trans[0])\n assert t._trans == [(iso[0], trans[0], None)]\n\n\n@pytest.fixture\ndef Po210_example_trans(Po210_example):\n iso = []\n iso.append(Isotope(\"Bi209\"))\n iso.append(Isotope(\"Bi210\"))\n iso.append(Isotope(\"Po210\"))\n\n trans = []\n trans.append(1.83163e-12)\n trans.append(1.60035e-6)\n trans.append(5.79764e-8)\n\n t = Transitions()\n iso, trans = Po210_example\n t.add_transition(iso[0], trans[0], iso[1])\n t.add_transition(iso[1], trans[1], iso[2])\n t.add_transition(iso[2], trans[2])\n\n return iso, trans, t\n\n\ndef test_transition_full(Po210_example_trans):\n iso, trans, transition = Po210_example_trans\n\n assert transition._trans == [\n (iso[0], trans[0], iso[1]),\n (iso[1], trans[1], iso[2]),\n (iso[2], trans[2], None),\n ]\n\n\ndef test_Polonium(Po210_example_trans):\n iso, trans, transition = Po210_example_trans\n\n dok = dok_matrix((3, 3))\n dok[0, 0] = -trans[0]\n dok[1, 0] = +trans[0]\n dok[1, 1] = -trans[1]\n dok[2, 1] = +trans[1]\n dok[2, 2] = -trans[2]\n\n pol = Polonium()\n\n assert pol.name == \"Po210\"\n assert allclose(dok.A, pol.sparsematrix.A)\n assert pol.time_stamps == [20 * 24 * 3600, 180 * 24 * 3600]\n" ]
[ [ "scipy.sparse.dok_matrix", "numpy.allclose" ] ]
EthanChen1234/NLP
[ "16c4e07abba4f8349b4584a8e6d803525e78515c" ]
[ "dssm/siamese_rnn.py" ]
[ "# coding=utf8\n\"\"\"\npython=3.5\nTensorFlow=1.2.1\n\"\"\"\n\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport data_input\nfrom config import Config\nimport random\n\nrandom.seed(9102)\n\nstart = time.time()\n# 是否加BN层\nnorm, epsilon = False, 0.001\n\n# TRIGRAM_D = 21128\nTRIGRAM_D = 100\n# query batch size\nquery_BS = 100\n\n# 读取数据\nconf = Config()\ndata_train = data_input.get_data_siamese_rnn(conf.file_train)\ndata_vali = data_input.get_data_siamese_rnn(conf.file_vali)\n# print(len(data_train['query']), query_BS, len(data_train['query']) / query_BS)\ntrain_epoch_steps = int(len(data_train) / query_BS) - 1\nvali_epoch_steps = int(len(data_vali) / query_BS) - 1\n\n\ndef variable_summaries(var, name):\n \"\"\"Attach a lot of summaries to a Tensor.\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean/' + name, mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))\n tf.summary.scalar('sttdev/' + name, stddev)\n tf.summary.scalar('max/' + name, tf.reduce_max(var))\n tf.summary.scalar('min/' + name, tf.reduce_min(var))\n tf.summary.histogram(name, var)\n\n\ndef get_cosine_score(query_arr, doc_arr):\n # query_norm = sqrt(sum(each x^2))\n pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.square(query_arr), 1))\n pooled_len_2 = tf.sqrt(tf.reduce_sum(tf.square(doc_arr), 1))\n pooled_mul_12 = tf.reduce_sum(tf.multiply(query_arr, doc_arr), 1)\n cos_scores = tf.div(pooled_mul_12, pooled_len_1 * pooled_len_2 + 1e-8, name=\"cos_scores\")\n return cos_scores\n\n\nwith tf.name_scope('input'):\n # 预测时只用输入query即可,将其embedding为向量。\n query_batch = tf.placeholder(tf.int32, shape=[None, None], name='query_batch')\n doc_batch = tf.placeholder(tf.int32, shape=[None, None], name='doc_batch')\n doc_label_batch = tf.placeholder(tf.float32, shape=[None], name='doc_label_batch')\n query_seq_length = tf.placeholder(tf.int32, shape=[None], name='query_sequence_length')\n doc_seq_length = tf.placeholder(tf.int32, shape=[None], name='pos_seq_length')\n on_train = tf.placeholder(tf.bool)\n keep_prob = tf.placeholder(tf.float32, name='drop_out_prob')\n\nwith tf.name_scope('word_embeddings_layer'):\n _word_embedding = tf.get_variable(name=\"word_embedding_arr\", dtype=tf.float32,\n shape=[conf.nwords, TRIGRAM_D])\n query_embed = tf.nn.embedding_lookup(_word_embedding, query_batch, name='query_batch_embed')\n doc_embed = tf.nn.embedding_lookup(_word_embedding, doc_batch, name='doc_positive_embed')\n\nwith tf.name_scope('RNN'):\n # Abandon bag of words, use GRU, you can use stacked gru\n # query_l1 = add_layer(query_batch, TRIGRAM_D, L1_N, activation_function=None) # tf.nn.relu()\n # doc_positive_l1 = add_layer(doc_positive_batch, TRIGRAM_D, L1_N, activation_function=None)\n # doc_negative_l1 = add_layer(doc_negative_batch, TRIGRAM_D, L1_N, activation_function=None)\n if conf.use_stack_rnn:\n cell_fw = tf.contrib.rnn.GRUCell(conf.hidden_size_rnn, reuse=tf.AUTO_REUSE)\n stacked_gru_fw = tf.contrib.rnn.MultiRNNCell([cell_fw], state_is_tuple=True)\n cell_bw = tf.contrib.rnn.GRUCell(conf.hidden_size_rnn, reuse=tf.AUTO_REUSE)\n stacked_gru_bw = tf.contrib.rnn.MultiRNNCell([cell_fw], state_is_tuple=True)\n (output_fw, output_bw), (_, _) = tf.nn.bidirectional_dynamic_rnn(stacked_gru_fw, stacked_gru_bw)\n # not ready, to be continue ...\n else:\n cell_fw = tf.contrib.rnn.GRUCell(conf.hidden_size_rnn, reuse=tf.AUTO_REUSE)\n cell_bw = tf.contrib.rnn.GRUCell(conf.hidden_size_rnn, reuse=tf.AUTO_REUSE)\n # query\n (_, _), (query_output_fw, 
query_output_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, query_embed,\n sequence_length=query_seq_length,\n dtype=tf.float32)\n query_rnn_output = tf.concat([query_output_fw, query_output_bw], axis=-1)\n query_rnn_output = tf.nn.dropout(query_rnn_output, keep_prob)\n # doc_pos\n (_, _), (doc_pos_output_fw, doc_pos_output_bw) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw,\n doc_embed,\n sequence_length=doc_seq_length,\n dtype=tf.float32)\n doc_rnn_output = tf.concat([doc_pos_output_fw, doc_pos_output_bw], axis=-1)\n doc_rnn_output = tf.nn.dropout(doc_rnn_output, keep_prob)\n\nwith tf.name_scope('Cosine_Similarity'):\n # Cosine similarity\n cos_sim = get_cosine_score(query_rnn_output, doc_rnn_output)\n cos_sim_prob = tf.clip_by_value(cos_sim, 1e-8, 1.0)\n\nwith tf.name_scope('Loss'):\n # Train Loss\n cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=doc_label_batch, logits=cos_sim)\n losses = tf.reduce_sum(cross_entropy)\n tf.summary.scalar('loss', losses)\n pass\n\nwith tf.name_scope('Training'):\n # Optimizer\n train_step = tf.train.AdamOptimizer(conf.learning_rate).minimize(losses)\n pass\n# with tf.name_scope('Accuracy'):\n# correct_prediction = tf.equal(tf.argmax(prob, 1), 0)\n# accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n# tf.summary.scalar('accuracy', accuracy)\n\nmerged = tf.summary.merge_all()\n\nwith tf.name_scope('Test'):\n average_loss = tf.placeholder(tf.float32)\n loss_summary = tf.summary.scalar('average_loss', average_loss)\n\nwith tf.name_scope('Train'):\n train_average_loss = tf.placeholder(tf.float32)\n train_loss_summary = tf.summary.scalar('train_average_loss', train_average_loss)\n\n\ndef pull_batch(data_map, batch_id):\n query_index, title_index, label_index, dsize = range(4)\n cur_data = data_map[batch_id * query_BS:(batch_id + 1) * query_BS]\n query_in = [x[query_index] for x in cur_data]\n doc_in = [x[title_index] for x in cur_data]\n label = [x[label_index] for x in cur_data]\n\n return query_in, doc_in, label\n\n\ndef feed_dict(on_training, data_set, batch_id, drop_prob):\n query_in, doc_in, label = pull_batch(data_set, batch_id)\n # query_in, doc_in, label = np.array(query_in), np.array(doc_in), np.array(label)\n query_len = len(query_in)\n query_seq_len = [conf.max_seq_len] * query_len\n pos_seq_len = [conf.max_seq_len] * query_len\n return {query_batch: query_in, doc_batch: doc_in, doc_label_batch: label, on_train: on_training,\n keep_prob: drop_prob, query_seq_length: query_seq_len, doc_seq_length: pos_seq_len}\n\n\n# config = tf.ConfigProto() # log_device_placement=True)\n# config.gpu_options.allow_growth = True\n# if not config.gpu:\n# config = tf.ConfigProto(device_count= {'GPU' : 0})\n\n# 创建一个Saver对象,选择性保存变量或者模型。\nsaver = tf.train.Saver()\n# with tf.Session(config=config) as sess:\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n train_writer = tf.summary.FileWriter(conf.summaries_dir + '/train', sess.graph)\n\n start = time.time()\n for epoch in range(conf.num_epoch):\n random.shuffle(data_train)\n for batch_id in range(train_epoch_steps):\n # print(batch_id)\n sess.run(train_step, feed_dict=feed_dict(True, data_train, batch_id, 0.5))\n end = time.time()\n # train loss\n epoch_loss = 0\n for i in range(train_epoch_steps):\n loss_v = sess.run(losses, feed_dict=feed_dict(False, data_train, i, 1))\n epoch_loss += loss_v\n\n epoch_loss /= (train_epoch_steps)\n train_loss = sess.run(train_loss_summary, feed_dict={train_average_loss: epoch_loss})\n 
train_writer.add_summary(train_loss, epoch + 1)\n print(\"\\nEpoch #%d | Train Loss: %-4.3f | PureTrainTime: %-3.3fs\" %\n (epoch, epoch_loss, end - start))\n\n # test loss\n start = time.time()\n epoch_loss = 0\n for i in range(vali_epoch_steps):\n loss_v = sess.run(losses, feed_dict=feed_dict(False, data_vali, i, 1))\n epoch_loss += loss_v\n epoch_loss /= (vali_epoch_steps)\n test_loss = sess.run(loss_summary, feed_dict={average_loss: epoch_loss})\n train_writer.add_summary(test_loss, epoch + 1)\n # test_writer.add_summary(test_loss, step + 1)\n print(\"Epoch #%d | Test Loss: %-4.3f | Calc_LossTime: %-3.3fs\" %\n (epoch, epoch_loss, start - end))\n\n # 保存模型\n save_path = saver.save(sess, \"model/model_1.ckpt\")\n print(\"Model saved in file: \", save_path)\n" ]
[ [ "tensorflow.get_variable", "tensorflow.concat", "tensorflow.contrib.rnn.GRUCell", "tensorflow.reduce_sum", "tensorflow.nn.bidirectional_dynamic_rnn", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.contrib.rnn.MultiRNNCell", "tensorflow.div", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.square", "tensorflow.train.Saver", "tensorflow.nn.dropout", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.nn.embedding_lookup", "tensorflow.summary.histogram", "tensorflow.clip_by_value", "tensorflow.reduce_max", "tensorflow.multiply", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "tensorflow.reduce_min" ] ]
karlnapf/kernel_hmc
[ "8ab93ae0470cc5916d5349b40bae7f91075bc385" ]
[ "kernel_hmc/proposals/base.py" ]
[ "from kernel_hmc.tools.assertions import assert_implements_log_pdf_and_grad\nfrom kernel_hmc.tools.log import Log\nimport numpy as np\n\n\nlogger = Log.get_logger()\n\ndef standard_sqrt_schedule(t):\n return 1. / np.sqrt(t + 1)\n\nclass ProposalBase(object):\n def __init__(self, target, D, step_size, adaptation_schedule, acc_star):\n self.target = target\n self.D = D\n self.step_size = step_size\n self.adaptation_schedule = adaptation_schedule\n self.acc_star = acc_star\n \n self.t = 0\n \n # some sanity checks\n assert acc_star is None or acc_star > 0 and acc_star < 1\n if adaptation_schedule is not None:\n lmbdas = np.array([adaptation_schedule(t) for t in np.arange(100)])\n assert np.all(lmbdas >= 0)\n assert np.allclose(np.sort(lmbdas)[::-1], lmbdas)\n \n assert_implements_log_pdf_and_grad(target, assert_grad=False)\n \n def initialise(self):\n pass\n \n def proposal(self):\n pass\n \n def update(self, samples, acc_probs):\n self.t += 1\n \n previous_accpept_prob = acc_probs[-1]\n \n if self.adaptation_schedule is not None and self.acc_star is not None:\n # always update with weight\n lmbda = self.adaptation_schedule(self.t)\n self._update_scaling(lmbda, previous_accpept_prob)\n \n def _update_scaling(self, lmbda, accept_prob):\n # difference desired and actuall acceptance rate\n diff = accept_prob - self.acc_star\n \n new_log_step_size = np.log(self.step_size) + lmbda * diff\n new_step_size = np.exp(new_log_step_size)\n \n logger.debug(\"Acc. prob. diff. was %.3f-%.3f=%.3f. Updating step-size from %s to %s.\" % \\\n (accept_prob, self.acc_star, diff, self.step_size, new_step_size))\n\n self.step_size = new_step_size\n" ]
[ [ "numpy.log", "numpy.sqrt", "numpy.arange", "numpy.sort", "numpy.all", "numpy.exp" ] ]
brendenpetersen/deep-symbolic-optimization
[ "8724839dab910022e24d03debdf564236683474b", "8724839dab910022e24d03debdf564236683474b" ]
[ "dso/dso/tf_state_manager.py", "dso/dso/controller.py" ]
[ "from abc import ABC, abstractmethod\n\nimport tensorflow as tf\n\nfrom dso.program import Program\n\n\nclass StateManager(ABC):\n \"\"\"\n An interface for handling the tf.Tensor inputs to the Controller.\n \"\"\"\n\n def setup_manager(self, controller):\n \"\"\"\n Function called inside the controller to perform the needed initializations (e.g., if the tf context is needed)\n :param controller the controller class\n \"\"\"\n self.controller = controller\n self.max_length = controller.max_length\n\n @abstractmethod\n def get_tensor_input(self, obs):\n \"\"\"\n Convert an observation from a Task into a Tesnor input for the\n Controller, e.g. by performing one-hot encoding or embedding lookup.\n\n Parameters\n ----------\n obs : np.ndarray (dtype=np.float32)\n Observation coming from the Task.\n\n Returns\n --------\n input_ : tf.Tensor (dtype=tf.float32)\n Tensor to be used as input to the Controller.\n \"\"\"\n return\n\n def process_state(self, obs):\n \"\"\"\n Entry point for adding information to the state tuple.\n If not overwritten, this functions does nothing\n \"\"\"\n return obs\n\n\ndef make_state_manager(config):\n \"\"\"\n Parameters\n ----------\n config : dict\n Parameters for this StateManager.\n\n Returns\n -------\n state_manager : StateManager\n The StateManager to be used by the Controller.\n \"\"\"\n manager_dict = {\n \"hierarchical\": HierarchicalStateManager\n }\n\n if config is None:\n config = {}\n\n # Use HierarchicalStateManager by default\n manager_type = config.pop(\"type\", \"hierarchical\")\n\n manager_class = manager_dict[manager_type]\n state_manager = manager_class(**config)\n\n return state_manager\n\n\nclass HierarchicalStateManager(StateManager):\n \"\"\"\n Class that uses the previous action, parent, sibling, and/or dangling as\n observations.\n \"\"\"\n\n def __init__(self, observe_parent=True, observe_sibling=True,\n observe_action=False, observe_dangling=False, embedding=False,\n embedding_size=8):\n \"\"\"\n Parameters\n ----------\n observe_parent : bool\n Observe the parent of the Token being selected?\n\n observe_sibling : bool\n Observe the sibling of the Token being selected?\n\n observe_action : bool\n Observe the previously selected Token?\n\n observe_dangling : bool\n Observe the number of dangling nodes?\n\n embedding : bool\n Use embeddings for categorical inputs?\n\n embedding_size : int\n Size of embeddings for each categorical input if embedding=True.\n \"\"\"\n self.observe_parent = observe_parent\n self.observe_sibling = observe_sibling\n self.observe_action = observe_action\n self.observe_dangling = observe_dangling\n self.library = Program.library\n\n # Parameter assertions/warnings\n assert self.observe_action + self.observe_parent + self.observe_sibling + self.observe_dangling > 0, \\\n \"Must include at least one observation.\"\n\n self.embedding = embedding\n self.embedding_size = embedding_size\n\n def setup_manager(self, controller):\n super().setup_manager(controller)\n # Create embeddings if needed\n if self.embedding:\n initializer = tf.random_uniform_initializer(minval=-1.0,\n maxval=1.0,\n seed=0)\n with tf.variable_scope(\"embeddings\", initializer=initializer):\n if self.observe_action:\n self.action_embeddings = tf.get_variable(\"action_embeddings\",\n [self.library.n_action_inputs, self.embedding_size],\n trainable=True)\n if self.observe_parent:\n self.parent_embeddings = tf.get_variable(\"parent_embeddings\",\n [self.library.n_parent_inputs, self.embedding_size],\n trainable=True)\n if self.observe_sibling:\n 
self.sibling_embeddings = tf.get_variable(\"sibling_embeddings\",\n [self.library.n_sibling_inputs, self.embedding_size],\n trainable=True)\n\n def get_tensor_input(self, obs):\n observations = []\n action, parent, sibling, dangling = tf.unstack(obs, axis=1)\n\n # Cast action, parent, sibling to int for embedding_lookup or one_hot\n action = tf.cast(action, tf.int32)\n parent = tf.cast(parent, tf.int32)\n sibling = tf.cast(sibling, tf.int32)\n\n # Action, parent, and sibling inputs are either one-hot or embeddings\n if self.observe_action:\n if self.embedding:\n x = tf.nn.embedding_lookup(self.action_embeddings, action)\n else:\n x = tf.one_hot(action, depth=self.library.n_action_inputs)\n observations.append(x)\n if self.observe_parent:\n if self.embedding:\n x = tf.nn.embedding_lookup(self.parent_embeddings, parent)\n else:\n x = tf.one_hot(parent, depth=self.library.n_parent_inputs)\n observations.append(x)\n if self.observe_sibling:\n if self.embedding:\n x = tf.nn.embedding_lookup(self.sibling_embeddings, sibling)\n else:\n x = tf.one_hot(sibling, depth=self.library.n_sibling_inputs)\n observations.append(x)\n\n # Dangling input is just the value of dangling\n if self.observe_dangling:\n x = tf.expand_dims(dangling, axis=-1)\n observations.append(x)\n\n input_ = tf.concat(observations, -1)\n return input_\n", "\"\"\"Controller used to generate distribution over hierarchical, variable-length objects.\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom dso.program import Program\nfrom dso.memory import Batch\nfrom dso.prior import LengthConstraint\n\n\nclass LinearWrapper(tf.contrib.rnn.LayerRNNCell):\n \"\"\"\n RNNCell wrapper that adds a linear layer to the output.\n\n See: https://github.com/tensorflow/models/blob/master/research/brain_coder/single_task/pg_agent.py\n \"\"\"\n\n def __init__(self, cell, output_size):\n self.cell = cell\n self._output_size = output_size\n\n def __call__(self, inputs, state, scope=None):\n with tf.variable_scope(type(self).__name__):\n outputs, state = self.cell(inputs, state, scope=scope)\n logits = tf.layers.dense(outputs, units=self._output_size)\n\n return logits, state\n\n @property\n def output_size(self):\n return self._output_size\n\n @property\n def state_size(self):\n return self.cell.state_size\n\n def zero_state(self, batch_size, dtype):\n return self.cell.zero_state(batch_size, dtype)\n\n\nclass Controller(object):\n \"\"\"\n Recurrent neural network (RNN) controller used to generate expressions.\n\n Specifically, the RNN outputs a distribution over pre-order traversals of\n symbolic expression trees. It is trained using REINFORCE with baseline.\n\n Parameters\n ----------\n sess : tf.Session\n TenorFlow Session object.\n\n prior : dso.prior.JointPrior\n JointPrior object used to adjust probabilities during sampling.\n\n state_manager: dso.tf_state_manager.StateManager\n Object that handles the state features to be used\n\n summary : bool\n Write tensorboard summaries?\n\n debug : int\n Debug level, also used in learn(). 0: No debug. 1: Print shapes and\n number of parameters for each variable.\n\n cell : str\n Recurrent cell to use. Supports 'lstm' and 'gru'.\n\n num_layers : int\n Number of RNN layers.\n\n num_units : int or list of ints\n Number of RNN cell units in each of the RNN's layers. If int, the value\n is repeated for each layer.\n\n initiailizer : str\n Initializer for the recurrent cell. Supports 'zeros' and 'var_scale'.\n\n optimizer : str\n Optimizer to use. 
Supports 'adam', 'rmsprop', and 'sgd'.\n\n learning_rate : float\n Learning rate for optimizer.\n\n entropy_weight : float\n Coefficient for entropy bonus.\n\n entropy_gamma : float or None\n Gamma in entropy decay. None (or\n equivalently, 1.0) turns off entropy decay.\n\n pqt : bool\n Train with priority queue training (PQT)?\n\n pqt_k : int\n Size of priority queue.\n\n pqt_batch_size : int\n Size of batch to sample (with replacement) from priority queue.\n\n pqt_weight : float\n Coefficient for PQT loss function.\n\n pqt_use_pg : bool\n Use policy gradient loss when using PQT?\n\n max_length : int or None\n Maximum sequence length. This will be overridden if a LengthConstraint\n with a maximum length is part of the prior.\n \"\"\"\n\n def __init__(self, sess, prior, state_manager, debug=0, summary=False,\n # RNN cell hyperparameters\n cell='lstm',\n num_layers=1,\n num_units=32,\n initializer='zeros',\n # Optimizer hyperparameters\n optimizer='adam',\n learning_rate=0.001,\n # Loss hyperparameters\n entropy_weight=0.005,\n entropy_gamma=1.0,\n # PQT hyperparameters\n pqt=False,\n pqt_k=10,\n pqt_batch_size=1,\n pqt_weight=200.0,\n pqt_use_pg=False,\n # Other hyperparameters\n max_length=30):\n\n self.sess = sess\n self.prior = prior\n self.summary = summary\n ###self.rng = np.random.RandomState(0) # Used for PPO minibatch sampling\n self.n_objects = Program.n_objects\n\n lib = Program.library\n\n # Find max_length from the LengthConstraint prior, if it exists\n # Both priors will never happen in the same experiment\n prior_max_length = None\n for single_prior in self.prior.priors:\n if isinstance(single_prior, LengthConstraint):\n if single_prior.max is not None:\n prior_max_length = single_prior.max\n self.max_length = prior_max_length\n break\n\n if prior_max_length is None:\n assert max_length is not None, \"max_length must be specified if \"\\\n \"there is no LengthConstraint.\"\n self.max_length = max_length\n print(\"WARNING: Maximum length not constrained. 
Sequences will \"\n \"stop at {} and complete by repeating the first input \"\n \"variable.\".format(self.max_length))\n elif max_length is not None and max_length != self.max_length:\n print(\"WARNING: max_length ({}) will be overridden by value from \"\n \"LengthConstraint ({}).\".format(max_length, self.max_length))\n self.max_length *= self.n_objects\n max_length = self.max_length\n\n # Hyperparameters\n self.entropy_weight = entropy_weight\n self.pqt = pqt\n self.pqt_k = pqt_k\n self.pqt_batch_size = pqt_batch_size\n\n n_choices = lib.L\n\n # Placeholders, computed after instantiating expressions\n self.batch_size = tf.placeholder(dtype=tf.int32, shape=(), name=\"batch_size\")\n self.baseline = tf.placeholder(dtype=tf.float32, shape=(), name=\"baseline\")\n\n # Entropy decay vector\n if entropy_gamma is None:\n entropy_gamma = 1.0\n entropy_gamma_decay = np.array([entropy_gamma**t for t in range(max_length)])\n\n # Build controller RNN\n with tf.name_scope(\"controller\"):\n\n def make_initializer(name):\n if name == \"zeros\":\n return tf.zeros_initializer()\n if name == \"var_scale\":\n return tf.contrib.layers.variance_scaling_initializer(\n factor=0.5, mode='FAN_AVG', uniform=True, seed=0)\n raise ValueError(\"Did not recognize initializer '{}'\".format(name))\n\n def make_cell(name, num_units, initializer):\n if name == 'lstm':\n return tf.nn.rnn_cell.LSTMCell(num_units, initializer=initializer)\n if name == 'gru':\n return tf.nn.rnn_cell.GRUCell(num_units, kernel_initializer=initializer, bias_initializer=initializer)\n raise ValueError(\"Did not recognize cell type '{}'\".format(name))\n\n # Create recurrent cell\n if isinstance(num_units, int):\n num_units = [num_units] * num_layers\n initializer = make_initializer(initializer)\n cell = tf.contrib.rnn.MultiRNNCell(\n [make_cell(cell, n, initializer=initializer) for n in num_units])\n cell = LinearWrapper(cell=cell, output_size=n_choices)\n\n task = Program.task\n initial_obs = task.reset_task(prior)\n state_manager.setup_manager(self)\n initial_obs = tf.broadcast_to(initial_obs, [self.batch_size, len(initial_obs)]) # (?, obs_dim)\n initial_obs = state_manager.process_state(initial_obs)\n\n # Get initial prior\n initial_prior = self.prior.initial_prior()\n initial_prior = tf.constant(initial_prior, dtype=tf.float32)\n initial_prior = tf.broadcast_to(initial_prior, [self.batch_size, n_choices])\n\n # Define loop function to be used by tf.nn.raw_rnn.\n initial_cell_input = state_manager.get_tensor_input(initial_obs)\n\n def loop_fn(time, cell_output, cell_state, loop_state):\n\n if cell_output is None: # time == 0\n finished = tf.zeros(shape=[self.batch_size], dtype=tf.bool)\n obs = initial_obs\n next_input = state_manager.get_tensor_input(obs)\n next_cell_state = cell.zero_state(batch_size=self.batch_size, dtype=tf.float32) # 2-tuple, each shape (?, num_units)\n emit_output = None\n actions_ta = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True, clear_after_read=False) # Read twice\n obs_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True, clear_after_read=True)\n priors_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True, clear_after_read=True)\n prior = initial_prior\n lengths = tf.ones(shape=[self.batch_size], dtype=tf.int32)\n next_loop_state = (\n actions_ta,\n obs_ta,\n priors_ta,\n obs,\n prior,\n lengths, # Unused until implementing variable length\n finished)\n else:\n actions_ta, obs_ta, priors_ta, obs, prior, lengths, finished = loop_state\n logits = cell_output + prior\n next_cell_state 
= cell_state\n emit_output = logits\n # tf.multinomial is deprecated: TF recommends switching to tf.random.categorical\n # action = tf.random.categorical(logits=logits, num_samples=1, output_dtype=tf.int32, seed=1)[:, 0]\n action = tf.multinomial(logits=logits, num_samples=1, output_dtype=tf.int32, seed=1)[:, 0]\n\n # When implementing variable length:\n # action = tf.where(\n # tf.logical_not(finished),\n # tf.multinomial(logits=logits, num_samples=1, output_dtype=tf.int32)[:, 0],\n # tf.zeros(shape=[self.batch_size], dtype=tf.int32))\n next_actions_ta = actions_ta.write(time - 1, action) # Write chosen actions\n # Get current action batch\n actions = tf.transpose(next_actions_ta.stack()) # Shape: (?, time)\n\n # Compute obs and prior\n next_obs, next_prior = tf.py_func(func=task.get_next_obs,\n inp=[actions, obs],\n Tout=[tf.float32, tf.float32])\n next_prior.set_shape([None, lib.L])\n next_obs.set_shape([None, task.OBS_DIM])\n next_obs = state_manager.process_state(next_obs)\n next_input = state_manager.get_tensor_input(next_obs)\n next_obs_ta = obs_ta.write(time - 1, obs) # Write OLD obs\n next_priors_ta = priors_ta.write(time - 1, prior) # Write OLD prior\n finished = next_finished = tf.logical_or(\n finished,\n time >= max_length)\n # When implementing variable length:\n # finished = next_finished = tf.logical_or(tf.logical_or(\n # finished, # Already finished\n # next_dangling == 0), # Currently, this will be 0 not just the first time, but also at max_length\n # time >= max_length)\n next_lengths = tf.where(\n finished, # Ever finished\n lengths,\n tf.tile(tf.expand_dims(time + 1, 0), [self.batch_size]))\n next_loop_state = (next_actions_ta,\n next_obs_ta,\n next_priors_ta,\n next_obs,\n next_prior,\n next_lengths,\n next_finished)\n\n return (finished, next_input, next_cell_state, emit_output, next_loop_state)\n\n # Returns RNN emit outputs TensorArray (i.e. 
logits), final cell state, and final loop state\n with tf.variable_scope('policy'):\n _, _, loop_state = tf.nn.raw_rnn(cell=cell, loop_fn=loop_fn)\n actions_ta, obs_ta, priors_ta, _, _, _, _ = loop_state\n\n self.actions = tf.transpose(actions_ta.stack(), perm=[1, 0]) # (?, max_length)\n self.obs = tf.transpose(obs_ta.stack(), perm=[1, 2, 0]) # (?, obs_dim, max_length)\n self.priors = tf.transpose(priors_ta.stack(), perm=[1, 0, 2]) # (?, max_length, n_choices)\n\n # Generates dictionary containing placeholders needed for a batch of sequences\n def make_batch_ph(name):\n with tf.name_scope(name):\n batch_ph = {\n \"actions\": tf.placeholder(tf.int32, [None, max_length]),\n \"obs\": tf.placeholder(tf.float32, [None, task.OBS_DIM, self.max_length]),\n \"priors\": tf.placeholder(tf.float32, [None, max_length, n_choices]),\n \"lengths\": tf.placeholder(tf.int32, [None, ]),\n \"rewards\": tf.placeholder(tf.float32, [None], name=\"r\"),\n \"on_policy\": tf.placeholder(tf.int32, [None, ])\n }\n batch_ph = Batch(**batch_ph)\n\n return batch_ph\n\n def safe_cross_entropy(p, logq, axis=-1):\n safe_logq = tf.where(tf.equal(p, 0.), tf.ones_like(logq), logq)\n return - tf.reduce_sum(p * safe_logq, axis)\n\n # Generates tensor for neglogp of a given batch\n def make_neglogp_and_entropy(B):\n with tf.variable_scope('policy', reuse=True):\n logits, _ = tf.nn.dynamic_rnn(cell=cell,\n inputs=state_manager.get_tensor_input(B.obs),\n sequence_length=B.lengths, # Backpropagates only through sequence length\n dtype=tf.float32)\n logits += B.priors\n probs = tf.nn.softmax(logits)\n logprobs = tf.nn.log_softmax(logits)\n\n # Generate mask from sequence lengths\n # NOTE: Using this mask for neglogp and entropy actually does NOT\n # affect training because gradients are zero outside the lengths.\n # However, the mask makes tensorflow summaries accurate.\n mask = tf.sequence_mask(B.lengths, maxlen=max_length, dtype=tf.float32)\n\n # Negative log probabilities of sequences\n actions_one_hot = tf.one_hot(B.actions, depth=n_choices, axis=-1, dtype=tf.float32)\n neglogp_per_step = safe_cross_entropy(actions_one_hot, logprobs, axis=2) # Sum over action dim\n\n neglogp = tf.reduce_sum(neglogp_per_step * mask, axis=1) # Sum over time dim\n\n # NOTE 1: The above implementation is the same as the one below:\n # neglogp_per_step = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=actions)\n # neglogp = tf.reduce_sum(neglogp_per_step, axis=1) # Sum over time\n # NOTE 2: The above implementation is also the same as the one below, with a few caveats:\n # Exactly equivalent when removing priors.\n # Equivalent up to precision when including clipped prior.\n # Crashes when prior is not clipped due to multiplying zero by -inf.\n # neglogp_per_step = -tf.nn.log_softmax(logits + tf.clip_by_value(priors, -2.4e38, 0)) * actions_one_hot\n # neglogp_per_step = tf.reduce_sum(neglogp_per_step, axis=2)\n # neglogp = tf.reduce_sum(neglogp_per_step, axis=1) # Sum over time\n\n # If entropy_gamma = 1, entropy_gamma_decay_mask == mask\n entropy_gamma_decay_mask = entropy_gamma_decay * mask # ->(batch_size, max_length)\n entropy_per_step = safe_cross_entropy(probs, logprobs, axis=2) # Sum over action dim -> (batch_size, max_length)\n entropy = tf.reduce_sum(entropy_per_step * entropy_gamma_decay_mask, axis=1) # Sum over time dim -> (batch_size, )\n\n return neglogp, entropy\n\n # On policy batch\n self.sampled_batch_ph = make_batch_ph(\"sampled_batch\")\n\n # Memory batch\n self.memory_batch_ph = 
make_batch_ph(\"memory_batch\")\n memory_neglogp, _ = make_neglogp_and_entropy(self.memory_batch_ph)\n self.memory_probs = tf.exp(-memory_neglogp)\n self.memory_logps = -memory_neglogp\n\n # PQT batch\n if pqt:\n self.pqt_batch_ph = make_batch_ph(\"pqt_batch\")\n\n # Setup losses\n with tf.name_scope(\"losses\"):\n\n neglogp, entropy = make_neglogp_and_entropy(self.sampled_batch_ph)\n r = self.sampled_batch_ph.rewards\n\n # Entropy loss\n entropy_loss = -self.entropy_weight * tf.reduce_mean(entropy, name=\"entropy_loss\")\n loss = entropy_loss\n\n\n if not pqt or (pqt and pqt_use_pg):\n # Baseline is the worst of the current samples r\n pg_loss = tf.reduce_mean((r - self.baseline) * neglogp, name=\"pg_loss\")\n # Loss already is set to entropy loss\n loss += pg_loss\n\n # Priority queue training loss\n if pqt:\n pqt_neglogp, _ = make_neglogp_and_entropy(self.pqt_batch_ph)\n pqt_loss = pqt_weight * tf.reduce_mean(pqt_neglogp, name=\"pqt_loss\")\n loss += pqt_loss\n\n self.loss = loss\n\n def make_optimizer(name, learning_rate):\n if name == \"adam\":\n return tf.train.AdamOptimizer(learning_rate=learning_rate)\n if name == \"rmsprop\":\n return tf.train.RMSPropOptimizer(learning_rate=learning_rate, decay=0.99)\n if name == \"sgd\":\n return tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n raise ValueError(\"Did not recognize optimizer '{}'\".format(name))\n\n # Create training op\n optimizer = make_optimizer(name=optimizer, learning_rate=learning_rate)\n with tf.name_scope(\"train\"):\n self.grads_and_vars = optimizer.compute_gradients(self.loss)\n self.train_op = optimizer.apply_gradients(self.grads_and_vars)\n # The two lines above are equivalent to:\n # self.train_op = optimizer.minimize(self.loss)\n with tf.name_scope(\"grad_norm\"):\n self.grads, _ = list(zip(*self.grads_and_vars))\n self.norms = tf.global_norm(self.grads)\n\n if debug >= 1:\n total_parameters = 0\n print(\"\")\n for variable in tf.trainable_variables():\n shape = variable.get_shape()\n n_parameters = np.product(shape)\n total_parameters += n_parameters\n print(\"Variable: \", variable.name)\n print(\" Shape: \", shape)\n print(\" Parameters:\", n_parameters)\n print(\"Total parameters:\", total_parameters)\n\n # Create summaries\n with tf.name_scope(\"summary\"):\n if self.summary:\n if not pqt or (pqt and pqt_use_pg):\n tf.summary.scalar(\"pg_loss\", pg_loss)\n \n if pqt:\n tf.summary.scalar(\"pqt_loss\", pqt_loss)\n tf.summary.scalar(\"entropy_loss\", entropy_loss)\n tf.summary.scalar(\"total_loss\", self.loss)\n tf.summary.scalar(\"reward\", tf.reduce_mean(r))\n tf.summary.scalar(\"baseline\", self.baseline)\n tf.summary.histogram(\"reward\", r)\n tf.summary.histogram(\"length\", self.sampled_batch_ph.lengths)\n for g, v in self.grads_and_vars:\n tf.summary.histogram(v.name, v)\n tf.summary.scalar(v.name + '_norm', tf.norm(v))\n tf.summary.histogram(v.name + '_grad', g)\n tf.summary.scalar(v.name + '_grad_norm', tf.norm(g))\n tf.summary.scalar('gradient norm', self.norms)\n self.summaries = tf.summary.merge_all()\n else:\n self.summaries = tf.no_op()\n\n def sample(self, n):\n \"\"\"Sample batch of n expressions\"\"\"\n\n feed_dict = {self.batch_size : n}\n\n actions, obs, priors = self.sess.run([self.actions, self.obs, self.priors], feed_dict=feed_dict)\n\n return actions, obs, priors\n\n\n def compute_probs(self, memory_batch, log=False):\n \"\"\"Compute the probabilities of a Batch.\"\"\"\n\n feed_dict = {\n self.memory_batch_ph : memory_batch\n }\n\n if log:\n fetch = self.memory_logps\n else:\n 
fetch = self.memory_probs\n probs = self.sess.run([fetch], feed_dict=feed_dict)[0]\n return probs\n\n\n def train_step(self, b, sampled_batch, pqt_batch):\n \"\"\"Computes loss, trains model, and returns summaries.\"\"\"\n feed_dict = {\n self.baseline : b,\n self.sampled_batch_ph : sampled_batch\n }\n\n if self.pqt:\n feed_dict.update({\n self.pqt_batch_ph : pqt_batch\n })\n\n summaries, _ = self.sess.run([self.summaries, self.train_op], feed_dict=feed_dict)\n\n return summaries\n" ]
[ [ "tensorflow.get_variable", "tensorflow.concat", "tensorflow.unstack", "tensorflow.random_uniform_initializer", "tensorflow.cast", "tensorflow.expand_dims", "tensorflow.one_hot", "tensorflow.variable_scope", "tensorflow.nn.embedding_lookup" ], [ "tensorflow.nn.raw_rnn", "numpy.product", "tensorflow.nn.log_softmax", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.equal", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.py_func", "tensorflow.contrib.layers.variance_scaling_initializer", "tensorflow.logical_or", "tensorflow.layers.dense", "tensorflow.name_scope", "tensorflow.trainable_variables", "tensorflow.nn.rnn_cell.GRUCell", "tensorflow.norm", "tensorflow.train.RMSPropOptimizer", "tensorflow.TensorArray", "tensorflow.zeros_initializer", "tensorflow.exp", "tensorflow.placeholder", "tensorflow.train.GradientDescentOptimizer", "tensorflow.summary.merge_all", "tensorflow.one_hot", "tensorflow.no_op", "tensorflow.sequence_mask", "tensorflow.global_norm", "tensorflow.summary.histogram", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.broadcast_to", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.ones_like", "tensorflow.ones", "tensorflow.expand_dims", "tensorflow.variable_scope", "tensorflow.multinomial" ] ]
chiptrontech/WiredQTv1.0
[ "760948bb736867db4e772031b23ed9151e0364b9" ]
[ "WiredQT/examples/BySomeBody/flask/wired_module.py" ]
[ "import time\nimport datetime\nimport os\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtGui import * \nfrom PyQt5.Qsci import QsciScintilla, QsciLexerPython\nfrom copy import deepcopy\ntry:\n\timport RPi.GPIO as GPIO\n\tGPIO.setmode(GPIO.BCM)\n\tGPIO.setwarnings(False)\n\tHIGH=GPIO.HIGH\n\tLOW=GPIO.LOW\n\tINPUT=GPIO.IN\n\tOUTPUT=GPIO.OUT\n\tINPUT_PULLUP=GPIO.PUD_UP\n\tdef digitalWrite(pin,state):\n\t\tGPIO.output(pin,state)\n\tdef digitalRead(pin):\n\t\treturn HIGH if GPIO.input(pin) else LOW\n\tdef pinMode(pin,direction):\n\t\tif direction==INPUT_PULLUP:\n\t\t\tGPIO.setup(pin,GPIO.IN,pull_up_down=GPIO.PUD_UP)\n\t\telse:\n\t\t\tGPIO.setup(pin,direction)\nexcept:\n\tprint (\"\")\ndef wtaprop():\n\twtaprop=eval(\"{'Name': 'Label1', 'Var': '', 'Font': '', 'Enable': 'True', 'Top': '0', 'Width': '100', 'ParentsType': '', 'Picture': '', 'ForeColor': '(0,0,0,1)', 'Visible': 'True', 'BackColor': '(1,1,1,0.25)', 'Events': '[]', 'Height': '100', 'Left': '0', 'Tag': '', 'Text': 'Label1', 'Help': ''}\")\n\treturn deepcopy(wtaprop)\nclass TableModel(QtCore.QAbstractTableModel):\n\tdef __init__(self, columnName,data):\n\t\tsuper(TableModel, self).__init__()\n\t\tself._data = data\n\t\tself._columnName = columnName\n\tdef data(self, index, role):\n\t\tif role == Qt.DisplayRole:\n\t\t\treturn self._data[index.row()][index.column()]\n\t\tif role == Qt.BackgroundRole:\n\t\t\treturn QColor('#fcaeff')\t\t\n\tdef rowCount(self, index):\n\t\treturn len(self._data)\n\n\tdef columnCount(self, index):\n\t\treturn len(self._data[0])\n\tdef headerData(self, column, orientation, role=QtCore.Qt.DisplayRole):\n\t\tif role!=QtCore.Qt.DisplayRole:\n\t\t\treturn QtCore.QVariant()\n\t\tif orientation==QtCore.Qt.Horizontal:\n\t\t\treturn QtCore.QVariant(self._columnName[column]) \t\ntry:\n\timport ctypes\n\tuser32 = ctypes.windll.user32\n\tscreensize = [user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)]\n\tresizeW=float(screensize[0]/1366.0)\n\tresizeH=float(screensize[1]/768.0)\t\n\tprint (screensize)\nexcept:\n\tscreensize = [1320, 768]\n\tresizeW=float(screensize[0]/1320.0)\n\tresizeH=float(screensize[1]/768.0)\n\t\ndef millis():\n\treturn time.time()*1000\nclass Scheduler:\n\tdef __init__(self,ms_timeout):\n\t\tself._ms_timeout=ms_timeout\n\t\tself._flag=False;\n\tdef Start(self):\n\t\tself._startms=millis()\n\t\tself._flag=True;\n\tdef Stop(self):\n\t\tself._flag=False;\t\t\n\tdef Event(self):\n\t\tbuf=0;\n\t\tif(self._flag==False):\n\t\t\treturn False;\n\t\tbuf=millis()-self._startms;\n\t\tif(buf>=self._ms_timeout):\n\t\t\tself._flag=False;\n\t\t\treturn True;\n\t\telse:\n\t\t\treturn False;\ndef getCSV(x,delimiter,str_data):\n\tgetCSVret = \"\"\n\tif(x<0):\n\t\treturn \"\" #if negative index\n\tb = 0\n\twhile (x != 0):\n\t\tif (str_data[b] == delimiter):\n\t\t\tx = x - 1\n\t\tb = b + 1\n\tif(b>=len(str_data)):\n\t\treturn \"\" #if index out of range\n\twhile ((str_data[b] != delimiter) and ((len(str_data)+1) != b)):\n\t\tgetCSVret = getCSVret + str_data[b]\n\t\tb = b + 1\n\t\tif(b>=len(str_data)):\n\t\t\tbreak\n\treturn getCSVret\n\ndef delay(x):\n\ttime.sleep(x*1.0/1000)\ndef delayMicroseconds(x):\n\n\ttime.sleep(x*1.0/1000000)\n\nclass LapTimer:\n\tdef __init__(self):\n\t\tself.startms=millis();\n\t\tself.started=False;\n\tdef start(self):\n\t\tself.started=True;\n\t\tself.startms=millis();\n\tdef stop(self):\n\t\tself.started=False;\n\tdef currTime(self):\n\t\treturn millis()-self.startms;\nclass Poll(object):\n\tdef 
__init__(self,search):\n\t\tself.currPointer=0\n\t\tself.size_st=len(search)\n\t\tself.st=search\n\tdef Poll(self,x):\n\t\tif(self.st[self.currPointer]==x):\n\t\t\tself.currPointer=self.currPointer+1\n\t\telse:\n\t\t\tself.currPointer=0;\n\t\t\tif(self.st[self.currPointer]==x):\t\t#WTF \n\t\t\t\tself.currPointer=self.currPointer+1\t#WTF\t\t\t\t\n\t\tif(self.currPointer==self.size_st):\n\t\t\tself.currPointer=0\n\t\t\treturn True\n\t\treturn False\t\n'''\nx=ParserLangguage(sequence=\"eat that\")\nword=\"dont\\teat\\r\\t that\\nball\\r\" \nt=x.Parse(word)\nprint(word[t[0]:])\nprint(word[t[1]:])\nreturns tuple of start and end if sequence is found in word\nreturn None if not found\n'''\nclass ParserLangguage:\n\tdef __init__(self,sequence,wspace=\"\\r\\n\\t \\'\\\"(\"):\n\t\tself.sequence=sequence.split()\n\t\tself.poll=[]\n\t\tfor a in self.sequence:\n\t\t\t_poll=Poll(a)\n\t\t\tself.poll.append(_poll)\n\t\tself.wspace=wspace\n\n\tdef Parse(self,strings):\n\t\tcurr_sequence=0\n\t\tstrs=\"\"\n\t\tstart=0\n\t\tend=0\n\t\tfor i,a in enumerate(strings):\n\t\t\tif a in self.wspace:\n\t\t\t\t#print(strs)\n\t\t\t\tif strs!=\"\" and curr_sequence!=0:\n\t\t\t\t\tcurr_sequence=0\t#word not in order,reset curr_sequence\n\t\t\t\tstrs=\"\"\n\t\t\t\tcontinue\n\t\t\tstrs+=a\n\t\t\tif self.poll[curr_sequence].Poll(a):\n\t\t\t\t\n\t\t\t\tif self.sequence[curr_sequence]==strs:\n\t\t\t\t\t#print(i,strs)\n\t\t\t\t\t#print strings[i+1:i+20]\n\t\t\t\t\tcurr_sequence+=1\n\t\t\t\t\tif curr_sequence==1:\n\t\t\t\t\t\tstart=i-len(strs)+1\n\t\t\t\t\tif curr_sequence==len(self.sequence):\n\t\t\t\t\t\tend=i+1\n\t\t\t\t\t\treturn start,end \n\t\t\t\t\tstrs=\"\"\n\t\t\t\t\tself.poll[curr_sequence].currPointer=0#bug to fix this\n\t\treturn None\t\t\t\n\nclass Parser():\n\tdef __init__(self,_header,_terminator,numterminator,_sizeofdata=10):\n\t\tself.size=_sizeofdata\n\t\tself._numterminator=numterminator\n\t\tself._numterminatorbuf=0\n\t\tself.terminator=_terminator\n\t\tself.st=_header\n\t\tself.currPointer=0\n\t\tself.state=False\n\t\tself.size_st=len(_header);\n\tdef Poll(self,x):\n\t\tif(self.st[self.currPointer]==x):\n\t\t\tself.currPointer=self.currPointer+1\n\t\telse:\n\t\t\tself.currPointer=0;\n\t\tif(self.currPointer==self.size_st):\n\t\t\tself.currPointer=0\n\t\t\treturn True\n\t\treturn False\n\tdef available(self,x):\n\t\tif(self.Poll(x)==True):\n\t\t\tself.state=True\n\t\t\t#self.index=0\n\t\t\tself.data=\"\"\n\t\t\treturn False\n\t\tif(self.state):\n\t\t\tif(x==self.terminator or self.terminator==''):\n\t\t\t\tself._numterminatorbuf=self._numterminatorbuf+1\n\t\t\t\tif(self._numterminator==self._numterminatorbuf):\n\t\t\t\t\tself._numterminatorbuf=0\n\t\t\t\t\t#*(data+(index%size))=0;\n\t\t\t\t\tself.state=False;\n\t\t\t\t\treturn True\n\t\t\tself.data=self.data+x\n\t\t\t\t#self.index++\n\t\treturn False\n\n\ndef Map(x, in_min , in_max , out_min, out_max):\n\treturn (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min\n\ndef Constrain(amt, low, high):\n\tif (amt < low):\n\t\tamt = low\n\tif (amt > high):\n\t\tamt = high\n\treturn amt\n\n\n\ndef GetFileSize(fname):\n\tfname=fname.replace('\\\\','/')\n\treturn os.stat(fname).st_size\ndef GetFileStr(fname):\n\tfname=fname.replace('\\\\','/')\n\tfo = open(fname, \"r+\")\n\tstr = fo.read(GetFileSize(fname))\n\treturn str\ndef SaveFileStr(fname,str_to_write):\n\tfname=fname.replace('\\\\','/')\n\tfo = open(fname, \"w\")\n\tfo.write(str_to_write);\n\tfo.close()\ndef FileExist(fname):\n\tfname=fname.replace('\\\\','/')\n\treturn os.path.exists(fname)\ndef 
ListAllFile():\n\treturn filter(os.path.isfile, os.listdir('.')) \ndef ListAllDir():\n\treturn filter(os.path.isdir, os.listdir('.'))\ndef GetFilename(fname):\n\tfname=fname.replace('\\\\','/')\n\tret=\"\"\n\tfor a in fname[::-1]:\n\t\tif a=='\\\\' or a=='/':\n\t\t\tbreak \n\t\tret+=a\n\treturn ret[::-1]\ndef GetFilenameNoEXT(fname):\n\t\n\tstrs=GetFilename(fname)\n\tret=\"\"\n\tfor a in strs:\n\t\tif a=='.':\n\t\t\tbreak \n\t\tret+=a \n\treturn ret\ndef GetPath(fname):\n\treturn os.path.dirname(fname)+\"/\"\n\ndef CreateFileName():\n\ts=time.localtime(time.time())\n\treturn str(s.tm_mon) + '_'+str(s.tm_mday) + '_'+str(s.tm_year) + '_'+str(s.tm_hour) + '_'+str(s.tm_min) + '_'+str(s.tm_sec) \ndef DateToString():\n\ts=time.localtime(time.time())\n\treturn str(s.tm_mon) + '/'+str(s.tm_mday) + '/'+str(s.tm_year) \ndef Date():\n\ts=time.localtime(time.time())\n\treturn datetime.date(s.tm_year,s.tm_mon,s.tm_mday) \n\ndef TimeToString():\n\ts=time.localtime(time.time())\n\treturn str(s.tm_hour) + ':'+str(s.tm_min) + ':'+str(s.tm_sec)\ndef Time():\n\ts=time.localtime(time.time())\n\treturn datetime.time(s.tm_hour,s.tm_min,s.tm_sec)\ndef Now():\n\treturn datetime.datetime.now()\ndef NowToString():\n\ts=Now()\n\treturn str(s.month) + \"/\" +str(s.day) + \"/\" + str(s.year) + \" \" +str(s.hour) + \":\" +str(s.minute) + \":\" +str(s.second)\n\ndef TimeInBetween(mintime,maxtime,curr):\n\treturn (mintime<=curr) and (maxtime>=curr)\n\n\n\n\n\ndef refreshGTK():\n\tpass\ndef TrueFalse(val):\n\tif val in ['True',\"False\"]:\n\t\tif val=='True':\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\treturn val\t\n\ndef ExitApplication():\n\t#Gtk.main_quit()\n\tpass\nclass usercontrol(object):\n\tdef __init__(self):\n\t\ta=0\n\t\tpass#self.usercontrol\n\t@property\t\n\tdef Top(self):\n\t\treturn self.y()\n\t@Top.setter\t\n\tdef Top(self,value):\n\t\tself.move(self.x(),int(value))\n\t@property\t\n\tdef Left(self):\n\t\treturn self.x()\n\t@Left.setter\t\n\tdef Left(self,value):\n\t\tself.move(value,self.y())\n\t@property\t\n\tdef Width(self):\n\t\treturn self.width()\n\t@Width.setter\t\n\tdef Width(self,value):\n\t\tself.setFixedWidth(int(value))\n\t@property\t\n\tdef Height(self):\n\t\t'''fck'''\n\t\treturn self.height()\n\t@Height.setter\t\n\tdef Height(self,value):\n\t\t'''fck'''\n\t\tself.setFixedHeight(int(value))\n\t@property\t\n\tdef Visible(self):\n\t\treturn self.isVisible()\n\t@Visible.setter\t\n\tdef Visible(self,value):\n\t\t\n\t\tself.setVisible(TrueFalse(value))\n\t@property\n\tdef Text(self):\n\t\treturn self._text\n\t@Text.setter\n\tdef Text(self,value):\n\t\tself._text=value \n\t\tself.setWindowTitle(value)\ndef initUI(self,param,w=1366,h=768,title=\"WiredQT5.0\",controlbox=True,startpos=(200,200),timeoutdestroy=-1):\n\timport os\n\t\n\tself.caller=None\n\tself.param=None\n\tif len(param)!=0:\n\t\tself.caller=param[0]\n\t\tif len(param)>1:\n\t\t\tself.param=param[1]\t\t\n\tself.wiredevents={}\n\tself.setGeometry(startpos[0], startpos[1], w,h)\n\tself.setWindowTitle(title)\n\tif len(param)!=0:\n\t\tself.caller=param[0]\n\t\tif len(param)>1:\n\t\t\tself.param=param[1]\t\n\tdir_path = os.path.dirname(os.path.realpath(__file__))\n\tos.chdir(dir_path)\n\tself.form_load=False\n\tself.timeoutdestroy=timeoutdestroy;\n\tself.setWindowIcon(QIcon('icon.png'))\ndef createWidget(self,prop,control,parent,event=[]):\n\tprop=eval(prop)\n\tname=prop[\"Name\"]\n\n\tif prop['Tag']=='Activex':\n\t\tif control.find(\"/\")!=-1:#plugin ActiveX\n\t\t\timport importlib.util\n\t\t\tspec = 
importlib.util.spec_from_file_location(GetFilenameNoEXT(control), control)\n\t\t\tfoo = importlib.util.module_from_spec(spec)\t\n\t\t\tspec.loader.exec_module(foo)\n\t\t\texec(\"self.%s=foo.Handler(self)\" % (name)) \n\t\t\t\n\t\t\tcontrol=GetFilenameNoEXT(control)\n\t\t\t\n\t\telse:\t\n\t\t\texec('import '+ control) \n\t\t\texec(\"self.%s=%s.Handler(self)\" % (name,control))\n\t\texec(\"self._%s=self.%s\" % (name,name))\n\t\texec(\"self.%sW=forms(self._%s)\" % (name,name))\n\t\texec(\"self.%s.objectName='%s'\" % (name,name))\n\t\tfor a in prop:\n\t\t\tusr_act=eval(\"self.%s\"%(name))\n\t\t\tif hasattr(usr_act,a):\n\t\t\t\ttry:\n\t\t\t\t\texec(\"self.%s.%s='%s'\"%(name,a,prop[a]))\t\t\n\t\t\t\texcept:\n\t\t\t\t\ta=a\n\telse:\t\n\t\n\t\texec(\"self._%s=%s()\" % (name,control))\n\t\texec(\"self._%s.objectName='%s'\" % (name,name))\n\t\t\t\n\t\texec(\"self.%s=forms(self._%s)\" % (name,name))\n\tdontaltertextandcolor=[\"QScrollArea\",\"QWidget\"]\t\n\tif (control in dontaltertextandcolor):\n\t\tcontrol=control\n\t\t\n\t\t\n\telse:\t\n\t\texec(\"self.%s.Text='%s'\"%(name,prop[\"Text\"]))\t\n\t\tif (control in dontaltertextandcolor)==False:\n\t\t\t\n\t\t\tprop[\"BackColor\"]=prop[\"BackColor\"].replace(\"(\",\"\");prop[\"BackColor\"]=prop[\"BackColor\"].replace(\")\",\"\");\n\t\t\texec(\"self.%s.BackColor=%s\"%(name,prop[\"BackColor\"]))\t\t\t\n\t\t\tprop[\"ForeColor\"]=prop[\"ForeColor\"].replace(\"(\",\"\");prop[\"ForeColor\"]=prop[\"ForeColor\"].replace(\")\",\"\");\n\t\t\texec(\"self.%s.ForeColor=%s\"%(name,prop[\"ForeColor\"]))\t\t\n\tif prop[\"Font\"]!='':\t\n\t\texec(\"self.%s.Font='%s'\"%(name,prop[\"Font\"]))\n\texec(\"self.%s.Enable=%s\"%(name,TrueFalse(prop[\"Enable\"])))\n\t\n\tif prop['Tag']=='Activex':\n\t\texec(\"self.%sW.Dimension=%s\"%(name,prop[\"Width\"]+\",\"+prop[\"Height\"]))\n\t\texec(\"self.%s.activeXcreated()\"%(name))\n\t\t\n\telse:\n\t\t\"self.%s.setParent(self);self.%s.Left=%s;self.%s.Top=%s;\"%(name,name,prop[\"Left\"],name,prop[\"Top\"])#widget that has no parent,put it to our usercontrol\n\t\tif control==\"QScrollArea\" and 0:\n\t\t\texec(\"self._%swid=QWidget()\" % (name))\n\t\t\texec(\"self._%s.setWidget(self._%swid)\" % (name,name))\n\t\t\texec(\"self._%swid.setFixedHeight(%s)\" % (name,prop[\"Height\"]))\n\t\t\texec(\"self._%swid.setFixedWidth(%s)\" % (name,prop[\"Width\"]))\n\t\t\texec(\"forms(self._%swid).Left=%s\" % (name,0))\n\t\t\texec(\"forms(self._%swid).Top=%s\" % (name,0))\n\t\t\texec(\"self._%swid.setVisible(True)\" % (name))\t\t\n\t\n\tif prop['ParentsType'] in [\"QWidget\",\"QGroupBox\",\"QFrame\",\"QScrollArea\"]:\n\t\tcmd=\"self.%s.setParent(%s.obj)\"%(name,parent)\t\n\telif prop['ParentsType'] in[\"QScrollArea\"]:\n\t\tcmd=\"self.%s.setWidget(%s.obj)\"%(name,parent)\t\n\telse:\n\t\tif prop['Tag']=='Activex':\n\t\t\tcmd=\"self.%s.setParent(self);self.%s.Left=%s;self.%s.Top=%s;\"%(name,name,prop[\"Left\"],name,prop[\"Top\"])#widget that has no parent,put it to our usercontrol\n\t\telse:\n\t\t\tcmd=\"self.%s.setParent(self);self.%s.Left=%s;self.%s.Top=%s;\"%(name,name,prop[\"Left\"],name,prop[\"Top\"])#widget that has no parent,put it to our usercontrol\n\t\n\n\texec(cmd)\n\ttry:\n\t\texec(\"self.%s.Width=%s;\"%(name,prop[\"Width\"]))\n\t\texec(\"self.%s.Height=%s;\"%(name,prop[\"Height\"]))\n\t\texec(\"self.%s.Top=%s;\"%(name,prop[\"Top\"]))\n\t\texec(\"self.%s.Left=%s;\"%(name,prop[\"Left\"]))\t\t\n\texcept:\n\t\tpass\n\tif control==\"QLabel\":\n\t\ttry:\n\t\t\texec(\"self.%s.LoadPicture='%s'\" % (name, 
prop['Picture']))\n\t\texcept:\n\t\t\tpass\n\t\t\n\tevent=eval(event)\n\texec(\"self.%s.Visible=%s\"%(name,TrueFalse(prop[\"Visible\"])))\n\tfor a in event:\n\t\tif prop['Tag']=='Activex':\n\t\t\tstrs=\"self.%s.connect('%s',self.%s_%s)\"%(name,a[0],name,a[0].replace('-','_'))\n\t\telse:\n\t\t\tstrs=\"self._%s.%s.connect(self.%s_%s)\"%(name,a[0],name,a[0].replace('-','_'))\n\t\ttry:\n\t\t\t\t\n\t\t\teval(strs)\n\t\texcept:\n\t\t\ttry:\n\t\t\t\tstrs=\"self._%s.%s(self)\"%(name,a[0])\n\t\t\t\teval(strs)\t\t\t\t\t\n\t\t\texcept:\t\n\t\t\t\tprint(\"Unknown Signal '%s' for self._%s\"%(a[0],name))\ndef setPicture (imgGTK,imgCV2):\n\tchannel=1\t\n\timport cv2\n\tif len(imgCV2.shape)==3:\n\t\theight, width, channel = imgCV2.shape\n\tif len(imgCV2.shape)==2:\n\t\theight, width = imgCV2.shape\n\tbytesPerLine = channel * width\n\tif channel==4:\n\t\tqImg = QImage(imgCV2.data, width, height, bytesPerLine, QImage.Format_ARGB32)\n\tif channel==3:\n\t\tqImg = QImage(imgCV2.data, width, height, bytesPerLine, QImage.Format_RGB888)\n\tif channel==1:\n\t\tqImg = QImage(imgCV2.data, width, height, bytesPerLine, QImage.Format_Grayscale8)\n\n\timgGTK.setPixmap(QPixmap.fromImage(qImg));\n\treturn imgCV2\n\ndef app_path():\n\treturn os.path.dirname(os.path.realpath(__file__))\t\t\t\nclass forms(object):\n\tdef __init__(self,obj):\n\t\tself.obj=obj\n\t\tself.var=\"\"\n\t\tself.cv=None\n\tdef __getattr__(self,attr):#WTF!!! it took me a month to figure this out,. \n\t\t#thanks goes to StackOverFlow and to their brilliant and very helpful users\n\t\tdef wrapper(*args, **kw):\n\t\t\t#print('called with %r and %r' % (args, kw))\n\t\t\treturn getattr(self.obj, attr)(*args, **kw)\n\t\treturn wrapper\t\t\n\tdef SetFocus(self):\n\t\tpass\t\n\t@property\n\tdef Text(self):\n\t\tself.obj\n\t\tpass\n\t@Text.setter\n\tdef Text(self,value):\n\t\tif type(value)==int or type(value)==float:\n\t\t\tvalue=str(value)\n\t\tpass\n\t@property\n\tdef LoadPicture(self):\t\t\n\t\tpass\n\t@LoadPicture.setter\n\tdef LoadPicture(self,fname):\n#\t\tif type(self.obj)==Gtk.Image:\n#\t\t\tself.obj.set_from_file (fname)\t\n\t\tif type(self.obj)==QLabel:\n\t\t\tself.obj.setAlignment(Qt.AlignCenter)\n\t\t\tpix=QPixmap()\n\t\t\t_fname=GetFilename(fname)\n\t\t\tif FileExist(_fname):\n\t\t\t\tfname=_fname\n\t\t\tif pix.load(fname):\n\t\t\t\tpix = pix.scaled(self.obj.size(),Qt.KeepAspectRatio)\n\t\t\t\tself.obj.setPixmap(pix)\t\n\t\t\t\t#Use QImage to cv2\n\t\t\t\timport numpy as np\n\t\t\t\timport cv2\n\t\t\t\ttmp = QImage(pix)\n\t\t\t\ttmp = tmp.convertToFormat(4)\n\t\t\t\n\t\t\t\twidth = tmp.width()\n\t\t\t\theight = tmp.height()\n\t\t\t\n\t\t\t\tptr = tmp.bits()\n\t\t\t\tptr.setsize(tmp.byteCount())\n\t\t\t\tarr = np.array(ptr).reshape(height, width, 4) # Copies the data\n\t\t\t\tself.cv= cv2.cvtColor(arr,cv2.COLOR_RGBA2BGR)\t\t\t\t\n\t\t\tpass\n\t@property\n\tdef LoadPictureNoResize(self):\n\t\tpass\n\t@LoadPicture.setter\n\tdef LoadPictureNoResize(self,fname):\n\t\tpass\n\tdef imread(self):\n\t\treturn self.cv\n\t@property\n\tdef LoadPictureOCV(self):\t\t\n\t\tpass\n\t@LoadPicture.setter\n\tdef LoadPictureOCV(self,imOCV):\n\t\tif type(self.obj)==QLabel:\n\t\t\timport cv2\n\t\t\tif len(imOCV.shape)==3:\n\t\t\t\theight, width, channel = imOCV.shape\n\t\t\t\tif channel==3:\n\t\t\t\t\timOCV= cv2.cvtColor(imOCV,cv2.COLOR_BGR2RGB)\n\t\t\tself.cv=setPicture(self.obj,imOCV)\t\n\t@property\n\tdef Active(self):\n\t\tif hasattr(self.obj,\"isChecked\"):\n\t\t\treturn self.obj.isChecked()\n\t@Active.setter\n\tdef Active(self,value):\n\t\tif 
hasattr(self.obj,\"isChecked\"):\n\t\t\tself.obj.setChecked(TrueFalse(value))\n\t@property\n\tdef Var(self):\t\t\n\t\treturn self.var\n\t@Var.setter\n\tdef Var(self,value):\n\t\tself.var=value\t\n\t\t\n\t@property\n\tdef List(self):\n\t\tlst=[]\n\t\tfor a in range(self.obj.count()):\n\t\t\tlst.append(self.obj.itemText(a))\n\t\treturn lst\n\t@List.setter\n\tdef List(self,value):\n\t\tself.obj.clear()\t\t\n\t\tself.obj.addItems(value)\t\t\n\t'''@property\n\tdef LoadPictureOCV(self):\t\t\n\t\tpass\n\t@LoadPicture.setter\n\tdef LoadPictureOCV(self,imOCV):\n\t\tif type(self.obj)==Gtk.Image:\n\t\t\tsetPicture(self.obj,imOCV)\n\t'''\n\t\n\t@property\n\tdef ListIndex(self):\t\n\t\treturn self.obj.currentIndex()\n\t\t\n\t@ListIndex.setter\t\n\tdef ListIndex(self,index):\n\t\tself.obj.setCurrentIndex(index)\n\t@property\n\tdef ListClear(self):\n\t\tpass\n\t@property\n\tdef ForeColor(self):\t\t#value=0.0, 1.0, 0.0, 1.0 #RGBA\n\t\tbuf=self.obj.styleSheet()\n\t\tif buf==\"\":return\n\t\ta=buf.split(';')\n\t\tstyle=''\n\t\tfor b in a:\n\t\t\tif b.find('color')!=1 and b.find('background')==-1 and b!='':\n\t\t\t\tret=eval(b[b.find('('):])#return tuple\n\t\t\t\tret=list(ret);ret[0]/=255;ret[1]/=255;ret[2]/=255;ret[3]/=255;\n\t\t\t\treturn ret\n\t\t\n\t\treturn (0,0,0,1)\n\t@ForeColor.setter\n\tdef ForeColor(self,*value):\t\t#value=0.0, 1.0, 0.0, 1.0\n\t\tif type(value[0])==str:\n\t\t\tvalue=[eval(value[0])]\n\t\ttry:\n\t\t\tbuf=self.obj.styleSheet()\n\t\t\ta=buf.split(';')\n\t\t\tstyle=''\n\t\t\tfor b in a:\n\t\t\t\tif (b.find('color')==-1 or b.find('background')!=-1) and b!='':\n\t\t\t\t\tstyle+=b+\";\"\n\t\t\tx=255*value[0][0],255*value[0][1],255*value[0][2],255*value[0][3]\n\t\t\tbck='color:rgba'+str(x)+';'\t\t\t\n\t\t\tstyle+=bck\n\t\t\tself.obj.setStyleSheet(style); \n\t\texcept:\n\t\t\tpass\n\t\t#self.obj.override_background_color(Gtk.StateFlags.NORMAL, Gdk.RGBA(value[0][0],value[0][1],value[0][2],value[0][3])) \n\t\tpass\n\t\tpass\n\t@property\n\tdef BackColor(self):\t\t#value=0.0, 1.0, 0.0, 1.0 #RGBA\n\t\tbuf=self.obj.styleSheet()\n\t\ta=buf.split(';')\n\t\tstyle=''\n\t\tfor b in a:\n\t\t\tif b.find('background-color')!=-1:\n\t\t\t\tret=eval(b[b.find('('):])#return tuple\n\t\t\t\tret=list(ret);ret[0]/=255;ret[1]/=255;ret[2]/=255;ret[3]/=255;\t\t\t\t\n\t\t\t\treturn ret\n\t\t\n\t\treturn (0,0,0,0)\n\t@BackColor.setter\n\tdef BackColor(self,*value):\t\t#value=0.0, 1.0, 0.0, 1.0\n\t\tif type(value[0])==str:\n\t\t\tvalue=[eval(value[0])]\t\t\n\t\ttry:\n\t\t\tbuf=self.obj.styleSheet()\n\t\t\ta=buf.split(';')\n\t\t\tstyle=''\n\t\t\tfor b in a:\n\t\t\t\tif b.find('background-color')==-1 and b!='':\n\t\t\t\t\tstyle+=b+\";\"\n\t\t\tx=int(255*value[0][0]),int(255*value[0][1]),int(255*value[0][2]),int(255*value[0][3])\n\t\t\tbck='background-color:rgba'+str(x)+';'\t\t\t\n\t\t\tstyle+=bck\n\t\t\tself.obj.setStyleSheet(style); \n\t\texcept:\n\t\t\tpass\n\t\t#self.obj.override_background_color(Gtk.StateFlags.NORMAL, Gdk.RGBA(value[0][0],value[0][1],value[0][2],value[0][3])) \n\t\tpass\n\n\t@property \n\tdef Enable(self):\t\t#value=0.0, 1.0, 0.0, 1.0 #RGBA\n\t\treturn self.obj.isEnabled()\n\t@Enable.setter\n\tdef Enable(self,value):\t\t#value=0.0, 1.0, 0.0, 1.0\n\t\tself.obj.setEnabled(TrueFalse(value))\n\t\tpass#self.obj.set_sensitive(value)\n\tdef Move(self,xy):\t\t\n\t\tpass\n\tdef Location(self):\t\t#value=0.0, 1.0, 0.0, 1.0 #RGBA\n\t\tpass\n\t@property\n\tdef Dimension(self):\n\t\treturn forms(self.obj).Width,forms(self.obj).Height\t\n\t@Dimension.setter\n\tdef 
Dimension(self,wh):\n\t\tforms(self.obj).Width=int(wh[0])\n\t\tforms(self.obj).Height=int(wh[1])\n\t\tpass\n\t\n\t@property\n\tdef Font(self):\n\t\tpass\n\t@Font.setter\n\tdef Font(self,font):\n\t\ttry:\n\t\t\t_font=font.split(' ')\n\t\t\tfont=font.replace(\" \" + _font[-1],\"\")\n\t\t\tsize=10\n\t\t\ttry:size=int(_font[-1])\n\t\t\texcept:pass\n\t\t\tself.obj.setFont(QFont(font,size))#font='Dejavu Sans Mono 20'\t\n\t\texcept:\n\t\t\tpass\n\t@property\t\n\tdef Top(self):\n\t\treturn self.obj.y()\n\t@Top.setter\t\n\tdef Top(self,value):\n\t\ttry:\n\t\t\tself.obj.move(self.obj.x(),int(value))\n\t\texcept:\n\t\t\tpass\n\t@property\t\n\tdef Left(self):\n\t\treturn self.obj.x()\n\t@Left.setter\t\n\tdef Left(self,value):\n\t\ttry:\n\t\t\tself.obj.move(int(value),self.obj.y())\n\t\texcept:\n\t\t\tpass\n\t@property\t\n\tdef Width(self):\n\t\treturn self.obj.width()\n\t@Width.setter\t\n\tdef Width(self,value):\n\t\tself.obj.setFixedWidth(int(value))\n\t@property\t\n\tdef Height(self):\n\t\treturn self.obj.height()\n\t@Height.setter\t\n\tdef Height(self,value):\n\t\tself.obj.setFixedHeight(int(value))\n\t@property\t\n\tdef Visible(self):\n\t\treturn self.obj.isVisible()\n\t@Visible.setter\t\n\tdef Visible(self,value):\n\t\ttry:\n\t\t\tself.obj.setVisible(TrueFalse(value))\n\t\texcept:\n\t\t\tpass\n\t@property\n\tdef Text(self):\n\t\ttry:\n\t\t\tif type(self.obj)==QTextEdit:\n\t\t\t\treturn self.obj.toPlainText()\n\t\t\tif type(self.obj)==QComboBox:\n\t\t\t\treturn self.obj.currentText()\t\n\t\t\tif type(self.obj)==QCheckBox:\n\t\t\t\treturn str(self.obj.isChecked())\n\t\t\treturn self.obj.text()\n\t\texcept:\n\t\t\treturn \"\"\n\t\t\n\t\t\n\t@Text.setter\n\tdef Text(self,value):\n\t\ttry:\n\t\t\tif type(self.obj)==QComboBox:\n\t\t\t\tself.obj.setCurrentText(value)\n\t\t\t\treturn\n\t\t\tif type(self.obj)==QCheckBox:\n\t\t\t\tself.obj.setChecked(TrueFalse(value))\n\t\t\t\treturn\n\t\t\tself.obj.setText(value)\t\n\t\texcept:\n\t\t\tpass\n\n\t@property\n\tdef ComboValues(self):\n\t\tpass\n\t@ComboValues.setter\t\t\t#=['123','456',1024]\n\tdef ComboValues(self,listvalues):\n\t\tpass\n\n\t@property\n\tdef ComboClear(self):\n\t\tpass\n\n\t@property\n\tdef Alignment(self):\t\t\n\t\tpass\n\t@Alignment.setter\n\tdef Alignment(self,value):\n\t\tif value=='LEFT' or value=='':\n\t\t\tvalue=0\n\t\tif value=='CENTER':\n\t\t\tvalue=0.5\t\t\t\n\t\tif value=='RIGHT':\n\t\t\tvalue=1\t\n\t\tpass\n\t'''\n\t\tif type(self.obj)==Gtk.Entry:\n\t\t\tself.obj.set_alignment (value)\n\t\tif type(self.obj)==Gtk.Label:\n\t\t\t_,y=self.obj.get_alignment ()\t\t\t\n\t\t\tself.obj.set_alignment (value,y)\t\t\t\n\t\t\t\t\n\t'''\n\n\n\n\n" ]
[ [ "numpy.array" ] ]
qgoestch/sinecity_testcases
[ "ec04ba707ff69b5c1b4b42e56e522855a2f34a65" ]
[ "main/case3_pe_ground_imp.py" ]
[ "# -*- coding: utf-8 -*-\n##\n# \\file case3_pe_ground_imp.py\n# \\title Study of an acoustic impulse reflected by an impedance ground.\n# \\author Pierre Chobeau\n# \\version 0.1\n# \\license BSD 3-Clause License\n# \\inst UMRAE (Ifsttar Nantes), LAUM (Le Mans Université)\n# \\date 2017, 21 Nov.\n##\nimport numpy as np\nimport os\nimport site\n\npe_path = os.path.join(os.getcwd().rsplit(os.sep, 1)[0], 'num_methods', 'pe')\nsite.addsitedir(pe_path)\nfrom init_pe_ground import pe_init_impgr\n\ntools_path = os.path.join(os.getcwd().rsplit(os.sep, 2)[0], 'tools')\nsite.addsitedir(tools_path)\nfrom pe_att_spectrum import attenuation_spectrum\n\n\ndef main(d_sr, h_s, h_r, sigma, freq):\n \"\"\"\n General script that launch the parabolic equation simulations. The main\n simulation parameters are defined from here, then send to the\n initialization module *init_pe_ground.py*.\n The results postprocessing (errors, spectrum...) may also be launched\n from here.\n\n :param d_sr: horizontal distances between the source and the receivers (m).\n :type d_sr: list of floats\n :param h_s: height of the source (m).\n :type h_s: float\n :param h_r: height of the receiver (m).\n :type h_r: float\n :param sigma: specific airflow resistivity (kNm-4s==kPam-2s==CGS).\n :type sigma: list of int\n :param freq: frequency of the simulation (Hz).\n :type freq: 1darray of floats\n \"\"\"\n rho = 1.2 # density (kg.m-3)\n c = 340.00 # sound speed (m.s-1)\n lamb = c / freq # wavelength (m) - list of floats\n delta_x = lamb / 5. # 0.017 # spatial step (m) - list of floats\n # delta_x = round(340. / 545.56 / 8. +\n # abs(340/545.56/8 - 340/545.56/10)/2., 3)\n # delta_x = 0.125\n case = 38 # integer that sorts of the saved folders in the results dir.\n # free_field = False\n disp_inst_p = True # display the pressure inside the domain, boolean.\n # for f_idx, f in enumerate(freq):\n # for sig in sigma:\n # for ff in [True, False]:\n # pe_init_impgr(delta_x[f_idx], d_sr, h_s, h_r, f, rho, c,\n # sig, case, ff, disp_inst_p)\n # # pe_init_impgr(delta_x[f_idx], d_sr, h_s, h_r, f, rho, c,\n # # sig, case, ff, disp_inst_p)\n\n attenuation_spectrum(rho, c, h_s, h_r, d_sr, freq, sigma, case)\n\n\nif __name__ == '__main__':\n d_sr = [10., 60., 100.]\n h_s = 2.\n h_r = 2.\n freq = np.logspace(np.log10(100.), np.log10(428.13291827), 13)\n # freq = np.logspace(np.log10(100.), np.log10(545.559), 15)\n # freq = np.logspace(np.log10(100.), np.log10(1000.), 20)\n # freq = np.arange(50., 275., 25)\n sigma = [200, 20000]\n main(d_sr, h_s, h_r, sigma, freq)\n" ]
[ [ "numpy.log10" ] ]
psFournier/robo-gym
[ "0e67a36c0cbeac885c53b92de8f3f1f13e286c9a" ]
[ "tests/robo-gym/envs/ur/test_ur_ee_positioning.py" ]
[ "import gym\nimport robo_gym\nimport math\nimport numpy as np \nimport pytest\n\nur_models = [pytest.param('ur3', marks=pytest.mark.nightly), \\\n pytest.param('ur3e', marks=pytest.mark.nightly), \\\n pytest.param('ur5', marks=pytest.mark.commit), \\\n pytest.param('ur5e', marks=pytest.mark.nightly), \\\n pytest.param('ur10', marks=pytest.mark.nightly), \\\n pytest.param('ur10e', marks=pytest.mark.nightly), \\\n pytest.param('ur16e', marks=pytest.mark.nightly), \\\n]\n\n@pytest.fixture(scope='module', params=ur_models)\ndef env(request):\n env = gym.make('EndEffectorPositioningURSim-v0', ip='robot-servers', ur_model=request.param)\n env.request_param = request.param\n yield env\n env.kill_sim()\n\n@pytest.mark.commit \ndef test_initialization(env):\n assert env.ur.model == env.request_param\n env.reset()\n done = False\n env.step([0,0,0,0,0])\n for _ in range(10):\n if not done:\n action = env.action_space.sample()\n observation, _, done, _ = env.step(action)\n\n assert env.observation_space.contains(observation)\n\n@pytest.mark.nightly\n@pytest.mark.flaky(reruns=3)\ndef test_self_collision(env):\n collision_joint_config = {'ur3': [0.0, 0.0, -3.14, -1.77, 1.0], \\\n 'ur3e': [0.0, -1.88, 2.8, -0.75, -1.88], \\\n 'ur5': [0.0, -1.26, -3.14, 0.0, 0.0], \\\n 'ur5e': [0.0, -0.50, -3.14, 3.14, 0.0], \\\n 'ur10': [0.0, -1.5, 3.14, 0.0, 0.0], \\\n 'ur10e': [0.0, -0.15, -2.83, -2.51, 1.63], \\\n 'ur16e': [0.0, -1.15, 2.9, -0.19, 0.42]}\n env.reset()\n action = env.ur.normalize_joint_values(collision_joint_config[env.ur.model])\n done = False\n while not done:\n _, _, done, info = env.step(action) \n assert info['final_status'] == 'collision'\n\n@pytest.mark.nightly\n@pytest.mark.flaky(reruns=3)\ndef test_collision_with_ground(env):\n collision_joint_config = {'ur3': [0.0, 2.64, -1.95, -2.98, 0.41], \\\n 'ur3e': [1.13, 1.88, -2.19, -3.43, 2.43], \\\n 'ur5': [0.0, 1.0, 1.8, 0.0, 0.0], \\\n 'ur5e': [0.0, 3.52, -2.58, 0.0, 0.0], \\\n 'ur10': [0.0, 1.0, 1.15, 0.0, 0.0], \\\n 'ur10e': [-2.14, -0.13, 0.63, -1.13, 1.63], \\\n 'ur16e': [0.0, -0.15, 1.32, 0.0, 1.63]}\n env.reset()\n action = env.ur.normalize_joint_values(collision_joint_config[env.ur.model])\n done = False\n while not done:\n _, _, done, info = env.step(action) \n assert info['final_status'] == 'collision'\n\n@pytest.mark.nightly \ndef test_reset_joint_positions(env):\n joint_positions = [0.2, -2.5, 1.1, -2.0, -1.2, 1.2]\n\n state = env.reset(joint_positions=joint_positions)\n assert np.isclose(env.ur.normalize_joint_values(joint_positions), state[3:9], atol=0.1).all()\n\n@pytest.mark.commit \ndef test_object_coordinates(env):\n\n params = {\n #? robot up-right, target_coord_in_ee_frame 0.0, -0.3, 0.2, coordinates of target calculated using official dimensions from DH parameters. \n #? first value is d4+d6\n #? 
second value is: d1+a2+a3+d5\n 'ur3': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.194 +0.2), (0.692 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},\n 'ur3e': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.223 +0.2), (0.694 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}},\n 'ur5': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.191 +0.2), (1.001 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}}, \n 'ur5e': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.233 +0.2), (1.079 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}}, \n 'ur10': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.256 +0.2), (1.428 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}}, \n 'ur10e': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.291 +0.2), (1.485 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}}, \n 'ur16e': {'joint_positions':[0.0, -1.57, 0.0, -1.57, 0.0, 0.0], 'object_coords':[0.0, (0.291 +0.2), (1.139 + 0.3), 0.0, 0.0, 0.0], 'polar_coords':{'r': 0.360, 'theta': 0.983, 'phi': -1.571}}\n }\n\n state = env.reset(joint_positions=params[env.ur.model]['joint_positions'], ee_target_pose=params[env.ur.model]['object_coords'])\n assert np.isclose([params[env.ur.model]['polar_coords']['r'], params[env.ur.model]['polar_coords']['theta'], params[env.ur.model]['polar_coords']['phi']], state[0:3], atol=0.1).all()\n \n\ntest_ur_fixed_joints = [\n ('EndEffectorPositioningURSim-v0', True, False, False, False, False, False, 'ur3'), # fixed shoulder_pan\n ('EndEffectorPositioningURSim-v0', False, True, False, False, False, False, 'ur3e'), # fixed shoulder_lift\n ('EndEffectorPositioningURSim-v0', False, False, False, False, False, True, 'ur5'), # fixed wrist_3\n ('EndEffectorPositioningURSim-v0', True, False, True, False, False, False, 'ur5e'), # fixed Base and Elbow\n ('EndEffectorPositioningURSim-v0', False, False, True, False, False, False, 'ur10'), # fixed elbow\n ('EndEffectorPositioningURSim-v0', False, False, False, True, False, False, 'ur10e'), # fixed wrist_1\n ('EndEffectorPositioningURSim-v0', False, False, False, False, True, False, 'ur16e'), # fixed wrist_2\n]\n\n@pytest.mark.nightly\n@pytest.mark.parametrize('env_name, fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3, ur_model', test_ur_fixed_joints)\n@pytest.mark.flaky(reruns=3)\ndef test_fixed_joints(env_name, fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3, ur_model):\n env = gym.make(env_name, ip='robot-servers', fix_base=fix_base, fix_shoulder=fix_shoulder, fix_elbow=fix_elbow, \n fix_wrist_1=fix_wrist_1, fix_wrist_2=fix_wrist_2, fix_wrist_3=fix_wrist_3, ur_model=ur_model)\n state = env.reset()\n initial_joint_positions = state[3:9]\n # Take 20 actions\n action = env.action_space.sample()\n for _ in range(20):\n state, _, _, _ = env.step(action)\n joint_positions = state[3:9]\n\n if fix_base:\n assert math.isclose(initial_joint_positions[0], joint_positions[0], abs_tol=0.05)\n if fix_shoulder:\n assert math.isclose(initial_joint_positions[1], joint_positions[1], abs_tol=0.05)\n if fix_elbow:\n assert math.isclose(initial_joint_positions[2], joint_positions[2], abs_tol=0.05)\n if fix_wrist_1:\n assert 
math.isclose(initial_joint_positions[3], joint_positions[3], abs_tol=0.05)\n if fix_wrist_2:\n assert math.isclose(initial_joint_positions[4], joint_positions[4], abs_tol=0.05)\n if fix_wrist_3:\n assert math.isclose(initial_joint_positions[5], joint_positions[5], abs_tol=0.05)\n\n env.kill_sim()\n\n@pytest.mark.commit \ndef test_success(env):\n params = {\n 'ur3': {'object_coords':[0.0, 0.194, 0.692, 0.0, 0.0, 0.0]},\n 'ur3e': {'object_coords':[0.0, 0.223, 0.694, 0.0, 0.0, 0.0]},\n 'ur5': {'object_coords':[0.0, 0.191, 1.001, 0.0, 0.0, 0.0]}, \n 'ur5e': {'object_coords':[0.0, 0.233, 1.079, 0.0, 0.0, 0.0]}, \n 'ur10': {'object_coords':[0.0, 0.256, 1.428, 0.0, 0.0, 0.0]}, \n 'ur10e': {'object_coords':[0.0, 0.291, 1.485, 0.0, 0.0, 0.0]}, \n 'ur16e': {'object_coords':[0.0, 0.291, 1.139, 0.0, 0.0, 0.0]}\n }\n\n env.reset(joint_positions=[0.0, -1.3, 0.0, -1.3, 0.0, 0.0], ee_target_pose=params[env.ur.model]['object_coords'])\n action = env.ur.normalize_joint_values([0.0, -1.57, 0.0, -1.57, 0.0])\n done = False\n while not done:\n _, _, done, info = env.step(action) \n assert info['final_status'] == 'success'" ]
[ [ "numpy.isclose" ] ]
haohy/deepts_torch
[ "166cb0ac2ea1d89d1196a33f40caf89c3f2c23c5" ]
[ "unittests/layers/test_interaction.py" ]
[ "import pytest\nimport os, sys\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.insert(1, os.getcwd())\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport tensorflow as tf\n\nfrom deepts.layers import AttentionBlock, TemporalBlock, TemporalConvNet,TCN\nfrom unittests.config import SAMPLE_SIZE, BATCH_SIZE, N_BACK\n\n\n@pytest.mark.parametrize(\n 'use_scale, softmax_axis',\n [(True, 1), (True, 2)]\n)\ndef test_AttentionBlock(use_scale, softmax_axis):\n Tq, Tv, dim = 10, 20, 15\n attention_block_layer = AttentionBlock(use_scale, softmax_axis)\n query = tf.random.normal((BATCH_SIZE, Tq, dim))\n key = tf.random.normal((BATCH_SIZE, Tv, dim))\n value = tf.random.normal((BATCH_SIZE, Tv, dim))\n outputs = attention_block_layer([query, key, value])\n print(outputs.shape)\n\n@pytest.mark.parametrize(\n 'temp_attn, en_res, is_conv, softmax_axis',\n [(True, True, True, 1),\n (True, True, True, 2),\n (False, True, True, 1),\n (False, False, True, 1),\n (True, False, True, 1),\n (True, True, False, 1),]\n)\ndef test_TemporalBlock(temp_attn, en_res, is_conv, softmax_axis):\n n_inputs = 10\n n_outputs = 20\n kernel_size = 3\n num_sub_blocks = 2\n attn_dim = 10\n stride = 1\n dilation = 2\n visual = False\n temporalblock_layer = TemporalBlock(softmax_axis, n_outputs, kernel_size, \n num_sub_blocks, attn_dim, temp_attn, en_res, is_conv, \n stride, dilation, visual, dropout=0.2)\n inputs = tf.random.normal((BATCH_SIZE, N_BACK, n_inputs))\n outputs = temporalblock_layer(inputs)\n print(outputs[0].shape)\n\n@pytest.mark.parametrize(\n 'temp_attn, en_res, is_conv, softmax_axis',\n [(True, True, True, 1),\n (True, True, True, 2),\n (False, True, True, 1),\n (False, False, True, 1),\n (True, False, True, 1),\n (True, True, False, 1),]\n)\ndef test_TemporalConvNet(temp_attn, en_res, is_conv, softmax_axis):\n emb_dim = 10\n channel_list = [20, 30, 40]\n kernel_size = 3\n num_sub_blocks = 2\n attn_dim = 10\n visual = False\n temporalconvnet_layer = TemporalConvNet(attn_dim, channel_list, num_sub_blocks, \n temp_attn, en_res, is_conv, softmax_axis, kernel_size,\n visual, dropout=0.2)\n inputs = tf.random.normal((BATCH_SIZE, N_BACK, emb_dim))\n outputs = temporalconvnet_layer(inputs)\n print(outputs[0].shape)\n\n@pytest.mark.parametrize(\n '',\n [()]\n)\ndef test_TCN():\n tcn_layer = TCN()\n x_num = tf.random.normal((BATCH_SIZE, 168))\n x_cat = tf.random.uniform((BATCH_SIZE, 24, 8), minval=0, maxval=2, dtype=tf.int64)\n outputs = tcn_layer(x_num, x_cat)\n print(outputs.shape)\n\nif __name__ == '__main__':\n physical_devices = tf.config.list_physical_devices('GPU') \n tf.config.set_visible_devices(physical_devices[1:], 'GPU')\n # test_AttentionBlock(True, 2)\n # pytest.main(['-s', '-k', 'AttentionBlock', './unittests/layers/test_interaction.py'])\n # test_TemporalBlock(True, True, True, 1)\n # pytest.main(['-s', '-k', 'TemporalBlock', './unittests/layers/test_interaction.py'])\n # test_TemporalConvNet(True, True, False, 1)\n # pytest.main(['-s', '-k', 'TemporalConvNet', './unittests/layers/test_interaction.py'])\n test_TCN()" ]
[ [ "tensorflow.random.uniform", "tensorflow.random.normal", "tensorflow.config.list_physical_devices", "tensorflow.config.set_visible_devices" ] ]
alialamiidrissi/pinkfish
[ "c34920d970281b60ae4d46d6c52af13f6b3761f0" ]
[ "examples/240.double-7s-portfolio/strategy.py" ]
[ "\"\"\"\nThe double-7s-portfolio stategy.\n\nThis is double-7s strategy applied to a portfolio.\nThe simple double 7's strategy was revealed in the book\n'Short Term Strategies that Work: A Quantified Guide to Trading Stocks\nand ETFs', by Larry Connors and Cesar Alvarez. It's a mean reversion\nstrategy looking to buy dips and sell on strength and was initially\ndesigned for ETFs.\n\nThis module allows us to examine this strategy and try different\nperiod, stop loss percent, margin, and whether to use a regime filter\nor not. We split up the total capital between the symbols in the\nportfolio and allocate based on either equal weight or volatility\nparity weight (inverse volatility).\n\"\"\"\n\nimport datetime\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom talib.abstract import *\n\nimport pinkfish as pf\n\n\ndefault_options = {\n 'use_adj' : False,\n 'use_cache' : True,\n 'stop_loss_pct' : 1.0,\n 'margin' : 1,\n 'period' : 7,\n 'use_regime_filter' : True,\n 'use_vola_weight' : False\n}\n\nclass Strategy:\n\n def __init__(self, symbols, capital, start, end, options=default_options):\n \n self.symbols = symbols\n self.capital = capital\n self.start = start\n self.end = end\n self.options = options.copy()\n \n self.ts = None\n self.rlog = None\n self.tlog = None\n self.dbal = None\n self.stats = None\n\n def _algo(self):\n\n pf.TradeLog.cash = self.capital\n pf.TradeLog.margin = self.options['margin']\n\n # Create a stop_loss dict for each symbol.\n stop_loss = {symbol:0 for symbol in self.portfolio.symbols}\n \n # stop loss pct should range between 0 and 1, user may have\n # expressed this as a percentage 0-100\n if self.options['stop_loss_pct'] > 1:\n self.options['stop_loss_pct'] /= 100\n\n period_high_field = 'period_high' + str(self.options['period'])\n period_low_field = 'period_low' + str(self.options['period'])\n\n # Loop though timeseries.\n for i, row in enumerate(self.ts.itertuples()):\n\n date = row.Index.to_pydatetime()\n end_flag = pf.is_last_row(self.ts, i)\n \n # Get the prices for this row, put in dict p.\n p = self.portfolio.get_prices(row,\n fields=['close', 'regime', period_high_field, period_low_field, 'vola'])\n\n # Sum the inverse volatility for each row.\n inverse_vola_sum = 0\n for symbol in self.portfolio.symbols:\n inverse_vola_sum += 1 / p[symbol]['vola']\n\n # Loop though each symbol in portfolio.\n for symbol in self.portfolio.symbols:\n\n # Use variables to make code cleaner\n close = p[symbol]['close']\n regime = p[symbol]['regime']\n period_high = p[symbol][period_high_field]\n period_low = p[symbol][period_low_field]\n inverse_vola = 1 / p[symbol]['vola']\n \n # Sell Logic\n # First we check if an existing position in symbol should be sold\n # - sell if price closes at X day high\n # - sell if price closes below stop loss\n # - sell if end of data by adjusted the percent to zero\n\n if symbol in self.portfolio.positions:\n if close == period_high or close < stop_loss[symbol] or end_flag:\n if close < stop_loss[symbol]: print('STOP LOSS!!!')\n self.portfolio.adjust_percent(date, close, 0, symbol, row)\n \n # Buy Logic\n # First we check to see if there is an existing position, if so do nothing\n # - Buy if (regime > 0 or not use_regime_filter) and price closes at X day low\n\n else:\n if (regime > 0 or not self.options['use_regime_filter']) and close == period_low:\n # Use volatility weight.\n if self.options['use_vola_weight']:\n weight = inverse_vola / inverse_vola_sum\n # Use equal weight.\n else:\n weight = 1 / 
len(self.portfolio.symbols)\n self.portfolio.adjust_percent(date, close, weight, symbol, row)\n # Set stop loss\n stop_loss[symbol] = (1-self.options['stop_loss_pct'])*close\n\n # record daily balance\n self.portfolio.record_daily_balance(date, row)\n\n def run(self):\n self.portfolio = pf.Portfolio()\n self.ts = self.portfolio.fetch_timeseries(self.symbols, self.start, self.end,\n use_cache=self.options['use_cache'], use_adj=self.options['use_adj'])\n\n # Add technical indicator: 200 sma regime filter for each symbol.\n def _crossover(ts, ta_param, input_column):\n return pf.CROSSOVER(ts, timeperiod_fast=1, timeperiod_slow=200,\n price=input_column, prevday=False)\n\n self.ts = self.portfolio.add_technical_indicator(\n self.ts, ta_func=_crossover, ta_param=None,\n output_column_suffix='regime', input_column_suffix='close')\n \n # Add technical indicator: volatility.\n def _volatility(ts, ta_param, input_column):\n return pf.VOLATILITY(ts, price=input_column)\n \n self.ts = self.portfolio.add_technical_indicator(\n self.ts, ta_func=_volatility, ta_param=None,\n output_column_suffix='vola', input_column_suffix='close')\n\n # Add technical indicator: X day high.\n def _period_high(ts, ta_param, input_column):\n return pd.Series(ts[input_column]).rolling(ta_param).max()\n\n self.ts = self.portfolio.add_technical_indicator(\n self.ts, ta_func=_period_high, ta_param=self.options['period'],\n output_column_suffix='period_high'+str(self.options['period']),\n input_column_suffix='close')\n\n # Add technical indicator: X day low.\n def _period_low(ts, ta_param, input_column):\n return pd.Series(ts[input_column]).rolling(ta_param).min()\n\n self.ts = self.portfolio.add_technical_indicator(\n self.ts, ta_func=_period_low, ta_param=self.options['period'],\n output_column_suffix='period_low'+str(self.options['period']),\n input_column_suffix='close')\n\n # Finalize timeseries.\n self.ts, self.start = self.portfolio.finalize_timeseries(self.ts, self.start)\n\n # Init trade log objects.\n self.portfolio.init_trade_logs(self.ts)\n\n self._algo()\n self._get_logs()\n self._get_stats()\n\n def _get_logs(self):\n self.rlog, self.tlog, self.dbal = self.portfolio.get_logs()\n\n def _get_stats(self):\n self.stats = pf.stats(self.ts, self.tlog, self.dbal, self.capital)\n\n" ]
[ [ "pandas.Series" ] ]
alchemistlee/bert
[ "8837f10cad4317cfd8a792a1c954e15f0dc4b791" ]
[ "spear/multi_lable_classifier_v1.py" ]
[ "# coding=utf-8\n\"\"\"BERT finetuning runner.\"\"\"\n\n# @time : 2019/5/17 19:01\n# @author : alchemistlee\n# @fileName: multi_lable_classifier_v1.py\n# @abstract:\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport csv\nimport os\nimport modeling\nimport optimization\nimport tokenization\nimport tensorflow as tf\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"data_dir\", None,\n \"The input data dir. Should contain the .tsv files (or other data files) \"\n \"for the task.\")\n\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"task_name\", None, \"The name of the task to train.\")\n\nflags.DEFINE_string(\"vocab_file\", None,\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_bool(\n \"do_predict\", False,\n \"Whether to run the model in inference mode on the test set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"eval_batch_size\", 8, \"Total batch size for eval.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8, \"Total batch size for predict.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\"num_train_epochs\", 3.0,\n \"Total number of training epochs to perform.\")\n\nflags.DEFINE_float(\n \"warmup_proportion\", 0.1,\n \"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10% of training.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. 
If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass PaddingInputExample(object):\n \"\"\"Fake example so the num input examples is a multiple of the batch size.\n\n When running eval/predict on the TPU, we need to pad the number of examples\n to be a multiple of the batch size, because the TPU requires a fixed batch\n size. The alternative is to drop the last batch, which is bad because it means\n the entire output data won't be generated.\n\n We use this class instead of `None` because treating `None` as padding\n battches could cause silent errors.\n \"\"\"\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n input_ids,\n input_mask,\n segment_ids,\n label_id,\n is_real_example=True):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n self.is_real_example = is_real_example\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_test_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for prediction.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines\n\n\nclass XnliProcessor(DataProcessor):\n \"\"\"Processor for the XNLI data set.\"\"\"\n\n def __init__(self):\n self.language = \"zh\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(\n os.path.join(data_dir, \"multinli\",\n \"multinli.train.%s.tsv\" % self.language))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"train-%d\" % (i)\n text_a = tokenization.convert_to_unicode(line[0])\n text_b = tokenization.convert_to_unicode(line[1])\n label = tokenization.convert_to_unicode(line[2])\n if label == tokenization.convert_to_unicode(\"contradictory\"):\n label = 
tokenization.convert_to_unicode(\"contradiction\")\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n lines = self._read_tsv(os.path.join(data_dir, \"xnli.dev.tsv\"))\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"dev-%d\" % (i)\n language = tokenization.convert_to_unicode(line[0])\n if language != tokenization.convert_to_unicode(self.language):\n continue\n text_a = tokenization.convert_to_unicode(line[6])\n text_b = tokenization.convert_to_unicode(line[7])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n\nclass MnliProcessor(DataProcessor):\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),\n \"dev_matched\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test_matched.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, tokenization.convert_to_unicode(line[0]))\n text_a = tokenization.convert_to_unicode(line[8])\n text_b = tokenization.convert_to_unicode(line[9])\n if set_type == \"test\":\n label = \"contradiction\"\n else:\n label = tokenization.convert_to_unicode(line[-1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = tokenization.convert_to_unicode(line[3])\n text_b = tokenization.convert_to_unicode(line[4])\n if set_type == \"test\":\n label = \"0\"\n else:\n label = tokenization.convert_to_unicode(line[0])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass ColaProcessor(DataProcessor):\n \"\"\"Processor for the 
CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n # Only the test set has a header\n if set_type == \"test\" and i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n if set_type == \"test\":\n text_a = tokenization.convert_to_unicode(line[1])\n label = \"0\"\n else:\n text_a = tokenization.convert_to_unicode(line[3])\n label = tokenization.convert_to_unicode(line[1])\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\ndef convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n \"\"\"Converts a single `InputExample` into a single `InputFeatures`.\"\"\"\n\n if isinstance(example, PaddingInputExample):\n return InputFeatures(\n input_ids=[0] * max_seq_length,\n input_mask=[0] * max_seq_length,\n segment_ids=[0] * max_seq_length,\n label_id=0,\n is_real_example=False)\n\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as the \"sentence vector\". 
Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n if ex_index < 5:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"guid: %s\" % (example.guid))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n feature = InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n is_real_example=True)\n return feature\n\n\ndef file_based_convert_examples_to_features(\n examples, label_list, max_seq_length, tokenizer, output_file):\n \"\"\"Convert a set of `InputExample`s to a TFRecord file.\"\"\"\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n writer.close()\n\n\ndef file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"label_ids\": tf.FixedLenFeature([], tf.int64),\n \"is_real_example\": tf.FixedLenFeature([], tf.int64),\n }\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for 
name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, probabilities)\n\n\ndef model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if \"is_real_example\" in 
features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions, weights=is_real_example)\n loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n eval_metrics = (metric_fn,\n [per_example_loss, label_ids, logits, is_real_example])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn\n\n\n# This function is not used by this file but is still used by the Colab and\n# people who depend on it.\ndef input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn\n\n\n# This function is not used by this file but is still used by the Colab and\n# people who depend on it.\ndef convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer):\n \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list,\n max_seq_length, tokenizer)\n\n features.append(feature)\n return features\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n processors = {\n \"cola\": ColaProcessor,\n \"mnli\": MnliProcessor,\n \"mrpc\": MrpcProcessor,\n \"xnli\": XnliProcessor,\n }\n\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:\n raise ValueError(\n \"At least one of `do_train`, `do_eval` or `do_predict' must be True.\")\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n task_name = FLAGS.task_name.lower()\n\n if task_name not in processors:\n raise ValueError(\"Task not found: %s\" % (task_name))\n\n processor = processors[task_name]()\n\n label_list = processor.get_labels()\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n train_examples = processor.get_train_examples(FLAGS.data_dir)\n num_train_steps = int(\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n num_labels=len(label_list),\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n 
num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n\n if FLAGS.do_train:\n train_file = os.path.join(FLAGS.output_dir, \"train.tf_record\")\n file_based_convert_examples_to_features(\n train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num examples = %d\", len(train_examples))\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n train_input_fn = file_based_input_fn_builder(\n input_file=train_file,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True)\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n\n if FLAGS.do_eval:\n eval_examples = processor.get_dev_examples(FLAGS.data_dir)\n num_actual_eval_examples = len(eval_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. So we pad with fake examples which are ignored\n # later on. These do NOT count towards the metric (all tf.metrics\n # support a per-instance weight, and these get a weight of 0.0).\n while len(eval_examples) % FLAGS.eval_batch_size != 0:\n eval_examples.append(PaddingInputExample())\n\n eval_file = os.path.join(FLAGS.output_dir, \"eval.tf_record\")\n file_based_convert_examples_to_features(\n eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)\n\n tf.logging.info(\"***** Running evaluation *****\")\n tf.logging.info(\" Num examples = %d (%d actual, %d padding)\",\n len(eval_examples), num_actual_eval_examples,\n len(eval_examples) - num_actual_eval_examples)\n tf.logging.info(\" Batch size = %d\", FLAGS.eval_batch_size)\n\n # This tells the estimator to run through the entire set.\n eval_steps = None\n # However, if running eval on the TPU, you will need to specify the\n # number of steps.\n if FLAGS.use_tpu:\n assert len(eval_examples) % FLAGS.eval_batch_size == 0\n eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)\n\n eval_drop_remainder = True if FLAGS.use_tpu else False\n eval_input_fn = file_based_input_fn_builder(\n input_file=eval_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=eval_drop_remainder)\n\n result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)\n\n output_eval_file = os.path.join(FLAGS.output_dir, \"eval_results.txt\")\n with tf.gfile.GFile(output_eval_file, \"w\") as writer:\n tf.logging.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n tf.logging.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n if FLAGS.do_predict:\n predict_examples = processor.get_test_examples(FLAGS.data_dir)\n num_actual_predict_examples = len(predict_examples)\n if FLAGS.use_tpu:\n # TPU requires a fixed batch size for all batches, therefore the number\n # of examples must be a multiple of the batch size, or else examples\n # will get dropped. 
So we pad with fake examples which are ignored\n # later on.\n while len(predict_examples) % FLAGS.predict_batch_size != 0:\n predict_examples.append(PaddingInputExample())\n\n predict_file = os.path.join(FLAGS.output_dir, \"predict.tf_record\")\n file_based_convert_examples_to_features(predict_examples, label_list,\n FLAGS.max_seq_length, tokenizer,\n predict_file)\n\n tf.logging.info(\"***** Running prediction*****\")\n tf.logging.info(\" Num examples = %d (%d actual, %d padding)\",\n len(predict_examples), num_actual_predict_examples,\n len(predict_examples) - num_actual_predict_examples)\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n predict_drop_remainder = True if FLAGS.use_tpu else False\n predict_input_fn = file_based_input_fn_builder(\n input_file=predict_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=predict_drop_remainder)\n\n result = estimator.predict(input_fn=predict_input_fn)\n\n output_predict_file = os.path.join(FLAGS.output_dir, \"test_results.tsv\")\n with tf.gfile.GFile(output_predict_file, \"w\") as writer:\n num_written_lines = 0\n tf.logging.info(\"***** Predict results *****\")\n for (i, prediction) in enumerate(result):\n probabilities = prediction[\"probabilities\"]\n if i >= num_actual_predict_examples:\n break\n output_line = \"\\t\".join(\n str(class_probability)\n for class_probability in probabilities) + \"\\n\"\n writer.write(output_line)\n num_written_lines += 1\n assert num_written_lines == num_actual_predict_examples\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"data_dir\")\n flags.mark_flag_as_required(\"task_name\")\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n" ]
[ [ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.metrics.accuracy", "tensorflow.FixedLenFeature", "tensorflow.nn.log_softmax", "tensorflow.reduce_sum", "tensorflow.gfile.GFile", "tensorflow.cast", "tensorflow.train.init_from_checkpoint", "tensorflow.gfile.MakeDirs", "tensorflow.to_int32", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.data.TFRecordDataset", "tensorflow.truncated_normal_initializer", "tensorflow.python_io.TFRecordWriter", "tensorflow.logging.set_verbosity", "tensorflow.trainable_variables", "tensorflow.parse_single_example", "tensorflow.argmax", "tensorflow.app.run", "tensorflow.nn.dropout", "tensorflow.metrics.mean", "tensorflow.matmul", "tensorflow.gfile.Open", "tensorflow.shape", "tensorflow.zeros_initializer", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.train.Features", "tensorflow.nn.bias_add", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.train.Scaffold", "tensorflow.reduce_mean", "tensorflow.flags.DEFINE_string", "tensorflow.variable_scope" ] ]
HudZah/AutoDrawer
[ "e23e8c012c8000197f6fd34ee8bb7f14eb04e7f5" ]
[ "AutoDrawer/mouseAutomater.py" ]
[ "import numpy as np\r\nimport pyautogui\r\nimport time\r\nimport os\r\nfrom PIL import Image, ImageFilter, ImageOps\r\n\r\nclass MouseAutomater(object):\r\n\r\n pyautogui.PAUSE = 0.00\r\n pyautogui.FAILSAFE = True\r\n\r\n def openPaint():\r\n openPain = input(\"Would you like to open paint? (yes to continue) (Windows only)\")\r\n if openPain == \"yes\":\r\n os.startfile(\"C:\\WINDOWS\\system32\\mspaint.exe\")\r\n\r\n @staticmethod\r\n def imageToLines(imageArray : np.array, offset: int, rsleep:int, lsleep:int) -> None:\r\n startPosX,startPosY = pyautogui.position()\r\n\r\n for row in imageArray:\r\n xoffset = 0\r\n isDrawing = False\r\n for value in row:\r\n if value == False: # Don't draw\r\n if isDrawing == True:\r\n xoffset += offset\r\n else:\r\n startLine = startPosX + xoffset\r\n isDrawing = True\r\n xoffset += offset\r\n if value == True:\r\n if isDrawing == False:\r\n xoffset += offset\r\n else:\r\n pyautogui.moveTo(startLine, startPosY)\r\n pyautogui.dragTo(startPosX + xoffset, startPosY, duration=lsleep, button=\"left\")\r\n time.sleep(lsleep)\r\n isDrawing = False\r\n xoffset += offset\r\n if value == False:\r\n if isDrawing == True:\r\n pyautogui.moveTo(startLine, startPosY)\r\n pyautogui.dragTo(startPosX + xoffset, startPosY, duration=lsleep, button=\"left\")\r\n time.sleep(lsleep)\r\n xoffset += offset\r\n startPosY += offset\r\n time.sleep(rsleep)\r\n\r\n\r\nclass ImageController(object):\r\n def __init__(self, image):\r\n self.image = Image.open(image)\r\n \r\n @staticmethod\r\n def getImage() -> str:\r\n path = input(\"Path to image: \")\r\n return path\r\n \r\n def convertToBW(self) -> None:\r\n self.image = self.image.convert(mode=\"1\" , dither=None)\r\n\r\n def invertImage(self, image) -> None:\r\n self.image = ImageOps.invert(Image.open(image))\r\n\r\n def resize(self, resizeValue) -> None:\r\n horSize, verSize = self.image.size\r\n\r\n if horSize > verSize:\r\n conversion = horSize/ resizeValue\r\n self.image = self.image.resize((int(horSize/ conversion), int(verSize/ conversion)))\r\n else:\r\n conversion = verSize/ resizeValue\r\n self.image = self.image.resize((int(horSize/conversion), int(verSize/conversion)))\r\n \r\n def newImageArray(self) -> np.array:\r\n array = np.array(self.image)\r\n return array\r\n" ]
[ [ "numpy.array" ] ]
ResearchSoftwareInstitute/MyHPOM
[ "2d48fe5ac8d21173b1685eb33059bb391fe24414" ]
[ "hs_file_types/models/netcdf.py" ]
[ "import os\nimport shutil\nimport logging\nimport re\n\nfrom functools import partial, wraps\nimport netCDF4\nimport numpy as np\n\nfrom django.db import models, transaction\nfrom django.core.exceptions import ValidationError\nfrom django.core.files.uploadedfile import UploadedFile\nfrom django.template import Template, Context\nfrom django.forms.models import formset_factory, BaseFormSet\n\nfrom dominate.tags import div, legend, form, button, p, textarea, strong, input\n\nfrom hs_core.hydroshare import utils\nfrom hs_core.hydroshare.resource import delete_resource_file\nfrom hs_core.forms import CoverageTemporalForm, CoverageSpatialForm\nfrom hs_core.models import Creator, Contributor\n\nfrom hs_app_netCDF.models import NetCDFMetaDataMixin, OriginalCoverage, Variable\nfrom hs_app_netCDF.forms import VariableForm, VariableValidationForm, OriginalCoverageForm\n\nfrom base import AbstractFileMetaData, AbstractLogicalFile\nimport hs_file_types.nc_functions.nc_utils as nc_utils\nimport hs_file_types.nc_functions.nc_dump as nc_dump\nimport hs_file_types.nc_functions.nc_meta as nc_meta\n\n\nclass NetCDFFileMetaData(NetCDFMetaDataMixin, AbstractFileMetaData):\n # the metadata element models are from the netcdf resource type app\n model_app_label = 'hs_app_netCDF'\n\n def get_metadata_elements(self):\n elements = super(NetCDFFileMetaData, self).get_metadata_elements()\n elements += [self.original_coverage]\n elements += list(self.variables.all())\n return elements\n\n @classmethod\n def get_metadata_model_classes(cls):\n metadata_model_classes = super(NetCDFFileMetaData, cls).get_metadata_model_classes()\n metadata_model_classes['originalcoverage'] = OriginalCoverage\n metadata_model_classes['variable'] = Variable\n return metadata_model_classes\n\n @property\n def original_coverage(self):\n # There can be at most only one instance of type OriginalCoverage associated\n # with this metadata object\n return self.ori_coverage.all().first()\n\n def get_html(self):\n \"\"\"overrides the base class function\"\"\"\n\n html_string = super(NetCDFFileMetaData, self).get_html()\n if self.spatial_coverage:\n html_string += self.spatial_coverage.get_html()\n if self.originalCoverage:\n html_string += self.originalCoverage.get_html()\n if self.temporal_coverage:\n html_string += self.temporal_coverage.get_html()\n variable_legend = legend(\"Variables\", cls=\"pull-left\", style=\"margin-top:20px;\")\n html_string += variable_legend.render()\n for variable in self.variables.all():\n html_string += variable.get_html()\n\n # ncdump text from the txt file\n html_string += self.get_ncdump_html().render()\n template = Template(html_string)\n context = Context({})\n return template.render(context)\n\n def get_html_forms(self, dataset_name_form=True, temporal_coverage=True, **kwargs):\n \"\"\"overrides the base class function\"\"\"\n\n root_div = div(\"{% load crispy_forms_tags %}\")\n with root_div:\n self.get_update_netcdf_file_html_form()\n super(NetCDFFileMetaData, self).get_html_forms()\n with div(cls=\"row\"):\n with div(cls=\"col-lg-6 col-xs-12\", id=\"original-coverage-filetype\"):\n with form(id=\"id-origcoverage-file-type\",\n action=\"{{ orig_coverage_form.action }}\",\n method=\"post\", enctype=\"multipart/form-data\"):\n div(\"{% crispy orig_coverage_form %}\")\n with div(cls=\"row\", style=\"margin-top:10px;\"):\n with div(cls=\"col-md-offset-10 col-xs-offset-6 \"\n \"col-md-2 col-xs-6\"):\n button(\"Save changes\", type=\"button\",\n cls=\"btn btn-primary pull-right\",\n style=\"display: none;\")\n\n 
with div(cls=\"col-lg-6 col-xs-12\", id=\"spatial-coverage-filetype\"):\n with form(id=\"id-spatial-coverage-file-type\",\n action=\"{{ spatial_coverage_form.action }}\",\n method=\"post\", enctype=\"multipart/form-data\"):\n div(\"{% crispy spatial_coverage_form %}\")\n with div(cls=\"row\", style=\"margin-top:10px;\"):\n with div(cls=\"col-md-offset-10 col-xs-offset-6 \"\n \"col-md-2 col-xs-6\"):\n button(\"Save changes\", type=\"button\",\n cls=\"btn btn-primary pull-right\",\n style=\"display: none;\")\n\n with div(cls=\"pull-left col-sm-12\"):\n # id has to be variables to get the vertical scrollbar\n with div(cls=\"well\", id=\"variables\"):\n with div(cls=\"row\"):\n with div(\"{% for form in variable_formset_forms %}\"):\n with div(cls=\"col-sm-6 col-xs-12\"):\n with form(id=\"{{ form.form_id }}\", action=\"{{ form.action }}\",\n method=\"post\", enctype=\"multipart/form-data\"):\n div(\"{% crispy form %}\")\n with div(cls=\"row\", style=\"margin-top:10px;\"):\n with div(cls=\"col-md-offset-10 col-xs-offset-6 \"\n \"col-md-2 col-xs-6\"):\n button(\"Save changes\", type=\"button\",\n cls=\"btn btn-primary pull-right\",\n style=\"display: none;\")\n div(\"{% endfor %}\")\n\n self.get_ncdump_html()\n\n template = Template(root_div.render())\n temp_cov_form = self.get_temporal_coverage_form()\n update_action = \"/hydroshare/hsapi/_internal/NetCDFLogicalFile/{0}/{1}/{2}/update-file-metadata/\"\n create_action = \"/hydroshare/hsapi/_internal/NetCDFLogicalFile/{0}/{1}/add-file-metadata/\"\n if self.temporal_coverage:\n temp_action = update_action.format(self.logical_file.id, \"coverage\",\n self.temporal_coverage.id)\n else:\n temp_action = create_action.format(self.logical_file.id, \"coverage\")\n\n temp_cov_form.action = temp_action\n\n orig_cov_form = self.get_original_coverage_form()\n if self.originalCoverage:\n temp_action = update_action.format(self.logical_file.id, \"originalcoverage\",\n self.originalCoverage.id)\n else:\n temp_action = create_action.format(self.logical_file.id, \"originalcoverage\")\n\n orig_cov_form.action = temp_action\n\n spatial_cov_form = self.get_spatial_coverage_form(allow_edit=True)\n if self.spatial_coverage:\n temp_action = update_action.format(self.logical_file.id, \"coverage\",\n self.spatial_coverage.id)\n else:\n temp_action = create_action.format(self.logical_file.id, \"coverage\")\n\n spatial_cov_form.action = temp_action\n context_dict = dict()\n context_dict[\"temp_form\"] = temp_cov_form\n context_dict[\"orig_coverage_form\"] = orig_cov_form\n context_dict[\"spatial_coverage_form\"] = spatial_cov_form\n context_dict[\"variable_formset_forms\"] = self.get_variable_formset().forms\n context = Context(context_dict)\n rendered_html = template.render(context)\n return rendered_html\n\n def get_update_netcdf_file_html_form(self):\n form_action = \"/hydroshare/hsapi/_internal/{}/update-netcdf-file/\".format(self.id)\n style = \"display:none;\"\n if self.is_dirty:\n style = \"margin-bottom:10px\"\n root_div = div(id=\"div-netcdf-file-update\", cls=\"row\", style=style)\n\n with root_div:\n with div(cls=\"col-sm-12\"):\n with div(cls=\"alert alert-warning alert-dismissible\", role=\"alert\"):\n strong(\"NetCDF file needs to be synced with metadata changes.\")\n input(id=\"metadata-dirty\", type=\"hidden\", value=self.is_dirty)\n with form(action=form_action, method=\"post\", id=\"update-netcdf-file\"):\n button(\"Update NetCDF File\", type=\"button\", cls=\"btn btn-primary\",\n id=\"id-update-netcdf-file\")\n\n return root_div\n\n def 
get_original_coverage_form(self):\n        return OriginalCoverage.get_html_form(resource=None, element=self.originalCoverage,\n                                              file_type=True)\n\n    def get_variable_formset(self):\n        VariableFormSetEdit = formset_factory(\n            wraps(VariableForm)(partial(VariableForm, allow_edit=True)),\n            formset=BaseFormSet, extra=0)\n        variable_formset = VariableFormSetEdit(\n            initial=self.variables.all().values(), prefix='Variable')\n\n        for frm in variable_formset.forms:\n            if len(frm.initial) > 0:\n                frm.action = \"/hydroshare/hsapi/_internal/%s/%s/variable/%s/update-file-metadata/\" % (\n                    \"NetCDFLogicalFile\", self.logical_file.id, frm.initial['id'])\n                frm.number = frm.initial['id']\n\n        return variable_formset\n\n    def get_ncdump_html(self):\n        \"\"\"\n        Generates html code to display the contents of the ncdump text file. The generated html\n        is used for netcdf file type metadata view and edit modes.\n        :return:\n        \"\"\"\n\n        nc_dump_div = div()\n        nc_dump_res_file = None\n        for f in self.logical_file.files.all():\n            if f.extension == \".txt\":\n                nc_dump_res_file = f\n                break\n        if nc_dump_res_file is not None:\n            nc_dump_div = div(style=\"clear: both\", cls=\"col-xs-12\")\n            with nc_dump_div:\n                legend(\"NetCDF Header Information\")\n                p(nc_dump_res_file.full_path[33:])\n                header_info = nc_dump_res_file.resource_file.read()\n                header_info = header_info.decode('utf-8')\n                textarea(header_info, readonly=\"\", rows=\"15\",\n                         cls=\"input-xlarge\", style=\"min-width: 100%\")\n\n        return nc_dump_div\n\n    @classmethod\n    def validate_element_data(cls, request, element_name):\n        \"\"\"overriding the base class method\"\"\"\n\n        if element_name.lower() not in [el_name.lower() for el_name\n                                        in cls.get_supported_element_names()]:\n            err_msg = \"{} is not a supported metadata element for NetCDF file type\"\n            err_msg = err_msg.format(element_name)\n            return {'is_valid': False, 'element_data_dict': None, \"errors\": err_msg}\n        element_name = element_name.lower()\n        if element_name == 'variable':\n            form_data = {}\n            for field_name in VariableValidationForm().fields:\n                try:\n                    # when the request comes from the UI, the variable attributes have a prefix of\n                    # '-'\n                    matching_key = [key for key in request.POST if '-' + field_name in key][0]\n                except IndexError:\n                    if field_name in request.POST:\n                        matching_key = field_name\n                    else:\n                        continue\n                form_data[field_name] = request.POST[matching_key]\n            element_form = VariableValidationForm(form_data)\n        elif element_name == 'originalcoverage':\n            element_form = OriginalCoverageForm(data=request.POST)\n        elif element_name == 'coverage' and 'start' not in request.POST:\n            element_form = CoverageSpatialForm(data=request.POST)\n        else:\n            # here we are assuming temporal coverage\n            element_form = CoverageTemporalForm(data=request.POST)\n\n        if element_form.is_valid():\n            return {'is_valid': True, 'element_data_dict': element_form.cleaned_data}\n        else:\n            return {'is_valid': False, 'element_data_dict': None, \"errors\": element_form.errors}\n\n    def add_to_xml_container(self, container):\n        \"\"\"Generates xml+rdf representation of all metadata elements associated with this\n        logical file type instance\"\"\"\n\n        container_to_add_to = super(NetCDFFileMetaData, self).add_to_xml_container(container)\n        if self.originalCoverage:\n            self.originalCoverage.add_to_xml_container(container_to_add_to)\n\n        for variable in self.variables.all():\n            variable.add_to_xml_container(container_to_add_to)\n\n\nclass NetCDFLogicalFile(AbstractLogicalFile):\n    metadata = models.OneToOneField(NetCDFFileMetaData, related_name=\"logical_file\")\n    data_type = 
\"Multidimensional\"\n\n @classmethod\n def get_allowed_uploaded_file_types(cls):\n \"\"\"only .nc file can be set to this logical file group\"\"\"\n return [\".nc\"]\n\n @classmethod\n def get_allowed_storage_file_types(cls):\n \"\"\"file types allowed in this logical file group are: .nc and .txt\"\"\"\n return [\".nc\", \".txt\"]\n\n @classmethod\n def create(cls):\n \"\"\"this custom method MUST be used to create an instance of this class\"\"\"\n netcdf_metadata = NetCDFFileMetaData.objects.create(keywords=[])\n return cls.objects.create(metadata=netcdf_metadata)\n\n @property\n def supports_resource_file_move(self):\n \"\"\"resource files that are part of this logical file can't be moved\"\"\"\n return False\n\n @property\n def supports_resource_file_add(self):\n \"\"\"doesn't allow a resource file to be added\"\"\"\n return False\n\n @property\n def supports_resource_file_rename(self):\n \"\"\"resource files that are part of this logical file can't be renamed\"\"\"\n return False\n\n @property\n def supports_delete_folder_on_zip(self):\n \"\"\"does not allow the original folder to be deleted upon zipping of that folder\"\"\"\n return False\n\n def update_netcdf_file(self, user):\n \"\"\"\n writes metadata to the netcdf file associated with this instance of the logical file\n :return:\n \"\"\"\n\n log = logging.getLogger()\n\n nc_res_file = ''\n txt_res_file = ''\n for f in self.files.all():\n if f.extension == '.nc':\n nc_res_file = f\n break\n\n for f in self.files.all():\n if f.extension == '.txt':\n txt_res_file = f\n break\n if not nc_res_file:\n msg = \"No netcdf file exists for this logical file.\"\n log.exception(msg)\n raise ValidationError(msg)\n\n netcdf_file_update(self, nc_res_file, txt_res_file, user)\n\n @classmethod\n def set_file_type(cls, resource, file_id, user):\n \"\"\"\n Sets a tif or zip raster resource file to GeoRasterFile type\n :param resource: an instance of resource type CompositeResource\n :param file_id: id of the resource file to be set as GeoRasterFile type\n :param user: user who is setting the file type\n :return:\n \"\"\"\n\n # had to import it here to avoid import loop\n from hs_core.views.utils import create_folder, remove_folder\n\n log = logging.getLogger()\n\n # get the file from irods\n res_file = utils.get_resource_file_by_id(resource, file_id)\n\n if res_file is None:\n raise ValidationError(\"File not found.\")\n\n if res_file.extension != '.nc':\n raise ValidationError(\"Not a NetCDF file.\")\n\n # base file name (no path included)\n file_name = res_file.file_name\n # file name without the extension\n nc_file_name = file_name[:-len(res_file.extension)]\n\n resource_metadata = []\n file_type_metadata = []\n files_to_add_to_resource = []\n upload_folder = ''\n if res_file.has_generic_logical_file:\n # get the file from irods to temp dir\n temp_file = utils.get_file_from_irods(res_file)\n temp_dir = os.path.dirname(temp_file)\n files_to_add_to_resource.append(temp_file)\n # file validation and metadata extraction\n nc_dataset = nc_utils.get_nc_dataset(temp_file)\n if isinstance(nc_dataset, netCDF4.Dataset):\n # Extract the metadata from netcdf file\n res_dublin_core_meta, res_type_specific_meta = nc_meta.get_nc_meta_dict(temp_file)\n # populate resource_metadata and file_type_metadata lists with extracted metadata\n add_metadata_to_list(resource_metadata, res_dublin_core_meta,\n res_type_specific_meta, file_type_metadata, resource)\n\n # create the ncdump text file\n dump_file = create_header_info_txt_file(temp_file, nc_file_name)\n 
files_to_add_to_resource.append(dump_file)\n file_folder = res_file.file_folder\n with transaction.atomic():\n # create a netcdf logical file object to be associated with\n # resource files\n logical_file = cls.create()\n\n # by default set the dataset_name attribute of the logical file to the\n # name of the file selected to set file type unless the extracted metadata\n # has a value for title\n dataset_title = res_dublin_core_meta.get('title', None)\n if dataset_title is not None:\n logical_file.dataset_name = dataset_title\n else:\n logical_file.dataset_name = nc_file_name\n logical_file.save()\n\n try:\n # create a folder for the netcdf file type using the base file\n # name as the name for the new folder\n new_folder_path = cls.compute_file_type_folder(resource, file_folder,\n nc_file_name)\n\n create_folder(resource.short_id, new_folder_path)\n log.info(\"Folder created:{}\".format(new_folder_path))\n\n new_folder_name = new_folder_path.split('/')[-1]\n if file_folder is None:\n upload_folder = new_folder_name\n else:\n upload_folder = os.path.join(file_folder, new_folder_name)\n # add all new files to the resource\n for f in files_to_add_to_resource:\n uploaded_file = UploadedFile(file=open(f, 'rb'),\n name=os.path.basename(f))\n # the added resource file will be part of a new generic logical file\n # by default\n new_res_file = utils.add_file_to_resource(\n resource, uploaded_file, folder=upload_folder\n )\n\n # delete the generic logical file object\n if new_res_file.logical_file is not None:\n # deleting the file level metadata object will delete the associated\n # logical file object\n new_res_file.logical_file.metadata.delete()\n\n # make each resource file we added part of the logical file\n logical_file.add_resource_file(new_res_file)\n\n log.info(\"NetCDF file type - new files were added to the resource.\")\n\n # use the extracted metadata to populate resource metadata\n for element in resource_metadata:\n # here k is the name of the element\n # v is a dict of all element attributes/field names and field values\n k, v = element.items()[0]\n if k == 'title':\n # update title element\n title_element = resource.metadata.title\n resource.metadata.update_element('title', title_element.id, **v)\n else:\n resource.metadata.create_element(k, **v)\n\n log.info(\"Resource - metadata was saved to DB\")\n\n # use the extracted metadata to populate file metadata\n for element in file_type_metadata:\n # here k is the name of the element\n # v is a dict of all element attributes/field names and field values\n k, v = element.items()[0]\n if k == 'subject':\n logical_file.metadata.keywords = v\n logical_file.metadata.save()\n # update resource level keywords\n resource_keywords = [subject.value.lower() for subject in\n resource.metadata.subjects.all()]\n for kw in logical_file.metadata.keywords:\n if kw.lower() not in resource_keywords:\n resource.metadata.create_element('subject', value=kw)\n else:\n logical_file.metadata.create_element(k, **v)\n log.info(\"NetCDF file type - metadata was saved to DB\")\n # set resource to private if logical file is missing required metadata\n resource.update_public_and_discoverable()\n # delete the original resource file\n delete_resource_file(resource.short_id, res_file.id, user)\n log.info(\"Deleted original resource file.\")\n except Exception as ex:\n msg = \"NetCDF file type. Error when setting file type. 
Error:{}\"\n                    msg = msg.format(ex.message)\n                    log.exception(msg)\n                    if upload_folder:\n                        # delete any new files uploaded as part of setting file type\n                        folder_to_remove = os.path.join('data', 'contents', upload_folder)\n                        remove_folder(user, resource.short_id, folder_to_remove)\n                        log.info(\"Deleted newly created file type folder\")\n                    raise ValidationError(msg)\n                finally:\n                    # remove temp dir\n                    if os.path.isdir(temp_dir):\n                        shutil.rmtree(temp_dir)\n            else:\n                err_msg = \"Not a valid NetCDF file. File type file validation failed.\"\n                log.error(err_msg)\n                # remove temp dir\n                if os.path.isdir(temp_dir):\n                    shutil.rmtree(temp_dir)\n                raise ValidationError(err_msg)\n\n\ndef add_metadata_to_list(res_meta_list, extracted_core_meta, extracted_specific_meta,\n                         file_meta_list=None, resource=None):\n    \"\"\"\n    Helper function to populate metadata lists (*res_meta_list* and *file_meta_list*) with\n    extracted metadata from the NetCDF file. These metadata lists are then used for creating\n    metadata element objects by the caller.\n    :param res_meta_list: a list to store data to create metadata elements at the resource level\n    :param extracted_core_meta: a dict of extracted dublin core metadata\n    :param extracted_specific_meta: a dict of extracted metadata that is NetCDF specific\n    :param file_meta_list: a list to store data to create metadata elements at the file type level\n    (must be None when this helper function is used for NetCDF resource and must not be None\n    when used for NetCDF file type)\n    :param resource: an instance of BaseResource (must be None when this helper function is used\n    for NetCDF resource and must not be None when used for NetCDF file type)\n    :return:\n    \"\"\"\n\n    # add title\n    if resource is not None and file_meta_list is not None:\n        # file type\n        if resource.metadata.title.value.lower() == 'untitled resource':\n            add_title_metadata(res_meta_list, extracted_core_meta)\n    else:\n        # resource type\n        add_title_metadata(res_meta_list, extracted_core_meta)\n\n    # add abstract (Description element)\n    if resource is not None and file_meta_list is not None:\n        # file type\n        if resource.metadata.description is None:\n            add_abstract_metadata(res_meta_list, extracted_core_meta)\n    else:\n        # resource type\n        add_abstract_metadata(res_meta_list, extracted_core_meta)\n\n    # add keywords\n    if file_meta_list is not None:\n        # file type\n        add_keywords_metadata(file_meta_list, extracted_core_meta)\n    else:\n        # resource type\n        add_keywords_metadata(res_meta_list, extracted_core_meta, file_type=False)\n\n    # add creators:\n    if resource is not None:\n        # file type\n        add_creators_metadata(res_meta_list, extracted_core_meta,\n                              resource.metadata.creators.all())\n    else:\n        # resource type\n        add_creators_metadata(res_meta_list, extracted_core_meta,\n                              Creator.objects.none())\n\n    # add contributors:\n    if resource is not None:\n        # file type\n        add_contributors_metadata(res_meta_list, extracted_core_meta,\n                                  resource.metadata.contributors.all())\n    else:\n        # resource type\n        add_contributors_metadata(res_meta_list, extracted_core_meta,\n                                  Contributor.objects.none())\n\n    # add source (applies only to NetCDF resource type)\n    if extracted_core_meta.get('source') and file_meta_list is None:\n        source = {'source': {'derived_from': extracted_core_meta['source']}}\n        res_meta_list.append(source)\n\n    # add relation (applies only to NetCDF resource type)\n    if extracted_core_meta.get('references') and file_meta_list is None:\n        relation = {'relation': {'type': 'cites',\n                                 'value': extracted_core_meta['references']}}\n        res_meta_list.append(relation)\n\n    # 
add rights (applies only to NetCDF resource type)\n if extracted_core_meta.get('rights') and file_meta_list is None:\n raw_info = extracted_core_meta.get('rights')\n b = re.search(\"(?P<url>https?://[^\\s]+)\", raw_info)\n url = b.group('url') if b else ''\n statement = raw_info.replace(url, '') if url else raw_info\n rights = {'rights': {'statement': statement, 'url': url}}\n res_meta_list.append(rights)\n\n # add coverage - period\n if file_meta_list is not None:\n # file type\n add_temporal_coverage_metadata(file_meta_list, extracted_core_meta)\n else:\n # resource type\n add_temporal_coverage_metadata(res_meta_list, extracted_core_meta)\n\n # add coverage - box\n if file_meta_list is not None:\n # file type\n add_spatial_coverage_metadata(file_meta_list, extracted_core_meta)\n else:\n # resource type\n add_spatial_coverage_metadata(res_meta_list, extracted_core_meta)\n\n # add variables\n if file_meta_list is not None:\n # file type\n add_variable_metadata(file_meta_list, extracted_specific_meta)\n else:\n # resource type\n add_variable_metadata(res_meta_list, extracted_specific_meta)\n\n # add original spatial coverage\n if file_meta_list is not None:\n # file type\n add_original_coverage_metadata(file_meta_list, extracted_core_meta)\n else:\n # resource type\n add_original_coverage_metadata(res_meta_list, extracted_core_meta)\n\n\ndef add_original_coverage_metadata(metadata_list, extracted_metadata):\n \"\"\"\n Adds data for the original coverage element to the *metadata_list*\n :param metadata_list: list to which original coverage data needs to be added\n :param extracted_metadata: a dict containing netcdf extracted metadata\n :return:\n \"\"\"\n\n ori_cov = {}\n if extracted_metadata.get('original-box'):\n coverage_data = extracted_metadata['original-box']\n projection_string_type = \"\"\n projection_string_text = \"\"\n datum = \"\"\n if extracted_metadata.get('projection-info'):\n projection_string_type = extracted_metadata[\n 'projection-info']['type']\n projection_string_text = extracted_metadata[\n 'projection-info']['text']\n datum = extracted_metadata['projection-info']['datum']\n\n ori_cov = {'originalcoverage':\n {'value': coverage_data,\n 'projection_string_type': projection_string_type,\n 'projection_string_text': projection_string_text,\n 'datum': datum\n }\n }\n if ori_cov:\n metadata_list.append(ori_cov)\n\n\ndef add_creators_metadata(metadata_list, extracted_metadata, existing_creators):\n \"\"\"\n Adds data for creator(s) to the *metadata_list*\n :param metadata_list: list to which creator(s) data needs to be added\n :param extracted_metadata: a dict containing netcdf extracted metadata\n :param existing_creators: a QuerySet object for existing creators\n :return:\n \"\"\"\n if extracted_metadata.get('creator_name'):\n name = extracted_metadata['creator_name']\n # add creator only if there is no creator already with the same name\n if not existing_creators.filter(name=name).exists():\n email = extracted_metadata.get('creator_email', '')\n url = extracted_metadata.get('creator_url', '')\n creator = {'creator': {'name': name, 'email': email, 'homepage': url}}\n metadata_list.append(creator)\n\n\ndef add_contributors_metadata(metadata_list, extracted_metadata, existing_contributors):\n \"\"\"\n Adds data for contributor(s) to the *metadata_list*\n :param metadata_list: list to which contributor(s) data needs to be added\n :param extracted_metadata: a dict containing netcdf extracted metadata\n :param existing_contributors: a QuerySet object for existing contributors\n 
:return:\n \"\"\"\n if extracted_metadata.get('contributor_name'):\n name_list = extracted_metadata['contributor_name'].split(',')\n for name in name_list:\n # add contributor only if there is no contributor already with the\n # same name\n if not existing_contributors.filter(name=name).exists():\n contributor = {'contributor': {'name': name}}\n metadata_list.append(contributor)\n\n\ndef add_title_metadata(metadata_list, extracted_metadata):\n \"\"\"\n Adds data for the title element to the *metadata_list*\n :param metadata_list: list to which title data needs to be added\n :param extracted_metadata: a dict containing netcdf extracted metadata\n :return:\n \"\"\"\n if extracted_metadata.get('title'):\n res_title = {'title': {'value': extracted_metadata['title']}}\n metadata_list.append(res_title)\n\n\ndef add_abstract_metadata(metadata_list, extracted_metadata):\n \"\"\"\n Adds data for the abstract (Description) element to the *metadata_list*\n :param metadata_list: list to which abstract data needs to be added\n :param extracted_metadata: a dict containing netcdf extracted metadata\n :return:\n \"\"\"\n\n if extracted_metadata.get('description'):\n description = {'description': {'abstract': extracted_metadata['description']}}\n metadata_list.append(description)\n\n\ndef add_variable_metadata(metadata_list, extracted_metadata):\n \"\"\"\n Adds variable(s) related data to the *metadata_list*\n :param metadata_list: list to which variable data needs to be added\n :param extracted_metadata: a dict containing netcdf extracted metadata\n :return:\n \"\"\"\n for var_name, var_meta in extracted_metadata.items():\n meta_info = {}\n for element, value in var_meta.items():\n if value != '':\n meta_info[element] = value\n metadata_list.append({'variable': meta_info})\n\n\ndef add_spatial_coverage_metadata(metadata_list, extracted_metadata):\n \"\"\"\n Adds data for one spatial coverage metadata element to the *metadata_list**\n :param metadata_list: list to which spatial coverage data needs to be added\n :param extracted_metadata: a dict containing netcdf extracted metadata\n :return:\n \"\"\"\n if extracted_metadata.get('box'):\n box = {'coverage': {'type': 'box', 'value': extracted_metadata['box']}}\n metadata_list.append(box)\n\n\ndef add_temporal_coverage_metadata(metadata_list, extracted_metadata):\n \"\"\"\n Adds data for one temporal metadata element to the *metadata_list*\n :param metadata_list: list to which temporal coverage data needs to be added\n :param extracted_metadata: a dict containing netcdf extracted metadata\n :return:\n \"\"\"\n if extracted_metadata.get('period'):\n period = {\n 'coverage': {'type': 'period', 'value': extracted_metadata['period']}}\n metadata_list.append(period)\n\n\ndef add_keywords_metadata(metadata_list, extracted_metadata, file_type=True):\n \"\"\"\n Adds data for subject/keywords element to the *metadata_list*\n :param metadata_list: list to which keyword data needs to be added\n :param extracted_metadata: a dict containing netcdf extracted metadata\n :param file_type: If True then this metadata extraction is for netCDF file type, otherwise\n metadata extraction is for NetCDF resource\n :return:\n \"\"\"\n if extracted_metadata.get('subject'):\n keywords = extracted_metadata['subject'].split(',')\n if file_type:\n metadata_list.append({'subject': keywords})\n else:\n for keyword in keywords:\n metadata_list.append({'subject': {'value': keyword}})\n\n\ndef create_header_info_txt_file(nc_temp_file, nc_file_name):\n \"\"\"\n Creates the header text file 
using the *nc_temp_file*\n :param nc_temp_file: the netcdf file copied from irods to django\n for metadata extraction\n :return:\n \"\"\"\n\n if nc_dump.get_nc_dump_string_by_ncdump(nc_temp_file):\n dump_str = nc_dump.get_nc_dump_string_by_ncdump(nc_temp_file)\n else:\n dump_str = nc_dump.get_nc_dump_string(nc_temp_file)\n\n # file name without the extension\n temp_dir = os.path.dirname(nc_temp_file)\n dump_file_name = nc_file_name + '_header_info.txt'\n dump_file = os.path.join(temp_dir, dump_file_name)\n if dump_str:\n # refine dump_str first line\n first_line = list('netcdf {0} '.format(nc_file_name))\n first_line_index = dump_str.index('{')\n dump_str_list = first_line + list(dump_str)[first_line_index:]\n dump_str = \"\".join(dump_str_list)\n with open(dump_file, 'w') as dump_file_obj:\n dump_file_obj.write(dump_str)\n else:\n with open(dump_file, 'w') as dump_file_obj:\n dump_file_obj.write(\"\")\n\n return dump_file\n\n\ndef netcdf_file_update(instance, nc_res_file, txt_res_file, user):\n log = logging.getLogger()\n # check the instance type\n file_type = isinstance(instance, NetCDFLogicalFile)\n\n # get the file from irods to temp dir\n temp_nc_file = utils.get_file_from_irods(nc_res_file)\n nc_dataset = netCDF4.Dataset(temp_nc_file, 'a')\n\n try:\n # update title\n title = instance.dataset_name if file_type else instance.metadata.title.value\n\n if title.lower() != 'untitled resource':\n if hasattr(nc_dataset, 'title'):\n delattr(nc_dataset, 'title')\n nc_dataset.title = title\n\n # update keywords\n keywords = instance.metadata.keywords if file_type \\\n else [item.value for item in instance.metadata.subjects.all()]\n\n if hasattr(nc_dataset, 'keywords'):\n delattr(nc_dataset, 'keywords')\n\n if keywords:\n nc_dataset.keywords = ', '.join(keywords)\n\n # update key/value metadata\n extra_metadata_dict = instance.metadata.extra_metadata if file_type \\\n else instance.extra_metadata\n\n if hasattr(nc_dataset, 'hs_extra_metadata'):\n delattr(nc_dataset, 'hs_extra_metadata')\n\n if extra_metadata_dict:\n extra_metadata = []\n for k, v in extra_metadata_dict.items():\n extra_metadata.append(\"{}:{}\".format(k, v))\n nc_dataset.hs_extra_metadata = ', '.join(extra_metadata)\n\n # update temporal coverage\n temporal_coverage = instance.metadata.temporal_coverage if file_type \\\n else instance.metadata.coverages.all().filter(type='period').first()\n\n for attr_name in ['time_coverage_start', 'time_coverage_end']:\n if hasattr(nc_dataset, attr_name):\n delattr(nc_dataset, attr_name)\n\n if temporal_coverage:\n nc_dataset.time_coverage_start = temporal_coverage.value['start']\n nc_dataset.time_coverage_end = temporal_coverage.value['end']\n\n # update spatial coverage\n spatial_coverage = instance.metadata.spatial_coverage if file_type \\\n else instance.metadata.coverages.all().filter(type='box').first()\n\n for attr_name in ['geospatial_lat_min', 'geospatial_lat_max', 'geospatial_lon_min',\n 'geospatial_lon_max']:\n if hasattr(nc_dataset, attr_name):\n delattr(nc_dataset, attr_name)\n\n if spatial_coverage:\n nc_dataset.geospatial_lat_min = spatial_coverage.value['southlimit']\n nc_dataset.geospatial_lat_max = spatial_coverage.value['northlimit']\n nc_dataset.geospatial_lon_min = spatial_coverage.value['westlimit']\n nc_dataset.geospatial_lon_max = spatial_coverage.value['eastlimit']\n\n # update variables\n if instance.metadata.variables.all():\n dataset_variables = nc_dataset.variables\n for variable in instance.metadata.variables.all():\n if variable.name in 
dataset_variables.keys():\n dataset_variable = dataset_variables[variable.name]\n\n # update units\n if hasattr(dataset_variable, 'units'):\n delattr(dataset_variable, 'units')\n if variable.unit != 'Unknown':\n dataset_variable.setncattr('units', variable.unit)\n\n # update long_name\n if hasattr(dataset_variable, 'long_name'):\n delattr(dataset_variable, 'long_name')\n if variable.descriptive_name:\n dataset_variable.setncattr('long_name', variable.descriptive_name)\n\n # update method\n if hasattr(dataset_variable, 'comment'):\n delattr(dataset_variable, 'comment')\n if variable.method:\n dataset_variable.setncattr('comment', variable.method)\n\n # update missing value\n if variable.missing_value:\n if hasattr(dataset_variable, 'missing_value'):\n missing_value = dataset_variable.missing_value\n delattr(dataset_variable, 'missing_value')\n else:\n missing_value = ''\n try:\n dt = np.dtype(dataset_variable.datatype.name)\n missing_value = np.fromstring(variable.missing_value + ' ',\n dtype=dt.type, sep=\" \")\n except:\n pass\n\n if missing_value:\n dataset_variable.setncattr('missing_value', missing_value)\n\n # Update metadata element that only apply to netCDF resource\n if not file_type:\n\n # update summary\n if hasattr(nc_dataset, 'summary'):\n delattr(nc_dataset, 'summary')\n if instance.metadata.description:\n nc_dataset.summary = instance.metadata.description.abstract\n\n # update contributor\n if hasattr(nc_dataset, 'contributor_name'):\n delattr(nc_dataset, 'contributor_name')\n\n contributor_list = instance.metadata.contributors.all()\n if contributor_list:\n res_contri_name = []\n for contributor in contributor_list:\n res_contri_name.append(contributor.name)\n\n nc_dataset.contributor_name = ', '.join(res_contri_name)\n\n # update creator\n for attr_name in ['creator_name', 'creator_email', 'creator_url']:\n if hasattr(nc_dataset, attr_name):\n delattr(nc_dataset, attr_name)\n\n creator = instance.metadata.creators.all().filter(order=1).first()\n if creator:\n nc_dataset.creator_name = creator.name if creator.name else creator.organization\n\n if creator.email:\n nc_dataset.creator_email = creator.email\n if creator.description or creator.homepage:\n nc_dataset.creator_url = creator.homepage if creator.homepage \\\n else 'https://www.hydroshare.org' + creator.description\n\n # update license\n if hasattr(nc_dataset, 'license'):\n delattr(nc_dataset, 'license')\n if instance.metadata.rights:\n nc_dataset.license = \"{0} {1}\".format(instance.metadata.rights.statement,\n instance.metadata.rights.url)\n\n # update reference\n if hasattr(nc_dataset, 'references'):\n delattr(nc_dataset, 'references')\n\n reference_list = instance.metadata.relations.all().filter(type='cites')\n if reference_list:\n res_meta_ref = []\n for reference in reference_list:\n res_meta_ref.append(reference.value)\n nc_dataset.references = ' \\n'.join(res_meta_ref)\n\n # update source\n if hasattr(nc_dataset, 'source'):\n delattr(nc_dataset, 'source')\n\n source_list = instance.metadata.sources.all()\n if source_list:\n res_meta_source = []\n for source in source_list:\n res_meta_source.append(source.derived_from)\n nc_dataset.source = ' \\n'.join(res_meta_source)\n\n # close nc dataset\n nc_dataset.close()\n\n except Exception as ex:\n log.exception(ex.message)\n if os.path.exists(temp_nc_file):\n shutil.rmtree(os.path.dirname(temp_nc_file))\n raise ex\n\n # create the ncdump text file\n nc_file_name = os.path.basename(temp_nc_file).split(\".\")[0]\n temp_text_file = 
create_header_info_txt_file(temp_nc_file, nc_file_name)\n\n # push the updated nc file and the txt file to iRODS\n utils.replace_resource_file_on_irods(temp_nc_file, nc_res_file,\n user)\n utils.replace_resource_file_on_irods(temp_text_file, txt_res_file,\n user)\n\n metadata = instance.metadata\n metadata.is_dirty = False\n metadata.save()\n\n # cleanup the temp dir\n if os.path.exists(temp_nc_file):\n shutil.rmtree(os.path.dirname(temp_nc_file))\n" ]
[ [ "numpy.fromstring", "numpy.dtype" ] ]
RichardScottOZ/uncover-ml
[ "05faede0efa989997f9eabaf18c45267011eb861" ]
[ "uncoverml/scripts/gridsearch_cli.py" ]
[ "\"\"\"\nRun the various machine learning parameter optimisation using\nscikit-learn GridSearchCv.\n\nAvailable scorers:\n Regression:\n 'r2', 'expvar', 'smse', 'lins_ccc'\n Classification:\n 'accuracy'\n Classification with probability:\n 'log_loss', 'auc'\n\nNot yet implemented:\n Regression with variance (models w/ `predict_dist`):\n 'mll'\n Classification:\n 'mean_confusion', 'mean_confusion_normalized'\n\nIf no scorers are provided, then the default `score` method of the model\nwill be used.\n\nNote that GridSearchCV is run with `refit` set to False. This is because\nthe `_best_estimator` (best model found by GridSearchCV) is never used\nto predict anything, so fitting it is a waste.\n\nIf you ever do want to use refit, keep in mind that if you use mutliple\nscorers, refit needs to be set to the name of a scorer used to find the\nbest parameters for refitting the model.\n\n.. program-output:: gridsearch --help\n\"\"\"\nimport logging\nimport os\nfrom collections import OrderedDict\nfrom itertools import product\n\nimport click\nimport pandas as pd\nfrom sklearn import decomposition\nfrom sklearn.gaussian_process.kernels import WhiteKernel\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\n\nimport uncoverml as ls\nimport uncoverml.config\nimport uncoverml.mllog\nfrom uncoverml.config import ConfigException\nfrom uncoverml.optimise.models import (\n TransformedGPRegressor,\n kernels,\n TransformedSVR\n )\nfrom uncoverml.scripts.learn_cli import _load_data\nfrom uncoverml.transforms import target as transforms\nfrom uncoverml.optimise.models import transformed_modelmaps as all_modelmaps\nfrom uncoverml.optimise.scorers import (regression_predict_scorers, classification_predict_scorers,\n classification_proba_scorers)\n\n_logger = logging.getLogger(__name__)\n\n\npca = decomposition.PCA()\nalgos = {k: v() for k, v in all_modelmaps.items()}\nalgos['transformedgp'] = TransformedGPRegressor(n_restarts_optimizer=10,\n normalize_y=True)\nalgos['transformedsvr'] = TransformedSVR(verbose=True, max_iter=1000000)\n\n\ndef setup_pipeline(config):\n if config.optimisation['algorithm'] not in algos:\n raise ConfigException('Optimisation algorithm must exist in avilable algorithms: {}'\n .format(list(algos.keys())))\n\n steps = []\n param_dict = {}\n\n if 'featuretransforms' in config.optimisation:\n config.featuretransform = config.optimisation['featuretransforms']\n if 'pca' in config.featuretransform:\n steps.append(('pca', pca))\n for k, v in config.featuretransform['pca'].items():\n param_dict['pca__' + k] = v\n\n if 'scorers' in config.optimisation:\n scorers = config.optimisation['scorers']\n scorer_maps = [regression_predict_scorers, classification_proba_scorers, \n classification_predict_scorers]\n\n scoring = {}\n\n for s in scorers:\n for sm in scorer_maps:\n f = sm.get(s)\n if f is not None:\n break\n if f is None:\n _logger.warning(f\"Scorer '{s}' not found!\")\n else:\n scoring[s] = f\n if not scoring:\n scoring = None\n else:\n scoring = None\n\n if 'hyperparameters' in config.optimisation:\n steps.append((config.optimisation['algorithm'],\n algos[config.optimisation['algorithm']]))\n for k, v in config.optimisation['hyperparameters'].items():\n if k == 'target_transform':\n v = [transforms.transforms[vv]() for vv in v]\n if k == 'kernel':\n # for scikitlearn kernels\n if isinstance(v, dict):\n V = []\n for kk, value in v.items():\n value = OrderedDict(value)\n values = [v for v in value.values()]\n prod = product(* values)\n keys = 
value.keys()\n combinations = []\n for p in prod:\n d = {}\n for kkk, pp in zip(keys, p):\n d[kkk] = pp\n combinations.append(d)\n V += [kernels[kk](** c) + WhiteKernel()\n for c in combinations]\n v = V\n \n param_dict[config.optimisation['algorithm'] + '__' + k] = v\n\n pipe = Pipeline(steps=steps)\n\n estimator = GridSearchCV(pipe,\n param_dict,\n n_jobs=config.n_jobs,\n iid=False,\n scoring=scoring,\n refit=False,\n pre_dispatch='2*n_jobs',\n verbose=True,\n cv=5,\n )\n\n return estimator, scoring\n\n\ndef main(pipeline_file, partitions, njobs):\n config = ls.config.Config(pipeline_file)\n config.n_jobs = njobs\n estimator, scoring = setup_pipeline(config)\n _logger.info('Running optimisation for {}'.format(config.optimisation['algorithm']))\n\n training_data, _ = _load_data(config, partitions)\n targets_all = training_data.targets_all\n x_all = training_data.x_all\n \n _logger.info(\"Optimising {} model\".format(config.optimisation['algorithm']))\n # Runs 'fit' on selected model ('estimator' in scikit-learn) with \n # hyperparameter combinations.\n estimator.fit(X=x_all, y=targets_all.observations)\n\n if scoring is None:\n sort_by = 'rank_test_score'\n else:\n sort_by = 'rank_test_' + list(scoring.keys())[0]\n\n pd.DataFrame.from_dict(estimator.cv_results_).sort_values(by=sort_by)\\\n .to_csv(config.optimisation_results_file)\n\n" ]
[ [ "sklearn.model_selection.GridSearchCV", "sklearn.pipeline.Pipeline", "pandas.DataFrame.from_dict", "sklearn.gaussian_process.kernels.WhiteKernel", "sklearn.decomposition.PCA" ] ]
Guangyi-Zhang/eiv-treatment-response
[ "66ce76c52a3c05cfac681455c1ce9ae507614270" ]
[ "inducingpolicy.py" ]
[ "import numpy as np\nimport pymc3 as pm\n\n\ndef inducing_policy3(n_inducing_points, t, y, tx, bwin=60, awin=180):\n '''\n uniform excluding the regions of treatment\n '''\n idx_t = t > -1 # all True\n for tx_ in tx:\n idx_t &= (t > tx_+awin) | (t < tx_-bwin)\n t_valid = t[idx_t]\n\n idx = np.array([i_ for i_ in range(0, len(t_valid), len(t_valid)//n_inducing_points)])\n tu = np.stack([t_valid[idx], t_valid[idx]], 1)\n return tu, None\n\n\ndef inducing_policy2(n_inducing_points, t, y, window=10*60):\n '''\n kmeans([t,y]), then remove y>=window_mean\n '''\n feat = np.stack([t, y], 1)\n tu = pm.gp.util.kmeans_inducing_points(n_inducing_points, feat)\n\n idx, idx_exc = [], []\n for i, (t_, y_) in enumerate(tu):\n mean = y[(t > (t_-window//2)) & (t < (t_+window//2))].mean()\n if y_ < mean:\n idx.append(i)\n else:\n idx_exc.append(i)\n return tu[np.array(idx)], tu[np.array(idx_exc)]\n\n\ndef inducing_policy1(n_inducing_points, t, y):\n '''\n kmeans([t,y]), then remove y>=mean\n '''\n feat = np.stack([t, y], 1)\n tu = pm.gp.util.kmeans_inducing_points(n_inducing_points, feat)\n idx = tu[:,1] < y.mean()\n return tu[idx,:], tu[~idx,:]\n\n\ndef inducing_policy0(n_inducing_points, t, y):\n '''\n uniform\n '''\n idx = np.array([i_ for i_ in range(0, len(t), len(t)//n_inducing_points)])\n tu = np.stack([t[idx], y[idx]], 1)\n return tu, None\n" ]
[ [ "numpy.array", "numpy.stack" ] ]
AMOLFResonantNanophotonics/CPA
[ "da665fa0fe3c90b1fcd87236e018a44f3fe8b2e0" ]
[ "Scripts_simulate_data/simulate_qdot.py" ]
[ "'''\nFunction that simulates photon counting data with the following characteristics\n- The data is distributed randomly over two detector channels\n- The data is assumed generated by a pulsed laser excitation\n- The emitter itself perfectly antibunches, the two detectors have independent noise\n- The data is specified is time tags - detector and reference arrival times\n(int 64, relative to a base unit that is a hypothetical counter card resolution)\n\n\nThe user gets to specify:\n- The number of levels nlevels. The simulated dot switches with no memory.\n- The instantaneous mean count rate and decay rate for levels n = 1...nlevels\n- On times for level n are taken from a powerlaw, exponents alpha_n specifiable\n- Background level\n- Total measurement duration / record length\n- Hypothetical set up info, i.e., laser rep. rate and timing card base unit\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nfrom matplotlib import gridspec as gsp\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\n\nimport preamble_for_simulation as pre\n\n\ndef samplepowerlaw(x,alpha,xmin,xmax):\n # %http://up-rs-esp.github.io/bpl/_modules/bpl.html#sample\n beta = 1.-alpha\n a = xmin**beta\n b = xmax**beta-a\n\n s = (a+b*x)**(1./beta)\n return s\n\n\n\ndef MakeSimulatedData():\n\n if not (len(pre.alpha_lst) == len(pre.I_lst) == len(pre.g_lst) == pre.nlevels):\n print('all input lists must have length '+str(pre.nlevels)+'!')\n\n # =============================================================================\n # draw from a multi-powerlaw distribution\n # output is a sequence of nominal jumptimes, and level assignments\n # =============================================================================\n\n print('Performing random draw of jumptimes and level assignments')\n\n n=1\n success = 0\n while success==0:\n\n # sample nlevels powerlaws\n N_powerlawelements = int(n*1E4) # the number of powerlaw elements to sample. This is the nr of jumps we simulate\n # Note that this will be cropped to fit in the measurement time\n\n which_level = (np.random.random(N_powerlawelements)*pre.nlevels).astype(int) # which intensity level are you in\n which_level = which_level[:-1][np.diff(which_level) != 0] # ensure you are not switching to an identical level\n\n # create the switching durations and timestamps\n time_durations_bin = np.zeros(N_powerlawelements)\n for i, j in enumerate(which_level):\n time_durations_bin[i] = samplepowerlaw(np.random.rand(1),pre.alpha_lst[j], pre.Tshortest_ns/pre.minibinsize_ns, pre.Tlongest_ns/pre.minibinsize_ns)[0]\n time_durations_bin = time_durations_bin.astype(int)\n\n # trim the jump times to the measurement time\n seg_times_mbin = np.cumsum(np.int64(time_durations_bin))\n seg_times_mbin = seg_times_mbin[seg_times_mbin<int(pre.NN)]\n\n # did we succesfully fill the entire measurement range with jumps\n if len(seg_times_mbin) < len(time_durations_bin):\n success = 1\n else:\n n = n+1\n print('Another round of sampling powerlaws')\n\n # adding the start and the end of the measurement time to the jump times, and giving it a more convenient name\n jumptimes = np.append(np.insert(seg_times_mbin, 0, 0), pre.NN-1)\n\n\n # =============================================================================\n # Create discrete photon events [does a give laser pulse [minibinsize] result in a click\n # .... 
and delay times, i.e., how long after the laser pulse does the click arise\n    # =============================================================================\n\n    trace_I = np.zeros(len(jumptimes)-1)\n    trace_g = np.zeros(len(jumptimes)-1)\n\n\n    # in principle we want to use the uniform probability distribution to draw zeroes and ones for empty, and full,\n    # laser pulse cycles. However, this is memory-wise quite prohibitive, as we are talking about maybe 1E9 or more bins, of which perhaps one promille is filled\n    # hence we just try to allocate space for the photon events\n    #\n    # As we cycle over segments, the logical approach is to append photon events to an already existing list to build the time trace\n    # However, append in Python is slow, as it reallocates the array\n    # to avoid appending, we over allocate space for the photon counting array and insert values. We trim out overallocated space later\n    maximumallocatedlength= int(max(pre.I_lst)*pre.NN*pre.minibinsize_ns*1E-9*2) # this should be so overdimensioned that no allocation error could occur later on\n    maximumallocatednoise= int(pre.I_noise*pre.NN*pre.minibinsize_ns*1E-9*10)\n\n\n    timestamps_bin = np.zeros(maximumallocatedlength).astype(np.int64)\n    delaylist_bin = np.zeros(maximumallocatedlength).astype(np.int64)\n    noiseclicks_chA = np.zeros(maximumallocatednoise).astype(np.int64)\n    noiseclicks_chB = np.zeros(maximumallocatednoise).astype(np.int64)\n\n\n\n\n\n    idxclick=0\n    idxnoiseA=0\n    idxnoiseB=0\n\n    print('Performing random draw of photon detection events')\n\n    for q in range(len(jumptimes)-1):\n        pval = pre.I_lst[which_level[q]]*1E-9*pre.minibinsize_ns\n        trace_I[q] = pre.I_lst[which_level[q]] #nominal instantaneous intensity\n        trace_g[q] = pre.g_lst[which_level[q]] #nominal instantaneous decay rate\n\n\n        # for given segment:\n        #clicks due to the emitter with time granularity equal to the laser rep rate\n        timestamps = ((jumptimes[q]+np.nonzero((np.random.rand(jumptimes[q+1]-jumptimes[q])<=pval).astype(int))[0]) * pre.minibinsize_ns / pre.dtau_ns).astype(np.int64)\n        numclicks=len(timestamps)\n\n        # arrival times relative to laser\n        delaylist_ns = np.random.exponential(1/trace_g[q], size=numclicks) #random draws, expontl prob distrib\n\n\n\n        #bookkeeping to insert segment clicks in bigger list\n        if(numclicks>0):\n            timestamps_bin[idxclick: idxclick+numclicks] = timestamps[:]\n            delaylist_bin[ idxclick: idxclick+numclicks] =((delaylist_ns[:] / pre.dtau_ns).astype(np.int64))\n\n\n        # same for noise for both detectors (granularity - per laser repetition rate here, fine time shifts added below)\n        # doing this per segment avoids the memory over allocation problem.\n        pval_noise = pre.I_noise*1E-9*pre.minibinsize_ns\n\n        noiseA = ((jumptimes[q]+np.nonzero((np.random.rand(jumptimes[q+1]-jumptimes[q])<=pval_noise).astype(int))[0]) * pre.minibinsize_ns / pre.dtau_ns).astype(np.int64)\n        noiseB = ((jumptimes[q]+np.nonzero((np.random.rand(jumptimes[q+1]-jumptimes[q])<=pval_noise).astype(int))[0]) * pre.minibinsize_ns / pre.dtau_ns).astype(np.int64)\n\n\n        numnoiseA=len(noiseA)\n        if(numnoiseA>0):\n            noiseclicks_chA[idxnoiseA:idxnoiseA+numnoiseA]=noiseA[:]\n        numnoiseB=len(noiseB)\n        if(numnoiseB>0):\n            noiseclicks_chB[idxnoiseB:idxnoiseB+numnoiseB]=noiseB[:]\n\n        idxclick=idxclick+numclicks\n        idxnoiseA=idxnoiseA+numnoiseA\n        idxnoiseB=idxnoiseB+numnoiseB\n\n\n\n    # =============================================================================\n    # clip overdimensioned arrays\n    # =============================================================================\n\n
timestamps_bin = timestamps_bin[0:idxclick-1]\n delaylist_bin = delaylist_bin[0:idxclick-1]\n noiseclicks_chA=noiseclicks_chA[0:idxnoiseA]\n noiseclicks_chB=noiseclicks_chB[0:idxnoiseB]\n\n # =============================================================================\n # put the emitter event times and delays together. First in absence of background\n # =============================================================================\n\n timestamps_chR_bin = timestamps_bin # spoofs laser reference channel - for every detector click, the laser pulse clock\n timestamps_bin = timestamps_bin - int(pre.minibinsize_ns/pre.dtau_ns) + delaylist_bin # actual photon clicks, cares about fluorescence decay dynamics\n\n\n # =============================================================================\n # antibunch data into separate detectors as if for Hanbury-Brown Twiss\n # =============================================================================\n print('Antibunching the simulated emission events over detectors, subsequently including uncorrelated detector noise counts')\n\n whichdet = np.random.random(len(timestamps_bin))>0.5 # 50:50 routing ratio\n timestamps_chA_bin = timestamps_bin[whichdet]\n timestamps_chB_bin = timestamps_bin[np.logical_not(whichdet)]\n\n\n # =============================================================================\n # adding background counts a posteriori so the detector noise doesn't antibunch\n # =============================================================================\n\n #Generate random time moments relative to the 100 ns trigger windows that we already determined\n delay_noise_chA_bin = (np.random.rand(len(noiseclicks_chA))*(pre.minibinsize_ns/pre.dtau_ns)).astype(np.int64)\n delay_noise_chB_bin = (np.random.rand(len(noiseclicks_chB))*(pre.minibinsize_ns/pre.dtau_ns)).astype(np.int64)\n\n #full finegrained random click arrival times, intersperse in data\n timestamps_chA_bin = np.sort(np.concatenate((timestamps_chA_bin,noiseclicks_chA-delay_noise_chA_bin)))\n timestamps_chB_bin = np.sort(np.concatenate((timestamps_chB_bin,noiseclicks_chB-delay_noise_chB_bin)))\n\n #interspersing the related reference trigger data for the noise count\n noisereferencepulses=np.sort(np.concatenate((noiseclicks_chA,noiseclicks_chB)))\n timestamps_chR_bin = np.sort(np.concatenate((timestamps_chR_bin,noisereferencepulses)))\n\n\n # =============================================================================\n # Save the data\n # =============================================================================\n\n # create the folder to save the data\n\n savepath = pre.simulated_parquetpath + pre.dot_foldername\n if not os.path.exists(savepath):\n os.makedirs(savepath)\n \n # save the photon events\n print('\\nSaving generated photon and reference events in parquet files:')\n print('Folder: '+savepath)\n for label in ['A', 'B', 'R']:\n datalabel = 'timestamps_ch'+label+'_bin'\n df = pd.DataFrame({'ch'+label: eval(datalabel)})\n table = pa.Table.from_pandas(df, preserve_index=False)\n print('File: '+datalabel+'.parquet')\n pq.write_table(table, savepath+datalabel+'.parquet')\n\n # save the generated jump times\n print('\\nFor reference, also saving a .csv file with')\n print('the nominal jump time,intensity and decay rate sequence')\n print('according to which the photon events were drawn')\n\n filepath_seg= pre.simulated_parquetpath + pre.dot_foldername + pre.seg_filename\n print('File: '+filepath_seg)\n\n with open(filepath_seg,\"w\") as f:\n print(\"jumptime [bin, end] intensity 
decayrate [ns-1] \", file=f)\n np.savetxt(f, np.stack((jumptimes[1:], trace_I, trace_g),1), delimiter=', ')\n" ]
[ [ "numpy.logical_not", "numpy.random.random", "numpy.random.exponential", "numpy.stack", "numpy.concatenate", "numpy.int64", "numpy.diff", "numpy.insert", "numpy.random.rand", "numpy.zeros" ] ]