Column      Type
repo_name   string (lengths 6–130)
hexsha      list
file_path   list
code        list
apis        list
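Each record below pairs a repository name with parallel lists: one commit SHA, one file path, one source string, and one API list per captured file (e.g. the dreamerlin/MVFNet record carries two entries in every list). A minimal loading sketch, assuming the records are stored as JSON Lines under a hypothetical file name — neither the storage format nor the name is stated by this dump:

import json

# "train.jsonl" is a placeholder; substitute the actual dataset shard.
with open("train.jsonl") as f:
    for line in f:
        record = json.loads(line)
        # hexsha, file_path, code and apis are parallel lists, one entry
        # per file captured from the repository at that commit, so zip()
        # preserves the per-file alignment.
        for sha, path, code, apis in zip(record["hexsha"],
                                         record["file_path"],
                                         record["code"],
                                         record["apis"]):
            print(record["repo_name"], path, len(code), apis)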
Thomaw/Rubik-s-cube-solver
[ "8258065a01707a14e8b61463d132d479e0e9f159" ]
[ "Code python/Pycharm/methode_video.py" ]
[ "import cv2\r\nimport numpy as np\r\nimport colorsys\r\nimport math\r\n\r\n\r\ndef visio():\r\n '''Initialisation de l'utilisation de la caméra'''\r\n cameratesting = True # Autorisation d'utiliser la webcam\r\n colors_modification = True # Autorisation de paramétrer les couleurs recherchées\r\n cam_port = 0 # Le port d'entrée d'une webcam pour un ordinateur portable\r\n # Ce serait 1 si c'était une webcam externe\r\n\r\n cam = cv2.VideoCapture(cam_port) # Création de l'objet caméra\r\n\r\n inter_color = []\r\n Color_list = [] # Liste des couleurs reconnues\r\n number = 0 # Compteur\r\n\r\n def empty_callback(x): # Fonction inutile mais nécessaire pour la suite\r\n pass\r\n\r\n ## Création des fenêtres ##\r\n cv2.namedWindow('Image webcam', 0) # Création d'une fenêtre ajustable\r\n cv2.namedWindow('Image filtree', 0) # Création d'une fenêtre ajustable\r\n # Dimensionnement\r\n cv2.resizeWindow('Image webcam', 600, 600) # Redimensionnement de l'image\r\n cv2.moveWindow('Image webcam', 750, 30) # Placement sur l'écran de l'image\r\n cv2.resizeWindow('Image filtree', 500, 500) # Redimensionnement de l'image\r\n cv2.moveWindow('Image filtree', 240, 140) # Placement sur l'écran de l'image\r\n\r\n # Positionnement des rectangles qui vont être placés sur l'image dans la fonction draw_current_stickers()\r\n xmax, ymax, size = 250, 200, 70\r\n current_stickers = [(xmax, ymax), (xmax + size, ymax), (xmax + 2 * size, ymax),\r\n (xmax, ymax + size), (xmax + size, ymax + size), (xmax + 2 * size, ymax + size),\r\n (xmax, ymax + 2 * size), (xmax + size, ymax + 2 * size), (xmax + 2 * size, ymax + 2 * size)]\r\n\r\n def draw_current_stickers(frame):\r\n \"\"\"Dessine les 9 rectangles sur l'image.\"\"\"\r\n for index, (x, y) in enumerate(current_stickers): # Pour chaque position définie ci-dessus\r\n # Création d'un rectangle de couleur blanche\r\n cv2.rectangle(frame, (x, y), (x + 32, y + 32), (255, 255, 255), 2)\r\n\r\n\r\n # Positionnement de cube pour afficher les couleurs d'une image une fois enregistrée\r\n # Utilisé dans la fonction draw_blank_cube()\r\n blank_stickers = [(10, 10), (35, 10), (60, 10),\r\n (10, 35), (35, 35), (60, 35),\r\n (10, 60), (35, 60), (60, 60)]\r\n\r\n # Définition des couleurs recensées lors de l'enregistrement\r\n # Utilisé dans see_stickers()\r\n picture = [[0, 0, 0], [0, 0, 0], [0, 0, 0],\r\n [0, 0, 0], [0, 0, 0], [0, 0, 0],\r\n [0, 0, 0], [0, 0, 0], [0, 0, 0]]\r\n\r\n def see_stickers(frame):\r\n \"\"\"Enregistrement des couleurs lues par la webcam\"\"\"\r\n\r\n for index, (x, y) in enumerate(current_stickers): # Pour chaque position des rectangles\r\n # Sélectionne la couleur du pixel central sur l'image frame\r\n pix = frame[int(x - size / 2), int(y + size)]\r\n a, b, c = pix[0], pix[1], pix[2] # Création des trois couleurs RGB\r\n if number == 3 and x == xmax + size and y == ymax + size: # Cube centrale de la face blanche\r\n a, b, c = 255, 255, 255 # Couleur blanche\r\n picture[index] = [int(a), int(b), int(c)] # Indexation des couleurs à un vecteur\r\n inter_color.append([a, b, c]) # Indexation des couleurs à la liste intermédiaire\r\n\r\n # Retourne le cube (car la webcam nous force à faire tourner le cube pour voir la face)\r\n for ii in range(6, 9):\r\n for jj in range(0, 3):\r\n Color_list.append(inter_color[ii - 3*jj]) # Indexation des couleurs à la liste finale\r\n # print(\"a=\", Color_list)\r\n\r\n # Transposition dans l'affichage des cubes en haut à gauche\r\n picture[1], picture[3] = picture[3], picture[1]\r\n picture[2], picture[6] = picture[6], 
picture[2]\r\n picture[5], picture[7] = picture[7], picture[5]\r\n\r\n inter_color.clear() # Réinitialisation des couleurs trouvées\r\n\r\n def draw_blank_cube(frame):\r\n \"\"\"Affichage des couleurs vues par l'ordinateur.\"\"\"\r\n for index, (x, y) in enumerate(blank_stickers): # Pour chaque position des rectangles\r\n # Affiche à l'utilisateur les couleurs enregistrées sur les rectangles en haut à gauche\r\n cv2.rectangle(frame, (x, y), (x + 20, y + 20), picture[index], -1)\r\n cv2.imshow('Image webcam', frame)\r\n\r\n # Méthode d'arrêt de l'application\r\n stop_application = False\r\n\r\n def save_face(frame, stop_application):\r\n # On regarde si l'on a appuyé 7 fois sur le bouton d'enregistrement\r\n if number != 7:\r\n # Si ce n'est pas le cas, on affiche le nombre d'images enregistrées\r\n cv2.putText(frame, \"Save Face = {}/6\".format(number),\r\n (20, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)\r\n else:\r\n # Si c'est le cas, on arrête le programme\r\n stop_application = True\r\n\r\n return stop_application\r\n\r\n def_centers = ['yellow', 'blue', 'white','red', 'green', 'orange', 'all']\r\n # Définition des couleurs que l'on recherche ainsi que ses bornes limites par défaut\r\n def_colors = ['blue', 'yellow', 'orange', 'red', 'green', 'white']\r\n Lower = [[89, 178, 51], [21, 110, 117], [0, 110, 125], [150, 120, 0], [50, 150, 100], [100, 0, 150]] # hsv\r\n Upper = [[118, 255, 194], [45, 255, 255], [17, 255, 255], [200, 255, 255], [100, 255, 220], [150, 50, 255]] # hsv\r\n\r\n # Méthode de filtrage de l'image de la webcam\r\n def filtrage(Lower, Upper):\r\n lower_hsv_bleu = np.array(Lower[0]) # Minimum admissible pour la couleur bleue\r\n upper_hsv_bleu = np.array(Upper[0]) # Maximum admissible pour la couleur bleue\r\n # Vérifie si les éléments de l'image sont dans les limites imposées par la couleur bleue\r\n mask_bleu = cv2.inRange(hsv, lower_hsv_bleu, upper_hsv_bleu)\r\n\r\n # Modification de l'image à travers le masque bleu\r\n # Permets de ne garder que les pixels de l'image qui sont bleus\r\n midframe_bleu = cv2.bitwise_and(frame, frame, mask=mask_bleu)\r\n\r\n for i in range(1, 6):\r\n lower_hsv_z = np.array(Lower[i]) # Minimum admissible pour la couleur i\r\n upper_hsv_z = np.array(Upper[i]) # Maximum admissible pour la couleur i\r\n # Vérifie si les éléments de l'image sont dans les limites imposées par la couleur i\r\n mask_z = cv2.inRange(hsv, lower_hsv_z, upper_hsv_z)\r\n # Modification de l'image à travers le masque de l'image i\r\n midframe_z = cv2.bitwise_and(frame, frame, mask=mask_z)\r\n # Ajoute les pixels de la couleur i, aux autres pixels déjà trouvés\r\n midframe_bleu = cv2.bitwise_or(midframe_bleu, midframe_z)\r\n\r\n # Une fois sortie de la boucle, on se retrouve avec un filtre contenant les 6 couleurs\r\n # du rubik's Cube. 
Il ne reste qu'à l'afficher sur une autre image afin de voir le résultat\r\n frame2 = midframe_bleu\r\n return frame2\r\n\r\n # Début du programme\r\n while cameratesting:\r\n\r\n _, frame = cam.read() # Capture une image vidéo à partir de l'objet de la caméra\r\n frame = cv2.flip(frame, 1) # Rotation de la caméra\r\n draw_current_stickers(frame) # Affiche les rectangles au milieu de l'image\r\n draw_blank_cube(frame) # Affiche les rectangles vierges sur le côté de l'image\r\n stop_application = save_face(frame, stop_application) # Regarde le nombre d'enregistrement\r\n\r\n if stop_application == True: # Si 7 appuis\r\n break # Arrêt du programme\r\n\r\n cv2.putText(frame, \"Save {} center\".format(def_centers[number]),\r\n (380, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)\r\n\r\n cv2.imshow('Image webcam', frame) # Affiche sur la fenêtre l'image de la webcam\r\n key = cv2.waitKey(10) # Définis la durée d'affichage d'une image en millisecondes\r\n\r\n if key % 256 == 27: # Si l'on appui sur \"échap\"\r\n print(\"Arrêt du programme...\") # Affichage de arrêt du programme\r\n break # Arrêt du programme\r\n\r\n elif key % 256 == 32: # Si l'on appui sur \"Espace\"\r\n number += 1 # Incrémente le compteur\r\n print('Enregistrement image ...') # Affiche que l'on a enregistré une image\r\n see_stickers(frame2) # Lis les couleurs sur l'image filtrée\r\n\r\n elif key == ord('c'): # Si l'on appui sur \"c\"\r\n if number > 0: # Si une image a déjà été enregistrée\r\n print('Suppression image ...') # Affiche que l'on supprime une image\r\n number -= 1 # Décrémente le compteur\r\n Color_list = Color_list[:-9] # Décrémente la liste finale\r\n\r\n elif key == ord('d'): # Si l'on appui sur \"d\"\r\n print('Réinitialisation des images ...')# Affichage de la suppression des images\r\n number = 0 # Réinitialisation du compteur\r\n Color_list = [] # Réinitialisation des couleurs\r\n\r\n elif key == ord('z'): # Si l'on appui sur \"z\"\r\n cv2.namedWindow('Calibrage cube', 0) # Création d'une nouvelle fenêtre\r\n # Dimensionnement de l'image\r\n cv2.resizeWindow('Calibrage cube', 300, 300)\r\n cv2.moveWindow('Calibrage cube', 0, 0)\r\n cl = 0\r\n\r\n for i in range(0, 6): # Pour chaque couleur\r\n\r\n '''Création de six curseurs pour régler chacun des paramètres d'une image en HSV\r\n - Hmin et Hmax\r\n - Smin et Smax\r\n - Vmin et Vmax'''\r\n\r\n cv2.createTrackbar('H Upper', \"Calibrage cube\", Upper[i][0], 179, empty_callback)\r\n cv2.createTrackbar('H Lower', \"Calibrage cube\", Lower[i][0], 179, empty_callback)\r\n cv2.createTrackbar('S Upper', \"Calibrage cube\", Upper[i][1], 255, empty_callback)\r\n cv2.createTrackbar('S Lower', \"Calibrage cube\", Lower[i][1], 255, empty_callback)\r\n cv2.createTrackbar('V Upper', \"Calibrage cube\", Upper[i][2], 255, empty_callback)\r\n cv2.createTrackbar('V Lower', \"Calibrage cube\", Lower[i][2], 255, empty_callback)\r\n\r\n # Tant que l'on modifie les couleurs\r\n while colors_modification:\r\n _, frame = cam.read() # On lit ce que voit la webcam\r\n # On enlève l'affichage réelle pour se concentrer sur l'image filtrée\r\n cv2.destroyWindow('Image webcam')\r\n\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Conversion des couleurs de l'image en HSV\r\n lower_hsv = np.array(Lower[i]) # Limite inférieure de notre couleur\r\n upper_hsv = np.array(Upper[i]) # Limite supérieure de notre couleur\r\n\r\n # Regarde quel pixel est dans les limites de la couleur i\r\n mask = cv2.inRange(hsv, lower_hsv, upper_hsv)\r\n # Garde l'image de la couleur i 
uniquement\r\n frame2 = cv2.bitwise_and(frame, frame, mask=mask)\r\n\r\n # Affichage de quelle couleur nous calibrons\r\n cv2.putText(frame2, \"Calibrating {} colors\".format(def_colors[i]),\r\n (60, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)\r\n\r\n cv2.imshow('Image filtree', frame2) # Affiche l'image\r\n key = cv2.waitKey(10) # Temps d'affichage d'une image\r\n\r\n '''On récupère ce que l'utilisateur définit comme limite de la couleur en question\r\n et affiche en temps réel l'image modifiée filtrée'''\r\n Upper[i][0] = cv2.getTrackbarPos('H Upper', 'Calibrage cube')\r\n Lower[i][0] = cv2.getTrackbarPos('H Lower', 'Calibrage cube')\r\n Upper[i][1] = cv2.getTrackbarPos('S Upper', 'Calibrage cube')\r\n Lower[i][1] = cv2.getTrackbarPos('S Lower', 'Calibrage cube')\r\n Upper[i][2] = cv2.getTrackbarPos('V Upper', 'Calibrage cube')\r\n Lower[i][2] = cv2.getTrackbarPos('V Lower', 'Calibrage cube')\r\n\r\n if key % 256 == 27: # Si l'on appui sur \"échap\"\r\n cl = 1 # Variable d'arrêt sur 1\r\n break # casse la boucle\r\n\r\n if key == ord('y'): # Si l'on appui sur \"y\"\r\n break # Passe à la couleur suivante\r\n\r\n if cl == 1: # Si l'on souhaitait arrêter le programme\r\n cl = 0 # Variable d'arrêt sur 0\r\n break # Arrêt du calibrage\r\n\r\n cv2.destroyWindow('Calibrage cube') # Arrêt de l'affichage de la fenêtre de modification\r\n\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # génère une version hsv de l'image de la webcam\r\n # Applique notre filtrage pour les limites définit dans la partie calibrage\r\n frame2 = filtrage(Lower, Upper)\r\n draw_current_stickers(frame2) # Affiche les rectangles sur l'image filtrée\r\n cv2.imshow('Image filtree', frame2) # Affiche la nouvelle image filtrée\r\n\r\n '''Une fois les couleurs identifiées, fermeture des fenêtres'''\r\n cv2.destroyWindow('Calibrage cube')\r\n cv2.destroyWindow('Image webcam')\r\n cv2.destroyWindow('Image filtree')\r\n\r\n Color_list = Color_list[0:54]\r\n # Création d'une nouvelle liste\r\n final_color = []\r\n\r\n \"\"\"\r\n Dans la suite, on cherche à identifier les couleurs que la webcam a extraites\r\n Pour cela, on va d'abord regarder les limites inférieures et supérieures de chaque couleur;\r\n \r\n Puis, nous allons créer la couleur voulue à partir de la moyenne de ces deux limites (il est donc \r\n important de définir de la façon la plus précise possible les limites de chaque couleur)\r\n \"\"\"\r\n Middle = [0, 0, 0, 0, 0, 0] # Nouvelle valeur des couleurs\r\n\r\n for i in range(0, 6):\r\n h1, s1, v1 = Lower[i][0], Lower[i][1], Lower[i][2] # Valeurs minimales de la couleur\r\n h2, s2, v2 = Upper[i][0], Upper[i][1], Upper[i][2] # Valeurs maximales de la couleur\r\n\r\n h3, s3, v3 = (h1 + h2) / 2, (s1 + s2) / 2, (v1 + v2) / 2 # Moyennage 1/2 (lower + upper)\r\n r3, g3, b3 = colorsys.hsv_to_rgb(h3 / 180, s3 / 255, v3 / 255) # Conversion en RGB\r\n Middle[i] = [b3 * 255, g3 * 255, r3 * 255] # Modification de l'échelle\r\n\r\n '''\r\n La fonction ci-dessous donne la méthode pour trouver la bonne couleur lue \r\n Pour cela, on utilise les distances entre chaque couleur. 
Plus la distance entre deux couleurs\r\n est faible et plus les deux images se rapprochent\r\n '''\r\n def verif_norme_2():\r\n d = [0, 0, 0, 0, 0, 0] # Vecteur distance\r\n\r\n for i in range(0, 6): # Pour chauqe couleur\r\n c = Middle[i] # On prend la valeur de la couleur\r\n r = math.sqrt((c[0] - Color_in_list[0]) ** 2 +\r\n (c[1] - Color_in_list[1]) ** 2 +\r\n (c[2] - Color_in_list[2]) ** 2) # Calcul de la distance\r\n d[i] = r # implémentation de la distance dans un vecteur\r\n\r\n for j in range(0, 6): # Pour chaque couleur\r\n if min(d) == d[j]: # Si la position j est le minimum de d\r\n final_color.append(def_colors[j]) # La bonne couleur est la couleur de la position j\r\n break\r\n\r\n for m in range(0, len(Color_list)): # Pour toutes les couleurs de la liste\r\n Color_in_list = Color_list[m]\r\n verif_norme_2() # Détermine la couleur de chaque pixel\r\n\r\n print(final_color)\r\n\r\n return final_color\r\n" ]
[ [ "numpy.array" ] ]
Krozark/lab
[ "2d7a371170d054bfceb5c92c5fffebcaadbd78b8" ]
[ "ideas/python/create_words.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy\nimport os\n\nfrom matplotlib import pyplot\n\nnumber = 100 # number of words to export\nstore_matrix = True # export watrix as image\nstarting_letter = None # starting letter\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nfile_path = os.path.join(BASE_DIR, \"..\", \"data\", \"words.txt\")\n\n\ndef main():\n with open(file_path, \"r\") as f:\n matrix = compute_matrix(f)\n\n if store_matrix:\n save_matrix(matrix, \"matrix\")\n\n generate_words(\n matrix,\n word_numbers=number,\n size_min=4,\n size_max=14,\n start=starting_letter\n )\n\ndef compute_matrix(words):\n #make a matrix of all possible int\n matrix = numpy.zeros((256, 256, 256), dtype='int32')\n for word in words:\n word = word.strip().lower()\n i, j = 0, 0\n for k in [ord(c) for c in list(\"%s\\n\" % word)]: #\\n for ending\n if k >= 256:\n continue\n #add 1 to sectition of i,j \n matrix[i, j, k] += 1\n #incrementation\n i, j = j, k\n return matrix\n\ndef save_matrix(matrix, filename):\n count2D=matrix.sum(axis=0)\n p2D=count2D.astype('float')/numpy.tile(sum(count2D.T), (256, 1)).T\n p2D[numpy.isnan(p2D)] = 0\n\n # For better contrast, we plot p^alpha instead of p\n alpha = 0.33\n p2Da = p2D**alpha\n\n # We display only letters a to z, ie ASCII from 97 to 123.\n a = ord('a')\n z = ord('z')\n pyplot.figure(figsize=(8, 8))\n pyplot.imshow(p2Da[a:z+1, a:z+1], interpolation='nearest')\n pyplot.axis('off')\n\n for i in range(a, z +1):\n pyplot.text(-1, i-a, chr(i), horizontalalignment='center', verticalalignment='center')\n pyplot.text(i-a, -1, chr(i), horizontalalignment='center', verticalalignment='center')\n pyplot.savefig(filename + \".png\")\n\n\ndef generate_words(matrix, word_numbers=100, size_min=4, size_max=14, start=None):\n numpy.random.seed(None)\n s = matrix.sum(axis=2)\n st = numpy.tile(s.T, (256, 1, 1)).T\n p = matrix.astype('float')/st\n p[numpy.isnan(p)] = 0\n\n total = 0\n while total < word_numbers:\n if start:\n i, j = 0, ord(start[0])\n res = start\n else:\n i,j = 0,0\n res = ''\n\n while not j==ord('\\n'):\n\n #avoid non ascii char\n k = 255\n retry = 10\n while k > 128:\n k = numpy.random.choice(list(range(256)), 1 ,p=p[i,j,:])[0]\n retry -= 1\n if retry == 0: #avoid infinit loops\n k = ord('\\n')\n\n res = res + chr(k)\n i, j = j, k\n\n res = res[:-1] #remove trailling \\n\n if len(res) >= size_min and len(res) <= size_max:\n total += 1\n print(res) \n\n\nif __name__ == \"__main__\":\n main()\n\n" ]
[ [ "numpy.isnan", "numpy.zeros", "numpy.random.seed", "matplotlib.pyplot.savefig", "numpy.tile", "matplotlib.pyplot.figure", "matplotlib.pyplot.axis", "matplotlib.pyplot.imshow" ] ]
Esther2013/SciSharp-Stack-Examples
[ "43c06d57a4fb5926c853dd9f18e6463b4c1104a7" ]
[ "src/tensorflow2.x-python-tutorial/keras_basic.py" ]
[ "\n# https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/1_Introduction/helloworld.ipynb\n\nimport tensorflow as tf\nimport numpy as np\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Dense(5, input_shape=(3,)),\n tf.keras.layers.Softmax()])\nmodel.save('/tmp/model')\nloaded_model = tf.keras.models.load_model('/tmp/model')\nx = tf.random.uniform((10, 3))\nassert np.allclose(model.predict(x), loaded_model.predict(x))\n\ntensor = [0, 1, 2, 3]\nmask = np.array([True, False, True, False])\nmasked = tf.boolean_mask(tensor, mask)\n\na = tf.constant(0.0);\nb = 2.0 * a;\n\nX = tf.placeholder(tf.double)\nW = tf.constant(1.0)\nmul = tf.multiply(X, W)\n\nones = tf.zeros([300, 400], tf.int32) \n\nx = tf.Variable(10, name = \"x\");\nfor i in range(0, 5):\n x = x + 1;\n\n# Create a Tensor.\nhello = tf.constant(\"hello world\")\nprint(hello)\n\n# To access a Tensor value, call numpy().\nval = hello.numpy()\n\nprint(val)" ]
[ [ "numpy.array", "tensorflow.multiply", "tensorflow.zeros", "tensorflow.Variable", "tensorflow.random.uniform", "tensorflow.keras.layers.Dense", "tensorflow.keras.models.load_model", "tensorflow.constant", "tensorflow.placeholder", "tensorflow.keras.layers.Softmax", "tensorflow.boolean_mask" ] ]
GereonV/MNIST
[ "1dc196419acc7266c68fb58282a88015b42b7027" ]
[ "gui.py" ]
[ "import tkinter as tk\nimport numpy as np\nfrom neural_network import Network\n\n\nclass ResultGUI:\n def __init__(self, test_data: list[tuple[np.ndarray, np.ndarray]], network: Network, size: int = 560):\n self.set = test_data\n self.i = 0\n self.network = network\n self.size = size\n self.window = tk.Tk()\n self.window.title(\"Recognition-Results\")\n self.window.resizable(0, 0)\n self.canvas = tk.Canvas(width=size, height=size)\n self.label = tk.Label()\n self.output = tk.Label()\n self.button = tk.Button(text=\"Next\", command=self.next)\n self.print()\n self.window.mainloop()\n\n def next(self):\n self.i += 1\n self.print()\n\n def print(self):\n pixel_size = int(self.size / 28)\n self.canvas.delete(\"all\")\n for i in range(28):\n for j in range(28):\n pixel = self.set[self.i][0][i * 28 + j]\n x, y = j * pixel_size, i * pixel_size\n self.canvas.create_rectangle(x, y, x + pixel_size, y + pixel_size, fill=\"#\" + hex(int(255 * (1 - pixel)))[2:] * 3, width=0)\n self.label[\"text\"] = f\"Label: {self.set[self.i][1].argmax()}\"\n self.output[\"text\"] = f\"Output: {self.network.feed_forward(self.set[self.i][0]).argmax()}\"\n self.canvas.pack()\n self.label.pack()\n self.output.pack()\n self.button.pack()\n\n\nclass DrawGUI:\n def __init__(self, network: Network, size: int = 560, brush_size: int = 45):\n self.network = network\n self.size = size\n self.brush_size = brush_size\n self.image = [[0 for __ in range(size)] for _ in range(size)]\n self.window = tk.Tk()\n self.window.title(\"Live-Recognition\")\n self.window.resizable(0, 0)\n self.canvas = tk.Canvas(width=size, height=size)\n self.canvas.bind(\"<B1-Motion>\", self.paint)\n self.canvas.bind(\"<Button-3>\", lambda event: self.clear())\n self.label = tk.Label()\n self.button = tk.Button(text=\"Predict\", command=self.predict)\n self.transform = tk.Button(text=\"Transform\", command=self.transform)\n self.canvas.pack()\n self.label.pack()\n self.button.pack()\n self.transform.pack()\n self.window.mainloop()\n\n def paint(self, event):\n half_brush = int(self.brush_size / 2)\n x, y = event.x - half_brush, event.y - half_brush\n if 0 <= x <= self.size - self.brush_size and 0 <= y <= self.size - self.brush_size:\n for cx in range(x, x + self.brush_size):\n for cy in range(y, y + self.brush_size):\n distance = ((event.x - cx) ** 2 + (event.y - cy) ** 2) ** 0.5\n if not distance > half_brush:\n self.image[cy][cx] = 1\n self.canvas.create_oval(x, y, x + self.brush_size, y + self.brush_size, fill=\"#000\", width=0)\n\n def clear(self):\n del self.image\n self.image = [[0 for __ in range(self.size)] for _ in range(self.size)]\n self.canvas.delete(\"all\")\n\n def predict(self):\n pixels = np.zeros(784)\n pixel_size = int(self.size / 28)\n for i in range(28):\n for j in range(28):\n pixel = 0\n x, y = j * pixel_size, i * pixel_size\n for cy in range(y, y + pixel_size):\n for cx in range(x, x + pixel_size):\n pixel += self.image[cy][cx]\n pixels[i * 28 + j] = pixel / pixel_size ** 2\n output = self.network.feed_forward(pixels)\n index = output.argmax()\n self.label[\"text\"] = f\"Prediction: {index} - certainty: {output[index] * 100}%\"\n\n def transform(self):\n pixel_size = int(self.size / 28)\n window = tk.Toplevel(self.window)\n window.title(\"Transformed\")\n window.resizable(0, 0)\n canvas = tk.Canvas(master=window, width=self.size, height=self.size)\n for i in range(28):\n for j in range(28):\n pixel = 0\n x, y = j * pixel_size, i * pixel_size\n for cy in range(y, y + pixel_size):\n for cx in range(x, x + pixel_size):\n pixel += 
self.image[cy][cx]\n canvas.create_rectangle(x, y, x + pixel_size, y + pixel_size, fill=\"#\" + hex(int(255 * (1 - pixel / pixel_size ** 2)))[2:] * 3, width=0)\n canvas.pack()\n window.mainloop()\n" ]
[ [ "numpy.zeros" ] ]
xhan97/iNNE
[ "a2948770feeaed2c30997684feea980ba1426c81" ]
[ "inne/tests/test_inne.py" ]
[ "\n\"\"\"Tests for `inne` package.\"\"\"\n\nimport time\nfrom unittest.mock import Mock, patch\n\nimport numpy as np\nimport pytest\nfrom inne import IsolationNNE\nfrom scipy.sparse import csc_matrix, csr_matrix\nfrom sklearn.datasets import (load_diabetes, load_digits, load_iris,\n make_blobs, make_moons)\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import ParameterGrid, train_test_split\nfrom sklearn.utils import check_random_state\nfrom sklearn.utils._testing import (assert_allclose, assert_array_almost_equal,\n assert_array_equal, ignore_warnings)\n\nfrom sklearn.ensemble import IsolationForest\n\nrng = check_random_state(0)\n\n# load the iris dataset\n# and randomly permute it\niris = load_iris()\nperm = rng.permutation(iris.target.size)\niris.data = iris.data[perm]\niris.target = iris.target[perm]\n\n# also load the diabetes dataset\n# and randomly permute it\ndiabetes = load_diabetes()\nperm = rng.permutation(diabetes.target.size)\ndiabetes.data = diabetes.data[perm]\ndiabetes.target = diabetes.target[perm]\n\n\n# also load the digits dataset\n# and randomly permute it\ndigit = load_diabetes()\nperm = rng.permutation(digit.target.size)\ndigit.data = digit.data[perm]\ndigit.target = digit.target[perm]\n\n\ndef test_inne():\n \"\"\"Check Isolation NNE for various parameter settings.\"\"\"\n X_train = np.array([[0, 1], [1, 2]])\n X_test = np.array([[2, 1], [1, 1]])\n\n grid = ParameterGrid(\n {\"n_estimators\": [100, 200], \"max_samples\": [10, 20, 30]}\n )\n\n with ignore_warnings():\n for params in grid:\n IsolationNNE(random_state=0, **\n params).fit(X_train).predict(X_test)\n\n\ndef test_inne_performance():\n \"\"\"Test Isolation NNE performs well\"\"\"\n\n # Generate train/test data\n rng = check_random_state(2)\n X = 0.3 * rng.randn(120, 2)\n X_train = np.r_[X + 2, X - 2]\n X_train = X[:100]\n\n # Generate some abnormal novel observations\n X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))\n X_test = np.r_[X[100:], X_outliers]\n y_test = np.array([0] * 20 + [1] * 20)\n\n # fit the model\n clf = IsolationNNE(n_estimators=100, max_samples=16).fit(X_train)\n\n # predict scores (the lower, the more normal)\n y_pred = -clf.decision_function(X_test)\n\n # check that there is at most 6 errors (false positive or false negative)\n assert roc_auc_score(y_test, y_pred) > 0.98\n\n\n@pytest.mark.parametrize(\"contamination\", [0.25, \"auto\"])\ndef test_inne_works(contamination):\n # toy sample (the last two samples are outliers)\n X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [6, 3], [-4, 7]]\n\n # Test IsolationForest\n clf = IsolationNNE(random_state=0, contamination=contamination)\n clf.fit(X)\n decision_func = -clf.decision_function(X)\n pred = clf.predict(X)\n # assert detect outliers:\n assert np.min(decision_func[-2:]) > np.max(decision_func[:-2])\n assert_array_equal(pred, 6 * [1] + 2 * [-1])\n\n\ndef test_score_samples():\n X_train = [[1, 1], [1, 2], [2, 1]]\n clf1 = IsolationNNE(contamination=0.1)\n clf1.fit(X_train)\n clf2 = IsolationNNE()\n clf2.fit(X_train)\n assert_array_equal(\n clf1.score_samples([[2.0, 2.0]]),\n clf1.decision_function([[2.0, 2.0]]) + clf1.offset_,\n )\n assert_array_equal(\n clf2.score_samples([[2.0, 2.0]]),\n clf2.decision_function([[2.0, 2.0]]) + clf2.offset_,\n )\n assert_array_equal(\n clf1.score_samples([[2.0, 2.0]]), clf2.score_samples([[2.0, 2.0]])\n )\n\n\ndef test_fit_time():\n data = digit.data\n print(data.shape)\n clf = IsolationNNE(n_estimators=200, max_samples=100)\n t1 = time.time()\n 
clf.fit(data)\n t2 = time.time()\n anomaly_labels = clf.predict(data)\n t3 = time.time()\n print(t2-t1)\n print(t3-t2)\n\n clf2 = IsolationForest(n_estimators=200, max_samples=256)\n t1 = time.time()\n clf2.fit(data)\n t2 = time.time()\n anomaly_labels = clf2.predict(data)\n t3 = time.time()\n print(t2-t1)\n print(t3-t2)\n" ]
[ [ "numpy.max", "numpy.array", "sklearn.datasets.load_diabetes", "sklearn.utils._testing.assert_array_equal", "numpy.min", "sklearn.model_selection.ParameterGrid", "sklearn.utils.check_random_state", "sklearn.ensemble.IsolationForest", "sklearn.utils._testing.ignore_warnings", "sklearn.metrics.roc_auc_score", "sklearn.datasets.load_iris" ] ]
dreamerlin/MVFNet
[ "5bc41ae0451a2c3572302c441aaf35b444d04d56", "5bc41ae0451a2c3572302c441aaf35b444d04d56" ]
[ "codes/models/backbones/resnet_r3d.py", "codes/models/heads/i3d_clshead.py" ]
[ "\"\"\"R(2+1)D\nCode adopted from mmaction\nTODO: Debug\n\"\"\"\nimport numpy as np\nimport torch.nn as nn\nfrom mmcv.cnn import constant_init, kaiming_init\n\nfrom ...utils import get_root_logger, load_checkpoint\nfrom ..builder import BACKBONES\nfrom .resnet_r3d_utils import (add_bn, add_conv3d, conv3d_wbias, conv3d_wobias, ModuleList)\n\n\nclass BasicBlock(nn.Module):\n \"\"\"basicblock\"\"\"\n def __init__(self,\n input_filters,\n num_filters,\n base_filters,\n down_sampling=False,\n down_sampling_temporal=None,\n block_type='3d',\n is_real_3d=True,\n group=1,\n with_bn=True):\n\n super(BasicBlock, self).__init__()\n self.num_filters = num_filters\n self.base_filters = base_filters\n self.input_filters = input_filters\n self.with_bn = with_bn\n if self.with_bn:\n conv3d = conv3d_wobias\n else:\n conv3d = conv3d_wbias\n\n if block_type == '2.5d':\n assert is_real_3d\n if down_sampling_temporal is None:\n down_sampling_temporal = down_sampling\n if down_sampling:\n if is_real_3d and down_sampling_temporal:\n self.down_sampling_stride = [2, 2, 2]\n else:\n self.down_sampling_stride = [1, 2, 2]\n else:\n self.down_sampling_stride = [1, 1, 1]\n\n self.down_sampling = down_sampling\n\n self.relu = nn.ReLU()\n self.conv1 = add_conv3d(input_filters, num_filters,\n kernel=[3, 3, 3] if is_real_3d else [1, 3, 3],\n stride=self.down_sampling_stride,\n pad=[1, 1, 1] if is_real_3d else [0, 1, 1],\n block_type=block_type, with_bn=self.with_bn)\n if self.with_bn:\n self.bn1 = add_bn(num_filters)\n self.conv2 = add_conv3d(num_filters, num_filters,\n kernel=[3, 3, 3] if is_real_3d else [1, 3, 3],\n stride=[1, 1, 1],\n pad=[1, 1, 1] if is_real_3d else [0, 1, 1],\n block_type=block_type, with_bn=self.with_bn)\n if self.with_bn:\n self.bn2 = add_bn(num_filters)\n if num_filters != input_filters or down_sampling:\n self.conv3 = conv3d(input_filters, num_filters,\n kernel=[1, 1, 1],\n stride=self.down_sampling_stride,\n pad=[0, 0, 0])\n if self.with_bn:\n self.bn3 = nn.BatchNorm3d(num_filters, eps=1e-3)\n\n def forward(self, x):\n \"\"\"forward\"\"\"\n identity = x\n\n out = self.conv1(x)\n if self.with_bn:\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n if self.with_bn:\n out = self.bn2(out)\n\n if self.down_sampling or self.num_filters != self.input_filters:\n identity = self.conv3(identity)\n if self.with_bn:\n identity = self.bn3(identity)\n\n out += identity\n out = self.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n \"\"\"bottleneck\"\"\"\n def __init__(self,\n input_filters,\n num_filters,\n base_filters,\n down_sampling=False,\n down_sampling_temporal=None,\n block_type='3d',\n is_real_3d=True,\n group=1,\n with_bn=True):\n\n super(Bottleneck, self).__init__()\n self.num_filters = num_filters\n self.base_filters = base_filters\n self.input_filters = input_filters\n self.with_bn = with_bn\n if self.with_bn:\n conv3d = conv3d_wobias\n else:\n conv3d = conv3d_wbias\n\n if block_type == '2.5d':\n assert is_real_3d\n if down_sampling_temporal is None:\n down_sampling_temporal = down_sampling\n if down_sampling:\n if is_real_3d and down_sampling_temporal:\n self.down_sampling_stride = [2, 2, 2]\n else:\n self.down_sampling_stride = [1, 2, 2]\n else:\n self.down_sampling_stride = [1, 1, 1]\n\n self.down_sampling = down_sampling\n self.relu = nn.ReLU()\n\n self.conv0 = add_conv3d(input_filters, base_filters,\n kernel=[1, 1, 1],\n stride=[1, 1, 1],\n pad=[0, 0, 0],\n with_bn=self.with_bn)\n if self.with_bn:\n self.bn0 = add_bn(base_filters)\n\n self.conv1 = 
add_conv3d(base_filters, base_filters,\n kernel=[3, 3, 3] if is_real_3d else [1, 3, 3],\n stride=self.down_sampling_stride,\n pad=[1, 1, 1] if is_real_3d else [0, 1, 1],\n block_type=block_type, with_bn=self.with_bn)\n if self.with_bn:\n self.bn1 = add_bn(base_filters)\n\n self.conv2 = add_conv3d(base_filters, num_filters,\n kernel=[1, 1, 1],\n pad=[0, 0, 0],\n stride=[1, 1, 1],\n with_bn=self.with_bn)\n\n if self.with_bn:\n self.bn2 = add_bn(num_filters)\n\n if num_filters != input_filters or down_sampling:\n self.conv3 = conv3d(input_filters, num_filters,\n kernel=[1, 1, 1],\n stride=self.down_sampling_stride,\n pad=[0, 0, 0])\n if self.with_bn:\n self.bn3 = nn.BatchNorm3d(num_filters, eps=1e-3)\n\n def forward(self, x):\n \"\"\"forward\"\"\"\n identity = x\n if self.with_bn:\n out = self.relu(self.bn0(self.conv0(x)))\n out = self.relu(self.bn1(self.conv1(out)))\n out = self.bn2(self.conv2(out))\n else:\n out = self.relu(self.conv0(x))\n out = self.relu(self.conv1(out))\n out = self.conv2(out)\n\n if self.down_sampling or self.num_filters != self.input_filters:\n identity = self.conv3(identity)\n if self.with_bn:\n identity = self.bn3(identity)\n\n out += identity\n out = self.relu(out)\n return out\n\n\ndef make_plain_res_layer(block, num_blocks,\n in_filters, num_filters,\n base_filters,\n block_type='3d',\n down_sampling=False,\n down_sampling_temporal=None,\n is_real_3d=True,\n with_bn=True):\n \"\"\"make_plain_res_layer\"\"\"\n layers = []\n layers.append(block(in_filters, num_filters, base_filters,\n down_sampling=down_sampling,\n down_sampling_temporal=down_sampling_temporal,\n block_type=block_type,\n is_real_3d=is_real_3d, with_bn=with_bn))\n for i in range(num_blocks - 1):\n layers.append(block(num_filters, num_filters, base_filters,\n block_type=block_type,\n is_real_3d=is_real_3d, with_bn=with_bn))\n return ModuleList(layers)\n\n\nBLOCK_CONFIG = {\n 10: (1, 1, 1, 1),\n 16: (2, 2, 2, 1),\n 18: (2, 2, 2, 2),\n 26: (2, 2, 2, 2),\n 34: (3, 4, 6, 3),\n 50: (3, 4, 6, 3),\n 101: (3, 4, 23, 3),\n 152: (3, 8, 36, 3),\n}\nSHALLOW_FILTER_CONFIG = [\n [64, 64],\n [128, 128],\n [256, 256],\n [512, 512]\n]\nDEEP_FILTER_CONFIG = [\n [256, 64],\n [512, 128],\n [1024, 256],\n [2048, 512]\n]\n\n\n@BACKBONES.register_module\nclass ResNet_R3D(nn.Module):\n \"\"\"R(2+1)D\"\"\"\n def __init__(self,\n pretrained=None,\n num_input_channels=3,\n depth=34,\n block_type='2.5d',\n channel_multiplier=1.0,\n bottleneck_multiplier=1.0,\n conv1_kernel_t=3,\n conv1_stride_t=1,\n use_pool1=False,\n bn_eval=True,\n bn_frozen=True,\n with_bn=True):\n # parameter initialization\n super(ResNet_R3D, self).__init__()\n self.pretrained = pretrained\n self.num_input_channels = num_input_channels\n self.depth = depth\n self.block_type = block_type\n self.channel_multiplier = channel_multiplier\n self.bottleneck_multiplier = bottleneck_multiplier\n self.conv1_kernel_t = conv1_kernel_t\n self.conv1_stride_t = conv1_stride_t\n self.use_pool1 = use_pool1\n self.relu = nn.ReLU()\n self.bn_eval = bn_eval\n self.bn_frozen = bn_frozen\n self.with_bn = with_bn\n global comp_count, comp_idx\n comp_idx = 0\n comp_count = 0\n\n if self.with_bn:\n conv3d = conv3d_wobias\n else:\n conv3d = conv3d_wbias\n\n# stem block\n if self.block_type in ['2.5d', '2.5d-sep']:\n self.conv1_s = conv3d(self.num_input_channels, 45, [\n 1, 7, 7], [1, 2, 2], [0, 3, 3])\n if self.with_bn:\n self.bn1_s = nn.BatchNorm3d(45, eps=1e-3)\n self.conv1_t = conv3d(45, 64,\n [self.conv1_kernel_t, 1, 1],\n [self.conv1_stride_t, 1, 1],\n [(self.conv1_kernel_t - 
1) // 2, 0, 0])\n if self.with_bn:\n self.bn1_t = nn.BatchNorm3d(64, eps=1e-3)\n else:\n self.conv1 = conv3d(self.num_input_channels, 64,\n [self.conv1_kernel_t, 7, 7],\n [self.conv1_stride_t, 2, 2],\n [(self.conv1_kernel_t - 1) // 2, 3, 3])\n if self.with_bn:\n self.bn1 = nn.BatchNorm3d(64, eps=1e-3)\n\n if self.use_pool1:\n self.pool1 = nn.MaxPool3d(kernel_size=[1, 3, 3], stride=[\n 1, 2, 2], padding=[0, 1, 1])\n\n self.stage_blocks = BLOCK_CONFIG[self.depth]\n if self.depth <= 18 or self.depth == 34:\n self.block = BasicBlock\n else:\n self.block = Bottleneck\n if self.depth <= 34:\n self.filter_config = SHALLOW_FILTER_CONFIG\n else:\n self.filter_config = DEEP_FILTER_CONFIG\n self.filter_config = np.multiply(\n self.filter_config, self.channel_multiplier).astype(np.int)\n\n layer1 = make_plain_res_layer(self.block, self.stage_blocks[0],\n 64, self.filter_config[0][0],\n int(self.filter_config[0][1]\n * self.bottleneck_multiplier),\n block_type=self.block_type,\n with_bn=self.with_bn)\n self.add_module('layer1', layer1)\n layer2 = make_plain_res_layer(self.block, self.stage_blocks[1],\n self.filter_config[0][0],\n self.filter_config[1][0],\n int(self.filter_config[1][1]\n * self.bottleneck_multiplier),\n block_type=self.block_type,\n down_sampling=True,\n with_bn=self.with_bn)\n self.add_module('layer2', layer2)\n layer3 = make_plain_res_layer(self.block, self.stage_blocks[2],\n self.filter_config[1][0],\n self.filter_config[2][0],\n int(self.filter_config[2][1]\n * self.bottleneck_multiplier),\n block_type=self.block_type,\n down_sampling=True,\n with_bn=self.with_bn)\n self.add_module('layer3', layer3)\n layer4 = make_plain_res_layer(self.block, self.stage_blocks[3],\n self.filter_config[2][0],\n self.filter_config[3][0],\n int(self.filter_config[3][1]\n * self.bottleneck_multiplier),\n block_type=self.block_type,\n down_sampling=True,\n with_bn=self.with_bn)\n self.add_module('layer4', layer4)\n self.res_layers = ['layer1', 'layer2', 'layer3', 'layer4']\n\n def forward(self, x):\n \"\"\"forward\"\"\"\n if self.block_type in ['2.5d', '2.5d-sep']:\n if self.with_bn:\n x = self.relu(self.bn1_s(self.conv1_s(x)))\n x = self.relu(self.bn1_t(self.conv1_t(x)))\n else:\n x = self.relu(self.conv1_s(x))\n x = self.relu(self.conv1_t(x))\n else:\n if self.with_bn:\n x = self.relu(self.bn1(self.conv1(x)))\n else:\n x = self.relu(self.conv1(x))\n\n if self.use_pool1:\n x = self.pool1(x)\n\n for i, layer_name in enumerate(self.res_layers):\n res_layer = getattr(self, layer_name)\n x = res_layer(x)\n\n return x\n\n def init_weights(self):\n \"\"\"init weight\"\"\"\n if isinstance(self.pretrained, str):\n logger = get_root_logger\n load_checkpoint(self, self.pretrained, strict=False, logger=logger)\n elif self.pretrained is None:\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n kaiming_init(m)\n elif isinstance(m, nn.BatchNorm3d):\n constant_init(m, 1)\n else:\n raise TypeError('pretrained must be a str or None')\n\n def train(self, mode=True):\n \"\"\"train\"\"\"\n super(ResNet_R3D, self).train(mode)\n if self.bn_eval and self.with_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm3d):\n m.eval()\n if self.bn_frozen:\n for params in m.parameters():\n params.requires_grad = False\n", "\"\"\"cls head for 3D input (B C T H W)\"\"\"\nimport torch.nn as nn\nfrom ..builder import HEADS\nfrom .base import BaseHead\n@HEADS.register_module\nclass I3DClsHead(BaseHead):\n \"\"\"cls head for 3D input (B C T H W)\"\"\"\n\n def __init__(self,\n spatial_type='avg',\n spatial_size=7,\n 
temporal_size=4,\n consensus_cfg=dict(type='avg', dim=1),\n dropout_ratio=0.5,\n in_channels=2048,\n num_classes=400,\n init_std=0.01,\n fcn_testing=False,\n extract_feat=False,\n ):\n super(I3DClsHead, self).__init__(spatial_size, dropout_ratio,\n in_channels, num_classes, init_std, extract_feat)\n self.spatial_type = spatial_type\n self.consensus_type = consensus_cfg['type']\n self.temporal_size = temporal_size\n assert not (self.spatial_size == -1) ^ (self.temporal_size == -1)\n if self.temporal_size == -1 and self.spatial_size == -1:\n self.pool_size = (1, 1, 1)\n if self.spatial_type == 'avg':\n self.Logits = nn.AdaptiveAvgPool3d(self.pool_size)\n if self.spatial_type == 'max':\n self.Logits = nn.AdaptiveMaxPool3d(self.pool_size)\n else:\n self.pool_size = (self.temporal_size, ) + self.spatial_size\n if self.spatial_type == 'avg':\n self.Logits = nn.AvgPool3d(\n self.pool_size, stride=1, padding=0)\n if self.spatial_type == 'max':\n self.Logits = nn.MaxPool3d(\n self.pool_size, stride=1, padding=0)\n self.fc_cls = nn.Linear(self.in_channels, self.num_classes)\n self.fcn_testing = fcn_testing\n self.new_cls = None\n\n def forward(self, x):\n \"\"\"forward \"\"\"\n if not self.fcn_testing:\n # [30 2048 4 8 8]\n x = self.Logits(x)\n # [30 2048 1 1 1]\n if self.dropout is not None:\n x = self.dropout(x)\n # [30 2048 1 1 1]\n x = x.view(x.shape[0], -1)\n # [30 2048]\n if self.extract_feat:\n cls_score = x # [30 2048]\n else:\n cls_score = self.fc_cls(x)\n # [B*clip_num 400] train:clip_num=1 test:b=1\n else:\n # [30 2048 4 8 8]\n if self.new_cls is None:\n self.new_cls = nn.Conv3d(\n self.in_channels,\n self.num_classes,\n 1, 1, 0).cuda()\n self.new_cls.load_state_dict(\n {'weight': self.fc_cls.weight.unsqueeze(-1).unsqueeze(\n -1).unsqueeze(-1),\n 'bias': self.fc_cls.bias})\n if self.extract_feat:\n class_map = x # [30 2048 4 8 8]\n else:\n class_map = self.new_cls(x)\n # [30 400 4 8 8]\n cls_score = class_map.mean([2, 3, 4])\n # [30 400] or [30 feat-dim]\n return cls_score\n\n def init_weights(self):\n \"\"\"init weights\"\"\"\n nn.init.normal_(self.fc_cls.weight, 0, self.init_std)\n nn.init.constant_(self.fc_cls.bias, 0)\n" ]
[ [ "torch.nn.ReLU", "torch.nn.MaxPool3d", "torch.nn.BatchNorm3d", "numpy.multiply" ], [ "torch.nn.Linear", "torch.nn.init.constant_", "torch.nn.MaxPool3d", "torch.nn.init.normal_", "torch.nn.Conv3d", "torch.nn.AdaptiveMaxPool3d", "torch.nn.AvgPool3d", "torch.nn.AdaptiveAvgPool3d" ] ]
theovincent/ReinforcementLearningWithDemonstration
[ "fc04afec0643c45860e12d3af4a45d32265a0c61" ]
[ "algorithms/API/replay_buffer.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass ReplayBuffer:\n def __init__(self, env, epsilon_decay, n_expert_samples=0, expert_policy=None):\n self.env = env\n self.epsilon_decay = epsilon_decay\n\n self.buffer_expert = []\n self.buffer_rl = []\n\n if n_expert_samples > 0: # i.e with Demonstration\n self.collect_expert_samples(n_expert_samples, expert_policy)\n\n def collect_expert_samples(self, n_samples, policy):\n self.env.reset()\n state = self.env.state\n\n for idx_sample in range(n_samples):\n if idx_sample > 0:\n state = np.random.choice(self.env._states)\n self.env.state = state\n\n action = policy[state]\n\n next_state, reward, _, _ = self.env.step(action)\n next_action = policy[next_state]\n\n self.buffer_expert.append((state, action, reward, next_state, next_action))\n\n def collect_rl_samples(self, n_samples, w, iteration):\n # Reset buffer rl to have strict on policy learning\n self.buffer_rl = []\n\n self.env.reset()\n state = self.env.state\n\n for idx_sample in range(n_samples):\n if idx_sample > 0:\n state = np.random.choice(self.env._states)\n self.env.state = state\n\n # Policy improvement\n if np.random.random() < self.epsilon_decay(2 * iteration):\n action = np.random.choice(self.env._actions)\n else:\n action = np.argmax([self.env.get_feature(state, action) @ w for action in self.env._actions])\n\n next_state, reward, _, _ = self.env.step(action)\n next_action = np.argmax([self.env.get_feature(next_state, action) @ w for action in self.env._actions])\n\n self.buffer_rl.append((state, action, reward, next_state, next_action))\n\n def display_statistics_on_samples(self):\n plt.figure()\n number_occurences = np.zeros(self.env.S)\n\n for (state, _, _, _, _) in self.buffer_expert:\n number_occurences[state] += 1\n\n for (state, _, _, _, _) in self.buffer_rl:\n number_occurences[state] += 1\n\n img = self.env.get_layout_img(number_occurences)\n plt.title(\"Statistics on occurences\")\n plt.imshow(img)\n plt.show()\n" ]
[ [ "numpy.random.choice", "numpy.zeros", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "numpy.random.random", "matplotlib.pyplot.imshow" ] ]
sylvieong/nmt-ported
[ "70fa4c473dfc6a3d9f576334145bd07c5e151431" ]
[ "nmt/utils/iterator_utils.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"For loading data into NMT models.\"\"\"\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow as tf\n\n__all__ = [\"BatchedInput\", \"get_iterator\", \"get_infer_iterator\"]\n\n\n# NOTE(ebrevdo): When we subclass this, instances' __dict__ becomes empty.\nclass BatchedInput(\n collections.namedtuple(\"BatchedInput\",\n (\"initializer\", \"source\", \"target_input\",\n \"target_output\", \"source_sequence_length\",\n \"target_sequence_length\"))):\n pass\n\n\ndef get_infer_iterator(src_dataset,\n src_vocab_table,\n batch_size,\n source_reverse,\n eos,\n src_max_len=None):\n src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)\n src_dataset = src_dataset.map(lambda src: tf.string_split([src]).values)\n\n if src_max_len:\n src_dataset = src_dataset.map(lambda src: src[:src_max_len])\n # Convert the word strings to ids\n src_dataset = src_dataset.map(\n lambda src: tf.cast(src_vocab_table.lookup(src), tf.int32))\n if source_reverse:\n src_dataset = src_dataset.map(lambda src: tf.reverse(src, axis=[0]))\n # Add in the word counts.\n src_dataset = src_dataset.map(lambda src: (src, tf.size(src)))\n\n def batching_func(x):\n return x.padded_batch(\n batch_size,\n # The entry is the source line rows;\n # this has unknown-length vectors. 
The last entry is\n # the source row size; this is a scalar.\n padded_shapes=(\n tf.TensorShape([None]), # src\n tf.TensorShape([])), # src_len\n # Pad the source sequences with eos tokens.\n # (Though notice we don't generally need to do this since\n # later on we will be masking out calculations past the true sequence.\n padding_values=(\n src_eos_id, # src\n 0)) # src_len -- unused\n\n batched_dataset = batching_func(src_dataset)\n batched_iter = batched_dataset.make_initializable_iterator()\n (src_ids, src_seq_len) = batched_iter.get_next()\n return BatchedInput(\n initializer=batched_iter.initializer,\n source=src_ids,\n target_input=None,\n target_output=None,\n source_sequence_length=src_seq_len,\n target_sequence_length=None)\n\n\ndef get_iterator(src_dataset,\n tgt_dataset,\n src_vocab_table,\n tgt_vocab_table,\n batch_size,\n sos,\n eos,\n source_reverse,\n random_seed,\n num_buckets,\n src_max_len=None,\n tgt_max_len=None,\n num_parallel_calls=4,\n output_buffer_size=None,\n skip_count=None,\n num_shards=1,\n shard_index=0):\n if not output_buffer_size:\n output_buffer_size = batch_size * 1000\n src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)\n tgt_sos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(sos)), tf.int32)\n tgt_eos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(eos)), tf.int32)\n\n src_tgt_dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset))\n\n src_tgt_dataset = src_tgt_dataset.shard(num_shards, shard_index)\n if skip_count is not None:\n src_tgt_dataset = src_tgt_dataset.skip(skip_count)\n\n src_tgt_dataset = src_tgt_dataset.shuffle(output_buffer_size, random_seed)\n\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt: (\n tf.string_split([src]).values, tf.string_split([tgt]).values),\n num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)\n\n # Filter zero length input sequences.\n src_tgt_dataset = src_tgt_dataset.filter(\n lambda src, tgt: tf.logical_and(tf.size(src) > 0, tf.size(tgt) > 0))\n\n if src_max_len:\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt: (src[:src_max_len], tgt),\n num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)\n if tgt_max_len:\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt: (src, tgt[:tgt_max_len]),\n num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)\n if source_reverse:\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt: (tf.reverse(src, axis=[0]), tgt),\n num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)\n # Convert the word strings to ids. 
Word strings that are not in the\n # vocab get the lookup table's default_value integer.\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt: (tf.cast(src_vocab_table.lookup(src), tf.int32),\n tf.cast(tgt_vocab_table.lookup(tgt), tf.int32)),\n num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)\n # Create a tgt_input prefixed with <sos> and a tgt_output suffixed with <eos>.\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt: (src,\n tf.concat(([tgt_sos_id], tgt), 0),\n tf.concat((tgt, [tgt_eos_id]), 0)),\n num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)\n # Add in sequence lengths.\n src_tgt_dataset = src_tgt_dataset.map(\n lambda src, tgt_in, tgt_out: (\n src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_in)),\n num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)\n\n # Bucket by source sequence length (buckets for lengths 0-9, 10-19, ...)\n def batching_func(x):\n return x.padded_batch(\n batch_size,\n # The first three entries are the source and target line rows;\n # these have unknown-length vectors. The last two entries are\n # the source and target row sizes; these are scalars.\n padded_shapes=(\n tf.TensorShape([None]), # src\n tf.TensorShape([None]), # tgt_input\n tf.TensorShape([None]), # tgt_output\n tf.TensorShape([]), # src_len\n tf.TensorShape([])), # tgt_len\n # Pad the source and target sequences with eos tokens.\n # (Though notice we don't generally need to do this since\n # later on we will be masking out calculations past the true sequence.\n padding_values=(\n src_eos_id, # src\n tgt_eos_id, # tgt_input\n tgt_eos_id, # tgt_output\n 0, # src_len -- unused\n 0)) # tgt_len -- unused\n\n if num_buckets > 1:\n\n def key_func(unused_1, unused_2, unused_3, src_len, tgt_len):\n # Calculate bucket_width by maximum source sequence length.\n # Pairs with length [0, bucket_width) go to bucket 0, length\n # [bucket_width, 2 * bucket_width) go to bucket 1, etc. Pairs with length\n # over ((num_bucket-1) * bucket_width) words all go into the last bucket.\n if src_max_len:\n bucket_width = (src_max_len + num_buckets - 1) // num_buckets\n else:\n bucket_width = 10\n\n # Bucket sentence pairs by the length of their source sentence and target\n # sentence.\n bucket_id = tf.maximum(src_len // bucket_width, tgt_len // bucket_width)\n return tf.to_int64(tf.minimum(num_buckets, bucket_id))\n\n def reduce_func(unused_key, windowed_data):\n return batching_func(windowed_data)\n\n batched_dataset = src_tgt_dataset.apply(\n tf.contrib.data.group_by_window(\n key_func=key_func, reduce_func=reduce_func, window_size=batch_size))\n\n else:\n batched_dataset = batching_func(src_tgt_dataset)\n batched_iter = batched_dataset.make_initializable_iterator()\n (src_ids, tgt_input_ids, tgt_output_ids, src_seq_len,\n tgt_seq_len) = (batched_iter.get_next())\n return BatchedInput(\n initializer=batched_iter.initializer,\n source=src_ids,\n target_input=tgt_input_ids,\n target_output=tgt_output_ids,\n source_sequence_length=src_seq_len,\n target_sequence_length=tgt_seq_len)\n" ]
[ [ "tensorflow.size", "tensorflow.minimum", "tensorflow.concat", "tensorflow.contrib.data.group_by_window", "tensorflow.TensorShape", "tensorflow.reverse", "tensorflow.constant", "tensorflow.maximum", "tensorflow.string_split", "tensorflow.data.Dataset.zip" ] ]
vamas/Capstone_ML
[ "d7fb044e42731b4a19abb15d4b94e354c598c9d3" ]
[ "helpers.py" ]
[ "import pandas as pd\nfrom sklearn.grid_search import GridSearchCV\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import Imputer, StandardScaler, MinMaxScaler, RobustScaler\n\n\n\ndef select_random_date(date_range, training_period, testing_period):\n cond = True\n while cond:\n #select random date\n training_start_date = np.random.choice(date_range)\n #check if there is enough days after \n if (date_range.index(training_start_date) < (len(date_range) - training_period - testing_period)):\n cond = False\n return training_start_date \n\ndef annualised_sharpe(returns, N=252):\n \"\"\"\n Calculate the annualised Sharpe ratio of a returns stream \n based on a number of trading periods, N. N defaults to 252,\n which then assumes a stream of daily returns.\n\n The function assumes that the returns are the excess of \n those compared to a benchmark.\n \"\"\"\n return np.sqrt(N) * returns.mean() / returns.std()\n\n\ndef equity_sharpe(ticker, date):\n \"\"\"\n Calculates the annualised Sharpe ratio based on the daily\n returns of an equity ticker symbol listed in Yahoo Finance.\n\n The dates have been hardcoded here for the QuantStart article \n on Sharpe ratios.\n \"\"\"\n\n # Obtain the equities daily historic data for the desired time period\n # and add to a pandas DataFrame\n full_df = stockData.raw_financial_data[ticker]\n pdf = full_df[full_df.index <= date]\n\n # Use the percentage change method to easily calculate daily returns\n pdf['daily_ret'] = pdf['Adj Close'].pct_change()\n\n # Assume an average annual risk-free rate over the period of 5%\n pdf['excess_daily_ret'] = pdf['daily_ret'] - 0.05/252\n\n # Return the annualised Sharpe ratio based on the excess daily returns\n return annualised_sharpe(pdf['excess_daily_ret'])\n" ]
[ [ "numpy.random.choice", "numpy.sqrt" ] ]
typhoonzero/models-1
[ "a3559618a013820385f43307261ad34351da2fbf" ]
[ "sqlflow_models/lstmclassifier.py" ]
[ "import tensorflow as tf\n\nclass StackedBiLSTMClassifier(tf.keras.Model):\n def __init__(self, feature_columns, stack_units=[32], hidden_size=64, n_classes=2):\n \"\"\"StackedBiLSTMClassifier\n :param feature_columns: All columns must be embedding of sequence column with same sequence_length.\n :type feature_columns: list[tf.embedding_column].\n :param stack_units: Units for LSTM layer.\n :type stack_units: vector of ints.\n :param n_classes: Target number of classes.\n :type n_classes: int.\n \"\"\"\n super(StackedBiLSTMClassifier, self).__init__()\n\n self.feature_layer = tf.keras.experimental.SequenceFeatures(feature_columns)\n self.stack_bilstm = []\n self.stack_size = len(stack_units)\n self.stack_units = stack_units\n self.n_classes = n_classes\n if self.stack_size > 1:\n for i in range(self.stack_size - 1):\n self.stack_bilstm.append(\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(self.stack_units[i], return_sequences=True))\n )\n self.lstm = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(self.stack_units[-1]))\n self.hidden = tf.keras.layers.Dense(hidden_size, activation='relu')\n if self.n_classes == 2:\n # special setup for binary classification\n pred_act = 'sigmoid'\n self._loss = 'binary_crossentropy'\n else:\n pred_act = 'softmax'\n self._loss = 'categorical_crossentropy'\n self.pred = tf.keras.layers.Dense(n_classes, activation=pred_act)\n\n def call(self, inputs):\n x, seq_len = self.feature_layer(inputs)\n seq_mask = tf.sequence_mask(seq_len)\n if self.stack_size > 1:\n for i in range(self.stack_size - 1):\n x = self.stack_bilstm[i](x, mask=seq_mask)\n x = self.lstm(x, mask=seq_mask)\n x = self.hidden(x)\n return self.pred(x)\n\n def optimizer(self):\n \"\"\"Default optimizer name. Used in model.compile.\"\"\"\n return 'adam'\n\n def loss(self):\n \"\"\"Default loss function. Used in model.compile.\"\"\"\n return self._loss\n\n\n def prepare_prediction_column(self, prediction):\n \"\"\"Return the class label of highest probability.\"\"\"\n return prediction.argmax(axis=-1)\n\n" ]
[ [ "tensorflow.keras.experimental.SequenceFeatures", "tensorflow.keras.layers.LSTM", "tensorflow.sequence_mask", "tensorflow.keras.layers.Dense" ] ]
liu-yushan/TLogic
[ "51c1feb1f196205437b25e82d9d721f55e84937c" ]
[ "mycode/score_functions.py" ]
[ "import numpy as np\n\n\ndef score1(rule, c=0):\n \"\"\"\n Calculate candidate score depending on the rule's confidence.\n\n Parameters:\n rule (dict): rule from rules_dict\n c (int): constant for smoothing\n\n Returns:\n score (float): candidate score\n \"\"\"\n\n score = rule[\"rule_supp\"] / (rule[\"body_supp\"] + c)\n\n return score\n\n\ndef score2(cands_walks, test_query_ts, lmbda):\n \"\"\"\n Calculate candidate score depending on the time difference.\n\n Parameters:\n cands_walks (pd.DataFrame): walks leading to the candidate\n test_query_ts (int): test query timestamp\n lmbda (float): rate of exponential distribution\n\n Returns:\n score (float): candidate score\n \"\"\"\n\n max_cands_ts = max(cands_walks[\"timestamp_0\"])\n score = np.exp(\n lmbda * (max_cands_ts - test_query_ts)\n ) # Score depending on time difference\n\n return score\n\n\ndef score_12(rule, cands_walks, test_query_ts, lmbda, a):\n \"\"\"\n Combined score function.\n\n Parameters:\n rule (dict): rule from rules_dict\n cands_walks (pd.DataFrame): walks leading to the candidate\n test_query_ts (int): test query timestamp\n lmbda (float): rate of exponential distribution\n a (float): value between 0 and 1\n\n Returns:\n score (float): candidate score\n \"\"\"\n\n score = a * score1(rule) + (1 - a) * score2(cands_walks, test_query_ts, lmbda)\n\n return score\n" ]
[ [ "numpy.exp" ] ]
GeorgOstrovski/jax
[ "578e5cf6d7272a9d29a6b9d50899efbcf17780f1" ]
[ "jax/test_util.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom contextlib import contextmanager\nimport functools\nimport re\nimport itertools as it\nimport os\nfrom typing import Dict, Sequence, Union\nfrom unittest import SkipTest\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport numpy as onp\nimport numpy.random as npr\n\nfrom . import api\nfrom . import core\nfrom . import dtypes\nfrom . import lax\nfrom .config import flags, bool_env\nfrom .util import partial\nfrom .tree_util import tree_multimap, tree_all, tree_map, tree_reduce\nfrom .lib import xla_bridge\nfrom .interpreters import xla\n\n\nFLAGS = flags.FLAGS\nflags.DEFINE_enum(\n 'jax_test_dut', '',\n enum_values=['', 'cpu', 'gpu', 'tpu'],\n help=\n 'Describes the device under test in case special consideration is required.'\n)\n\nflags.DEFINE_integer(\n 'num_generated_cases',\n int(os.getenv('JAX_NUM_GENERATED_CASES', 10)),\n help='Number of generated cases to test')\n\nflags.DEFINE_bool(\n 'jax_skip_slow_tests',\n bool_env('JAX_SKIP_SLOW_TESTS', False),\n help=\n 'Skip tests marked as slow (> 5 sec).'\n)\n\nEPS = 1e-4\n\ndef _dtype(x):\n return (getattr(x, 'dtype', None) or\n onp.dtype(dtypes.python_scalar_dtypes.get(type(x), None)) or\n onp.asarray(x).dtype)\n\ndef is_sequence(x):\n try:\n iter(x)\n except TypeError:\n return False\n else:\n return True\n\n_default_tolerance = {\n onp.dtype(onp.bool_): 0,\n onp.dtype(onp.int8): 0,\n onp.dtype(onp.int16): 0,\n onp.dtype(onp.int32): 0,\n onp.dtype(onp.int64): 0,\n onp.dtype(onp.uint8): 0,\n onp.dtype(onp.uint16): 0,\n onp.dtype(onp.uint32): 0,\n onp.dtype(onp.uint64): 0,\n onp.dtype(dtypes.bfloat16): 1e-2,\n onp.dtype(onp.float16): 1e-3,\n onp.dtype(onp.float32): 1e-6,\n onp.dtype(onp.float64): 1e-15,\n onp.dtype(onp.complex64): 1e-6,\n onp.dtype(onp.complex128): 1e-15,\n}\n\ndef default_tolerance():\n if device_under_test() != \"tpu\":\n return _default_tolerance\n tol = _default_tolerance.copy()\n tol[onp.dtype(onp.float32)] = 1e-3\n tol[onp.dtype(onp.complex64)] = 1e-3\n return tol\n\ndefault_gradient_tolerance = {\n onp.dtype(dtypes.bfloat16): 1e-1,\n onp.dtype(onp.float16): 1e-2,\n onp.dtype(onp.float32): 2e-3,\n onp.dtype(onp.float64): 1e-5,\n onp.dtype(onp.complex64): 1e-3,\n onp.dtype(onp.complex128): 1e-5,\n}\n\ndef _assert_numpy_allclose(a, b, atol=None, rtol=None):\n a = a.astype(onp.float32) if a.dtype == dtypes.bfloat16 else a\n b = b.astype(onp.float32) if b.dtype == dtypes.bfloat16 else b\n kw = {}\n if atol: kw[\"atol\"] = atol\n if rtol: kw[\"rtol\"] = rtol\n onp.testing.assert_allclose(a, b, **kw)\n\ndef tolerance(dtype, tol=None):\n tol = {} if tol is None else tol\n if not isinstance(tol, dict):\n return tol\n tol = {onp.dtype(key): value for key, value in tol.items()}\n dtype = dtypes.canonicalize_dtype(onp.dtype(dtype))\n return tol.get(dtype, default_tolerance()[dtype])\n\ndef _normalize_tolerance(tol):\n tol = tol or 0\n if isinstance(tol, dict):\n return {onp.dtype(k): v for k, v in 
tol.items()}\n else:\n return {k: tol for k in _default_tolerance.keys()}\n\ndef join_tolerance(tol1, tol2):\n tol1 = _normalize_tolerance(tol1)\n tol2 = _normalize_tolerance(tol2)\n out = tol1\n for k, v in tol2.items():\n out[k] = max(v, tol1.get(k, 0))\n return out\n\ndef _assert_numpy_close(a, b, atol=None, rtol=None):\n assert a.shape == b.shape\n atol = max(tolerance(a.dtype, atol), tolerance(b.dtype, atol))\n rtol = max(tolerance(a.dtype, rtol), tolerance(b.dtype, rtol))\n _assert_numpy_allclose(a, b, atol=atol * a.size, rtol=rtol * b.size)\n\n\ndef check_eq(xs, ys):\n tree_all(tree_multimap(_assert_numpy_allclose, xs, ys))\n\n\ndef check_close(xs, ys, atol=None, rtol=None):\n assert_close = partial(_assert_numpy_close, atol=atol, rtol=rtol)\n tree_all(tree_multimap(assert_close, xs, ys))\n\n\ndef inner_prod(xs, ys):\n def contract(x, y):\n return onp.real(onp.dot(onp.conj(x).reshape(-1), y.reshape(-1)))\n return tree_reduce(onp.add, tree_multimap(contract, xs, ys))\n\n\nadd = partial(tree_multimap, lambda x, y: onp.add(x, y, dtype=_dtype(x)))\nsub = partial(tree_multimap, lambda x, y: onp.subtract(x, y, dtype=_dtype(x)))\nconj = partial(tree_map, lambda x: onp.conj(x, dtype=_dtype(x)))\n\ndef scalar_mul(xs, a):\n return tree_map(lambda x: onp.multiply(x, a, dtype=_dtype(x)), xs)\n\n\ndef rand_like(rng, x):\n shape = onp.shape(x)\n dtype = _dtype(x)\n randn = lambda: onp.asarray(rng.randn(*shape), dtype=dtype)\n if dtypes.issubdtype(dtype, onp.complexfloating):\n return randn() + dtype.type(1.0j) * randn()\n else:\n return randn()\n\n\ndef numerical_jvp(f, primals, tangents, eps=EPS):\n delta = scalar_mul(tangents, eps)\n f_pos = f(*add(primals, delta))\n f_neg = f(*sub(primals, delta))\n return scalar_mul(sub(f_pos, f_neg), 0.5 / eps)\n\n\ndef _merge_tolerance(tol, default):\n if tol is None:\n return default\n if not isinstance(tol, dict):\n return tol\n out = default.copy()\n for k, v in tol.items():\n out[onp.dtype(k)] = v\n return out\n\ndef check_jvp(f, f_jvp, args, atol=None, rtol=None, eps=EPS):\n atol = _merge_tolerance(atol, default_gradient_tolerance)\n rtol = _merge_tolerance(rtol, default_gradient_tolerance)\n rng = onp.random.RandomState(0)\n tangent = tree_map(partial(rand_like, rng), args)\n v_out, t_out = f_jvp(args, tangent)\n v_out_expected = f(*args)\n t_out_expected = numerical_jvp(f, args, tangent, eps=eps)\n # In principle we should expect exact equality of v_out and v_out_expected,\n # but due to nondeterminism especially on GPU (e.g., due to convolution\n # autotuning) we only require \"close\".\n check_close(v_out, v_out_expected, atol=atol, rtol=rtol)\n check_close(t_out, t_out_expected, atol=atol, rtol=rtol)\n\n\ndef check_vjp(f, f_vjp, args, atol=None, rtol=None, eps=EPS):\n atol = _merge_tolerance(atol, default_gradient_tolerance)\n rtol = _merge_tolerance(rtol, default_gradient_tolerance)\n _rand_like = partial(rand_like, onp.random.RandomState(0))\n v_out, vjpfun = f_vjp(*args)\n v_out_expected = f(*args)\n check_close(v_out, v_out_expected, atol=atol, rtol=rtol)\n tangent = tree_map(_rand_like, args)\n tangent_out = numerical_jvp(f, args, tangent, eps=eps)\n cotangent = tree_map(_rand_like, v_out)\n cotangent_out = conj(vjpfun(conj(cotangent)))\n ip = inner_prod(tangent, cotangent_out)\n ip_expected = inner_prod(tangent_out, cotangent)\n check_close(ip, ip_expected, atol=atol, rtol=rtol)\n\n\ndef check_grads(f, args, order,\n modes=[\"fwd\", \"rev\"], atol=None, rtol=None, eps=None):\n args = tuple(args)\n eps = eps or EPS\n\n _check_jvp = 
partial(check_jvp, atol=atol, rtol=rtol, eps=eps)\n _check_vjp = partial(check_vjp, atol=atol, rtol=rtol, eps=eps)\n\n def _check_grads(f, args, order):\n if \"fwd\" in modes:\n _check_jvp(f, partial(api.jvp, f), args)\n if order > 1:\n _check_grads(partial(api.jvp, f), (args, args), order - 1)\n\n if \"rev\" in modes:\n _check_vjp(f, partial(api.vjp, f), args)\n if order > 1:\n def f_vjp(*args):\n out_primal_py, vjp_py = api.vjp(f, *args)\n return vjp_py(out_primal_py)\n _check_grads(f_vjp, args, order - 1)\n\n _check_grads(f, args, order)\n\n\n@contextmanager\ndef count_primitive_compiles():\n xla.xla_primitive_callable.cache_clear()\n\n # We count how many times we call primitive_computation (which is called\n # inside xla_primitive_callable) instead of xla_primitive_callable so we don't\n # count cache hits.\n primitive_computation = xla.primitive_computation\n count = [0]\n\n def primitive_computation_and_count(*args, **kwargs):\n count[0] += 1\n return primitive_computation(*args, **kwargs)\n\n xla.primitive_computation = primitive_computation_and_count\n try:\n yield count\n finally:\n xla.primitive_computation = primitive_computation\n\n\n@contextmanager\ndef count_jit_and_pmap_compiles():\n # No need to clear any caches since we generally jit and pmap fresh callables\n # in tests.\n\n jaxpr_subcomp = xla.jaxpr_subcomp\n count = [0]\n\n def jaxpr_subcomp_and_count(*args, **kwargs):\n count[0] += 1\n return jaxpr_subcomp(*args, **kwargs)\n\n xla.jaxpr_subcomp = jaxpr_subcomp_and_count\n try:\n yield count\n finally:\n xla.jaxpr_subcomp = jaxpr_subcomp\n\n\ndef device_under_test():\n return FLAGS.jax_test_dut or xla_bridge.get_backend().platform\n\ndef if_device_under_test(device_type: Union[str, Sequence[str]],\n if_true, if_false):\n \"\"\"Chooses `if_true` or `if_false` based on device_under_test.\"\"\"\n if device_under_test() in ([device_type] if isinstance(device_type, str)\n else device_type):\n return if_true\n else:\n return if_false\n\ndef supported_dtypes():\n if device_under_test() == \"tpu\":\n return {onp.bool_, onp.int32, onp.uint32, dtypes.bfloat16, onp.float32,\n onp.complex64}\n else:\n return {onp.bool_, onp.int8, onp.int16, onp.int32, onp.int64,\n onp.uint8, onp.uint16, onp.uint32, onp.uint64,\n dtypes.bfloat16, onp.float16, onp.float32, onp.float64,\n onp.complex64, onp.complex128}\n\ndef skip_on_devices(*disabled_devices):\n \"\"\"A decorator for test methods to skip the test on certain devices.\"\"\"\n def skip(test_method):\n @functools.wraps(test_method)\n def test_method_wrapper(self, *args, **kwargs):\n device = device_under_test()\n if device in disabled_devices:\n test_name = getattr(test_method, '__name__', '[unknown test]')\n raise SkipTest('{} not supported on {}.'\n .format(test_name, device.upper()))\n return test_method(self, *args, **kwargs)\n return test_method_wrapper\n return skip\n\n\ndef skip_on_flag(flag_name, skip_value):\n \"\"\"A decorator for test methods to skip the test when flags are set.\"\"\"\n def skip(test_method): # pylint: disable=missing-docstring\n @functools.wraps(test_method)\n def test_method_wrapper(self, *args, **kwargs):\n flag_value = getattr(FLAGS, flag_name)\n if flag_value == skip_value:\n test_name = getattr(test_method, '__name__', '[unknown test]')\n raise SkipTest('{} not supported when FLAGS.{} is {}'\n .format(test_name, flag_name, flag_value))\n return test_method(self, *args, **kwargs)\n return test_method_wrapper\n return skip\n\n\ndef format_test_name_suffix(opname, shapes, dtypes):\n arg_descriptions = 
(format_shape_dtype_string(shape, dtype)\n for shape, dtype in zip(shapes, dtypes))\n return '{}_{}'.format(opname.capitalize(), '_'.join(arg_descriptions))\n\n\n# We use special symbols, represented as singleton objects, to distinguish\n# between NumPy scalars, Python scalars, and 0-D arrays.\nclass ScalarShape(object):\n def __len__(self): return 0\nclass _NumpyScalar(ScalarShape): pass\nclass _PythonScalar(ScalarShape): pass\nNUMPY_SCALAR_SHAPE = _NumpyScalar()\nPYTHON_SCALAR_SHAPE = _PythonScalar()\n\n\ndef _dims_of_shape(shape):\n \"\"\"Converts `shape` to a tuple of dimensions.\"\"\"\n if type(shape) in (list, tuple):\n return shape\n elif isinstance(shape, ScalarShape):\n return ()\n else:\n raise TypeError(type(shape))\n\n\ndef _cast_to_shape(value, shape, dtype):\n \"\"\"Casts `value` to the correct Python type for `shape` and `dtype`.\"\"\"\n if shape is NUMPY_SCALAR_SHAPE:\n # explicitly cast to NumPy scalar in case `value` is a Python scalar.\n return onp.dtype(dtype).type(value)\n elif shape is PYTHON_SCALAR_SHAPE:\n # explicitly cast to Python scalar via https://stackoverflow.com/a/11389998\n return onp.asarray(value).item()\n elif type(shape) in (list, tuple):\n assert onp.shape(value) == tuple(shape)\n return value\n else:\n raise TypeError(type(shape))\n\n\ndef dtype_str(dtype):\n return onp.dtype(dtype).name\n\n\ndef format_shape_dtype_string(shape, dtype):\n if shape is NUMPY_SCALAR_SHAPE:\n return dtype_str(dtype)\n elif shape is PYTHON_SCALAR_SHAPE:\n return 'py' + dtype_str(dtype)\n elif type(shape) in (list, tuple):\n shapestr = ','.join(str(dim) for dim in shape)\n return '{}[{}]'.format(dtype_str(dtype), shapestr)\n elif type(shape) is int:\n return '{}[{},]'.format(dtype_str(dtype), shape)\n elif isinstance(shape, onp.ndarray):\n return '{}[{}]'.format(dtype_str(dtype), shape)\n else:\n raise TypeError(type(shape))\n\n\ndef _rand_dtype(rand, shape, dtype, scale=1., post=lambda x: x):\n \"\"\"Produce random values given shape, dtype, scale, and post-processor.\n\n Args:\n rand: a function for producing random values of a given shape, e.g. 
a\n bound version of either onp.RandomState.randn or onp.RandomState.rand.\n shape: a shape value as a tuple of positive integers.\n dtype: a numpy dtype.\n scale: optional, a multiplicative scale for the random values (default 1).\n post: optional, a callable for post-processing the random values (default\n identity).\n\n Returns:\n An ndarray of the given shape and dtype using random values based on a call\n to rand but scaled, converted to the appropriate dtype, and post-processed.\n \"\"\"\n r = lambda: onp.asarray(scale * rand(*_dims_of_shape(shape)), dtype)\n if dtypes.issubdtype(dtype, onp.complexfloating):\n vals = r() + 1.0j * r()\n else:\n vals = r()\n return _cast_to_shape(onp.asarray(post(vals), dtype), shape, dtype)\n\n\ndef rand_default(scale=3):\n randn = npr.RandomState(0).randn\n return partial(_rand_dtype, randn, scale=scale)\n\n\ndef rand_nonzero():\n post = lambda x: onp.where(x == 0, onp.array(1, dtype=x.dtype), x)\n randn = npr.RandomState(0).randn\n return partial(_rand_dtype, randn, scale=3, post=post)\n\n\ndef rand_positive():\n post = lambda x: x + 1\n rand = npr.RandomState(0).rand\n return partial(_rand_dtype, rand, scale=2, post=post)\n\n\ndef rand_small():\n randn = npr.RandomState(0).randn\n return partial(_rand_dtype, randn, scale=1e-3)\n\n\ndef rand_not_small():\n post = lambda x: x + onp.where(x > 0, 10., -10.)\n randn = npr.RandomState(0).randn\n return partial(_rand_dtype, randn, scale=3., post=post)\n\n\ndef rand_small_positive():\n rand = npr.RandomState(0).rand\n return partial(_rand_dtype, rand, scale=2e-5)\n\ndef rand_uniform(low=0.0, high=1.0):\n assert low < high\n rand = npr.RandomState(0).rand\n post = lambda x: x * (high - low) + low\n return partial(_rand_dtype, rand, post=post)\n\n\ndef rand_some_equal():\n randn = npr.RandomState(0).randn\n rng = npr.RandomState(0)\n\n def post(x):\n x_ravel = x.ravel()\n if len(x_ravel) == 0:\n return x\n flips = rng.rand(*onp.shape(x)) < 0.5\n return onp.where(flips, x_ravel[0], x)\n\n return partial(_rand_dtype, randn, scale=100., post=post)\n\n\ndef rand_some_inf():\n \"\"\"Return a random sampler that produces infinities in floating types.\"\"\"\n rng = npr.RandomState(1)\n base_rand = rand_default()\n\n \"\"\"\n TODO: Complex numbers are not correctly tested\n If blocks should be switched in order, and relevant tests should be fixed\n \"\"\"\n def rand(shape, dtype):\n \"\"\"The random sampler function.\"\"\"\n if not dtypes.issubdtype(dtype, onp.floating):\n # only float types have inf\n return base_rand(shape, dtype)\n\n if dtypes.issubdtype(dtype, onp.complexfloating):\n base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype\n out = (rand(shape, base_dtype) +\n onp.array(1j, dtype) * rand(shape, base_dtype))\n return _cast_to_shape(out, shape, dtype)\n\n dims = _dims_of_shape(shape)\n posinf_flips = rng.rand(*dims) < 0.1\n neginf_flips = rng.rand(*dims) < 0.1\n\n vals = base_rand(shape, dtype)\n vals = onp.where(posinf_flips, onp.array(onp.inf, dtype=dtype), vals)\n vals = onp.where(neginf_flips, onp.array(-onp.inf, dtype=dtype), vals)\n\n return _cast_to_shape(onp.asarray(vals, dtype=dtype), shape, dtype)\n\n return rand\n\ndef rand_some_nan():\n \"\"\"Return a random sampler that produces nans in floating types.\"\"\"\n rng = npr.RandomState(1)\n base_rand = rand_default()\n\n def rand(shape, dtype):\n \"\"\"The random sampler function.\"\"\"\n if dtypes.issubdtype(dtype, onp.complexfloating):\n base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype\n out = (rand(shape, base_dtype) +\n 
onp.array(1j, dtype) * rand(shape, base_dtype))\n return _cast_to_shape(out, shape, dtype)\n\n if not dtypes.issubdtype(dtype, onp.floating):\n # only float types have nan\n return base_rand(shape, dtype)\n\n dims = _dims_of_shape(shape)\n nan_flips = rng.rand(*dims) < 0.1\n\n vals = base_rand(shape, dtype)\n vals = onp.where(nan_flips, onp.array(onp.nan, dtype=dtype), vals)\n\n return _cast_to_shape(onp.asarray(vals, dtype=dtype), shape, dtype)\n\n return rand\n\ndef rand_some_inf_and_nan():\n \"\"\"Return a random sampler that produces infinities and nans in floating types.\"\"\"\n rng = npr.RandomState(1)\n base_rand = rand_default()\n\n \"\"\"\n TODO: Complex numbers are not correctly tested\n If blocks should be switched in order, and relevant tests should be fixed\n \"\"\"\n def rand(shape, dtype):\n \"\"\"The random sampler function.\"\"\"\n if not dtypes.issubdtype(dtype, onp.floating):\n # only float types have inf and nan\n return base_rand(shape, dtype)\n\n if dtypes.issubdtype(dtype, onp.complexfloating):\n base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype\n out = (rand(shape, base_dtype) +\n onp.array(1j, dtype) * rand(shape, base_dtype))\n return _cast_to_shape(out, shape, dtype)\n\n dims = _dims_of_shape(shape)\n posinf_flips = rng.rand(*dims) < 0.1\n neginf_flips = rng.rand(*dims) < 0.1\n nan_flips = rng.rand(*dims) < 0.1\n\n vals = base_rand(shape, dtype)\n vals = onp.where(posinf_flips, onp.array(onp.inf, dtype=dtype), vals)\n vals = onp.where(neginf_flips, onp.array(-onp.inf, dtype=dtype), vals)\n vals = onp.where(nan_flips, onp.array(onp.nan, dtype=dtype), vals)\n\n return _cast_to_shape(onp.asarray(vals, dtype=dtype), shape, dtype)\n\n return rand\n\n# TODO(mattjj): doesn't handle complex types\ndef rand_some_zero():\n \"\"\"Return a random sampler that produces some zeros.\"\"\"\n rng = npr.RandomState(1)\n base_rand = rand_default()\n\n def rand(shape, dtype):\n \"\"\"The random sampler function.\"\"\"\n dims = _dims_of_shape(shape)\n zeros = rng.rand(*dims) < 0.5\n\n vals = base_rand(shape, dtype)\n vals = onp.where(zeros, onp.array(0, dtype=dtype), vals)\n\n return _cast_to_shape(onp.asarray(vals, dtype=dtype), shape, dtype)\n\n return rand\n\n\ndef rand_int(low, high=None):\n randint = npr.RandomState(0).randint\n def fn(shape, dtype):\n return randint(low, high=high, size=shape, dtype=dtype)\n return fn\n\ndef rand_bool():\n rng = npr.RandomState(0)\n def generator(shape, dtype):\n return _cast_to_shape(rng.rand(*_dims_of_shape(shape)) < 0.5, shape, dtype)\n return generator\n\ndef check_raises(thunk, err_type, msg):\n try:\n thunk()\n assert False\n except err_type as e:\n assert str(e).startswith(msg), \"\\n{}\\n\\n{}\\n\".format(e, msg)\n\ndef check_raises_regexp(thunk, err_type, pattern):\n try:\n thunk()\n assert False\n except err_type as e:\n assert re.match(pattern, str(e)), \"{}\\n\\n{}\\n\".format(e, pattern)\n\n\ndef _iter_eqns(jaxpr):\n # TODO(necula): why doesn't this search in params?\n for eqn in jaxpr.eqns:\n yield eqn\n for subjaxpr in core.subjaxprs(jaxpr):\n yield from _iter_eqns(subjaxpr)\n\ndef assert_dot_precision(expected_precision, fun, *args):\n jaxpr = api.make_jaxpr(fun)(*args)\n precisions = [eqn.params['precision'] for eqn in _iter_eqns(jaxpr.jaxpr)\n if eqn.primitive == lax.dot_general_p]\n for precision in precisions:\n msg = \"Unexpected precision: {} != {}\".format(expected_precision, precision)\n assert precision == expected_precision, msg\n\n\n_CACHED_INDICES: Dict[int, Sequence[int]] = {}\n\ndef cases_from_list(xs):\n xs = list(xs)\n n = 
len(xs)\n k = min(n, FLAGS.num_generated_cases)\n # Random sampling for every parameterized test is expensive. Do it once and\n # cache the result.\n indices = _CACHED_INDICES.get(n)\n if indices is None:\n rng = npr.RandomState(42)\n _CACHED_INDICES[n] = indices = rng.permutation(n)\n return [xs[i] for i in indices[:k]]\n\ndef cases_from_gens(*gens):\n sizes = [1, 3, 10]\n cases_per_size = int(FLAGS.num_generated_cases / len(sizes)) + 1\n for size in sizes:\n for i in range(cases_per_size):\n yield ('_{}_{}'.format(size, i),) + tuple(gen(size) for gen in gens)\n\n\nclass JaxTestCase(parameterized.TestCase):\n \"\"\"Base class for JAX tests including numerical checks and boilerplate.\"\"\"\n\n def assertArraysAllClose(self, x, y, check_dtypes, atol=None, rtol=None):\n \"\"\"Assert that x and y are close (up to numerical tolerances).\"\"\"\n self.assertEqual(x.shape, y.shape)\n atol = max(tolerance(_dtype(x), atol), tolerance(_dtype(y), atol))\n rtol = max(tolerance(_dtype(x), rtol), tolerance(_dtype(y), rtol))\n\n _assert_numpy_allclose(x, y, atol=atol, rtol=rtol)\n\n if check_dtypes:\n self.assertDtypesMatch(x, y)\n\n def assertDtypesMatch(self, x, y):\n if FLAGS.jax_enable_x64:\n self.assertEqual(_dtype(x), _dtype(y))\n\n def assertAllClose(self, x, y, check_dtypes, atol=None, rtol=None):\n \"\"\"Assert that x and y, either arrays or nested tuples/lists, are close.\"\"\"\n if isinstance(x, dict):\n self.assertIsInstance(y, dict)\n self.assertEqual(set(x.keys()), set(y.keys()))\n for k in x.keys():\n self.assertAllClose(x[k], y[k], check_dtypes, atol=atol, rtol=rtol)\n elif is_sequence(x) and not hasattr(x, '__array__'):\n self.assertTrue(is_sequence(y) and not hasattr(y, '__array__'))\n self.assertEqual(len(x), len(y))\n for x_elt, y_elt in zip(x, y):\n self.assertAllClose(x_elt, y_elt, check_dtypes, atol=atol, rtol=rtol)\n elif hasattr(x, '__array__') or onp.isscalar(x):\n self.assertTrue(hasattr(y, '__array__') or onp.isscalar(y))\n if check_dtypes:\n self.assertDtypesMatch(x, y)\n x = onp.asarray(x)\n y = onp.asarray(y)\n self.assertArraysAllClose(x, y, check_dtypes=False, atol=atol, rtol=rtol)\n elif x == y:\n return\n else:\n raise TypeError((type(x), type(y)))\n\n def assertMultiLineStrippedEqual(self, expected, what):\n \"\"\"Asserts two strings are equal, after stripping each line.\"\"\"\n ignore_space_re = re.compile(r'\\s*\\n\\s*')\n expected_clean = re.sub(ignore_space_re, '\\n', expected.strip())\n what_clean = re.sub(ignore_space_re, '\\n', what.strip())\n self.assertMultiLineEqual(expected_clean, what_clean,\n msg=\"Found\\n{}\\nExpecting\\n{}\".format(what, expected))\n\n def _CompileAndCheck(self, fun, args_maker, check_dtypes,\n rtol=None, atol=None):\n \"\"\"Helper method for running JAX compilation and allclose assertions.\"\"\"\n args = args_maker()\n\n def wrapped_fun(*args):\n self.assertTrue(python_should_be_executing)\n return fun(*args)\n\n python_should_be_executing = True\n python_ans = fun(*args)\n\n python_shapes = tree_map(lambda x: onp.shape(x), python_ans)\n onp_shapes = tree_map(lambda x: onp.shape(onp.asarray(x)), python_ans)\n self.assertEqual(python_shapes, onp_shapes)\n\n cache_misses = xla.xla_primitive_callable.cache_info().misses\n python_ans = fun(*args)\n self.assertEqual(\n cache_misses, xla.xla_primitive_callable.cache_info().misses,\n \"Compilation detected during second call of {} in op-by-op \"\n \"mode.\".format(fun))\n\n cfun = api.jit(wrapped_fun)\n python_should_be_executing = True\n monitored_ans = cfun(*args)\n\n 
python_should_be_executing = False\n compiled_ans = cfun(*args)\n\n self.assertAllClose(python_ans, monitored_ans, check_dtypes, atol, rtol)\n self.assertAllClose(python_ans, compiled_ans, check_dtypes, atol, rtol)\n\n args = args_maker()\n\n python_should_be_executing = True\n python_ans = fun(*args)\n\n python_should_be_executing = False\n compiled_ans = cfun(*args)\n\n self.assertAllClose(python_ans, compiled_ans, check_dtypes, atol, rtol)\n\n def _CheckAgainstNumpy(self, numpy_reference_op, lax_op, args_maker,\n check_dtypes=False, tol=None):\n args = args_maker()\n numpy_ans = numpy_reference_op(*args)\n lax_ans = lax_op(*args)\n self.assertAllClose(numpy_ans, lax_ans, check_dtypes=check_dtypes,\n atol=tol, rtol=tol)\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array", "numpy.asarray", "numpy.random.RandomState", "numpy.shape", "numpy.where", "numpy.isscalar", "numpy.conj", "numpy.dtype" ] ]
SallyDa/typhon
[ "8bd265e263a155e8c64e6d4aa9cb6c0f1e3bcc7d" ]
[ "typhon/plots/colors.py" ]
[ "\"\"\"Utility functions related to plotting.\"\"\"\nimport csv\nimport os\nimport re\nfrom functools import lru_cache\nfrom warnings import warn\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nfrom matplotlib.colors import LinearSegmentedColormap\nimport numpy as np\n\nfrom typhon.utils import deprecated\n\n__all__ = [\n 'mpl_colors',\n 'cmap2rgba',\n 'colors2cmap',\n 'cmap2txt',\n 'cmap2cpt',\n 'cmap2act',\n 'cmap2c3g',\n 'cmap2ggr',\n 'cmap_from_act',\n 'cmap_from_txt',\n 'get_material_design',\n]\n\n\n@deprecated(new_name='typhon.plots.cmap2rgba')\ndef mpl_colors(cmap=None, N=None):\n \"\"\"Return a list of RGB values.\n\n Parameters:\n cmap (str): Name of a registered colormap.\n N (int): Number of colors to return.\n If ``None`` use the number of colors defined in the colormap.\n\n Returns:\n np.array: Array with RGB and alpha values.\n\n Examples:\n >>> mpl_colors('viridis', 5)\n array([[ 0.267004, 0.004874, 0.329415, 1. ],\n [ 0.229739, 0.322361, 0.545706, 1. ],\n [ 0.127568, 0.566949, 0.550556, 1. ],\n [ 0.369214, 0.788888, 0.382914, 1. ],\n [ 0.993248, 0.906157, 0.143936, 1. ]])\n \"\"\"\n if cmap is None:\n cmap = plt.rcParams['image.cmap']\n\n if N is None:\n N = plt.get_cmap(cmap).N\n\n return plt.get_cmap(cmap)(np.linspace(0, 1, N))\n\n\ndef cmap2rgba(cmap=None, N=None, interpolate=True):\n \"\"\"Convert a colormap into a list of RGBA values.\n\n Parameters:\n cmap (str): Name of a registered colormap.\n N (int): Number of RGBA-values to return.\n If ``None`` use the number of colors defined in the colormap.\n interpolate (bool): Toggle the interpolation of values in the\n colormap. If ``False``, only values from the colormap are\n used. This may lead to the re-use of a color, if the colormap\n provides less colors than requested. If ``True``, a lookup table\n is used to interpolate colors (default is ``True``).\n\n Returns:\n ndarray: RGBA-values.\n\n Examples:\n >>> cmap2rgba('viridis', 5)\n array([[ 0.267004, 0.004874, 0.329415, 1. ],\n [ 0.229739, 0.322361, 0.545706, 1. ],\n [ 0.127568, 0.566949, 0.550556, 1. ],\n [ 0.369214, 0.788888, 0.382914, 1. ],\n [ 0.993248, 0.906157, 0.143936, 1. 
]])\n \"\"\"\n cmap = plt.get_cmap(cmap)\n\n if N is None:\n N = cmap.N\n\n nlut = N if interpolate else None\n\n if interpolate and isinstance(cmap, colors.ListedColormap):\n # `ListedColormap` does not support lookup table interpolation.\n cmap = colors.LinearSegmentedColormap.from_list('', cmap.colors)\n return cmap(np.linspace(0, 1, N))\n\n return plt.get_cmap(cmap.name, lut=nlut)(np.linspace(0, 1, N))\n\n\ndef _to_hex(c):\n \"\"\"Convert arbitray color specification to hex string.\"\"\"\n ctype = type(c)\n\n # Convert rgb to hex.\n if ctype is tuple or ctype is np.ndarray or ctype is list:\n return colors.rgb2hex(c)\n\n if ctype is str:\n # If color is already hex, simply return it.\n regex = re.compile('^#[A-Fa-f0-9]{6}$')\n if regex.match(c):\n return c\n\n # Convert named color to hex.\n return colors.cnames[c]\n\n raise Exception(\"Can't handle color of type: {}\".format(ctype))\n\n\ndef colors2cmap(*args, name=None):\n \"\"\"Create a colormap from a list of given colors.\n\n Parameters:\n *args: Arbitrary number of colors (Named color, HEX or RGB).\n name (str): Name with which the colormap is registered.\n\n Returns:\n LinearSegmentedColormap.\n\n Examples:\n >>> colors2cmap('darkorange', 'white', 'darkgreen', name='test')\n \"\"\"\n if len(args) < 2:\n raise Exception(\"Give at least two colors.\")\n\n cmap_data = [_to_hex(c) for c in args]\n cmap = colors.LinearSegmentedColormap.from_list(name, cmap_data)\n plt.register_cmap(name, cmap)\n\n return cmap\n\n\ndef cmap2txt(cmap, filename=None, N=None, comments='%'):\n \"\"\"Export colormap to txt file.\n\n Parameters:\n cmap (str): Colormap name.\n filename (str): Optional filename.\n Default: cmap + '.txt'\n N (int): Number of colors.\n comments (str): Character to start comments with.\n\n \"\"\"\n colors = cmap2rgba(cmap, N)\n header = 'Colormap \"{}\"'.format(cmap)\n\n if filename is None:\n filename = cmap + '.txt'\n\n np.savetxt(filename, colors[:, :3], fmt='%.4f', header=header,\n comments=comments)\n\n\ndef cmap2cpt(cmap, filename=None, N=None):\n \"\"\"Export colormap to cpt file.\n\n Parameters:\n cmap (str): Colormap name.\n filename (str): Optional filename.\n Default: cmap + '.cpt'\n N (int): Number of colors.\n\n \"\"\"\n colors = cmap2rgba(cmap, N)\n header = ('# GMT palette \"{}\"\\n'\n '# COLOR_MODEL = RGB\\n'.format(cmap))\n\n left = '{:>3d} {:>3d} {:>3d} {:>3d} '.format\n right = '{:>3d} {:>3d} {:>3d} {:>3d}\\n'.format\n\n if filename is None:\n filename = cmap + '.cpt'\n\n with open(filename, 'w') as f:\n f.write(header)\n\n # For each level specify a ...\n for n in range(len(colors)):\n rgb = [int(c * 255) for c in colors[n, :3]]\n # ... start color ...\n f.write(left(n, *rgb))\n # ... and end color.\n f.write(right(n + 1, *rgb))\n\n\ndef cmap2act(cmap, filename=None, N=None):\n \"\"\"Export colormap to Adobe Color Table file.\n\n Parameters:\n cmap (str): Colormap name.\n filename (str): Optional filename.\n Default: cmap + '.cpt'\n N (int): Number of colors.\n\n \"\"\"\n if filename is None:\n filename = cmap + '.act'\n\n # If the number of color levels to export is not set...\n if N is None:\n # ... 
use the number of colors defined in the colormap.\n N = plt.get_cmap(cmap).N\n\n if N > 256:\n N = 256\n warn('Maximum number of colors is 256.')\n\n colors = cmap2rgba(cmap, N)[:, :3]\n\n rgb = np.zeros(256 * 3 + 2)\n rgb[:colors.size] = (colors.flatten() * 255).astype(np.uint8)\n rgb[768:770] = np.uint8(N // 2**8), np.uint8(N % 2**8)\n\n rgb.astype(np.uint8).tofile(filename)\n\n\ndef cmap2c3g(cmap, filename=None, N=None):\n \"\"\"Export colormap as a CSS3 gradient.\n\n Parameters:\n cmap (str): Colormap name.\n filename (str): Optional filename.\n Default: cmap + '.c3g'\n N (int): Number of colors.\n\n \"\"\"\n if filename is None:\n filename = cmap + '.c3g'\n\n colors = cmap2rgba(cmap, N)\n\n header = (\n '/*'\n ' CSS3 Gradient \"{}\"\\n'\n '*/\\n\\n'\n 'linear-gradient(\\n'\n ' 0deg,\\n'\n ).format(cmap)\n\n color_spec = ' rgb({:>3d},{:>3d},{:>3d}) {:>8.3%}'.format\n\n with open(filename, 'w') as f:\n f.write(header)\n\n ncolors = len(colors)\n for n in range(ncolors):\n r, g, b = [int(c * 255) for c in colors[n, :3]]\n f.write(color_spec(r, g, b, n / (ncolors - 1)))\n if n < ncolors - 1:\n f.write(',\\n')\n\n f.write('\\n );')\n\n\ndef cmap2ggr(cmap, filename=None, N=None):\n \"\"\"Export colormap as GIMP gradient.\n\n Parameters:\n cmap (str): Colormap name.\n filename (str): Optional filename.\n Default: cmap + '.ggr'\n N (int): Number of colors.\n\n \"\"\"\n if filename is None:\n filename = cmap + '.ggr'\n\n colors = cmap2rgba(cmap, N)\n header = ('GIMP Gradient\\n'\n 'Name: {}\\n'\n '{}\\n').format(cmap, len(colors) - 1)\n\n line = ('{:.6f} {:.6f} {:.6f} ' # start, middle, stop\n '{:.6f} {:.6f} {:.6f} {:.6f} ' # RGBA\n '{:.6f} {:.6f} {:.6f} {:.6f} ' # RGBA next level\n '0 0\\n').format\n\n def idx(x):\n return x / (len(colors) - 1)\n\n with open(filename, 'w') as f:\n f.write(header)\n\n for n in range(len(colors) - 1):\n rgb = colors[n, :]\n rgb_next = colors[n + 1, :]\n f.write(line(idx(n), idx(n + 0.5), idx(n + 1), *rgb, *rgb_next))\n\n\ndef cmap_from_act(file, name=None):\n \"\"\"Import colormap from Adobe Color Table file.\n\n Parameters:\n file (str): Path to act file.\n name (str): Colormap name. Defaults to filename without extension.\n\n Returns:\n LinearSegmentedColormap.\n \"\"\"\n # Extract colormap name from filename.\n if name is None:\n name = os.path.splitext(os.path.basename(file))[0]\n\n # Read binary file and determine number of colors\n rgb = np.fromfile(file, dtype=np.uint8)\n if rgb.shape[0] >= 770:\n ncolors = rgb[768] * 2**8 + rgb[769]\n else:\n ncolors = 256\n\n colors = rgb[:ncolors*3].reshape(ncolors, 3) / 255\n\n # Create and register colormap...\n cmap = LinearSegmentedColormap.from_list(name, colors, N=ncolors)\n plt.register_cmap(cmap=cmap) # Register colormap.\n\n # ... and the reversed colormap.\n cmap_r = LinearSegmentedColormap.from_list(\n name + '_r', np.flipud(colors), N=ncolors)\n plt.register_cmap(cmap=cmap_r)\n\n return cmap\n\n\ndef cmap_from_txt(file, name=None, N=-1, comments='%'):\n \"\"\"Import colormap from txt file.\n\n Reads colormap data (RGB/RGBA) from an ASCII file.\n Values have to be given in [0, 1] range.\n\n Parameters:\n file (str): Path to txt file.\n name (str): Colormap name. 
Defaults to filename without extension.\n N (int): Number of colors.\n ``-1`` means all colors (i.e., the complete file).\n comments (str): Character to start comments with.\n\n Returns:\n LinearSegmentedColormap.\n \"\"\"\n # Extract colormap name from filename.\n if name is None:\n name = os.path.splitext(os.path.basename(file))[0]\n\n # Read text file and determine number of colors\n rgb = np.genfromtxt(file, comments=comments)\n if N == -1:\n N = np.shape(rgb)[0]\n\n if np.min(rgb) < 0 or np.max(rgb) > 1:\n raise Exception('RGB value out of range: [0, 1].')\n\n # Create and register colormap...\n cmap = LinearSegmentedColormap.from_list(name, rgb, N=N)\n plt.register_cmap(cmap=cmap)\n\n # ... and the reversed colormap.\n cmap_r = LinearSegmentedColormap.from_list(\n name + '_r', np.flipud(rgb), N=N)\n plt.register_cmap(cmap=cmap_r)\n\n return cmap\n\n\n@lru_cache(16)\ndef get_material_design(name, shade=None):\n \"\"\"Return material design colors.\n\n Parameters:\n name (str): Color name (e.g. 'red').\n shade (str): Color shade (e.g. '500').\n If ``None`` all defined shades are returned.\n\n Returns:\n str or list[str]: Hex RGB value or list of hex RGB values.\n\n References:\n https://material.io/design/color/the-color-system.html\n\n Raises:\n ValueError: If the specified ``name`` or ``shade`` is not defined.\n\n Examples:\n >>> get_material_design('red', shade='500')\n '#F44336'\n\n >>> get_material_design('red')\n ['#FFEBEE', '#FFCDD2', '#EF9A9A', '#E57373', '#EF5350', '#F44336',\n '#E53935', '#D32F2F', '#C62828', '#B71C1C', '#FF8A80', '#FF5252',\n '#FF1744', '#D50000']\n\n \"\"\"\n material_source = os.path.join(os.path.dirname(__file__), 'material.csv')\n with open(material_source, newline='') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=',')\n\n available_colors = []\n for row in reader:\n available_colors.append(row['name'])\n if available_colors[-1] == name:\n available_shades = [k for k, v in row.items()\n if v and k != 'name']\n\n if shade is None:\n return [c for c in\n list(row.values())[1:len(available_shades) + 1]]\n\n if shade in available_shades:\n return row[shade]\n else:\n raise ValueError(\n f'Shade \"{shade}\" not defined for color \"{name}\". '\n f'Available shades are:\\n{available_shades}'\n )\n\n raise ValueError(\n f'Color \"{name}\" not defined. '\n f'Available colors are:\\n{available_colors}.'\n )\n" ]
[ [ "numpy.max", "numpy.uint8", "numpy.savetxt", "numpy.zeros", "matplotlib.pyplot.get_cmap", "numpy.genfromtxt", "matplotlib.pyplot.register_cmap", "numpy.min", "numpy.shape", "numpy.flipud", "numpy.fromfile", "numpy.linspace", "matplotlib.colors.LinearSegmentedColormap.from_list" ] ]
lgcharpe/Masters
[ "a8e01672c1e64633a03ec334fc0ecb328f1da691", "a8e01672c1e64633a03ec334fc0ecb328f1da691" ]
[ "Tensorflow_tutorials/Simple Tutorials/Load and preprocess data/images.py", "Tensorflow_tutorials/Simple Tutorials/save_and_load.py" ]
[ "import tensorflow as tf\nimport IPython.display as display\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport pathlib\nimport time\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n\nprint(tf.__version__)\n\n# Setup\n\ndata_dir = tf.keras.utils.get_file(\n origin='https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',\n fname='flower_photos',\n untar=True\n)\n\ndata_dir = pathlib.Path(data_dir)\n\nimage_count = len(list(data_dir.glob('*/*.jpg')))\nprint(image_count)\n\nCLASS_NAMES = np.array([\n item.name for item in data_dir.glob('*') if item.name != \"LICENSE.txt\"])\nprint(CLASS_NAMES)\n\nroses = list(data_dir.glob('roses/*'))\n\nfor image_path in roses[:3]:\n display.display(Image.open(str(image_path)))\n\n# Load using keras.preprocessing\n\n# The 1./255 is to convert from uint8 to float32 in range [0,1].\nimage_generator = tf.keras.preprocessing.image.ImageDataGenerator(\n rescale=1. / 255)\n\nBATCH_SIZE = 32\nIMG_HEIGHT = 224\nIMG_WIDTH = 224\nSTEPS_PER_EPOCH = np.ceil(image_count/BATCH_SIZE)\n\ntrain_data_gen = image_generator.flow_from_directory(\n directory=str(data_dir),\n batch_size=BATCH_SIZE,\n shuffle=True,\n target_size=(IMG_HEIGHT, IMG_WIDTH),\n classes=list(CLASS_NAMES)\n)\n\n\ndef show_batch(image_batch, label_batch):\n plt.figure(figsize=(10, 10))\n for n in range(25):\n _ = plt.subplot(5, 5, n+1)\n plt.imshow(image_batch[n])\n plt.title(CLASS_NAMES[label_batch[n] == 1][0].title())\n plt.axis('off')\n\n\nimage_batch, label_batch = next(train_data_gen)\nshow_batch(image_batch, label_batch)\n\n# Load using tf.data\n\nlist_ds = tf.data.Dataset.list_files(str(data_dir/'*/*'))\n\nfor f in list_ds.take(5):\n print(f.numpy)\n\n\ndef get_label(file_path):\n # Convert the path to a list of path components\n parts = tf.strings.split(file_path, os.path.sep)\n # The second to last is the class-directory\n return parts[-2] == CLASS_NAMES\n\n\ndef decode_img(img):\n # Convert the compressed images to a 3D uint8 tensor\n img = tf.image.decode_jpeg(img, channels=3)\n # Use `convert_image_dtype` to convert to floats in the [0,1] range.\n img = tf.image.convert_image_dtype(img, tf.float32)\n # resize the image to the desired size.\n return tf.image.resize(img, [IMG_WIDTH, IMG_HEIGHT])\n\n\ndef process_path(file_path):\n label = get_label(file_path)\n # load the raw data from the file as a string\n img = tf.io.read_file(file_path)\n img = decode_img(img)\n return img, label\n\n\n# Basic methods for training, this lets the data be:\n# Well shuffled\n# Batched\n# Quick of access\n\n# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.\nlabeled_ds = list_ds.map(process_path, num_parallel_calls=AUTOTUNE)\n\ndel list_ds\n\nfor image, label in labeled_ds.take(1):\n print(\"Image shape:\", image.numpy().shape)\n print('Label:', label.numpy())\n\ndel image, label\n\n\ndef prepare_for_training(ds, cache=True, shuffle_buffer_size=1000):\n \"\"\"\n This is a small dataset, only load it once, and keep it in memory.\n Use `.cache(filename)`to cache preprocessing work for datasets that do not\n fir in memory.\n \"\"\"\n\n if cache:\n if isinstance(cache, str):\n ds = ds.cache(cache)\n else:\n ds = ds.cache()\n\n ds = ds.shuffle(buffer_size=shuffle_buffer_size)\n\n # Repeat forever\n ds = ds.repeat()\n\n ds = ds.batch(BATCH_SIZE)\n\n # `prefetch` lets the data fetch batches in the background while\n # the model is training.\n ds = ds.prefetch(buffer_size=AUTOTUNE)\n\n return ds\n\n\ntrain_ds = 
prepare_for_training(labeled_ds)\n\nimage_batch, label_batch = next(iter(train_ds))\n\nshow_batch(image_batch.numpy(), label_batch.numpy())\n\n# Tricks to improve Performance\n\ndefault_timeit_steps = 1000\n\n\ndef timeit(ds, steps=default_timeit_steps):\n start = time.time()\n it = iter(ds)\n for i in range(steps):\n _ = next(it)\n if i % 10 == 0:\n print('.', end='')\n print()\n end = time.time()\n\n duration = end - start\n print(f'{steps} batches: {duration} s')\n print(f'{BATCH_SIZE * steps / duration:0.5f} Images/s')\n\n\n# `keras.preprocessing`\ntimeit(train_data_gen)\n# 1000 batches: 57.89s\n# 553 Images/s\n\n# `tf.data`\ntimeit(train_ds)\n# 1000 batches: 8.60s\n# 3719 Images/s\n\nuncached_ds = prepare_for_training(labeled_ds, cache=False)\ntimeit(uncached_ds)\n# 1000 batches: 35.84s\n# 893 Images/s\n\nfilecache_ds = prepare_for_training(labeled_ds, cache='./flowers.tfcache')\ntimeit(filecache_ds)\n# 1000 batches: 31.52s\n# 1015 Images/s\n\n\nlist_ds = tf.data.Dataset.list_files(str(data_dir/'*/*'))\nlabeled_ds = list_ds.map(process_path, num_parallel_calls=AUTOTUNE)\ndel list_ds\ntrain_ds = prepare_for_training(labeled_ds)\n# filecache_ds = prepare_for_training(labeled_ds, cache='./flowers.tfcache')\n# uncached_ds = prepare_for_training(labeled_ds, cache=False)\ndel labeled_ds\ntimeit(train_ds)\n# timeit(filecache_ds)\n# timeit(uncached_ds)\n", "import os\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nprint(tf.version.VERSION)\n\n(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.\\\n mnist.load_data()\n\ntrain_labels = train_labels[:1000]\ntest_labels = test_labels[:1000]\n\ntrain_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0\ntest_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0\n\n\n# Define a simple sequential model\ndef create_model():\n model = tf.keras.models.Sequential([\n keras.layers.Dense(512, activation='relu', input_shape=(784,)),\n keras.layers.Dropout(0.2),\n keras.layers.Dense(10)\n ])\n\n model.compile(\n optimizer='adam',\n loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy']\n )\n\n return model\n\n\n# Create a basic model instance\nmodel = create_model()\n\n# Display the model's architecture\nprint(model.summary())\n\n# Save checkpoints during training\n\ncheckpoint_path = 'training\\\\cp.ckpt'\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\n# Create a callback that saves the model's weights\ncp_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_path,\n save_weights_only=True,\n verbose=1\n)\n\n# Train the model with the new callback\nmodel.fit(\n train_images,\n train_labels,\n epochs=10,\n validation_data=(test_images, test_labels),\n callbacks=[cp_callback] # Pass callback to training\n)\n\n# This may generate warnings related to saving the state of the optimizer.\n# These warnings (and similar warnings throughout this notebook) are in place\n# to discourage outdated usage, and can be ignored.\n\n# Loading the weights\n\n# Create a basic model instance\nmodel = create_model()\n\n# Evaluate the model\nloss, acc = model.evaluate(test_images, test_labels, verbose=2)\nprint(f\"Untrained model, accuracy: {100 * acc:5.2f}\")\n\n# Loads the weights\nmodel.load_weights(checkpoint_path)\n\n# Re-evaluate the model\nloss, acc = model.evaluate(test_images, test_labels, verbose=2)\nprint(f'Restored model, accuracy: {100 * acc:5.2f}')\n\n# Checkpoint callback options\n\n# Include the epoch in the file name (uses `str.format`)\ncheckpoint_path = 
\"training_2\\\\cp-{epoch:04d}.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\n# Create a callback that saves the model's weigths every 5 epochs\ncp_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_path,\n verbose=1,\n save_weights_only=True,\n period=5\n)\n\n# Create a model instance\nmodel = create_model()\n\n# Save the weights using the `checkpoint_path` format\nmodel.save_weights(checkpoint_path.format(epoch=0))\n\n# Train the model with the new callback\nmodel.fit(\n train_images,\n train_labels,\n epochs=50,\n callbacks=[cp_callback],\n validation_data=(test_images, test_labels),\n verbose=0\n)\n\nlatest = tf.train.latest_checkpoint(checkpoint_dir)\nprint(latest)\n\n# Create a new model instance\nmodel = create_model()\n\n# Load the previously saved weights\nmodel.load_weights(latest)\n\n# Re-evaluate the model\nloss, acc = model.evaluate(test_images, test_labels, verbose=2)\nprint(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))\n\n\n# Manually saving the weights\n\n# Save weights\nmodel.save_weights('.\\\\checkpoints\\\\my_checkpoint')\n\n# Create a new model instance\nmodel = create_model()\n\n# Restore the weights\nmodel.load_weights('.\\\\checkpoints\\\\my_checkpoint')\n\n# Evaluate the model\nloss, acc = model.evaluate(test_images, test_labels, verbose=2)\nprint(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))\n\n\n# Save the entire model (SavedModel)\n\n# Create and train a new model instance.\nmodel = create_model()\nmodel.fit(train_images, train_labels, epochs=5)\n\n# Save the entire model as a SavedModel\nmodel.save('saved_model\\\\my_model')\n\n# Load a model\nnew_model = tf.keras.models.load_model('saved_model\\\\my_model')\n\n# Check its architecture\nprint(new_model.summary())\n\n# Evaluate the restored model\nloss, acc = new_model.evaluate(test_images, test_labels, verbose=2)\nprint('Restored model, accuracy: {:5.2f}%'.format(100*acc))\n\nprint(new_model.predict(test_images).shape)\n\n\n# Save a model (HDF5)\n\n# Create and train a new model instance.\nmodel = create_model()\nmodel.fit(train_images, train_labels, epochs=5)\n\n# Save the entire model to a HDF5 file.\n# The '.h5' extension indicates that the model should be saved to HDF5\nmodel.save('my_model.h5')\n\n# Recreate the exact same model, including its weights and the optimizer\nnew_model = tf.keras.models.load_model('my_model.h5')\n\n# Show the model architecture\nprint(new_model.summary())\n\nloss, acc = new_model.evaluate(test_images, test_labels, verbose=2)\nprint('Restored model, accuracy: {:5.2f}%'.format(100*acc))\n" ]
[ [ "tensorflow.keras.preprocessing.image.ImageDataGenerator", "numpy.ceil", "tensorflow.keras.utils.get_file", "tensorflow.image.convert_image_dtype", "tensorflow.io.read_file", "tensorflow.strings.split", "matplotlib.pyplot.figure", "tensorflow.image.resize", "matplotlib.pyplot.imshow", "tensorflow.image.decode_jpeg", "matplotlib.pyplot.axis", "matplotlib.pyplot.subplot" ], [ "tensorflow.keras.datasets.mnist.load_data", "tensorflow.train.latest_checkpoint", "tensorflow.keras.layers.Dense", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.models.load_model", "tensorflow.keras.layers.Dropout", "tensorflow.losses.SparseCategoricalCrossentropy" ] ]
syoukera/void_py
[ "4a2d695cf16e38f731725ff781790834924d5e0c" ]
[ "boid_py/core.py" ]
[ "\nimport numpy as np\n\nclass boid:\n\t\"\"\"\n\tClass for calculating the positions of boids\n\t\"\"\"\n\tdef __init__(self, num_boid=100):\n\n\t\t# attributes:\n\t\tself.N = num_boid\n\t\t# strength of force\n\t\tself.cohesion_force = 0.0008\n\t\tself.separation_force = 1.15\n\t\tself.alignment_force = 1.0\n\n\t\t# distance at force-induction\n\t\tself.cohesion_distance = 0.8\n\t\tself.separation_distance = 0.08\n\t\tself.alignment_distance = 0.1\n\t\t# angle of force\n\t\tself.cohesion_angle = np.pi/2\n\t\tself.separation_angle = np.pi/2\n\t\tself.alignment_angle = np.pi/3\n\t\t# max/min of speed\n\t\tself.min_vel = 0.0005\n\t\tself.max_vel = 0.1\n\t\t# force at boundary\n\t\tself.boundary_force = 0.001\n\n\t\t# array for positions and distance\n\t\tself.x = np.random.rand(self.N, 3)*2 - 1\n\t\tself.v = (np.random.rand(self.N, 3)*2 - 1)*self.min_vel\n\t\tself.r = np.empty((self.N, 3))\n\n\t\t# array for tensor calculation\n\t\tself.diff_x = np.zeros((self.N, self.N, 3))\n\t\tself.distance = np.empty((self.N, self.N))\n\t\tself.angle = np.empty((self.N, self.N))\n\n\t\tself.coh_agents_x = np.empty((self.N, self.N, 3))\n\t\tself.sep_agents_x = np.empty((self.N, self.N, 3))\n\t\tself.ali_agents_v = np.empty((self.N, self.N, 3))\n\t\t\n\t\t# array for three forces\n\t\tself.dv_coh = np.empty((self.N, 3))\n\t\tself.dv_sep = np.empty((self.N, 3))\n\t\tself.dv_ali = np.empty((self.N, 3))\n\t\tself.dv_boundary = np.empty((self.N, 3))\n\n\tdef update(self):\n\t\t'''\n\t\tUpdate positions of boids\n\t\t1. Calculate distance and angle for every pairs of bois \n\t\t2. Calculate dv for Cohesion, Separation, Alignment, and Boundary\n\t\t3. Add dv to v\n\t\t4. Add v to x\n\t\t'''\n\t\tself.diff_x *= 0.0\n\t\tself.diff_x += self.x.reshape((-1, self.N, 3))\n\t\tself.diff_x -= self.x.reshape((self.N, -1, 3))\n\n\t\tself.distance = np.linalg.norm(self.diff_x, axis=2)\n\t\tself.angle = np.arccos(\n\t\t\t\t\t\tnp.divide(\n\t\t\t\t\t\t\tnp.sum(np.multiply(self.v, self.diff_x), axis=2),\n\t\t\t\t\t\t\tnp.multiply(np.linalg.norm(self.v, axis=1) , np.linalg.norm(self.diff_x, axis=2))\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\n\t\t# Cohesion\n\t\tcoh_agents_bool = (self.distance > self.cohesion_distance) | (self.angle > self.cohesion_angle)\n\n\t\tself.coh_agents_x *= 0.0\n\t\tself.coh_agents_x += self.x\n\t\tself.coh_agents_x[coh_agents_bool] = 0.0\n\t\t\n\t\tcoh_agents_num = coh_agents_bool.shape[1] - np.count_nonzero(coh_agents_bool, axis=1)\n\t\tcoh_agents_num[coh_agents_num == 0] = 1\n\t\t\n\t\tself.dv_coh = self.cohesion_force*(np.divide(np.sum(self.coh_agents_x, axis=1).T, coh_agents_num).T - self.x)\n\t\t\n\t\t# Separation\n\t\tsep_agents_bool = (self.distance > self.separation_distance) | (self.angle > self.separation_angle)\n\n\t\tself.sep_agents_x *= 0.0\n\t\tself.sep_agents_x += self.x\n\t\tself.sep_agents_x[sep_agents_bool] = 0.0\n\t\t\n\t\tsep_agents_num = sep_agents_bool.shape[1] - np.count_nonzero(sep_agents_bool, axis=1)\n\t\tsep_agents_num[sep_agents_num == 0] = 1\n\t\t\n\t\tself.dv_sep = self.separation_force*(np.divide(np.sum(self.sep_agents_x, axis=1).T, sep_agents_num).T - self.x)\n\n\t\t# Alignment\n\t\tali_agents_bool = (self.distance > self.alignment_distance) | (self.angle > self.alignment_angle)\n\n\t\tself.ali_agents_v *= 0.0\n\t\tself.ali_agents_v += self.v\n\t\tself.ali_agents_v[ali_agents_bool] = 0.0\n\t\t\n\t\tali_agents_num = ali_agents_bool.shape[1] - np.count_nonzero(ali_agents_bool, axis=1)\n\t\tali_agents_num[ali_agents_num == 0] = 1\n\t\t\n\t\tself.dv_ali = 
self.alignment_force*(np.divide(np.sum(self.ali_agents_v, axis=1).T, ali_agents_num).T - self.v)\n\n\t\t# Boundary\n\t\tself.dist_center = np.linalg.norm(self.x, axis=1)\n\t\tdist_center_bool = (self.dist_center < 1)\n\t\tself.dv_boundary = - self.boundary_force*np.multiply(\n\t\t\t\tself.x.T, \n\t\t\t\tnp.divide(self.dist_center-1, self.dist_center)\n\t\t\t).T\n\t\tself.dv_boundary[dist_center_bool] = 0.0\n\n\t\t# Update v\n\t\tself.v += self.dv_coh + self.dv_sep + self.dv_ali + self.dv_boundary\n\t\t\n\t\tv_abs = np.linalg.norm(self.v, axis=1)\n\n\t\tmin_vel_bool = (v_abs < self.min_vel)\n\t\tself.v[min_vel_bool] = self.min_vel*np.divide(self.v[min_vel_bool].T, v_abs[min_vel_bool]).T\n\t\tmax_vel_bool = (v_abs > self.max_vel)\n\t\tself.v[max_vel_bool] = self.max_vel*np.divide(self.v[max_vel_bool].T, v_abs[max_vel_bool]).T\n\n\t\tself.x += self.v" ]
[ [ "numpy.divide", "numpy.count_nonzero", "numpy.linalg.norm", "numpy.empty", "numpy.random.rand", "numpy.zeros", "numpy.sum", "numpy.multiply" ] ]
ErinZhang1998/howto100m-erin
[ "1152ea0fe328d20fcf2218a1d548644881632656", "1152ea0fe328d20fcf2218a1d548644881632656" ]
[ "loss.py", "youcook_dataloader.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nimport torch.nn.functional as F\nimport torch as th\nimport numpy as np\n\nclass MaxMarginRankingLoss(th.nn.Module):\n def __init__(self,\n margin=1.0,\n negative_weighting=False,\n batch_size=1,\n n_pair=1,\n hard_negative_rate=0.5,\n ):\n super(MaxMarginRankingLoss, self).__init__()\n self.margin = margin\n self.n_pair = n_pair\n self.batch_size = batch_size\n easy_negative_rate = 1 - hard_negative_rate\n self.easy_negative_rate = easy_negative_rate\n self.negative_weighting = negative_weighting\n if n_pair > 1:\n alpha = easy_negative_rate / ((batch_size - 1) * (1 - easy_negative_rate))\n mm_mask = (1 - alpha) * np.eye(self.batch_size) + alpha\n mm_mask = np.kron(mm_mask, np.ones((n_pair, n_pair)))\n mm_mask = th.tensor(mm_mask) * (batch_size * (1 - easy_negative_rate))\n self.mm_mask = mm_mask.float().cuda()\n\n\n def forward(self, x):\n d = th.diag(x)\n max_margin = F.relu(self.margin + x - d.view(-1, 1)) + \\\n F.relu(self.margin + x - d.view(1, -1))\n if self.negative_weighting and self.n_pair > 1:\n max_margin = max_margin * self.mm_mask\n return max_margin.mean()\n\nclass TripletLoss(th.nn.Module):\n def __init__(self,\n margin=1.0,\n negative_weighting=False,\n batch_size=1,\n n_pair=1,\n hard_negative_rate=0.5,\n ):\n super(TripletLoss, self).__init__()\n self.margin = margin\n self.n_pair = n_pair\n self.batch_size = batch_size\n easy_negative_rate = 1 - hard_negative_rate\n self.easy_negative_rate = easy_negative_rate\n self.negative_weighting = negative_weighting\n if n_pair > 1:\n alpha = easy_negative_rate / ((batch_size - 1) * (1 - easy_negative_rate))\n mm_mask = (1 - alpha) * np.eye(self.batch_size) + alpha\n mm_mask = np.kron(mm_mask, np.ones((n_pair, n_pair)))\n mm_mask = th.tensor(mm_mask) * (batch_size * (1 - easy_negative_rate))\n self.mm_mask = mm_mask.float().cuda()\n\n def generate_mask(self, labels):\n '''\n labels: (N,1)\n mask: (N,N,N)\n\n False if any:\n 1) label i != label j\n 2) label i == label k\n 3) i == j\n '''\n labels = labels + 1\n N = len(labels)\n la_not_lp = labels[None, :] != labels[:, None]\n la_is_ln = labels[None, :] == labels[:, None]\n # print(labels.shape, la_not_lp.shape, la_is_ln.shape)\n la_not_lp = la_not_lp.view((N,N))\n la_is_ln = la_is_ln.view((N,N))\n mask1 = la_not_lp[:, :,None] + la_is_ln[:, None, :]\n\n ind_vec = th.arange(N).view((-1,1))\n a_eq_p = (ind_vec[None, :] == ind_vec[:, None]).view((N,N))\n a_eq_p = a_eq_p[:,:,None]\n all_false = (th.zeros(N) > 0).view((1,-1))\n all_false = all_false[None,:,:]\n mask2 = a_eq_p + all_false\n mask2 = mask2.to(mask1.device)\n\n mask = th.logical_not(mask1 + mask2)\n return mask\n\n def calculate_loss(self, pairwise_dist, labels):\n anchor_positive_dist = pairwise_dist[:, :, None] #th.unsqueeze(pairwise_dist, dim=2)\n anchor_negative_dist = pairwise_dist[:, None, :] #th.unsqueeze(pairwise_dist, dim=1)\n triplet_loss = anchor_positive_dist - anchor_negative_dist + self.margin\n\n mask = self.generate_mask(labels)\n triplet_loss = F.relu(triplet_loss) * mask\n\n return th.sum(triplet_loss) / th.sum(mask).item()\n\n def forward(self, pairwise_dist, labels):\n loss_tvv = self.calculate_loss(pairwise_dist, labels)\n loss_vtt = self.calculate_loss(pairwise_dist.T, labels)\n\n return loss_tvv + loss_vtt", "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import 
print_function\n\nimport torch as th\nfrom torch.utils.data import Dataset\nimport pickle\nimport torch.nn.functional as F\nimport numpy as np\nimport re\nfrom collections import defaultdict\nfrom torch.utils.data.dataloader import default_collate\n\nclass Youcook_DataLoader(Dataset):\n \"\"\"Youcook dataset loader.\"\"\"\n\n def __init__(\n self,\n data,\n we,\n we_dim=300,\n max_words=30,\n ):\n \"\"\"\n Args:\n \"\"\"\n self.data = pickle.load(open(data, 'rb'))\n self.we = we\n self.we_dim = we_dim\n self.max_words = max_words\n\n def __len__(self):\n return len(self.data)\n\n def custom_collate(self, batch):\n return default_collate(batch)\n\n def _zero_pad_tensor(self, tensor, size):\n if len(tensor) >= size:\n return tensor[:size]\n else:\n zero = np.zeros((size - len(tensor), self.we_dim), dtype=np.float32)\n return np.concatenate((tensor, zero), axis=0)\n\n def _tokenize_text(self, sentence):\n w = re.findall(r\"[\\w']+\", str(sentence))\n return w\n\n def _words_to_we(self, words):\n words = [word for word in words if word in self.we.vocab]\n if words:\n we = self._zero_pad_tensor(self.we[words], self.max_words)\n return th.from_numpy(we)\n else:\n return th.zeros(self.max_words, self.we_dim)\n\n def __getitem__(self, idx):\n feat_2d = F.normalize(th.from_numpy(self.data[idx]['2d']).float(), dim=0)\n feat_3d = F.normalize(th.from_numpy(self.data[idx]['3d']).float(), dim=0)\n video = th.cat((feat_2d, feat_3d))\n cap = self.data[idx]['caption']\n caption = self._words_to_we(self._tokenize_text(cap))\n \n cap_words = self._tokenize_text(cap)\n cap_words_filtered = [word for word in cap_words if word in self.we.vocab]\n caption_indices = [self.we.vocab[word].index for word in cap_words_filtered]\n caption_indices_tensor = np.array(caption_indices).reshape(-1,)\n if len(caption_indices_tensor) > self.max_words:\n caption_indices_tensor = caption_indices_tensor[:self.max_words]\n else:\n zero = np.zeros(self.max_words - len(caption_indices_tensor), dtype=np.float32)\n caption_indices_tensor = np.concatenate((caption_indices_tensor, zero), axis=0)\n caption_indices_tensor = th.FloatTensor(caption_indices_tensor)\n \n return {'video': video, 'text': caption, 'caption_idx': caption_indices_tensor, 'video_id': self.data[idx]['id']}\n" ]
[ [ "torch.zeros", "torch.arange", "numpy.ones", "torch.nn.functional.relu", "numpy.eye", "torch.logical_not", "torch.tensor", "torch.diag", "torch.sum" ], [ "numpy.concatenate", "torch.zeros", "torch.cat", "numpy.array", "torch.FloatTensor", "torch.from_numpy", "torch.utils.data.dataloader.default_collate" ] ]
bbennett80/ClinicalTrials
[ "651d53aadf1680002cd0a4f2f85336abee4dbf77" ]
[ "nlp/criteria_download.py" ]
[ "import json\nimport requests\nimport csv\nfrom glob import glob\nimport pandas as pd\nfrom pathlib import Path\nfrom tqdm import trange\n\ndef main():\n folder_setup()\n download_trials()\n write_txt()\n\n\ndef folder_setup():\n \"\"\"Makes directory 'Full_Studies' to which trial files are downloaded.\"\"\"\n current_directory = Path.cwd()\n \n global studies_directory \n studies_directory = current_directory / r'Full_Studies_test'\n\n not_available = studies_directory / r'log.txt'\n\n criteria_file = studies_directory / r'criteria.txt'\n\n if not Path.exists(studies_directory):\n Path.mkdir(studies_directory)\n\n if not Path.exists(not_available):\n pass\n else:\n Path.unlink(not_available)\n\n if not Path.exists(criteria_file):\n pass\n else:\n Path.unlink(criteria_file)\n return\n\ndef build_url(expr: str='Cancer',\n country: str='United States',\n status: str='Recruiting',\n study_type: str='Interventional',\n field_names: list=['NCTId','OfficialTitle','StartDate',\n 'PrimaryCompletionDate','LastUpdatePostDate',\n 'Condition','Gender','MaximumAge','EligibilityCriteria',\n 'CentralContactName','CentralContactPhone','CentralContactEMail',\n 'LocationFacility','LocationCity','LocationState',\n 'LocationZip','LeadSponsorName'],\n min_rnk: int=1,\n max_rnk: int=999,\n fmt: str='csv'\n ) -> str:\n \n \"\"\"returns api url for the study fields api on clinicaltrials.gov (https://clinicaltrials.gov/api/gui/demo/simple_study_fields).\n expr - defaults to Cancer trials. However, any expression one might consider for clinicaltrials.gov.\n country - defaults to The United States. However, any country can be entered.\n status - defaults to Recruiting. However, the following status can also be passed:\n Not yet recruiting: Participants are not yet being recruited\n Recruiting: Participants are currently being recruited, whether or not any participants have yet been enrolled\n Enrolling by invitation: Participants are being (or will be) selected from a predetermined population\n Active, not recruiting: Study is continuing, meaning participants are receiving an intervention or being examined, but new participants are not currently being recruited or enrolled\n Completed: The study has concluded normally; participants are no longer receiving an intervention or being examined (that is, last participant’s last visit has occurred)\n Suspended: Study halted prematurely but potentially will resume\n Terminated: Study halted prematurely and will not resume; participants are no longer being examined or receiving intervention\n Withdrawn: Study halted prematurely, prior to enrollment of first participant\n study_type - defaults to Interventional trials. However, Observational can also be passed.\n field_names - a list of data elements and their corresponding API fields as described in the crosswalk documentation. (https://clinicaltrials.gov/api/gui/ref/crosswalks)\n min_rnk = defaults to 1. Can be any interger.\n max_rnk - defaults to 1000 records. Can range from 1 - 1000.\n fmt - defaults to csv. 
However, json and xml can also be passed.\n \n \"\"\"\n \n base_url = 'https://clinicaltrials.gov/api/query/study_fields?'\n \n if not expr:\n expr = ''\n else:\n expr = f\"{expr.replace(' ', '+')}+AND+\"\n \n if not status:\n status = ''\n else:\n status = f\"{status.replace(' ', '+')}\"\n \n if study_type == 'Observational' or study_type == 'Interventional':\n study_type = study_type\n else:\n print(\"\"\" This parameter only accepts Observational or Interventional.\n The url will not build if other parameters are entered.\n \"\"\")\n \n country = country.replace(' ', '+')\n\n age = 'AND+AREA%5BMinimumAge%5D18+Years&'\n fields = \"%2C+\".join(field_names)\n \n api_url = f'{base_url}expr={expr}AREA%5BLocationCountry%5D{country}+AND+AREA%5BLocationStatus%5D{status}+AND+AREA%5BStudyType%5D{study_type}+{age}fields={fields}&min_rnk={min_rnk}&max_rnk={max_rnk}&fmt={fmt}'\n\n return api_url\n\n\ndef generate_urls():\n \"\"\"Gathers clinical trials from clinicaltrials.gov for search term\n defined in build_url() function and downloads to specified file format.\n \"\"\"\n \n api_call = build_url(expr='Cancer', max_rnk=1, fmt='json')\n r = requests.get(api_call)\n data = r.json()\n n_studies = data['StudyFieldsResponse']['NStudiesFound']\n print(f'{n_studies} studies found.\\n')\n print('\\nGenerating request urls...')\n\n urls = []\n\n for i in range(1, n_studies, 1000):\n url = build_url(expr='Cancer', field_names=['EligibilityCriteria'],\n min_rnk=f'{i}', max_rnk=f'{i+999}',\n fmt='csv')\n urls.append(url)\n \n return urls\n\n\ndef download_trials():\n urls = generate_urls()\n \n print('\\n-----Downloading trials-----\\n')\n \n for url, i in zip(urls, trange(1, len(urls) + 1)):\n df = pd.read_csv(url, skiprows=9)\n df = df.drop(columns='Rank')\n df.to_csv(f'{studies_directory}/trial_set_{i}.csv', index=False)\n\n print('\\n-----Downloads complete-----\\n')\n \n\ndef write_txt():\n all_files = glob(f'{studies_directory}/*.csv')\n\n for file in all_files:\n data = []\n print(f'Working on file {file}')\n with open(file, 'r', encoding='utf-8', errors='ignore') as f:\n reader = csv.DictReader(f)\n for criteria in reader:\n c = criteria['EligibilityCriteria']\n c = c.replace('Inclusion Criteria:||', '')\n c = c.split('|')\n for i in c:\n data.append(f'\\n{i}')\n with open(f'{studies_directory}/criteria.txt', 'a+', encoding='utf-8', errors='ignore') as f:\n for item in data:\n f.write(item) \n\n print('\\n-----Process complete-----')\n\n \nif __name__=='__main__':\n main()\n\n \n# https://bioportal.bioontology.org/annotatorplus\n" ]
[ [ "pandas.read_csv" ] ]
ajain-23/open_spiel
[ "38941dee3beb52ffdb134b66f420a758634d9a20" ]
[ "open_spiel/python/algorithms/mccfr.py" ]
[ "# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Python base module for the implementations of Monte Carlo Counterfactual Regret Minimization.\"\"\"\n\nimport numpy as np\nfrom open_spiel.python import policy\n\nREGRET_INDEX = 0\nAVG_POLICY_INDEX = 1\n\n\nclass AveragePolicy(policy.Policy):\n \"\"\"A policy object representing the average policy for MCCFR algorithms.\"\"\"\n\n def __init__(self, game, player_ids, infostates):\n # Do not create a copy of the dictionary\n # but work on the same object\n super().__init__(game, player_ids)\n self._infostates = infostates\n\n def action_probabilities(self, state, player_id=None):\n \"\"\"Returns the MCCFR average policy for a player in a state.\n\n If the policy is not defined for the provided state, a uniform\n random policy is returned.\n\n Args:\n state: A `pyspiel.State` object.\n player_id: Optional, the player id for which we want an action. Optional\n unless this is a simultaneous state at which multiple players can act.\n\n Returns:\n A `dict` of `{action: probability}` for the specified player in the\n supplied state. If the policy is defined for the state, this\n will contain the average MCCFR strategy defined for that state.\n Otherwise, it will contain all legal actions, each with the same\n probability, equal to 1 / num_legal_actions.\n \"\"\"\n if player_id is None:\n player_id = state.current_player()\n legal_actions = state.legal_actions()\n info_state_key = state.information_state_string(player_id)\n retrieved_infostate = self._infostates.get(info_state_key, None)\n if retrieved_infostate is None:\n return {a: 1 / len(legal_actions) for a in legal_actions}\n avstrat = (\n retrieved_infostate[AVG_POLICY_INDEX] /\n retrieved_infostate[AVG_POLICY_INDEX].sum())\n return {legal_actions[i]: avstrat[i] for i in range(len(legal_actions))}\n\n\nclass MCCFRSolverBase(object):\n \"\"\"A base class for both outcome MCCFR and external MCCFR.\"\"\"\n\n def __init__(self, game):\n self._game = game\n self._infostates = {} # infostate keys -> [regrets, avg strat]\n self._num_players = game.num_players()\n\n def _lookup_infostate_info(self, info_state_key, num_legal_actions):\n \"\"\"Looks up an information set table for the given key.\n\n Args:\n info_state_key: information state key (string identifier).\n num_legal_actions: number of legal actions at this information state.\n\n Returns:\n A list of:\n - the average regrets as a numpy array of shape [num_legal_actions]\n - the average strategy as a numpy array of shape\n [num_legal_actions].\n The average is weighted using `my_reach`\n \"\"\"\n retrieved_infostate = self._infostates.get(info_state_key, None)\n if retrieved_infostate is not None:\n return retrieved_infostate\n\n # Start with a small amount of regret and total accumulation, to give a\n # uniform policy: this will get erased fast.\n self._infostates[info_state_key] = [\n np.ones(num_legal_actions, dtype=np.float64) / 1e6,\n np.ones(num_legal_actions, 
dtype=np.float64) / 1e6,\n ]\n return self._infostates[info_state_key]\n\n def _add_regret(self, info_state_key, action_idx, amount):\n self._infostates[info_state_key][REGRET_INDEX][action_idx] += amount\n\n def _add_avstrat(self, info_state_key, action_idx, amount):\n self._infostates[info_state_key][AVG_POLICY_INDEX][action_idx] += amount\n\n def average_policy(self):\n \"\"\"Computes the average policy, containing the policy for all players.\n\n Returns:\n An average policy instance that should only be used during\n the lifetime of solver object.\n \"\"\"\n return AveragePolicy(self._game, list(range(self._num_players)),\n self._infostates)\n\n def _regret_matching(self, regrets, num_legal_actions):\n \"\"\"Applies regret matching to get a policy.\n\n Args:\n regrets: numpy array of regrets for each action.\n num_legal_actions: number of legal actions at this state.\n\n Returns:\n numpy array of the policy indexed by the index of legal action in the\n list.\n \"\"\"\n positive_regrets = np.maximum(regrets,\n np.zeros(num_legal_actions, dtype=np.float64))\n sum_pos_regret = positive_regrets.sum()\n if sum_pos_regret <= 0:\n return np.ones(num_legal_actions, dtype=np.float64) / num_legal_actions\n else:\n return positive_regrets / sum_pos_regret\n" ]
[ [ "numpy.ones", "numpy.zeros" ] ]
callzhang/mmtracking
[ "52a2ed94297685d4fe47ee7ece18917961cc39f0" ]
[ "mmtrack/models/reid/linear_reid_head.py" ]
[ "import warnings\n\nimport torch.nn as nn\nfrom mmcls.models.builder import HEADS\nfrom mmcls.models.heads.base_head import BaseHead\nfrom mmcls.models.losses import Accuracy\nfrom mmcv.cnn import constant_init, normal_init\nfrom mmdet.models.builder import build_loss\n\nfrom .fc_module import FcModule\n\n\n@HEADS.register_module()\nclass LinearReIDHead(BaseHead):\n \"\"\"Linear head for re-identification.\n\n Args:\n num_fcs (int): Number of fcs.\n in_channels (int): Number of channels in the input.\n fc_channels (int): Number of channels in the fcs.\n out_channels (int): Number of channels in the output.\n norm_cfg (dict, optional): Configuration of normlization method\n after fc. Defaults to None.\n act_cfg (dict, optional): Configuration of activation method after fc.\n Defaults to None.\n num_classes (int, optional): Number of the identities. Default to None.\n loss (dict, optional): Cross entropy loss to train the\n re-identificaiton module.\n loss_pairwise (dict, optional): Triplet loss to train the\n re-identificaiton module.\n topk (int, optional): Calculate topk accuracy. Default to False.\n \"\"\"\n\n def __init__(self,\n num_fcs,\n in_channels,\n fc_channels,\n out_channels,\n norm_cfg=None,\n act_cfg=None,\n num_classes=None,\n loss=None,\n loss_pairwise=None,\n topk=(1, )):\n super(LinearReIDHead, self).__init__()\n assert isinstance(topk, (int, tuple))\n if isinstance(topk, int):\n topk = (topk, )\n for _topk in topk:\n assert _topk > 0, 'Top-k should be larger than 0'\n self.topk = topk\n\n if not loss:\n if isinstance(num_classes, int):\n warnings.warn('Since cross entropy is not set, '\n 'the num_classes will be ignored.')\n if not loss_pairwise:\n raise ValueError('Please choose at least one loss in '\n 'triplet loss and cross entropy loss.')\n elif not isinstance(num_classes, int):\n raise TypeError('The num_classes must be a current number, '\n 'if there is cross entropy loss.')\n self.loss_cls = build_loss(loss) if loss else None\n self.loss_triplet = build_loss(\n loss_pairwise) if loss_pairwise else None\n\n self.num_fcs = num_fcs\n self.in_channels = in_channels\n self.fc_channels = fc_channels\n self.out_channels = out_channels\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.num_classes = num_classes\n self.accuracy = Accuracy(topk=self.topk)\n\n self._init_layers()\n\n def _init_layers(self):\n \"\"\"Initialize fc layers.\"\"\"\n self.fcs = nn.ModuleList()\n for i in range(self.num_fcs):\n in_channels = self.in_channels if i == 0 else self.fc_channels\n self.fcs.append(\n FcModule(in_channels, self.fc_channels, self.norm_cfg,\n self.act_cfg))\n in_channels = self.in_channels if self.num_fcs == 0 else \\\n self.fc_channels\n self.fc_out = nn.Linear(in_channels, self.out_channels)\n if self.num_classes:\n self.bn = nn.BatchNorm1d(self.out_channels)\n self.classifier = nn.Linear(self.out_channels, self.num_classes)\n\n def init_weights(self):\n \"\"\"Initalize model weights.\"\"\"\n normal_init(self.fc_out, mean=0, std=0.01, bias=0)\n if self.num_classes:\n constant_init(self.bn, 1, bias=0)\n normal_init(self.classifier, mean=0, std=0.01, bias=0)\n\n def simple_test(self, x):\n \"\"\"Test without augmentation.\"\"\"\n for m in self.fcs:\n x = m(x)\n feats = self.fc_out(x)\n return feats\n\n def forward_train(self, x, gt_label):\n \"\"\"Model forward.\"\"\"\n for m in self.fcs:\n x = m(x)\n feats = self.fc_out(x)\n losses = dict()\n if self.loss_triplet:\n losses['triplet_loss'] = self.loss_triplet(feats, gt_label)\n if self.loss_cls:\n feats_bn = 
self.bn(feats)\n out = self.classifier(feats_bn)\n losses['ce_loss'] = self.loss_cls(out, gt_label)\n # compute accuracy\n acc = self.accuracy(out, gt_label)\n assert len(acc) == len(self.topk)\n losses['accuracy'] = {\n f'top-{k}': a\n for k, a in zip(self.topk, acc)\n }\n return losses\n" ]
[ [ "torch.nn.Linear", "torch.nn.BatchNorm1d", "torch.nn.ModuleList" ] ]
eayvali/DeepRL
[ "4722af0f75487dd3167faafd4eabe8f01aea4305" ]
[ "Monte Carlo Prediction Control/MC_Control_Blackjack.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 6 15:26:00 2019\n\nBlackjack:\nEach state is a 3-tuple of:\n the player's current sum ∈{0,1,…,31} ,\n the dealer's face up card ∈{1,…,10} , and\n whether or not the player has a usable ace (no=0 , yes=1 ).\nThe agent has two potential actions:\n STICK = 0\n HIT = 1\n\"\"\"\n\nimport sys\nimport gym\nimport numpy as np\nfrom collections import defaultdict\nfrom plot_utils import plot_blackjack_values, plot_policy\n\nclass Run_Tests:\n def test_mc_prediction(env): \n # obtain the action-value function\n Q = MS_prediction.mc_prediction_q(env, 500000, MS_prediction.generate_episode_from_limit_stochastic)\n \n # obtain the corresponding state-value function\n V_to_plot = dict((k,(k[0]>18)*(np.dot([0.8, 0.2],v)) + (k[0]<=18)*(np.dot([0.2, 0.8],v))) \\\n for k, v in Q.items())\n # plot the state-value function\n plot_blackjack_values(V_to_plot)\n \n def test_mc_control(env):\n # obtain the estimated optimal policy and action-value function\n policy, Q = MC_control.mc_control(env, 500000, 0.02) \n # obtain the corresponding state-value function\n V = dict((k,np.max(v)) for k, v in Q.items())\n \n # plot the state-value function\n plot_blackjack_values(V)\n # plot the policy\n plot_policy(policy)\n \nclass MS_prediction:\n def generate_episode_from_limit_stochastic(bj_env):\n #This policy selects action:STICK with 80% probability if the sum is greater than 18;\n # selects action HIT with 80% probability if the sum is 18 or below \n episode = []\n state = bj_env.reset()\n while True:\n probs = [0.8, 0.2] if state[0] > 18 else [0.2, 0.8]\n action = np.random.choice(np.arange(2), p=probs)\n next_state, reward, done, info = bj_env.step(action)\n episode.append((state, action, reward))\n state = next_state\n if done:\n break\n return episode\n\n def mc_prediction_q(env, num_episodes, generate_episode, gamma=0.9):\n #Implementation of every visit MC prediction\n returns_sum = defaultdict(lambda: np.zeros(env.action_space.n))\n N = defaultdict(lambda: np.zeros(env.action_space.n))\n Q = defaultdict(lambda: np.zeros(env.action_space.n))\n # loop over episodes\n for i_episode in range(1, num_episodes+1):\n # monitor progress\n if i_episode % 1000 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode, num_episodes), end=\"\")\n sys.stdout.flush()\n episode=generate_episode(env) #(state, action, reward)\n states, actions, rewards=zip(*episode)\n #discount the rewards\n discounts = np.array([gamma**i for i in range(len(rewards)+1)]) \n # update the sum of the returns, number of visits, and action-value \n # function estimates for each state-action pair in the episode\n for i, (state, action) in enumerate(zip(states, actions)):\n returns_sum[state][action] += sum(rewards[i:]*discounts[:-(1+i)])\n print(rewards[i:],discounts[:-(1+i)])\n N[state][action] += 1.0\n Q[state][action] = returns_sum[state][action] / N[state][action] \n return Q\n\nclass MC_control: \n def mc_control(env, num_episodes, alpha, gamma=1.0, eps_start=1.0, eps_decay=.99999, eps_min=0.1):\n \"\"\" arguments:\n env: An instance of an OpenAI Gym environment.\n num_episodes:number of episodes that are generated through agent-environment interaction\n alpha: Step-size\n gamma: Discount rate between (0,1](default value: 1).\n outputs:\n Q: A dictionary (of one-dimensional arrays) where Q[s][a] is the estimated action value corresponding to state s and action a.\n policy: A dictionary where policy[s] returns the action that the agent chooses after observing state s.\"\"\"\n \n nA = 
env.action_space.n\n Q = defaultdict(lambda: np.zeros(nA))\n epsilon = eps_start\n # loop over episodes\n for i_episode in range(1, num_episodes+1):\n # monitor progress\n if i_episode % 1000 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode, num_episodes), end=\"\")\n sys.stdout.flush()\n # set the value of epsilon\n epsilon = max(epsilon*eps_decay, eps_min)\n # generate an episode by following epsilon-greedy policy\n episode = MC_control.generate_episode_from_Q(env, Q, epsilon, nA)\n # update the action-value function estimate using the episode\n Q = MC_control.update_Q(env, episode, Q, alpha, gamma)\n # determine the policy corresponding to the final action-value function estimate\n policy = dict((k,np.argmax(v)) for k, v in Q.items())\n return policy, Q\n\n def generate_episode_from_Q(env,Q,epsilon,nA):\n \"\"\" generates an episode from following the epsilon-greedy policy \"\"\"\n episode = []\n state = env.reset()\n while True:\n probs=MC_control.get_action_probs(Q[state], epsilon, nA) #Q[state] is the action row corresponding to the state\n action = np.random.choice(np.arange(nA), p=probs) if state in Q else env.action_space.sample()\n next_state, reward, done, info = env.step(action)\n episode.append((state, action, reward))\n state = next_state\n if done:\n break\n return episode\n\n\n def get_action_probs(Q_s, epsilon, nA):\n \"\"\" obtains the action probabilities corresponding to epsilon-greedy policy\n Q_s is a (1 x nA) row of possible actions corresponding to the state\"\"\"\n policy_s = np.ones(nA) * epsilon / nA\n best_a = np.argmax(Q_s)#return idx of max val\n policy_s[best_a] = 1 - epsilon + (epsilon / nA)\n return policy_s\n \n def update_Q(env, episode, Q, alpha, gamma):\n \"\"\" updates the action-value function estimate using the most recent episode \"\"\"\n states, actions, rewards=zip(*episode)\n #discount the rewards\n discounts = np.array([gamma**i for i in range(len(rewards)+1)]) \n # update the sum of the returns, number of visits, and action-value \n # function estimates for each state-action pair in the episode\n for i, (state, action) in enumerate(zip(states, actions)):\n Q_prev=Q[state][action]\n #calculate the difference between current estimated and sampled return\n sampled_return=sum(rewards[i:]*discounts[:-(1+i)]) \n diff_return= sampled_return-Q_prev\n Q[state][action] = Q_prev+alpha*(diff_return) \n return Q \n\n\n\nenv = gym.make('Blackjack-v0')\n#run tests\n#Run_Tests.test_mc_prediction(env)\nRun_Tests.test_mc_control(env)\n\n\n " ]
[ [ "numpy.max", "numpy.dot", "numpy.zeros", "numpy.ones", "numpy.arange", "numpy.argmax" ] ]
PFTL/py4lab
[ "e1ba563c3ffa14e85710c42c65a2975924060369" ]
[ "ch_07/PythonForTheLab/Model/experiment.py" ]
[ "import threading\nfrom datetime import datetime\nimport numpy as np\nimport os\nfrom time import sleep\nimport yaml\nfrom PythonForTheLab import ur\n\nclass Experiment:\n def __init__(self, config_file):\n self.config_file = config_file\n self.is_running = False # Variable to check if the scan is running\n\n def load_config(self):\n with open(self.config_file, 'r') as f:\n data = yaml.load(f, Loader=yaml.FullLoader)\n self.config = data\n\n def load_daq(self):\n name = self.config['DAQ']['name']\n port = self.config['DAQ']['port']\n if name == 'DummyDaq':\n from PythonForTheLab.Model.dummy_daq import DummyDaq\n self.daq = DummyDaq(port)\n\n elif name == 'AnalogDaq':\n from PythonForTheLab.Model.analog_daq import AnalogDaq\n self.daq = AnalogDaq(port)\n\n else:\n raise Exception('The daq specified is not yet supported')\n\n self.daq.initialize()\n\n def do_scan(self):\n if self.is_running:\n print('Scan already running')\n return\n self.is_running = True\n start = ur(self.config['Scan']['start']).m_as('V')\n stop = ur(self.config['Scan']['stop']).m_as('V')\n num_steps = int(self.config['Scan']['num_steps'])\n delay = ur(self.config['Scan']['delay'])\n self.scan_range = np.linspace(start, stop, num_steps) * ur('V')\n self.scan_data = np.zeros(num_steps) * ur('V')\n i = 0\n self.keep_running = True\n for volt in self.scan_range:\n if not self.keep_running:\n break\n self.daq.set_voltage(self.config['Scan']['channel_out'], volt)\n measured_voltage = self.daq.get_voltage(self.config['Scan']['channel_in'])\n self.scan_data[i] = measured_voltage\n i += 1\n sleep(delay.m_as('s'))\n self.is_running = False\n\n def start_scan(self):\n self.scan_thread = threading.Thread(target=self.do_scan)\n self.scan_thread.start()\n\n def stop_scan(self):\n self.keep_running = False\n\n def save_data(self):\n data_folder = self.config['Saving']['folder']\n today_folder = f'{datetime.today():%Y-%m-%d}'\n saving_folder = os.path.join(data_folder, today_folder)\n if not os.path.isdir(saving_folder):\n os.makedirs(saving_folder)\n\n data = np.vstack([self.scan_range, self.scan_data]).T\n header = \"Scan range in 'V', Scan Data in 'V'\"\n\n filename = self.config['Saving']['filename']\n base_name = filename.split('.')[0]\n ext = filename.split('.')[-1]\n i = 1\n while os.path.isfile(os.path.join(saving_folder, f'{base_name}_{i:04d}.{ext}')):\n i += 1\n data_file = os.path.join(saving_folder, f'{base_name}_{i:04d}.{ext}')\n metadata_file = os.path.join(saving_folder, f'{base_name}_{i:04d}_metadata.yml')\n np.savetxt(data_file, data.m_as('V'), header=header)\n with open(metadata_file, 'w') as f:\n f.write(yaml.dump(self.config, default_flow_style=False))\n\n def finalize(self):\n print('Finalizing Experiment')\n self.stop_scan()\n while self.is_running:\n sleep(.1)" ]
[ [ "numpy.linspace", "numpy.vstack", "numpy.zeros" ] ]
bibinwils/metrics
[ "e1c3fda24f90367803c2b04315ad7c8bced719db" ]
[ "torchmetrics/regression/mean_squared_error.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Optional\n\nimport torch\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.regression.mean_squared_error import (\n _mean_squared_error_compute,\n _mean_squared_error_update,\n)\nfrom torchmetrics.metric import Metric\n\n\nclass MeanSquaredError(Metric):\n r\"\"\"\n Computes `mean squared error`_ (MSE):\n\n .. math:: \\text{MSE} = \\frac{1}{N}\\sum_i^N(y_i - \\hat{y_i})^2\n\n Where :math:`y` is a tensor of target values, and :math:`\\hat{y}` is a tensor of predictions.\n\n Args:\n compute_on_step:\n Forward only calls ``update()`` and return None if this is set to False. default: True\n dist_sync_on_step:\n Synchronize metric state across processes at each ``forward()``\n before returning the value at the step. default: False\n process_group:\n Specify the process group on which synchronization is called. default: None (which selects the entire world)\n squared:\n If True returns MSE value, if False returns RMSE value.\n\n Example:\n >>> from torchmetrics import MeanSquaredError\n >>> target = torch.tensor([2.5, 5.0, 4.0, 8.0])\n >>> preds = torch.tensor([3.0, 5.0, 2.5, 7.0])\n >>> mean_squared_error = MeanSquaredError()\n >>> mean_squared_error(preds, target)\n tensor(0.8750)\n\n \"\"\"\n is_differentiable = True\n sum_squared_error: Tensor\n total: Tensor\n\n def __init__(\n self,\n compute_on_step: bool = True,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n dist_sync_fn: Callable = None,\n squared: bool = True,\n ) -> None:\n super().__init__(\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n dist_sync_fn=dist_sync_fn,\n )\n\n self.add_state(\"sum_squared_error\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n self.squared = squared\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n sum_squared_error, n_obs = _mean_squared_error_update(preds, target)\n\n self.sum_squared_error += sum_squared_error\n self.total += n_obs\n\n def compute(self) -> Tensor:\n \"\"\"Computes mean squared error over state.\"\"\"\n return _mean_squared_error_compute(self.sum_squared_error, self.total, squared=self.squared)\n" ]
[ [ "torch.tensor" ] ]
Jiahao000/ORL
[ "2ad64f7389d20cb1d955792aabbe806a7097e6fb" ]
[ "tools/selective_search.py" ]
[ "import argparse\nimport importlib\nimport os\nimport os.path as osp\nimport time\nimport json\n\nimport mmcv\nimport torch\nfrom mmcv.runner import get_dist_info, init_dist\n\nfrom openselfsup.datasets import build_dataloader, build_dataset\nfrom openselfsup.models import build_model\nfrom openselfsup.utils import (get_root_logger, traverse_replace, print_log)\n\n\ndef nondist_single_forward_collect(func, data_loader, length):\n \"\"\"Forward and collect network outputs.\n\n This function performs forward propagation and collects outputs.\n It can be used to collect results, features, losses, etc.\n\n Args:\n func (function): The function to process data. The output must be\n a dictionary of CPU tensors.\n length (int): Expected length of output arrays.\n\n Returns:\n results_all (dict(list)): The concatenated outputs.\n \"\"\"\n results = []\n prog_bar = mmcv.ProgressBar(len(data_loader))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = func(**data)\n results.append(result)\n prog_bar.update()\n\n results_all = {}\n for k in results[0].keys():\n results_all[k] = [batch[k].squeeze().numpy().tolist() for batch in results]\n assert len(results_all[k]) == length\n return results_all\n\n\ndef dist_single_forward_collect(func, data_loader, rank, length):\n \"\"\"Forward and collect network outputs in a distributed manner.\n\n This function performs forward propagation and collects outputs.\n It can be used to collect results, features, losses, etc.\n\n Args:\n func (function): The function to process data. The output must be\n a dictionary of CPU tensors.\n rank (int): This process id.\n\n Returns:\n results_all (dict(list)): The concatenated outputs.\n \"\"\"\n results = []\n if rank == 0:\n prog_bar = mmcv.ProgressBar(len(data_loader))\n for idx, data in enumerate(data_loader):\n with torch.no_grad():\n result = func(**data) # dict{key: tensor}\n results.append(result)\n\n if rank == 0:\n prog_bar.update()\n\n results_all = {}\n for k in results[0].keys():\n results_list = [batch[k].squeeze().numpy().tolist() for batch in results]\n results_all[k] = results_list\n # assert len(results_all[k]) == length\n return results_all\n\n\ndef single_gpu_test(model, data_loader):\n model.eval()\n func = lambda **x: model(mode='test', **x)\n results = nondist_single_forward_collect(func, data_loader,\n len(data_loader.dataset))\n return results\n\n\ndef multi_gpu_test(model, data_loader):\n model.eval()\n func = lambda **x: model(mode='test', **x)\n rank, world_size = get_dist_info()\n results = dist_single_forward_collect(func, data_loader, rank,\n len(data_loader.dataset))\n return results\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Generate selective search region proposals')\n parser.add_argument('config', help='test config file path')\n parser.add_argument(\n 'output',\n type=str,\n help='output total selective search proposal json file')\n parser.add_argument(\n '--work_dir',\n type=str,\n default=None,\n help='the dir to save logs and models')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument('--port', type=int, default=29500,\n help='port only works when launcher==\"slurm\"')\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n\n return args\n\n\ndef main():\n args = parse_args()\n\n cfg = 
mmcv.Config.fromfile(args.config)\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n # update configs according to CLI args\n if args.work_dir is not None:\n if not os.path.exists(args.work_dir):\n os.makedirs(args.work_dir)\n cfg.work_dir = args.work_dir\n\n # check memcached package exists\n if importlib.util.find_spec('mc') is None:\n traverse_replace(cfg, 'memcached', False)\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n if args.launcher == 'slurm':\n cfg.dist_params['port'] = args.port\n init_dist(args.launcher, **cfg.dist_params)\n\n # logger\n timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n log_file = osp.join(cfg.work_dir, 'test_{}.log'.format(timestamp))\n logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)\n\n # build the dataloader\n dataset = build_dataset(cfg.data.val)\n data_loader = build_dataloader(\n dataset,\n imgs_per_gpu=cfg.data.imgs_per_gpu,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n\n # build the model\n model = build_model(cfg.model)\n\n if not distributed:\n outputs = single_gpu_test(model, data_loader)\n else:\n outputs = multi_gpu_test(model, data_loader) # dict{key: list}\n\n rank, _ = get_dist_info()\n if rank == 0:\n out = dataset.evaluate(**outputs)\n with open(args.output, 'w') as f:\n json.dump(out, f)\n print_log(\"Selective search proposal json file has been saved to: {}\".format(\n args.output), logger=logger)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.no_grad" ] ]
stefangachter/manif
[ "a4ba3df4f793fdce37c98b2cf9778321f9cea7c9" ]
[ "examples/se_2_3_localization.py" ]
[ "r\"\"\"\n\n\\file se_2_3_localization.py.\n\nCreated on: Jan 11, 2021\n\\author: Jeremie Deray\n\n---------------------------------------------------------\nThis file is:\n(c) 2021 Jeremie Deray\n\nThis file is part of `manif`, a C++ template-only library\nfor Lie theory targeted at estimation for robotics.\nManif is:\n(c) 2021 Jeremie Deray\n---------------------------------------------------------\n\n---------------------------------------------------------\n3D Robot localization and linear velocity estimation\nbased on strap-down IMU model and fixed beacons.\n\n---------------------------------------------------------\n\nWe consider a robot in 3D space surrounded by a small\nnumber of punctual landmarks or _beacons_.\nThe robot is assumed to be mounted with an IMU whose\nmeasurements are fed as exogeneous inputs to the system.\nThe robot is able to measure the location\nof the beacons w.r.t its own reference frame.\nWe assume in this example that the IMU frame coincides with the robot frame.\n\nThe robot extended pose X is in SE_2(3) and the beacon positions b_k in R^3,\n\n X = | R p v| // position, orientation and linear velocity\n | 1 |\n | 1|\n\n b_k = (bx_k, by_k, bz_k) // lmk coordinates in world frame\n\n alpha_k = (alphax_k, alphay_k, alphaz_k) // linear accelerometer measurements in IMU frame\n\n omega_k = (omegax_k, omegay_k, omegaz_k) // gyroscope measurements in IMU frame\n\n g = (0, 0, -9.80665) // acceleration due to gravity in world frame\n\nConsider robot coordinate frame B and world coordinate frame A.\n- p is the position of the origin of the robot frame B with respect to the world frame A\n- R is the orientation of the robot frame B with respect to the world frame A\n- v is the velocity of the robot frame with respect to the world frame,\n expressed in a frame whose origin coincides with the robot frame,\n oriented similar to the world frame\n (it is equivalent to p_dot in continuous time.\n This is usually called mixed-frame representation\n and is denoted as (B[A] v_AB),\n where B[A] is the mixed frame as described above.\n For reference, please see \"Multibody Dynamics Notation\" by\n Silvio Traversaro and Alessandro Saccon.\n Link: https://research.tue.nl/en/publications/multibody-dynamics-notation-version-2)\n- a is the frame acceleration in mixed-representation (equivalent to p_doubledot in continuous time).\n- omega_b as the angular velocity of the robot expressed in the robot frame\n\nThe kinematic equations (1) can be written as,\n p <-- p + v dt + 0.5 a dt^2\n R <-- R Exp_SO3(omega_b)\n v <-- v + a dt\n\nHowever, we would like to express the kinematics equations in the form,\nX <-- X * Exp(u)\nwhere, X \\in SE_2(3), u \\in R^9 and u_hat \\in se_2(3)\nNote that here input vector u is expressed in the local frame (robot frame).\nThis can be seen as a motion integration on a manifold defined by the group SE_2(3).\n\nThe exponential mapping of SE_2(3) is defined as,\nfor u = [u_p, u_w, u_v]\n Exp(u) = | Exp_SO3(u_w) JlSO3(u_w) u_p JlSO3(u_w) u_v |\n | 0 0 0 1 0 |\n | 0 0 0 0 1 |\nwhere, JlSO3 is the left Jacobian of the SO(3) group.\n\nPlease see the Appendix C of the paper\n\"A micro Lie theory for state estimation in robotics\",\nfor the definition of the left Jacobian of SO(3).\nPlease see the Appendix D of the paper,\nfor the definition of Exp map for SE(3).\nThe Exp map of SE_2(3) is a simple extension from the Exp map of SE(3).\nAlso, please refer to Example 7 of the paper to understand\nwhen and how the left Jacobian of SO(3) appears in the definitions 
of Exp maps.\nExample 7 illustrates the scenario for SE(3).\nWe use a direct extension here for SE_2(3).\nOne can arrive at such a definition by following\nthe convergent Taylor's series expansion\nfor the matrix exponential of\nthe Lie algebra element (Equation 16 of the paper).\n\nAs a result of X <-- X * Exp(u), we get (2)\n p <-- p + R JlSO3(u_w) u_p\n R <-- R Exp_SO3(u_w)\n v <-- v + R JlSO3(u_w) u_v\n\nIt is important to notice the subtle difference between (1) and (2) here,\nwhich is specifically the influence of the left Jacobian of SO(3) in (2).\nThe approach in (1) considers that the motion integration is done by defining\nthe exponential map in R3xSO(3)xR3 instead of SE_2(3),\nin the sense explored in Example 7 of the Micro Lie theory paper.\nIt must be noted that as dt tends to 0,\nboth sets of equations (1) and (2) tend to be the same,\nsince JlSO3 tends to identity.\n\nSince (2) exploits the algebra of the SE_2(3) group properly,\nwe would like to draw a relationship between the sets of equations (2)\nand the IMU measurements which will constitute\nthe exogenous input vector u \\in se_2(3).\n\nConsidering R.T as the transpose of R, the IMU measurements are modeled as,\n - linear accelerometer measurements alpha = R.T (a - g) + w_acc\n - gyroscope measurements omega = omega_b + w_omega\nNote that the IMU measurements are expressed in the IMU frame\n(coincides with the robot frame - assumption).\nThe IMU measurements are corrupted by noise,\n - w_omega is the additive white noise affecting the gyroscope measurements\n - w_acc is the additive white noise affecting\n the linear accelerometer measurements\nIt must be noted that we do not consider IMU biases\nin the IMU measurement model in this example.\n\nTaking into account all of the above considerations,\nthe exogenous input vector u (3) becomes,\n u = (u_p, u_w, u_v) where,\n u_w = omega dt\n u_p = R.T v dt + 0.5 dt^2 (alpha + R.T g)\n u_v = (alpha + R.T g) dt\n\nThis choice of input vector allows us to directly use measurements from the IMU\nfor a unified motion integration involving position,\norientation and linear velocity of the robot using SE_2(3).\nEquations (2) and (3) lead us to the following evolution equations,\n\n p <-- p + JlSO3 R.T v dt + 0.5 JlSO3 (alpha + R.T g) dt^2\n R <-- R Exp_SO3(omega dt)\n v <-- v + JlSO3 (alpha + R.T g) dt\n\nThe system propagation noise covariance matrix becomes,\n U = diagonal(0, 0, 0,\n sigma_omegax^2, sigma_omegay^2, sigma_omegaz^2,\n sigma_accx^2, sigma_accy^2, sigma_accz^2).\n\nAt the arrival of an exogenous input u, the robot pose is updated\nwith X <-- X * Exp(u) = X + u.\n\nLandmark measurements are of the range and bearing type,\nthough they are put in Cartesian form for simplicity.\nTheir noise n is zero mean Gaussian, and is specified\nwith a covariance matrix R.\nWe notice that the SE_2(3) action is the same as a\nrigid motion action of SE(3).\nThis is the action of X \\in SE_2(3) on a 3-d point b \\in R^3 defined as,\nX b = R b + p\n\nThus, the landmark measurements can be expressed as\na group action on 3d points,\ny = h(X,b) = X^-1 * b\n\n y_k = (brx_k, bry_k, brz_k) // lmk coordinates in robot frame\n\nWe consider the beacons b_k situated at known positions.\nWe define the extended pose to estimate as X in SE_2(3).\nThe estimation error dx and its covariance P are expressed\nin the tangent space at X.\n\nAll these variables are summarized again as follows\n\n X : robot's extended pose, SE_2(3)\n u : robot control input, u = u(X, y_imu) \\in se_2(3) with X as state 
and\n y_imu = [alpha, omega] as IMU readings, see Eq. (3)\n U : control perturbation covariance\n b_k : k-th landmark position, R^3\n y : Cartesian landmark measurement in robot frame, R^3\n R : covariance of the measurement noise\n\nThe motion and measurement models are\n\n X_(t+1) = f(X_t, u) = X_t * Exp ( u ) // motion equation\n y_k = h(X, b_k) = X^-1 * b_k // measurement equation\n\nThe algorithm below comprises first a simulator to\nproduce measurements, then uses these measurements\nto estimate the state, using a Lie-based error-state Kalman filter.\n\nThis file has plain code with only one main() function.\nThere are no function calls other than those involving `manif`.\n\nPrinting simulated state and estimated state together\nwith an unfiltered state (i.e. without Kalman corrections)\nallows for evaluating the quality of the estimates.\n\nA side note: Besides the approach described here in this illustration example,\nthere are other interesting works like the paper,\nThe Invariant Extended Kalman filter as a stable observer\n(https://arxiv.org/pdf/1410.1465.pdf)\nwhich assume a specific structure for the\nsystem propagation dynamics \"f(X_t, u)\" (group affine dynamics)\nthat simplifies the covariance propagation and\nenables error dynamics with stronger convergence properties.\n\"\"\"\n\n\nfrom manifpy import SE_2_3, SE_2_3Tangent\n\nimport numpy as np\nfrom numpy.linalg import inv\n\n\nVector = np.array\n\n\ndef Covariance():\n return np.zeros((SE_2_3.DoF, SE_2_3.DoF))\n\n\ndef Jacobian():\n return np.zeros((SE_2_3.DoF, SE_2_3.DoF))\n\n\ndef random(dim):\n \"\"\"Random vector Rdim in [-1, 1].\"\"\"\n return np.random.uniform([-1]*dim, [1]*dim)\n\n\ndef skew(vec):\n mat = np.zeros((3, 3))\n mat[0, 1] = -vec[2]\n mat[0, 2] = +vec[1]\n mat[1, 0] = +vec[2]\n mat[1, 2] = -vec[0]\n mat[2, 0] = -vec[1]\n mat[2, 1] = +vec[0]\n return np.copy(mat)\n\n\ndef frange(start, stop, step):\n return [\n x*step+start for x in range(\n 0,\n round(abs((stop-start)/step)+0.5001),\n int((stop-start)/step < 0)*-2+1\n )\n ]\n\n\nif __name__ == '__main__':\n\n # START CONFIGURATION\n\n NUMBER_OF_LMKS_TO_MEASURE = 5\n\n # Define the robot pose element and its covariance\n X_simulation = SE_2_3.Identity()\n X = SE_2_3.Identity()\n X_unfiltered = SE_2_3.Identity()\n P = Covariance()\n P[0:3, 0:3] = np.diagflat([0.001, 0.001, 0.001])\n P[3:6, 3:6] = np.diagflat([0.01, 0.01, 0.01])\n P[6:9, 6:9] = np.diagflat([0.001, 0.001, 0.001])\n\n print(\"P: \", P)\n\n # acceleration due to gravity in world frame\n g = Vector([0.0, 0.0, -9.80665])\n dt = 0.01\n\n alpha_const = Vector([0.1, 0.01, 0.1]) # constant acceleration in IMU frame without gravity compensation\n omega = Vector([0.01, 0.1, 0.0]) # constant angular velocity about x- and y-direction in IMU frame\n\n # Previous IMU measurements in IMU frame initialized\n # to values expected when stationary\n alpha_prev = alpha_const - X_simulation.rotation().transpose() @ g\n alpha = alpha_const - X_simulation.rotation().transpose() @ g\n omega_prev = Vector([0.0, 0.0, 0.0])\n\n u_nom = Vector([0.0] * 9)\n u_est = Vector([0.0] * 9)\n u_sigmas = Vector([0.0, 0.0, 0.0, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])\n U = np.diagflat(np.square(u_sigmas))\n\n # Declare the Jacobians of the motion wrt robot and control\n F = Jacobian() # F = J_x_x + (J_x_u * J_u_x)\n J_x_x = Jacobian() # d(X * exp(u)) / dX\n J_x_u = Jacobian() # d(X * exp(u)) / du\n J_u_x = Jacobian() # du / dX, since u is a state-dependent vector\n\n # Define five landmarks in R^3\n landmarks = []\n 
landmarks.append(Vector([2.0, 0.0, 0.0]))\n landmarks.append(Vector([3.0, -1.0, -1.0]))\n landmarks.append(Vector([2.0, -1.0, 1.0]))\n landmarks.append(Vector([2.0, 1.0, 1.0]))\n landmarks.append(Vector([2.0, 1.0, -1.0]))\n\n # Define the beacon's measurements\n measurements = [Vector([0.0, 0.0, 0.0])] * NUMBER_OF_LMKS_TO_MEASURE\n\n y_sigmas = Vector([0.01, 0.01, 0.01])\n R = np.diagflat(np.square(y_sigmas))\n\n # Declare some temporaries\n J_xi_x = Jacobian()\n J_e_xi = np.zeros((SE_2_3.Dim, SE_2_3.DoF))\n\n # CONFIGURATION DONE\n\n # pretty print\n np.set_printoptions(precision=3, suppress=True, linewidth=160)\n\n # DEBUG\n print('X STATE : X Y Z TH_x TH_y TH_z ')\n print('-------------------------------------------------------')\n print('X initial : ', X_simulation.log().coeffs())\n print('-------------------------------------------------------')\n\n # END DEBUG\n\n # START TEMPORAL LOOP\n\n # Make 10/dt steps. Measure up to three landmarks each time.\n for t in frange(0, 10, dt):\n # I. Simulation\n\n # get current simulated state and measurements from previous step\n R_k = X_simulation.rotation()\n v_k = X_simulation.linearVelocity()\n acc_k = alpha_prev + R_k.transpose() @ g\n\n # input vector\n u_nom[0:3] = dt * R_k.transpose() @ v_k + 0.5 * dt * dt * acc_k\n u_nom[3:6] = dt * omega_prev\n u_nom[6:9] = dt * acc_k\n\n # simulate noise\n u_noise = u_sigmas * random(SE_2_3.DoF) # control noise\n u_noisy = u_nom + u_noise # noisy control\n\n u_simu = SE_2_3Tangent(u_nom)\n u_unfilt = SE_2_3Tangent(u_noisy)\n\n # first we move\n X_simulation = X_simulation + u_simu # X * exp(u)\n # update expected IMU measurements after moving\n alpha = alpha_const - X_simulation.rotation().transpose() @ g\n\n # then we measure all landmarks\n for i in range(NUMBER_OF_LMKS_TO_MEASURE):\n b = landmarks[i] # lmk coordinates in world frame\n\n # simulate noise\n y_noise = y_sigmas * random(SE_2_3.Dim) # measurement noise\n\n y = X_simulation.inverse().act(b) # landmark measurement, before adding noise\n\n y = y + y_noise # landmark measurement, noisy\n measurements[i] = y # store for the estimator just below\n\n # II. Estimation\n\n # get current state estimate to build\n # the state-dependent control vector\n R_k_est = X.rotation()\n v_k_est = X.linearVelocity()\n acc_k_est = alpha_prev + R_k_est.transpose() @ g\n\n accLin = dt * R_k_est.transpose() @ v_k_est + 0.5 * dt * dt * acc_k_est\n gLin = R_k_est.transpose() @ g * dt\n accLinCross = skew(accLin)\n gCross = skew(gLin)\n\n u_est[0:3] = accLin\n u_est[3:6] = dt * omega_prev\n u_est[6:9] = dt * acc_k_est\n\n u_est += u_noise\n\n # First we move\n\n X = X.plus(SE_2_3Tangent(u_est), J_x_x, J_x_u) # X * exp(u), with Jacobians\n\n # Prepare Jacobian of state-dependent control vector\n J_u_x[0:3, 3:6] = accLinCross\n J_u_x[0:3, 6:9] = np.identity(3) * dt\n J_u_x[6:9, 3:6] = gCross\n F = J_x_x + J_x_u @ J_u_x # chain rule for system model Jacobian\n\n P = F @ P @ F.transpose() + J_x_u @ U @ J_x_u.transpose()\n\n # Then we correct using the measurements of each lmk\n for i in range(NUMBER_OF_LMKS_TO_MEASURE):\n # landmark\n b = landmarks[i] # lmk coordinates in world frame\n\n # measurement\n y = measurements[i] # lmk measurement, noisy\n\n # expectation\n e = X.inverse(J_xi_x).act(b, J_e_xi) # note: e = R.tr * ( b - t ), for X = (R,t).\n H = J_e_xi @ J_xi_x # Jacobian of the measurements wrt the robot pose. 
note: H = J_e_x = J_e_xi * J_xi_x\n E = H @ P @ H.transpose()\n\n # innovation\n z = y - e\n Z = E + R\n\n # Kalman gain\n K = P @ H.transpose() @ inv(Z) # K = P * H.tr * ( H * P * H.tr + R).inv\n\n # Correction step\n dx = K @ z # dx is in the tangent space at X\n\n # Update\n X = X + SE_2_3Tangent(dx) # overloaded X.rplus(dx) = X * exp(dx)\n\n P = P - K @ Z @ K.transpose()\n\n # III. Unfiltered\n\n # move also an unfiltered version for comparison purposes\n X_unfiltered = X_unfiltered + u_unfilt\n\n alpha_prev = np.copy(alpha)\n omega_prev = np.copy(omega)\n\n # IV. Results\n\n # DEBUG\n print('X simulated : ', X_simulation.log().coeffs().transpose())\n print('X estimated : ', X.log().coeffs().transpose())\n print('X unfiltered: ', X_unfiltered.log().coeffs().transpose())\n print('-------------------------------------------------------')\n # END DEBUG\n" ]
[ [ "numpy.square", "numpy.diagflat", "numpy.zeros", "numpy.set_printoptions", "numpy.copy", "numpy.identity", "numpy.random.uniform", "numpy.linalg.inv" ] ]
brandonjbryant/classification-project-final
[ "c15c096d84bc14cfce9e1c8548e3c69154311bc7" ]
[ "acquire.py" ]
[ "import pandas as pd\nimport numpy as np\nimport os\n\n# acquire\nfrom env import host, user, password\nfrom pydataset import data\n\n\n\ndef get_connection(db, user=user, host=host, password=password):\n '''\n This function uses my info from my env file to\n create a connection url to access the Codeup db.\n '''\n return f'mysql+pymysql://{user}:{password}@{host}/{db}' \n \n\n# Telco Database \n \ndef new_telco_data():\n '''\n This function reads in the titanic data from the Codeup db\n and returns a pandas DataFrame with all columns.\n '''\n sql_query = '''\n SELECT *\n FROM customers\n JOIN contract_types USING(`contract_type_id`)\n JOIN internet_service_types USING(`internet_service_type_id`)\n JOIN payment_types USING(payment_type_id);\n '''\n \n return pd.read_sql(sql_query, get_connection('telco_churn'))\n \n \n \ndef get_telco_data(cached=False):\n '''\n This function reads in telco_churn data from Codeup database and writes data to\n a csv file if cached == False or if cached == True reads in titanic df from\n a csv file, returns df.\n '''\n if cached == False or os.path.isfile('telco_churn.csv') == False:\n \n # Read fresh data from db into a DataFrame.\n df = new_telco_data()\n \n # Write DataFrame to a csv file.\n df.to_csv('telco_churn.csv')\n \n else:\n \n # If csv file exists or cached == True, read in data from csv.\n df = pd.read_csv('telco_churn.csv', index_col=0)\n \n return df" ]
[ [ "pandas.read_csv" ] ]
aliPMPAINT/fakecam
[ "7e5053b06083531dba96ffce44657402e2d52223" ]
[ "fakecam/fakecam/capture.py" ]
[ "import os\nimport signal\n\nimport cv2\nimport numpy as np\nfrom multiprocessing import Queue\n\nfrom .pyfakewebcam import FakeWebcam\nfrom .types import QueueDict\nfrom .bodypix_functions import scale_and_crop_to_input_tensor_shape, to_mask_tensor\n\nFHD = (1080, 1920)\nHD = (720, 1280)\nNTSC = (480, 720)\n\n\n# if cv2.ocl.haveOpenCL():\n# cv2.ocl.setUseOpenCL(True)\n\ncvNet = cv2.dnn.readNetFromTensorflow(os.path.join(os.path.dirname(__file__), 'model.pb'))\n\noutput_stride = 16\ninternal_resolution = 0.5\nmultiplier = 0.5\n\n\ndef get_mask(frame, height, width):\n blob = cv2.dnn.blobFromImage(frame,\n size=(width, height), scalefactor=1/255, mean=(1.0, 1.0, 1.0),\n swapRB=True, crop=False)\n cvNet.setInput(blob)\n results = np.squeeze(cvNet.forward(\"float_segments/conv\"))\n\n segment_logits = cv2.UMat(results)\n scaled_segment_scores = scale_and_crop_to_input_tensor_shape(\n segment_logits, height, width, True\n )\n mask = to_mask_tensor(scaled_segment_scores, 0.75)\n return mask\n\n\ndef post_process_mask(mask):\n mask = cv2.dilate(mask, np.ones((10, 10), np.uint8), iterations=1)\n mask = cv2.blur(mask, (30, 30))\n return mask\n\n\ndef shift_image(img, dx, dy):\n img = np.roll(img, dy, axis=0)\n img = np.roll(img, dx, axis=1)\n if dy > 0:\n img[:dy, :] = 0\n elif dy < 0:\n img[dy:, :] = 0\n if dx > 0:\n img[:, :dx] = 0\n elif dx < 0:\n img[:, dx:] = 0\n return img\n\n\ndef hologram_effect(img):\n # add a blue tint\n holo = cv2.applyColorMap(img.get(), cv2.COLORMAP_WINTER)\n # add a halftone effect\n band_length, band_gap = 2, 3\n for y in range(holo.shape[0]):\n if y % (band_length + band_gap) < band_length:\n holo[y, :, :] = holo[y, :, :] * np.random.uniform(0.1, 0.3)\n # add some ghosting\n holo_blur = cv2.addWeighted(holo, 0.2, shift_image(holo.copy(), 5, 5), 0.8, 0)\n holo_blur = cv2.addWeighted(holo_blur, 0.4, shift_image(holo.copy(), -5, -5), 0.6, 0)\n # combine with the original color, oversaturated\n out = cv2.addWeighted(img, 0.5, holo_blur, 0.6, 0)\n return out\n\n\ndef get_frame(cap: object, background: object = None, use_hologram: bool = False, height=0, width=0) -> object:\n if not cap.grab():\n print(\"ERROR: could not read from camera!\")\n return None\n\n frame = cv2.UMat(cap.retrieve()[1])\n mask = get_mask(frame, height=height, width=width)\n\n # post-process mask and frame\n mask = post_process_mask(mask)\n\n if background is None:\n background = cv2.GaussianBlur(frame, (221, 221), sigmaX=20, sigmaY=20)\n\n if use_hologram:\n frame = hologram_effect(frame)\n\n # composite the foreground and background\n mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)\n\n ones = np.ones((height, width, 3))\n inv_mask = cv2.subtract(ones, mask, dtype=cv2.CV_32F)\n\n mask_mult = cv2.multiply(frame, mask, dtype=cv2.CV_32F)\n inv_mask_mult = cv2.multiply(background, inv_mask, dtype=cv2.CV_32F)\n\n frame = cv2.add(mask_mult, inv_mask_mult)\n return frame\n\n\ndef start(queue: \"Queue[QueueDict]\" = None, camera: str = \"/dev/video0\", background: str = None,\n use_hologram: bool = False, use_mirror: bool = False, resolution: (int,int) = None):\n # setup access to the *real* webcam\n print(\"Starting capture using device: {camera}\".format(camera=camera))\n cap = cv2.VideoCapture(camera, cv2.CAP_V4L2)\n if not cap.isOpened():\n print(\"Failed to open {camera}\".format(camera=camera))\n return\n\n orig_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n orig_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n if resolution is not None:\n # resolution is supplied by user in width followed by 
height order\n (width, height) = resolution\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\n else:\n width, height = orig_width, orig_height\n\n print(\"Resolution: {width}:{height}\".format(width=width,height=height))\n\n # for (height, width) in [FHD, HD, NTSC, (orig_height, orig_width)]:\n # # Attempt to set the camera resolution via brute force\n # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n # cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\n # if cap.get(cv2.CAP_PROP_FRAME_HEIGHT) == height:\n # break\n\n # setup the fake camera\n fake = FakeWebcam(\"/dev/video20\", width, height)\n\n # load the virtual background\n background_scaled = None\n greenscreen_array = np.zeros((height, width, 3), np.uint8)\n greenscreen_array[:] = (0, 177, 64)\n greenscreen_image = cv2.UMat(greenscreen_array)\n if background == \"greenscreen\":\n background_scaled = greenscreen_image\n elif background is not None and os.path.isfile(background) and os.access(background, os.R_OK):\n background_data = cv2.UMat(cv2.imread(background))\n background_scaled = cv2.resize(background_data, (width, height))\n\n # frames forever\n while True:\n frame = get_frame(cap, background=background_scaled, use_hologram=use_hologram, height=height, width=width)\n if frame is None:\n print(\"ERROR: could not read from camera!\")\n break\n\n if use_mirror is True:\n frame = cv2.flip(frame, 1)\n # fake webcam expects RGB\n # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n fake.schedule_frame(frame)\n if queue is not None and not queue.empty():\n data = queue.get(False)\n\n if data[\"background\"] is None:\n background_scaled = None\n elif data[\"background\"] == \"greenscreen\":\n background_scaled = greenscreen_image\n else:\n background = data[\"background\"]\n background_data = cv2.UMat(cv2.imread(background))\n background_scaled = cv2.resize(background_data, (width, height))\n\n use_hologram = data[\"hologram\"]\n use_mirror = data[\"mirror\"]\n" ]
[ [ "numpy.ones", "numpy.roll", "numpy.zeros", "numpy.random.uniform" ] ]
neelguha/tensorflow_constrained_optimization
[ "46b34d1c2d6ec05ea1e46db3bcc481a81e041637", "46b34d1c2d6ec05ea1e46db3bcc481a81e041637" ]
[ "tensorflow_constrained_optimization/python/rates/binary_rates_test.py", "experiments/training_utils.py" ]
[ "# Copyright 2018 The TensorFlow Constrained Optimization Authors. All Rights\n# Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n# ==============================================================================\n\"\"\"Tests for binary_rates.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport six\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\nfrom tensorflow_constrained_optimization.python.rates import basic_expression\nfrom tensorflow_constrained_optimization.python.rates import binary_rates\nfrom tensorflow_constrained_optimization.python.rates import subsettable_context\n\n\nclass RatesTest(tf.test.TestCase):\n \"\"\"Tests for rate-constructing functions.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(RatesTest, self).__init__(*args, **kwargs)\n\n # We use a fixed fake dataset to make sure that the tests are reproducible.\n # The code for generating this random dataset is:\n #\n # self._penalty_predictions = np.random.randn(penalty_size)\n # self._penalty_labels = (\n # np.random.randint(0, 2, size=penalty_size) * 2 - 1)\n # self._penalty_weights = np.random.rand(penalty_size)\n # self._penalty_predicate = np.random.choice(\n # [False, True], size=penalty_size)\n #\n # self._constraint_predictions = np.random.randn(constraint_size)\n # self._constraint_labels = (\n # np.random.randint(0, 2, size=constraint_size) * 2 - 1)\n # self._constraint_weights = np.random.rand(constraint_size)\n # self._constraint_predicate = np.random.choice(\n # [False, True], size=constraint_size)\n #\n # The dataset itself is:\n self._penalty_predictions = np.array([\n -0.672352809534, 0.814787452952, -0.589617508138, 0.476143711622,\n -0.914392995804, -0.140519198247, 1.01713287656, -0.842355386349,\n -1.86878605935, 0.0312541446545, 0.0929898206701, 1.02580838489\n ])\n self._penalty_labels = np.array([1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1])\n self._penalty_weights = np.array([\n 0.305792297057, 0.432750439411, 0.713731859892, 0.635314810893,\n 0.513210439781, 0.739280862773, 0.349403785117, 0.0256588613944,\n 0.96953248529, 0.257894870733, 0.106252982739, 0.0707090045685\n ])\n self._penalty_predicate = np.array([\n False, False, True, True, False, True, True, False, False, False, True,\n False\n ])\n self._constraint_predictions = np.array([\n -0.942162204902, -0.170590351989, 0.407191043958, 0.224967036781,\n -0.319641536386, 0.0965997062235, 1.58882795293, 0.954582543677\n ])\n self._constraint_labels = np.array([-1, -1, 1, 1, 1, -1, 1, 1])\n self._constraint_weights = np.array([\n 0.230071692522, 0.846932765888, 0.265366126241, 0.779471652816,\n 0.517297199127, 0.178307346815, 0.354665564039, 0.937435720249\n ])\n self._constraint_predicate = np.array(\n [True, False, False, True, True, True, True, False])\n\n @property\n def _context(self):\n \"\"\"Creates a new non-split and non-subsetted context.\"\"\"\n # We can't create the context 
in __init__, since it would then wind up in\n # the wrong TensorFlow graph.\n return subsettable_context.rate_context(\n predictions=tf.constant(self._penalty_predictions, dtype=tf.float32),\n labels=tf.constant(self._penalty_labels, dtype=tf.float32),\n weights=tf.constant(self._penalty_weights, dtype=tf.float32))\n\n @property\n def _split_context(self):\n \"\"\"Creates a new split and subsetted context.\"\"\"\n # We can't create the context in __init__, since it would then wind up in\n # the wrong TensorFlow graph.\n context = subsettable_context.split_rate_context(\n penalty_predictions=tf.constant(\n self._penalty_predictions, dtype=tf.float32),\n constraint_predictions=tf.constant(\n self._constraint_predictions, dtype=tf.float32),\n penalty_labels=tf.constant(self._penalty_labels, dtype=tf.float32),\n constraint_labels=tf.constant(\n self._constraint_labels, dtype=tf.float32),\n penalty_weights=tf.constant(self._penalty_weights, dtype=tf.float32),\n constraint_weights=tf.constant(\n self._constraint_weights, dtype=tf.float32))\n return context.subset(self._penalty_predicate, self._constraint_predicate)\n\n def _check_rates(self, expected_penalty_value, expected_constraint_value,\n actual_expression):\n denominator_lower_bound = 0.0\n global_step = tf.Variable(0, dtype=tf.int32)\n evaluation_context = basic_expression.BasicExpression.EvaluationContext(\n denominator_lower_bound, global_step)\n\n actual_penalty_value, penalty_pre_train_ops, _ = (\n actual_expression.penalty_expression.evaluate(evaluation_context))\n actual_constraint_value, constraint_pre_train_ops, _ = (\n actual_expression.constraint_expression.evaluate(evaluation_context))\n\n with self.session() as session:\n session.run(\n [tf.global_variables_initializer(),\n tf.local_variables_initializer()])\n\n # We only need to run the pre-train ops once, since the entire dataset is\n # contained within the Tensors, so the denominators will be correct.\n session.run(list(penalty_pre_train_ops | constraint_pre_train_ops))\n\n self.assertAllClose(\n expected_penalty_value,\n session.run(actual_penalty_value),\n rtol=0,\n atol=1e-6)\n self.assertAllClose(\n expected_constraint_value,\n session.run(actual_constraint_value),\n rtol=0,\n atol=1e-6)\n\n def test_positive_prediction_rate(self):\n \"\"\"Checks that `positive_prediction_rate` calculates the right quantity.\"\"\"\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 + self._penalty_predictions) * self._penalty_weights\n * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 + np.sign(self._constraint_predictions))) *\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.positive_prediction_rate(\n self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)\n\n def test_negative_prediction_rate(self):\n \"\"\"Checks that `negative_prediction_rate` calculates the right quantity.\"\"\"\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = 
np.sum(\n np.maximum(0.0, 1.0 - self._penalty_predictions) * self._penalty_weights\n * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - np.sign(self._constraint_predictions))) *\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.negative_prediction_rate(\n self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)\n\n def test_error_rate(self):\n \"\"\"Checks that `error_rate` calculates the right quantity.\"\"\"\n # For the penalty, the default loss is hinge.\n expected_signed_penalty_labels = (self._penalty_labels > 0.0) * 2.0 - 1.0\n expected_penalty_numerator = np.sum(\n np.maximum(\n 0.0,\n 1.0 - expected_signed_penalty_labels * self._penalty_predictions) *\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_signed_constraint_labels = (\n (self._constraint_labels > 0.0) * 2.0 - 1.0)\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - expected_signed_constraint_labels * np.sign(\n self._constraint_predictions))) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.error_rate(self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)\n\n def test_accuracy_rate(self):\n \"\"\"Checks that `accuracy_rate` calculates the right quantity.\"\"\"\n # For the penalty, the default loss is hinge.\n expected_signed_penalty_labels = (self._penalty_labels > 0.0) * 2.0 - 1.0\n expected_penalty_numerator = np.sum(\n np.maximum(\n 0.0,\n 1.0 + expected_signed_penalty_labels * self._penalty_predictions) *\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_signed_constraint_labels = (\n (self._constraint_labels > 0.0) * 2.0 - 1.0)\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 + expected_signed_constraint_labels * np.sign(\n self._constraint_predictions))) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.accuracy_rate(self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)\n\n def test_true_positive_rate(self):\n \"\"\"Checks that `true_positive_rate` 
calculates the right quantity.\"\"\"\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(\n 0.0, 1.0 + self._penalty_predictions) * (self._penalty_labels > 0.0)\n * self._penalty_weights * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels > 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 + np.sign(self._constraint_predictions))) *\n (self._constraint_labels > 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels > 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.true_positive_rate(self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)\n\n def test_false_negative_rate(self):\n \"\"\"Checks that `false_negative_rate` calculates the right quantity.\"\"\"\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(\n 0.0, 1.0 - self._penalty_predictions) * (self._penalty_labels > 0.0)\n * self._penalty_weights * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels > 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - np.sign(self._constraint_predictions))) *\n (self._constraint_labels > 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels > 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.false_negative_rate(self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)\n\n def test_false_positive_rate(self):\n \"\"\"Checks that `false_positive_rate` calculates the right quantity.\"\"\"\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 + self._penalty_predictions) *\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 + np.sign(self._constraint_predictions))) *\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.false_positive_rate(self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n 
actual_expression)\n\n def test_true_negative_rate(self):\n \"\"\"Checks that `true_negative_rate` calculates the right quantity.\"\"\"\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 - self._penalty_predictions) *\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - np.sign(self._constraint_predictions))) *\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.true_negative_rate(self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)\n\n def _find_roc_auc_thresholds(self, bins):\n \"\"\"Finds the thresholds associated with each of the ROC AUC bins.\"\"\"\n indices = [\n index for index in range(len(self._penalty_labels))\n if self._penalty_labels[index] <= 0\n ]\n permutation = sorted(\n indices, key=lambda index: self._penalty_predictions[index])\n denominator = sum(self._penalty_weights[index] for index in permutation)\n\n # Construct a dictionary mapping thresholds to FPRs.\n fprs = {-float(\"Inf\"): 1.0}\n fpr_numerator = denominator\n for index in permutation:\n fpr_numerator -= self._penalty_weights[index]\n fprs[self._penalty_predictions[index]] = fpr_numerator / denominator\n\n # These FPR thresholds are the same as in roc_auc_{lower,upper}_bound.\n fpr_thresholds = [(index + 0.5) / bins for index in xrange(bins)]\n\n # For each FPR threshold, find the threshold on the model output that\n # achieves the desired FPR.\n prediction_thresholds = []\n for fpr_threshold in fpr_thresholds:\n prediction_threshold = min(\n [float(\"Inf\")] +\n [key for key, value in six.iteritems(fprs) if value < fpr_threshold])\n prediction_thresholds.append(prediction_threshold)\n\n return prediction_thresholds\n\n def _check_roc_auc(self, bins, roc_auc_thresholds, constraints_tensor,\n pre_train_ops):\n \"\"\"Helper method for test_roc_auc_{lower,upper}_bound.\"\"\"\n bisection_loops = 32\n bisection_epsilon = 1e-6\n\n with tf.Session() as session:\n session.run(\n [tf.local_variables_initializer(),\n tf.global_variables_initializer()])\n session.run(list(pre_train_ops))\n\n session.run(tf.assign(roc_auc_thresholds, np.zeros(bins)))\n constraints = session.run(constraints_tensor)\n # We extracted the constraints from a *set*, rather than a *list*, so we\n # need to sort them by their violations (when the thresholds are\n # uninitialized) to determine which constraint is associated with which\n # threshold.\n permutation = sorted(\n range(bins), key=lambda index: constraints[index], reverse=True)\n\n # Repeatedly double the (negative) lower thresholds until they're below\n # intercepts.\n lower_thresholds = np.zeros(bins)\n threshold = -1.0\n while True:\n session.run(tf.assign(roc_auc_thresholds, lower_thresholds))\n constraints = session.run(constraints_tensor)[permutation]\n indices = (constraints <= 0.0)\n if not 
any(indices):\n break\n lower_thresholds[indices] = threshold\n threshold *= 2.0\n\n # Repeatedly double the (positive) upper thresholds until they're above\n # the intercepts.\n upper_thresholds = np.zeros(bins)\n threshold = 1.0\n while True:\n session.run(tf.assign(roc_auc_thresholds, upper_thresholds))\n constraints = session.run(constraints_tensor)[permutation]\n indices = (constraints > 0.0)\n if not any(indices):\n break\n upper_thresholds[indices] = threshold\n threshold *= 2.0\n\n # Now perform a bisection search to find the intercepts (i.e. the\n # thresholds for which the constraints are exactly satisfied).\n for _ in xrange(bisection_loops):\n middle_thresholds = 0.5 * (lower_thresholds + upper_thresholds)\n session.run(tf.assign(roc_auc_thresholds, middle_thresholds))\n constraints = session.run(constraints_tensor)[permutation]\n lower_indices = (constraints > 0.0)\n upper_indices = (constraints <= 0.0)\n lower_thresholds[lower_indices] = middle_thresholds[lower_indices]\n upper_thresholds[upper_indices] = middle_thresholds[upper_indices]\n # Stop the search once we're within epsilon.\n if max(upper_thresholds - lower_thresholds) <= bisection_epsilon:\n break\n\n actual_thresholds = upper_thresholds\n expected_thresholds = self._find_roc_auc_thresholds(bins)\n self.assertAllClose(\n expected_thresholds, actual_thresholds, rtol=0, atol=bisection_epsilon)\n\n def test_roc_auc_lower_bound(self):\n \"\"\"Tests that roc_auc_lower_bound's constraints give correct thresholds.\"\"\"\n bins = 3\n denominator_lower_bound = 0.0\n global_step = tf.Variable(0, dtype=tf.int32)\n evaluation_context = basic_expression.BasicExpression.EvaluationContext(\n denominator_lower_bound, global_step)\n\n expression = binary_rates.roc_auc_lower_bound(self._context, bins)\n\n # Extract the Tensors for the constraints, and the associated pre_train_ops.\n pre_train_ops = set()\n constraints_tensor = []\n for constraint in expression.extra_constraints:\n constraint_tensor, constraint_pre_train_ops, _ = (\n constraint.expression.constraint_expression.evaluate(\n evaluation_context))\n constraints_tensor.append(constraint_tensor)\n pre_train_ops.update(constraint_pre_train_ops)\n self.assertEqual(bins, len(constraints_tensor))\n constraints_tensor = tf.stack(constraints_tensor)\n\n # The check_roc_auc() helper will perform a bisection search over the\n # thresholds, so we need to extract the Tensor containing the thresholds\n # from the graph.\n roc_auc_thresholds = tf.get_default_graph().get_tensor_by_name(\n \"roc_auc_thresholds:0\")\n\n self._check_roc_auc(bins, roc_auc_thresholds, constraints_tensor,\n pre_train_ops)\n\n def test_roc_auc_upper_bound(self):\n \"\"\"Tests that roc_auc_upper_bound's constraints give correct thresholds.\"\"\"\n bins = 4\n denominator_lower_bound = 0.0\n global_step = tf.Variable(0, dtype=tf.int32)\n evaluation_context = basic_expression.BasicExpression.EvaluationContext(\n denominator_lower_bound, global_step)\n\n expression = binary_rates.roc_auc_upper_bound(self._context, bins)\n\n # Extract the Tensors for the constraints, and the associated pre_train_ops.\n pre_train_ops = set()\n constraints_tensor = []\n for constraint in expression.extra_constraints:\n constraint_tensor, constraint_pre_train_ops, _ = (\n constraint.expression.constraint_expression.evaluate(\n evaluation_context))\n constraints_tensor.append(constraint_tensor)\n pre_train_ops.update(constraint_pre_train_ops)\n self.assertEqual(bins, len(constraints_tensor))\n constraints_tensor = 
tf.stack(constraints_tensor)\n\n # The check_roc_auc() helper will perform a bisection search over the\n # thresholds, so we need to extract the Tensor containing the thresholds\n # from the graph.\n roc_auc_thresholds = tf.get_default_graph().get_tensor_by_name(\n \"roc_auc_thresholds:0\")\n\n self._check_roc_auc(bins, roc_auc_thresholds, -constraints_tensor,\n pre_train_ops)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# A list of training utility functions \nimport sys\nsys.path.insert(0,'/Users/neelguha/Dropbox/NeelResearch/fairness/code/tensorflow_constrained_optimization/')\nimport math\nimport random\nimport numpy as np\nimport pandas as pd\nimport warnings\nfrom six.moves import xrange\nimport tensorflow as tf\nimport tensorflow_constrained_optimization as tfco\nimport matplotlib.pyplot as plt\nimport logging \nimport time\n\ndef training_generator(model, train_df, test_df, minibatch_size, num_iterations_per_loop=1, num_loops=1):\n random.seed(31337)\n num_rows = train_df.shape[0]\n minibatch_size = min(minibatch_size, num_rows)\n permutation = list(range(train_df.shape[0]))\n random.shuffle(permutation)\n\n session = tf.Session()\n session.run((tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n\n minibatch_start_index = 0\n for n in xrange(num_loops):\n for _ in xrange(num_iterations_per_loop):\n minibatch_indices = []\n while len(minibatch_indices) < minibatch_size:\n minibatch_end_index = (\n minibatch_start_index + minibatch_size - len(minibatch_indices))\n if minibatch_end_index >= num_rows:\n minibatch_indices += range(minibatch_start_index, num_rows)\n minibatch_start_index = 0\n else:\n minibatch_indices += range(minibatch_start_index, minibatch_end_index)\n minibatch_start_index = minibatch_end_index\n \n session.run(\n model.train_op,\n feed_dict=model.feed_dict_helper(\n train_df.iloc[[permutation[ii] for ii in minibatch_indices]]))\n\n train_predictions = session.run(\n model.predictions_tensor,\n feed_dict=model.feed_dict_helper(train_df))\n session.run(\n model.predictions_tensor,\n feed_dict=model.feed_dict_helper(train_df))\n session.run(\n model.predictions_tensor,\n feed_dict=model.feed_dict_helper(train_df))\n test_predictions = session.run(\n model.predictions_tensor,\n feed_dict=model.feed_dict_helper(test_df))\n yield (train_predictions, test_predictions)\n\ndef error_rate(predictions, labels):\n signed_labels = (\n (labels > 0).astype(np.float32) - (labels <= 0).astype(np.float32))\n numerator = (np.multiply(signed_labels, predictions) <= 0).sum()\n denominator = predictions.shape[0]\n return float(numerator) / float(denominator)\n\n\ndef positive_prediction_rate(predictions, subset):\n numerator = np.multiply((predictions > 0).astype(np.float32),\n (subset > 0).astype(np.float32)).sum()\n denominator = (subset > 0).sum()\n return float(numerator) / float(denominator)\n\ndef tpr(df, label_column):\n \"\"\"Measure the true positive rate.\"\"\"\n fp = sum((df['predictions'] >= 0.0) & (df[label_column] > 0.5))\n ln = sum(df[label_column] > 0.5)\n return float(fp) / float(ln)\n\ndef _get_error_rate_and_constraints(df, tpr_max_diff, label_column, protected_columns):\n \"\"\"Computes the error and fairness violations.\"\"\"\n error_rate_local = error_rate(df[['predictions']], df[[label_column]])\n overall_tpr = tpr(df, label_column)\n diffs = []\n for protected_attribute in protected_columns:\n diffs.append((overall_tpr - tpr_max_diff) - tpr(df[df[protected_attribute] > 0.5], label_column))\n return error_rate_local, diffs\n\ndef 
_get_exp_error_rate_constraints(cand_dist, error_rates_vector, constraints_matrix):\n \"\"\"Computes the expected error and fairness violations on a randomized solution.\"\"\"\n expected_error_rate = np.dot(cand_dist, error_rates_vector)\n expected_constraints = np.matmul(cand_dist, constraints_matrix)\n return expected_error_rate, expected_constraints\n\ndef training_helper(model,\n train_df,\n test_df,\n minibatch_size,\n label_column,\n protected_columns,\n num_iterations_per_loop=1,\n num_loops=1 ,\n interval = 5):\n train_error_rate_vector = []\n train_constraints_matrix = []\n test_error_rate_vector = []\n test_constraints_matrix = []\n iteration = 1\n start = time.time()\n for train, test in training_generator(\n model, train_df, test_df, minibatch_size, num_iterations_per_loop,\n num_loops):\n train_df['predictions'] = train\n test_df['predictions'] = test\n '''if (iteration - 1) % interval == 0:\n train_error_rate, train_constraints = _get_error_rate_and_constraints(\n train_df, model.tpr_max_diff, label_column, protected_columns)\n train_error_rate_vector.append(train_error_rate)\n train_constraints_matrix.append(train_constraints)\n\n test_error_rate, test_constraints = _get_error_rate_and_constraints(\n test_df, model.tpr_max_diff, label_column, protected_columns)\n test_error_rate_vector.append(test_error_rate)\n test_constraints_matrix.append(test_constraints)\n duration = time.time() - start\n logging.info(\n \"Finished %d/%d. Train error = %f. Max train violation = %f. Test error = %f. Max test violation = %f. %f seconds\" % \n (iteration, num_loops, train_error_rate, max(train_constraints), test_error_rate, max(test_constraints), duration)\n )\n else:'''\n duration = time.time() - start\n logging.info(\n \"Finished %d/%d. %f seconds\" % \n (iteration, num_loops, duration)\n )\n iteration += 1\n start = time.time()\n return (train_error_rate_vector, train_constraints_matrix, test_error_rate_vector, test_constraints_matrix)\n\ndef get_tpr_subset(df, subsets, label_column):\n filtered = df \n for subset in subsets:\n filtered = filtered[filtered[subset] > 0]\n return tpr(filtered, label_column)\n\ndef get_acc_subset(df, subsets):\n filtered = df \n for subset in subsets:\n filtered = filtered[filtered[subset] > 0]\n predictions = filtered['predictions']\n labels = filtered['label']\n return np.mean(np.array(predictions > 0.0) == np.array(labels > 0.0))\n " ]
[ [ "numpy.array", "tensorflow.assign", "numpy.zeros", "tensorflow.get_default_graph", "numpy.sum", "tensorflow.Session", "tensorflow.Variable", "tensorflow.constant", "numpy.sign", "tensorflow.test.main", "tensorflow.stack", "tensorflow.local_variables_initializer", "tensorflow.global_variables_initializer", "numpy.maximum" ], [ "numpy.array", "numpy.dot", "numpy.matmul", "tensorflow.Session", "numpy.multiply", "tensorflow.local_variables_initializer", "tensorflow.global_variables_initializer" ] ]
Yu-Yy/PR_project
[ "0999d0a7b2c37e5b1cd81acd43de95a024cf951f" ]
[ "image_text_fusion.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nfrom tensorboardX import SummaryWriter\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport os\nimport argparse\nfrom lib.config.hrnet_config import update_config\nfrom lib.config.hrnet_config import config\nfrom lib.datasets.trip_dataloader import triplet_image_text_data\nfrom pathlib import Path\nfrom lib.utils.loss import triplet_loss_cl\n# import the model\nfrom hrnet_retrieval import retrieval_net\nfrom dl_text import text_simple_tf\n\nclass cross_modal(nn.Module):\n def __init__(self,cfg, original_dim, is_train = True, is_transform = True):\n super(cross_modal,self).__init__()\n self.image_em = retrieval_net(cfg, is_train = is_train, is_transform = False) \n self.text_em = text_simple_tf(original_dim,is_transform)\n self.Linear_fusing1 = nn.Sequential(nn.Linear(1024 + 32, 512), nn.BatchNorm1d(512), nn.LeakyReLU())\n self.Linear_fusing2 = nn.Sequential(nn.Linear(1024 + 32, 2048), nn.BatchNorm1d(2048), nn.LeakyReLU(), nn.Linear(2048,512), nn.BatchNorm1d(512),nn.LeakyReLU())\n def forward(self,image, text_feature):\n image_feature = self.image_em(image)\n text_embed = self.text_em(text_feature)\n # import pdb;pdb.set_trace()\n Fusion_f = torch.cat([image_feature, text_embed], dim=-1)\n Fusion_f = self.Linear_fusing1(Fusion_f) + self.Linear_fusing2(Fusion_f) \n\n output_feature = Fusion_f / torch.norm(Fusion_f,dim=-1,keepdim=True)\n return output_feature\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train keypoints network')\n parser.add_argument(\n '--cfg', help='experiment configure file name', required=True, type=str)\n args, rest = parser.parse_known_args()\n update_config(args.cfg) # 把config的文件更新过去\n return args\n\ndef get_optimizer(model):\n lr = config.TRAIN.LR\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.module.parameters()), lr=lr) # 整体模型权重均全部重新训练\n return model, optimizer\n \ndef load_checkpoint(model, optimizer, output_dir, filename='checkpoint.pth.tar'):\n file = os.path.join(output_dir, filename)\n if os.path.isfile(file):\n checkpoint = torch.load(file)\n start_epoch = checkpoint['epoch']\n metrics = checkpoint['loss']\n model.module.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print('=> load checkpoint {} (epoch {})'\n .format(file, start_epoch))\n\n return start_epoch, model, optimizer, metrics\n\n else:\n print('=> no checkpoint found at {}'.format(file))\n return 0, model, optimizer, np.inf\n\ndef save_checkpoint(states, is_best, output_dir, filename='checkpoint.pth.tar'):\n torch.save(states, os.path.join(output_dir, filename))\n if is_best and 'state_dict' in states:\n torch.save(states['state_dict'],\n os.path.join(output_dir, 'model_best.pth.tar'))\n\ndef load_backbone(model,pretrained_file):\n pretrained_state_dict = torch.load(pretrained_file)\n model_state_dict_backbone = model.module.backbone.state_dict()\n prefix_b = 'backbone.'\n new_pretrained_state_dict_bacbone = {}\n for k, v in pretrained_state_dict.items():\n if k.replace(prefix_b, \"\") in model_state_dict_backbone and v.shape == model_state_dict_backbone[k.replace(prefix_b, \"\")].shape: #.replace(prefix, \"\") .replace(prefix, \"\")\n new_pretrained_state_dict_bacbone[k.replace(prefix_b, \"\")] = v\n print(\"load statedict from {}\".format(pretrained_file))\n model.module.backbone.load_state_dict(new_pretrained_state_dict_bacbone)\n return model\n\ndef main():\n args 
= parse_args() # 读取 cfg 参数,config表示之后需要看一下\n result_log_dir = Path(config.OUTPUT_DIR)\n result_log_dir.mkdir(parents=True, exist_ok=True)\n\n gpus = [int(i) for i in config.GPUS.split(',')]\n image_folder = '/Extra/panzhiyu/img_retrieval/shopee-product-matching/train_images'\n train_dataset = triplet_image_text_data(image_folder,is_train = True)\n test_dataset = triplet_image_text_data(image_folder,is_train = False)\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=config.TRAIN.BATCH_SIZE * len(gpus),\n shuffle=config.TRAIN.SHUFFLE,\n num_workers=config.WORKERS,\n pin_memory=True)\n\n test_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size=config.TEST.BATCH_SIZE * len(gpus),\n shuffle=True,\n num_workers=config.WORKERS,\n pin_memory=True)\n\n cudnn.benchmark = config.CUDNN.BENCHMARK\n torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC\n torch.backends.cudnn.enabled = config.CUDNN.ENABLED\n print('=> Constructing models ..')\n model = cross_modal(config, original_dim=11914, is_transform=True)\n with torch.no_grad():\n model = torch.nn.DataParallel(model, device_ids=gpus).cuda()\n model, optimizer = get_optimizer(model)\n start_epoch = config.TRAIN.BEGIN_EPOCH\n end_epoch = config.TRAIN.END_EPOCH\n least_test_loss = np.inf # enough large\n pretrain_file_image = '/home/panzhiyu/Homework/img_retrieval/PR_project/result/orig_hrnet//model_best.pth.tar'\n pretrain_file_text = '/home/panzhiyu/Homework/img_retrieval/PR_project/result/trans_text/model_best.pth.tar'\n if config.NETWORK.PRETRAINED_BACKBONE: # no pretrained test\n # load the pretrained two model\n pretrained_state_dict_image = torch.load(pretrain_file_image)\n pretrained_state_dict_text = torch.load(pretrain_file_text)\n model_state_dict_image = model.module.image_em.state_dict()\n model_state_dict_text = model.module.text_em.state_dict()\n\n prefix = ''# module.\n new_pretrained_state_dict_image = {}\n for k, v in pretrained_state_dict_image.items():\n if k.replace(prefix, \"\") in model_state_dict_image and v.shape == model_state_dict_image[k.replace(prefix, \"\")].shape: #.replace(prefix, \"\") .replace(prefix, \"\")\n new_pretrained_state_dict_image[k.replace(prefix, \"\")] = v\n new_pretrained_state_dict_text = {}\n for k, v in pretrained_state_dict_text.items():\n if k.replace(prefix, \"\") in model_state_dict_text and v.shape == model_state_dict_text[k.replace(prefix, \"\")].shape: #.replace(prefix, \"\") .replace(prefix, \"\")\n new_pretrained_state_dict_text[k.replace(prefix, \"\")] = v\n model.module.image_em.load_state_dict(new_pretrained_state_dict_image)\n model.module.text_em.load_state_dict(new_pretrained_state_dict_text)\n print('load backbone')\n # print(f'Using backbone {config.NETWORK.PRETRAINED_BACKBONE}')\n # model = load_backbone(model, config.NETWORK.PRETRAINED_BACKBONE) # load POSE ESTIMATION BACKBONE\n\n if config.TRAIN.RESUME:\n start_epoch, model, optimizer, metrics_load = load_checkpoint(model, optimizer, config.OUTPUT_DIR) # TODO: Load the A1 metrics\n least_test_loss = metrics_load\n\n tb_log_dir = Path(os.path.join(config.OUTPUT_DIR,'tensorboard_log'))\n tb_log_dir.mkdir(parents=True, exist_ok=True)\n\n writer_dict = {\n 'writer': SummaryWriter(log_dir=str(tb_log_dir)),\n 'train_global_steps': 0,\n 'valid_global_steps': 0,\n }\n\n \n print('=> Training...')\n device=torch.device('cuda')\n for epoch in range(start_epoch, end_epoch):\n print('Epoch: {}'.format(epoch))\n train_sim_loss = AverageMeter()\n test_sim_loss = AverageMeter()\n trip_class_loss = 
triplet_loss_cl()\n # The train part \n model.train()\n for i, batch in enumerate(train_loader):\n q_image, g_image, q_features, g_features = batch\n if q_features.shape[0] == 1:\n continue # cannot do the triplet loss \n q_features = q_features.to(device)\n g_features = g_features.to(device)\n q_image = q_image.to(device)\n g_image = g_image.to(device)\n\n\n q_features_e = model(q_image,q_features)\n g_features_e = model(g_image,g_features)\n\n # calculate the loss as triplet\n trip_loss = trip_class_loss(q_features_e,g_features_e)\n train_sim_loss.update(trip_loss.item())\n optimizer.zero_grad()\n trip_loss.backward()\n optimizer.step()\n\n if i % config.PRINT_FREQ == 0:\n gpu_memory_usage = torch.cuda.memory_allocated(0)\n msg = f'Epoch:[{epoch}][{i}/{len(train_loader)}]\\t'\\\n f'Loss_trip: {train_sim_loss.val:.3f}({train_sim_loss.avg:.3f})\\t'\\\n f'Memory {gpu_memory_usage:.1f}'\n print(msg)\n writer = writer_dict['writer']\n global_steps = writer_dict['train_global_steps']\n writer.add_scalar('train_loss_trip', train_sim_loss.avg, global_steps)\n writer_dict['train_global_steps'] = global_steps + 1\n\n # store the first model\n if epoch == 0:\n model_name =os.path.join(config.OUTPUT_DIR,\n f'epoch{epoch}_state.pth.tar')\n print('saving current model state to {}'.format(\n model_name))\n torch.save(model.module.state_dict(), model_name)\n # The eval part\n model.eval()\n for i, batch in enumerate(test_loader):\n q_image, g_image, q_features, g_features = batch\n if q_features.shape[0] == 1:\n continue # cannot do the triplet loss \n q_features = q_features.to(device)\n g_features = g_features.to(device)\n q_image = q_image.to(device)\n g_image = g_image.to(device)\n\n\n q_features_e = model(q_image,q_features)\n g_features_e = model(g_image,g_features)\n\n # calculate the loss as triplet\n trip_loss = trip_class_loss(q_features_e,g_features_e)\n test_sim_loss.update(trip_loss.item())\n\n if i % config.PRINT_FREQ == 0:\n gpu_memory_usage = torch.cuda.memory_allocated(0)\n msg = f'Test:[{epoch}][{i}/{len(test_loader)}]\\t'\\\n f'Loss_trip: {test_sim_loss.val:.3f}({test_sim_loss.avg:.3f})\\t'\\\n f'Memory {gpu_memory_usage:.1f}'\n print(msg)\n writer = writer_dict['writer']\n global_steps = writer_dict['valid_global_steps']\n writer.add_scalar('test_loss_trip', test_sim_loss.avg, global_steps)\n writer_dict['test_global_steps'] = global_steps + 1\n \n\n test_loss = test_sim_loss.avg\n # compare the loss\n if test_loss < least_test_loss:\n least_test_loss = test_loss\n best_model = True\n else:\n best_model = False\n \n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.module.state_dict(),\n 'loss': test_loss,\n 'optimizer': optimizer.state_dict(),\n }, best_model, config.OUTPUT_DIR)\n \n final_model_state_file = os.path.join(config.OUTPUT_DIR,\n 'final_state.pth.tar')\n torch.save(model.module.state_dict(), final_model_state_file)\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\nif __name__ == '__main__':\n main()\n\n\n" ]
[ [ "torch.nn.Linear", "torch.device", "torch.cat", "torch.norm", "torch.no_grad", "torch.nn.LeakyReLU", "torch.cuda.memory_allocated", "torch.nn.BatchNorm1d", "torch.load", "torch.nn.DataParallel" ] ]
lazydinoz/HackFest21
[ "84bfbfbb2c75a6511226a87d2e947984db878ba1", "84bfbfbb2c75a6511226a87d2e947984db878ba1" ]
[ "Data Analysis Projects/Wine Qulaity Prediction/refactor_wine_quality_.py", "Tkinter Apps/Connect4.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"refactor-wine-quality .ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1HbL79czQHkBYg3_-oEF1aVNp7kYY7NAi\n\n# Refactor: Wine Quality Analysis\nIn this exercise, you'll refactor code that analyzes a wine quality dataset taken from the UCI Machine Learning Repository [here](https://archive.ics.uci.edu/ml/datasets/wine+quality). Each row contains data on a wine sample, including several physicochemical properties gathered from tests, as well as a quality rating evaluated by wine experts.\n\nThe code in this notebook first renames the columns of the dataset and then calculates some statistics on how some features may be related to quality ratings. Can you refactor this code to make it more clean and modular?\n\"\"\"\n\nimport pandas as pd\ndf = pd.read_csv('winequality-red.csv', sep=';')\ndf.head(10)\n\n\"\"\"### Renaming Columns\nYou want to replace the spaces in the column labels with underscores to be able to reference columns with dot notation. Here's one way you could've done it.\n\"\"\"\n\nnew_df = df.rename(columns={'fixed acidity': 'fixed_acidity',\n 'volatile acidity': 'volatile_acidity',\n 'citric acid': 'citric_acid',\n 'residual sugar': 'residual_sugar',\n 'free sulfur dioxide': 'free_sulfur_dioxide',\n 'total sulfur dioxide': 'total_sulfur_dioxide'\n })\nnew_df.head()\n\n\"\"\"And here's a slightly better way you could do it. You can avoid making naming errors due to typos caused by manual typing. However, this looks a little repetitive. Can you make it better?\"\"\"\n\nlabels = list(df.columns)\nlabels[0] = labels[0].replace(' ', '_')\nlabels[1] = labels[1].replace(' ', '_')\nlabels[2] = labels[2].replace(' ', '_')\nlabels[3] = labels[3].replace(' ', '_')\nlabels[5] = labels[5].replace(' ', '_')\nlabels[6] = labels[6].replace(' ', '_')\ndf.columns = labels\n\ndf.head()\n\n\"\"\"### Analyzing Features\nNow that your columns are ready, you want to see how different features of this dataset relate to the quality rating of the wine. A very simple way you could do this is by observing the mean quality rating for the top and bottom half of each feature. The code below does this for four features. It looks pretty repetitive right now. Can you make this more concise? \n\nYou might challenge yourself to figure out how to make this code more efficient! 
But you don't need to worry too much about efficiency right now - we will cover that more in the next section.\n\"\"\"\n\nmedian_alcohol = df.alcohol.median()\nfor i, alcohol in enumerate(df.alcohol):\n if alcohol >= median_alcohol:\n df.loc[i, 'alcohol'] = 'high'\n else:\n df.loc[i, 'alcohol'] = 'low'\ndf.groupby('alcohol').quality.mean()\n\nmedian_pH = df.pH.median()\nfor i, pH in enumerate(df.pH):\n if pH >= median_pH:\n df.loc[i, 'pH'] = 'high'\n else:\n df.loc[i, 'pH'] = 'low'\ndf.groupby('pH').quality.mean()\n\nmedian_sugar = df.residual_sugar.median()\nfor i, sugar in enumerate(df.residual_sugar):\n if sugar >= median_sugar:\n df.loc[i, 'residual_sugar'] = 'high'\n else:\n df.loc[i, 'residual_sugar'] = 'low'\ndf.groupby('residual_sugar').quality.mean()\n\nmedian_citric_acid = df.citric_acid.median()\nfor i, citric_acid in enumerate(df.citric_acid):\n if citric_acid >= median_citric_acid:\n df.loc[i, 'citric_acid'] = 'high'\n else:\n df.loc[i, 'citric_acid'] = 'low'\ndf.groupby('citric_acid').quality.mean()\n\n", "import numpy as np\nimport pygame\nimport sys\nimport math\n\nBLUE = (0, 0, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nYELLOW = (255, 255, 0)\n\nROW_COUNT = 6\nCOLUMN_COUNT = 7\n\n\ndef create_board():\n board = np.zeros((ROW_COUNT, COLUMN_COUNT))\n return board\n\n\ndef drop_piece(board, row, col, piece):\n board[row][col] = piece\n\n\ndef is_valid_location(board, col):\n return board[ROW_COUNT-1][col] == 0\n\n\ndef get_next_open_row(board, col):\n for r in range(ROW_COUNT):\n if board[r][col] == 0:\n return r\n\n\ndef print_board(board):\n print(np.flip(board, 0))\n\n\ndef winning_move(board, piece):\n # Check horizontal locations for win\n for c in range(COLUMN_COUNT-3):\n for r in range(ROW_COUNT):\n if board[r][c] == piece and board[r][c+1] == piece and board[r][c+2] == piece and board[r][c+3] == piece:\n return True\n\n # Check vertical locations for win\n for c in range(COLUMN_COUNT):\n for r in range(ROW_COUNT-3):\n if board[r][c] == piece and board[r+1][c] == piece and board[r+2][c] == piece and board[r+3][c] == piece:\n return True\n\n # Check positively sloped diaganols\n for c in range(COLUMN_COUNT-3):\n for r in range(ROW_COUNT-3):\n if board[r][c] == piece and board[r+1][c+1] == piece and board[r+2][c+2] == piece and board[r+3][c+3] == piece:\n return True\n\n # Check negatively sloped diaganols\n for c in range(COLUMN_COUNT-3):\n for r in range(3, ROW_COUNT):\n if board[r][c] == piece and board[r-1][c+1] == piece and board[r-2][c+2] == piece and board[r-3][c+3] == piece:\n return True\n\n\ndef draw_board(board):\n for c in range(COLUMN_COUNT):\n for r in range(ROW_COUNT):\n pygame.draw.rect(screen, BLUE, (c*SQUARESIZE, r *\n SQUARESIZE+SQUARESIZE, SQUARESIZE, SQUARESIZE))\n pygame.draw.circle(screen, BLACK, (int(\n c*SQUARESIZE+SQUARESIZE/2), int(r*SQUARESIZE+SQUARESIZE+SQUARESIZE/2)), RADIUS)\n\n for c in range(COLUMN_COUNT):\n for r in range(ROW_COUNT):\n if board[r][c] == 1:\n pygame.draw.circle(screen, RED, (int(\n c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)\n elif board[r][c] == 2:\n pygame.draw.circle(screen, YELLOW, (int(\n c*SQUARESIZE+SQUARESIZE/2), height-int(r*SQUARESIZE+SQUARESIZE/2)), RADIUS)\n pygame.display.update()\n\n\nboard = create_board()\n# print_board(board)\ngame_over = False\nturn = 0\n\npygame.init()\n\nSQUARESIZE = 100\n\nwidth = COLUMN_COUNT * SQUARESIZE\nheight = (ROW_COUNT+1) * SQUARESIZE\n\nsize = (width, height)\n\nRADIUS = int(SQUARESIZE/2 - 5)\n\nscreen = 
pygame.display.set_mode(size)\ndraw_board(board)\npygame.display.update()\n\nmyfont = pygame.font.SysFont(\"monospace\", 75)\n\nwhile not game_over:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n\n if event.type == pygame.MOUSEMOTION:\n pygame.draw.rect(screen, BLACK, (0, 0, width, SQUARESIZE))\n posx = event.pos[0]\n if turn == 0:\n pygame.draw.circle(\n screen, RED, (posx, int(SQUARESIZE/2)), RADIUS)\n else:\n pygame.draw.circle(\n screen, YELLOW, (posx, int(SQUARESIZE/2)), RADIUS)\n pygame.display.update()\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n pygame.draw.rect(screen, BLACK, (0, 0, width, SQUARESIZE))\n # print(event.pos)\n # Ask for Player 1 Input\n if turn == 0:\n posx = event.pos[0]\n col = int(math.floor(posx/SQUARESIZE))\n\n if is_valid_location(board, col):\n row = get_next_open_row(board, col)\n drop_piece(board, row, col, 1)\n\n if winning_move(board, 1):\n label = myfont.render(\"Player 1 wins!!\", 1, RED)\n screen.blit(label, (40, 10))\n game_over = True\n\n # # Ask for Player 2 Input\n else:\n posx = event.pos[0]\n col = int(math.floor(posx/SQUARESIZE))\n\n if is_valid_location(board, col):\n row = get_next_open_row(board, col)\n drop_piece(board, row, col, 2)\n\n if winning_move(board, 2):\n label = myfont.render(\"Player 2 wins!!\", 1, YELLOW)\n screen.blit(label, (40, 10))\n game_over = True\n\n # print_board(board)\n draw_board(board)\n\n turn += 1\n turn = turn % 2\n\n if game_over:\n pygame.time.wait(3000)\n" ]
[ [ "pandas.read_csv" ], [ "numpy.flip", "numpy.zeros" ] ]
S1mHub/Molecular-Learning
[ "5254a88006b9981e776ba6bb76f5377c6260ca04" ]
[ "TD_Cluster_Code/ChemAAE_Prep.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 22 15:28:22 2019\n\n@author: sameermac\n\"\"\"\n\n#High Dimensional Unsupervised Learning on Organic Chemicals\n\n#from __future__ import print_function\n#import csv\n#import math\n#import random\n\n\n#from tqdm import tqdm.tqdm\n#for i in tqdm(l):\n#...stuff\n#joblib\n\n### Loaded Modules ###\n\nfrom operator import itemgetter\nfrom scipy.cluster.hierarchy import linkage, dendrogram\nfrom tqdm import tqdm\nimport numpy as np\nfrom rdkit import Chem\nfrom rdkit.Chem.Fingerprints import FingerprintMols\nfrom rdkit import DataStructs\nfrom rdkit import Chem, RDConfig\nfrom rdkit.Chem import AllChem, rdMolAlign, rdShapeHelpers\nfrom rdkit.Chem import Draw\nimport matplotlib.pyplot as plt\nfrom matplotlib import collections as matcoll\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import SpectralClustering\nimport random\nfrom rdkit.Chem import Descriptors\nfrom sklearn.manifold import MDS\n\n\n#Note: %matplotlib auto - Put this in the IPython console to get full figures\n\n### Unsupervised Learning Framework - This file ###\n\n#1) First the molecules are loaded (here done locally) - SDF Format\n#2) Unconvertible molecules are removed\n#3) Conversion to SMILES and Fingerprint Formats for computations and Filtering\n\n#Filters Applied to the Molecular Set\n\n#4) Inorganic/Toxic Molecules are filtered : Captured then Removed\n#5) Filtering basedd off of Lipinski Rule of Five: Molecules < 500 Dalton\n\n#Unsupervised Learning - Clustering\n\n#6) Compute K-medoid Clusters (Labels): Vary Hyperparameters Clusters, Iterations\n#7) Apply K-medoid Labels to High-Dimensional Manifold Learning Methods (MDS, TSNE, etc.)\n\n#Applying the B-VAE Autoencoder Framework\n\n#8) Apply the B-VAE network\n\n\n\n\n### Loading Molecules ###\n\nMOL_open = open( '/Users/sameermac/Desktop/structures_DRUGBANK_approved.sdf','rb')\n#MOL_open = open( '/Users/sameermac/Desktop/Thesis/gdb9.sdf','rb')\nMOL_LIST = Chem.ForwardSDMolSupplier(MOL_open)\n\nmol = [x for x in MOL_LIST if x is not None]\n#Some elements empty because there are invalid molecules =/> SMILES Format\n\nmol_FULL = mol\n\nSmile_mol_FULL = [Chem.MolToSmiles(m) for m in mol_FULL]\n\nInorganicPeriodicTable = ['[Li','[Na','[K','[Rb','[Cs','[Fr',\n '[Be','[Mg','[Ca','[Sr', '[Ba', '[Ra',\n '[Sc','[Y',\n '[Ti','[Zr','[Hf','[Rf','[La','[Ac',\n '[V','[Nb','[Ta','[Db','[Ce','[Th',\n '[Cr','[Mo','[W','[Sg','[Pr','[Pa',\n '[Mn','[Tc','[Re','[Bh','[Nd','[U',\n '[Fe','[Ru','[Os','[Hs','[Pm','[Np',\n '[Co','[Rh','[Ir','[Mt','[Sm','[Pu',\n '[Ni','[Pd','[Pt','[Ds','[Eu','[Am',\n '[Cu','[Ag','[Au','[Rg','[Gd','[Cm',\n '[Zn','[Cd','[Hg','[Cn','[Tb','[Bk',\n 'B','[Al','[Ga','[In','[Tl','[Nh','[Dy','[Cf',\n '[Si','[Ge','[Sn','[Pb','[Fl','[Ho','[Es',\n '[As','[Sb','[Bi','[Mc','[Er','[Fm',\n '[Se','[Te','[Po','[Lv','[Tm','[Md',\n 'Br','I','[At','[Ts','[Yb','[No',\n '[He','[Ne','[Ar','[Kr','[Xe','[Rn',\n '[Og','[Lu','[Lr','b',\n \n 'Li]','Na]','K]','Rb]','Cs]','Fr]',\n 'Be]','Mg]','Ca]','Sr]', 'Ba]', 'Ra]',\n 'Sc]','Y]',\n 'Ti]','Zr]','Hf]','Rf]','La]','Ac]',\n 'V]','Nb]','Ta]','Db]','Ce]','Th]',\n 'Cr]','Mo]','W]','Sg]','Pr]','Pa]',\n 'Mn]','Tc]','Re]','Bh]','Nd]','U]',\n 'Fe]','Ru]','Os]','Hs]','Pm]','Np]',\n 'Co]','Rh]','Ir]','Mt]','Sm]','Pu]',\n 'Ni]','Pd]','Pt]','Ds]','Eu]','Am]',\n 'Cu]','Ag]','Au]','Rg]','Gd]','Cm]',\n 'Zn]','Cd]','Hg]','Cn]','Tb]','Bk]',\n 'Al]','Ga]','In]','Tl]','Nh]','Dy]','Cf]',\n 'Si]','Ge]','Sn]','Pb]','Fl]','Ho]','Es]',\n 'As]','Sb]','Bi]','Mc]','Er]','Fm]',\n 
'Se]','Te]','Po]','Lv]','Tm]','Md]',\n 'Br','I','At]','Ts]','Yb]','No]',\n 'He]','Ne]','Ar]','Kr]','Xe]','Rn]',\n 'Og]','Lu]','Lr]']\n \n #'.[Li','.[Na','.[K','.[Rb','.[Cs','.[Fr',\n #'.[Be','.[Mg','.[Ca','.[Sr', '.[Ba', '.[Ra',\n #'.[Sc','.[Y',\n #'.[Ti','.[Zr','.[Hf','.[Rf','.[La','.[Ac',\n #'.[V','.[Nb','.[Ta','.[Db','.[Ce','.[Th',\n #'.[Cr','.[Mo','.[W','.[Sg','.[Pr','.[Pa',\n #'.[Mn','.[Tc','.[Re','.[Bh','.[Nd','.[U',\n #'.[Fe','.[Ru','.[Os','.[Hs','.[Pm','.[Np',\n #'.[Co','.[Rh','.[Ir','.[Mt','.[Sm','.[Pu',\n #'.[Ni','.[Pd','.[Pt','.[Ds','.[Eu','.[Am',\n #'.[Cu','.[Ag','.[Au','.[Rg','.[Gd','.[Cm',\n #'.[Zn','.[Cd','.[Hg','.[Cn','.[Tb','.[Bk',\n #'.[B','.[Al','.[Ga','.[In','.[Tl','.[Nh','.[Dy','.[Cf',\n #'.[Si','.[Ge','.[Sn','.[Pb','.[Fl','.[Ho','.[Es',\n #'.[As','.[Sb','.[Bi','.[Mc','.[Er','.[Fm',\n #'.[Se','.[Te','.[Po','.[Lv','.[Tm','.[Md',\n #'.[Br','.[I','.[At','.[Ts','.[Yb','.[No',\n #'.[He','.[Ne','.[Ar','.[Kr','.[Xe','.[Rn',\n #'.[Og','.[Lu','.[Lr','.[b']\n\n\n#Eliminating any molecules that contain atoms other than:\n#C,H,O,N,S,F,Cl,P\n\n#Capturing \"Bad\" Molecules as a subset\nSmile_mol_Captured = []\n\n#If Further Filtering Needed\n\nfor j in range(len(Smile_mol_FULL)):\n for i in range(len(InorganicPeriodicTable)):\n if InorganicPeriodicTable[i] in Smile_mol_FULL[j]:\n Smile_mol_Captured.append(Smile_mol_FULL[j])\n \n#Re-filtering to target leftover valid configurations\n \n#Removing \"Bad\" Molecules from the original superset \n\nSmile_mol_Filtered = [m for m in Smile_mol_FULL if m not in Smile_mol_Captured]\n\n#Checking if properly filtered - Undesirable Atoms Seperated\n\n#Check = [i for i in Smile_mol_Filtered if i in Smile_mol_Captured]\n\n#Convert to Mol Data Structures\n\n#Original\n#Mol_From_Smile_FULL = [Chem.MolFromSmiles(m) for m in Smile_mol_FULL]\n\nMol_From_Smile_FULL = [Chem.MolFromSmiles(m) for m in Smile_mol_Filtered]\n\n\n#Mol_From_Smile_FULL --> Now Smile_mol_Filtered\nLipinski_Over_500 = []\nLipinski_Over_500_SMILE = []\nfor i in range(len(Smile_mol_Filtered)):\n if Chem.Descriptors.ExactMolWt(Mol_From_Smile_FULL[i]) > 500:\n Lipinski_Over_500.append(Mol_From_Smile_FULL[i])\n Lipinski_Over_500_SMILE.append(Smile_mol_FULL[i])\n\n#Remove 500+ Dalton Molecules from set\n\n#Preserve Smile Structures for Visualization if desired\n \nSmile_mol_Filtered_Lipinski = [m for m in Smile_mol_FULL if m not in Lipinski_Over_500_SMILE]\n\n#Data File (prior to conversion to Fingerprint)\n\nmol_Filtered_Lipinski = [m for m in Mol_From_Smile_FULL if m not in Lipinski_Over_500]\n\n#Generating Array Containing Strings of Known Molecules with respect to SMILES Representation\n\n\n\n#We use PubChem Framework to do this\n\n#Convert to Molecular Fingerprint Format\n\n\n#Mol_From_Smile_FULL --> mol_Filtered_Lipinski\nfinTanArray = [FingerprintMols.FingerprintMol(x) for x in mol_Filtered_Lipinski]\n\n#Molecule Sequence (List) Length\nMSL = len(finTanArray)\nMSL_String = str(MSL)\n\n#Generating Tanimoto Distance Matrix\n\nTDA = []\nfor i in range(MSL):\n for j in range(MSL):\n TDA.append(1 - DataStructs.FingerprintSimilarity(finTanArray[i], finTanArray[j]))\n\n#This produces as a single MSL x 1 list : We transform --> into a MSL x MSL matrix\n \nTDM_list = np.array(TDA)\nTDM_matdim = (MSL, MSL)\nTDM = TDM_list.reshape(TDM_matdim)\n\n\n\n########### K-medoid Clustering ##############\n\n#Computing K-medoids based on Tanimoto Distance\n\n#Randomly select K as the medoids for n data points\n\n\n##### Select Hyperparameters #####\n\n#K clusters\nn = 10\n\n#Iterations\nIters = 
10\n\n##### ^Select Hyperparameters^ #####\n\n\n\n\n#Initial K_Medoid Computation\nK_medoid= random.sample(finTanArray, n)\n\nTanDist = []\nfor i in range(len(K_medoid)):\n for j in range(MSL):\n TanDist.append(1 - DataStructs.FingerprintSimilarity(K_medoid[i], finTanArray[j]))\n\n\n#Computing K-Medoid Distance Sums\n \nTanDistDim = (n,MSL)\n\nTanDistMat = np.reshape(TanDist,TanDistDim)\n\nTanDistSums = np.matmul(TanDistMat,np.ones(MSL))\n\nK_medoid_new = random.sample(finTanArray, n)\n\n\n#Swapping with non-medoid data points\nTanDistNew = []\nfor i in range(len(K_medoid_new)):\n for j in range(MSL):\n TanDistNew.append(1 - DataStructs.FingerprintSimilarity(K_medoid_new[i], finTanArray[j]))\n\nTanDistDimNew = (n,MSL)\n\nTanDistMatNew = np.reshape(TanDistNew,TanDistDimNew)\n\nTanDistSumsNew = np.matmul(TanDistMatNew,np.ones(MSL))\n\n#Updating the Medoids - Multiple Iterations\n\nfor p in range(Iters):\n \n #Updating the Medoids - 1 Iteration \n TanDistCheckArray = TanDistSums > TanDistSumsNew\n\n IndicesImprove = np.where(TanDistCheckArray)[0]\n\n for i in range(len(TanDistCheckArray)):\n if i in IndicesImprove:\n K_medoid[i] = K_medoid_new[i]\n\n\n\n\n#Labelling the clusters\n\nClusterCheck = []\nClustersLabels = []\nfor j in range(MSL):\n for i in range(len(K_medoid)):\n ClusterCheck.append(1 - DataStructs.FingerprintSimilarity(K_medoid[i], finTanArray[j]))\n \n \nClusterDim = (MSL,n) \nClusterMat = np.reshape(ClusterCheck, ClusterDim)\n\n#--------------------------------------------------\n\n#Assigning Data Points to Medoids\nfor j in range(MSL):\n ClustersLabels.append(np.argmin(ClusterMat[j,:]))\n \n#Making Cluster Label Matrix - Cluster Labels appended with Molecules\n \nClusterJoin = np.column_stack((ClustersLabels,mol_Filtered_Lipinski))\n\n#Calling Desired Cluster (Starting with 0th) - Change as necessary in IDE\n\nChoke = []\nfor i in range(MSL):\n if ClusterJoin[i][0] == 0:\n Choke.append(ClusterJoin[i])\n\n\n\n\n##Filter by further criteria - see below for individual functions - 10 Samples Only based on Position\n#Analyte1 = Choke[0][1]\n#print('Molecular Weight of Analyte 1:', Chem.Descriptors.ExactMolWt(Analyte1))\n#print('Log P Value of Analyte 1:', Chem.Descriptors.MolLogP(Analyte1))\n#print('Polar Surface Area of Analyte 1:', Chem.Descriptors.TPSA(Analyte1))\n#print('H-bond Donors of Analyte 1:', Chem.rdMolDescriptors.CalcNumHBD(Analyte1))\n#print('(Lipinski) H-bond Donors of Analyte 1:', Chem.rdMolDescriptors.CalcNumLipinskiHBD(Analyte1))\n#print('H-bond Acceptors of Analyte 1:', Chem.rdMolDescriptors.CalcNumHBA(Analyte1))\n#print('(Lipinski) H-bond Acceptors of Analyte 1:', Chem.rdMolDescriptors.CalcNumLipinskiHBA(Analyte1))\n#print('Number of Rotatable Bonds of Analyte 1:', Chem.rdMolDescriptors.CalcNumRotatableBonds(Analyte1))\n#Analyte1\n#\n#Analyte2 = Choke[2][1]\n#print('Molecular Weight of Analyte 2:', Chem.Descriptors.ExactMolWt(Analyte2))\n#print('Log P Value of Analyte 2:', Chem.Descriptors.MolLogP(Analyte2))\n#print('Polar Surface Area of Analyte 2:', Chem.Descriptors.TPSA(Analyte2))\n#print('H-bond Donors of Analyte 2:', Chem.rdMolDescriptors.CalcNumHBD(Analyte2))\n#print('(Lipinski) H-bond Donors of Analyte 2:', Chem.rdMolDescriptors.CalcNumLipinskiHBD(Analyte2))\n#print('H-bond Acceptors of Analyte 2:', Chem.rdMolDescriptors.CalcNumHBA(Analyte2))\n#print('(Lipinski) H-bond Acceptors of Analyte 2:', Chem.rdMolDescriptors.CalcNumLipinskiHBA(Analyte2))\n#print('Number of Rotatable Bonds of Analyte 2:', 
Chem.rdMolDescriptors.CalcNumRotatableBonds(Analyte2))\n#Analyte2\n#\n#Analyte3 = Choke[4][1]\n#print('Molecular Weight of Analyte 3:', Chem.Descriptors.ExactMolWt(Analyte3))\n#print('Log P Value of Analyte 3:', Chem.Descriptors.MolLogP(Analyte3))\n#print('Polar Surface Area of Analyte 3:', Chem.Descriptors.TPSA(Analyte3))\n#print('H-bond Donors of Analyte 3:', Chem.rdMolDescriptors.CalcNumHBD(Analyte3))\n#print('(Lipinski) H-bond Donors of Analyte 3:', Chem.rdMolDescriptors.CalcNumLipinskiHBD(Analyte3))\n#print('H-bond Acceptors of Analyte 3:', Chem.rdMolDescriptors.CalcNumHBA(Analyte3))\n#print('(Lipinski) H-bond Acceptors of Analyte 3:', Chem.rdMolDescriptors.CalcNumLipinskiHBA(Analyte3))\n#print('Number of Rotatable Bonds of Analyte 3:', Chem.rdMolDescriptors.CalcNumRotatableBonds(Analyte3))\n#Analyte3\n#\n#Analyte4 = Choke[6][1]\n#print('Molecular Weight of Analyte 4:', Chem.Descriptors.ExactMolWt(Analyte4))\n#print('Log P Value of Analyte 4:', Chem.Descriptors.MolLogP(Analyte4))\n#print('Polar Surface Area of Analyte 4:', Chem.Descriptors.TPSA(Analyte4))\n#print('H-bond Donors of Analyte 4:', Chem.rdMolDescriptors.CalcNumHBD(Analyte4))\n#print('(Lipinski) H-bond Donors of Analyte 4:', Chem.rdMolDescriptors.CalcNumLipinskiHBD(Analyte4))\n#print('H-bond Acceptors of Analyte 4:', Chem.rdMolDescriptors.CalcNumHBA(Analyte4))\n#print('(Lipinski) H-bond Acceptors of Analyte 4:', Chem.rdMolDescriptors.CalcNumLipinskiHBA(Analyte4))\n#print('Number of Rotatable Bonds of Analyte 4:', Chem.rdMolDescriptors.CalcNumRotatableBonds(Analyte4))\n#Analyte4\n#\n#Analyte5 = Choke[int(np.floor(len(Choke)/2) - 2)][1]\n#print('Molecular Weight of Analyte 5:', Chem.Descriptors.ExactMolWt(Analyte5))\n#print('Log P Value of Analyte 5:', Chem.Descriptors.MolLogP(Analyte5))\n#print('Polar Surface Area of Analyte 5:', Chem.Descriptors.TPSA(Analyte5))\n#print('H-bond Donors of Analyte 5:', Chem.rdMolDescriptors.CalcNumHBD(Analyte5))\n#print('(Lipinski) H-bond Donors of Analyte 5:', Chem.rdMolDescriptors.CalcNumLipinskiHBD(Analyte5))\n#print('H-bond Acceptors of Analyte 5:', Chem.rdMolDescriptors.CalcNumHBA(Analyte5))\n#print('(Lipinski) H-bond Acceptors of Analyte 5:', Chem.rdMolDescriptors.CalcNumLipinskiHBA(Analyte5))\n#print('Number of Rotatable Bonds of Analyte 5:', Chem.rdMolDescriptors.CalcNumRotatableBonds(Analyte5))\n#Analyte5\n#\n#Analyte6 = Choke[int(len(Choke)/2)][1]\n#print('Molecular Weight of Analyte 6:', Chem.Descriptors.ExactMolWt(Analyte6))\n#print('Log P Value of Analyte 6:', Chem.Descriptors.MolLogP(Analyte6))\n#print('Polar Surface Area of Analyte 6:', Chem.Descriptors.TPSA(Analyte6))\n#print('H-bond Donors of Analyte 6:', Chem.rdMolDescriptors.CalcNumHBD(Analyte6))\n#print('(Lipinski) H-bond Donors of Analyte 6:', Chem.rdMolDescriptors.CalcNumLipinskiHBD(Analyte6))\n#print('H-bond Acceptors of Analyte 6:', Chem.rdMolDescriptors.CalcNumHBA(Analyte6))\n#print('(Lipinski) H-bond Acceptors of Analyte 6:', Chem.rdMolDescriptors.CalcNumLipinskiHBA(Analyte6))\n#print('Number of Rotatable Bonds of Analyte 6:', Chem.rdMolDescriptors.CalcNumRotatableBonds(Analyte6))\n#Analyte6\n#\n#Analyte7 = Choke[int(len(Choke)/2 + 2)][1]\n#print('Molecular Weight of Analyte 7:', Chem.Descriptors.ExactMolWt(Analyte7))\n#print('Log P Value of Analyte 7:', Chem.Descriptors.MolLogP(Analyte7))\n#print('Polar Surface Area of Analyte 7:', Chem.Descriptors.TPSA(Analyte7))\n#print('H-bond Donors of Analyte 7:', Chem.rdMolDescriptors.CalcNumHBD(Analyte7))\n#print('(Lipinski) H-bond Donors of Analyte 7:', 
Chem.rdMolDescriptors.CalcNumLipinskiHBD(Analyte7))\n#print('H-bond Acceptors of Analyte 7:', Chem.rdMolDescriptors.CalcNumHBA(Analyte7))\n#print('(Lipinski) H-bond Acceptors of Analyte 7:', Chem.rdMolDescriptors.CalcNumLipinskiHBA(Analyte7))\n#print('Number of Rotatable Bonds of Analyte 7:', Chem.rdMolDescriptors.CalcNumRotatableBonds(Analyte7))\n#Analyte7\n#\n#Analyte8 = Choke[-4][1]\n#print('Molecular Weight of Analyte 8:', Chem.Descriptors.ExactMolWt(Analyte8))\n#print('Log P Value of Analyte 8:', Chem.Descriptors.MolLogP(Analyte8))\n#print('Polar Surface Area of Analyte 8:', Chem.Descriptors.TPSA(Analyte8))\n#print('H-bond Donors of Analyte 8:', Chem.rdMolDescriptors.CalcNumHBD(Analyte8))\n#print('(Lipinski) H-bond Donors of Analyte 8:', Chem.rdMolDescriptors.CalcNumLipinskiHBD(Analyte8))\n#print('H-bond Acceptors of Analyte 8:', Chem.rdMolDescriptors.CalcNumHBA(Analyte8))\n#print('(Lipinski) H-bond Acceptors of Analyte 8:', Chem.rdMolDescriptors.CalcNumLipinskiHBA(Analyte8))\n#print('Number of Rotatable Bonds of Analyte 8:', Chem.rdMolDescriptors.CalcNumRotatableBonds(Analyte8))\n#Analyte8\n#\n#Analyte9 = Choke[-2][1]\n#print('Molecular Weight of Analyte 9:', Chem.Descriptors.ExactMolWt(Analyte9))\n#print('Log P Value of Analyte 9:', Chem.Descriptors.MolLogP(Analyte9))\n#print('Polar Surface Area of Analyte 9:', Chem.Descriptors.TPSA(Analyte9))\n#print('H-bond Donors of Analyte 9:', Chem.rdMolDescriptors.CalcNumHBD(Analyte9))\n#print('(Lipinski) H-bond Donors of Analyte 9:', Chem.rdMolDescriptors.CalcNumLipinskiHBD(Analyte9))\n#print('H-bond Acceptors of Analyte 9:', Chem.rdMolDescriptors.CalcNumHBA(Analyte9))\n#print('(Lipinski) H-bond Acceptors of Analyte 9:', Chem.rdMolDescriptors.CalcNumLipinskiHBA(Analyte9))\n#print('Number of Rotatable Bonds of Analyte 9:', Chem.rdMolDescriptors.CalcNumRotatableBonds(Analyte9))\n#Analyte9\n#\n#Analyte10 = Choke[-1][1]\n#print('Molecular Weight of Analyte 10:', Chem.Descriptors.ExactMolWt(Analyte10))\n#print('Log P Value of Analyte 10:', Chem.Descriptors.MolLogP(Analyte10))\n#print('Polar Surface Area of Analyte 10:', Chem.Descriptors.TPSA(Analyte10))\n#print('H-bond Donors of Analyte 10:', Chem.rdMolDescriptors.CalcNumHBD(Analyte10))\n#print('(Lipinski) H-bond Donors of Analyte 10:', Chem.rdMolDescriptors.CalcNumLipinskiHBD(Analyte10))\n#print('H-bond Acceptors of Analyte 10:', Chem.rdMolDescriptors.CalcNumHBA(Analyte10))\n#print('(Lipinski) H-bond Acceptors of Analyte 10:', Chem.rdMolDescriptors.CalcNumLipinskiHBA(Analyte10))\n#print('Number of Rotatable Bonds of Analyte 10:', Chem.rdMolDescriptors.CalcNumRotatableBonds(Analyte10))\n#Analyte10\n\n#ShaperFull = np.shape(Choke)\n#ShaperTarget = ShaperFull[0]\n#print('The given cluster has', ShaperTarget, 'molecules')\n\n#Example Label in IDE: #K-medoids clustering (50 clusters) [0 - 49] : Cluster 2\n\n######### Molecular Description Tools for Calculation #####################\n\n#MOLECULAR_WEIGHT \n#Chem.Descriptors.ExactMolWt(mol)\n\n\n#JCHEM_DONOR_COUNT \n#rdkit.Chem.rdMolDescriptors.CalcNumHBD((Mol)mol) → int :\n#rdkit.Chem.rdMolDescriptors.CalcNumLipinskiHBD((Mol)mol) → int :\n#Chem.Descriptors.NumHDonors\n\n#ALOGPS_LOGP\n#rdkit.Chem.rdMolDescriptors.CalcCrippenDescriptors((Mol)mol[, (bool)includeHs=True[, (bool)force=False]]) → tuple :\n#Chem.Descriptors.MolLogP <-- Use this one\n\n#JCHEM_POLAR_SURFACE_AREA\n#Chem.Descriptors.TPSA\n#Chem.rdMolDescriptors.CalcTPSA\n\n#JCHEM_ACCEPTOR_COUNT\n\n#rdkit.Chem.rdMolDescriptors.CalcNumHBA((Mol)mol) → int 
:\n\n#rdkit.Chem.rdMolDescriptors.CalcNumLipinskiHBA((Mol)mol) → int :\n \n#Chem.Descriptors.NumHAcceptors\n\n#>>> from rdkit.Chem import Descriptors\n#>>> m = Chem.MolFromSmiles('c1ccccc1C(=O)O')\n#>>> Descriptors.TPSA(m)\n#37.3\n#>>> Descriptors.MolLogP(m)\n#1.3848\n \n\n########### Manifold Learning Methods ##############\n\n\n\n#3D Plotting Example\n#fig = plt.figure()\n#ax = fig.add_subplot(111, projection='3d')\n#ax.scatter([x for x,y,z in coords3D], [y for x,y,z in coords3D],[z for x,y,z in coords3D], \n#c = ClustersLabels, cmap=plt.cm.Spectral)\n\n#fig = plt.figure()\n#ax = fig.add_subplot(111, projection='3d')\n#ax.scatter(DX_TSNE, DY_TSNE,DZ_TSNE, c=ClustersLabels, cmap=plt.cm.Spectral)\n\n#Multidimensional Scaling using TDM (2 Dimensions)\n\n#MDS = manifold.MDS(n_components=2, dissimilarity=\"precomputed\", random_state=3, n_jobs = 4, verbose=1,max_iter=1000)\n#results = MDS.fit(TDM)\n#coords = results.embedding_\n#print('Final stress:',MDS.stress_)\n\n\n#Multidimensional Scaling using TDM (3 Dimensions)\n\n##MDS3D = manifold.MDS(n_components=3, dissimilarity=\"precomputed\", random_state=3, n_jobs = 4, verbose=1,max_iter=1000)\n##results3D = MDS3D.fit(TDM)\n##coords3D = results3D.embedding_\n##print('Final stress:',MDS3D.stress_)\n\n\n#t-Stochastic Nearest Embedding using TDM (2D) ---------------------------------------\n\n#TDM_TSNE = manifold.TSNE(n_components=2,metric = \"precomputed\").fit_transform(TDM)\n\n#To view all these methods in 2D use this analogous method\n\n#X_coord_TSNE = [x for x,y in TDM_TSNE]\n\n#Y_coord_TSNE = [y for x,y in TDM_TSNE]\n\n##TDM_TSNE = manifold.TSNE(n_components=3,metric = \"precomputed\").fit_transform(TDM)\n\n##DX_TSNE = [x for x,y,z in TDM_TSNE]\n\n##DY_TSNE = [y for x,y,z in TDM_TSNE]\n\n##DZ_TSNE = [z for x,y,z in TDM_TSNE]\n\n#plt.scatter(X_coord_TSNE, Y_coord_TSNE, c = np.arange(MSL), cmap=plt.cm.hot)\n\n#---------------------------------------------\n\n\n\n\n#Isomap using TDM (2D)\n\n#TDM_Iso_Embed = manifold.Isomap(n_components=2)\n#TDM_Iso_transformed = TDM_Iso_Embed.fit_transform(TDM)\n#plt.scatter([x for x,y in TDM_Iso_transformed], [y for x,y in TDM_Iso_transformed],edgecolors='none')\n\n\n#Locally Linear Embedding using TDM (2D)\n\n#TDM_LLE_Embed = manifold.LocallyLinearEmbedding(n_components=2)\n#TDM_LLE_transformed = TDM_LLE_Embed.fit_transform(TDM)\n#plt.scatter([x for x,y in TDM_LLE_transformed], [y for x,y in TDM_LLE_transformed],edgecolors='none')\n\n\n\n#Principal Component Analysis using TDM - Possibly to be used later\n\n\n\n\n#Examining - For later use possibly\n\n#Redesigned distance matrix based on 1/S - 1 with tolerance, where S is similarity\n#TSM = 1 - TDM\n#eps = 1e-4\n#customDM = 1/(TSM + eps) - 1/(1+eps)\n#\n#\n#MDS2 = manifold.MDS(n_components=2, dissimilarity=\"precomputed\", random_state=3, n_jobs = 4, verbose=1,max_iter=1000)\n#results2 = MDS2.fit(customDM)\n#coords2 = results2.embedding_\n#print('Final stress:',MDS2.stress_)\n\n\n#Using Distance Matrix given non-metric status (To accomodate)\n\n\n\n#Frequency Analysis of TDM\n#sns.set(color_codes=True)\n#sns.distplot(TDM_list) \n\n#Note: We use the array and not the matrix to avoid multi-dimensional array issues\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#Check all - Plotting Methods - To get meaningful color maps\n\n#plt.scatter([x for x,y in TDM_TSNE], [y for x,y in TDM_TSNE],edgecolors='none')\n\n#plt.scatter([x for x,y in TDM_TSNE], [y for x,y in TDM_TSNE],c='hot', cmap=plt.cm.Spectral)\n\n#c = colors\n\n\n#Examining Indices that correspond to the SMILE 
Representation against the FingerPrint Representation\n\n#K_medoid_Smile = #[Chem.MolToSmiles(m) for m in K_medoid]\n\n#for j in range(MSL):\n# for i in range(len(K_medoid)):\n\n#Min = min(TanDistSums)\n\n#KMinIndex = min(enumerate(TanDistSums), key=itemgetter(1))[0] \n \n#K_medoid_Update = K_medoid[KMinIndex] \n\n\n#if K_medoid[i] != finTanArray[j]:\n\n\n\n\n#TanMedMat = np.reshape(TanDist, len(K_medoid),len(finTanArray)- len(K_medoid))\n\n\n\n \n#for i in range(len(K_medoid)):\n \n\n\n\n\n#Updating the medoid\n\n#for i in range(len(K_medoid)):\n# if Tandist[i] = min(TanDist):\n# K_medoid\n \n#Convergence Criteria for K-medoid\n \n\n\n#Computing SC Clustering with Medoids\n\n#-----------------------------------------------------\n \n#i = 0\n#j = 0\n#while(i < len(Smile_mol_Filtered)):\n# while(j < len(InorganicPeriodicTable)):\n# if InorganicPeriodicTable[i] in Smile_mol_Filtered[j]:\n# Smile_mol_Filtered.remove(Smile_mol_Filtered[j])\n# j = j + 1\n# i = i + 1 \n #Takes too long to complete\n\n\n\n\n \n" ]
[ [ "numpy.array", "numpy.reshape", "numpy.argmin", "numpy.ones", "numpy.where", "numpy.column_stack" ] ]
zhangyujing/tensorflow
[ "c7a04561fb8972fb64907acc5f10f3c6d4cef9f2", "8e86dcd1c59bb3f1dc978fcb5398dd3f2f51d9ad", "c7a04561fb8972fb64907acc5f10f3c6d4cef9f2" ]
[ "tensorflow/python/keras/_impl/keras/layers/recurrent.py", "tensorflow/contrib/nccl/python/ops/nccl_ops.py", "tensorflow/contrib/autograph/converters/builtin_functions.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Recurrent layers and their base classes.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numbers\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras._impl.keras import activations\nfrom tensorflow.python.keras._impl.keras import backend as K\nfrom tensorflow.python.keras._impl.keras import constraints\nfrom tensorflow.python.keras._impl.keras import initializers\nfrom tensorflow.python.keras._impl.keras import regularizers\nfrom tensorflow.python.keras._impl.keras.engine import InputSpec\nfrom tensorflow.python.keras._impl.keras.engine import Layer\nfrom tensorflow.python.keras._impl.keras.engine.base_layer import shape_type_conversion\nfrom tensorflow.python.keras._impl.keras.utils.generic_utils import has_arg\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export('keras.layers.StackedRNNCells')\nclass StackedRNNCells(Layer):\n \"\"\"Wrapper allowing a stack of RNN cells to behave as a single cell.\n\n Used to implement efficient stacked RNNs.\n\n Arguments:\n cells: List of RNN cell instances.\n\n Examples:\n\n ```python\n cells = [\n keras.layers.LSTMCell(output_dim),\n keras.layers.LSTMCell(output_dim),\n keras.layers.LSTMCell(output_dim),\n ]\n\n inputs = keras.Input((timesteps, input_dim))\n x = keras.layers.RNN(cells)(inputs)\n ```\n \"\"\"\n\n def __init__(self, cells, **kwargs):\n for cell in cells:\n if not hasattr(cell, 'call'):\n raise ValueError('All cells must have a `call` method. '\n 'received cells:', cells)\n if not hasattr(cell, 'state_size'):\n raise ValueError('All cells must have a '\n '`state_size` attribute. '\n 'received cells:', cells)\n self.cells = cells\n super(StackedRNNCells, self).__init__(**kwargs)\n\n @property\n def state_size(self):\n # States are a flat list\n # in reverse order of the cell stack.\n # This allows to preserve the requirement\n # `stack.state_size[0] == output_dim`.\n # e.g. 
states of a 2-layer LSTM would be\n # `[h2, c2, h1, c1]`\n # (assuming one LSTM has states [h, c])\n state_size = []\n for cell in self.cells[::-1]:\n if hasattr(cell.state_size, '__len__'):\n state_size += list(cell.state_size)\n else:\n state_size.append(cell.state_size)\n return tuple(state_size)\n\n def call(self, inputs, states, constants=None, **kwargs):\n # Recover per-cell states.\n nested_states = []\n for cell in self.cells[::-1]:\n if hasattr(cell.state_size, '__len__'):\n nested_states.append(states[:len(cell.state_size)])\n states = states[len(cell.state_size):]\n else:\n nested_states.append([states[0]])\n states = states[1:]\n nested_states = nested_states[::-1]\n\n # Call the cells in order and store the returned states.\n new_nested_states = []\n for cell, states in zip(self.cells, nested_states):\n if has_arg(cell.call, 'constants'):\n inputs, states = cell.call(inputs, states, constants=constants,\n **kwargs)\n else:\n inputs, states = cell.call(inputs, states, **kwargs)\n\n new_nested_states.append(states)\n\n # Format the new states as a flat list\n # in reverse cell order.\n states = []\n for cell_states in new_nested_states[::-1]:\n states += cell_states\n return inputs, states\n\n @shape_type_conversion\n def build(self, input_shape):\n if isinstance(input_shape, list):\n constants_shape = input_shape[1:]\n input_shape = input_shape[0]\n for cell in self.cells:\n if isinstance(cell, Layer):\n if has_arg(cell.call, 'constants'):\n cell.build([input_shape] + constants_shape)\n else:\n cell.build(input_shape)\n if hasattr(cell.state_size, '__len__'):\n output_dim = cell.state_size[0]\n else:\n output_dim = cell.state_size\n input_shape = (input_shape[0], output_dim)\n self.built = True\n\n def get_config(self):\n cells = []\n for cell in self.cells:\n cells.append({\n 'class_name': cell.__class__.__name__,\n 'config': cell.get_config()\n })\n config = {'cells': cells}\n base_config = super(StackedRNNCells, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n from tensorflow.python.keras._impl.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top\n cells = []\n for cell_config in config.pop('cells'):\n cells.append(\n deserialize_layer(cell_config, custom_objects=custom_objects))\n return cls(cells, **config)\n\n @property\n def trainable_weights(self):\n if not self.trainable:\n return []\n weights = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n weights += cell.trainable_weights\n return weights\n\n @property\n def non_trainable_weights(self):\n weights = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n weights += cell.non_trainable_weights\n if not self.trainable:\n trainable_weights = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n trainable_weights += cell.trainable_weights\n return trainable_weights + weights\n return weights\n\n def get_weights(self):\n \"\"\"Retrieves the weights of the model.\n\n Returns:\n A flat list of Numpy arrays.\n \"\"\"\n weights = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n weights += cell.weights\n return K.batch_get_value(weights)\n\n def set_weights(self, weights):\n \"\"\"Sets the weights of the model.\n\n Arguments:\n weights: A list of Numpy arrays with shapes and types matching\n the output of `model.get_weights()`.\n \"\"\"\n tuples = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n num_param = len(cell.weights)\n weights = 
weights[:num_param]\n for sw, w in zip(cell.weights, weights):\n tuples.append((sw, w))\n weights = weights[num_param:]\n K.batch_set_value(tuples)\n\n @property\n def losses(self):\n losses = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n losses += cell.losses\n return losses + self._losses\n\n @property\n def updates(self):\n updates = []\n for cell in self.cells:\n if isinstance(cell, Layer):\n updates += cell.updates\n return updates + self._updates\n\n\n@tf_export('keras.layers.RNN')\nclass RNN(Layer):\n \"\"\"Base class for recurrent layers.\n\n Arguments:\n cell: A RNN cell instance. A RNN cell is a class that has:\n - a `call(input_at_t, states_at_t)` method, returning\n `(output_at_t, states_at_t_plus_1)`. The call method of the\n cell can also take the optional argument `constants`, see\n section \"Note on passing external constants\" below.\n - a `state_size` attribute. This can be a single integer\n (single state) in which case it is\n the size of the recurrent state\n (which should be the same as the size of the cell output).\n This can also be a list/tuple of integers\n (one size per state). In this case, the first entry\n (`state_size[0]`) should be the same as\n the size of the cell output.\n It is also possible for `cell` to be a list of RNN cell instances,\n in which cases the cells get stacked on after the other in the RNN,\n implementing an efficient stacked RNN.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n input_dim: dimensionality of the input (integer).\n This argument (or alternatively,\n the keyword argument `input_shape`)\n is required when using this layer as the first layer in a model.\n input_length: Length of input sequences, to be specified\n when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n Note that if the recurrent layer is not the first layer\n in your model, you would need to specify the input length\n at the level of the first layer\n (e.g. via the `input_shape` argument)\n\n Input shape:\n 3D tensor with shape `(batch_size, timesteps, input_dim)`.\n\n Output shape:\n - if `return_state`: a list of tensors. The first tensor is\n the output. The remaining tensors are the last states,\n each with shape `(batch_size, units)`.\n - if `return_sequences`: 3D tensor with shape\n `(batch_size, timesteps, units)`.\n - else, 2D tensor with shape `(batch_size, units)`.\n\n # Masking\n This layer supports masking for input data with a variable number\n of timesteps. 
To introduce masks to your data,\n use an [Embedding](embeddings.md) layer with the `mask_zero` parameter\n set to `True`.\n\n # Note on using statefulness in RNNs\n You can set RNN layers to be 'stateful', which means that the states\n computed for the samples in one batch will be reused as initial states\n for the samples in the next batch. This assumes a one-to-one mapping\n between samples in different successive batches.\n\n To enable statefulness:\n - specify `stateful=True` in the layer constructor.\n - specify a fixed batch size for your model, by passing\n if sequential model:\n `batch_input_shape=(...)` to the first layer in your model.\n else for functional model with 1 or more Input layers:\n `batch_shape=(...)` to all the first layers in your model.\n This is the expected shape of your inputs\n *including the batch size*.\n It should be a tuple of integers, e.g. `(32, 10, 100)`.\n - specify `shuffle=False` when calling fit().\n\n To reset the states of your model, call `.reset_states()` on either\n a specific layer, or on your entire model.\n\n # Note on specifying the initial state of RNNs\n You can specify the initial state of RNN layers symbolically by\n calling them with the keyword argument `initial_state`. The value of\n `initial_state` should be a tensor or list of tensors representing\n the initial state of the RNN layer.\n\n You can specify the initial state of RNN layers numerically by\n calling `reset_states` with the keyword argument `states`. The value of\n `states` should be a numpy array or list of numpy arrays representing\n the initial state of the RNN layer.\n\n # Note on passing external constants to RNNs\n You can pass \"external\" constants to the cell using the `constants`\n keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This\n requires that the `cell.call` method accepts the same keyword argument\n `constants`. Such constants can be used to condition the cell\n transformation on additional static inputs (not changing over time),\n a.k.a. an attention mechanism.\n\n Examples:\n\n ```python\n # First, let's define a RNN Cell, as a layer subclass.\n\n class MinimalRNNCell(keras.layers.Layer):\n\n def __init__(self, units, **kwargs):\n self.units = units\n self.state_size = units\n super(MinimalRNNCell, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='uniform',\n name='kernel')\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n initializer='uniform',\n name='recurrent_kernel')\n self.built = True\n\n def call(self, inputs, states):\n prev_output = states[0]\n h = K.dot(inputs, self.kernel)\n output = h + K.dot(prev_output, self.recurrent_kernel)\n return output, [output]\n\n # Let's use this cell in a RNN layer:\n\n cell = MinimalRNNCell(32)\n x = keras.Input((None, 5))\n layer = RNN(cell)\n y = layer(x)\n\n # Here's how to use the cell to build a stacked RNN:\n\n cells = [MinimalRNNCell(32), MinimalRNNCell(64)]\n x = keras.Input((None, 5))\n layer = RNN(cells)\n y = layer(x)\n ```\n \"\"\"\n\n def __init__(self,\n cell,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if isinstance(cell, (list, tuple)):\n cell = StackedRNNCells(cell)\n if not hasattr(cell, 'call'):\n raise ValueError('`cell` should have a `call` method. 
'\n 'The RNN was passed:', cell)\n if not hasattr(cell, 'state_size'):\n raise ValueError('The RNN cell should have '\n 'an attribute `state_size` '\n '(tuple of integers, '\n 'one integer per RNN state).')\n super(RNN, self).__init__(**kwargs)\n self.cell = cell\n self.return_sequences = return_sequences\n self.return_state = return_state\n self.go_backwards = go_backwards\n self.stateful = stateful\n self.unroll = unroll\n\n self.supports_masking = True\n self.input_spec = [InputSpec(ndim=3)]\n self.state_spec = None\n self._states = None\n self.constants_spec = None\n self._num_constants = None\n\n @property\n def states(self):\n if self._states is None:\n if isinstance(self.cell.state_size, numbers.Integral):\n num_states = 1\n else:\n num_states = len(self.cell.state_size)\n return [None for _ in range(num_states)]\n return self._states\n\n @states.setter\n def states(self, states):\n self._states = states\n\n @shape_type_conversion\n def compute_output_shape(self, input_shape):\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n\n if hasattr(self.cell.state_size, '__len__'):\n state_size = self.cell.state_size\n else:\n state_size = [self.cell.state_size]\n output_dim = state_size[0]\n\n if self.return_sequences:\n output_shape = (input_shape[0], input_shape[1], output_dim)\n else:\n output_shape = (input_shape[0], output_dim)\n\n if self.return_state:\n state_shape = [(input_shape[0], dim) for dim in state_size]\n return [output_shape] + state_shape\n else:\n return output_shape\n\n def compute_mask(self, inputs, mask):\n if isinstance(mask, list):\n mask = mask[0]\n output_mask = mask if self.return_sequences else None\n if self.return_state:\n state_mask = [None for _ in self.states]\n return [output_mask] + state_mask\n else:\n return output_mask\n\n @shape_type_conversion\n def build(self, input_shape):\n # Note input_shape will be list of shapes of initial states and\n # constants if these are passed in __call__.\n if self._num_constants is not None:\n constants_shape = input_shape[-self._num_constants:] # pylint: disable=invalid-unary-operand-type\n else:\n constants_shape = None\n\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n\n batch_size = input_shape[0] if self.stateful else None\n input_dim = input_shape[-1]\n self.input_spec[0] = InputSpec(shape=(batch_size, None, input_dim))\n\n # allow cell (if layer) to build before we set or validate state_spec\n if isinstance(self.cell, Layer):\n step_input_shape = (input_shape[0],) + input_shape[2:]\n if constants_shape is not None:\n self.cell.build([step_input_shape] + constants_shape)\n else:\n self.cell.build(step_input_shape)\n\n # set or validate state_spec\n if hasattr(self.cell.state_size, '__len__'):\n state_size = list(self.cell.state_size)\n else:\n state_size = [self.cell.state_size]\n\n if self.state_spec is not None:\n # initial_state was passed in call, check compatibility\n if [spec.shape[-1] for spec in self.state_spec] != state_size:\n raise ValueError(\n 'An `initial_state` was passed that is not compatible with '\n '`cell.state_size`. 
Received `state_spec`={}; '\n 'however `cell.state_size` is '\n '{}'.format(self.state_spec, self.cell.state_size))\n else:\n self.state_spec = [InputSpec(shape=(None, dim)) for dim in state_size]\n if self.stateful:\n self.reset_states()\n\n def get_initial_state(self, inputs):\n # build an all-zero tensor of shape (samples, output_dim)\n initial_state = K.zeros_like(inputs) # (samples, timesteps, input_dim)\n initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)\n initial_state = K.expand_dims(initial_state) # (samples, 1)\n if hasattr(self.cell.state_size, '__len__'):\n return [K.tile(initial_state, [1, dim]) for dim in self.cell.state_size]\n else:\n return [K.tile(initial_state, [1, self.cell.state_size])]\n\n def __call__(self, inputs, initial_state=None, constants=None, **kwargs):\n inputs, initial_state, constants = self._standardize_args(\n inputs, initial_state, constants)\n\n if initial_state is None and constants is None:\n return super(RNN, self).__call__(inputs, **kwargs)\n\n # If any of `initial_state` or `constants` are specified and are Keras\n # tensors, then add them to the inputs and temporarily modify the\n # input_spec to include them.\n\n additional_inputs = []\n additional_specs = []\n if initial_state is not None:\n kwargs['initial_state'] = initial_state\n additional_inputs += initial_state\n self.state_spec = [\n InputSpec(shape=K.int_shape(state)) for state in initial_state\n ]\n additional_specs += self.state_spec\n if constants is not None:\n kwargs['constants'] = constants\n additional_inputs += constants\n self.constants_spec = [\n InputSpec(shape=K.int_shape(constant)) for constant in constants\n ]\n self._num_constants = len(constants)\n additional_specs += self.constants_spec\n # at this point additional_inputs cannot be empty\n is_keras_tensor = K.is_keras_tensor(additional_inputs[0])\n for tensor in additional_inputs:\n if K.is_keras_tensor(tensor) != is_keras_tensor:\n raise ValueError('The initial state or constants of an RNN'\n ' layer cannot be specified with a mix of'\n ' Keras tensors and non-Keras tensors'\n ' (a \"Keras tensor\" is a tensor that was'\n ' returned by a Keras layer, or by `Input`)')\n\n if is_keras_tensor:\n # Compute the full input spec, including state and constants\n full_input = [inputs] + additional_inputs\n full_input_spec = self.input_spec + additional_specs\n # Perform the call with temporarily replaced input_spec\n original_input_spec = self.input_spec\n self.input_spec = full_input_spec\n output = super(RNN, self).__call__(full_input, **kwargs)\n self.input_spec = original_input_spec\n return output\n else:\n return super(RNN, self).__call__(inputs, **kwargs)\n\n def call(self,\n inputs,\n mask=None,\n training=None,\n initial_state=None,\n constants=None):\n # input shape: `(samples, time (padded with zeros), input_dim)`\n # note that the .build() method of subclasses MUST define\n # self.input_spec and self.state_spec with complete input shapes.\n if isinstance(inputs, list):\n inputs = inputs[0]\n if initial_state is not None:\n pass\n elif self.stateful:\n initial_state = self.states\n else:\n initial_state = self.get_initial_state(inputs)\n\n if isinstance(mask, list):\n mask = mask[0]\n\n if len(initial_state) != len(self.states):\n raise ValueError(\n 'Layer has ' + str(len(self.states)) + ' states but was passed ' +\n str(len(initial_state)) + ' initial states.')\n input_shape = K.int_shape(inputs)\n timesteps = input_shape[1]\n if self.unroll and timesteps in [None, 1]:\n raise ValueError('Cannot unroll 
a RNN if the '\n 'time dimension is undefined or equal to 1. \\n'\n '- If using a Sequential model, '\n 'specify the time dimension by passing '\n 'an `input_shape` or `batch_input_shape` '\n 'argument to your first layer. If your '\n 'first layer is an Embedding, you can '\n 'also use the `input_length` argument.\\n'\n '- If using the functional API, specify '\n 'the time dimension by passing a `shape` '\n 'or `batch_shape` argument to your Input layer.')\n\n kwargs = {}\n if has_arg(self.cell.call, 'training'):\n kwargs['training'] = training\n\n if constants:\n if not has_arg(self.cell.call, 'constants'):\n raise ValueError('RNN cell does not support constants')\n\n def step(inputs, states):\n constants = states[-self._num_constants:] # pylint: disable=invalid-unary-operand-type\n states = states[:-self._num_constants] # pylint: disable=invalid-unary-operand-type\n return self.cell.call(inputs, states, constants=constants, **kwargs)\n else:\n\n def step(inputs, states):\n return self.cell.call(inputs, states, **kwargs)\n\n last_output, outputs, states = K.rnn(\n step,\n inputs,\n initial_state,\n constants=constants,\n go_backwards=self.go_backwards,\n mask=mask,\n unroll=self.unroll,\n input_length=timesteps)\n if self.stateful:\n updates = []\n for i in range(len(states)):\n updates.append(K.update(self.states[i], states[i]))\n self.add_update(updates, inputs)\n\n if self.return_sequences:\n output = outputs\n else:\n output = last_output\n\n # Properly set learning phase\n if getattr(last_output, '_uses_learning_phase', False):\n output._uses_learning_phase = True\n for state in states:\n state._uses_learning_phase = True\n\n if self.return_state:\n if not isinstance(states, (list, tuple)):\n states = [states]\n else:\n states = list(states)\n return [output] + states\n else:\n return output\n\n def _standardize_args(self, inputs, initial_state, constants):\n \"\"\"Standardize `__call__` to a single list of tensor inputs.\n\n When running a model loaded from file, the input tensors\n `initial_state` and `constants` can be passed to `RNN.__call__` as part\n of `inputs` instead of by the dedicated keyword arguments. This method\n makes sure the arguments are separated and that `initial_state` and\n `constants` are lists of tensors (or None).\n\n Arguments:\n inputs: tensor or list/tuple of tensors\n initial_state: tensor or list of tensors or None\n constants: tensor or list of tensors or None\n\n Returns:\n inputs: tensor\n initial_state: list of tensors or None\n constants: list of tensors or None\n \"\"\"\n if isinstance(inputs, list):\n assert initial_state is None and constants is None\n if self._num_constants is not None:\n constants = inputs[-self._num_constants:] # pylint: disable=invalid-unary-operand-type\n inputs = inputs[:-self._num_constants] # pylint: disable=invalid-unary-operand-type\n if len(inputs) > 1:\n initial_state = inputs[1:]\n inputs = inputs[0]\n\n def to_list_or_none(x):\n if x is None or isinstance(x, list):\n return x\n if isinstance(x, tuple):\n return list(x)\n return [x]\n\n initial_state = to_list_or_none(initial_state)\n constants = to_list_or_none(constants)\n\n return inputs, initial_state, constants\n\n def reset_states(self, states=None):\n if not self.stateful:\n raise AttributeError('Layer must be stateful.')\n batch_size = self.input_spec[0].shape[0]\n if not batch_size:\n raise ValueError('If a RNN is stateful, it needs to know '\n 'its batch size. 
Specify the batch size '\n 'of your input tensors: \\n'\n '- If using a Sequential model, '\n 'specify the batch size by passing '\n 'a `batch_input_shape` '\n 'argument to your first layer.\\n'\n '- If using the functional API, specify '\n 'the batch size by passing a '\n '`batch_shape` argument to your Input layer.')\n # initialize state if None\n if self.states[0] is None:\n if hasattr(self.cell.state_size, '__len__'):\n self.states = [\n K.zeros((batch_size, dim)) for dim in self.cell.state_size\n ]\n else:\n self.states = [K.zeros((batch_size, self.cell.state_size))]\n elif states is None:\n if hasattr(self.cell.state_size, '__len__'):\n for state, dim in zip(self.states, self.cell.state_size):\n K.set_value(state, np.zeros((batch_size, dim)))\n else:\n K.set_value(self.states[0], np.zeros((batch_size,\n self.cell.state_size)))\n else:\n if not isinstance(states, (list, tuple)):\n states = [states]\n if len(states) != len(self.states):\n raise ValueError('Layer ' + self.name + ' expects ' +\n str(len(self.states)) + ' states, '\n 'but it received ' + str(len(states)) +\n ' state values. Input received: ' + str(states))\n for index, (value, state) in enumerate(zip(states, self.states)):\n if hasattr(self.cell.state_size, '__len__'):\n dim = self.cell.state_size[index]\n else:\n dim = self.cell.state_size\n if value.shape != (batch_size, dim):\n raise ValueError(\n 'State ' + str(index) + ' is incompatible with layer ' +\n self.name + ': expected shape=' + str(\n (batch_size, dim)) + ', found shape=' + str(value.shape))\n # TODO(fchollet): consider batch calls to `set_value`.\n K.set_value(state, value)\n\n def get_config(self):\n config = {\n 'return_sequences': self.return_sequences,\n 'return_state': self.return_state,\n 'go_backwards': self.go_backwards,\n 'stateful': self.stateful,\n 'unroll': self.unroll\n }\n if self._num_constants is not None:\n config['num_constants'] = self._num_constants\n\n cell_config = self.cell.get_config()\n config['cell'] = {\n 'class_name': self.cell.__class__.__name__,\n 'config': cell_config\n }\n base_config = super(RNN, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n from tensorflow.python.keras._impl.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top\n cell = deserialize_layer(config.pop('cell'), custom_objects=custom_objects)\n num_constants = config.pop('num_constants', None)\n layer = cls(cell, **config)\n layer._num_constants = num_constants\n return layer\n\n @property\n def trainable_weights(self):\n if not self.trainable:\n return []\n if isinstance(self.cell, Layer):\n return self.cell.trainable_weights\n return []\n\n @property\n def non_trainable_weights(self):\n if isinstance(self.cell, Layer):\n if not self.trainable:\n return self.cell.weights\n return self.cell.non_trainable_weights\n return []\n\n @property\n def losses(self):\n losses = []\n if isinstance(self.cell, Layer):\n losses += self.cell.losses\n return losses + self._losses\n\n @property\n def updates(self):\n updates = []\n if isinstance(self.cell, Layer):\n updates += self.cell.updates\n return updates + self._updates\n\n\n@tf_export('keras.layers.SimpleRNNCell')\nclass SimpleRNNCell(Layer):\n \"\"\"Cell class for SimpleRNN.\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is 
applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n \"\"\"\n\n def __init__(self,\n units,\n activation='tanh',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n **kwargs):\n super(SimpleRNNCell, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n self.state_size = self.units\n self._dropout_mask = None\n self._recurrent_dropout_mask = None\n\n @shape_type_conversion\n def build(self, input_shape):\n self.kernel = self.add_weight(\n shape=(input_shape[-1], self.units),\n name='kernel',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint)\n if self.use_bias:\n self.bias = self.add_weight(\n shape=(self.units,),\n name='bias',\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs, states, training=None):\n prev_output = states[0]\n if 0 < self.dropout < 1 and self._dropout_mask is None:\n self._dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs,\n K.shape(inputs)[-1]),\n self.dropout,\n training=training)\n if (0 < self.recurrent_dropout < 1 
and\n self._recurrent_dropout_mask is None):\n self._recurrent_dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, self.units),\n self.recurrent_dropout,\n training=training)\n\n dp_mask = self._dropout_mask\n rec_dp_mask = self._recurrent_dropout_mask\n\n if dp_mask is not None:\n h = K.dot(inputs * dp_mask, self.kernel)\n else:\n h = K.dot(inputs, self.kernel)\n if self.bias is not None:\n h = K.bias_add(h, self.bias)\n\n if rec_dp_mask is not None:\n prev_output *= rec_dp_mask\n output = h + K.dot(prev_output, self.recurrent_kernel)\n if self.activation is not None:\n output = self.activation(output)\n\n # Properly set learning phase on output tensor.\n if 0 < self.dropout + self.recurrent_dropout:\n if training is None and not context.executing_eagerly():\n # This would be harmless to set in eager mode, but eager tensors\n # disallow setting arbitrary attributes.\n output._uses_learning_phase = True\n return output, [output]\n\n def get_config(self):\n config = {\n 'units':\n self.units,\n 'activation':\n activations.serialize(self.activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'kernel_regularizer':\n regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint),\n 'dropout':\n self.dropout,\n 'recurrent_dropout':\n self.recurrent_dropout\n }\n base_config = super(SimpleRNNCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@tf_export('keras.layers.SimpleRNN')\nclass SimpleRNN(RNN):\n \"\"\"Fully-connected RNN where the output is to be fed back to input.\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass None, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\")..\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n \"\"\"\n\n def __init__(self,\n units,\n activation='tanh',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if 'implementation' in kwargs:\n kwargs.pop('implementation')\n logging.warning('The `implementation` argument '\n 'in `SimpleRNN` has been deprecated. 
'\n 'Please remove it from your layer call.')\n cell = SimpleRNNCell(\n units,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout)\n super(SimpleRNN, self).__init__(\n cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n self.cell._dropout_mask = None\n self.cell._recurrent_dropout_mask = None\n return super(SimpleRNN, self).call(\n inputs, mask=mask, training=training, initial_state=initial_state)\n\n @property\n def units(self):\n return self.cell.units\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return self.cell.recurrent_dropout\n\n def get_config(self):\n config = {\n 'units':\n self.units,\n 'activation':\n activations.serialize(self.activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'kernel_regularizer':\n regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint),\n 'dropout':\n self.dropout,\n 'recurrent_dropout':\n self.recurrent_dropout\n }\n base_config = super(SimpleRNN, self).get_config()\n del base_config['cell']\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n if 'implementation' in config:\n config.pop('implementation')\n return cls(**config)\n\n\n@tf_export('keras.layers.GRUCell')\nclass GRUCell(Layer):\n \"\"\"Cell class for the GRU layer.\n\n Arguments:\n units: Positive 
integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n Default: hard sigmoid (`hard_sigmoid`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications.\n \"\"\"\n\n def __init__(self,\n units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n implementation=1,\n **kwargs):\n super(GRUCell, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n self.implementation = implementation\n self.state_size = self.units\n self._dropout_mask = None\n self._recurrent_dropout_mask = None\n\n @shape_type_conversion\n def build(self, input_shape):\n input_dim = input_shape[-1]\n self.kernel = self.add_weight(\n shape=(input_dim, self.units * 3),\n name='kernel',\n 
initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units * 3),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint)\n\n if self.use_bias:\n self.bias = self.add_weight(\n shape=(self.units * 3,),\n name='bias',\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs, states, training=None):\n h_tm1 = states[0] # previous memory\n\n if 0 < self.dropout < 1 and self._dropout_mask is None:\n self._dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs,\n K.shape(inputs)[-1]),\n self.dropout,\n training=training,\n count=3)\n if (0 < self.recurrent_dropout < 1 and\n self._recurrent_dropout_mask is None):\n self._recurrent_dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, self.units),\n self.recurrent_dropout,\n training=training,\n count=3)\n\n # dropout matrices for input units\n dp_mask = self._dropout_mask\n # dropout matrices for recurrent units\n rec_dp_mask = self._recurrent_dropout_mask\n\n if self.implementation == 1:\n if 0. < self.dropout < 1.:\n inputs_z = inputs * dp_mask[0]\n inputs_r = inputs * dp_mask[1]\n inputs_h = inputs * dp_mask[2]\n else:\n inputs_z = inputs\n inputs_r = inputs\n inputs_h = inputs\n x_z = K.dot(inputs_z, self.kernel[:, :self.units])\n x_r = K.dot(inputs_r, self.kernel[:, self.units:self.units * 2])\n x_h = K.dot(inputs_h, self.kernel[:, self.units * 2:])\n if self.use_bias:\n x_z = K.bias_add(x_z, self.bias[:self.units])\n x_r = K.bias_add(x_r, self.bias[self.units:self.units * 2])\n x_h = K.bias_add(x_h, self.bias[self.units * 2:])\n\n if 0. < self.recurrent_dropout < 1.:\n h_tm1_z = h_tm1 * rec_dp_mask[0]\n h_tm1_r = h_tm1 * rec_dp_mask[1]\n h_tm1_h = h_tm1 * rec_dp_mask[2]\n else:\n h_tm1_z = h_tm1\n h_tm1_r = h_tm1\n h_tm1_h = h_tm1\n z = self.recurrent_activation(\n x_z + K.dot(h_tm1_z, self.recurrent_kernel[:, :self.units]))\n r = self.recurrent_activation(\n x_r + K.dot(h_tm1_r, self.recurrent_kernel[:, self.units:\n self.units * 2]))\n\n hh = self.activation(x_h + K.dot(r * h_tm1_h,\n self.recurrent_kernel[:,\n self.units * 2:]))\n else:\n if 0. < self.dropout < 1.:\n inputs *= dp_mask[0]\n matrix_x = K.dot(inputs, self.kernel)\n if self.use_bias:\n matrix_x = K.bias_add(matrix_x, self.bias)\n if 0. 
< self.recurrent_dropout < 1.:\n h_tm1 *= rec_dp_mask[0]\n matrix_inner = K.dot(h_tm1, self.recurrent_kernel[:, :2 * self.units])\n\n x_z = matrix_x[:, :self.units]\n x_r = matrix_x[:, self.units:2 * self.units]\n recurrent_z = matrix_inner[:, :self.units]\n recurrent_r = matrix_inner[:, self.units:2 * self.units]\n\n z = self.recurrent_activation(x_z + recurrent_z)\n r = self.recurrent_activation(x_r + recurrent_r)\n\n x_h = matrix_x[:, 2 * self.units:]\n recurrent_h = K.dot(r * h_tm1, self.recurrent_kernel[:, 2 * self.units:])\n hh = self.activation(x_h + recurrent_h)\n h = z * h_tm1 + (1 - z) * hh\n if 0 < self.dropout + self.recurrent_dropout:\n if training is None and not context.executing_eagerly():\n # This would be harmless to set in eager mode, but eager tensors\n # disallow setting arbitrary attributes.\n h._uses_learning_phase = True\n return h, [h]\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'recurrent_activation':\n activations.serialize(self.recurrent_activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint),\n 'dropout': self.dropout,\n 'recurrent_dropout': self.recurrent_dropout,\n 'implementation': self.implementation\n }\n base_config = super(GRUCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@tf_export('keras.layers.GRU')\nclass GRU(RNN):\n \"\"\"Gated Recurrent Unit - Cho et al.\n\n 2014.\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n Default: hard sigmoid (`hard_sigmoid`).\n If you pass `None`, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\")..\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n\n \"\"\"\n\n def __init__(self,\n units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n implementation=1,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if implementation == 0:\n logging.warning('`implementation=0` has been deprecated, '\n 'and now defaults to `implementation=1`.'\n 'Please update your layer call.')\n cell = GRUCell(\n units,\n activation=activation,\n recurrent_activation=recurrent_activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout,\n implementation=implementation)\n super(GRU, self).__init__(\n cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n self.cell._dropout_mask = None\n self.cell._recurrent_dropout_mask = None\n return super(GRU, self).call(\n inputs, mask=mask, training=training, initial_state=initial_state)\n\n @property\n def units(self):\n return self.cell.units\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def recurrent_activation(self):\n return self.cell.recurrent_activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return self.cell.recurrent_dropout\n\n @property\n def implementation(self):\n return self.cell.implementation\n\n def get_config(self):\n config = {\n 'units':\n self.units,\n 
'activation':\n activations.serialize(self.activation),\n 'recurrent_activation':\n activations.serialize(self.recurrent_activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'kernel_regularizer':\n regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint),\n 'dropout':\n self.dropout,\n 'recurrent_dropout':\n self.recurrent_dropout,\n 'implementation':\n self.implementation\n }\n base_config = super(GRU, self).get_config()\n del base_config['cell']\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n if 'implementation' in config and config['implementation'] == 0:\n config['implementation'] = 1\n return cls(**config)\n\n\n@tf_export('keras.layers.LSTMCell')\nclass LSTMCell(Layer):\n \"\"\"Cell class for the LSTM layer.\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n Default: hard sigmoid (`hard_sigmoid`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).x\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n unit_forget_bias: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et\n al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. 
These modes will\n have different performance profiles on different hardware and\n for different applications.\n \"\"\"\n\n def __init__(self,\n units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n implementation=1,\n **kwargs):\n super(LSTMCell, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.unit_forget_bias = unit_forget_bias\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n self.implementation = implementation\n self.state_size = (self.units, self.units)\n self._dropout_mask = None\n self._recurrent_dropout_mask = None\n\n @shape_type_conversion\n def build(self, input_shape):\n input_dim = input_shape[-1]\n self.kernel = self.add_weight(\n shape=(input_dim, self.units * 4),\n name='kernel',\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units * 4),\n name='recurrent_kernel',\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint)\n\n if self.use_bias:\n if self.unit_forget_bias:\n\n def bias_initializer(_, *args, **kwargs):\n return K.concatenate([\n self.bias_initializer((self.units,), *args, **kwargs),\n initializers.Ones()((self.units,), *args, **kwargs),\n self.bias_initializer((self.units * 2,), *args, **kwargs),\n ])\n else:\n bias_initializer = self.bias_initializer\n self.bias = self.add_weight(\n shape=(self.units * 4,),\n name='bias',\n initializer=bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.built = True\n\n def call(self, inputs, states, training=None):\n if 0 < self.dropout < 1 and self._dropout_mask is None:\n self._dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs,\n K.shape(inputs)[-1]),\n self.dropout,\n training=training,\n count=4)\n if (0 < self.recurrent_dropout < 1 and\n self._recurrent_dropout_mask is None):\n self._recurrent_dropout_mask = _generate_dropout_mask(\n _generate_dropout_ones(inputs, self.units),\n self.recurrent_dropout,\n training=training,\n count=4)\n\n # dropout matrices for input units\n dp_mask = self._dropout_mask\n # dropout matrices for recurrent units\n rec_dp_mask = self._recurrent_dropout_mask\n\n h_tm1 = states[0] # previous memory state\n c_tm1 = states[1] # previous carry state\n\n if 
self.implementation == 1:\n if 0 < self.dropout < 1.:\n inputs_i = inputs * dp_mask[0]\n inputs_f = inputs * dp_mask[1]\n inputs_c = inputs * dp_mask[2]\n inputs_o = inputs * dp_mask[3]\n else:\n inputs_i = inputs\n inputs_f = inputs\n inputs_c = inputs\n inputs_o = inputs\n x_i = K.dot(inputs_i, self.kernel[:, :self.units])\n x_f = K.dot(inputs_f, self.kernel[:, self.units:self.units * 2])\n x_c = K.dot(inputs_c, self.kernel[:, self.units * 2:self.units * 3])\n x_o = K.dot(inputs_o, self.kernel[:, self.units * 3:])\n if self.use_bias:\n x_i = K.bias_add(x_i, self.bias[:self.units])\n x_f = K.bias_add(x_f, self.bias[self.units:self.units * 2])\n x_c = K.bias_add(x_c, self.bias[self.units * 2:self.units * 3])\n x_o = K.bias_add(x_o, self.bias[self.units * 3:])\n\n if 0 < self.recurrent_dropout < 1.:\n h_tm1_i = h_tm1 * rec_dp_mask[0]\n h_tm1_f = h_tm1 * rec_dp_mask[1]\n h_tm1_c = h_tm1 * rec_dp_mask[2]\n h_tm1_o = h_tm1 * rec_dp_mask[3]\n else:\n h_tm1_i = h_tm1\n h_tm1_f = h_tm1\n h_tm1_c = h_tm1\n h_tm1_o = h_tm1\n i = self.recurrent_activation(\n x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]))\n f = self.recurrent_activation(\n x_f + K.dot(h_tm1_f,\n self.recurrent_kernel[:, self.units: self.units * 2]))\n c = f * c_tm1 + i * self.activation(\n x_c + K.dot(h_tm1_c,\n self.recurrent_kernel[:, self.units * 2: self.units * 3]))\n o = self.recurrent_activation(\n x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]))\n else:\n if 0. < self.dropout < 1.:\n inputs *= dp_mask[0]\n z = K.dot(inputs, self.kernel)\n if 0. < self.recurrent_dropout < 1.:\n h_tm1 *= rec_dp_mask[0]\n z += K.dot(h_tm1, self.recurrent_kernel)\n if self.use_bias:\n z = K.bias_add(z, self.bias)\n\n z0 = z[:, :self.units]\n z1 = z[:, self.units:2 * self.units]\n z2 = z[:, 2 * self.units:3 * self.units]\n z3 = z[:, 3 * self.units:]\n\n i = self.recurrent_activation(z0)\n f = self.recurrent_activation(z1)\n c = f * c_tm1 + i * self.activation(z2)\n o = self.recurrent_activation(z3)\n\n h = o * self.activation(c)\n if 0 < self.dropout + self.recurrent_dropout:\n if training is None and not context.executing_eagerly():\n # This would be harmless to set in eager mode, but eager tensors\n # disallow setting arbitrary attributes.\n h._uses_learning_phase = True\n return h, [h, c]\n\n def get_config(self):\n config = {\n 'units':\n self.units,\n 'activation':\n activations.serialize(self.activation),\n 'recurrent_activation':\n activations.serialize(self.recurrent_activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'unit_forget_bias':\n self.unit_forget_bias,\n 'kernel_regularizer':\n regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint),\n 'dropout':\n self.dropout,\n 'recurrent_dropout':\n self.recurrent_dropout,\n 'implementation':\n self.implementation\n }\n base_config = super(LSTMCell, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@tf_export('keras.layers.LSTM')\nclass LSTM(RNN):\n 
\"\"\"Long-Short Term Memory layer - Hochreiter 1997.\n\n Arguments:\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use.\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n Default: hard sigmoid (`hard_sigmoid`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs..\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state..\n bias_initializer: Initializer for the bias vector.\n unit_forget_bias: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et\n al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\")..\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n\n \"\"\"\n\n def __init__(self,\n units,\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n dropout=0.,\n recurrent_dropout=0.,\n implementation=1,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs):\n if implementation == 0:\n logging.warning('`implementation=0` has been deprecated, '\n 'and now defaults to `implementation=1`. '\n 'Please update your layer call.')\n cell = LSTMCell(\n units,\n activation=activation,\n recurrent_activation=recurrent_activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n unit_forget_bias=unit_forget_bias,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout,\n implementation=implementation)\n super(LSTM, self).__init__(\n cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n self.cell._dropout_mask = None\n self.cell._recurrent_dropout_mask = None\n return super(LSTM, self).call(\n inputs, mask=mask, training=training, initial_state=initial_state)\n\n @property\n def units(self):\n return self.cell.units\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def recurrent_activation(self):\n return self.cell.recurrent_activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def unit_forget_bias(self):\n return self.cell.unit_forget_bias\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return self.cell.recurrent_dropout\n\n 
@property\n def implementation(self):\n return self.cell.implementation\n\n def get_config(self):\n config = {\n 'units':\n self.units,\n 'activation':\n activations.serialize(self.activation),\n 'recurrent_activation':\n activations.serialize(self.recurrent_activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer':\n initializers.serialize(self.bias_initializer),\n 'unit_forget_bias':\n self.unit_forget_bias,\n 'kernel_regularizer':\n regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint),\n 'dropout':\n self.dropout,\n 'recurrent_dropout':\n self.recurrent_dropout,\n 'implementation':\n self.implementation\n }\n base_config = super(LSTM, self).get_config()\n del base_config['cell']\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n if 'implementation' in config and config['implementation'] == 0:\n config['implementation'] = 1\n return cls(**config)\n\n\ndef _generate_dropout_ones(inputs, dims):\n return K.ones((K.shape(inputs)[0], dims))\n\n\ndef _generate_dropout_mask(ones, rate, training=None, count=1):\n\n def dropped_inputs():\n return K.dropout(ones, rate)\n\n if count > 1:\n return [\n K.in_train_phase(dropped_inputs, ones, training=training)\n for _ in range(count)\n ]\n return K.in_train_phase(dropped_inputs, ones, training=training)\n\n\nclass Recurrent(Layer):\n \"\"\"Deprecated abstract base class for recurrent layers.\n\n It still exists because it is leveraged by the convolutional-recurrent layers.\n It will be removed entirely in the future.\n It was never part of the public API.\n Do not use.\n\n Arguments:\n weights: list of Numpy arrays to set as initial weights.\n The list should have 3 elements, of shapes:\n `[(input_dim, output_dim), (output_dim, output_dim), (output_dim,)]`.\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n implementation: one of {0, 1, or 2}.\n If set to 0, the RNN will use\n an implementation that uses fewer, larger matrix products,\n thus running faster on CPU but consuming more memory.\n If set to 1, the RNN will use more matrix products,\n but smaller ones, thus running slower\n (may actually be faster on GPU) while consuming less memory.\n If set to 2 (LSTM/GRU only),\n the RNN will combine the input gate,\n the forget gate and the output gate into a single matrix,\n enabling more time-efficient parallelization on the GPU.\n Note: RNN dropout must be shared for all gates,\n resulting in a slightly reduced regularization.\n input_dim: dimensionality of the input (integer).\n This argument (or alternatively, the keyword argument `input_shape`)\n is required when using this layer as the first layer in a model.\n input_length: Length of input sequences, to be specified\n when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n Note that if the recurrent layer is not the first layer\n in your model, you would need to specify the input length\n at the level of the first layer\n (e.g. via the `input_shape` argument).\n\n Input shape:\n 3D tensor with shape `(batch_size, timesteps, input_dim)`,\n (Optional) 2D tensors with shape `(batch_size, output_dim)`.\n\n Output shape:\n - if `return_state`: a list of tensors. The first tensor is\n the output. The remaining tensors are the last states,\n each with shape `(batch_size, units)`.\n - if `return_sequences`: 3D tensor with shape\n `(batch_size, timesteps, units)`.\n - else, 2D tensor with shape `(batch_size, units)`.\n\n # Masking\n This layer supports masking for input data with a variable number\n of timesteps. To introduce masks to your data,\n use an `Embedding` layer with the `mask_zero` parameter\n set to `True`.\n\n # Note on using statefulness in RNNs\n You can set RNN layers to be 'stateful', which means that the states\n computed for the samples in one batch will be reused as initial states\n for the samples in the next batch. This assumes a one-to-one mapping\n between samples in different successive batches.\n\n To enable statefulness:\n - specify `stateful=True` in the layer constructor.\n - specify a fixed batch size for your model, by passing\n if sequential model:\n `batch_input_shape=(...)` to the first layer in your model.\n else for functional model with 1 or more Input layers:\n `batch_shape=(...)` to all the first layers in your model.\n This is the expected shape of your inputs\n *including the batch size*.\n It should be a tuple of integers, e.g. `(32, 10, 100)`.\n - specify `shuffle=False` when calling fit().\n\n To reset the states of your model, call `.reset_states()` on either\n a specific layer, or on your entire model.\n\n # Note on specifying the initial state of RNNs\n You can specify the initial state of RNN layers symbolically by\n calling them with the keyword argument `initial_state`. 
The value of\n `initial_state` should be a tensor or list of tensors representing\n the initial state of the RNN layer.\n\n You can specify the initial state of RNN layers numerically by\n calling `reset_states` with the keyword argument `states`. The value of\n `states` should be a numpy array or list of numpy arrays representing\n the initial state of the RNN layer.\n \"\"\"\n\n def __init__(self,\n return_sequences=False,\n return_state=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n implementation=0,\n **kwargs):\n super(Recurrent, self).__init__(**kwargs)\n self.return_sequences = return_sequences\n self.return_state = return_state\n self.go_backwards = go_backwards\n self.stateful = stateful\n self.unroll = unroll\n self.implementation = implementation\n self.supports_masking = True\n self.input_spec = [InputSpec(ndim=3)]\n self.state_spec = None\n self.dropout = 0\n self.recurrent_dropout = 0\n\n @shape_type_conversion\n def compute_output_shape(self, input_shape):\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n if self.return_sequences:\n output_shape = (input_shape[0], input_shape[1], self.units)\n else:\n output_shape = (input_shape[0], self.units)\n\n if self.return_state:\n state_shape = [tensor_shape.TensorShape(\n (input_shape[0], self.units)) for _ in self.states]\n return [tensor_shape.TensorShape(output_shape)] + state_shape\n return tensor_shape.TensorShape(output_shape)\n\n def compute_mask(self, inputs, mask):\n if isinstance(mask, list):\n mask = mask[0]\n output_mask = mask if self.return_sequences else None\n if self.return_state:\n state_mask = [None for _ in self.states]\n return [output_mask] + state_mask\n return output_mask\n\n def step(self, inputs, states):\n raise NotImplementedError\n\n def get_constants(self, inputs, training=None):\n return []\n\n def get_initial_state(self, inputs):\n # build an all-zero tensor of shape (samples, output_dim)\n initial_state = K.zeros_like(inputs) # (samples, timesteps, input_dim)\n initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)\n initial_state = K.expand_dims(initial_state) # (samples, 1)\n initial_state = K.tile(initial_state, [1,\n self.units]) # (samples, output_dim)\n initial_state = [initial_state for _ in range(len(self.states))]\n return initial_state\n\n def preprocess_input(self, inputs, training=None):\n return inputs\n\n def __call__(self, inputs, initial_state=None, **kwargs):\n if (isinstance(inputs, (list, tuple)) and\n len(inputs) > 1\n and initial_state is None):\n initial_state = inputs[1:]\n inputs = inputs[0]\n\n # If `initial_state` is specified,\n # and if it is a Keras tensor,\n # then add it to the inputs and temporarily\n # modify the input spec to include the state.\n if initial_state is None:\n return super(Recurrent, self).__call__(inputs, **kwargs)\n\n if not isinstance(initial_state, (list, tuple)):\n initial_state = [initial_state]\n\n is_keras_tensor = hasattr(initial_state[0], '_keras_history')\n for tensor in initial_state:\n if hasattr(tensor, '_keras_history') != is_keras_tensor:\n raise ValueError('The initial state of an RNN layer cannot be'\n ' specified with a mix of Keras tensors and'\n ' non-Keras tensors')\n\n if is_keras_tensor:\n # Compute the full input spec, including state\n input_spec = self.input_spec\n state_spec = self.state_spec\n if not isinstance(input_spec, list):\n input_spec = [input_spec]\n if not isinstance(state_spec, list):\n state_spec = 
[state_spec]\n self.input_spec = input_spec + state_spec\n\n # Compute the full inputs, including state\n inputs = [inputs] + list(initial_state)\n\n # Perform the call\n output = super(Recurrent, self).__call__(inputs, **kwargs)\n\n # Restore original input spec\n self.input_spec = input_spec\n return output\n else:\n kwargs['initial_state'] = initial_state\n return super(Recurrent, self).__call__(inputs, **kwargs)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n # input shape: `(samples, time (padded with zeros), input_dim)`\n # note that the .build() method of subclasses MUST define\n # self.input_spec and self.state_spec with complete input shapes.\n if isinstance(inputs, list):\n initial_state = inputs[1:]\n inputs = inputs[0]\n elif initial_state is not None:\n pass\n elif self.stateful:\n initial_state = self.states\n else:\n initial_state = self.get_initial_state(inputs)\n\n if isinstance(mask, list):\n mask = mask[0]\n\n if len(initial_state) != len(self.states):\n raise ValueError('Layer has ' + str(len(self.states)) +\n ' states but was passed ' + str(len(initial_state)) +\n ' initial states.')\n input_shape = K.int_shape(inputs)\n if self.unroll and input_shape[1] is None:\n raise ValueError('Cannot unroll a RNN if the '\n 'time dimension is undefined. \\n'\n '- If using a Sequential model, '\n 'specify the time dimension by passing '\n 'an `input_shape` or `batch_input_shape` '\n 'argument to your first layer. If your '\n 'first layer is an Embedding, you can '\n 'also use the `input_length` argument.\\n'\n '- If using the functional API, specify '\n 'the time dimension by passing a `shape` '\n 'or `batch_shape` argument to your Input layer.')\n constants = self.get_constants(inputs, training=None)\n preprocessed_input = self.preprocess_input(inputs, training=None)\n last_output, outputs, states = K.rnn(\n self.step,\n preprocessed_input,\n initial_state,\n go_backwards=self.go_backwards,\n mask=mask,\n constants=constants,\n unroll=self.unroll)\n if self.stateful:\n updates = []\n for i in range(len(states)):\n updates.append(K.update(self.states[i], states[i]))\n self.add_update(updates, inputs)\n\n # Properly set learning phase\n if 0 < self.dropout + self.recurrent_dropout:\n last_output._uses_learning_phase = True\n outputs._uses_learning_phase = True\n\n if not self.return_sequences:\n outputs = last_output\n\n if self.return_state:\n if not isinstance(states, (list, tuple)):\n states = [states]\n else:\n states = list(states)\n return [outputs] + states\n return outputs\n\n def reset_states(self, states=None):\n if not self.stateful:\n raise AttributeError('Layer must be stateful.')\n batch_size = self.input_spec[0].shape[0]\n if not batch_size:\n raise ValueError('If a RNN is stateful, it needs to know '\n 'its batch size. 
Specify the batch size '\n 'of your input tensors: \\n'\n '- If using a Sequential model, '\n 'specify the batch size by passing '\n 'a `batch_input_shape` '\n 'argument to your first layer.\\n'\n '- If using the functional API, specify '\n 'the batch size by passing a '\n '`batch_shape` argument to your Input layer.')\n # initialize state if None\n if self.states[0] is None:\n self.states = [K.zeros((batch_size, self.units)) for _ in self.states]\n elif states is None:\n for state in self.states:\n K.set_value(state, np.zeros((batch_size, self.units)))\n else:\n if not isinstance(states, (list, tuple)):\n states = [states]\n if len(states) != len(self.states):\n raise ValueError('Layer ' + self.name + ' expects ' +\n str(len(self.states)) + ' states, '\n 'but it received ' + str(len(states)) +\n ' state values. Input received: ' + str(states))\n for index, (value, state) in enumerate(zip(states, self.states)):\n if value.shape != (batch_size, self.units):\n raise ValueError('State ' + str(index) +\n ' is incompatible with layer ' + self.name +\n ': expected shape=' + str((batch_size, self.units)) +\n ', found shape=' + str(value.shape))\n K.set_value(state, value)\n\n def get_config(self):\n config = {\n 'return_sequences': self.return_sequences,\n 'return_state': self.return_state,\n 'go_backwards': self.go_backwards,\n 'stateful': self.stateful,\n 'unroll': self.unroll,\n 'implementation': self.implementation\n }\n base_config = super(Recurrent, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Ops for GPU collective operations implemented using NVIDIA nccl.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport threading\n\nfrom tensorflow.contrib.nccl.ops import gen_nccl_ops\nfrom tensorflow.contrib.util import loader\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import device\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.platform import resource_loader\n\n_nccl_ops_so = loader.load_op_library(\n resource_loader.get_path_to_datafile('_nccl_ops.so'))\n\n\ndef all_sum(tensors):\n \"\"\"Returns a list of tensors with the all-reduce sum across `tensors`.\n\n The computation is done with an all-reduce operation, so if only some of the\n returned tensors are evaluated then the computation will hang.\n\n Args:\n tensors: The input tensors across which to sum; must be assigned\n to GPU devices.\n\n Returns:\n List of tensors, each with the sum of the input tensors, where tensor i has\n the same device as `tensors[i]`.\n \"\"\"\n return _apply_all_reduce('sum', tensors)\n\n\n@ops.RegisterGradient('NcclAllReduce')\ndef _all_sum_grad(op, grad):\n \"\"\"The gradients for `all_sum`.\n\n Args:\n op: The `all_sum` `Operation` that we 
are differentiating.\n grad: Gradient with respect to the output of the `all_sum` op.\n\n Returns:\n The gradient with respect to the input of `all_sum`.\n\n Raises:\n LookupError: If `reduction` is not `sum`.\n \"\"\"\n if op.get_attr('reduction') != 'sum':\n raise LookupError('No gradient defined for NcclAllReduce except sum.')\n\n _check_device(grad, expected=op.device)\n num_devices = op.get_attr('num_devices')\n shared_name = op.get_attr('shared_name') + '_grad'\n\n with ops.device(op.device):\n return gen_nccl_ops.nccl_all_reduce(\n input=grad,\n reduction='sum',\n num_devices=num_devices,\n shared_name=shared_name)\n\n\ndef all_prod(tensors):\n \"\"\"Returns a list of tensors with the all-reduce product across `tensors`.\n\n The computation is done with an all-reduce operation, so if only some of the\n returned tensors are evaluated then the computation will hang.\n\n Args:\n tensors: The input tensors across which to multiply; must be assigned\n to GPU devices.\n\n Returns:\n List of tensors, each with the product of the input tensors, where tensor i\n has the same device as `tensors[i]`.\n \"\"\"\n return _apply_all_reduce('prod', tensors)\n\n\ndef all_min(tensors):\n \"\"\"Returns a list of tensors with the all-reduce min across `tensors`.\n\n The computation is done with an all-reduce operation, so if only some of the\n returned tensors are evaluated then the computation will hang.\n\n Args:\n tensors: The input tensors across which to reduce; must be assigned\n to GPU devices.\n\n Returns:\n List of tensors, each with the minimum of the input tensors, where tensor i\n has the same device as `tensors[i]`.\n \"\"\"\n return _apply_all_reduce('min', tensors)\n\n\ndef all_max(tensors):\n \"\"\"Returns a list of tensors with the all-reduce max across `tensors`.\n\n The computation is done with an all-reduce operation, so if only some of the\n returned tensors are evaluated then the computation will hang.\n\n Args:\n tensors: The input tensors across which to reduce; must be assigned\n to GPU devices.\n\n Returns:\n List of tensors, each with the maximum of the input tensors, where tensor i\n has the same device as `tensors[i]`.\n \"\"\"\n return _apply_all_reduce('max', tensors)\n\n\ndef reduce_sum(tensors):\n \"\"\"Returns a tensor with the reduce sum across `tensors`.\n\n The computation is done with a reduce operation, so only one tensor is\n returned.\n\n Args:\n tensors: The input tensors across which to sum; must be assigned\n to GPU devices.\n\n Returns:\n A tensor containing the sum of the input tensors.\n\n Raises:\n LookupError: If context is not currently using a GPU device.\n \"\"\"\n return _apply_reduce('sum', tensors)\n\n\n@ops.RegisterGradient('NcclReduce')\ndef _reduce_sum_grad(op, grad):\n \"\"\"The gradients for input `Operation` of `reduce_sum`.\n\n Args:\n op: The `sum send` `Operation` that we are differentiating.\n grad: Gradient with respect to the output of the `reduce_sum` op.\n\n Returns:\n The gradient with respect to the input of `reduce_sum` op.\n\n Raises:\n LookupError: If the reduction attribute of op is not `sum`.\n \"\"\"\n if op.get_attr('reduction') != 'sum':\n raise LookupError('No gradient defined for NcclReduce except sum.')\n _check_device(grad, expected=op.device)\n\n with ops.device(op.device):\n result = gen_nccl_ops.nccl_broadcast(input=grad, shape=grad.shape)\n\n return [result] * len(op.inputs)\n\n\ndef broadcast(tensor):\n \"\"\"Returns a tensor that can be efficiently transferred to other devices.\n\n Args:\n tensor: The tensor to 
send; must be assigned to a GPU device.\n\n Returns:\n A tensor with the value of `tensor`, which can be used as input to\n ops on other GPU devices.\n \"\"\"\n _check_graph_mode()\n _check_device(tensor)\n\n with ops.device(tensor.device):\n return gen_nccl_ops.nccl_broadcast(input=tensor, shape=tensor.shape)\n\n\n@ops.RegisterGradient('NcclBroadcast')\ndef _broadcast_grad(op, accumulated_grad):\n \"\"\"The gradients for input `Operation` of `broadcast`.\n\n Args:\n op: The `broadcast send` `Operation` that we are differentiating.\n accumulated_grad: Accumulated gradients with respect to the output of the\n `broadcast` op.\n\n Returns:\n Gradients with respect to the input of `broadcast`.\n \"\"\"\n # Grab inputs of accumulated_grad and replace accumulation with reduce_sum.\n grads = [t for t in accumulated_grad.op.inputs]\n for t in grads:\n _check_device(t)\n\n with ops.device(op.device):\n return gen_nccl_ops.nccl_reduce(input=grads, reduction='sum')\n\n\ndef _apply_all_reduce(reduction, tensors):\n \"\"\"Helper function for all_* functions.\"\"\"\n if not tensors:\n raise ValueError('Must pass >0 tensors to all reduce operations')\n _check_graph_mode()\n\n shared_name = _get_shared_name()\n res = []\n\n for t in tensors:\n _check_device(t)\n with ops.device(t.device):\n res.append(\n gen_nccl_ops.nccl_all_reduce(\n input=t,\n reduction=reduction,\n num_devices=len(tensors),\n shared_name=shared_name))\n\n return res\n\n\ndef _apply_reduce(reduction, tensors):\n \"\"\"Helper function for reduce_* functions.\"\"\"\n if not tensors:\n raise ValueError('Must pass >0 tensors to reduce operations')\n _check_graph_mode()\n\n for t in tensors:\n _check_device(t)\n result = gen_nccl_ops.nccl_reduce(input=tensors, reduction=reduction)\n try:\n next(t for t in tensors if t.device == result.device)\n except StopIteration:\n raise ValueError('One input tensor must be assigned to current device')\n return result\n\n\n_lock = threading.Lock()\n_shared_name_counter = 0\n\n\ndef _get_shared_name():\n global _shared_name_counter\n\n with _lock:\n val = _shared_name_counter\n _shared_name_counter += 1\n return 'c%s' % val\n\n\ndef _check_device(tensor, expected=None):\n if not device.canonical_name(tensor.device):\n raise ValueError('Device assignment required for nccl collective ops')\n if expected and expected != tensor.device:\n raise ValueError('Expected device %s, got %s' % (expected, tensor.device))\n\n\ndef _check_graph_mode():\n if context.executing_eagerly():\n raise ValueError('Nccl ops are not supported in eager mode')\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Handles builtins and other special functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gast\n\nfrom tensorflow.contrib.autograph.pyct import templates\nfrom tensorflow.contrib.autograph.pyct import transformer\n\n\nclass BuiltinFunctionTransformer(transformer.Base):\n \"\"\"Handles builtin functions.\n\n This transformer only covers functions that are translated into a\n TF equivalent, like `len`.\n \"\"\"\n\n def __init__(self, context):\n super(BuiltinFunctionTransformer, self).__init__(context)\n\n # pylint:disable=invalid-name\n\n def _convert_builtin(self, node):\n template = \"\"\"\n autograph_utils.dynamic_builtin(func, args)\n \"\"\"\n return templates.replace(template, func=node.func, args=node.args)[0].value\n\n def _convert_print(self, node):\n template = \"\"\"\n autograph_utils.dynamic_print(args)\n \"\"\"\n return templates.replace(template, args=node.args)[0].value\n\n def visit_Call(self, node):\n self.generic_visit(node)\n # TODO(mdan): This won't work if the function was hidden.\n if isinstance(node.func, gast.Name) and node.func.id in ('len', 'range'):\n return self._convert_builtin(node)\n # Print needs to be handled separately because it can be read as a statement.\n if isinstance(node.func, gast.Name) and node.func.id == 'print':\n return self._convert_print(node)\n return node\n\n def visit_Print(self, node):\n self.generic_visit(node)\n args = node.values\n # The following is the case when calling print(a, b)\n if len(args) == 1 and isinstance(args[0], gast.Tuple):\n args = args[0].elts\n template = \"\"\"\n fname(args)\n \"\"\"\n function_call = templates.replace(template, fname='print', args=args)[0]\n return self.visit(function_call)\n\n # pylint:enable=invalid-name\n\n\ndef transform(node, context):\n return BuiltinFunctionTransformer(context).visit(node)\n" ]
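The `Recurrent` docstring above spells out the stateful-RNN contract: a fixed batch size, `shuffle=False`, a one-to-one sample correspondence across batches, and explicit state resets. A minimal sketch of that usage, assuming the layers are exposed as `tf.keras.layers` and reusing the docstring's own example shape `(32, 10, 100)`:

import numpy as np
import tensorflow as tf

# Stateful layers need a fixed batch size: the final state of sample i in
# batch k seeds the initial state of sample i in batch k+1.
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(4, stateful=True, batch_input_shape=(32, 10, 100)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse')

x = np.random.rand(32, 10, 100).astype('float32')
y = np.random.rand(32, 1).astype('float32')

# shuffle=False preserves the sample-to-sample correspondence across batches.
model.fit(x, y, batch_size=32, epochs=2, shuffle=False)

model.reset_states()  # clear the carried state before an unrelated sequence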
[ [ "tensorflow.python.keras._impl.keras.backend.zeros_like", "tensorflow.python.keras._impl.keras.backend.batch_get_value", "tensorflow.python.keras._impl.keras.backend.update", "tensorflow.python.keras._impl.keras.constraints.get", "tensorflow.python.keras._impl.keras.backend.is_keras_tensor", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.keras._impl.keras.layers.deserialize", "tensorflow.python.keras._impl.keras.backend.dot", "tensorflow.python.keras._impl.keras.activations.serialize", "tensorflow.python.keras._impl.keras.backend.expand_dims", "tensorflow.python.keras._impl.keras.constraints.serialize", "tensorflow.python.keras._impl.keras.backend.batch_set_value", "tensorflow.python.keras._impl.keras.backend.dropout", "tensorflow.python.keras._impl.keras.regularizers.get", "tensorflow.python.keras._impl.keras.engine.InputSpec", "tensorflow.python.keras._impl.keras.backend.int_shape", "numpy.zeros", "tensorflow.python.keras._impl.keras.activations.get", "tensorflow.python.keras._impl.keras.backend.tile", "tensorflow.python.keras._impl.keras.regularizers.serialize", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.keras._impl.keras.backend.rnn", "tensorflow.python.keras._impl.keras.backend.shape", "tensorflow.python.keras._impl.keras.initializers.Ones", "tensorflow.python.keras._impl.keras.backend.sum", "tensorflow.python.keras._impl.keras.utils.generic_utils.has_arg", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.keras._impl.keras.backend.zeros", "tensorflow.python.keras._impl.keras.backend.set_value", "tensorflow.python.keras._impl.keras.backend.in_train_phase", "tensorflow.python.keras._impl.keras.backend.bias_add", "tensorflow.python.keras._impl.keras.initializers.get", "tensorflow.python.keras._impl.keras.initializers.serialize", "tensorflow.python.util.tf_export.tf_export" ], [ "tensorflow.contrib.nccl.ops.gen_nccl_ops.nccl_reduce", "tensorflow.python.framework.ops.RegisterGradient", "tensorflow.contrib.nccl.ops.gen_nccl_ops.nccl_broadcast", "tensorflow.contrib.nccl.ops.gen_nccl_ops.nccl_all_reduce", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.framework.ops.device", "tensorflow.python.platform.resource_loader.get_path_to_datafile", "tensorflow.python.framework.device.canonical_name" ], [ "tensorflow.contrib.autograph.pyct.templates.replace" ] ]
oi-analytics/oia-transport-archive
[ "f89cb686704fe76c1665697b35d14caccf37f3a1" ]
[ "src/vtra/plot/rail_network_failures_multi_modal.py" ]
[ "\"\"\"Rail network flows map\n\"\"\"\nimport os\nimport sys\n\nfrom collections import OrderedDict\n\nimport cartopy.crs as ccrs\nimport cartopy.io.shapereader as shpreader\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom shapely.geometry import LineString\n\n\nfrom vtra.utils import *\n\ndef main():\n\tconfig = load_config()\n\tflows_file = os.path.join(config['paths']['data'], 'Results', 'Failure_shapefiles', 'weighted_edges_failures_national_rail_multi_modal_options.shp')\n\n\tplot_sets = [\n\t\t{\n\t\t\t'file_tag': 'commodities',\n\t\t\t'no_access':[-1,1],\n\t\t\t'legend_label': \"AADF (tons/day)\",\n\t\t\t'divisor': 1,\n\t\t\t'columns': ['min_tons','max_tons'],\n\t\t\t'title_cols': ['Total tonnage (min)','Total tonnage (max)']\n\t\t},\n\t\t{\n\t\t\t'file_tag': 'economic',\n\t\t\t'no_access':[-1,1],\n\t\t\t'legend_label': \"(million USD/day)\",\n\t\t\t'divisor': 1000000,\n\t\t\t'columns': ['min_econ_l','max_econ_l'],\n\t\t\t'title_cols': ['Economic losses (min)','Economic losses (max)']\n\t\t}\n\n\t]\n\n\tcolor = '#006d2c'\n\tcolor_by_type = {'Rail Line': color}\n\n\tfor plot_set in plot_sets:\n\t\tfor c in range(len(plot_set['columns'])):\n\t\t\tax = get_axes()\n\t\t\tplot_basemap(ax, config['paths']['data'], highlight_region = [])\n\t\t\tscale_bar(ax, location=(0.8, 0.05))\n\t\t\tplot_basemap_labels(ax, config['paths']['data'])\n\t\t\tproj_lat_lon = ccrs.PlateCarree()\n\n\t\t\tcolumn = plot_set['columns'][c]\n\t\t\tweights = [\n\t\t\t\trecord.attributes[column]\n\t\t\t\tfor record in shpreader.Reader(flows_file).records()\n\t\t\t\tif int(record.attributes['no_access']) in plot_set['no_access']\n\t\t\t]\n\n\t\t\tmax_weight = max(weights)\n\t\t\twidth_by_range = generate_weight_bins(weights)\n\n\t\t\tgeoms_by_range = {}\n\t\t\tfor value_range in width_by_range:\n\t\t\t\tgeoms_by_range[value_range] = []\n\n\t\t\tfor record in [rec for rec in shpreader.Reader(flows_file).records() if int(rec.attributes['no_access']) in plot_set['no_access']]:\n\t\t\t\tval = record.attributes[column]\n\t\t\t\tgeom = record.geometry\n\t\t\t\tfor nmin, nmax in geoms_by_range:\n\t\t\t\t\tif nmin <= val and val < nmax:\n\t\t\t\t\t\tgeoms_by_range[(nmin, nmax)].append(geom)\n\n\t\t\t\t\t\t# plot\n\t\t\tfor range_, width in width_by_range.items():\n\t\t\t\tax.add_geometries(\n\t\t\t\t\t[geom.buffer(width) for geom in geoms_by_range[range_]],\n\t\t\t\t\tcrs=proj_lat_lon,\n\t\t\t\t\tedgecolor='none',\n\t\t\t\t\tfacecolor=color,\n\t\t\t\t\tzorder=2)\n\n\t\t\tx_l = 102.3\n\t\t\tx_r = x_l + 0.4\n\t\t\tbase_y = 14\n\t\t\ty_step = 0.4\n\t\t\ty_text_nudge = 0.1\n\t\t\tx_text_nudge = 0.1\n\n\t\t\tax.text(\n\t\t\t\tx_l,\n\t\t\t\tbase_y + y_step - y_text_nudge,\n\t\t\t\tplot_set['legend_label'],\n\t\t\t\thorizontalalignment='left',\n\t\t\t\ttransform=proj_lat_lon,\n\t\t\t\tsize=10)\n\n\t\t\tdivisor = plot_set['divisor']\n\t\t\tfor (i, ((nmin, nmax), width)) in enumerate(width_by_range.items()):\n\t\t\t\ty = base_y - (i*y_step)\n\t\t\t\tline = LineString([(x_l, y), (x_r, y)])\n\t\t\t\tax.add_geometries(\n\t\t\t\t\t[line.buffer(width)],\n\t\t\t\t\tcrs=proj_lat_lon,\n\t\t\t\t\tlinewidth=0,\n\t\t\t\t\tedgecolor=color,\n\t\t\t\t\tfacecolor=color,\n\t\t\t\t\tzorder=2)\n\t\t\t\tif nmin == max_weight:\n\t\t\t\t\tlabel = '>{:.2f}'.format(max_weight/divisor)\n\t\t\t\telse:\n\t\t\t\t\tlabel = '{:.2f}-{:.2f}'.format(nmin/divisor, nmax/divisor)\n\t\t\t\tax.text(\n\t\t\t\t\tx_r + x_text_nudge,\n\t\t\t\t\ty - 
y_text_nudge,\n\t\t\t\t\tlabel,\n\t\t\t\t\thorizontalalignment='left',\n\t\t\t\t\ttransform=proj_lat_lon,\n\t\t\t\t\tsize=10)\n\n\t\t\tplt.title(plot_set['title_cols'][c], fontsize = 14)\n\t\t\toutput_file = os.path.join(config['paths']['figures'], 'rail_failure-map-{}-{}-multi-modal-options.png'.format(plot_set['file_tag'],column))\n\t\t\tsave_fig(output_file)\n\t\t\tplt.close()\n\nif __name__ == '__main__':\n\tmain()\n" ]
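The legend and line-width logic above assumes `generate_weight_bins` (imported via `vtra.utils`) returns an ordered mapping from `(nmin, nmax)` flow ranges to buffer widths. A simplified, hypothetical stand-in with that interface, using linear bins; the real helper may choose bin edges and widths differently:

from collections import OrderedDict
import numpy as np

def generate_weight_bins_linear(weights, n_bins=5, width_step=0.01):
    # Widest buffer for the largest flows; the (nmin, nmax) keys match the
    # `nmin <= val < nmax` test used in the plotting loop above.
    edges = np.linspace(min(weights), max(weights), n_bins + 1)
    bins = OrderedDict()
    for i in range(n_bins):
        nmax = edges[i + 1] + (1e-9 if i == n_bins - 1 else 0.0)  # catch the max
        bins[(edges[i], nmax)] = (i + 1) * width_step
    return bins

print(generate_weight_bins_linear([1, 5, 20, 80, 400]))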
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.close" ] ]
dhaulagiri0/AniGen
[ "bd845a29e771544ade1f64b94f967d8e178952f8" ]
[ "traversal.py" ]
[ "import numpy as np\nfrom data_process import create_dir, generate_latent_points, prediction_post_process\nfrom PIL import Image\nimport numpy as np\nimport os\nimport imageio\n\nimport re\ndef sorted_alphanumeric(data):\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(data, key=alphanum_key)\n\ndef genGif(images_dir, save_dir):\n images = []\n for file in sorted_alphanumeric(os.listdir(images_dir)):\n if '.png' in file:\n images.append(imageio.imread(f'{images_dir}/{file}'))\n imageio.mimsave(f'{save_dir}/gif.gif', images) \n\ndef getTransitionPoints(pt1, pt2, n_steps):\n n_steps -= 1\n dist = np.linalg.norm(pt2 - pt1) # euclidean distance between p1 and p2\n dhat = (pt2 - pt1) / dist # unit vector that defines the line from p1 to p2\n pts = [pt1]\n for i in range(1, n_steps + 1):\n pt = pt1 + i*(dist / n_steps)*dhat\n pts.append(pt)\n return np.array(pts)\n\ndef gen_traversal_points(latent_dim, n_samples, n_steps, latent_points):\n if latent_points is None:\n latent_points = generate_latent_points(latent_dim, n_samples)\n pts = []\n for i in range(0, n_samples - 1):\n new = getTransitionPoints(latent_points[i], latent_points[i + 1], n_steps).tolist()\n if i != 0:\n new = new[1:]\n pts += new\n return pts, latent_points\n\ndef traverse_latent_space(g_model, latent_dim, save_dir, n_samples=10, n_steps=20, batch_size=10, latent_points=None):\n # devise name\n name = create_dir(save_dir, g_model)\n pts, latent_points = gen_traversal_points(latent_dim, n_samples, n_steps, latent_points)\n\n file_names = []\n for i in range(0, len(pts), batch_size):\n batch = pts[i:i + batch_size]\n X = g_model.predict(batch)\n prediction_post_process(X, f'{save_dir}/{name}/plot_{i}', i)\n file_names += [f'{save_dir}/{name}/plot_{str(i)}_{str(j)}_{i + j}.png' for j in range(len(X))]\n print(f'Generated Batch {i/10 + 1} out of {len(pts) // batch_size}')\n\n print(f'traversal samples generated at {save_dir}')\n return file_names, np.array(pts)\n\ndef read_latent_points(file_path, latent_dim, n_samples):\n dims = np.loadtxt(file_path).reshape(n_samples, latent_dim)\n return dims\n\n# path = 'E:\\good_dims_generator_256x256-tuned-95352-7\\dims_512_26_3.txt'\n# dims_1 = np.loadtxt(path).reshape(26, 512).tolist()\n# path = 'E:\\good_dims_generator_256x256-tuned-95352-7\\dims_512_36_4.txt'\n# dims_2 = np.loadtxt(path).reshape(36, 512).tolist()\n\n# dims = np.array(dims_1 + dims_2)\n# with open(f'E:\\good_dims_generator_256x256-tuned-95352-7/dims_{512}_{len(dims)}_combined.txt', 'w') as f:\n# for row in dims:\n# np.savetxt(f, row)\n# good_ones_3 = [1151, 1683, 1451, 1812, 624, 1627, 1779, 1100, 1711, 1423, 475, 1234, 1607, 585, 1834, 1181, 1125, 1735, 85, 1572, 1465, 1868, 1770, 1352, 1486, 1441, 757, 1139, 1881, 468, 518, 1478, 613, 1540, 1174, 997]\n# good_ones_pts = np.array([dims[x] for x in good_ones_3])\n# with open(f'E:\\good_dims_generator_256x256-tuned-95352-7/dims_{512}_{len(good_ones_pts)}_4.txt', 'w') as f:\n# for row in good_ones_pts:\n# np.savetxt(f, row)\n\n# 'E:\\good_dims_generator_256x256-tuned-95352-7\\dims_512_62_combined.txt'\ngenGif('E:/traversal_good_ones/256x256', 'E:/traversal_good_ones/')" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.loadtxt" ] ]
RjDuan/Advlight
[ "44e5fdea84b223e0f26935b23611880f8ed07a59" ]
[ "test.py" ]
[ "import numpy as np\nimport math\nimport os\nimport torch\nfrom light_simulation import tube_light_generation_by_func, simple_add\nfrom torchvision.datasets import ImageFolder\nfrom torchvision.models import resnet50\nimport torchvision.transforms as transforms\nimport argparse\nimport random\nimport shutil\nimport itertools\nfrom tqdm import tqdm\nfrom PIL import Image\nimport torchvision.transforms.functional as transf\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--dataset', type=str, default='./query_imagenet', help='location of the data corpus')\nparser.add_argument('--portion_for_search', type=float, default=1.0, help='portion of training data')\nparser.add_argument('--batch_size', type=int, default=1, help='batch size')\nparser.add_argument('--trial_nums', type=int, default=300, help='number of the trials')\nparser.add_argument('--model', type=str, default='resnet50', help='org model or adv trained model df_resnet50')\nparser.add_argument('--output_csv', type=str, default='random_search.csv', help='number of the trials')\nparser.add_argument('--save_dir', type=str, default='./results', help='dir to save results')\nparser.add_argument('--save', type=str, default=False, help='Save results')\nargs = parser.parse_args()\n\n# transform\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\ntest_transform = transforms.Compose([\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])\n\n# model\nif args.model == 'resnet50':\n print(\"Loading model...\")\n model = resnet50(pretrained=True)\nelif args.model == 'df_resnet50':\n print(\"Loading adv trained model...\")\n model = resnet50(pretrained=False)\n model.load_state_dict(torch.load('./model/checkpoint-89.pth.tar')['state_dict'])\nmodel.cuda()\nmodel.eval()\n\n# dataset\nimagenet_path_list = os.listdir(args.dataset)\nimagenet_dataset = []\nfor img_path in imagenet_path_list:\n imagenet_dataset.append((img_path, int(img_path.split('.')[0])))\n\ntotal_num = len(imagenet_dataset)\ncurrent_num = 0\n# save\nif args.save and not os.path.exists(args.save_dir):\n os.mkdir(args.save_dir)\n# valid_queue = torch.utils.data.DataLoader(\n# imagenet_dataset, batch_size=args.batch_size,\n# sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:total_num]),\n# pin_memory=True, num_workers=16)\n\n\n\n# for cln_image, adv_image, target, index in search_queue:\n# print(target, index)\n# break\n# for cln_image, adv_image, target, index in search_queue:\n# print(target, index)\n# break\n# break\nacc_adv = 0\nacc_cln = 0\ntotal_q = 0\ndelay_threhold = 20\nQ = np.asarray([[1,0,0,0],\n [0,1,0,0],\n [0,0,1,0],\n [0,0,0,1],\n [1,1,0,0],\n [1,0,1,0],\n [1,0,0,1],\n [0,1,1,0],\n [0,1,0,1],\n [0,0,1,1]\n ])\n\nfor image_path, target in tqdm(imagenet_dataset):\n current_num += 1\n img = Image.open(os.path.join(args.dataset, image_path).encode(\"utf-8\")).convert('RGB')\n\n # clean\n clean_image = img.resize((256, 256), Image.BILINEAR)\n clean_image = test_transform(clean_image).unsqueeze(0)\n clean_image = clean_image.cuda()\n with torch.no_grad():\n org_pred_label = model(clean_image)\n org_pred_label = org_pred_label.cpu().detach()\n \n min_confidence = org_pred_label[0, target].item()\n org_pred_label = org_pred_label.max(1)[1].item()\n\n adv_image = np.asarray(img)\n\n cur_pred_label = org_pred_label\n\n correct_adv = org_pred_label == target\n correct_cln = cur_pred_label == target\n\n cur_search = 0\n # V = np.asarray([[580, 31, 74, 400], [580, 17, 131, 200], [580, 144, 316, 
600]])\n # init_v = V[np.random.randint(len(V))]\n params_list = []\n for i in range(200):\n init_v_it = [np.random.randint(380, 750), np.random.randint(0,180), np.random.randint(0,400), np.random.randint(10, 1600)]\n params_list.append(init_v_it)\n\n for init_v in params_list:\n\n for search_i in range(delay_threhold):\n q_id = np.random.randint(len(Q))\n q = Q[q_id]\n step_size = np.random.randint(1, 20)\n q = q*step_size\n for a in [-1, 1]:\n cur_search += 1\n #print(a*q)\n temp_q = init_v + a*q\n temp_q = np.clip(temp_q, [380, 0, 0, 10], [750, 180, 400, 1600])\n \n radians = math.radians(temp_q[1])\n k = round(math.tan(radians), 2)\n \n tube_light = tube_light_generation_by_func(k, temp_q[2], alpha = 1.0, beta=temp_q[3], wavelength=temp_q[0]) \n tube_light = tube_light * 255.0\n img_with_light = simple_add(adv_image, tube_light, 1.0)\n img_with_light = np.clip(img_with_light, 0.0, 255.0).astype('uint8')\n img_with_light = Image.fromarray(img_with_light)\n if args.save:\n img_with_light.save(os.path.join(args.save_dir, image_path))\n\n img_with_light = img_with_light.resize((224, 224), Image.BILINEAR)\n img_with_light = test_transform(img_with_light).unsqueeze(0)\n img_with_light = img_with_light.cuda()\n with torch.no_grad():\n cur_pred_label = model(img_with_light)\n cur_pred_label = cur_pred_label.cpu().detach()\n\n cur_confidence = cur_pred_label[0, target].item()\n cur_pred_label = cur_pred_label.max(1)[1].item()\n\n if cur_confidence < min_confidence:\n min_confidence = cur_confidence\n init_v = temp_q\n break\n \n if cur_pred_label != org_pred_label:\n correct_adv = False\n break\n \n if cur_pred_label != org_pred_label:\n correct_adv = False\n break\n \n total_q += cur_search\n if correct_cln:\n acc_cln += 1\n\n if correct_adv:\n acc_adv += 1\n print('{} attack failed\\tqueries: {}\\tmean queries: {}\\tclean acc: {}\\tadv suc. rate:{}'.format(image_path, cur_search, total_q/current_num, acc_cln/current_num, acc_adv/current_num))\n else:\n print('{} attack success\\tqueries: {}\\tmean queries: {}\\tclean acc: {}\\tadv suc. rate:{}'.format(image_path, cur_search, total_q/current_num, acc_cln/current_num, acc_adv/current_num))\n\n\n\n\n" ]
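The attack above is a random coordinate search over a four-dimensional light-beam parameterisation (wavelength, angle, and two beam parameters), clipped back into physical bounds after every signed step. A minimal sketch of that update rule in isolation, with the same bounds as the script; `propose` is a hypothetical helper name:

import numpy as np

LOWER = np.array([380, 0, 0, 10])
UPPER = np.array([750, 180, 400, 1600])
Q = np.eye(4, dtype=int)  # simplest direction set: one parameter at a time

def propose(v, rng):
    # One scaled, signed, clipped candidate step from the current vector v.
    q = Q[rng.integers(len(Q))] * rng.integers(1, 20)
    sign = rng.choice([-1, 1])
    return np.clip(v + sign * q, LOWER, UPPER)

rng = np.random.default_rng(0)
print(propose(np.array([580, 31, 74, 400]), rng))  # candidate stays in the box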
[ [ "numpy.asarray", "torch.no_grad", "numpy.random.randint", "torch.load", "numpy.clip" ] ]
JoyMonteiro/sympl
[ "c8bee914651824360a46bf71119dd87a93a07219" ]
[ "sympl/_core/restore_dataarray.py" ]
[ "import numpy as np\nfrom .exceptions import InvalidPropertyDictError\nfrom .dataarray import DataArray\nfrom .wildcard import (\n get_wildcard_matches_and_dim_lengths, fill_dims_wildcard,\n expand_array_wildcard_dims\n)\n\n\ndef ensure_values_are_arrays(array_dict):\n for name, value in array_dict.items():\n if not isinstance(value, np.ndarray):\n array_dict[name] = np.asarray(value)\n\n\ndef get_alias_or_name(name, output_properties, input_properties):\n if 'alias' in output_properties[name].keys():\n raw_name = output_properties[name]['alias']\n elif name in input_properties.keys() and 'alias' in input_properties[name].keys():\n raw_name = input_properties[name]['alias']\n else:\n raw_name = name\n return raw_name\n\n\ndef check_array_shape(out_dims, raw_array, name, dim_lengths):\n if len(out_dims) != len(raw_array.shape):\n raise InvalidPropertyDictError(\n 'Returned array for {} has shape {} '\n 'which is incompatible with dims {} in properties'.format(\n name, raw_array.shape, out_dims))\n for dim, length in zip(out_dims, raw_array.shape):\n if dim in dim_lengths.keys() and dim_lengths[dim] != length:\n raise InvalidPropertyDictError(\n 'Dimension {} of quantity {} has length {}, but '\n 'another quantity has length {}'.format(\n dim, name, length, dim_lengths[dim])\n )\n\n\ndef restore_data_arrays_with_properties(\n raw_arrays, output_properties, input_state, input_properties,\n ignore_names=None, ignore_missing=False):\n \"\"\"\n Parameters\n ----------\n raw_arrays : dict\n A dictionary whose keys are quantity names and values are numpy arrays\n containing the data for those quantities.\n output_properties : dict\n A dictionary whose keys are quantity names and values are dictionaries\n with properties for those quantities. The property \"dims\" must be\n present for each quantity not also present in input_properties. All\n other properties are included as attributes on the output DataArray\n for that quantity, including \"units\" which is required.\n input_state : dict\n A state dictionary that was used as input to a component for which\n DataArrays are being restored.\n input_properties : dict\n A dictionary whose keys are quantity names and values are dictionaries\n with input properties for those quantities. The property \"dims\" must be\n present, indicating the dimensions that the quantity was transformed to\n when taken as input to a component.\n ignore_names : iterable of str, optional\n Names to ignore when encountered in output_properties, will not be\n included in the returned dictionary.\n ignore_missing : bool, optional\n If True, ignore any values in output_properties not present in\n raw_arrays rather than raising an exception. 
Default is False.\n\n Returns\n -------\n out_dict : dict\n A dictionary whose keys are quantities and values are DataArrays\n corresponding to those quantities, with data, shapes and attributes\n determined from the inputs to this function.\n\n Raises\n ------\n InvalidPropertyDictError\n When an output property is specified to have dims_like an input\n property, but the arrays for the two properties have incompatible\n shapes.\n \"\"\"\n raw_arrays = raw_arrays.copy()\n if ignore_names is None:\n ignore_names = []\n if ignore_missing:\n ignore_names = set(output_properties.keys()).difference(raw_arrays.keys()).union(ignore_names)\n wildcard_names, dim_lengths = get_wildcard_matches_and_dim_lengths(\n input_state, input_properties)\n ensure_values_are_arrays(raw_arrays)\n dims_from_out_properties = extract_output_dims_properties(\n output_properties, input_properties, ignore_names)\n out_dict = {}\n for name, out_dims in dims_from_out_properties.items():\n if name in ignore_names:\n continue\n raw_name = get_alias_or_name(name, output_properties, input_properties)\n if '*' in out_dims:\n for dim_name, length in zip(out_dims, raw_arrays[raw_name].shape):\n if dim_name not in dim_lengths and dim_name != '*':\n dim_lengths[dim_name] = length\n out_dims_without_wildcard, target_shape = fill_dims_wildcard(\n out_dims, dim_lengths, wildcard_names)\n out_array = expand_array_wildcard_dims(\n raw_arrays[raw_name], target_shape, name, out_dims)\n else:\n check_array_shape(out_dims, raw_arrays[raw_name], name, dim_lengths)\n out_dims_without_wildcard = out_dims\n out_array = raw_arrays[raw_name]\n out_dict[name] = DataArray(\n out_array,\n dims=out_dims_without_wildcard,\n attrs={'units': output_properties[name]['units']}\n )\n return out_dict\n\n\ndef extract_output_dims_properties(output_properties, input_properties, ignore_names):\n return_array = {}\n for name, properties in output_properties.items():\n if name in ignore_names:\n continue\n elif 'dims' in properties.keys():\n return_array[name] = properties['dims']\n elif name not in input_properties.keys():\n raise InvalidPropertyDictError(\n 'Output dims must be specified for {} in properties'.format(name))\n elif 'dims' not in input_properties[name].keys():\n raise InvalidPropertyDictError(\n 'Input dims must be specified for {} in properties'.format(name))\n else:\n return_array[name] = input_properties[name]['dims']\n return return_array\n" ]
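A minimal invocation of `restore_data_arrays_with_properties`, sketched under the assumption that the wildcard helpers accept empty `input_state` and `input_properties` dicts when no `'*'` dims are used:

import numpy as np
from sympl._core.restore_dataarray import restore_data_arrays_with_properties

raw = {'air_temperature': np.ones((3, 4))}
output_properties = {
    'air_temperature': {'dims': ['lat', 'lon'], 'units': 'degK'},
}

# No inputs were transformed, so the input-side dicts stay empty here.
out = restore_data_arrays_with_properties(raw, output_properties, {}, {})
da = out['air_temperature']
print(da.dims, da.attrs['units'])  # ('lat', 'lon') degK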
[ [ "numpy.asarray" ] ]
sehandev/JT-VAE
[ "8e60eb560034bbc23d9989938d36b08a739edbb6" ]
[ "data_module.py" ]
[ "# Standard\n\n# PIP\nfrom torch.utils.data import DataLoader\nfrom pytorch_lightning import LightningDataModule\nfrom pl_bolts.datasets import DummyDataset\n\n# Custom\nfrom custom.dataset import CustomDataset\n\n\nclass CustomDataModule(LightningDataModule):\n def __init__(\n self,\n seq_len,\n batch_size=1,\n num_workers=0,\n ):\n super().__init__()\n self.seq_len = seq_len\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n self.set_datasets()\n\n def set_datasets(self):\n # self.train_dataset = CustomDataset(\n # seq_len=self.seq_len,\n # )\n # self.valid_dataset = CustomDataset(\n # seq_len=self.seq_len,\n # )\n # self.test_dataset = CustomDataset(\n # seq_len=self.seq_len,\n # )\n self.train_dataset = DummyDataset((1, 28, 28), (1,))\n self.valid_dataset = DummyDataset((1, 28, 28), (1,))\n self.test_dataset = DummyDataset((1, 28, 28), (1,))\n\n def train_dataloader(self):\n return DataLoader(\n self.train_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n )\n\n def val_dataloader(self):\n return DataLoader(\n self.valid_dataset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n )\n\n def test_dataloader(self):\n return DataLoader(\n self.test_dataset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n )\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
flowmatters/dsed-py
[ "b967db2797320e63bc504e40023b7c7623a0b002" ]
[ "dsed/ow.py" ]
[ "'''\nRunning Dynamic Sednet simulations using OpenWater\n'''\nimport os\nimport json\nimport pandas as pd\nimport geopandas as gpd\nimport shutil\nimport numpy as np\nfrom openwater import OWTemplate, OWLink\nfrom openwater.template import TAG_MODEL\nimport openwater.nodes as n\nfrom collections import defaultdict\nfrom openwater.examples.from_source import get_default_node_template, DEFAULT_NODE_TEMPLATES, storage_template_builder\nfrom openwater.catchments import \\\n DOWNSTREAM_FLOW_FLUX, DOWNSTREAM_LOAD_FLUX, \\\n UPSTREAM_FLOW_FLUX, UPSTREAM_LOAD_FLUX\nfrom openwater.results import OpenwaterResults\nfrom openwater.template import ModelFile\nfrom .const import *\nfrom openwater.file import _tabulate_model_scalars_from_file\n\nLANDSCAPE_CONSTITUENT_SOURCES=['Hillslope','Gully']\n\nFINE_SEDIMENT = 'Sediment - Fine'\nCOARSE_SEDIMENT = 'Sediment - Coarse'\nCGUS_TS_N_DIN = ['Sugarcane','Bananas']\n\nSEDIMENT_CLASSES = [FINE_SEDIMENT,COARSE_SEDIMENT]\nSTANDARD_NUTRIENTS = ['TN','TP']\n\nSTANDARD_CONSTITUENTS = SEDIMENT_CLASSES + STANDARD_NUTRIENTS\nQUICKFLOW_INPUTS = ['quickflow','flow']\nBASEFLOW_INPUTS = ['baseflow','slowflow']\n\nNIL_MODELS = {\n 'Dynamic_SedNet.Models.SedNet_Blank_Constituent_Generation_Model',\n 'RiverSystem.Catchments.Models.ContaminantGenerationModels.NilConstituent'\n}\n\nMODEL_NAME_TRANSLATIONS = {\n\n}\n\n\n# def default_generation_model(constituent,landuse):\n# if constituent=='TSS':\n# return n.USLEFineSedimentGeneration\n# return n.EmcDwc\n\n# def build_catchment_template(constituents,hrus,landuses,generation_model=default_generation_model):\n# template = OWTemplate()\n# routing_node = template.add_node(n.Muskingum,process='FlowRouting')\n# for con in constituents:\n# # transport_node = 'Transport-%s'%(con)\n# transport_node = template.add_node(n.LumpedConstituentRouting,process='ConstituentRouting',constituent=con)\n# template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))\n\n# for hru in hrus:\n# runoff_node = template.add_node(n.Simhyd,process='RR',hru=hru)\n# runoff_scale_node = template.add_node(n.DepthToRate,process='ArealScale',hru=hru,component='Runoff')\n# quickflow_scale_node = template.add_node(n.DepthToRate,process='ArealScale',hru=hru,component='Quickflow')\n# baseflow_scale_node = template.add_node(n.DepthToRate,process='ArealScale',hru=hru,component='Baseflow')\n\n# template.add_link(OWLink(runoff_node,'runoff',runoff_scale_node,'input'))\n# template.add_link(OWLink(runoff_node,'quickflow',quickflow_scale_node,'input'))\n# template.add_link(OWLink(runoff_node,'baseflow',baseflow_scale_node,'input'))\n\n# template.add_link(OWLink(runoff_scale_node,'outflow',routing_node,'lateral'))\n\n# for con in constituents:\n# # transport_node = 'Transport-%s'%(con)\n# transport_node = template.add_node(n.LumpedConstituentRouting,process='ConstituentRouting',constituent=con) #!!!ERROR\n# template.add_link(OWLink(runoff_scale_node,'outflow',transport_node,'inflow'))\n# for lu in landuses[hru]:\n# #gen_node = 'Generation-%s-%s'%(con,lu)\n# gen_node = template.add_node(generation_model(con,lu),process='ConstituentGeneration',constituent=con,lu=lu)\n# template.add_link(OWLink(quickflow_scale_node,'outflow',gen_node,'quickflow'))\n# template.add_link(OWLink(baseflow_scale_node,'outflow',gen_node,'baseflow'))\n# template.add_link(OWLink(gen_node,'totalLoad',transport_node,'lateralLoad'))\n\n# return template\n\n# def link_catchments(graph,from_cat,to_cat,constituents):\n# linkages = [('%d-FlowRouting (Muskingum)','outflow','inflow')] + \\\n# 
[('%%d-ConstituentRouting-%s (LumpedConstituentRouting)'%c,'outflowLoad','inflowLoad') for c in constituents]\n# for (lt,src,dest) in linkages:\n# dest_node = lt%from_cat\n# src_node = lt%to_cat#'%d/%s'%(to_cat,lt)\n# graph.add_edge(src_node,dest_node,src=[src],dest=[dest])\n\n# def generation_models(constituent,cgu):\n# if constituent in STANDARD_NUTRIENTS:\n# return n.EmcDwc\n\n\n# # if pesticide\n# return n.EmcDwc\n\nclass Reach(object):\n pass\n\nclass HydrologicalResponseUnit(object):\n pass\n\nclass DynamicSednetCGU(object):\n def __init__(self,cropping_cgu=True,sediment_fallback_model=False,gully_cgu=False,hillslope_cgu=False,ts_load_with_dwc=None):\n self.cropping_cgu = cropping_cgu\n # self.erosion_processes = erosion_processes\n self.gully_cgu = gully_cgu\n self.hillslope_cgu = hillslope_cgu\n self.sediment_fallback_model = sediment_fallback_model\n self.ts_load_with_dwc = ts_load_with_dwc\n\n assert (not bool(gully_cgu)) or (not bool(sediment_fallback_model))\n\n def generation_model(self,constituent,catchment_template,**kwargs):\n return catchment_template.model_for(catchment_template.cg,constituent,**kwargs)\n\n def get_template(self,catchment_template,**kwargs):\n tag_values = list(kwargs.values())\n cgu = kwargs.get('cgu','?')\n template = OWTemplate('cgu:%s'%cgu)\n\n runoff_scale_node = None\n quickflow_scale_node = None\n baseflow_scale_node = None\n if catchment_template.rr is not None:\n runoff_scale_node = template.add_node(n.DepthToRate,process='ArealScale',component='Runoff',**kwargs)\n quickflow_scale_node = template.add_node(n.DepthToRate,process='ArealScale',component='Quickflow',**kwargs)\n baseflow_scale_node = template.add_node(n.DepthToRate,process='ArealScale',component='Baseflow',**kwargs)\n\n def link_runoff(dest_node,qf_input,bf_input):\n if quickflow_scale_node is None:\n return\n if qf_input is not None:\n template.add_link(OWLink(quickflow_scale_node,'outflow',dest_node,qf_input))\n\n if bf_input is not None:\n template.add_link(OWLink(baseflow_scale_node,'outflow',dest_node,bf_input))\n\n def add_emc_dwc(con):\n dwc_node = template.add_node(n.EmcDwc,process='ConstituentDryWeatherGeneration',constituent=con,**kwargs)\n link_runoff(dwc_node,'quickflow','baseflow')\n return dwc_node\n\n if runoff_scale_node is not None:\n template.define_input(runoff_scale_node,'input','runoff')\n template.define_input(quickflow_scale_node,'input','quickflow')\n template.define_input(baseflow_scale_node,'input','baseflow')\n template.define_output(runoff_scale_node,'outflow','lateral')\n # This should be able to be done automatically... 
any input not defined\n\n hillslope_fine_sed_gen = None\n hillslope_coarse_sed_gen = None\n hillslope_fine_sed_gen_flux = None\n hillslope_coarse_sed_gen_flux = None\n\n # fine_ts_scale = None\n # coarse_ts_scale = None\n gully_gen = None\n\n if self.hillslope_cgu:\n # Hillslope\n sed_gen = template.add_node(n.USLEFineSedimentGeneration,process=\"HillslopeGeneration\",**kwargs)\n link_runoff(sed_gen,'quickflow','baseflow')\n\n hillslope_fine_sed_gen = sed_gen\n hillslope_coarse_sed_gen = sed_gen\n hillslope_fine_sed_gen_flux = 'generatedLoadFine'\n hillslope_coarse_sed_gen_flux = 'generatedLoadCoarse'\n\n if self.gully_cgu:\n # Gully\n gully_gen = template.add_node(n.DynamicSednetGullyAlt,process=\"GullyGeneration\",**kwargs)\n link_runoff(gully_gen,'quickflow',None)\n\n fine_sum = template.add_node(n.Sum,process='ConstituentGeneration',constituent=FINE_SEDIMENT,**kwargs)\n coarse_sum = template.add_node(n.Sum,process='ConstituentGeneration',constituent=COARSE_SEDIMENT,**kwargs)\n\n template.add_link(OWLink(gully_gen,'fineLoad',fine_sum,'i2'))\n template.add_link(OWLink(gully_gen,'coarseLoad',coarse_sum,'i2'))\n\n if self.hillslope_cgu:\n template.add_link(OWLink(sed_gen,'totalLoad',fine_sum,'i1')) # was quickLoadFine\n template.add_link(OWLink(sed_gen,'quickLoadCoarse',coarse_sum,'i1'))\n else:\n fine_dwc_node = add_emc_dwc(FINE_SEDIMENT)\n template.add_link(OWLink(fine_dwc_node,'totalLoad',fine_sum,'i1'))\n\n coarse_dwc_node = add_emc_dwc(COARSE_SEDIMENT)\n template.add_link(OWLink(coarse_dwc_node,'totalLoad',coarse_sum,'i1'))\n\n if self.cropping_cgu:\n ts_node = template.add_node(n.PassLoadIfFlow,process='ConstituentOtherGeneration',constituent=FINE_SEDIMENT,**kwargs)\n link_runoff(ts_node,'flow',None)\n\n ts_split_node = template.add_node(n.FixedPartition,process='FineCoarseSplit',**kwargs)\n template.add_link(OWLink(ts_node,'outputLoad',ts_split_node,'input'))\n\n fine_ts_scale = template.add_node(n.ApplyScalingFactor,process='ConstituentScaling',constituent=FINE_SEDIMENT,**kwargs)\n template.add_link(OWLink(ts_split_node,'output1',fine_ts_scale,'input')) # fraction\n\n fine_ts_sdr = template.add_node(n.DeliveryRatio,process='SDR',constituent=FINE_SEDIMENT,**kwargs)\n template.add_link(OWLink(fine_ts_scale,'output',fine_ts_sdr,'input')) # fraction\n\n coarse_ts_sdr = template.add_node(n.DeliveryRatio,process='SDR',constituent=COARSE_SEDIMENT,**kwargs)\n template.add_link(OWLink(ts_split_node,'output2',coarse_ts_sdr,'input')) # 1-fraction\n\n template.add_link(OWLink(fine_ts_sdr,'output',fine_sum,'i1'))\n template.add_link(OWLink(coarse_ts_sdr,'output',coarse_sum,'i1'))\n\n #TODO Will this always be the right thing to link? 
Should it ideally be i1 of the sum node going into other constituent models?\n hillslope_fine_sed_gen = fine_ts_scale\n hillslope_fine_sed_gen_flux = 'output'\n\n hillslope_coarse_sed_gen = ts_split_node\n hillslope_coarse_sed_gen_flux = 'output2'\n # HACK - Just seeing if this is what we need in order to get\n # the pre-SDR sediment loads?\n # But it includes the load conversion factor on the fine...\n # So *probably not*\n\n # hillslope_fine_sed_gen = fine_ts_scale\n # hillslope_fine_sed_gen_flux = 'output'\n\n # hillslope_coarse_sed_gen = coarse_ts_scale\n # hillslope_coarse_sed_gen_flux = 'output'\n else:\n # TODO: HACK - Check that quickLoad should apply whenever we have an EMC/DWC model for sediment\n hillslope_fine_sed_gen = fine_dwc_node\n hillslope_fine_sed_gen_flux = 'quickLoad'\n\n hillslope_coarse_sed_gen = coarse_dwc_node\n hillslope_coarse_sed_gen_flux = 'quickLoad'\n\n template.define_output(fine_sum,'out','generatedLoad',constituent=FINE_SEDIMENT)\n template.define_output(coarse_sum,'out','generatedLoad',constituent=COARSE_SEDIMENT)\n\n\n for con in catchment_template.pesticides:\n dwc_node = add_emc_dwc(con)\n if self.cropping_cgu:\n ts_node = template.add_node(n.PassLoadIfFlow,process='ConstituentOtherGeneration',constituent=con,**kwargs)\n link_runoff(ts_node,'flow',None)\n\n sum_node = template.add_node(n.Sum,process='ConstituentGeneration',constituent=con,**kwargs)\n template.add_link(OWLink(dwc_node,'totalLoad',sum_node,'i1'))\n template.add_link(OWLink(ts_node,'outputLoad',sum_node,'i2'))\n\n template.define_output(sum_node,'out','generatedLoad')\n\n for con in catchment_template.constituents:\n if not self.sediment_fallback_model and (con in [FINE_SEDIMENT,COARSE_SEDIMENT]):\n continue\n\n if con in catchment_template.pesticides:\n continue\n\n ts_cane_din = (cgu in CGUS_TS_N_DIN) and (con=='N_DIN')\n ts_crop_part_p = (con == 'P_Particulate') and self.cropping_cgu and not cgu=='Sugarcane'\n ts_load_with_dwc = self.ts_load_with_dwc and \\\n (con in self.ts_load_with_dwc['constituents']) and \\\n (cgu in self.ts_load_with_dwc['cgus'])\n\n if ts_cane_din or ts_crop_part_p or ts_load_with_dwc:\n ts_node = template.add_node(n.PassLoadIfFlow,process='ConstituentOtherGeneration',constituent=con,**kwargs)\n link_runoff(ts_node,'flow',None)\n\n ts_scale_node = template.add_node(n.ApplyScalingFactor,process='ConstituentScaling',constituent=con,**kwargs)\n template.add_link(OWLink(ts_node,'outputLoad',ts_scale_node,'input'))\n\n dwc_node = add_emc_dwc(con)\n\n sum_node = template.add_node(n.Sum,process='ConstituentGeneration',constituent=con,**kwargs)\n template.add_link(OWLink(ts_scale_node,'output',sum_node,'i1'))\n template.add_link(OWLink(dwc_node,'totalLoad',sum_node,'i2'))\n\n if ts_cane_din:\n leached_ts_node = template.add_node(n.PassLoadIfFlow,process='ConstituentOtherGeneration',constituent='NLeached',**kwargs)\n link_runoff(leached_ts_node,None,'flow')\n\n leached_ts_scale_node = template.add_node(n.ApplyScalingFactor,process='ConstituentScaling',constituent='NLeached',**kwargs)\n template.add_link(OWLink(leached_ts_node,'outputLoad',leached_ts_scale_node,'input'))\n\n template.add_link(OWLink(leached_ts_scale_node,'output',sum_node,'i2'))\n\n template.define_output(sum_node,'out','generatedLoad')\n continue\n\n model = self.generation_model(con,catchment_template,**kwargs)\n if model is None:\n print('No regular constituent generation model for %s'%con)\n continue\n\n gen_node = template.add_node(model,process='ConstituentGeneration',constituent=con,**kwargs)\n if 
quickflow_scale_node is not None:\n template.add_conditional_link(quickflow_scale_node,'outflow',gen_node,QUICKFLOW_INPUTS,model)\n template.add_conditional_link(baseflow_scale_node, 'outflow',gen_node,BASEFLOW_INPUTS,model)\n\n if model.name == 'SednetParticulateNutrientGeneration':\n template.add_link(OWLink(gully_gen,'generatedFine',gen_node,'fineSedModelFineGullyGeneratedKg'))\n template.add_link(OWLink(gully_gen,'generatedCoarse',gen_node,'fineSedModelCoarseGullyGeneratedKg'))\n\n template.add_link(OWLink(hillslope_fine_sed_gen,hillslope_fine_sed_gen_flux,\n gen_node,'fineSedModelFineSheetGeneratedKg'))\n template.add_link(OWLink(hillslope_coarse_sed_gen,hillslope_coarse_sed_gen_flux,\n gen_node,'fineSedModelCoarseSheetGeneratedKg'))\n\n template.define_output(gen_node,main_output_flux(model),'generatedLoad')\n\n return template\n\nclass DynamicSednetAgCGU(DynamicSednetCGU):\n pass\n # def generation_model(self,constituent,catchment):\n # if constituent == FINE_SEDIMENT:\n # return n.USLEFineSedimentGeneration\n # return super(DynamicSednetAgCGU,self).generation_model(constituent)\n\nclass NilCGU(DynamicSednetCGU):\n def generation_model(self,*args,**kwargs):\n return None\n\n\nclass DynamicSednetCatchment(object):\n def __init__(self,\n dissolved_nutrients=['DisN','DisP'],\n particulate_nutrients=['PartN','PartP'],\n pesticides=['Pesticide1'],\n particulate_nutrient_cgus=None,\n ts_load_with_dwc=None):\n self.hrus = ['HRU']\n self.cgus = ['CGU']\n self.cgu_hrus = {'CGU':'HRU'}\n self.constituents = SEDIMENT_CLASSES + dissolved_nutrients + particulate_nutrients + pesticides\n self.particulate_nutrients = particulate_nutrients\n self.particulate_nutrient_cgus = particulate_nutrient_cgus\n self.dissolved_nutrients = dissolved_nutrients\n self.pesticides = pesticides\n self.pesticide_cgus = None\n self.timeseries_sediment_cgus = None\n self.hillslope_cgus = None\n self.gully_cgus = None\n self.sediment_fallback_cgu = None\n self.ts_load_with_dwc = ts_load_with_dwc\n self.climate_inputs = ['rainfall','pet']\n\n self.rr = n.Sacramento\n self.cg = defaultdict(lambda:n.EmcDwc,{})\n # {\n # FINE_SEDIMENT:None,\n # COARSE_SEDIMENT:None\n # })\n\n self.routing = n.Muskingum\n self.transport = defaultdict(lambda:n.LumpedConstituentRouting,{\n FINE_SEDIMENT:n.InstreamFineSediment,\n COARSE_SEDIMENT:n.InstreamCoarseSediment\n })\n\n self._g = None\n self._g_lookup = {}\n\n self.node_templates = DEFAULT_NODE_TEMPLATES.copy()\n self.node_templates['Storage'] = storage_template_builder(constituent_model_map=defaultdict(lambda:n.LumpedConstituentRouting,{\n FINE_SEDIMENT:n.StorageParticulateTrapping,\n COARSE_SEDIMENT:n.StorageParticulateTrapping\n }))\n\n def get_model_dissolved_nutrient(*args,**kwargs):\n cgu = kwargs['cgu']\n\n if cgu in self.pesticide_cgus:\n constituent = args[0]\n if cgu=='Sugarcane':\n if constituent=='N_DIN':\n return None\n elif constituent=='N_DON':\n return n.EmcDwc\n elif constituent.startswith('P'):\n return n.EmcDwc\n if constituent.startswith('P'):\n return n.PassLoadIfFlow\n # if cgu is a cropping FU:\n # look at constituent\n # constituent = args[0]\n\n if cgu in ['Water']: # 'Conservation','Horticulture','Other','Urban','Forestry'\n return n.EmcDwc\n\n # print(args)\n # print(kwargs)\n return n.SednetDissolvedNutrientGeneration\n\n def get_model_particulate_nutrient(*args,**kwargs):\n cgu = kwargs['cgu']\n\n if self.particulate_nutrient_cgus is None:\n if cgu in ['Water','Conservation','Horticulture','Other','Urban','Forestry']:\n return n.EmcDwc\n # if cropping (but 
not sugarcane) and constituent == P_Particulate\n # Timeseries model...\n\n return n.SednetParticulateNutrientGeneration\n\n if cgu in self.particulate_nutrient_cgus:\n return n.SednetParticulateNutrientGeneration\n return n.EmcDwc\n\n for dn in dissolved_nutrients:\n self.cg[dn] = get_model_dissolved_nutrient\n self.transport[dn] = n.InstreamDissolvedNutrientDecay\n for pn in particulate_nutrients:\n self.cg[pn] = get_model_particulate_nutrient\n self.transport[pn] = n.InstreamParticulateNutrient\n\n def model_for(self,provider,*args,**kwargs):\n if hasattr(provider,'__call__'):\n return self.model_for(provider(*args,**kwargs),*args,**kwargs)\n if hasattr(provider,'__getitem__'):\n return self.model_for(provider[args[0]],*args,**kwargs)\n return provider\n\n def get_link_template(self,**kwargs) -> OWTemplate:\n tag_values = list(kwargs.values())\n reach_template = OWTemplate('reach')\n\n routing_node = None\n if self.routing is not None:\n lag_node = reach_template.add_node(n.Lag,process='FlowLag',constituent='_flow',**kwargs)\n reach_template.define_input(lag_node,'inflow','lateral')\n\n routing_node = reach_template.add_node(self.routing,process='FlowRouting',**kwargs)\n reach_template.add_link(OWLink(lag_node,'outflow',routing_node,'lateral'))\n reach_template.define_output(routing_node,'outflow')\n\n reach_template.define_input(routing_node,'inflow',UPSTREAM_FLOW_FLUX,**kwargs)\n reach_template.define_output(routing_node,'outflow',DOWNSTREAM_FLOW_FLUX,**kwargs)\n\n bank_erosion = reach_template.add_node(n.BankErosion,process='BankErosion',**kwargs)\n if routing_node is not None:\n reach_template.add_link(OWLink(routing_node,'storage',bank_erosion,'totalVolume'))\n reach_template.add_link(OWLink(routing_node,'outflow',bank_erosion,'downstreamFlowVolume'))\n\n dis_nut_models = []\n par_nut_models = []\n fine_sed_model = None\n fine_sed_con_lag_model = None\n # n.InstreamFineSediment.name: ('upstreamMass','loadDownstream'),\n # n.InstreamCoarseSediment.name: ('upstreamMass','loadDownstream'),\n # n.InstreamDissolvedNutrientDecay.name: ('incomingMassUpstream','loadDownstream'),\n # n.InstreamParticulateNutrient.name: ('incomingMassUpstream','loadDownstream')\n for con in self.constituents:\n model_type = self.model_for(self.transport,con,*tag_values)\n constituent_lag_node = reach_template.add_node(n.Lag,process='FlowLag',constituent=con,**kwargs)\n reach_template.define_input(constituent_lag_node,'inflow','generatedLoad')\n\n transport_node = reach_template.add_node(model_type,process='ConstituentRouting',constituent=con,**kwargs)\n\n if model_type == n.InstreamFineSediment:\n fine_sed_con_lag_model = constituent_lag_node\n # reach_template.define_input(transport_node,'incomingMass','generatedLoad')\n reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'lateralMass'))\n if self.routing is not None:\n reach_template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))\n reach_template.add_link(OWLink(routing_node,'storage',transport_node,'reachVolume'))\n\n reach_template.add_link(OWLink(bank_erosion,'bankErosionFine',transport_node,'reachLocalMass'))\n load_out_flux = 'loadDownstream'\n load_in_flux = 'upstreamMass'\n fine_sed_model = transport_node\n\n elif model_type == n.InstreamCoarseSediment:\n # reach_template.define_input(transport_node,'incomingMass','generatedLoad')\n reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'lateralMass'))\n\n 
reach_template.add_link(OWLink(bank_erosion,'bankErosionCoarse',transport_node,'reachLocalMass'))\n load_out_flux = 'loadDownstream'\n load_in_flux = 'upstreamMass'\n\n elif model_type == n.InstreamDissolvedNutrientDecay:\n dis_nut_models.append(transport_node)\n # reach_template.define_input(transport_node,'incomingMassLateral','generatedLoad')\n reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'incomingMassLateral'))\n if self.routing is not None:\n reach_template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))\n reach_template.add_link(OWLink(routing_node,'storage',transport_node,'reachVolume'))\n\n load_out_flux = 'loadDownstream'\n load_in_flux = 'incomingMassUpstream'\n\n# elif model_type == n.InstreamParticulateNutrient: TODO\n elif model_type == n.InstreamParticulateNutrient:\n par_nut_models.append(transport_node)\n reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'incomingMassLateral'))\n if self.routing is not None:\n reach_template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))\n reach_template.add_link(OWLink(routing_node,'storage',transport_node,'reachVolume'))\n reach_template.add_link(OWLink(bank_erosion,'bankErosionFine',transport_node,'streambankErosion'))\n reach_template.add_link(OWLink(bank_erosion,'bankErosionCoarse',transport_node,'streambankErosion'))\n\n load_out_flux = 'loadDownstream'\n load_in_flux = 'incomingMassUpstream'\n else:\n # Lumped constituent routing\n # reach_template.define_input(transport_node,'lateralLoad','generatedLoad')\n reach_template.add_link(OWLink(constituent_lag_node,'outflow',transport_node,'lateralLoad'))\n # reach_template.add_link(OWLink(lag_node,'outflow',transport_node,'inflow')) # inflow removed from LumpedConstituentRouting. 
Unused\n if self.routing is not None:\n reach_template.add_link(OWLink(routing_node,'outflow',transport_node,'outflow'))\n reach_template.add_link(OWLink(routing_node,'storage',transport_node,'storage'))\n load_out_flux = 'outflowLoad'\n load_in_flux = 'inflowLoad'\n\n reach_template.define_output(transport_node,load_out_flux,DOWNSTREAM_LOAD_FLUX,constituent=con,**kwargs)\n reach_template.define_input(transport_node,load_in_flux,UPSTREAM_LOAD_FLUX,constituent=con,**kwargs)\n\n if fine_sed_model is not None:\n for dnm in dis_nut_models:\n reach_template.add_link(OWLink(fine_sed_model,'floodplainDepositionFraction',dnm,'floodplainDepositionFraction'))\n for pnm in par_nut_models:\n reach_template.add_link(OWLink(fine_sed_model,'floodplainDepositionFraction',pnm,'floodplainDepositionFraction'))\n reach_template.add_link(OWLink(fine_sed_model,'channelDepositionFraction',pnm,'channelDepositionFraction'))\n # reach_template.add_link(OWLink(fine_sed_model,'channelDepositionFraction',pnm,'channelDepositionFraction'))\n reach_template.add_link(OWLink(fine_sed_con_lag_model,'outflow',pnm,'lateralSediment'))\n\n return reach_template\n\n def cgu_factory(self,cgu):\n cropping_cgu = (self.pesticide_cgus is not None) and (cgu in self.pesticide_cgus)\n cropping_cgu = cropping_cgu or ((self.timeseries_sediment_cgus is not None) and (cgu in self.timeseries_sediment_cgus))\n gully_proc = (self.gully_cgus is None) or (cgu in self.gully_cgus)\n hillslope_proc = (self.hillslope_cgus is None) or (cgu in self.hillslope_cgus)\n\n emc_proc = False\n if self.sediment_fallback_cgu is not None:\n emc_proc = cgu in self.sediment_fallback_cgu\n\n if cgu=='Water/lakes':\n return NilCGU()\n # if cgu in ['Dryland', 'Irrigation', 'Horticulture', 'Irrigated Grazing']:\n # return DynamicSednetAgCGU()\n return DynamicSednetCGU(cropping_cgu=cropping_cgu,\n sediment_fallback_model=emc_proc,\n gully_cgu=gully_proc,\n hillslope_cgu=hillslope_proc,\n ts_load_with_dwc=self.ts_load_with_dwc)\n\n def get_template(self,**kwargs):\n tag_values = list(kwargs.values())\n template = OWTemplate('catchment')\n climate_nodes = {cvar: template.add_node(n.Input,process='input',variable=cvar,**kwargs) for cvar in self.climate_inputs}\n\n hrus={}\n for hru in self.hrus:\n hru_template = OWTemplate('hru:%s'%hru)\n\n if self.rr is not None:\n runoff_template = OWTemplate('runoff:%s'%hru)\n runoff_node = runoff_template.add_node(self.model_for(self.rr,hru,*tag_values),process='RR',hru=hru,**kwargs)\n\n for clim_var, clim_node in climate_nodes.items():\n template.add_link(OWLink(clim_node,'output',runoff_node,clim_var))\n\n runoff_template.define_output(runoff_node,'runoff')\n runoff_template.define_output(runoff_node,'surfaceRunoff','quickflow')\n runoff_template.define_output(runoff_node,'baseflow')\n hru_template.nest(runoff_template)\n hrus[hru] = hru_template\n template.nest(hru_template)\n\n for cgu in self.cgus:\n hru = self.cgu_hrus[cgu]\n cgu_builder = self.cgu_factory(cgu)\n cgu_template = cgu_builder.get_template(self,cgu=cgu,**kwargs)\n hrus[hru].nest(cgu_template)\n\n template.nest(self.get_link_template(**kwargs))\n\n return template\n\n def get_node_template(self,node_type,**kwargs):\n assert self.node_templates is not None\n return get_default_node_template(node_type,constituents=self.constituents,templates=self.node_templates,**kwargs)\n\n def link_catchments(self,graph,upstream,downstream):\n STANDARD_LINKS = defaultdict(lambda:[None,None],{\n n.InstreamFineSediment.name: ('upstreamMass','loadDownstream'),\n 
n.InstreamCoarseSediment.name: ('upstreamMass','loadDownstream'),\n n.InstreamDissolvedNutrientDecay.name: ('incomingMassUpstream','loadDownstream'),\n n.InstreamParticulateNutrient.name: ('incomingMassUpstream','loadDownstream')\n })\n\n if (self._g == None) or (self._g != graph):\n self._g = graph\n self._g_lookup = {}\n\n def match_node(nm):\n if nm in self._g_lookup:\n return self._g_lookup[nm]\n for nn in graph.nodes:\n if nn.startswith(nm):\n self._g_lookup[nm] = nn\n return nn\n return None\n \n linkages = [('%s-FlowRouting','outflow','inflow')] + \\\n [('%%s-ConstituentRouting-%s'%c,'outflowLoad','inflowLoad') for c in self.constituents]\n for (lt,src,dest) in linkages:\n\n src_node = lt%(str(upstream))\n dest_node = lt%(str(downstream))#'%d/%s'%(to_cat,lt)\n src_node = match_node(src_node)#[n for n in graph.nodes if n.startswith(src_node)][0]\n dest_node = match_node(dest_node)#[n for n in graph.nodes if n.startswith(dest_node)][0]\n\n if (src_node is None) and (dest_node is None):\n # If both are missing then assume process is not being modelled\n continue\n src_model = graph.nodes[src_node][TAG_MODEL]\n dest_model = graph.nodes[dest_node][TAG_MODEL]\n src = STANDARD_LINKS[src_model][1] or src\n dest = STANDARD_LINKS[dest_model][0] or dest\n # print(src_node,src,dest_node,dest)\n graph.add_edge(src_node,dest_node,src=[src],dest=[dest])\n\ndef main_output_flux(model):\n if model.name=='PassLoadIfFlow':\n return 'outputLoad'\n return 'totalLoad'\n\nclass OpenwaterDynamicSednetResults(object):\n def __init__(self, fn, res_fn=None):\n self.fn = fn\n self.ow_model_fn = self.filename_from_base('.h5')\n self.meta = json.load(open(self.filename_from_base('.meta.json')))\n self.init_network(fn)\n\n self.ow_results_fn = res_fn or self.filename_from_base('_outputs.h5')\n self.dates = pd.date_range(self.meta['start'], self.meta['end'])\n self.open_files()\n\n def filename_from_base(self,fn):\n return self.fn.replace('.h5','')+fn\n\n def init_network(self,fn):\n from veneer.general import _extend_network\n self.nodes = gpd.read_file(self.filename_from_base('.nodes.json'))\n self.links = gpd.read_file(self.filename_from_base('.links.json'))\n self.catchments = gpd.read_file(self.filename_from_base('.catchments.json'))\n raw = [json.load(open(self.filename_from_base('.'+c+'.json'),'r')) for c in ['nodes','links','catchments']]\n self.network = {\n 'type':'FeatureCollection',\n 'crs':raw[0]['crs'],\n 'features':sum([r['features'] for r in raw],[])\n }\n self.network = _extend_network(self.network)\n\n def run_model(self):\n self.model.run(self.dates, self.ow_results_fn, overwrite=True)\n self.open_files()\n\n def open_files(self):\n _ensure_uncompressed(self.ow_model_fn)\n _ensure_uncompressed(self.ow_results_fn)\n\n self.results = OpenwaterResults(self.ow_model_fn,\n self.ow_results_fn,\n self.dates)\n self.model = ModelFile(self.ow_model_fn)\n\n def regulated_links(self):\n from veneer.extensions import _feature_id\n network = self.network\n outlet_nodes = network.outlet_nodes()\n outlets = [n['properties']['name'] for n in outlet_nodes]\n network.partition(outlets,'outlet')\n storages = network['features'].find_by_icon('/resources/StorageNodeModel')\n extractions = network['features'].find_by_icon('/resources/ExtractionNodeModel')\n\n impacted_by_storage = []\n for s in storages._list+extractions._list:\n outlet = s['properties']['outlet']\n outlet_id = _feature_id(network['features'].find_by_name(outlet)[0])\n impacted_by_storage += network.path_between(s,outlet_id)\n\n ids = 
set([_feature_id(f) for f in impacted_by_storage])\n network_df = network.as_dataframe()\n impacted_by_storage = network_df[network_df['id'].isin(ids)]\n links_downstream_storage = [l.replace('link for catchment ','') for l in impacted_by_storage[impacted_by_storage.feature_type=='link'].name]\n return links_downstream_storage\n\n def generation_model(self,c,fu):\n EMC = 'EmcDwc','totalLoad'\n SUM = 'Sum','out'\n\n if c in self.meta['sediments']:\n if fu in (self.meta['usle_cgus']+self.meta['cropping_cgus']+self.meta['gully_cgus']):\n return SUM\n return EMC\n\n if c in self.meta['pesticides']:\n if fu in self.meta['cropping_cgus']:\n return SUM\n return EMC\n\n if c in self.meta['dissolved_nutrients']:\n if fu in ['Water']: #,'Conservation','Horticulture','Other','Urban','Forestry']:\n return EMC\n\n if (self.meta['ts_load'] is not None) and \\\n (fu in self.meta['ts_load']['cgus']) and \\\n (c in self.meta['ts_load']['constituents']):\n return SUM\n\n if fu == 'Sugarcane':\n if c=='N_DIN':\n return SUM\n elif c=='N_DON':\n return EMC\n elif c.startswith('P'):\n return EMC\n if (fu == 'Bananas') and (c=='N_DIN'):\n return SUM\n\n if fu in self.meta['cropping_cgus'] or fu in self.meta.get('pesticide_cgus',[]):\n if c.startswith('P'):\n return 'PassLoadIfFlow', 'outputLoad'\n\n return 'SednetDissolvedNutrientGeneration', 'totalLoad'\n\n if c in self.meta['particulate_nutrients']:\n if (fu != 'Sugarcane') and (c == 'P_Particulate'):\n if (fu in self.meta['cropping_cgus']) or (fu in self.meta.get('timeseries_sediment',[])):\n return SUM\n\n for fu_cat in ['cropping_cgus','hillslope_emc_cgus','gully_cgus','erosion_cgus']:\n if fu in self.meta.get(fu_cat,[]):\n return 'SednetParticulateNutrientGeneration', 'totalLoad'\n\n return EMC\n\n def transport_model(self,c):\n LCR = 'LumpedConstituentRouting','outflowLoad'\n if c in self.meta['pesticides']:\n return LCR\n if c in self.meta['dissolved_nutrients']:\n return 'InstreamDissolvedNutrientDecay', 'loadDownstream'\n if c in self.meta['particulate_nutrients']:\n return 'InstreamParticulateNutrient', 'loadDownstream'\n if c == 'Sediment - Coarse':\n return 'InstreamCoarseSediment', 'loadDownstream'\n if c == 'Sediment - Fine':\n return 'InstreamFineSediment', 'loadDownstream'\n assert False\n\nclass DynamicSednetStandardReporting(object):\n def __init__(self,ow_impl):\n self.impl = ow_impl\n self.results = ow_impl.results\n self.model = ow_impl.model\n\n def _get_states(self,f,model,**tags):\n mmap = self.model._map_model_dims(model)\n return _tabulate_model_scalars_from_file(f,model,mmap,'states',**tags)\n\n def get_final_states(self,model,**tags):\n f = self.results.results\n return self._get_states(f,model,**tags)\n\n def get_initial_states(self,model,**tags):\n f = self.results.model\n return self._get_states(f,model,**tags)\n\n def outlet_nodes_time_series(self,dest,overwrite=False):\n if os.path.exists(dest):\n if overwrite and os.path.isdir(dest):\n shutil.rmtree(dest)\n else:\n raise Exception(\"Destination exists\")\n os.makedirs(dest)\n\n outlets = self.impl.network.outlet_nodes()\n final_links = [l['properties']['name'].replace('link for catchment ','') \\\n for l in sum([self.impl.network.upstream_links(n['properties']['id'])._list for n in outlets],[])]\n assert len(final_links)==len(outlets)\n\n total_fn = os.path.join(dest,'TotalDaily_%s_ModelTotal_%s.csv')\n\n flow_l = self.results.time_series('StorageRouting','outflow','catchment')[final_links]*PER_SECOND_TO_PER_DAY * M3_TO_L\n for outlet,final_link in zip(outlets,final_links):\n 
fn = os.path.join(dest,f'node_flow_{outlet[\"properties\"][\"name\"]}_Litres.csv')\n flow_l[final_link].to_csv(fn)\n flow_l.sum(axis=1).to_csv(total_fn%('Flow','Litres'))\n for c in self.impl.meta['constituents']:\n mod, flux = self.impl.transport_model(c)\n constituent_loads_kg = self.results.time_series(mod,flux,'catchment',constituent=c)[final_links]*PER_SECOND_TO_PER_DAY\n for outlet,final_link in zip(outlets,final_links):\n fn = os.path.join(dest,f'link_const_{outlet[\"properties\"][\"name\"]}_{c}_Kilograms.csv')\n constituent_loads_kg[final_link].to_csv(fn)\n constituent_loads_kg.sum(axis=1).to_csv(total_fn%(c,'Kilograms'))\n\n def outlet_nodes_rates_table(self):\n outlets = [n['properties']['id'] for n in self.impl.network.outlet_nodes()]\n final_links = [l['properties']['name'].replace('link for catchment ','') for l in sum([self.impl.network.upstream_links(n)._list for n in outlets],[])]\n flow_l = np.array(self.results.time_series('StorageRouting','outflow','catchment')[final_links])*PER_SECOND_TO_PER_DAY * M3_TO_L\n total_area = sum(self.model.parameters('DepthToRate',component='Runoff').area)\n records = []\n for c in self.impl.meta['constituents']:\n mod, flux = self.impl.transport_model(c)\n constituent_loads_kg = np.array(self.results.time_series(mod,flux,'catchment',constituent=c)[final_links])*PER_SECOND_TO_PER_DAY\n records.append(dict(\n Region='ModelTotal',\n Constituent=c,\n Area=total_area,\n Total_Load_in_Kg=constituent_loads_kg.sum(),\n Flow_Litres=flow_l.sum(),\n Concentration=0.0,\n LoadPerArea=0.0,\n NumDays=flow_l.shape[0]\n ))\n return pd.DataFrame(records)\n\n def climate_table(self):\n orig_tbls = []\n melted_tbls = []\n variables = [('rainfall','Rainfall'),('actualET','Actual ET'),('baseflow','Baseflow'),('runoff','Runoff (Quickflow)')]\n for v,lbl in variables:\n tbl = self.results.table('Sacramento',v,'catchment','hru','sum','sum')*MM_TO_M\n if v=='runoff':\n tbl = tbl - orig_tbls[-1]\n orig_tbls.append(tbl)\n tbl = tbl.reset_index().melt(id_vars=['index'],value_vars=list(tbl.columns)).rename(columns={'index':'Catchment','variable':'FU','value':'Depth_m'})\n tbl['Element']=lbl\n melted_tbls.append(tbl)\n return pd.concat(melted_tbls).sort_values(['Catchment','FU','Element'])\n\n def fu_areas_table(self):\n tbl = self.model.parameters('DepthToRate',component='Runoff')\n tbl = tbl[['catchment','cgu','area']].sort_values(['catchment','cgu']).rename(columns={'catchment':'Catchment','cgu':'CGU'})\n return tbl\n\n def fu_summary_table(self):\n summary = []\n seen = {}\n for con in self.impl.meta['constituents']:\n for fu in self.impl.meta['fus']:\n combo = self.impl.generation_model(con,fu)\n if not combo in seen:\n model,flux = combo\n tbl = self.results.table(model,flux,'constituent','cgu','sum','sum') * PER_SECOND_TO_PER_DAY\n seen[combo]=tbl\n tbl = seen[combo]\n summary.append((con,fu,tbl.loc[con,fu]))\n return pd.DataFrame(summary,columns=['Constituent','FU','Total_Load_in_Kg'])\n\n def regional_summary_table(self):\n tables = [self.mass_balance_summary_table(self,region) for region in self.impl.meta['regions']]\n for tbl,region in zip(tables,self.impl.meta['regions']):\n tbl['SummaryRegion']=region\n return pd.concat(tables)\n\n def overall_summary_table(self):\n return self.mass_balance_summary_table()\n\n def constituent_loss_table(self,region=None):\n loss_fluxes = [\n ('InstreamFineSediment','loadToFloodplain','Sediment - Fine'),\n ('InstreamDissolvedNutrientDecay','loadToFloodplain'),\n # ('InstreamDissolvedNutrientDecay','loadDecayed'), #TODO\n\n 
]\n\n loss_states = [\n ('InstreamFineSediment','channelStoreFine','Sediment - Fine')\n ]\n pass\n\n def residual_constituent_table(self,region=None):\n # Need to query final states (and initial states?)\n mass_states = [\n ('LumpedConstituentRouting','storedMass'),\n ('InstreamFineSediment','totalStoredMass', 'Sediment - Fine'),\n ('InstreamDissolvedNutrientDecay','totalStoredMass'),\n ('InstreamCoarseSediment','totalStoredMass', 'Sediment - Coarse')\n ]\n tables = []\n for state in mass_states:\n m = state[0]\n v = state[1]\n values = self.get_final_states(m)\n if len(state)>2:\n values['constituent']=state[2]\n tbl = values[['constituent',v]].groupby('constituent').sum().reset_index().rename(columns={\n 'constituent':'Constituent',\n v:'Total_Load_in_Kg'\n })\n tables.append(tbl)\n return pd.concat(tables).groupby('Constituent').sum().reset_index()\n\n def mass_balance_summary_table(self,region=None):\n cols =['Constituent','Total_Load_in_Kg']\n input_tables = {\n 'Supply':self.fu_summary_table(),\n 'Export':self.outlet_nodes_rates_table(),\n 'Loss':self.constituent_loss_table(region),\n 'Residual':self.residual_constituent_table(region)\n }\n\n result = []\n for k,tbl in input_tables.items():\n if tbl is None:\n print(f'Missing table {k}')\n continue\n tbl = tbl[cols].groupby('Constituent').sum().reset_index()\n tbl['MassBalanceElement'] = k\n tbl = tbl[['Constituent','MassBalanceElement','Total_Load_in_Kg']]\n result.append(tbl)\n\n return pd.concat(result).sort_values(['Constituent','MassBalanceElement'])\n\n\ndef _ensure_uncompressed(fn):\n if os.path.exists(fn):\n return\n gzfn = fn + '.gz'\n if not os.path.exists(gzfn):\n raise Exception('File not found (compressed or uncompressed): %s'%fn)\n os.system('gunzip %s'%gzfn)\n assert os.path.exists(fn)\n" ]
[ [ "pandas.DataFrame", "pandas.date_range", "pandas.concat" ] ]
johannes-graeter/UnFlow
[ "3c6bf459952ef918d0226187539472f84fa9f00c" ]
[ "src/e2eflow/core/unsupervised.py" ]
[ "import tensorflow as tf\n\nfrom .augment import random_affine, random_photometric\nfrom .flownet import flownet, FLOW_SCALE\nfrom .losses import compute_losses, create_border_mask\nfrom .util import downsample\nfrom .visualization import get_flow_visualization\n\n# REGISTER ALL POSSIBLE LOSS TERMS\nLOSSES = ['occ', 'sym', 'fb', 'grad', 'ternary', 'photo', 'smooth_1st', 'smooth_2nd']\n\n\ndef _track_loss(op, name):\n tf.add_to_collection('losses', tf.identity(op, name=name))\n\n\ndef _track_image(op, name):\n name = 'train/' + name\n tf.add_to_collection('train_images', tf.identity(op, name=name))\n\n\ndef unsupervised_loss(batch, params, normalization=None, augment=True,\n return_flow=False):\n channel_mean = tf.constant(normalization[0]) / 255.0\n im1, im2 = batch\n im1 = im1 / 255.0\n im2 = im2 / 255.0\n im_shape = tf.shape(im1)[1:3]\n\n # -------------------------------------------------------------------------\n # Data & mask augmentation\n border_mask = create_border_mask(im1, 0.1)\n\n _track_image(im1, 'orig1')\n _track_image(im2, 'orig2')\n\n if augment:\n im1_geo, im2_geo, border_mask_global = random_affine(\n [im1, im2, border_mask],\n horizontal_flipping=True,\n min_scale=0.9, max_scale=1.1\n )\n\n # augment locally\n im2_geo, border_mask_local = random_affine(\n [im2_geo, border_mask],\n min_scale=0.9, max_scale=1.1\n )\n border_mask = border_mask_local * border_mask_global\n\n im1_photo, im2_photo = random_photometric(\n [im1_geo, im2_geo],\n noise_stddev=0.04, min_contrast=-0.3, max_contrast=0.3,\n brightness_stddev=0.02, min_colour=0.9, max_colour=1.1,\n min_gamma=0.7, max_gamma=1.5)\n\n _track_image(im1_photo, 'augmented1')\n _track_image(im2_photo, 'augmented2')\n else:\n im1_geo, im2_geo = im1, im2\n im1_photo, im2_photo = im1, im2\n\n # Images for loss comparisons with values in [0, 1] (scale to original using * 255)\n im1_norm = im1_geo\n im2_norm = im2_geo\n # Images for neural network input with mean-zero values in [-1, 1]\n im1_photo = im1_photo - channel_mean\n im2_photo = im2_photo - channel_mean\n\n flownet_spec = params.get('flownet', 'S')\n full_resolution = params.get('full_res')\n train_all = params.get('train_all')\n\n flows_fw, flows_bw = flownet(im1_photo, im2_photo,\n flownet_spec=flownet_spec,\n full_resolution=full_resolution,\n backward_flow=True,\n train_all=train_all)\n\n flows_fw = flows_fw[-1]\n flows_bw = flows_bw[-1]\n\n # -------------------------------------------------------------------------\n # Losses\n layer_weights = [12.7, 4.35, 3.9, 3.4, 1.1]\n layer_patch_distances = [3, 2, 2, 1, 1]\n if full_resolution:\n layer_weights = [12.7, 5.5, 5.0, 4.35, 3.9, 3.4, 1.1]\n layer_patch_distances = [3, 3] + layer_patch_distances\n im1_s = im1_norm\n im2_s = im2_norm\n mask_s = border_mask\n final_flow_scale = FLOW_SCALE * 4\n final_flow_fw = flows_fw[0] * final_flow_scale\n final_flow_bw = flows_bw[0] * final_flow_scale\n else:\n im1_s = downsample(im1_norm, 4)\n im2_s = downsample(im2_norm, 4)\n mask_s = downsample(border_mask, 4)\n final_flow_scale = FLOW_SCALE\n final_flow_fw = tf.image.resize_bilinear(flows_fw[0], im_shape) * final_flow_scale * 4\n final_flow_bw = tf.image.resize_bilinear(flows_bw[0], im_shape) * final_flow_scale * 4\n\n combined_losses = dict()\n combined_loss = 0.0\n for loss in LOSSES:\n combined_losses[loss] = 0.0\n\n if params.get('pyramid_loss'):\n flow_enum = enumerate(zip(flows_fw, flows_bw))\n else:\n flow_enum = [(0, (flows_fw[0], flows_bw[0]))]\n\n for i, flow_pair in flow_enum:\n layer_name = \"loss\" + str(i + 
2)\n\n flow_scale = final_flow_scale / (2 ** i)\n\n with tf.variable_scope(layer_name):\n layer_weight = layer_weights[i]\n flow_fw_s, flow_bw_s = flow_pair\n\n mask_occlusion = params.get('mask_occlusion', '')\n assert mask_occlusion in ['fb', 'disocc', '']\n\n losses = compute_losses(im1_s, im2_s,\n flow_fw_s * flow_scale, flow_bw_s * flow_scale,\n border_mask=mask_s if params.get('border_mask') else None,\n mask_occlusion=mask_occlusion,\n data_max_distance=layer_patch_distances[i])\n\n layer_loss = 0.0\n\n for loss in LOSSES:\n weight_name = loss + '_weight'\n if params.get(weight_name):\n _track_loss(losses[loss], loss)\n layer_loss += params[weight_name] * losses[loss]\n combined_losses[loss] += layer_weight * losses[loss]\n\n combined_loss += layer_weight * layer_loss\n\n im1_s = downsample(im1_s, 2)\n im2_s = downsample(im2_s, 2)\n mask_s = downsample(mask_s, 2)\n\n regularization_loss = tf.losses.get_regularization_loss()\n final_loss = combined_loss + regularization_loss\n\n _track_loss(final_loss, 'loss/combined')\n\n for loss in LOSSES:\n _track_loss(combined_losses[loss], 'loss/' + loss)\n weight_name = loss + '_weight'\n if params.get(weight_name):\n weight = tf.identity(params[weight_name], name='weight/' + loss)\n tf.add_to_collection('params', weight)\n\n _track_image(get_flow_visualization(final_flow_fw), 'estimated_flow')\n\n if not return_flow:\n return final_loss\n\n return final_loss, final_flow_fw, final_flow_bw\n" ]
[ [ "tensorflow.image.resize_bilinear", "tensorflow.shape", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.add_to_collection", "tensorflow.identity", "tensorflow.losses.get_regularization_loss" ] ]
Gertrud-Violett/RocketSystem
[ "f567d166e6869598ae86395a7a8ba4b5b045f612" ]
[ "Performance_Calculator/PandasHandler.py" ]
[ "import pandas as pd\n\nclass PandasHandler:\n\n def __init__(self):\n print(\"This is Constructor\")\n \n def __del__(self):\n print(\"This is Destructor\")\n\n @staticmethod\n def getSheetNumber_Excel(filePathExcel=None):\n bk = pd.ExcelFile(filePathExcel)\n return len(bk.sheet_names)\n\n @staticmethod\n def getSheetNames_Excel(filePathExcel=None):\n bk = pd.ExcelFile(filePathExcel)\n return bk.sheet_names\n\n @staticmethod\n def readAllSheets_Excel(filePathExcel=None):\n numSheets = PandasHandler.getSheetNumber_Excel(filePathExcel)\n dfSet = []\n for i in range(numSheets):\n df = pd.read_excel(filePathExcel, sheet_name=i, index_col=None)\n dfSet.append(df)\n return dfSet\n" ]
[ [ "pandas.ExcelFile", "pandas.read_excel" ] ]
AmitMY/joeynmt
[ "b30d1d53823ced56113def8fb5d5f7905d3c059f" ]
[ "joeynmt/plotting.py" ]
[ "#!/usr/bin/env python\n\nfrom typing import List, Optional\nimport numpy as np\n\n# pylint: disable=wrong-import-position\nimport matplotlib\nmatplotlib.use('Agg')\n\nfrom matplotlib import rcParams\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n\ndef plot_heatmap(scores: np.array, column_labels: List[str],\n row_labels: List[str], output_path: Optional[str] = None,\n dpi: int = 300) -> Figure:\n\n \"\"\"\n Plotting function that can be used to visualize (self-)attention.\n Plots are saved if `output_path` is specified, in format that this file\n ends with ('pdf' or 'png').\n\n :param scores: attention scores\n :param column_labels: labels for columns (e.g. target tokens)\n :param row_labels: labels for rows (e.g. source tokens)\n :param output_path: path to save to\n :param dpi: set resolution for matplotlib\n :return: pyplot figure\n \"\"\"\n\n if output_path is not None:\n assert output_path.endswith(\".png\") or output_path.endswith(\".pdf\"), \\\n \"output path must have .png or .pdf extension\"\n\n x_sent_len = len(column_labels)\n y_sent_len = len(row_labels)\n scores = scores[:y_sent_len, :x_sent_len]\n # check that cut off part didn't have any attention\n assert np.sum(scores[y_sent_len:, :x_sent_len]) == 0\n\n # automatic label size\n labelsize = 25 * (10 / max(x_sent_len, y_sent_len))\n\n # font config\n rcParams['xtick.labelsize'] = labelsize\n rcParams['ytick.labelsize'] = labelsize\n #rcParams['font.family'] = \"sans-serif\"\n #rcParams['font.sans-serif'] = [\"Fira Sans\"]\n #rcParams['font.weight'] = \"regular\"\n\n fig, ax = plt.subplots(figsize=(10, 10), dpi=dpi)\n plt.imshow(scores, cmap='viridis', aspect='equal',\n origin='upper', vmin=0., vmax=1.)\n\n ax.set_xticklabels(column_labels, minor=False, rotation=\"vertical\")\n ax.set_yticklabels(row_labels, minor=False)\n\n ax.xaxis.tick_top()\n ax.set_xticks(np.arange(scores.shape[1]) + 0, minor=False)\n ax.set_yticks(np.arange(scores.shape[0]) + 0, minor=False)\n plt.tight_layout()\n\n if output_path is not None:\n if output_path.endswith(\".pdf\"):\n pp = PdfPages(output_path)\n pp.savefig(fig)\n pp.close()\n else:\n if not output_path.endswith(\".png\"):\n output_path += \".png\"\n plt.savefig(output_path)\n\n plt.close()\n\n return fig\n" ]
[ [ "matplotlib.use", "numpy.sum", "matplotlib.backends.backend_pdf.PdfPages", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.imshow" ] ]
felixwzh/TSGB
[ "80dfb42c153c19a58ae170565d50ff8831a3029a" ]
[ "python-package/xgboost/sklearn.py" ]
[ "# coding: utf-8\n# pylint: disable=too-many-arguments, too-many-locals, invalid-name, fixme, E0012, R0912\n\"\"\"Scikit-Learn Wrapper interface for XGBoost.\"\"\"\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport warnings\nfrom .core import Booster, DMatrix, XGBoostError\nfrom .training import train\n\n# Do not use class names on scikit-learn directly.\n# Re-define the classes on .compat to guarantee the behavior without scikit-learn\nfrom .compat import (SKLEARN_INSTALLED, XGBModelBase,\n XGBClassifierBase, XGBRegressorBase, XGBLabelEncoder)\n\n\ndef _objective_decorator(func):\n \"\"\"Decorate an objective function\n\n Converts an objective function using the typical sklearn metrics\n signature so that it is usable with ``xgboost.training.train``\n\n Parameters\n ----------\n func: callable\n Expects a callable with signature ``func(y_true, y_pred)``:\n\n y_true: array_like of shape [n_samples]\n The target values\n y_pred: array_like of shape [n_samples]\n The predicted values\n\n Returns\n -------\n new_func: callable\n The new objective function as expected by ``xgboost.training.train``.\n The signature is ``new_func(preds, dmatrix)``:\n\n preds: array_like, shape [n_samples]\n The predicted values\n dmatrix: ``DMatrix``\n The training set from which the labels will be extracted using\n ``dmatrix.get_label()``\n \"\"\"\n def inner(preds, dmatrix):\n \"\"\"internal function\"\"\"\n labels = dmatrix.get_label()\n return func(labels, preds)\n return inner\n\n\nclass XGBModel(XGBModelBase):\n # pylint: disable=too-many-arguments, too-many-instance-attributes, invalid-name\n \"\"\"Implementation of the Scikit-Learn API for XGBoost.\n\n Parameters\n ----------\n max_depth : int\n Maximum tree depth for base learners.\n learning_rate : float\n Boosting learning rate (xgb's \"eta\")\n n_estimators : int\n Number of boosted trees to fit.\n silent : boolean\n Whether to print messages while running boosting.\n objective : string or callable\n Specify the learning task and the corresponding learning objective or\n a custom objective function to be used (see note below).\n booster: string\n Specify which booster to use: gbtree, gblinear or dart.\n nthread : int\n Number of parallel threads used to run xgboost. (Deprecated, please use n_jobs)\n n_jobs : int\n Number of parallel threads used to run xgboost. (replaces nthread)\n gamma : float\n Minimum loss reduction required to make a further partition on a leaf node of the tree.\n min_child_weight : int\n Minimum sum of instance weight(hessian) needed in a child.\n max_delta_step : int\n Maximum delta step we allow each tree's weight estimation to be.\n subsample : float\n Subsample ratio of the training instance.\n colsample_bytree : float\n Subsample ratio of columns when constructing each tree.\n colsample_bylevel : float\n Subsample ratio of columns for each split, in each level.\n reg_alpha : float (xgb's alpha)\n L1 regularization term on weights\n reg_lambda : float (xgb's lambda)\n L2 regularization term on weights\n scale_pos_weight : float\n Balancing of positive and negative weights.\n base_score:\n The initial prediction score of all instances, global bias.\n seed : int\n Random number seed. (Deprecated, please use random_state)\n random_state : int\n Random number seed. (replaces seed)\n missing : float, optional\n Value in the data which needs to be present as a missing value. If\n None, defaults to np.nan.\n **kwargs : dict, optional\n Keyword arguments for XGBoost Booster object. 
Full documentation of parameters can\n be found here: https://github.com/dmlc/xgboost/blob/master/doc/parameter.md.\n Attempting to set a parameter via the constructor args and **kwargs dict simultaneously\n will result in a TypeError.\n Note:\n **kwargs is unsupported by Sklearn. We do not guarantee that parameters passed via\n this argument will interact properly with Sklearn.\n\n Note\n ----\n A custom objective function can be provided for the ``objective``\n parameter. In this case, it should have the signature\n ``objective(y_true, y_pred) -> grad, hess``:\n\n y_true: array_like of shape [n_samples]\n The target values\n y_pred: array_like of shape [n_samples]\n The predicted values\n\n grad: array_like of shape [n_samples]\n The value of the gradient for each sample point.\n hess: array_like of shape [n_samples]\n The value of the second derivative for each sample point\n \"\"\"\n\n def __init__(self, max_depth=3, learning_rate=0.1, n_estimators=100,\n silent=True, objective=\"reg:linear\", booster='gbtree',\n n_jobs=1, nthread=None, gamma=0, min_child_weight=1, max_delta_step=0,\n subsample=1, colsample_bytree=1, colsample_bylevel=1,\n reg_alpha=0, reg_lambda=1, scale_pos_weight=1,\n base_score=0.5, random_state=0, seed=None, missing=None, **kwargs):\n if not SKLEARN_INSTALLED:\n raise XGBoostError('sklearn needs to be installed in order to use this module')\n self.max_depth = max_depth\n self.learning_rate = learning_rate\n self.n_estimators = n_estimators\n self.silent = silent\n self.objective = objective\n self.booster = booster\n self.gamma = gamma\n self.min_child_weight = min_child_weight\n self.max_delta_step = max_delta_step\n self.subsample = subsample\n self.colsample_bytree = colsample_bytree\n self.colsample_bylevel = colsample_bylevel\n self.reg_alpha = reg_alpha\n self.reg_lambda = reg_lambda\n self.scale_pos_weight = scale_pos_weight\n self.base_score = base_score\n self.missing = missing if missing is not None else np.nan\n self.kwargs = kwargs\n self._Booster = None\n self.seed = seed\n self.random_state = random_state\n self.nthread = nthread\n self.n_jobs = n_jobs\n\n def __setstate__(self, state):\n # backward compatibility code\n # load booster from raw if it is raw\n # the booster now support pickle\n bst = state[\"_Booster\"]\n if bst is not None and not isinstance(bst, Booster):\n state[\"_Booster\"] = Booster(model_file=bst)\n self.__dict__.update(state)\n\n def get_booster(self):\n \"\"\"Get the underlying xgboost Booster of this model.\n\n This will raise an exception when fit was not called\n\n Returns\n -------\n booster : a xgboost booster of underlying model\n \"\"\"\n if self._Booster is None:\n raise XGBoostError('need to call fit beforehand')\n return self._Booster\n\n def get_params(self, deep=False):\n \"\"\"Get parameters.\"\"\"\n params = super(XGBModel, self).get_params(deep=deep)\n if isinstance(self.kwargs, dict): # if kwargs is a dict, update params accordingly\n params.update(self.kwargs)\n if params['missing'] is np.nan:\n params['missing'] = None # sklearn doesn't handle nan. 
see #4725\n if not params.get('eval_metric', True):\n del params['eval_metric'] # don't give as None param to Booster\n return params\n\n def get_xgb_params(self):\n \"\"\"Get xgboost type parameters.\"\"\"\n xgb_params = self.get_params()\n random_state = xgb_params.pop('random_state')\n if 'seed' in xgb_params and xgb_params['seed'] is not None:\n warnings.warn('The seed parameter is deprecated as of version .6.'\n 'Please use random_state instead.'\n 'seed is deprecated.', DeprecationWarning)\n else:\n xgb_params['seed'] = random_state\n n_jobs = xgb_params.pop('n_jobs')\n if 'nthread' in xgb_params and xgb_params['nthread'] is not None:\n warnings.warn('The nthread parameter is deprecated as of version .6.'\n 'Please use n_jobs instead.'\n 'nthread is deprecated.', DeprecationWarning)\n else:\n xgb_params['nthread'] = n_jobs\n\n xgb_params['silent'] = 1 if self.silent else 0\n\n if xgb_params['nthread'] <= 0:\n xgb_params.pop('nthread', None)\n return xgb_params\n\n def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None,\n early_stopping_rounds=None, verbose=True, xgb_model=None):\n # pylint: disable=missing-docstring,invalid-name,attribute-defined-outside-init\n \"\"\"\n Fit the gradient boosting model\n\n Parameters\n ----------\n X : array_like\n Feature matrix\n y : array_like\n Labels\n sample_weight : array_like\n instance weights\n eval_set : list, optional\n A list of (X, y) tuple pairs to use as a validation set for\n early-stopping\n eval_metric : str, callable, optional\n If a str, should be a built-in evaluation metric to use. See\n doc/parameter.md. If callable, a custom evaluation metric. The call\n signature is func(y_predicted, y_true) where y_true will be a\n DMatrix object such that you may need to call the get_label\n method. It must return a str, value pair where the str is a name\n for the evaluation and value is the value of the evaluation\n function. This objective is always minimized.\n early_stopping_rounds : int\n Activates early stopping. Validation error needs to decrease at\n least every <early_stopping_rounds> round(s) to continue training.\n Requires at least one item in evals. If there's more than one,\n will use the last. Returns the model from the last iteration\n (not the best one). 
If early stopping occurs, the model will\n have three additional fields: bst.best_score, bst.best_iteration\n and bst.best_ntree_limit.\n (Use bst.best_ntree_limit to get the correct value if num_parallel_tree\n and/or num_class appears in the parameters)\n verbose : bool\n If `verbose` and an evaluation set is used, writes the evaluation\n metric measured on the validation set to stderr.\n xgb_model : str\n file name of stored xgb model or 'Booster' instance Xgb model to be\n loaded before training (allows training continuation).\n \"\"\"\n if sample_weight is not None:\n trainDmatrix = DMatrix(X, label=y, weight=sample_weight,\n missing=self.missing, nthread=self.n_jobs)\n else:\n trainDmatrix = DMatrix(X, label=y, missing=self.missing, nthread=self.n_jobs)\n\n evals_result = {}\n if eval_set is not None:\n evals = list(DMatrix(x[0], label=x[1], missing=self.missing,\n nthread=self.n_jobs) for x in eval_set)\n evals = list(zip(evals, [\"validation_{}\".format(i) for i in\n range(len(evals))]))\n else:\n evals = ()\n\n params = self.get_xgb_params()\n\n if callable(self.objective):\n obj = _objective_decorator(self.objective)\n params[\"objective\"] = \"reg:linear\"\n else:\n obj = None\n\n feval = eval_metric if callable(eval_metric) else None\n if eval_metric is not None:\n if callable(eval_metric):\n eval_metric = None\n else:\n params.update({'eval_metric': eval_metric})\n\n self._Booster = train(params, trainDmatrix,\n self.n_estimators, evals=evals,\n early_stopping_rounds=early_stopping_rounds,\n evals_result=evals_result, obj=obj, feval=feval,\n verbose_eval=verbose, xgb_model=xgb_model)\n\n if evals_result:\n for val in evals_result.items():\n evals_result_key = list(val[1].keys())[0]\n evals_result[val[0]][evals_result_key] = val[1][evals_result_key]\n self.evals_result_ = evals_result\n\n if early_stopping_rounds is not None:\n self.best_score = self._Booster.best_score\n self.best_iteration = self._Booster.best_iteration\n self.best_ntree_limit = self._Booster.best_ntree_limit\n return self\n\n def predict(self, data, output_margin=False, ntree_limit=0):\n # pylint: disable=missing-docstring,invalid-name\n test_dmatrix = DMatrix(data, missing=self.missing, nthread=self.n_jobs)\n return self.get_booster().predict(test_dmatrix,\n output_margin=output_margin,\n ntree_limit=ntree_limit)\n\n def apply(self, X, ntree_limit=0):\n \"\"\"Return the predicted leaf every tree for each sample.\n\n Parameters\n ----------\n X : array_like, shape=[n_samples, n_features]\n Input features matrix.\n\n ntree_limit : int\n Limit number of trees in the prediction; defaults to 0 (use all trees).\n\n Returns\n -------\n X_leaves : array_like, shape=[n_samples, n_trees]\n For each datapoint x in X and for each tree, return the index of the\n leaf x ends up in. Leaves are numbered within\n ``[0; 2**(self.max_depth+1))``, possibly with gaps in the numbering.\n \"\"\"\n test_dmatrix = DMatrix(X, missing=self.missing, nthread=self.n_jobs)\n return self.get_booster().predict(test_dmatrix,\n pred_leaf=True,\n ntree_limit=ntree_limit)\n\n def evals_result(self):\n \"\"\"Return the evaluation results.\n\n If eval_set is passed to the `fit` function, you can call evals_result() to\n get evaluation results for all passed eval_sets. 
When eval_metric is also\n passed to the `fit` function, the evals_result will contain the eval_metrics\n passed to the `fit` function\n\n Returns\n -------\n evals_result : dictionary\n\n Example\n -------\n param_dist = {'objective':'binary:logistic', 'n_estimators':2}\n\n clf = xgb.XGBModel(**param_dist)\n\n clf.fit(X_train, y_train,\n eval_set=[(X_train, y_train), (X_test, y_test)],\n eval_metric='logloss',\n verbose=True)\n\n evals_result = clf.evals_result()\n\n The variable evals_result will contain:\n {'validation_0': {'logloss': ['0.604835', '0.531479']},\n 'validation_1': {'logloss': ['0.41965', '0.17686']}}\n \"\"\"\n if self.evals_result_:\n evals_result = self.evals_result_\n else:\n raise XGBoostError('No results.')\n\n return evals_result\n\n @property\n def feature_importances_(self):\n \"\"\"\n Returns\n -------\n feature_importances_ : array of shape = [n_features]\n\n \"\"\"\n b = self.get_booster()\n fs = b.get_fscore()\n all_features = [fs.get(f, 0.) for f in b.feature_names]\n all_features = np.array(all_features, dtype=np.float32)\n return all_features / all_features.sum()\n\n\nclass XGBClassifier(XGBModel, XGBClassifierBase):\n # pylint: disable=missing-docstring,too-many-arguments,invalid-name\n __doc__ = \"\"\"Implementation of the scikit-learn API for XGBoost classification.\n\n \"\"\" + '\\n'.join(XGBModel.__doc__.split('\\n')[2:])\n\n def __init__(self, max_depth=3, learning_rate=0.1,\n n_estimators=100, silent=True,\n objective=\"binary:logistic\", booster='gbtree',\n n_jobs=1, nthread=None, gamma=0, min_child_weight=1,\n max_delta_step=0, subsample=1, colsample_bytree=1, colsample_bylevel=1,\n reg_alpha=0, reg_lambda=1, scale_pos_weight=1,\n base_score=0.5, random_state=0, seed=None, missing=None, **kwargs):\n super(XGBClassifier, self).__init__(max_depth, learning_rate,\n n_estimators, silent, objective, booster,\n n_jobs, nthread, gamma, min_child_weight,\n max_delta_step, subsample,\n colsample_bytree, colsample_bylevel,\n reg_alpha, reg_lambda,\n scale_pos_weight, base_score,\n random_state, seed, missing, **kwargs)\n\n def fit(self, X, y, sample_weight=None, eval_set=None, eval_metric=None,\n early_stopping_rounds=None, verbose=True, xgb_model=None):\n # pylint: disable = attribute-defined-outside-init,arguments-differ\n \"\"\"\n Fit gradient boosting classifier\n\n Parameters\n ----------\n X : array_like\n Feature matrix\n y : array_like\n Labels\n sample_weight : array_like\n Weight for each instance\n eval_set : list, optional\n A list of (X, y) pairs to use as a validation set for\n early-stopping\n eval_metric : str, callable, optional\n If a str, should be a built-in evaluation metric to use. See\n doc/parameter.md. If callable, a custom evaluation metric. The call\n signature is func(y_predicted, y_true) where y_true will be a\n DMatrix object such that you may need to call the get_label\n method. It must return a str, value pair where the str is a name\n for the evaluation and value is the value of the evaluation\n function. This objective is always minimized.\n early_stopping_rounds : int, optional\n Activates early stopping. Validation error needs to decrease at\n least every <early_stopping_rounds> round(s) to continue training.\n Requires at least one item in evals. If there's more than one,\n will use the last. Returns the model from the last iteration\n (not the best one). 
If early stopping occurs, the model will\n have three additional fields: bst.best_score, bst.best_iteration\n and bst.best_ntree_limit.\n (Use bst.best_ntree_limit to get the correct value if num_parallel_tree\n and/or num_class appears in the parameters)\n verbose : bool\n If `verbose` and an evaluation set is used, writes the evaluation\n metric measured on the validation set to stderr.\n xgb_model : str\n file name of stored xgb model or 'Booster' instance Xgb model to be\n loaded before training (allows training continuation).\n \"\"\"\n evals_result = {}\n self.classes_ = np.unique(y)\n self.n_classes_ = len(self.classes_)\n\n xgb_options = self.get_xgb_params()\n\n if callable(self.objective):\n obj = _objective_decorator(self.objective)\n # Use default value. Is it really not used ?\n xgb_options[\"objective\"] = \"binary:logistic\"\n else:\n obj = None\n\n if self.n_classes_ > 2:\n # Switch to using a multiclass objective in the underlying XGB instance\n xgb_options[\"objective\"] = \"multi:softprob\"\n xgb_options['num_class'] = self.n_classes_\n\n feval = eval_metric if callable(eval_metric) else None\n if eval_metric is not None:\n if callable(eval_metric):\n eval_metric = None\n else:\n xgb_options.update({\"eval_metric\": eval_metric})\n\n self._le = XGBLabelEncoder().fit(y)\n training_labels = self._le.transform(y)\n\n if eval_set is not None:\n # TODO: use sample_weight if given?\n evals = list(\n DMatrix(x[0], label=self._le.transform(x[1]),\n missing=self.missing, nthread=self.n_jobs)\n for x in eval_set\n )\n nevals = len(evals)\n eval_names = [\"validation_{}\".format(i) for i in range(nevals)]\n evals = list(zip(evals, eval_names))\n else:\n evals = ()\n\n self._features_count = X.shape[1]\n\n if sample_weight is not None:\n train_dmatrix = DMatrix(X, label=training_labels, weight=sample_weight,\n missing=self.missing, nthread=self.n_jobs)\n else:\n train_dmatrix = DMatrix(X, label=training_labels,\n missing=self.missing, nthread=self.n_jobs)\n\n self._Booster = train(xgb_options, train_dmatrix, self.n_estimators,\n evals=evals,\n early_stopping_rounds=early_stopping_rounds,\n evals_result=evals_result, obj=obj, feval=feval,\n verbose_eval=verbose, xgb_model=None)\n\n self.objective = xgb_options[\"objective\"]\n if evals_result:\n for val in evals_result.items():\n evals_result_key = list(val[1].keys())[0]\n evals_result[val[0]][evals_result_key] = val[1][evals_result_key]\n self.evals_result_ = evals_result\n\n if early_stopping_rounds is not None:\n self.best_score = self._Booster.best_score\n self.best_iteration = self._Booster.best_iteration\n self.best_ntree_limit = self._Booster.best_ntree_limit\n\n return self\n\n def predict(self, data, output_margin=False, ntree_limit=0):\n \"\"\"\n Predict with `data`.\n NOTE: This function is not thread safe.\n For each booster object, predict can only be called from one thread.\n If you want to run prediction using multiple thread, call xgb.copy() to make copies\n of model object and then call predict\n Parameters\n ----------\n data : DMatrix\n The dmatrix storing the input.\n output_margin : bool\n Whether to output the raw untransformed margin value.\n ntree_limit : int\n Limit number of trees in the prediction; defaults to 0 (use all trees).\n Returns\n -------\n prediction : numpy array\n \"\"\"\n test_dmatrix = DMatrix(data, missing=self.missing, nthread=self.n_jobs)\n class_probs = self.get_booster().predict(test_dmatrix,\n output_margin=output_margin,\n ntree_limit=ntree_limit)\n if len(class_probs.shape) > 1:\n 
column_indexes = np.argmax(class_probs, axis=1)\n else:\n column_indexes = np.repeat(0, class_probs.shape[0])\n column_indexes[class_probs > 0.5] = 1\n return self._le.inverse_transform(column_indexes)\n\n def predict_proba(self, data, output_margin=False, ntree_limit=0):\n \"\"\"\n Predict the probability of each `data` example being of a given class.\n NOTE: This function is not thread safe.\n For each booster object, predict can only be called from one thread.\n If you want to run prediction using multiple thread, call xgb.copy() to make copies\n of model object and then call predict\n Parameters\n ----------\n data : DMatrix\n The dmatrix storing the input.\n output_margin : bool\n Whether to output the raw untransformed margin value.\n ntree_limit : int\n Limit number of trees in the prediction; defaults to 0 (use all trees).\n Returns\n -------\n prediction : numpy array\n a numpy array with the probability of each data example being of a given class.\n \"\"\"\n test_dmatrix = DMatrix(data, missing=self.missing, nthread=self.n_jobs)\n class_probs = self.get_booster().predict(test_dmatrix,\n output_margin=output_margin,\n ntree_limit=ntree_limit)\n if self.objective == \"multi:softprob\":\n return class_probs\n else:\n classone_probs = class_probs\n classzero_probs = 1.0 - classone_probs\n return np.vstack((classzero_probs, classone_probs)).transpose()\n\n def evals_result(self):\n \"\"\"Return the evaluation results.\n\n If eval_set is passed to the `fit` function, you can call evals_result() to\n get evaluation results for all passed eval_sets. When eval_metric is also\n passed to the `fit` function, the evals_result will contain the eval_metrics\n passed to the `fit` function\n\n Returns\n -------\n evals_result : dictionary\n\n Example\n -------\n param_dist = {'objective':'binary:logistic', 'n_estimators':2}\n\n clf = xgb.XGBClassifier(**param_dist)\n\n clf.fit(X_train, y_train,\n eval_set=[(X_train, y_train), (X_test, y_test)],\n eval_metric='logloss',\n verbose=True)\n\n evals_result = clf.evals_result()\n\n The variable evals_result will contain:\n {'validation_0': {'logloss': ['0.604835', '0.531479']},\n 'validation_1': {'logloss': ['0.41965', '0.17686']}}\n \"\"\"\n if self.evals_result_:\n evals_result = self.evals_result_\n else:\n raise XGBoostError('No results.')\n\n return evals_result\n\n\nclass XGBRegressor(XGBModel, XGBRegressorBase):\n # pylint: disable=missing-docstring\n __doc__ = \"\"\"Implementation of the scikit-learn API for XGBoost regression.\n \"\"\" + '\\n'.join(XGBModel.__doc__.split('\\n')[2:])\n" ]
[ [ "numpy.array", "numpy.argmax", "numpy.vstack", "numpy.repeat", "numpy.unique" ] ]
AbhishekRS4/deep_lab_v3_plus
[ "aec2e1d350ebc1218c5a2cce981a875a588fd225" ]
[ "src/deep_lab_v3_plus_model_frozen.py" ]
[ "# @author : Abhishek R S\n\nimport os\nimport h5py\nimport numpy as np\nimport tensorflow as tf\n\n\"\"\"\nDeepLabv3+\n\n# Reference\n- [Deep Residual Learning for Image Recognition]\n (https://arxiv.org/abs/1512.03385)\n- [DeepLabv3+](https://arxiv.org/pdf/1802.02611.pdf)\n\n# Pretrained model weights\n- [Download pretrained resnet-50 model]\n (https://github.com/fchollet/deep-learning-models/releases/)\n\"\"\"\n\nclass DeepLab3Plus:\n def __init__(self, pretrained_weights, is_training, data_format=\"channels_first\", num_classes=15):\n self._weights_h5 = h5py.File(pretrained_weights, \"r\")\n self._is_training = is_training\n self._data_format = data_format\n self._num_classes = num_classes\n self._padding = \"SAME\"\n self._feature_map_axis = None\n self._encoder_data_format = None\n self._encoder_pool_kernel = None\n self._input_size = [512, 1024]\n self._atrous_rate = [6, 12, 18]\n self._encoder_conv_strides = [1, 1, 1, 1]\n self._encoder_pool_strides = None\n self._avg_pool_axes = None\n self._initializer = tf.contrib.layers.xavier_initializer_conv2d()\n\n \"\"\"\n based on the data format set appropriate pool_kernel and pool_strides\n always use channels_first i.e. NCHW as the data format on a GPU\n \"\"\"\n\n if data_format == \"channels_first\":\n self._encoder_data_format = \"NCHW\"\n self._encoder_pool_kernel = [1, 1, 3, 3]\n self._encoder_pool_strides = [1, 1, 2, 2]\n self._avg_pool_axes = [2, 3]\n self._feature_map_axis = 1\n else:\n self._encoder_data_format = \"NHWC\"\n self._encoder_pool_kernel = [1, 3, 3, 1]\n self._encoder_pool_strides = [1, 2, 2, 1]\n self._avg_pool_axes = [1, 2]\n self._feature_map_axis = -1\n\n # build resnet-50 encoder\n def resnet50_encoder(self, features):\n # input : BGR format with image_net mean subtracted\n # bgr mean : [103.939, 116.779, 123.68]\n\n if self._data_format == \"channels_last\":\n features = tf.transpose(features, perm=[0, 2, 3, 1])\n\n # Stage 0\n self.stage0 = self._res_conv_layer(\n features, \"conv1\", strides=self._encoder_pool_strides)\n self.stage0 = self._res_batchnorm_layer(self.stage0, \"bn_conv1\")\n self.stage0 = tf.nn.relu(self.stage0, name=\"relu1\")\n\n # Stage 1\n self.stage1 = tf.nn.max_pool(\n self.stage0, ksize=self._encoder_pool_kernel, strides=self._encoder_pool_strides,\n padding=self._padding, data_format=self._encoder_data_format, name=\"pool1\"\n )\n\n # Stage 2\n self.stage2 = self._res_conv_block(\n input_layer=self.stage1, stage=\"2a\", strides=self._encoder_conv_strides)\n self.stage2 = self._res_identity_block(input_layer=self.stage2, stage=\"2b\")\n self.stage2 = self._res_identity_block(input_layer=self.stage2, stage=\"2c\")\n\n # Stage 3\n self.stage3 = self._res_conv_block(\n input_layer=self.stage2, stage=\"3a\", strides=self._encoder_pool_strides)\n self.stage3 = self._res_identity_block(input_layer=self.stage3, stage=\"3b\")\n self.stage3 = self._res_identity_block(input_layer=self.stage3, stage=\"3c\")\n self.stage3 = self._res_identity_block(input_layer=self.stage3, stage=\"3d\")\n\n # Stage 4\n self.stage4 = self._res_conv_block(\n input_layer=self.stage3, stage=\"4a\", strides=self._encoder_pool_strides)\n self.stage4 = self._res_identity_block(input_layer=self.stage4, stage=\"4b\")\n self.stage4 = self._res_identity_block(input_layer=self.stage4, stage=\"4c\")\n self.stage4 = self._res_identity_block(input_layer=self.stage4, stage=\"4d\")\n self.stage4 = self._res_identity_block(input_layer=self.stage4, stage=\"4e\")\n self.stage4 = self._res_identity_block(input_layer=self.stage4, 
stage=\"4f\")\n\n # Stage 5\n self.stage5 = self._res_conv_block(\n input_layer=self.stage4, stage=\"5a\", strides=self._encoder_conv_strides)\n self.stage5 = self._res_identity_block(input_layer=self.stage5, stage=\"5b\")\n self.stage5 = self._res_identity_block(input_layer=self.stage5, stage=\"5c\")\n\n # build deep_lab_v3+\n def deeplabv3_plus(self):\n self.aspp_out = self._atrous_spatial_pyramid_pool_block(self.stage5, name=\"aspp_\")\n\n self.low_level_features = self._get_conv2d_layer(\n self.stage2, 48, [1, 1], [1, 1], name=\"low_level_conv\")\n self.low_level_features = self._get_relu_activation(\n self.low_level_features, name=\"low_level_relu\")\n\n if self._data_format == \"channels_first\":\n low_level_features_size = tf.shape(self.low_level_features)[2:]\n else:\n low_level_features_size = tf.shape(self.low_level_features)[1:3]\n\n self.up1 = self._get_upsample_layer(\n self.aspp_out, low_level_features_size, name=\"upsample1\")\n self.up1_concat = tf.concat([self.up1, self.low_level_features],\n axis=self._feature_map_axis, name=\"decoder_concat\")\n\n self.decoder_conv1 = self._get_conv2d_layer(\n self.up1_concat, 256, [3, 3], [1, 1], name=\"decoder_conv1\")\n self.decoder_conv1 = self._get_relu_activation(self.decoder_conv1, name=\"decoder_relu1\")\n self.decoder_conv2 = self._get_conv2d_layer(\n self.decoder_conv1, 256, [3, 3], [1, 1], name=\"decoder_conv2\")\n self.decoder_conv2 = self._get_relu_activation(self.decoder_conv2, name=\"decoder_relu2\")\n self.decoder_conv3 = self._get_conv2d_layer(\n self.decoder_conv2, self._num_classes, [1, 1], [1, 1], name=\"decoder_conv3\")\n\n self.logits = self._get_upsample_layer(\n self.decoder_conv3, self._input_size, name=\"logits\")\n\n # build atrous spatial pyramid pool block\n def _atrous_spatial_pyramid_pool_block(self, input_layer, depth=256, name=\"aspp_\"):\n if self._data_format == \"channels_first\":\n _inputs_size = tf.shape(input_layer)[2:]\n else:\n _inputs_size = tf.shape(input_layer)[1:3]\n\n _conv1x1 = self._get_conv2d_layer(\n input_layer, depth, [1, 1], [1, 1], name=name + \"conv1x1\")\n _conv1x1 = self._get_relu_activation(_conv1x1, name=name + \"conv1x1_relu\")\n\n _conv3x3_1 = self._get_conv2d_layer(input_layer, depth, [3, 3], [1, 1],\n dilation_rate=self._atrous_rate[0], name=name + \"conv3x3_1\")\n _conv3x3_1 = self._get_relu_activation(_conv3x3_1, name=name + \"conv3x3_1_relu\")\n\n _conv3x3_2 = self._get_conv2d_layer(input_layer, depth, [3, 3], [1, 1],\n dilation_rate=self._atrous_rate[1], name=name + \"conv3x3_2\")\n _conv3x3_2 = self._get_relu_activation(_conv3x3_2, name=name + \"conv3x3_2_relu\")\n\n _conv3x3_3 = self._get_conv2d_layer(input_layer, depth, [3, 3], [1, 1],\n dilation_rate=self._atrous_rate[2], name=name + \"conv3x3_3\")\n _conv3x3_3 = self._get_relu_activation(_conv3x3_3, name=name + \"conv3x3_3_relu\")\n\n _avg_pool = tf.reduce_mean(input_layer, self._avg_pool_axes,\n name=name + \"avg_pool\", keepdims=True)\n _img_lvl_conv1x1 = self._get_conv2d_layer(\n _avg_pool, depth, [1, 1], [1, 1], name=name + \"img_lvl_conv1x1\")\n _img_lvl_conv1x1 = self._get_relu_activation(\n _img_lvl_conv1x1, name=name + \"img_lvl_conv1x1_relu\")\n\n _img_lvl_upsample = self._get_upsample_layer(\n _img_lvl_conv1x1, _inputs_size, name=name + \"upsample\")\n\n _concat_features = tf.concat(\n [_conv1x1, _conv3x3_1, _conv3x3_2, _conv3x3_3, _img_lvl_upsample],\n axis=self._feature_map_axis, name=name + \"concat\")\n _conv1x1_aspp_out = self._get_conv2d_layer(_concat_features, depth,\n [1, 1], [1, 1], name=name + 
\"out_conv1x1\")\n _conv1x1_aspp_out = self._get_relu_activation(\n _conv1x1_aspp_out, name=name + \"out_conv1x1_relu\")\n _conv1x1_aspp_out = self._get_dropout_layer(\n _conv1x1_aspp_out, rate=0.1, name=name + \"dropout\")\n\n return _conv1x1_aspp_out\n\n # return convolution2d layer\n def _get_conv2d_layer(self, input_layer, num_filters, kernel_size, strides, dilation_rate=1, use_bias=True, name=\"conv\"):\n conv_2d_layer = tf.layers.conv2d(inputs=input_layer, filters=num_filters, kernel_size=kernel_size,\n strides=strides, use_bias=use_bias, padding=self._padding, data_format=self._data_format,\n kernel_initializer=self._initializer, dilation_rate=dilation_rate, name=name)\n return conv_2d_layer\n\n # return bilinear upsampling layer\n def _get_upsample_layer(self, input_layer, target_size, name=\"upsample\"):\n if self._data_format == \"channels_first\":\n input_layer = tf.transpose(input_layer, perm=[0, 2, 3, 1])\n\n _upsampled = tf.image.resize_bilinear(input_layer, target_size, name=name)\n\n if self._data_format == \"channels_first\":\n _upsampled = tf.transpose(_upsampled, perm=[0, 3, 1, 2])\n\n return _upsampled\n\n # return relu activation function\n def _get_relu_activation(self, input_layer, name=\"relu\"):\n relu_layer = tf.nn.relu(input_layer, name=name)\n return relu_layer\n\n # return dropout layer\n def _get_dropout_layer(self, input_layer, rate=0.1, name=\"dropout\"):\n dropout_layer = tf.layers.dropout(inputs=input_layer, rate=rate, training=self._is_training, name=name)\n return dropout_layer\n\n # return batch normalization layer\n def _get_batchnorm_layer(self, input_layer, name=\"bn\"):\n bn_layer = tf.layers.batch_normalization(input_layer, axis=self._feature_map_axis, training=self._is_training, name=name)\n return bn_layer\n\n #---------------------------------------#\n # pretrained resnet50 encoder functions #\n #---------------------------------------#\n #-----------------------#\n # convolution layer #\n #-----------------------#\n def _res_conv_layer(self, input_layer, name, strides=[1, 1, 1, 1]):\n W = tf.constant(self._weights_h5[name][name + \"_W_1:0\"])\n b = self._weights_h5[name][name + \"_b_1:0\"]\n b = tf.constant(np.reshape(b, (b.shape[0])))\n x = tf.nn.conv2d(input_layer, filter=W, strides=strides,\n padding=self._padding, data_format=self._encoder_data_format, name=name)\n x = tf.nn.bias_add(x, b, data_format=self._encoder_data_format)\n\n return x\n\n #-----------------------#\n # batchnorm layer #\n #-----------------------#\n def _res_batchnorm_layer(self, input_layer, name):\n if self._encoder_data_format == \"NCHW\":\n input_layer = tf.transpose(input_layer, perm=[0, 2, 3, 1])\n\n mean = tf.constant(self._weights_h5[name][name + \"_running_mean_1:0\"])\n std = tf.constant(self._weights_h5[name][name + \"_running_std_1:0\"])\n beta = tf.constant(self._weights_h5[name][name + \"_beta_1:0\"])\n gamma = tf.constant(self._weights_h5[name][name + \"_gamma_1:0\"])\n\n bn = tf.nn.batch_normalization(input_layer, mean=mean, variance=std,\n offset=beta, scale=gamma, variance_epsilon=1e-12, name=name)\n\n if self._encoder_data_format == \"NCHW\":\n bn = tf.transpose(bn, perm=[0, 3, 1, 2])\n\n return bn\n\n #-----------------------#\n # convolution block #\n #-----------------------#\n def _res_conv_block(self, input_layer, stage, strides):\n x = self._res_conv_layer(input_layer, name=\"res\" + stage + \"_branch2a\", strides=strides)\n x = self._res_batchnorm_layer(x, name=\"bn\" + stage + \"_branch2a\")\n x = tf.nn.relu(x, name=\"relu\" + stage + 
\"_branch2a\")\n\n x = self._res_conv_layer(x, name=\"res\" + stage + \"_branch2b\")\n x = self._res_batchnorm_layer(x, name=\"bn\" + stage + \"_branch2b\")\n x = tf.nn.relu(x, name=\"relu\" + stage + \"_branch2b\")\n\n x = self._res_conv_layer(x, name=\"res\" + stage + \"_branch2c\")\n x = self._res_batchnorm_layer(x, name=\"bn\" + stage + \"_branch2c\")\n\n shortcut = self._res_conv_layer(input_layer, name=\"res\" + stage + \"_branch1\", strides=strides)\n shortcut = self._res_batchnorm_layer(shortcut, name=\"bn\" + stage + \"_branch1\")\n\n x = tf.add(x, shortcut, name=\"add\" + stage)\n x = tf.nn.relu(x, name=\"relu\" + stage)\n\n return x\n\n #-----------------------#\n # identity block #\n #-----------------------#\n def _res_identity_block(self, input_layer, stage):\n x = self._res_conv_layer(input_layer, name=\"res\" + stage + \"_branch2a\")\n x = self._res_batchnorm_layer(x, name=\"bn\" + stage + \"_branch2a\")\n x = tf.nn.relu(x, name=\"relu\" + stage + \"_branch2a\")\n\n x = self._res_conv_layer(x, name=\"res\" + stage + \"_branch2b\")\n x = self._res_batchnorm_layer(x, name=\"bn\" + stage + \"_branch2b\")\n x = tf.nn.relu(x, name=\"relu\" + stage + \"_branch2b\")\n\n x = self._res_conv_layer(x, name=\"res\" + stage + \"_branch2c\")\n x = self._res_batchnorm_layer(x, name=\"bn\" + stage + \"_branch2c\")\n\n x = tf.add(x, input_layer, name=\"add\" + stage)\n x = tf.nn.relu(x, name=\"relu\" + stage)\n\n return x\n" ]
[ [ "tensorflow.image.resize_bilinear", "tensorflow.layers.dropout", "tensorflow.nn.batch_normalization", "tensorflow.shape", "tensorflow.concat", "tensorflow.nn.relu", "tensorflow.nn.conv2d", "numpy.reshape", "tensorflow.layers.batch_normalization", "tensorflow.transpose", "tensorflow.constant", "tensorflow.layers.conv2d", "tensorflow.contrib.layers.xavier_initializer_conv2d", "tensorflow.add", "tensorflow.nn.bias_add", "tensorflow.reduce_mean", "tensorflow.nn.max_pool" ] ]
sallypannn/autogluon
[ "fe5eccc3c14eb18478495f0406c812b60a65cbc1" ]
[ "tabular/src/autogluon/tabular/models/fastainn/tabular_nn_fastai.py" ]
[ "import copy\nimport logging\nimport time\nfrom builtins import classmethod\nfrom pathlib import Path\n\nimport sklearn\nimport numpy as np\nimport pandas as pd\n\nfrom autogluon.core.constants import REGRESSION, BINARY, QUANTILE\nfrom autogluon.core.features.types import R_OBJECT, R_INT, R_FLOAT, R_DATETIME, R_CATEGORY, R_BOOL\nfrom autogluon.core.models import AbstractModel\nfrom autogluon.core.models.abstract.model_trial import skip_hpo\nfrom autogluon.core.utils import try_import_fastai\nfrom autogluon.core.utils.files import make_temp_directory\nfrom autogluon.core.utils.loaders import load_pkl\nfrom autogluon.core.utils.multiprocessing_utils import is_fork_enabled\nfrom autogluon.core.utils.savers import save_pkl\nfrom .hyperparameters.parameters import get_param_baseline\nfrom .hyperparameters.searchspaces import get_default_searchspace\n\n# FIXME: Has a leak somewhere, training additional models in a single python script will slow down training for each additional model. Gets very slow after 20+ models (10x+ slowdown)\n# Slowdown does not appear to impact Mac OS\n# Reproduced with raw torch: https://github.com/pytorch/pytorch/issues/31867\n# https://forums.fast.ai/t/runtimeerror-received-0-items-of-ancdata/48935\n# https://github.com/pytorch/pytorch/issues/973\n# https://pytorch.org/docs/master/multiprocessing.html#file-system-file-system\n# Slowdown bug not experienced on Linux if 'torch.multiprocessing.set_sharing_strategy('file_system')' commented out\n# NOTE: If below line is commented out, Torch uses many file descriptors. If issues arise, increase ulimit through 'ulimit -n 2048' or larger. Default on Linux is 1024.\n# torch.multiprocessing.set_sharing_strategy('file_system')\n\n# MacOS issue: torchvision==0.7.0 + torch==1.6.0 can cause segfaults; use torch==1.2.0 torchvision==0.4.0\n\nLABEL = '__label__'\nMISSING = '__!#ag_internal_missing#!__'\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO: Takes extremely long time prior to training start if many (10000) continuous features from ngrams, debug - explore TruncateSVD option to reduce input dimensionality\n# TODO: currently fastai automatically detect and use CUDA if available - add code to honor autogluon settings\nclass NNFastAiTabularModel(AbstractModel):\n \"\"\" Class for fastai v1 neural network models that operate on tabular data.\n\n Hyperparameters:\n y_scaler: on a regression problems, the model can give unreasonable predictions on unseen data.\n This attribute allows to pass a scaler for y values to address this problem. Please note that intermediate\n iteration metrics will be affected by this transform and as a result intermediate iteration scores will be\n different from the final ones (these will be correct).\n https://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing\n\n 'layers': list of hidden layers sizes; None - use model's heuristics; default is None\n\n 'emb_drop': embedding layers dropout; defaut is 0.1\n\n 'ps': linear layers dropout - list of values applied to every layer in `layers`; default is [0.1]\n\n 'bs': batch size; default is 256\n\n 'lr': maximum learning rate for one cycle policy; default is 1e-2;\n see also https://docs.fast.ai/callback.schedule.html#Learner.fit_one_cycle,\n One-cycle policy paper: https://arxiv.org/abs/1803.09820\n\n 'epochs': number of epochs; default is 30\n\n # Early stopping settings. 
See more details here: https://docs.fast.ai/callback.tracker.html#EarlyStoppingCallback\n 'early.stopping.min_delta': 0.0001,\n 'early.stopping.patience': 10,\n \"\"\"\n\n model_internals_file_name = 'model-internals.pkl'\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.cat_columns = None\n self.cont_columns = None\n self.columns_fills = None\n self.procs = None\n self.y_scaler = None\n self._inner_features = None\n self._load_model = None # Whether to load inner model when loading.\n\n def _preprocess_train(self, X, y, X_val, y_val):\n from fastai.tabular.core import TabularPandas\n from fastai.data.block import RegressionBlock, CategoryBlock\n from fastai.data.transforms import IndexSplitter\n from fastcore.basics import range_of\n\n X = self.preprocess(X, fit=True)\n if X_val is not None:\n X_val = self.preprocess(X_val)\n\n from fastai.tabular.core import FillMissing, Categorify, Normalize\n self.procs = [FillMissing, Categorify, Normalize]\n\n if self.problem_type in [REGRESSION, QUANTILE] and self.y_scaler is not None:\n y_norm = pd.Series(self.y_scaler.fit_transform(y.values.reshape(-1, 1)).reshape(-1))\n y_val_norm = pd.Series(self.y_scaler.transform(y_val.values.reshape(-1, 1)).reshape(-1)) if y_val is not None else None\n logger.log(0, f'Training with scaled targets: {self.y_scaler} - !!! NN training metric will be different from the final results !!!')\n else:\n y_norm = y\n y_val_norm = y_val\n\n logger.log(15, f'Using {len(self.cont_columns)} cont features')\n df_train, train_idx, val_idx = self._generate_datasets(X, y_norm, X_val, y_val_norm)\n y_block = RegressionBlock() if self.problem_type in [REGRESSION, QUANTILE] else CategoryBlock()\n\n # Copy cat_columns and cont_columns because TabularList is mutating the list\n data = TabularPandas(\n df_train,\n cat_names=self.cat_columns.copy(),\n cont_names=self.cont_columns.copy(),\n procs=self.procs,\n y_block=y_block,\n y_names=LABEL,\n splits=IndexSplitter(val_idx)(range_of(df_train)),\n )\n return data\n\n def _preprocess(self, X: pd.DataFrame, fit=False, **kwargs):\n X = super()._preprocess(X=X, **kwargs)\n if fit:\n self.cat_columns = self.feature_metadata.get_features(valid_raw_types=[R_OBJECT, R_CATEGORY, R_BOOL])\n self.cont_columns = self.feature_metadata.get_features(valid_raw_types=[R_INT, R_FLOAT, R_DATETIME])\n try:\n X_stats = X.describe(include='all').T.reset_index()\n cat_cols_to_drop = X_stats[(X_stats['unique'] > self.params.get('max_unique_categorical_values', 10000)) | (X_stats['unique'].isna())]['index'].values\n except:\n cat_cols_to_drop = []\n cat_cols_to_keep = [col for col in X.columns.values if (col not in cat_cols_to_drop)]\n cat_cols_to_use = [col for col in self.cat_columns if col in cat_cols_to_keep]\n logger.log(15, f'Using {len(cat_cols_to_use)}/{len(self.cat_columns)} categorical features')\n self.cat_columns = cat_cols_to_use\n self.cat_columns = [feature for feature in self.cat_columns if feature in list(X.columns)]\n self.cont_columns = [feature for feature in self.cont_columns if feature in list(X.columns)]\n\n self.columns_fills = {}\n for c in self.cat_columns:\n self.columns_fills[c] = MISSING\n for c in self.cont_columns:\n self.columns_fills[c] = X[c].mean()\n self._inner_features = self.cat_columns + self.cont_columns\n return self._fill_missing(X)\n\n def _fill_missing(self, df: pd.DataFrame) -> pd.DataFrame:\n df = df[self._inner_features].copy()\n for c in self.cat_columns:\n df[c] = df[c].cat.add_categories(MISSING)\n df[c] = 
df[c].fillna(self.columns_fills[c])\n for c in self.cont_columns:\n df[c] = df[c].fillna(self.columns_fills[c])\n return df\n\n def _fit(self,\n X,\n y,\n X_val=None,\n y_val=None,\n time_limit=None,\n num_cpus=None,\n num_gpus=0,\n sample_weight=None,\n **kwargs):\n try_import_fastai()\n from fastai.tabular.model import tabular_config\n from .fastai_helpers import tabular_learner\n from fastcore.basics import defaults\n from .callbacks import AgSaveModelCallback, EarlyStoppingCallbackWithTimeLimit\n from .quantile_helpers import HuberPinballLoss\n import torch\n\n start_time = time.time()\n if sample_weight is not None: # TODO: support\n logger.log(15, \"sample_weight not yet supported for NNFastAiTabularModel, this model will ignore them in training.\")\n\n params = self._get_model_params()\n\n self.y_scaler = params.get('y_scaler', None)\n if self.problem_type == QUANTILE and self.y_scaler is None:\n self.y_scaler = sklearn.preprocessing.MinMaxScaler()\n if self.y_scaler is not None:\n self.y_scaler = copy.deepcopy(self.y_scaler)\n\n if num_cpus is None:\n num_cpus = defaults.cpus\n # additional workers are helping only when fork is enabled; in other mp modes, communication overhead reduces performance\n num_workers = int(num_cpus / 2)\n if not is_fork_enabled():\n num_workers = 0\n if num_gpus is not None:\n if num_gpus == 0:\n # TODO: Does not obviously impact inference speed\n defaults.device = torch.device('cpu')\n else:\n defaults.device = torch.device('cuda')\n\n logger.log(15, f'Fitting Neural Network with parameters {params}...')\n data = self._preprocess_train(X, y, X_val, y_val)\n\n nn_metric, objective_func_name = self.__get_objective_func_name(self.stopping_metric)\n objective_func_name_to_monitor = self.__get_objective_func_to_monitor(objective_func_name)\n objective_optim_mode = np.less if objective_func_name in [\n 'log_loss',\n 'root_mean_squared_error', 'mean_squared_error', 'mean_absolute_error', 'median_absolute_error', 'r2', # Regression objectives\n 'pinball_loss', # Quantile objective\n ] else np.greater\n\n # TODO: calculate max emb concat layer size and use 1st layer as that value and 2nd in between number of classes and the value\n if params.get('layers', None) is not None:\n layers = params['layers']\n elif self.problem_type in [REGRESSION, BINARY]:\n layers = [200, 100]\n elif self.problem_type == QUANTILE:\n base_size = max(len(self.quantile_levels) * 4, 128)\n layers = [base_size, base_size, base_size]\n else:\n base_size = max(data.c * 2, 100)\n layers = [base_size * 2, base_size]\n\n loss_func = None\n if self.problem_type == QUANTILE:\n loss_func = HuberPinballLoss(self.quantile_levels, alpha=self.params['alpha'])\n\n if time_limit:\n time_elapsed = time.time() - start_time\n time_left = time_limit - time_elapsed\n else:\n time_left = None\n\n best_epoch_stop = params.get(\"best_epoch\", None) # Use best epoch for refit_full.\n dls = data.dataloaders(bs=self.params['bs'] if len(X) > self.params['bs'] else 32)\n\n if self.problem_type == QUANTILE:\n dls.c = len(self.quantile_levels)\n\n self.model = tabular_learner(\n dls, layers=layers, metrics=nn_metric,\n config=tabular_config(ps=params['ps'], embed_p=params['emb_drop']),\n loss_func=loss_func,\n )\n logger.log(15, self.model.model)\n\n save_callback = AgSaveModelCallback(\n monitor=objective_func_name_to_monitor, comp=objective_optim_mode, fname=self.name,\n best_epoch_stop=best_epoch_stop, with_opt=True\n )\n\n early_stopping = EarlyStoppingCallbackWithTimeLimit(\n 
monitor=objective_func_name_to_monitor,\n comp=objective_optim_mode,\n min_delta=params['early.stopping.min_delta'],\n patience=params['early.stopping.patience'],\n time_limit=time_left, best_epoch_stop=best_epoch_stop\n )\n\n callbacks = [save_callback, early_stopping]\n\n with make_temp_directory() as temp_dir:\n with self.model.no_bar():\n with self.model.no_logging():\n original_path = self.model.path\n self.model.path = Path(temp_dir)\n self.model.fit_one_cycle(params['epochs'], params['lr'], cbs=callbacks)\n\n # Load the best one and export it\n self.model = self.model.load(self.name)\n\n if objective_func_name == 'log_loss':\n eval_result = self.model.validate(dl=dls.valid)[0]\n else:\n eval_result = self.model.validate(dl=dls.valid)[1]\n\n logger.log(15, f'Model validation metrics: {eval_result}')\n self.model.path = original_path\n\n self.params_trained['best_epoch'] = save_callback.best_epoch\n\n def _generate_datasets(self, X, y, X_val, y_val):\n df_train = pd.concat([X, X_val], ignore_index=True)\n df_train[LABEL] = pd.concat([y, y_val], ignore_index=True)\n train_idx = np.arange(len(X))\n if X_val is None:\n # use validation set for refit_full case - it's not going to be used for early stopping\n val_idx = np.array([0, 1]) + len(train_idx)\n df_train = pd.concat([df_train, df_train[:2]], ignore_index=True)\n else:\n val_idx = np.arange(len(X_val)) + len(X)\n return df_train, train_idx, val_idx\n\n def __get_objective_func_name(self, stopping_metric):\n metrics_map = self.__get_metrics_map()\n\n # Unsupported metrics will be replaced by defaults for a given problem type\n objective_func_name = stopping_metric.name\n if objective_func_name not in metrics_map.keys():\n if self.problem_type == REGRESSION:\n objective_func_name = 'mean_squared_error'\n elif self.problem_type == QUANTILE:\n objective_func_name = 'pinball_loss'\n else:\n objective_func_name = 'log_loss'\n logger.warning(f'Metric {stopping_metric.name} is not supported by this model - using {objective_func_name} instead')\n\n nn_metric = metrics_map.get(objective_func_name, None)\n\n return nn_metric, objective_func_name\n\n def __get_objective_func_to_monitor(self, objective_func_name):\n monitor_obj_func = {\n **{k: m.name if hasattr(m, 'name') else m.__name__ for k, m in self.__get_metrics_map().items() if m is not None},\n 'log_loss': 'valid_loss'\n }\n objective_func_name_to_monitor = objective_func_name\n if objective_func_name in monitor_obj_func:\n objective_func_name_to_monitor = monitor_obj_func[objective_func_name]\n return objective_func_name_to_monitor\n\n def _predict_proba(self, X, **kwargs):\n X = self.preprocess(X, **kwargs)\n\n single_row = len(X) == 1\n # fastai has issues predicting on a single row, duplicating the row as a workaround\n if single_row:\n X = pd.concat([X, X]).reset_index(drop=True)\n\n # Copy cat_columns and cont_columns because TabularList is mutating the list\n test_dl = self.model.dls.test_dl(X)\n with self.model.no_bar():\n with self.model.no_logging():\n preds, _ = self.model.get_preds(dl=test_dl)\n if single_row:\n preds = preds[:1, :]\n if self.problem_type == REGRESSION:\n if self.y_scaler is not None:\n return self.y_scaler.inverse_transform(preds.numpy()).reshape(-1)\n else:\n return preds.numpy().reshape(-1)\n elif self.problem_type == QUANTILE:\n from .quantile_helpers import isotonic\n if self.y_scaler is not None:\n preds = self.y_scaler.inverse_transform(preds.numpy()).reshape(-1, len(self.quantile_levels))\n else:\n preds = preds.numpy().reshape(-1, 
len(self.quantile_levels))\n return isotonic(preds, self.quantile_levels)\n elif self.problem_type == BINARY:\n return preds[:, 1].numpy()\n else:\n return preds.numpy()\n\n def save(self, path: str = None, verbose=True) -> str:\n from .fastai_helpers import export\n self._load_model = self.model is not None\n __model = self.model\n self.model = None\n path = super().save(path=path, verbose=verbose)\n self.model = __model\n # Export model\n if self._load_model:\n save_pkl.save_with_fn(\n f'{path}{self.model_internals_file_name}',\n self.model,\n pickle_fn=lambda m, buffer: export(m, buffer),\n verbose=verbose\n )\n self._load_model = None\n return path\n\n\n @classmethod\n def load(cls, path: str, reset_paths=True, verbose=True):\n from fastai.learner import load_learner\n model = super().load(path, reset_paths=reset_paths, verbose=verbose)\n if model._load_model:\n model.model = load_pkl.load_with_fn(f'{model.path}{model.model_internals_file_name}', lambda p: load_learner(p), verbose=verbose)\n model._load_model = None\n return model\n\n def _set_default_params(self):\n \"\"\" Specifies hyperparameter values to use by default \"\"\"\n default_params = get_param_baseline(self.problem_type)\n for param, val in default_params.items():\n self._set_default_param_value(param, val)\n\n def _get_default_searchspace(self):\n return get_default_searchspace(self.problem_type, num_classes=None)\n\n # TODO: add warning regarding dataloader leak: https://github.com/pytorch/pytorch/issues/31867\n # TODO: Add HPO\n def _hyperparameter_tune(self, **kwargs):\n return skip_hpo(self, **kwargs)\n\n def _get_default_auxiliary_params(self) -> dict:\n default_auxiliary_params = super()._get_default_auxiliary_params()\n extra_auxiliary_params = dict(\n ignored_type_group_raw=[R_OBJECT],\n )\n default_auxiliary_params.update(extra_auxiliary_params)\n return default_auxiliary_params\n\n def __get_metrics_map(self):\n from fastai.metrics import rmse, mse, mae, accuracy, FBeta, RocAucBinary, Precision, Recall, R2Score\n from .fastai_helpers import medae\n from .quantile_helpers import PinballLoss\n metrics_map = {\n # Regression\n 'root_mean_squared_error': rmse,\n 'mean_squared_error': mse,\n 'mean_absolute_error': mae,\n 'r2': R2Score(),\n 'median_absolute_error': medae,\n\n # Classification\n 'accuracy': accuracy,\n\n 'f1': FBeta(beta=1),\n 'f1_macro': FBeta(beta=1, average='macro'),\n 'f1_micro': FBeta(beta=1, average='micro'),\n 'f1_weighted': FBeta(beta=1, average='weighted'), # this one has some issues\n\n 'roc_auc': RocAucBinary(),\n\n 'precision': Precision(),\n 'precision_macro': Precision(average='macro'),\n 'precision_micro': Precision(average='micro'),\n 'precision_weighted': Precision(average='weighted'),\n\n 'recall': Recall(),\n 'recall_macro': Recall(average='macro'),\n 'recall_micro': Recall(average='micro'),\n 'recall_weighted': Recall(average='weighted'),\n 'log_loss': None,\n\n 'pinball_loss': PinballLoss(quantile_levels=self.quantile_levels)\n # Not supported: pac_score\n }\n return metrics_map\n" ]
[ [ "torch.device", "numpy.array", "sklearn.preprocessing.MinMaxScaler", "pandas.concat" ] ]
Thuva97/TransSeg
[ "469722dd62755b75a865721b10c2016d9f2daa4a" ]
[ "dataloader.py" ]
[ "import librosa\r\nimport numpy as np\r\nimport soundfile as sf\r\nimport torch\r\nfrom boltons.fileutils import iter_find_files\r\nfrom torch.utils.data import Dataset\r\n\r\n\r\ndef collate_fn_padd(batch):\r\n \"\"\"collate_fn_padd\r\n Padds batch of variable length\r\n\r\n :param batch:\r\n \"\"\"\r\n # get sequence lengths\r\n spects = [t[0] for t in batch]\r\n segs = [t[1] for t in batch]\r\n lengths = [t[2] for t in batch]\r\n\r\n # pad and stack\r\n padded_spects = torch.nn.utils.rnn.pad_sequence(spects, batch_first=True)\r\n lengths = torch.LongTensor(lengths)\r\n\r\n return padded_spects, segs, lengths\r\n\r\n\r\ndef mfcc_dist(mfcc):\r\n \"\"\"mfcc_dist\r\n calc 4-dimensional dist features like in HTK\r\n\r\n :param mfcc:\r\n \"\"\"\r\n d = []\r\n for i in range(2, 9, 2):\r\n pad = int(i/2)\r\n d_i = np.concatenate([np.zeros(pad), ((mfcc[:, i:] - mfcc[:, :-i]) ** 2).sum(0) ** 0.5, np.zeros(pad)], axis=0)\r\n d.append(d_i)\r\n return np.stack(d)\r\n\r\n\r\ndef segmentation_to_binary_mask(segmentation):\r\n \"\"\"\r\n replicates boundaries to frame-wise labels\r\n example:\r\n segmentation - [0, 3, 5]\r\n returns - [1, 0, 0, 1, 0, 1]\r\n\r\n :param segmentation:\r\n :param phonemes:\r\n \"\"\"\r\n mask = torch.zeros(segmentation[-1] + 1).long()\r\n for boundary in segmentation[1:-1]:\r\n mask[boundary] = 1\r\n return mask\r\n\r\n\r\ndef extract_features(wav_file):\r\n wav, sr = sf.read(wav_file)\r\n\r\n # extract mfcc\r\n spect = librosa.feature.mfcc(wav,\r\n sr=sr,\r\n n_fft=160,\r\n hop_length=160,\r\n n_mels=40,\r\n n_mfcc=13)\r\n\r\n spect = (spect - spect.mean(0)) / spect.std(0)\r\n\r\n delta = librosa.feature.delta(spect, order=1)\r\n delta2 = librosa.feature.delta(spect, order=2)\r\n spect = np.concatenate([spect, delta, delta2], axis=0)\r\n\r\n dist = mfcc_dist(spect)\r\n spect = np.concatenate([spect, dist], axis=0)\r\n\r\n spect = torch.transpose(torch.FloatTensor(spect), 0, 1)\r\n return spect\r\n\r\n\r\ndef get_onset_offset(segmentations):\r\n search_start, search_end = float(\"inf\"), 0\r\n for seg in segmentations:\r\n start, end = seg[0], seg[-1]\r\n if start < search_start:\r\n search_start = start\r\n if end > search_end:\r\n search_end = end\r\n return search_start, search_end\r\n\r\n\r\nclass WavPhnDataset(Dataset):\r\n def __init__(self, path):\r\n self.wav_path = path\r\n self.data = list(iter_find_files(self.wav_path, \"*.wav\"))\r\n super(WavPhnDataset, self).__init__()\r\n\r\n @staticmethod\r\n def get_datasets():\r\n raise NotImplementedError\r\n\r\n def process_file(self, wav_path):\r\n phn_path = wav_path.replace(\"wav\", \"PHN\")\r\n\r\n # load audio\r\n spect = extract_features(wav_path)\r\n\r\n # load labels -- segmentation and phonemes\r\n with open(phn_path, \"r\") as f:\r\n lines = f.readlines()\r\n lines = list(map(lambda line: line.split(\" \"), lines))\r\n\r\n # get segment times\r\n times = torch.FloatTensor([0] + list(map(lambda line: int(line[1]), lines)))\r\n wav_len = times[-1]\r\n times = (times / wav_len * (len(spect) - 1)).long()\r\n boundries = segmentation_to_binary_mask(times)\r\n\r\n return spect, boundries\r\n\r\n def __getitem__(self, idx):\r\n\r\n spect, seg = self.process_file(self.data[idx])\r\n\r\n return spect, seg, spect.shape[0]\r\n\r\n def __len__(self):\r\n return len(self.data)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n dataset = WavPhnDataset(\"timit/train\")\r\n spect, boundaries, len = dataset[0]\r\n print(len(spect[0]))\r\n print(boundaries)\r\n" ]
[ [ "numpy.concatenate", "torch.zeros", "numpy.zeros", "torch.nn.utils.rnn.pad_sequence", "torch.FloatTensor", "numpy.stack", "torch.LongTensor" ] ]
rgayon/timesketch
[ "5b055a580652b85c594b7383ef3c7747ba956b4f" ]
[ "api_client/python/timesketch_api_client/search.py" ]
[ "# Copyright 2020 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Timesketch API search object.\"\"\"\nimport datetime\nimport json\nimport logging\n\nimport pandas\n\nfrom . import error\nfrom . import resource\n\n\nlogger = logging.getLogger('timesketch_api.search')\n\n\nclass Chip:\n \"\"\"Class definition for a query filter chip.\"\"\"\n\n # The type of a chip that is defiend.\n CHIP_TYPE = ''\n\n # The chip value defines what property or attribute of the\n # chip class will be used to generate the chip value.\n CHIP_VALUE = ''\n\n # The value of the chip field.\n CHIP_FIELD = ''\n\n def __init__(self):\n \"\"\"Initialize the chip.\"\"\"\n self._active = True\n self._operator = 'must'\n self._chip_field = self.CHIP_FIELD\n\n @property\n def active(self):\n \"\"\"A property that returns whether the chip is active or not.\"\"\"\n return self._active\n\n @active.setter\n def active(self, active):\n \"\"\"Decide whether the chip is active or disabled.\"\"\"\n self._active = bool(active)\n\n @property\n def chip(self):\n \"\"\"A property that returns the chip value.\"\"\"\n return {\n 'field': self._chip_field,\n 'type': self.CHIP_TYPE,\n 'operator': self._operator,\n 'active': self._active,\n 'value': getattr(self, self.CHIP_VALUE, ''),\n }\n\n def from_dict(self, chip_dict):\n \"\"\"Configure the chip from a dictionary.\"\"\"\n raise NotImplementedError\n\n def set_include(self):\n \"\"\"Configure the chip so the content needs to be included in results.\"\"\"\n self._operator = 'must'\n\n def set_exclude(self):\n \"\"\"Configure the chip so content needs to be excluded in results.\"\"\"\n self._operator = 'must_not'\n\n def set_optional(self):\n \"\"\"Configure the chip so the content is optional in results.\"\"\"\n self._operator = 'should'\n\n def set_active(self):\n \"\"\"Set the chip as active.\"\"\"\n self._active = True\n\n def set_disable(self):\n \"\"\"Disable the chip.\"\"\"\n self._active = False\n\n\nclass DateIntervalChip(Chip):\n \"\"\"A date interval chip.\"\"\"\n\n CHIP_TYPE = 'datetime_interval'\n CHIP_VALUE = 'interval'\n\n _DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'\n\n def __init__(self):\n \"\"\"Initialize the chip.\"\"\"\n super().__init__()\n self._date = None\n self._before = 5\n self._after = 5\n self._unit = 'm'\n\n def add_interval(self, before, after=None, unit='m'):\n \"\"\"Set the interval of the chip.\n\n Args:\n before (int): the number of units that should be included\n before the date.\n after (int): optional number of units after the date. If not\n provided the value of before is used.\n unit (str): optional string with the unit of interval. 
This can\n be s for seconds, m for minutes, d for days and h for hours.\n The default value is m (minutes).\n\n Raises:\n ValueError if the unit is not correctly formed.\n \"\"\"\n if after is None:\n after = before\n\n self.unit = unit\n\n self._before = before\n self._after = after\n\n @property\n def after(self):\n \"\"\"Property that returns the time interval after the date.\"\"\"\n return self._after\n\n @after.setter\n def after(self, after):\n \"\"\"Make changes to the time interval after the date.\"\"\"\n self._after = after\n\n @property\n def before(self):\n \"\"\"Property that returns the time interval before the date.\"\"\"\n return self._before\n\n @before.setter\n def before(self, before):\n \"\"\"Make changes to the time interval before the date.\"\"\"\n self._before = before\n\n @property\n def date(self):\n \"\"\"Property that returns back the date.\"\"\"\n if not self._date:\n return ''\n return self._date.strftime(self._DATE_FORMAT)\n\n @date.setter\n def date(self, date):\n \"\"\"Make changes to the date.\"\"\"\n try:\n dt = datetime.datetime.strptime(date, self._DATE_FORMAT)\n except ValueError as exc:\n logger.error(\n 'Unable to add date chip, wrong date format', exc_info=True)\n raise ValueError('Wrong date format') from exc\n self._date = dt\n\n def from_dict(self, chip_dict):\n \"\"\"Configure the chip from a dictionary.\"\"\"\n value = chip_dict.get('value')\n if not value:\n return\n date, before, after = value.split()\n self.unit = before[-1]\n self.date = date\n self.before = int(before[1:-1])\n self.after = int(after[1:-1])\n\n @property\n def interval(self):\n \"\"\"A property that returns back the full interval.\"\"\"\n return (\n f'{self.date} -{self.before}{self.unit} +{self.after}{self.unit}')\n\n @property\n def unit(self):\n \"\"\"Property that returns back the unit used.\"\"\"\n return self._unit\n\n @unit.setter\n def unit(self, unit):\n \"\"\"Make changes to the unit.\"\"\"\n if unit not in ('s', 'm', 'd', 'h'):\n raise ValueError(\n 'Unable to add interval, needs to be one of: '\n 's (seconds), m (minutes), h (hours) or d (days)')\n self._unit = unit\n\n\nclass DateRangeChip(Chip):\n \"\"\"A date range chip.\"\"\"\n\n CHIP_TYPE = 'datetime_range'\n CHIP_VALUE = 'date_range'\n\n _DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'\n\n def __init__(self):\n \"\"\"Initialize the date range.\"\"\"\n super().__init__()\n self._start_date = None\n self._end_date = None\n\n def add_end_time(self, end_time):\n \"\"\"Add an end time to the range.\n\n Args:\n end_time (str): date string using the format '%Y-%m-%dT%H:%M:%s'\n\n Raises:\n ValueError: if the date format is incorrectly formatted.\n \"\"\"\n try:\n dt = datetime.datetime.strptime(end_time, self._DATE_FORMAT)\n except ValueError as exc:\n logger.error(\n 'Unable to add date chip, wrong date format', exc_info=True)\n raise ValueError('Wrong date format') from exc\n self._end_date = dt\n\n def add_start_time(self, start_time):\n \"\"\"Add a start time to the range.\n\n Args:\n start_time (str): date string using the format '%Y-%m-%dT%H:%M:%s'\n\n Raises:\n ValueError: if the date format is incorrectly formatted.\n \"\"\"\n try:\n dt = datetime.datetime.strptime(start_time, self._DATE_FORMAT)\n except ValueError as exc:\n logger.error(\n 'Unable to add date chip, wrong date format', exc_info=True)\n raise ValueError('Wrong date format') from exc\n self._start_date = dt\n\n @property\n def end_time(self):\n \"\"\"Property that returns the end time of a range.\"\"\"\n if not self._end_date:\n return ''\n return 
self._end_date.strftime(self._DATE_FORMAT)\n\n @end_time.setter\n def end_time(self, end_time):\n \"\"\"Sets the new end time.\"\"\"\n self.add_end_time(end_time)\n\n @property\n def date_range(self):\n \"\"\"Property that returns back the range.\"\"\"\n return f'{self.start_time},{self.end_time}'\n\n @date_range.setter\n def date_range(self, date_range):\n \"\"\"Sets the new range of the date range chip.\"\"\"\n start_time, end_time = date_range.split(',')\n self.add_start_time(start_time)\n self.add_end_time(end_time)\n\n def from_dict(self, chip_dict):\n \"\"\"Configure the chip from a dictionary.\"\"\"\n chip_value = chip_dict.get('value')\n if not chip_value:\n return\n start, end = chip_value.split(',')\n self.start_time = start\n self.end_time = end\n\n @property\n def start_time(self):\n \"\"\"Property that returns the start time of a range.\"\"\"\n if not self._start_date:\n return ''\n return self._start_date.strftime(self._DATE_FORMAT)\n\n @start_time.setter\n def start_time(self, start_time):\n \"\"\"Sets the new start time of a range.\"\"\"\n self.add_start_time(start_time)\n\n\nclass LabelChip(Chip):\n \"\"\"Label chip.\"\"\"\n\n CHIP_TYPE = 'label'\n CHIP_VALUE = 'label'\n\n def __init__(self):\n \"\"\"Initialize the chip.\"\"\"\n super().__init__()\n self._label = ''\n\n def from_dict(self, chip_dict):\n \"\"\"Configure the chip from a dictionary.\"\"\"\n chip_value = chip_dict.get('value')\n if not chip_value:\n return\n\n self.label = chip_value\n\n @property\n def label(self):\n \"\"\"Property that returns back the label.\"\"\"\n return self._label\n\n @label.setter\n def label(self, label):\n \"\"\"Make changes to the label.\"\"\"\n self._label = label\n\n def use_comment_label(self):\n \"\"\"Use the comment label.\"\"\"\n self._label = '__ts_comment'\n\n def use_star_label(self):\n \"\"\"Use the star label.\"\"\"\n self._label = '__ts_star'\n\n\nclass TermChip(Chip):\n \"\"\"Term chip definition.\"\"\"\n\n CHIP_TYPE = 'term'\n CHIP_VALUE = 'query'\n\n def __init__(self):\n \"\"\"Initialize the chip.\"\"\"\n super().__init__()\n self._query = ''\n\n @property\n def field(self):\n \"\"\"Property that returns back the field used to match against.\"\"\"\n return self._chip_field\n\n @field.setter\n def field(self, field):\n \"\"\"Make changes to the field used to match against.\"\"\"\n self._chip_field = field\n\n def from_dict(self, chip_dict):\n \"\"\"Configure the term chip from a dictionary.\"\"\"\n chip_value = chip_dict.get('value')\n if not chip_value:\n return\n\n self.field = chip_dict.get('field')\n self.query = chip_value\n\n @property\n def query(self):\n \"\"\"Property that returns back the query.\"\"\"\n return self._query\n\n @query.setter\n def query(self, query):\n \"\"\"Make changes to the query.\"\"\"\n self._query = query\n\n\nclass Search(resource.SketchResource):\n \"\"\"Search object.\"\"\"\n\n DEFAULT_SIZE_LIMIT = 10000\n\n def __init__(self, sketch):\n resource_uri = f'sketches/{sketch.id}/explore/'\n super().__init__(sketch=sketch, resource_uri=resource_uri)\n\n self._aggregations = ''\n self._chips = []\n self._created_at = ''\n self._description = ''\n self._max_entries = self.DEFAULT_SIZE_LIMIT\n self._name = ''\n self._query_dsl = ''\n self._query_filter = {}\n self._query_string = ''\n self._raw_response = None\n self._return_fields = ''\n self._scrolling = None\n self._searchtemplate = ''\n self._updated_at = ''\n\n def _extract_chips(self, query_filter):\n \"\"\"Extract chips from a query_filter.\"\"\"\n chips = query_filter.get('chips', 
[])\n if not chips:\n return\n\n for chip_dict in chips:\n chip_type = chip_dict.get('type')\n if not chip_type:\n continue\n\n chip = None\n if chip_type == 'datetime_interval':\n chip = DateIntervalChip()\n elif chip_type == 'datetime_range':\n chip = DateRangeChip()\n elif chip_type == 'label':\n chip = LabelChip()\n elif chip_type == 'term':\n chip = TermChip()\n\n if not chip:\n continue\n chip.from_dict(chip_dict)\n\n active = chip_dict.get('active', True)\n chip.active = active\n\n operator = chip_dict.get('operator', 'must')\n if operator == 'must':\n chip.set_include()\n elif operator == 'must_not':\n chip.set_exclude()\n\n self.add_chip(chip)\n\n def _execute_query(self, file_name=''):\n \"\"\"Execute a search request and store the results.\n\n Args:\n file_name (str): optional file path to a filename that\n all the results will be saved to. If not provided\n the results will be stored in the search object.\n \"\"\"\n query_filter = self.query_filter\n if not isinstance(query_filter, dict):\n raise ValueError(\n 'Unable to query with a query filter that isn\\'t a dict.')\n\n stop_size = self._max_entries\n scrolling = not bool(stop_size and (\n stop_size < self.DEFAULT_SIZE_LIMIT))\n\n if self.scrolling is not None:\n scrolling = self.scrolling\n\n form_data = {\n 'query': self._query_string,\n 'filter': query_filter,\n 'dsl': self._query_dsl,\n 'fields': self._return_fields,\n 'enable_scroll': scrolling,\n 'file_name': file_name,\n }\n\n response = self.api.session.post(\n f'{self.api.api_root}/{self.resource_uri}', json=form_data)\n if not error.check_return_status(response, logger):\n error.error_message(\n response, message='Unable to query results',\n error=ValueError)\n\n if file_name:\n with open(file_name, 'wb') as fw:\n fw.write(response.content)\n return\n\n response_json = error.get_response_json(response, logger)\n\n scroll_id = response_json.get('meta', {}).get('scroll_id', '')\n form_data['scroll_id'] = scroll_id\n\n count = len(response_json.get('objects', []))\n total_count = count\n while count > 0:\n if self._max_entries and total_count >= self._max_entries:\n break\n\n if not scroll_id:\n logger.debug('No scroll ID, will stop.')\n break\n\n more_response = self.api.session.post(\n f'{self.api.api_root}/{self.resource_uri}', json=form_data)\n if not error.check_return_status(more_response, logger):\n error.error_message(\n more_response, message='Unable to query results',\n error=ValueError)\n more_response_json = error.get_response_json(more_response, logger)\n count = len(more_response_json.get('objects', []))\n total_count += count\n response_json['objects'].extend(\n more_response_json.get('objects', []))\n more_meta = more_response_json.get('meta', {})\n added_time = more_meta.get('es_time', 0)\n response_json['meta']['es_time'] += added_time\n\n total_elastic_count = response_json.get(\n 'meta', {}).get('es_total_count', 0)\n if total_elastic_count != total_count:\n logger.info(\n '%d results were returned, but '\n '%d records matched the search query',\n total_count, total_elastic_count)\n\n self._raw_response = response_json\n\n def add_chip(self, chip):\n \"\"\"Add a chip to the search query.\"\"\"\n self._chips.append(chip)\n self.commit()\n\n def add_date_range(self, start_time, end_time):\n \"\"\"Add a date range chip to the search query.\n\n Args:\n start_time (str): a string with the start time of the range,\n the format should be '%Y-%m-%dT%H:%M:%S'\n end_time (str): a string with the end time of the range,\n the format should be '%Y-%m-%dT%H:%M:%S'\n \"\"\"\n 
chip = DateRangeChip()\n chip.start_time = start_time\n chip.end_time = end_time\n self.add_chip(chip)\n\n @property\n def chips(self):\n \"\"\"Property that returns all the chips in the search object.\"\"\"\n return self._chips\n\n def commit(self):\n \"\"\"Commit changes to the search object.\"\"\"\n self._raw_response = None\n super().commit()\n\n @property\n def created_at(self):\n \"\"\"Property that returns back the creation time of a search.\"\"\"\n return self._created_at\n\n def delete(self):\n \"\"\"Deletes the saved search from the store.\"\"\"\n if not self._resource_id:\n logger.warning(\n 'Unable to delete the saved search, it does not appear to be '\n 'saved in the first place.')\n return False\n\n resource_url = (\n f'{self.api.api_root}/sketches/{self._sketch.id}/views/'\n f'{self._resource_id}/')\n response = self.api.session.delete(resource_url)\n return error.check_return_status(response, logger)\n\n @property\n def description(self):\n \"\"\"Property that returns back the description of the saved search.\"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n \"\"\"Make changes to the saved search description field.\"\"\"\n self._description = description\n self.commit()\n\n def from_manual( # pylint: disable=arguments-differ\n self,\n query_string=None,\n query_dsl=None,\n query_filter=None,\n return_fields=None,\n max_entries=None,\n **kwargs):\n \"\"\"Explore the sketch.\n\n Args:\n query_string (str): Elasticsearch query string.\n query_dsl (str): Elasticsearch query DSL as JSON string.\n query_filter (dict): Filter for the query as a dict.\n return_fields (str): A comma separated string with a list of fields\n that should be included in the response. Optional and defaults\n to None.\n max_entries (int): Optional integer denoting a best effort to limit\n the output size to the number of events. 
Events are read in,\n 10k at a time so there may be more events in the answer back\n than this number denotes, this is a best effort.\n kwargs (dict[str, object]): Depending on the resource they may\n require different sets of arguments to be able to run a raw\n API request.\n\n Raises:\n ValueError: if unable to query for the results.\n RuntimeError: if the query is missing needed values, or if the\n sketch is archived.\n \"\"\"\n super().from_manual(**kwargs)\n if not (query_string or query_filter or query_dsl):\n raise RuntimeError('You need to supply a query')\n\n self._username = self.api.current_user.username\n self._name = 'From Explore'\n self._description = 'From Explore'\n\n if query_filter:\n self.query_filter = query_filter\n\n self._query_string = query_string\n self._query_dsl = query_dsl\n self._return_fields = return_fields\n\n if max_entries:\n self._max_entries = max_entries\n\n # TODO: Make use of search templates and aggregations.\n #self._searchtemplate = data.get('searchtemplate', 0)\n #self._aggregations = data.get('aggregation', 0)\n\n self._created_at = datetime.datetime.now(\n datetime.timezone.utc).isoformat()\n self._updated_at = self._created_at\n\n self.resource_data = {}\n\n def from_saved(self, search_id): # pylint: disable=arguments-differ\n \"\"\"Initialize the search object from a saved search.\n\n Args:\n search_id: integer value for the saved\n search (primary key).\n \"\"\"\n resource_uri = f'sketches/{self._sketch.id}/views/{search_id}/'\n resource_data = self.api.fetch_resource_data(resource_uri)\n\n data = resource_data.get('objects', [None])[0]\n if not data:\n logger.error('Unable to get any data back from a saved search.')\n return\n\n label_string = data.get('label_string', '')\n if label_string:\n self._labels = json.loads(label_string)\n else:\n self._labels = []\n\n self._aggregations = data.get('aggregation', 0)\n self._created_at = data.get('created_at', '')\n self._description = data.get('description', '')\n self._name = data.get('name', '')\n self._query_dsl = data.get('query_dsl', '')\n query_filter = data.get('query_filter', '')\n if query_filter:\n filter_dict = json.loads(query_filter)\n if 'fields' in filter_dict:\n fields = filter_dict.pop('fields')\n return_fields = [x.get('field') for x in fields]\n self.return_fields = ','.join(return_fields)\n\n self.query_filter = filter_dict\n self._query_string = data.get('query_string', '')\n self._resource_id = search_id\n self._searchtemplate = data.get('searchtemplate', 0)\n self._updated_at = data.get('updated_at', '')\n self._username = data.get('user', {}).get('username', 'System')\n\n self.resource_data = data\n\n @property\n def max_entries(self):\n \"\"\"Return the maximum number of entries in the return value.\"\"\"\n return self._max_entries\n\n @max_entries.setter\n def max_entries(self, max_entries):\n \"\"\"Make changes to the max entries of return values.\"\"\"\n self._max_entries = max_entries\n if max_entries < self.DEFAULT_SIZE_LIMIT:\n _ = self.query_filter\n self._query_filter['size'] = max_entries\n self._query_filter['terminate_after'] = max_entries\n self.commit()\n\n @property\n def name(self):\n \"\"\"Property that returns the query name.\"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Make changes to the saved search name.\"\"\"\n self._name = name\n self.commit()\n\n def order_ascending(self):\n \"\"\"Set the order of objects returned back ascending.\"\"\"\n # Trigger a creation of a query filter if it does not exist.\n _ = 
self.query_filter\n self._query_filter['order'] = 'asc'\n\n def order_descending(self):\n \"\"\"Set the order of objects returned back descending.\"\"\"\n # Trigger a creation of a query filter if it does not exist.\n _ = self.query_filter\n self._query_filter['order'] = 'desc'\n\n @property\n def query_dsl(self):\n \"\"\"Property that returns back the query DSL.\"\"\"\n return self._query_dsl\n\n @query_dsl.setter\n def query_dsl(self, query_dsl):\n \"\"\"Make changes to the query DSL of the search.\"\"\"\n self._query_dsl = query_dsl\n self.commit()\n\n @property\n def query_filter(self):\n \"\"\"Property that returns the query filter.\"\"\"\n if not self._query_filter:\n self._query_filter = {\n 'time_start': None,\n 'time_end': None,\n 'size': self.DEFAULT_SIZE_LIMIT,\n 'terminate_after': self.DEFAULT_SIZE_LIMIT,\n 'indices': '_all',\n 'order': 'asc',\n 'chips': [],\n }\n query_filter = self._query_filter\n query_filter['chips'] = [x.chip for x in self._chips]\n return query_filter\n\n @query_filter.setter\n def query_filter(self, query_filter):\n \"\"\"Make changes to the query filter.\"\"\"\n if isinstance(query_filter, str):\n try:\n query_filter = json.loads(query_filter)\n except json.JSONDecodeError as exc:\n raise ValueError('Unable to parse the string as JSON') from exc\n\n if not isinstance(query_filter, dict):\n raise ValueError('Query filter needs to be a dict.')\n self._query_filter = query_filter\n self._extract_chips(query_filter)\n self.commit()\n\n @property\n def query_string(self):\n \"\"\"Property that returns back the query string.\"\"\"\n return self._query_string\n\n @query_string.setter\n def query_string(self, query_string):\n \"\"\"Make changes to the query string of a saved search.\"\"\"\n self._query_string = query_string\n self.commit()\n\n def remove_chip(self, chip_index):\n \"\"\"Remove a chip from the saved search.\"\"\"\n chip_len = len(self._chips)\n if chip_index > (chip_len + 1):\n raise ValueError(\n f'Unable to remove chip, only {chip_len} chips stored '\n f'(no index {chip_index})')\n\n try:\n _ = self._chips.pop(chip_index)\n except IndexError as exc:\n raise ValueError(\n f'Unable to remove index {chip_index}, out of range') from exc\n\n self.commit()\n\n @property\n def return_fields(self):\n \"\"\"Property that returns the return_fields.\"\"\"\n return self._return_fields\n\n @return_fields.setter\n def return_fields(self, return_fields):\n \"\"\"Make changes to the return fields.\"\"\"\n self._return_fields = return_fields\n self.commit()\n\n @property\n def return_size(self):\n \"\"\"Return the maximum number of entries in the return value.\"\"\"\n return self._max_entries\n\n @return_size.setter\n def return_size(self, return_size):\n \"\"\"Make changes to the maximum number of entries in the return.\"\"\"\n self._max_entries = return_size\n\n def save(self):\n \"\"\"Save the search in the database.\n\n Raises:\n ValueError: if there are values missing in order to save the query.\n \"\"\"\n if not self.name:\n raise ValueError(\n 'No name for the query saved. 
Please select a name first.')\n\n if not (self.query_string or self.query_dsl):\n raise ValueError(\n 'Need to have either a query DSL or a query string to be '\n 'able to save the search.')\n\n if not self.description:\n logger.warning(\n 'No description selected for search, saving without one')\n\n if self._resource_id:\n resource_url = (\n f'{self.api.api_root}/sketches/{self._sketch.id}/views/'\n f'{self._resource_id}/')\n else:\n resource_url = (\n f'{self.api.api_root}/sketches/{self._sketch.id}/views/')\n\n query_filter = self.query_filter\n if self.return_fields:\n sketch_data = self._sketch.data\n sketch_meta = sketch_data.get('meta', {})\n mappings = sketch_meta.get('mappings', [])\n\n use_mappings = []\n for field in self.return_fields.split(','):\n field = field.strip().lower()\n for map_entry in mappings:\n if map_entry.get('field', '').lower() == field:\n use_mappings.append(map_entry)\n query_filter['fields'] = use_mappings\n\n data = {\n 'name': self.name,\n 'description': self.description,\n 'query': self.query_string,\n 'filter': query_filter,\n 'dsl': self.query_dsl,\n 'labels': json.dumps(self.labels),\n }\n response = self.api.session.post(resource_url, json=data)\n status = error.check_return_status(response, logger)\n if not status:\n error.error_message(\n response, 'Unable to save search', error=RuntimeError)\n\n response_json = error.get_response_json(response, logger)\n search_dict = response_json.get('objects', [{}])[0]\n self._resource_id = search_dict.get('id', 0)\n return f'Saved search to ID: {self._resource_id}'\n\n @property\n def scrolling(self):\n \"\"\"Returns whether scrolling is enabled or not.\"\"\"\n return self._scrolling\n\n def scrolling_disable(self):\n \"\"\"Disables scrolling.\"\"\"\n self._scrolling = False\n\n def scrolling_enable(self):\n \"\"\"Enable scrolling.\"\"\"\n self._scrolling = True\n\n def to_dict(self):\n \"\"\"Returns a dict with the response of the query.\"\"\"\n if not self._raw_response:\n self._execute_query()\n\n return self._raw_response\n\n def to_file(self, file_name):\n \"\"\"Saves the content of the query to a file.\n\n Args:\n file_name (str): Full path to a file that will store the results\n of the query as a ZIP file. 
The ZIP file will contain a\n METADATA file and a CSV with the results from the query.\n\n Returns:\n Boolean that determines if it was successful.\n \"\"\"\n old_scrolling = self.scrolling\n self._scrolling = True\n self._execute_query(file_name=file_name)\n self._scrolling = old_scrolling\n return True\n\n def to_pandas(self):\n \"\"\"Returns a pandas DataFrame with the response of the query.\"\"\"\n if not self._raw_response:\n self._execute_query()\n\n return_list = []\n timelines = {\n t.index_name: t.name for t in self._sketch.list_timelines()}\n\n return_field_list = []\n return_fields = self._return_fields\n if return_fields:\n if return_fields.startswith('\\''):\n return_fields = return_fields[1:]\n if return_fields.endswith('\\''):\n return_fields = return_fields[:-1]\n return_field_list = return_fields.split(',')\n\n for result in self._raw_response.get('objects', []):\n source = result.get('_source', {})\n if not return_fields or '_id' in return_field_list:\n source['_id'] = result.get('_id')\n if not return_fields or '_type' in return_field_list:\n source['_type'] = result.get('_type')\n if not return_fields or '_index' in return_field_list:\n source['_index'] = result.get('_index')\n if not return_fields or '_source' in return_field_list:\n source['_source'] = timelines.get(result.get('_index'))\n\n return_list.append(source)\n\n data_frame = pandas.DataFrame(return_list)\n if 'datetime' in data_frame:\n try:\n data_frame['datetime'] = pandas.to_datetime(data_frame.datetime)\n except pandas.errors.OutOfBoundsDatetime:\n pass\n elif 'timestamp' in data_frame:\n try:\n data_frame['datetime'] = pandas.to_datetime(\n data_frame.timestamp / 1e6, utc=True, unit='s')\n except pandas.errors.OutOfBoundsDatetime:\n pass\n\n return data_frame\n\n @property\n def updated_at(self):\n \"\"\"Property that returns back the updated time of a search.\"\"\"\n return self._updated_at\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame" ] ]
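The Search class in the row above is driven through its chip helpers and property setters; a minimal usage sketch follows (the `sketch` object, the query string and the dates are illustrative assumptions, not part of the file above):

# Hypothetical usage of the Search and chip classes shown above; `sketch`
# is assumed to come from the surrounding API client.
search_obj = Search(sketch)
search_obj.query_string = 'message:login'
# Restrict results to one day; add_date_range expects '%Y-%m-%dT%H:%M:%S'.
search_obj.add_date_range('2021-06-01T00:00:00', '2021-06-01T23:59:59')
# Only match starred events, via the label chip helper.
label_chip = LabelChip()
label_chip.use_star_label()
search_obj.add_chip(label_chip)
data_frame = search_obj.to_pandas()  # executes the query, returns a DataFrame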
Siokhan/Self-Driving-Car
[ "a4419184f0378f31c4aa8efafd44beba5a5c8fc5" ]
[ "Scripts/generate_predictions.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 21 14:00:01 2020\r\n\r\n@author: Siokhan Kouassi\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport os\r\nimport csv\r\nimport pandas as pd\r\n\r\nn=3 #1 for nvidia cnn, 2 for sio cnn, 3 for speed model\r\n\r\n#load model\r\nmodel = tf.keras.models.load_model('sio_model_testing')\r\n\r\n#print(model.summary())\r\n\r\n#extract test data\r\ntest_data_dir = '../../Data/test_data/test_data'\r\ntest_file_list = np.asarray(os.listdir(test_data_dir))\r\nb = 0\r\nx_test = []\r\ntest_image_id = []\r\nfor f in test_file_list:\r\n frame = cv2.imread(test_data_dir + '/' + f)\r\n x_test.append(frame)\r\n test_image_id.append(f.split('.')[0])\r\n #print(b)\r\n b+=1\r\n\r\nx_test = np.asarray(x_test)\r\n\r\nif n==1: #if nvidiacnn is selected\r\n from nvidiacnn import nvidia_img_preprocess \r\n x_test = nvidia_img_preprocess(x_test)\r\n print('Test data pre - processed')\r\n #make predictions\r\n predictions_test = model.predict(x_test)\r\n test_image_id = pd.DataFrame(test_image_id, columns = ['image_id'])\r\n angles = pd.DataFrame(predictions_test[:,0], columns = ['angle'])\r\n speeds = pd.DataFrame(predictions_test[:,1], columns = ['speed'])\r\n #making sure no speed is above 1\r\n for index, row in speeds.iterrows():\r\n if row['speed'] > 1:\r\n speeds.iloc[index] = 1\r\n \r\n output = pd.concat([test_image_id, angles, speeds], axis = 1, sort = False)\r\n output.sort_values(by=['image_id'], inplace = True)\r\n #output.to_csv('predictions_4.csv', index = False)\r\n \r\nelif n==2: #if siocnn is selected\r\n from siocnn import sio_img_preprocess\r\n x_test = sio_img_preprocess(x_test)\r\n print('Test data pre - processed')\r\n #make predictions\r\n predictions_test = model.predict(x_test)\r\n test_image_id = pd.DataFrame(test_image_id, columns = ['image_id'])\r\n angles = pd.DataFrame(predictions_test[:,0], columns = ['angle'])\r\n speeds = pd.DataFrame(predictions_test[:,1], columns = ['speed'])\r\n #making sure no speed is above 1\r\n for index, row in speeds.iterrows():\r\n if row['speed'] > 1:\r\n speeds.iloc[index] = 1\r\n \r\n output = pd.concat([test_image_id, angles, speeds], axis = 1, sort = False)\r\n output.sort_values(by=['image_id'], inplace = True)\r\n #output.to_csv('predictions_4.csv', index = False)\r\n \r\nelif n==3: #speed model\r\n from edgescnn import edges_img_preprocess\r\n x_test = edges_img_preprocess(x_test)\r\n print('Test data pre - processed')\r\n #make predictions\r\n predictions_test = model.predict(x_test)\r\n test_image_id = pd.DataFrame(test_image_id, columns = ['image_id'])\r\n speeds = pd.DataFrame(predictions_test[:,0], columns = ['speed'])\r\n #making sure no speed is above 1\r\n for index, row in speeds.iterrows():\r\n if row['speed'] > 1:\r\n speeds.iloc[index] = 1\r\n \r\n output = pd.concat([test_image_id, speeds], axis = 1, sort = False)\r\n output.sort_values(by=['image_id'], inplace = True)\r\n #output.to_csv('predictions_4.csv', index = False)\r\n'''\r\n#make predictions\r\npredictions_test = model.predict(x_test)\r\ntest_image_id = pd.DataFrame(test_image_id, columns = ['image_id'])\r\nangles = pd.DataFrame(predictions_test[:,0], columns = ['angle'])\r\nspeeds = pd.DataFrame(predictions_test[:,1], columns = ['speed'])\r\n\r\n#making sure no speed is above 1\r\nfor index, row in speeds.iterrows():\r\n if row['speed'] > 1:\r\n speeds.iloc[index] = 1\r\n \r\noutput = pd.concat([test_image_id, angles, 
speeds], axis = 1, sort = False)\r\noutput.sort_values(by=['image_id'], inplace = True)\r\n#output.to_csv('predictions_4.csv', index = False)\r\n'''\r\nprint('predictions generated')" ]
[ [ "pandas.concat", "tensorflow.keras.models.load_model", "numpy.asarray", "pandas.DataFrame" ] ]
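Each branch of the prediction script above caps speeds at 1 with an iterrows() loop; the same effect can be obtained with a single vectorized call. A small sketch with made-up values:

import pandas as pd

# Vectorized equivalent of the per-row speed capping used in the script above.
speeds = pd.DataFrame({'speed': [0.42, 1.30, 0.97]})
speeds['speed'] = speeds['speed'].clip(upper=1)  # the 1.30 entry becomes 1.0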
caudexy/dashmap.io
[ "1f18ea201dcf017a3463a86565f42bbba4d116bc" ]
[ "website/dashmap/map_graphs.py" ]
[ "import os\nimport json\nimport pandas as pd\nimport geopandas as gpd\n\n# Dash & Plotly\nfrom dash import html\nfrom dash import dcc\nimport plotly.graph_objects as go\n\n# Environment Variables\nfrom dotenv import load_dotenv\nload_dotenv()\nMAPBOX_TOKEN = os.getenv('MAPBOX_TOKEN')\n\n# Colors used by graphs\ncolors = [\n '#4182C8', '#2E94B2',\n '#39A791', '#6FB26C',\n '#C0C15C', '#F9BD24',\n '#F3903F', '#EC6546',\n '#7D4C94', '#5B61AE'\n]\n\ndef load_datum():\n \"\"\"\n Load the main data sources.\n Args: None\n\n Returns: \n datum (object): main geodataframe\n real_estate (object): dataframe with real estate data\n \"\"\"\n datum = gpd.read_file(open(\"website/data/datum/datum.geojson\"), crs=\"WGS84\")\n datum.rename(columns = {'index': 'postal_code'}, inplace = True)\n datum.set_index('postal_code', inplace=True)\n #print(len(datum.index), len(datum), datum.head())\n\n real_estate = pd.read_csv('website/data/real-estate/real-estate.csv')\n real_estate.set_index('postcode', inplace=True)\n\n bus_stops = gpd.read_file(open(\"website/data/mobility/HSL_stations.geojson\"), crs=\"WGS84\")\n\n return datum, real_estate, bus_stops\n\ndef init_choropleth(datum, bus_stops):\n \"\"\"\n Initialize the main choropleth map.\n ---\n Args: None\n\n Returns: \n choropleth (object): Plotly Graph Object\n \"\"\"\n # Initializing the Figure\n choropleth = go.Figure()\n\n # Adding postal districts trace\n choropleth.add_trace(\n go.Choroplethmapbox(\n name=\"Postal Areas\",\n geojson=json.loads(datum.to_json()), \n locations=datum.index,\n z=datum['Inhabitants, total, 2019 (HE)'],\n colorscale=[\"#A9A9A9\", \"#A9A9A9\"],\n colorbar=dict(\n len=1, \n x=0.95,\n y=0.5, \n tickfont=dict(\n size=10, \n color= \"white\"\n )\n ),\n marker_line_width=1,\n marker_opacity=.3,\n marker_line_color= '#fff',\n hovertext = datum.index,\n text = datum['neighborhood'],\n hovertemplate = \"<b>Neighborhood:</b> %{text}<br><b>Postal Area</b>: %{hovertext}<br><extra></extra>\"\n )\n )\n\n # Adding Population\n choropleth.add_trace(\n go.Choroplethmapbox(\n name=\"Population\",\n geojson=json.loads(datum.to_json()), \n locations=datum.index,\n z=datum['Inhabitants, total, 2019 (HE)'],\n colorscale='blues',\n colorbar=dict(\n len=1, \n x=0.95,\n y=0.5, \n tickfont=dict(\n size=10, \n color= \"white\"\n )\n ),\n marker_line_width=1,\n marker_opacity=.4,\n marker_line_color= '#fff',\n visible='legendonly',\n hovertext = datum.index,\n text = datum['neighborhood'],\n hovertemplate = \"<b>Neighborhood:</b> %{text}<br><b>Postal Area</b>: %{hovertext}<br><b>Population:</b> %{z}<br><extra></extra>\"\n )\n )\n\n # Adding Average income by postal code trace\n choropleth.add_trace(\n go.Choroplethmapbox(\n name=\"Avg. Individual Income\",\n geojson=json.loads(datum.to_json()), \n locations=datum.index,\n z=datum['Average income of inhabitants, 2019 (HR)'],\n colorscale=\"Bluered\",\n colorbar=dict(\n len=1, \n x=0.95,\n y=0.5, \n tickfont=dict(\n size=10, \n color= \"white\"\n )\n ),\n marker_line_width=1,\n marker_opacity=.4,\n marker_line_color= '#fff',\n visible='legendonly',\n hovertext = datum.index,\n text = datum['neighborhood'],\n hovertemplate = \"<b>Neighborhood:</b> %{text}<br><b>Postal Area</b>: %{hovertext}<br><b>Avg. Individual Income:</b> %{z}<br><extra><extra></extra>\"\n )\n )\n\n # Adding Avg. Households Income by postal code trace\n choropleth.add_trace(\n go.Choroplethmapbox(\n name=\"Avg. 
Households Income\",\n geojson=json.loads(datum.to_json()), \n locations=datum.index,\n z=datum['Average income of households, 2019 (TR)'],\n colorscale=\"hot\",\n colorbar=dict(\n len=1, \n x=0.95,\n y=0.5, \n tickfont=dict(\n size=10, \n color= \"white\"\n )\n ),\n marker_line_width=1,\n marker_opacity=.4,\n marker_line_color= '#fff',\n visible='legendonly',\n hovertext = datum.index,\n text = datum['neighborhood'],\n hovertemplate = \"<b>Neighborhood:</b> %{text}<br><b>Postal Area</b>: %{hovertext}<br><b>Avg. Households Income:</b> %{z}<br><extra></extra>\"\n )\n )\n\n # Adding Avg. Inhabitant Age by postal code trace\n choropleth.add_trace(\n go.Choroplethmapbox(\n name=\"Avg. Inhabitant Age\",\n geojson=json.loads(datum.to_json()), \n locations=datum.index,\n z=datum['Average age of inhabitants, 2019 (HE)'],\n colorscale=\"tealgrn\",\n colorbar=dict(\n len=1, \n x=0.95,\n y=0.5, \n tickfont=dict(\n size=10, \n color= \"white\"\n )\n ),\n marker_line_width=1,\n marker_opacity=.4,\n marker_line_color= '#fff',\n visible='legendonly',\n hovertext = datum.index,\n text = datum['neighborhood'],\n hovertemplate = \"<b>Neighborhood:</b> %{text}<br><b>Postal Area</b>: %{hovertext}<br><b>Avg. Age:</b> %{z}<br><extra></extra>\"\n )\n )\n\n # Adding Avg. Household Size by postal code trace\n choropleth.add_trace(\n go.Choroplethmapbox(\n name=\"Avg. Household Size\",\n geojson=json.loads(datum.to_json()), \n locations=datum.index,\n z=datum['Average size of households, 2019 (TE)'],\n colorscale=\"aggrnyl\",\n colorbar=dict(\n len=1, \n x=0.95,\n y=0.5, \n tickfont=dict(\n size=10, \n color= \"white\"\n )\n ),\n marker_line_width=1,\n marker_opacity=.4,\n marker_line_color= '#fff',\n visible='legendonly',\n hovertext = datum.index,\n text = datum['neighborhood'],\n hovertemplate = \"<b>Neighborhood:</b> %{text}<br><b>Postal Area</b>: %{hovertext}<br><b>Avg. Household size:</b> %{z}<br><extra></extra>\"\n )\n )\n\n # Adding Mobility Nodes\n choropleth.add_trace(\n go.Scattermapbox(\n name=\"Mobility Network\",\n lat = bus_stops['geometry'].y,\n lon = bus_stops['geometry'].x,\n hovertext = bus_stops['NAMN1'],\n marker = go.scattermapbox.Marker(color=colors[1],size=5),\n marker_opacity=.6,\n visible='legendonly',\n text = bus_stops['NAMN1'],\n hovertemplate = \"<b>Name:</b> %{text}<br><extra></extra>\"\n )\n )\n\n # Update layout preferences\n choropleth.update_layout(\n clickmode='event+select',\n mapbox_style=\"dark\",\n \n autosize=True,\n margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0},\n paper_bgcolor='#303030',\n plot_bgcolor='#303030',\n legend=dict(x=0.02,\n y=0.99,\n yanchor=\"top\",\n orientation=\"v\",\n font=dict(\n family=\"Courier\",\n size=12,\n color=\"white\"\n )\n ),\n mapbox=dict( \n accesstoken=MAPBOX_TOKEN,\n bearing=0,\n center=dict(lat=60.192059, lon=24.945831),\n pitch=3,\n zoom=10,\n ),\n )\n\n # Update Trace preferences\n choropleth.update_traces(\n showlegend=True,\n selector=dict(type='choroplethmapbox'),\n unselected= dict(marker={'opacity': 0.2}),\n selected= dict(marker={'opacity': 0.5})\n )\n\n return choropleth\n\n" ]
[ [ "pandas.read_csv" ] ]
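The choropleth layers above repeat the same go.Choroplethmapbox boilerplate with only the column, name and colorscale changing; one possible refactoring is a small helper (a sketch only, not part of the repo; the example column name is taken from the traces above):

import json
import plotly.graph_objects as go

def add_choropleth_layer(fig, datum, column, name, colorscale):
    # One legend-toggleable layer per statistic, as in the traces above.
    fig.add_trace(go.Choroplethmapbox(
        name=name,
        geojson=json.loads(datum.to_json()),
        locations=datum.index,
        z=datum[column],
        colorscale=colorscale,
        marker_line_width=1,
        marker_opacity=.4,
        marker_line_color='#fff',
        visible='legendonly',
    ))

# e.g. add_choropleth_layer(choropleth, datum,
#     'Average age of inhabitants, 2019 (HE)', 'Avg. Inhabitant Age', 'tealgrn')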
RMORIOKA/tensorflow
[ "6886eb9c73940fd3b4dfadc3d6964ae9aa71eef6", "6886eb9c73940fd3b4dfadc3d6964ae9aa71eef6", "6886eb9c73940fd3b4dfadc3d6964ae9aa71eef6" ]
[ "tensorflow/contrib/distributions/python/kernel_tests/mvn_test.py", "tensorflow/contrib/distributions/python/ops/bernoulli.py", "tensorflow/contrib/training/python/training/resample.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for MultivariateNormal.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom scipy import stats\nimport tensorflow as tf\n\ndistributions = tf.contrib.distributions\n\n\nclass MultivariateNormalShapeTest(tf.test.TestCase):\n\n def _testPDFShapes(self, mvn_dist, mu, sigma):\n with self.test_session() as sess:\n mvn = mvn_dist(mu, sigma)\n x = 2 * tf.ones_like(mu)\n\n log_pdf = mvn.log_pdf(x)\n pdf = mvn.pdf(x)\n\n mu_value = np.ones([3, 3, 2])\n sigma_value = np.zeros([3, 3, 2, 2])\n sigma_value[:] = np.identity(2)\n x_value = 2. * np.ones([3, 3, 2])\n feed_dict = {mu: mu_value, sigma: sigma_value}\n\n scipy_mvn = stats.multivariate_normal(mean=mu_value[(0, 0)],\n cov=sigma_value[(0, 0)])\n expected_log_pdf = scipy_mvn.logpdf(x_value[(0, 0)])\n expected_pdf = scipy_mvn.pdf(x_value[(0, 0)])\n\n log_pdf_evaled, pdf_evaled = sess.run([log_pdf, pdf], feed_dict=feed_dict)\n self.assertAllEqual([3, 3], log_pdf_evaled.shape)\n self.assertAllEqual([3, 3], pdf_evaled.shape)\n self.assertAllClose(expected_log_pdf, log_pdf_evaled[0, 0])\n self.assertAllClose(expected_pdf, pdf_evaled[0, 0])\n\n def testPDFUnknownSize(self):\n mu = tf.placeholder(tf.float32, shape=(3 * [None]))\n sigma = tf.placeholder(tf.float32, shape=(4 * [None]))\n self._testPDFShapes(distributions.MultivariateNormalFull, mu, sigma)\n self._testPDFShapes(distributions.MultivariateNormalCholesky, mu, sigma)\n\n def testPDFUnknownShape(self):\n mu = tf.placeholder(tf.float32)\n sigma = tf.placeholder(tf.float32)\n self._testPDFShapes(distributions.MultivariateNormalFull, mu, sigma)\n self._testPDFShapes(distributions.MultivariateNormalCholesky, mu, sigma)\n\n\nclass MultivariateNormalDiagTest(tf.test.TestCase):\n \"\"\"Well tested because this is a simple override of the base class.\"\"\"\n\n def setUp(self):\n self._rng = np.random.RandomState(42)\n\n def testMean(self):\n mu = [-1.0, 1.0]\n diag = [1.0, 5.0]\n with self.test_session():\n dist = distributions.MultivariateNormalDiag(mu, diag)\n self.assertAllEqual(mu, dist.mean().eval())\n\n def testEntropy(self):\n mu = [-1.0, 1.0]\n diag = [1.0, 5.0]\n diag_mat = np.diag(diag)\n scipy_mvn = stats.multivariate_normal(mean=mu, cov=diag_mat**2)\n with self.test_session():\n dist = distributions.MultivariateNormalDiag(mu, diag)\n self.assertAllClose(scipy_mvn.entropy(), dist.entropy().eval(), atol=1e-4)\n\n def testNonmatchingMuDiagDimensionsFailsStatic(self):\n mu = [-1.0, 1.0]\n diag = [[1.0, 5.0]]\n with self.test_session():\n with self.assertRaisesRegexp(ValueError, \"shape.*should match\"):\n distributions.MultivariateNormalDiag(mu, diag)\n\n def testNonmatchingMuDiagDimensionsFailsDynamic(self):\n mu_v = [-1.0, 1.0]\n diag_v = [[1.0, 5.0]]\n\n with self.test_session():\n mu_ph = 
tf.placeholder(tf.float32, name=\"mu_ph\")\n diag_ph = tf.placeholder(tf.float32, name=\"diag_ph\")\n dist = distributions.MultivariateNormalDiag(\n mu_ph, diag_ph, validate_args=True)\n with self.assertRaisesOpError(\"mu should have rank\"):\n dist.mean().eval(feed_dict={mu_ph: mu_v, diag_ph: diag_v})\n\n def testSample(self):\n mu = [-1.0, 1.0]\n diag = [1.0, 2.0]\n with self.test_session():\n dist = distributions.MultivariateNormalDiag(mu, diag)\n samps = dist.sample(1000, seed=0).eval()\n cov_mat = tf.matrix_diag(diag).eval()**2\n\n self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)\n self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1)\n\n def testMultivariateNormalDiagWithSoftplusStDev(self):\n mu = [-1.0, 1.0]\n diag = [-1.0, -2.0]\n with self.test_session():\n dist = distributions.MultivariateNormalDiagWithSoftplusStDev(mu, diag)\n samps = dist.sample(1000, seed=0).eval()\n cov_mat = tf.matrix_diag(tf.nn.softplus(diag)).eval()**2\n\n self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)\n self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1)\n\n\nclass MultivariateNormalDiagPlusVDVTTest(tf.test.TestCase):\n \"\"\"Well tested because this is a simple override of the base class.\"\"\"\n\n def setUp(self):\n self._rng = np.random.RandomState(42)\n\n def testMean(self):\n mu = [-1.0, 1.0]\n diag_large = [1.0, 5.0]\n v = [[2.0], [3.0]]\n diag_small = [3.0]\n with self.test_session():\n dist = distributions.MultivariateNormalDiagPlusVDVT(\n mu, diag_large, v, diag_small=diag_small)\n self.assertAllEqual(mu, dist.mean().eval())\n\n def testNonmatchingMuAndSigmaDimensionFailsStatic(self):\n mu = self._rng.rand(2)\n # With this diag_large and v, the covariance is 3 x 3\n diag_large = self._rng.rand(3)\n v = self._rng.rand(3, 2) # v works with diag_large.\n with self.test_session():\n with self.assertRaisesRegexp(ValueError, \"shape.*should match\"):\n distributions.MultivariateNormalDiagPlusVDVT(\n mu, diag_large, v)\n\n def testNonmatchingMuDiagDimensionsFailsDynamic(self):\n mu = self._rng.rand(2)\n # With this diag_large and v, the covariance is 3 x 3\n diag_large = self._rng.rand(3)\n v = self._rng.rand(3, 2) # v works with diag_large.\n\n with self.test_session():\n mu_ph = tf.placeholder(tf.float32, name=\"mu_ph\")\n v_ph = tf.placeholder(tf.float32, name=\"v_ph\")\n diag_ph = tf.placeholder(tf.float32, name=\"diag_ph\")\n dist = distributions.MultivariateNormalDiagPlusVDVT(\n mu_ph, diag_ph, v_ph, validate_args=True)\n with self.assertRaisesOpError(\"mu.*cov.*shape\"):\n dist.mean().eval(feed_dict={mu_ph: mu, diag_ph: diag_large, v_ph: v})\n\n def testSample(self):\n mu = [-1.0, 1.0]\n diag_large = [1.0, 0.5]\n v = [[0.2], [0.3]]\n with self.test_session():\n dist = distributions.MultivariateNormalDiagPlusVDVT(mu, diag_large, v)\n\n samps = dist.sample(1000, seed=0).eval()\n cov_mat = dist.sigma.eval()\n\n self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)\n self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1)\n\n\nclass MultivariateNormalCholeskyTest(tf.test.TestCase):\n\n def setUp(self):\n self._rng = np.random.RandomState(42)\n\n def _random_chol(self, *shape):\n mat = self._rng.rand(*shape)\n chol = distributions.matrix_diag_transform(mat, transform=tf.nn.softplus)\n chol = tf.matrix_band_part(chol, -1, 0)\n sigma = tf.batch_matmul(chol, chol, adj_y=True)\n return chol.eval(), sigma.eval()\n\n def testNonmatchingMuSigmaFailsStatic(self):\n with self.test_session():\n mu = self._rng.rand(2)\n chol, _ = self._random_chol(2, 2, 2)\n with 
self.assertRaisesRegexp(ValueError, \"shape.*should match\"):\n distributions.MultivariateNormalCholesky(mu, chol)\n\n mu = self._rng.rand(2, 1)\n chol, _ = self._random_chol(2, 2, 2)\n with self.assertRaisesRegexp(ValueError, \"shape.*should match\"):\n distributions.MultivariateNormalCholesky(mu, chol)\n\n def testNonmatchingMuSigmaFailsDynamic(self):\n with self.test_session():\n mu_ph = tf.placeholder(tf.float64)\n chol_ph = tf.placeholder(tf.float64)\n\n mu_v = self._rng.rand(2)\n chol_v, _ = self._random_chol(2, 2, 2)\n mvn = distributions.MultivariateNormalCholesky(\n mu_ph, chol_ph, validate_args=True)\n with self.assertRaisesOpError(\"mu should have rank 1 less than cov\"):\n mvn.mean().eval(feed_dict={mu_ph: mu_v, chol_ph: chol_v})\n\n mu_v = self._rng.rand(2, 1)\n chol_v, _ = self._random_chol(2, 2, 2)\n mvn = distributions.MultivariateNormalCholesky(\n mu_ph, chol_ph, validate_args=True)\n with self.assertRaisesOpError(\"mu.shape and cov.shape.*should match\"):\n mvn.mean().eval(feed_dict={mu_ph: mu_v, chol_ph: chol_v})\n\n def testLogPDFScalarBatch(self):\n with self.test_session():\n mu = self._rng.rand(2)\n chol, sigma = self._random_chol(2, 2)\n mvn = distributions.MultivariateNormalCholesky(mu, chol)\n x = self._rng.rand(2)\n\n log_pdf = mvn.log_pdf(x)\n pdf = mvn.pdf(x)\n\n scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)\n\n expected_log_pdf = scipy_mvn.logpdf(x)\n expected_pdf = scipy_mvn.pdf(x)\n self.assertEqual((), log_pdf.get_shape())\n self.assertEqual((), pdf.get_shape())\n self.assertAllClose(expected_log_pdf, log_pdf.eval())\n self.assertAllClose(expected_pdf, pdf.eval())\n\n def testLogPDFXIsHigherRank(self):\n with self.test_session():\n mu = self._rng.rand(2)\n chol, sigma = self._random_chol(2, 2)\n mvn = distributions.MultivariateNormalCholesky(mu, chol)\n x = self._rng.rand(3, 2)\n\n log_pdf = mvn.log_pdf(x)\n pdf = mvn.pdf(x)\n\n scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)\n\n expected_log_pdf = scipy_mvn.logpdf(x)\n expected_pdf = scipy_mvn.pdf(x)\n self.assertEqual((3,), log_pdf.get_shape())\n self.assertEqual((3,), pdf.get_shape())\n self.assertAllClose(expected_log_pdf, log_pdf.eval())\n self.assertAllClose(expected_pdf, pdf.eval())\n\n def testLogPDFXLowerDimension(self):\n with self.test_session():\n mu = self._rng.rand(3, 2)\n chol, sigma = self._random_chol(3, 2, 2)\n mvn = distributions.MultivariateNormalCholesky(mu, chol)\n x = self._rng.rand(2)\n\n log_pdf = mvn.log_pdf(x)\n pdf = mvn.pdf(x)\n\n self.assertEqual((3,), log_pdf.get_shape())\n self.assertEqual((3,), pdf.get_shape())\n\n # scipy can't do batches, so just test one of them.\n scipy_mvn = stats.multivariate_normal(mean=mu[1, :], cov=sigma[1, :, :])\n expected_log_pdf = scipy_mvn.logpdf(x)\n expected_pdf = scipy_mvn.pdf(x)\n\n self.assertAllClose(expected_log_pdf, log_pdf.eval()[1])\n self.assertAllClose(expected_pdf, pdf.eval()[1])\n\n def testEntropy(self):\n with self.test_session():\n mu = self._rng.rand(2)\n chol, sigma = self._random_chol(2, 2)\n mvn = distributions.MultivariateNormalCholesky(mu, chol)\n entropy = mvn.entropy()\n\n scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)\n expected_entropy = scipy_mvn.entropy()\n self.assertEqual(entropy.get_shape(), ())\n self.assertAllClose(expected_entropy, entropy.eval())\n\n def testEntropyMultidimensional(self):\n with self.test_session():\n mu = self._rng.rand(3, 5, 2)\n chol, sigma = self._random_chol(3, 5, 2, 2)\n mvn = distributions.MultivariateNormalCholesky(mu, chol)\n entropy = 
mvn.entropy()\n\n # Scipy doesn't do batches, so test one of them.\n expected_entropy = stats.multivariate_normal(\n mean=mu[1, 1, :], cov=sigma[1, 1, :, :]).entropy()\n self.assertEqual(entropy.get_shape(), (3, 5))\n self.assertAllClose(expected_entropy, entropy.eval()[1, 1])\n\n def testSample(self):\n with self.test_session():\n mu = self._rng.rand(2)\n chol, sigma = self._random_chol(2, 2)\n\n n = tf.constant(100000)\n mvn = distributions.MultivariateNormalCholesky(mu, chol)\n samples = mvn.sample(n, seed=137)\n sample_values = samples.eval()\n self.assertEqual(samples.get_shape(), (100000, 2))\n self.assertAllClose(sample_values.mean(axis=0), mu, atol=1e-2)\n self.assertAllClose(np.cov(sample_values, rowvar=0), sigma, atol=1e-1)\n\n def testSampleWithSampleShape(self):\n with self.test_session():\n mu = self._rng.rand(3, 5, 2)\n chol, sigma = self._random_chol(3, 5, 2, 2)\n\n mvn = distributions.MultivariateNormalCholesky(mu, chol)\n samples_val = mvn.sample((10, 11, 12), seed=137).eval()\n\n # Check sample shape\n self.assertEqual((10, 11, 12, 3, 5, 2), samples_val.shape)\n\n # Check sample means\n x = samples_val[:, :, :, 1, 1, :]\n self.assertAllClose(\n x.reshape(10 * 11 * 12, 2).mean(axis=0),\n mu[1, 1], atol=1e-2)\n\n # Check that log_prob(samples) works\n log_prob_val = mvn.log_prob(samples_val).eval()\n x_log_pdf = log_prob_val[:, :, :, 1, 1]\n expected_log_pdf = stats.multivariate_normal(\n mean=mu[1, 1, :], cov=sigma[1, 1, :, :]).logpdf(x)\n self.assertAllClose(expected_log_pdf, x_log_pdf)\n\n def testSampleMultiDimensional(self):\n with self.test_session():\n mu = self._rng.rand(3, 5, 2)\n chol, sigma = self._random_chol(3, 5, 2, 2)\n\n mvn = distributions.MultivariateNormalCholesky(mu, chol)\n n = tf.constant(100000)\n samples = mvn.sample(n, seed=137)\n sample_values = samples.eval()\n\n self.assertEqual(samples.get_shape(), (100000, 3, 5, 2))\n self.assertAllClose(\n sample_values[:, 1, 1, :].mean(axis=0),\n mu[1, 1, :], atol=0.05)\n self.assertAllClose(\n np.cov(sample_values[:, 1, 1, :], rowvar=0),\n sigma[1, 1, :, :], atol=1e-1)\n\n def testShapes(self):\n with self.test_session():\n mu = self._rng.rand(3, 5, 2)\n chol, _ = self._random_chol(3, 5, 2, 2)\n\n mvn = distributions.MultivariateNormalCholesky(mu, chol)\n\n # Shapes known at graph construction time.\n self.assertEqual((2,), tuple(mvn.get_event_shape().as_list()))\n self.assertEqual((3, 5), tuple(mvn.get_batch_shape().as_list()))\n\n # Shapes known at runtime.\n self.assertEqual((2,), tuple(mvn.event_shape().eval()))\n self.assertEqual((3, 5), tuple(mvn.batch_shape().eval()))\n\n\nclass MultivariateNormalFullTest(tf.test.TestCase):\n\n def setUp(self):\n self._rng = np.random.RandomState(42)\n\n def _random_mu_and_sigma(self, batch_shape, event_shape):\n # This ensures sigma is positive def.\n mat_shape = batch_shape + event_shape + event_shape\n mat = self._rng.randn(*mat_shape)\n sigma = tf.batch_matmul(mat, mat, adj_y=True).eval()\n\n mu_shape = batch_shape + event_shape\n mu = self._rng.randn(*mu_shape)\n\n return mu, sigma\n\n def testKLNonBatch(self):\n batch_shape = ()\n event_shape = (2,)\n with self.test_session():\n mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)\n mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)\n mvn_a = distributions.MultivariateNormalFull(mu_a, sigma_a)\n mvn_b = distributions.MultivariateNormalFull(mu_b, sigma_b)\n\n kl = distributions.kl(mvn_a, mvn_b)\n self.assertEqual(batch_shape, kl.get_shape())\n\n kl_v = kl.eval()\n expected_kl = 
_compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b)\n self.assertAllClose(expected_kl, kl_v)\n\n def testKLBatch(self):\n batch_shape = (2,)\n event_shape = (3,)\n with self.test_session():\n mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)\n mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)\n mvn_a = distributions.MultivariateNormalFull(mu_a, sigma_a)\n mvn_b = distributions.MultivariateNormalFull(mu_b, sigma_b)\n\n kl = distributions.kl(mvn_a, mvn_b)\n self.assertEqual(batch_shape, kl.get_shape())\n\n kl_v = kl.eval()\n expected_kl_0 = _compute_non_batch_kl(\n mu_a[0, :], sigma_a[0, :, :], mu_b[0, :], sigma_b[0, :])\n expected_kl_1 = _compute_non_batch_kl(\n mu_a[1, :], sigma_a[1, :, :], mu_b[1, :], sigma_b[1, :])\n self.assertAllClose(expected_kl_0, kl_v[0])\n self.assertAllClose(expected_kl_1, kl_v[1])\n\n def testKLTwoIdenticalDistributionsIsZero(self):\n batch_shape = (2,)\n event_shape = (3,)\n with self.test_session():\n mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)\n mvn_a = distributions.MultivariateNormalFull(mu_a, sigma_a)\n\n # Should be zero since KL(p || p) = 0.\n kl = distributions.kl(mvn_a, mvn_a)\n self.assertEqual(batch_shape, kl.get_shape())\n\n kl_v = kl.eval()\n self.assertAllClose(np.zeros(*batch_shape), kl_v)\n\n\ndef _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):\n \"\"\"Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b).\"\"\"\n # Check using numpy operations\n # This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.\n # So it is important to also check that KL(mvn, mvn) = 0.\n sigma_b_inv = np.linalg.inv(sigma_b)\n\n t = np.trace(sigma_b_inv.dot(sigma_a))\n q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)\n k = mu_a.shape[0]\n l = np.log(np.linalg.det(sigma_b) / np.linalg.det(sigma_a))\n\n return 0.5 * (t + q - k + l)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The Bernoulli distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.distributions.python.ops import distribution\nfrom tensorflow.contrib.distributions.python.ops import distribution_util\nfrom tensorflow.contrib.distributions.python.ops import kullback_leibler\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import random_ops\n\n\nclass Bernoulli(distribution.Distribution):\n \"\"\"Bernoulli distribution.\n\n The Bernoulli distribution is parameterized by p, the probability of a\n positive event.\n \"\"\"\n\n def __init__(self,\n logits=None,\n p=None,\n dtype=dtypes.int32,\n validate_args=False,\n allow_nan_stats=True,\n name=\"Bernoulli\"):\n \"\"\"Construct Bernoulli distributions.\n\n Args:\n logits: An N-D `Output` representing the log-odds\n of a positive event. Each entry in the `Output` parametrizes\n an independent Bernoulli distribution where the probability of an event\n is sigmoid(logits). Only one of `logits` or `p` should be passed in.\n p: An N-D `Output` representing the probability of a positive\n event. Each entry in the `Output` parameterizes an independent\n Bernoulli distribution. Only one of `logits` or `p` should be passed\n in.\n dtype: dtype for samples.\n validate_args: `Boolean`, default `False`. Whether to validate that\n `0 <= p <= 1`. If `validate_args` is `False`, and the inputs are\n invalid, methods like `log_pmf` may return `NaN` values.\n allow_nan_stats: `Boolean`, default `True`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n name: A name for this distribution.\n\n Raises:\n ValueError: If p and logits are passed, or if neither are passed.\n \"\"\"\n parameters = locals()\n parameters.pop(\"self\")\n with ops.name_scope(name) as ns:\n self._logits, self._p = distribution_util.get_logits_and_prob(\n logits=logits, p=p, validate_args=validate_args)\n with ops.name_scope(\"q\"):\n self._q = 1. 
- self._p\n super(Bernoulli, self).__init__(\n dtype=dtype,\n is_continuous=False,\n is_reparameterized=False,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n graph_parents=[self._p, self._q, self._logits],\n name=ns)\n\n @staticmethod\n def _param_shapes(sample_shape):\n return {\"logits\": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}\n\n @property\n def logits(self):\n \"\"\"Log-odds of success.\"\"\"\n return self._logits\n\n @property\n def p(self):\n \"\"\"Probability of success.\"\"\"\n return self._p\n\n @property\n def q(self):\n \"\"\"1-p.\"\"\"\n return self._q\n\n def _batch_shape(self):\n return array_ops.shape(self._logits)\n\n def _get_batch_shape(self):\n return self._logits.get_shape()\n\n def _event_shape(self):\n return array_ops.constant([], dtype=dtypes.int32)\n\n def _get_event_shape(self):\n return tensor_shape.scalar()\n\n def _sample_n(self, n, seed=None):\n new_shape = array_ops.concat(0, ([n], self.batch_shape()))\n uniform = random_ops.random_uniform(\n new_shape, seed=seed, dtype=self.p.dtype)\n sample = math_ops.less(uniform, self.p)\n return math_ops.cast(sample, self.dtype)\n\n def _log_prob(self, event):\n # TODO(jaana): The current sigmoid_cross_entropy_with_logits has\n # inconsistent behavior for logits = inf/-inf.\n event = ops.convert_to_tensor(event, name=\"event\")\n event = math_ops.cast(event, self.logits.dtype)\n logits = self.logits\n # sigmoid_cross_entropy_with_logits doesn't broadcast shape,\n # so we do this here.\n # TODO(b/30637701): Check dynamic shape, and don't broadcast if the\n # dynamic shapes are the same.\n if (not event.get_shape().is_fully_defined() or\n not logits.get_shape().is_fully_defined() or\n event.get_shape() != logits.get_shape()):\n logits = array_ops.ones_like(event) * logits\n event = array_ops.ones_like(logits) * event\n return -nn.sigmoid_cross_entropy_with_logits(logits, event)\n\n def _prob(self, event):\n return math_ops.exp(self._log_prob(event))\n\n def _entropy(self):\n return (-self.logits * (math_ops.sigmoid(self.logits) - 1) +\n nn.softplus(-self.logits))\n\n def _mean(self):\n return array_ops.identity(self.p)\n\n def _variance(self):\n return self.q * self.p\n\n def _std(self):\n return math_ops.sqrt(self._variance())\n\n def _mode(self):\n \"\"\"Returns `1` if `p > 1-p` and `0` otherwise.\"\"\"\n return math_ops.cast(self.p > self.q, self.dtype)\n\n\nclass BernoulliWithSigmoidP(Bernoulli):\n \"\"\"Bernoulli with `p = sigmoid(p)`.\"\"\"\n\n def __init__(self,\n p=None,\n dtype=dtypes.int32,\n validate_args=False,\n allow_nan_stats=True,\n name=\"BernoulliWithSigmoidP\"):\n parameters = locals()\n parameters.pop(\"self\")\n with ops.name_scope(name) as ns:\n super(BernoulliWithSigmoidP, self).__init__(\n p=nn.sigmoid(p),\n dtype=dtype,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n name=ns)\n self._parameters = parameters\n\n\n@kullback_leibler.RegisterKL(Bernoulli, Bernoulli)\ndef _kl_bernoulli_bernoulli(a, b, name=None):\n \"\"\"Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.\n\n Args:\n a: instance of a Bernoulli distribution object.\n b: instance of a Bernoulli distribution object.\n name: (optional) Name to use for created operations.\n default is \"kl_bernoulli_bernoulli\".\n\n Returns:\n Batchwise KL(a || b)\n \"\"\"\n with ops.name_scope(name, \"kl_bernoulli_bernoulli\", [a.logits, b.logits]):\n return (math_ops.sigmoid(a.logits) * (-nn.softplus(-a.logits) +\n nn.softplus(-b.logits)) +\n 
math_ops.sigmoid(-a.logits) * (-nn.softplus(a.logits) +\n nn.softplus(b.logits)))\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Resampling methods for batches of tensors.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.training import moving_averages\n\n\ndef resample_at_rate(inputs, rates, scope=None, seed=None, back_prop=False):\n \"\"\"Given `inputs` tensors, stochastically resamples each at a given rate.\n\n For example, if the inputs are `[[a1, a2], [b1, b2]]` and the rates\n tensor contains `[3, 1]`, then the return value may look like `[[a1,\n a2, a1, a1], [b1, b2, b1, b1]]`. However, many other outputs are\n possible, since this is stochastic -- averaged over many repeated\n calls, each set of inputs should appear in the output `rate` times\n the number of invocations.\n\n Uses Knuth's method to generate samples from the poisson\n distribution (but instead of just incrementing a count, actually\n emits the input); this is described at\n https://en.wikipedia.org/wiki/Poisson_distribution in the section on\n generating Poisson-distributed random variables.\n\n Note that this method is not appropriate for large rate values: with\n float16 it will stop performing correctly for rates above 9.17;\n float32, 87; and float64, 708. (These are the base-e versions of the\n minimum representable exponent for each type.)\n\n Args:\n inputs: A list of tensors, each of which has a shape of `[batch_size, ...]`\n rates: A tensor of shape `[batch_size]` containing the resampling rates\n for each input.\n scope: Scope for the op.\n seed: Random seed to use.\n back_prop: Whether to allow back-propagation through this op.\n\n Returns:\n Selections from the input tensors.\n\n \"\"\"\n # TODO(shoutis): Refactor, splitting this up into a poisson draw and a repeat.\n\n # What this implementation does is loop, simulating the intervals\n # between events by drawing from the exponential distribution\n # (`-log(random_uniform)/rate`), and emitting another copy of the\n # corresponding input so long as sum(intervals) < 1. 
However, that\n # condition can be transformed into the easier-to-compute condition\n # `product(random_uniforms) > e^-rate`.\n with ops.name_scope(scope, default_name='resample_at_rate', values=inputs):\n floor_vals = math_ops.exp(-rates)\n\n def _body(chosen_inputs, running_products, idx, output_count):\n \"\"\"Body of the resampling loop.\"\"\"\n # Update the running product\n next_running_products = running_products * random_ops.random_uniform(\n shape=array_ops.shape(running_products), seed=seed)\n\n # Append inputs which still pass the condition:\n indexes = array_ops.reshape(\n array_ops.where(next_running_products > floor_vals), [-1])\n\n next_output_count = output_count + array_ops.shape(indexes)[0]\n\n next_chosen_inputs = [\n chosen_inputs[i].write(idx, array_ops.gather(inputs[i], indexes))\n for i in range(len(inputs))]\n\n return [next_chosen_inputs,\n next_running_products,\n idx + 1,\n next_output_count]\n\n def _cond(unused_chosen_inputs, running_products, unused_idx, unused_count):\n \"\"\"Resampling loop exit condition.\"\"\"\n return math_ops.reduce_any(running_products > floor_vals)\n\n initial_chosen_inputs = [\n tensor_array_ops.TensorArray(dtype=x.dtype, size=0, dynamic_size=True)\n for x in inputs]\n\n resampled_inputs, _, unused_idx, count = control_flow_ops.while_loop(\n _cond,\n _body,\n loop_vars=[initial_chosen_inputs,\n array_ops.ones_like(rates), # initial running_products\n 0, # initial idx\n 0], # initial count\n back_prop=back_prop)\n\n # Work around TensorArray \"Currently only static shapes are supported when\n # concatenating zero-size TensorArrays\" limitation:\n def _empty_tensor_like(t):\n result = array_ops.zeros(\n shape=(array_ops.concat(0, [[0], array_ops.shape(t)[1:]])),\n dtype=t.dtype)\n if t.get_shape().ndims is not None:\n # preserve known shapes\n result.set_shape([0] + t.get_shape()[1:].as_list())\n return result\n\n return control_flow_ops.cond(\n count > 0,\n lambda: [tensor_array.concat() for tensor_array in resampled_inputs],\n lambda: [_empty_tensor_like(t) for t in inputs])\n\n\ndef weighted_resample(inputs, weights, overall_rate, scope=None,\n mean_decay=0.999, warmup=10, seed=None):\n \"\"\"Performs an approximate weighted resampling of `inputs`.\n\n This method chooses elements from `inputs` where each item's rate of\n selection is proportional to its value in `weights`, and the average\n rate of selection across all inputs (and many invocations!) is\n `overall_rate`.\n\n Args:\n inputs: A list of tensors whose first dimension is `batch_size`.\n weights: A `[batch_size]`-shaped tensor with each batch member's weight.\n overall_rate: Desired overall rate of resampling.\n scope: Scope to use for the op.\n mean_decay: How quickly to decay the running estimate of the mean weight.\n warmup: Until the resulting tensor has been evaluated `warmup`\n times, the resampling method uses the true mean over all calls\n as its weight estimate, rather than a decayed mean.\n seed: Random seed.\n\n Returns:\n A list of tensors exactly like `inputs`, but with an unknown (and\n possibly zero) first dimension.\n A tensor containing the effective resampling rate used for each output.\n\n \"\"\"\n # Algorithm: Just compute rates as weights/mean_weight *\n # overall_rate. 
This way the average weight corresponds to the\n # overall rate, and a weight twice the average has twice the rate,\n # etc.\n with ops.name_scope(scope, 'weighted_resample', inputs) as opscope:\n # First: Maintain a running estimated mean weight, with decay\n # adjusted (by also maintaining an invocation count) during the\n # warmup period so that at the beginning, there aren't too many\n # zeros mixed in, throwing the average off.\n\n with variable_scope.variable_scope(scope, 'estimate_mean', inputs):\n count_so_far = variable_scope.get_local_variable(\n 'resample_count', initializer=0)\n\n estimated_mean = variable_scope.get_local_variable(\n 'estimated_mean', initializer=0.0)\n\n count = count_so_far.assign_add(1)\n real_decay = math_ops.minimum(\n math_ops.truediv((count - 1), math_ops.minimum(count, warmup)),\n mean_decay)\n\n batch_mean = math_ops.reduce_mean(weights)\n mean = moving_averages.assign_moving_average(\n estimated_mean, batch_mean, real_decay, zero_debias=False)\n\n # Then, normalize the weights into rates using the mean weight and\n # overall target rate:\n rates = weights * overall_rate / mean\n\n results = resample_at_rate([rates] + inputs, rates,\n scope=opscope, seed=seed, back_prop=False)\n\n return (results[1:], results[0])\n" ]
[ [ "tensorflow.matrix_band_part", "tensorflow.batch_matmul", "tensorflow.matrix_diag", "numpy.cov", "numpy.zeros", "numpy.random.RandomState", "numpy.linalg.inv", "numpy.ones", "tensorflow.ones_like", "numpy.linalg.det", "numpy.identity", "tensorflow.constant", "scipy.stats.multivariate_normal", "tensorflow.placeholder", "tensorflow.nn.softplus", "tensorflow.test.main", "numpy.diag" ], [ "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.python.ops.math_ops.less", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.nn.sigmoid_cross_entropy_with_logits", "tensorflow.python.ops.nn.sigmoid", "tensorflow.contrib.distributions.python.ops.kullback_leibler.RegisterKL", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.math_ops.sigmoid", "tensorflow.contrib.distributions.python.ops.distribution_util.get_logits_and_prob", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.nn.softplus", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.constant", "tensorflow.python.ops.random_ops.random_uniform" ], [ "tensorflow.python.ops.variable_scope.get_local_variable", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.ops.math_ops.reduce_any", "tensorflow.python.ops.math_ops.reduce_mean", "tensorflow.python.ops.math_ops.minimum", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.math_ops.exp", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.tensor_array_ops.TensorArray", "tensorflow.python.training.moving_averages.assign_moving_average" ] ]
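The _compute_non_batch_kl helper in the test file above is the closed-form Gaussian KL divergence, 0.5 * (trace(sigma_b^-1 sigma_a) + (mu_b - mu_a)^T sigma_b^-1 (mu_b - mu_a) - k + log(det(sigma_b) / det(sigma_a))). A quick NumPy check with made-up parameters that it vanishes for identical distributions, as testKLTwoIdenticalDistributionsIsZero expects:

import numpy as np

# KL(p || p) must be exactly zero: the trace term equals k, while the
# quadratic and log-determinant terms both collapse to zero.
mu = np.array([0.5, -1.0])
sigma = np.array([[2.0, 0.3], [0.3, 1.0]])
sigma_inv = np.linalg.inv(sigma)
t = np.trace(sigma_inv.dot(sigma))                       # == k (here 2)
q = (mu - mu).dot(sigma_inv).dot(mu - mu)                # == 0
l = np.log(np.linalg.det(sigma) / np.linalg.det(sigma))  # == 0
kl = 0.5 * (t + q - mu.shape[0] + l)
assert abs(kl) < 1e-12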
Udolf15/recommedMeMovies
[ "be5ae74acd98e3f93beaaa5bb55623974fb24247", "952abcf471b819b6b6dfa23b6d5dd248155f9dbf", "9444dce96954c546333d5aecc92a06c3bfd19aa5", "9444dce96954c546333d5aecc92a06c3bfd19aa5" ]
[ "env/lib/python3.5/site-packages/pandas/core/arrays/numpy_.py", "env/lib/python3.5/site-packages/sklearn/feature_extraction/tests/test_text.py", "env/lib/python3.5/site-packages/sklearn/gaussian_process/kernels.py", "env/lib/python3.5/site-packages/sklearn/utils/arpack.py" ]
[ "import numbers\n\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._validators import validate_fillna_kwargs\n\nfrom pandas.core.dtypes.dtypes import ExtensionDtype\nfrom pandas.core.dtypes.generic import ABCIndexClass, ABCSeries\nfrom pandas.core.dtypes.inference import is_array_like, is_list_like\n\nfrom pandas import compat\nfrom pandas.core import nanops\nfrom pandas.core.missing import backfill_1d, pad_1d\n\nfrom .base import ExtensionArray, ExtensionOpsMixin\n\n\nclass PandasDtype(ExtensionDtype):\n \"\"\"\n A Pandas ExtensionDtype for NumPy dtypes.\n\n .. versionadded:: 0.24.0\n\n This is mostly for internal compatibility, and is not especially\n useful on its own.\n\n Parameters\n ----------\n dtype : numpy.dtype\n \"\"\"\n _metadata = ('_dtype',)\n\n def __init__(self, dtype):\n dtype = np.dtype(dtype)\n self._dtype = dtype\n self._name = dtype.name\n self._type = dtype.type\n\n def __repr__(self):\n return \"PandasDtype({!r})\".format(self.name)\n\n @property\n def numpy_dtype(self):\n \"\"\"The NumPy dtype this PandasDtype wraps.\"\"\"\n return self._dtype\n\n @property\n def name(self):\n return self._name\n\n @property\n def type(self):\n return self._type\n\n @property\n def _is_numeric(self):\n # exclude object, str, unicode, void.\n return self.kind in set('biufc')\n\n @property\n def _is_boolean(self):\n return self.kind == 'b'\n\n @classmethod\n def construct_from_string(cls, string):\n return cls(np.dtype(string))\n\n def construct_array_type(cls):\n return PandasArray\n\n @property\n def kind(self):\n return self._dtype.kind\n\n @property\n def itemsize(self):\n \"\"\"The element size of this data-type object.\"\"\"\n return self._dtype.itemsize\n\n\n# TODO(NumPy1.13): remove this\n# Compat for NumPy 1.12, which doesn't provide NDArrayOperatorsMixin\n# or __array_ufunc__, so those operations won't be available to people\n# on older NumPys.\n#\n# We would normally write this as bases=(...), then \"class Foo(*bases):\n# but Python2 doesn't allow unpacking tuples in the class statement.\n# So, we fall back to \"object\", to avoid writing a metaclass.\ntry:\n from numpy.lib.mixins import NDArrayOperatorsMixin\nexcept ImportError:\n NDArrayOperatorsMixin = object\n\n\nclass PandasArray(ExtensionArray, ExtensionOpsMixin, NDArrayOperatorsMixin):\n \"\"\"\n A pandas ExtensionArray for NumPy data.\n\n .. versionadded :: 0.24.0\n\n This is mostly for internal compatibility, and is not especially\n useful on its own.\n\n Parameters\n ----------\n values : ndarray\n The NumPy ndarray to wrap. Must be 1-dimensional.\n copy : bool, default False\n Whether to copy `values`.\n\n Notes\n -----\n Operations like ``+`` and applying ufuncs requires NumPy>=1.13.\n \"\"\"\n # If you're wondering why pd.Series(cls) doesn't put the array in an\n # ExtensionBlock, search for `ABCPandasArray`. 
We check for\n # that _typ to ensure that that users don't unnecessarily use EAs inside\n # pandas internals, which turns off things like block consolidation.\n _typ = \"npy_extension\"\n __array_priority__ = 1000\n\n # ------------------------------------------------------------------------\n # Constructors\n\n def __init__(self, values, copy=False):\n if isinstance(values, type(self)):\n values = values._ndarray\n if not isinstance(values, np.ndarray):\n raise ValueError(\"'values' must be a NumPy array.\")\n\n if values.ndim != 1:\n raise ValueError(\"PandasArray must be 1-dimensional.\")\n\n if copy:\n values = values.copy()\n\n self._ndarray = values\n self._dtype = PandasDtype(values.dtype)\n\n @classmethod\n def _from_sequence(cls, scalars, dtype=None, copy=False):\n if isinstance(dtype, PandasDtype):\n dtype = dtype._dtype\n\n result = np.asarray(scalars, dtype=dtype)\n if copy and result is scalars:\n result = result.copy()\n return cls(result)\n\n @classmethod\n def _from_factorized(cls, values, original):\n return cls(values)\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n return cls(np.concatenate(to_concat))\n\n # ------------------------------------------------------------------------\n # Data\n\n @property\n def dtype(self):\n return self._dtype\n\n # ------------------------------------------------------------------------\n # NumPy Array Interface\n\n def __array__(self, dtype=None):\n return np.asarray(self._ndarray, dtype=dtype)\n\n _HANDLED_TYPES = (np.ndarray, numbers.Number)\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n # Lightly modified version of\n # https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/\\\n # numpy.lib.mixins.NDArrayOperatorsMixin.html\n # The primary modification is not boxing scalar return values\n # in PandasArray, since pandas' ExtensionArrays are 1-d.\n out = kwargs.get('out', ())\n for x in inputs + out:\n # Only support operations with instances of _HANDLED_TYPES.\n # Use PandasArray instead of type(self) for isinstance to\n # allow subclasses that don't override __array_ufunc__ to\n # handle PandasArray objects.\n if not isinstance(x, self._HANDLED_TYPES + (PandasArray,)):\n return NotImplemented\n\n # Defer to the implementation of the ufunc on unwrapped values.\n inputs = tuple(x._ndarray if isinstance(x, PandasArray) else x\n for x in inputs)\n if out:\n kwargs['out'] = tuple(\n x._ndarray if isinstance(x, PandasArray) else x\n for x in out)\n result = getattr(ufunc, method)(*inputs, **kwargs)\n\n if type(result) is tuple and len(result):\n # multiple return values\n if not lib.is_scalar(result[0]):\n # re-box array-like results\n return tuple(type(self)(x) for x in result)\n else:\n # but not scalar reductions\n return result\n elif method == 'at':\n # no return value\n return None\n else:\n # one return value\n if not lib.is_scalar(result):\n # re-box array-like results, but not scalar reductions\n result = type(self)(result)\n return result\n\n # ------------------------------------------------------------------------\n # Pandas ExtensionArray Interface\n\n def __getitem__(self, item):\n if isinstance(item, type(self)):\n item = item._ndarray\n\n result = self._ndarray[item]\n if not lib.is_scalar(item):\n result = type(self)(result)\n return result\n\n def __setitem__(self, key, value):\n from pandas.core.internals.arrays import extract_array\n\n value = extract_array(value, extract_numpy=True)\n\n if not lib.is_scalar(key) and is_list_like(key):\n key = np.asarray(key)\n\n if not 
lib.is_scalar(value):\n value = np.asarray(value)\n\n values = self._ndarray\n t = np.result_type(value, values)\n if t != self._ndarray.dtype:\n values = values.astype(t, casting='safe')\n values[key] = value\n self._dtype = PandasDtype(t)\n self._ndarray = values\n else:\n self._ndarray[key] = value\n\n def __len__(self):\n return len(self._ndarray)\n\n @property\n def nbytes(self):\n return self._ndarray.nbytes\n\n def isna(self):\n from pandas import isna\n\n return isna(self._ndarray)\n\n def fillna(self, value=None, method=None, limit=None):\n # TODO(_values_for_fillna): remove this\n value, method = validate_fillna_kwargs(value, method)\n\n mask = self.isna()\n\n if is_array_like(value):\n if len(value) != len(self):\n raise ValueError(\"Length of 'value' does not match. Got ({}) \"\n \" expected {}\".format(len(value), len(self)))\n value = value[mask]\n\n if mask.any():\n if method is not None:\n func = pad_1d if method == 'pad' else backfill_1d\n new_values = func(self._ndarray, limit=limit,\n mask=mask)\n new_values = self._from_sequence(new_values, dtype=self.dtype)\n else:\n # fill with value\n new_values = self.copy()\n new_values[mask] = value\n else:\n new_values = self.copy()\n return new_values\n\n def take(self, indices, allow_fill=False, fill_value=None):\n from pandas.core.algorithms import take\n\n result = take(self._ndarray, indices, allow_fill=allow_fill,\n fill_value=fill_value)\n return type(self)(result)\n\n def copy(self, deep=False):\n return type(self)(self._ndarray.copy())\n\n def _values_for_argsort(self):\n return self._ndarray\n\n def _values_for_factorize(self):\n return self._ndarray, -1\n\n def unique(self):\n from pandas import unique\n\n return type(self)(unique(self._ndarray))\n\n # ------------------------------------------------------------------------\n # Reductions\n\n def _reduce(self, name, skipna=True, **kwargs):\n meth = getattr(self, name, None)\n if meth:\n return meth(skipna=skipna, **kwargs)\n else:\n msg = (\n \"'{}' does not implement reduction '{}'\"\n )\n raise TypeError(msg.format(type(self).__name__, name))\n\n def any(self, axis=None, out=None, keepdims=False, skipna=True):\n nv.validate_any((), dict(out=out, keepdims=keepdims))\n return nanops.nanany(self._ndarray, axis=axis, skipna=skipna)\n\n def all(self, axis=None, out=None, keepdims=False, skipna=True):\n nv.validate_all((), dict(out=out, keepdims=keepdims))\n return nanops.nanall(self._ndarray, axis=axis, skipna=skipna)\n\n def min(self, axis=None, out=None, keepdims=False, skipna=True):\n nv.validate_min((), dict(out=out, keepdims=keepdims))\n return nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)\n\n def max(self, axis=None, out=None, keepdims=False, skipna=True):\n nv.validate_max((), dict(out=out, keepdims=keepdims))\n return nanops.nanmax(self._ndarray, axis=axis, skipna=skipna)\n\n def sum(self, axis=None, dtype=None, out=None, keepdims=False,\n initial=None, skipna=True, min_count=0):\n nv.validate_sum((), dict(dtype=dtype, out=out, keepdims=keepdims,\n initial=initial))\n return nanops.nansum(self._ndarray, axis=axis, skipna=skipna,\n min_count=min_count)\n\n def prod(self, axis=None, dtype=None, out=None, keepdims=False,\n initial=None, skipna=True, min_count=0):\n nv.validate_prod((), dict(dtype=dtype, out=out, keepdims=keepdims,\n initial=initial))\n return nanops.nanprod(self._ndarray, axis=axis, skipna=skipna,\n min_count=min_count)\n\n def mean(self, axis=None, dtype=None, out=None, keepdims=False,\n skipna=True):\n nv.validate_mean((), 
dict(dtype=dtype, out=out, keepdims=keepdims))\n return nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)\n\n def median(self, axis=None, out=None, overwrite_input=False,\n keepdims=False, skipna=True):\n nv.validate_median((), dict(out=out, overwrite_input=overwrite_input,\n keepdims=keepdims))\n return nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)\n\n def std(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False,\n skipna=True):\n nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,\n keepdims=keepdims),\n fname='std')\n return nanops.nanstd(self._ndarray, axis=axis, skipna=skipna,\n ddof=ddof)\n\n def var(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False,\n skipna=True):\n nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,\n keepdims=keepdims),\n fname='var')\n return nanops.nanvar(self._ndarray, axis=axis, skipna=skipna,\n ddof=ddof)\n\n def sem(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False,\n skipna=True):\n nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,\n keepdims=keepdims),\n fname='sem')\n return nanops.nansem(self._ndarray, axis=axis, skipna=skipna,\n ddof=ddof)\n\n def kurt(self, axis=None, dtype=None, out=None, keepdims=False,\n skipna=True):\n nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,\n keepdims=keepdims),\n fname='kurt')\n return nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)\n\n def skew(self, axis=None, dtype=None, out=None, keepdims=False,\n skipna=True):\n nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,\n keepdims=keepdims),\n fname='skew')\n return nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)\n\n # ------------------------------------------------------------------------\n # Additional Methods\n def to_numpy(self, dtype=None, copy=False):\n \"\"\"\n Convert the PandasArray to a :class:`numpy.ndarray`.\n\n By default, this requires no coercion or copying of data.\n\n Parameters\n ----------\n dtype : numpy.dtype\n The NumPy dtype to pass to :func:`numpy.asarray`.\n copy : bool, default False\n Whether to copy the underlying data.\n\n Returns\n -------\n ndarray\n \"\"\"\n result = np.asarray(self._ndarray, dtype=dtype)\n if copy and result is self._ndarray:\n result = result.copy()\n\n return result\n\n # ------------------------------------------------------------------------\n # Ops\n\n def __invert__(self):\n return type(self)(~self._ndarray)\n\n @classmethod\n def _create_arithmetic_method(cls, op):\n def arithmetic_method(self, other):\n if isinstance(other, (ABCIndexClass, ABCSeries)):\n return NotImplemented\n\n elif isinstance(other, cls):\n other = other._ndarray\n\n with np.errstate(all=\"ignore\"):\n result = op(self._ndarray, other)\n\n if op is divmod:\n a, b = result\n return cls(a), cls(b)\n\n return cls(result)\n\n return compat.set_function_name(arithmetic_method,\n \"__{}__\".format(op.__name__),\n cls)\n\n _create_comparison_method = _create_arithmetic_method\n\n\nPandasArray._add_arithmetic_ops()\nPandasArray._add_comparison_ops()\n", "from __future__ import unicode_literals\nimport re\nimport warnings\n\nimport pytest\nfrom scipy import sparse\n\nfrom sklearn.externals.six import PY2\nfrom sklearn.feature_extraction.text import strip_tags\nfrom sklearn.feature_extraction.text import strip_accents_unicode\nfrom sklearn.feature_extraction.text import strip_accents_ascii\n\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text 
import TfidfTransformer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom sklearn.feature_extraction.text import ENGLISH_STOP_WORDS\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\n\nfrom sklearn.base import clone\n\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom numpy.testing import assert_array_equal\nfrom sklearn.utils import IS_PYPY\nfrom sklearn.utils.testing import (assert_equal, assert_false,\n assert_not_equal, assert_almost_equal,\n assert_in, assert_less, assert_greater,\n assert_warns_message, assert_raise_message,\n clean_warning_registry, ignore_warnings,\n SkipTest, assert_raises, assert_no_warnings,\n fails_if_pypy, assert_allclose_dense_sparse,\n skip_if_32bit)\nfrom sklearn.utils.fixes import _Mapping as Mapping\nfrom collections import defaultdict\nfrom functools import partial\nimport pickle\nfrom io import StringIO\n\nJUNK_FOOD_DOCS = (\n \"the pizza pizza beer copyright\",\n \"the pizza burger beer copyright\",\n \"the the pizza beer beer copyright\",\n \"the burger beer beer copyright\",\n \"the coke burger coke copyright\",\n \"the coke burger burger\",\n)\n\nNOTJUNK_FOOD_DOCS = (\n \"the salad celeri copyright\",\n \"the salad salad sparkling water copyright\",\n \"the the celeri celeri copyright\",\n \"the tomato tomato salad water\",\n \"the tomato salad water copyright\",\n)\n\nALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS\n\n\ndef uppercase(s):\n return strip_accents_unicode(s).upper()\n\n\ndef strip_eacute(s):\n return s.replace('\\xe9', 'e')\n\n\ndef split_tokenize(s):\n return s.split()\n\n\ndef lazy_analyze(s):\n return ['the_ultimate_feature']\n\n\ndef test_strip_accents():\n # check some classical latin accentuated symbols\n a = '\\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe7\\xe8\\xe9\\xea\\xeb'\n expected = 'aaaaaaceeee'\n assert_equal(strip_accents_unicode(a), expected)\n\n a = '\\xec\\xed\\xee\\xef\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf9\\xfa\\xfb\\xfc\\xfd'\n expected = 'iiiinooooouuuuy'\n assert_equal(strip_accents_unicode(a), expected)\n\n # check some arabic\n a = '\\u0625' # halef with a hamza below\n expected = '\\u0627' # simple halef\n assert_equal(strip_accents_unicode(a), expected)\n\n # mix letters accentuated and not\n a = \"this is \\xe0 test\"\n expected = 'this is a test'\n assert_equal(strip_accents_unicode(a), expected)\n\n\ndef test_to_ascii():\n # check some classical latin accentuated symbols\n a = '\\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe7\\xe8\\xe9\\xea\\xeb'\n expected = 'aaaaaaceeee'\n assert_equal(strip_accents_ascii(a), expected)\n\n a = '\\xec\\xed\\xee\\xef\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf9\\xfa\\xfb\\xfc\\xfd'\n expected = 'iiiinooooouuuuy'\n assert_equal(strip_accents_ascii(a), expected)\n\n # check some arabic\n a = '\\u0625' # halef with a hamza below\n expected = '' # halef has no direct ascii match\n assert_equal(strip_accents_ascii(a), expected)\n\n # mix letters accentuated and not\n a = \"this is \\xe0 test\"\n expected = 'this is a test'\n assert_equal(strip_accents_ascii(a), expected)\n\n\n@pytest.mark.parametrize('Vectorizer', (CountVectorizer, HashingVectorizer))\ndef test_word_analyzer_unigrams(Vectorizer):\n wa = Vectorizer(strip_accents='ascii').build_analyzer()\n text = (\"J'ai mang\\xe9 du kangourou ce midi, \"\n \"c'\\xe9tait pas tr\\xeas bon.\")\n expected = ['ai', 'mange', 'du', 
'kangourou', 'ce', 'midi',\n 'etait', 'pas', 'tres', 'bon']\n assert_equal(wa(text), expected)\n\n text = \"This is a test, really.\\n\\n I met Harry yesterday.\"\n expected = ['this', 'is', 'test', 'really', 'met', 'harry',\n 'yesterday']\n assert_equal(wa(text), expected)\n\n wa = Vectorizer(input='file').build_analyzer()\n text = StringIO(\"This is a test with a file-like object!\")\n expected = ['this', 'is', 'test', 'with', 'file', 'like',\n 'object']\n assert_equal(wa(text), expected)\n\n # with custom preprocessor\n wa = Vectorizer(preprocessor=uppercase).build_analyzer()\n text = (\"J'ai mang\\xe9 du kangourou ce midi, \"\n \" c'\\xe9tait pas tr\\xeas bon.\")\n expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',\n 'ETAIT', 'PAS', 'TRES', 'BON']\n assert_equal(wa(text), expected)\n\n # with custom tokenizer\n wa = Vectorizer(tokenizer=split_tokenize,\n strip_accents='ascii').build_analyzer()\n text = (\"J'ai mang\\xe9 du kangourou ce midi, \"\n \"c'\\xe9tait pas tr\\xeas bon.\")\n expected = [\"j'ai\", 'mange', 'du', 'kangourou', 'ce', 'midi,',\n \"c'etait\", 'pas', 'tres', 'bon.']\n assert_equal(wa(text), expected)\n\n\ndef test_word_analyzer_unigrams_and_bigrams():\n wa = CountVectorizer(analyzer=\"word\", strip_accents='unicode',\n ngram_range=(1, 2)).build_analyzer()\n\n text = \"J'ai mang\\xe9 du kangourou ce midi, c'\\xe9tait pas tr\\xeas bon.\"\n expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',\n 'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',\n 'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',\n 'etait pas', 'pas tres', 'tres bon']\n assert_equal(wa(text), expected)\n\n\ndef test_unicode_decode_error():\n # decode_error default to strict, so this should fail\n # First, encode (as bytes) a unicode string.\n text = \"J'ai mang\\xe9 du kangourou ce midi, c'\\xe9tait pas tr\\xeas bon.\"\n text_bytes = text.encode('utf-8')\n\n # Then let the Analyzer try to decode it as ascii. 
It should fail,\n # because we have given it an incorrect encoding.\n wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()\n assert_raises(UnicodeDecodeError, wa, text_bytes)\n\n ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),\n encoding='ascii').build_analyzer()\n assert_raises(UnicodeDecodeError, ca, text_bytes)\n\n\ndef test_char_ngram_analyzer():\n cnga = CountVectorizer(analyzer='char', strip_accents='unicode',\n ngram_range=(3, 6)).build_analyzer()\n\n text = \"J'ai mang\\xe9 du kangourou ce midi, c'\\xe9tait pas tr\\xeas bon\"\n expected = [\"j'a\", \"'ai\", 'ai ', 'i m', ' ma']\n assert_equal(cnga(text)[:5], expected)\n expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']\n assert_equal(cnga(text)[-5:], expected)\n\n text = \"This \\n\\tis a test, really.\\n\\n I met Harry yesterday\"\n expected = ['thi', 'his', 'is ', 's i', ' is']\n assert_equal(cnga(text)[:5], expected)\n\n expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']\n assert_equal(cnga(text)[-5:], expected)\n\n cnga = CountVectorizer(input='file', analyzer='char',\n ngram_range=(3, 6)).build_analyzer()\n text = StringIO(\"This is a test with a file-like object!\")\n expected = ['thi', 'his', 'is ', 's i', ' is']\n assert_equal(cnga(text)[:5], expected)\n\n\ndef test_char_wb_ngram_analyzer():\n cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',\n ngram_range=(3, 6)).build_analyzer()\n\n text = \"This \\n\\tis a test, really.\\n\\n I met Harry yesterday\"\n expected = [' th', 'thi', 'his', 'is ', ' thi']\n assert_equal(cnga(text)[:5], expected)\n\n expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']\n assert_equal(cnga(text)[-5:], expected)\n\n cnga = CountVectorizer(input='file', analyzer='char_wb',\n ngram_range=(3, 6)).build_analyzer()\n text = StringIO(\"A test with a file-like object!\")\n expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']\n assert_equal(cnga(text)[:6], expected)\n\n\ndef test_word_ngram_analyzer():\n cnga = CountVectorizer(analyzer='word', strip_accents='unicode',\n ngram_range=(3, 6)).build_analyzer()\n\n text = \"This \\n\\tis a test, really.\\n\\n I met Harry yesterday\"\n expected = ['this is test', 'is test really', 'test really met']\n assert_equal(cnga(text)[:3], expected)\n\n expected = ['test really met harry yesterday',\n 'this is test really met harry',\n 'is test really met harry yesterday']\n assert_equal(cnga(text)[-3:], expected)\n\n cnga_file = CountVectorizer(input='file', analyzer='word',\n ngram_range=(3, 6)).build_analyzer()\n file = StringIO(text)\n assert_equal(cnga_file(file), cnga(text))\n\n\ndef test_countvectorizer_custom_vocabulary():\n vocab = {\"pizza\": 0, \"beer\": 1}\n terms = set(vocab.keys())\n\n # Try a few of the supported types.\n for typ in [dict, list, iter, partial(defaultdict, int)]:\n v = typ(vocab)\n vect = CountVectorizer(vocabulary=v)\n vect.fit(JUNK_FOOD_DOCS)\n if isinstance(v, Mapping):\n assert_equal(vect.vocabulary_, vocab)\n else:\n assert_equal(set(vect.vocabulary_), terms)\n X = vect.transform(JUNK_FOOD_DOCS)\n assert_equal(X.shape[1], len(terms))\n\n\ndef test_countvectorizer_custom_vocabulary_pipeline():\n what_we_like = [\"pizza\", \"beer\"]\n pipe = Pipeline([\n ('count', CountVectorizer(vocabulary=what_we_like)),\n ('tfidf', TfidfTransformer())])\n X = pipe.fit_transform(ALL_FOOD_DOCS)\n assert_equal(set(pipe.named_steps['count'].vocabulary_),\n set(what_we_like))\n assert_equal(X.shape[1], len(what_we_like))\n\n\ndef 
test_countvectorizer_custom_vocabulary_repeated_indices():\n vocab = {\"pizza\": 0, \"beer\": 0}\n try:\n CountVectorizer(vocabulary=vocab)\n except ValueError as e:\n assert_in(\"vocabulary contains repeated indices\", str(e).lower())\n\n\ndef test_countvectorizer_custom_vocabulary_gap_index():\n vocab = {\"pizza\": 1, \"beer\": 2}\n try:\n CountVectorizer(vocabulary=vocab)\n except ValueError as e:\n assert_in(\"doesn't contain index\", str(e).lower())\n\n\ndef test_countvectorizer_stop_words():\n cv = CountVectorizer()\n cv.set_params(stop_words='english')\n assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)\n cv.set_params(stop_words='_bad_str_stop_')\n assert_raises(ValueError, cv.get_stop_words)\n cv.set_params(stop_words='_bad_unicode_stop_')\n assert_raises(ValueError, cv.get_stop_words)\n stoplist = ['some', 'other', 'words']\n cv.set_params(stop_words=stoplist)\n assert_equal(cv.get_stop_words(), set(stoplist))\n\n\ndef test_countvectorizer_empty_vocabulary():\n try:\n vect = CountVectorizer(vocabulary=[])\n vect.fit([\"foo\"])\n assert False, \"we shouldn't get here\"\n except ValueError as e:\n assert_in(\"empty vocabulary\", str(e).lower())\n\n try:\n v = CountVectorizer(max_df=1.0, stop_words=\"english\")\n # fit on stopwords only\n v.fit([\"to be or not to be\", \"and me too\", \"and so do you\"])\n assert False, \"we shouldn't get here\"\n except ValueError as e:\n assert_in(\"empty vocabulary\", str(e).lower())\n\n\ndef test_fit_countvectorizer_twice():\n cv = CountVectorizer()\n X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])\n X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])\n assert_not_equal(X1.shape[1], X2.shape[1])\n\n\ndef test_tf_idf_smoothing():\n X = [[1, 1, 1],\n [1, 1, 0],\n [1, 0, 0]]\n tr = TfidfTransformer(smooth_idf=True, norm='l2')\n tfidf = tr.fit_transform(X).toarray()\n assert (tfidf >= 0).all()\n\n # check normalization\n assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])\n\n # this is robust to features with only zeros\n X = [[1, 1, 0],\n [1, 1, 0],\n [1, 0, 0]]\n tr = TfidfTransformer(smooth_idf=True, norm='l2')\n tfidf = tr.fit_transform(X).toarray()\n assert (tfidf >= 0).all()\n\n\ndef test_tfidf_no_smoothing():\n X = [[1, 1, 1],\n [1, 1, 0],\n [1, 0, 0]]\n tr = TfidfTransformer(smooth_idf=False, norm='l2')\n tfidf = tr.fit_transform(X).toarray()\n assert (tfidf >= 0).all()\n\n # check normalization\n assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])\n\n # the lack of smoothing make IDF fragile in the presence of feature with\n # only zeros\n X = [[1, 1, 0],\n [1, 1, 0],\n [1, 0, 0]]\n tr = TfidfTransformer(smooth_idf=False, norm='l2')\n\n clean_warning_registry()\n with warnings.catch_warnings(record=True) as w:\n 1. 
/ np.array([0.])\n numpy_provides_div0_warning = len(w) == 1\n\n in_warning_message = 'divide by zero'\n tfidf = assert_warns_message(RuntimeWarning, in_warning_message,\n tr.fit_transform, X).toarray()\n if not numpy_provides_div0_warning:\n raise SkipTest(\"Numpy does not provide div 0 warnings.\")\n\n\ndef test_sublinear_tf():\n X = [[1], [2], [3]]\n tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)\n tfidf = tr.fit_transform(X).toarray()\n assert_equal(tfidf[0], 1)\n assert_greater(tfidf[1], tfidf[0])\n assert_greater(tfidf[2], tfidf[1])\n assert_less(tfidf[1], 2)\n assert_less(tfidf[2], 3)\n\n\ndef test_vectorizer():\n # raw documents as an iterator\n train_data = iter(ALL_FOOD_DOCS[:-1])\n test_data = [ALL_FOOD_DOCS[-1]]\n n_train = len(ALL_FOOD_DOCS) - 1\n\n # test without vocabulary\n v1 = CountVectorizer(max_df=0.5)\n counts_train = v1.fit_transform(train_data)\n if hasattr(counts_train, 'tocsr'):\n counts_train = counts_train.tocsr()\n assert_equal(counts_train[0, v1.vocabulary_[\"pizza\"]], 2)\n\n # build a vectorizer v1 with the same vocabulary as the one fitted by v1\n v2 = CountVectorizer(vocabulary=v1.vocabulary_)\n\n # compare that the two vectorizer give the same output on the test sample\n for v in (v1, v2):\n counts_test = v.transform(test_data)\n if hasattr(counts_test, 'tocsr'):\n counts_test = counts_test.tocsr()\n\n vocabulary = v.vocabulary_\n assert_equal(counts_test[0, vocabulary[\"salad\"]], 1)\n assert_equal(counts_test[0, vocabulary[\"tomato\"]], 1)\n assert_equal(counts_test[0, vocabulary[\"water\"]], 1)\n\n # stop word from the fixed list\n assert_false(\"the\" in vocabulary)\n\n # stop word found automatically by the vectorizer DF thresholding\n # words that are high frequent across the complete corpus are likely\n # to be not informative (either real stop words of extraction\n # artifacts)\n assert_false(\"copyright\" in vocabulary)\n\n # not present in the sample\n assert_equal(counts_test[0, vocabulary[\"coke\"]], 0)\n assert_equal(counts_test[0, vocabulary[\"burger\"]], 0)\n assert_equal(counts_test[0, vocabulary[\"beer\"]], 0)\n assert_equal(counts_test[0, vocabulary[\"pizza\"]], 0)\n\n # test tf-idf\n t1 = TfidfTransformer(norm='l1')\n tfidf = t1.fit(counts_train).transform(counts_train).toarray()\n assert_equal(len(t1.idf_), len(v1.vocabulary_))\n assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))\n\n # test tf-idf with new data\n tfidf_test = t1.transform(counts_test).toarray()\n assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))\n\n # test tf alone\n t2 = TfidfTransformer(norm='l1', use_idf=False)\n tf = t2.fit(counts_train).transform(counts_train).toarray()\n assert_false(hasattr(t2, \"idf_\"))\n\n # test idf transform with unlearned idf vector\n t3 = TfidfTransformer(use_idf=True)\n assert_raises(ValueError, t3.transform, counts_train)\n\n # test idf transform with incompatible n_features\n X = [[1, 1, 5],\n [1, 1, 0]]\n t3.fit(X)\n X_incompt = [[1, 3],\n [1, 3]]\n assert_raises(ValueError, t3.transform, X_incompt)\n\n # L1-normalized term frequencies sum to one\n assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)\n\n # test the direct tfidf vectorizer\n # (equivalent to term count vectorizer + tfidf transformer)\n train_data = iter(ALL_FOOD_DOCS[:-1])\n tv = TfidfVectorizer(norm='l1')\n\n tv.max_df = v1.max_df\n tfidf2 = tv.fit_transform(train_data).toarray()\n assert_false(tv.fixed_vocabulary_)\n assert_array_almost_equal(tfidf, tfidf2)\n\n # test the direct tfidf vectorizer with new data\n 
tfidf_test2 = tv.transform(test_data).toarray()\n assert_array_almost_equal(tfidf_test, tfidf_test2)\n\n # test transform on unfitted vectorizer with empty vocabulary\n v3 = CountVectorizer(vocabulary=None)\n assert_raises(ValueError, v3.transform, train_data)\n\n # ascii preprocessor?\n v3.set_params(strip_accents='ascii', lowercase=False)\n assert_equal(v3.build_preprocessor(), strip_accents_ascii)\n\n # error on bad strip_accents param\n v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)\n assert_raises(ValueError, v3.build_preprocessor)\n\n # error with bad analyzer type\n v3.set_params = '_invalid_analyzer_type_'\n assert_raises(ValueError, v3.build_analyzer)\n\n\ndef test_tfidf_vectorizer_setters():\n tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,\n sublinear_tf=False)\n tv.norm = 'l1'\n assert_equal(tv._tfidf.norm, 'l1')\n tv.use_idf = True\n assert tv._tfidf.use_idf\n tv.smooth_idf = True\n assert tv._tfidf.smooth_idf\n tv.sublinear_tf = True\n assert tv._tfidf.sublinear_tf\n\n\n@fails_if_pypy\n@ignore_warnings(category=DeprecationWarning)\ndef test_hashing_vectorizer():\n v = HashingVectorizer()\n X = v.transform(ALL_FOOD_DOCS)\n token_nnz = X.nnz\n assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))\n assert_equal(X.dtype, v.dtype)\n\n # By default the hashed values receive a random sign and l2 normalization\n # makes the feature values bounded\n assert np.min(X.data) > -1\n assert np.min(X.data) < 0\n assert np.max(X.data) > 0\n assert np.max(X.data) < 1\n\n # Check that the rows are normalized\n for i in range(X.shape[0]):\n assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)\n\n # Check vectorization with some non-default parameters\n v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')\n X = v.transform(ALL_FOOD_DOCS)\n assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))\n assert_equal(X.dtype, v.dtype)\n\n # ngrams generate more non zeros\n ngrams_nnz = X.nnz\n assert ngrams_nnz > token_nnz\n assert ngrams_nnz < 2 * token_nnz\n\n # makes the feature values bounded\n assert np.min(X.data) > 0\n assert np.max(X.data) < 1\n\n # Check that the rows are normalized\n for i in range(X.shape[0]):\n assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)\n\n\ndef test_feature_names():\n cv = CountVectorizer(max_df=0.5)\n\n # test for Value error on unfitted/empty vocabulary\n assert_raises(ValueError, cv.get_feature_names)\n assert_false(cv.fixed_vocabulary_)\n\n # test for vocabulary learned from data\n X = cv.fit_transform(ALL_FOOD_DOCS)\n n_samples, n_features = X.shape\n assert_equal(len(cv.vocabulary_), n_features)\n\n feature_names = cv.get_feature_names()\n assert_equal(len(feature_names), n_features)\n assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',\n 'salad', 'sparkling', 'tomato', 'water'],\n feature_names)\n\n for idx, name in enumerate(feature_names):\n assert_equal(idx, cv.vocabulary_.get(name))\n\n # test for custom vocabulary\n vocab = ['beer', 'burger', 'celeri', 'coke', 'pizza',\n 'salad', 'sparkling', 'tomato', 'water']\n\n cv = CountVectorizer(vocabulary=vocab)\n feature_names = cv.get_feature_names()\n assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza', 'salad',\n 'sparkling', 'tomato', 'water'], feature_names)\n assert cv.fixed_vocabulary_\n\n for idx, name in enumerate(feature_names):\n assert_equal(idx, cv.vocabulary_.get(name))\n\n\n@pytest.mark.parametrize('Vectorizer', (CountVectorizer, TfidfVectorizer))\ndef test_vectorizer_max_features(Vectorizer):\n 
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])\n expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',\n u'sparkling', u'water', u'the'])\n\n # test bounded number of extracted features\n vectorizer = Vectorizer(max_df=0.6, max_features=4)\n vectorizer.fit(ALL_FOOD_DOCS)\n assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)\n assert_equal(vectorizer.stop_words_, expected_stop_words)\n\n\ndef test_count_vectorizer_max_features():\n # Regression test: max_features didn't work correctly in 0.14.\n\n cv_1 = CountVectorizer(max_features=1)\n cv_3 = CountVectorizer(max_features=3)\n cv_None = CountVectorizer(max_features=None)\n\n counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)\n counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)\n counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)\n\n features_1 = cv_1.get_feature_names()\n features_3 = cv_3.get_feature_names()\n features_None = cv_None.get_feature_names()\n\n # The most common feature is \"the\", with frequency 7.\n assert_equal(7, counts_1.max())\n assert_equal(7, counts_3.max())\n assert_equal(7, counts_None.max())\n\n # The most common feature should be the same\n assert_equal(\"the\", features_1[np.argmax(counts_1)])\n assert_equal(\"the\", features_3[np.argmax(counts_3)])\n assert_equal(\"the\", features_None[np.argmax(counts_None)])\n\n\ndef test_vectorizer_max_df():\n test_data = ['abc', 'dea', 'eat']\n vect = CountVectorizer(analyzer='char', max_df=1.0)\n vect.fit(test_data)\n assert 'a' in vect.vocabulary_.keys()\n assert_equal(len(vect.vocabulary_.keys()), 6)\n assert_equal(len(vect.stop_words_), 0)\n\n vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5\n vect.fit(test_data)\n assert 'a' not in vect.vocabulary_.keys() # {ae} ignored\n assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain\n assert 'a' in vect.stop_words_\n assert_equal(len(vect.stop_words_), 2)\n\n vect.max_df = 1\n vect.fit(test_data)\n assert 'a' not in vect.vocabulary_.keys() # {ae} ignored\n assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain\n assert 'a' in vect.stop_words_\n assert_equal(len(vect.stop_words_), 2)\n\n\ndef test_vectorizer_min_df():\n test_data = ['abc', 'dea', 'eat']\n vect = CountVectorizer(analyzer='char', min_df=1)\n vect.fit(test_data)\n assert 'a' in vect.vocabulary_.keys()\n assert_equal(len(vect.vocabulary_.keys()), 6)\n assert_equal(len(vect.stop_words_), 0)\n\n vect.min_df = 2\n vect.fit(test_data)\n assert 'c' not in vect.vocabulary_.keys() # {bcdt} ignored\n assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain\n assert 'c' in vect.stop_words_\n assert_equal(len(vect.stop_words_), 4)\n\n vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4\n vect.fit(test_data)\n assert 'c' not in vect.vocabulary_.keys() # {bcdet} ignored\n assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains\n assert 'c' in vect.stop_words_\n assert_equal(len(vect.stop_words_), 5)\n\n\ndef test_count_binary_occurrences():\n # by default multiple occurrences are counted as longs\n test_data = ['aaabc', 'abbde']\n vect = CountVectorizer(analyzer='char', max_df=1.0)\n X = vect.fit_transform(test_data).toarray()\n assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())\n assert_array_equal([[3, 1, 1, 0, 0],\n [1, 2, 0, 1, 1]], X)\n\n # using boolean features, we can fetch the binary occurrence info\n # instead.\n vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)\n X = vect.fit_transform(test_data).toarray()\n 
assert_array_equal([[1, 1, 1, 0, 0],\n [1, 1, 0, 1, 1]], X)\n\n # check the ability to change the dtype\n vect = CountVectorizer(analyzer='char', max_df=1.0,\n binary=True, dtype=np.float32)\n X_sparse = vect.fit_transform(test_data)\n assert_equal(X_sparse.dtype, np.float32)\n\n\n@fails_if_pypy\n@ignore_warnings(category=DeprecationWarning)\ndef test_hashed_binary_occurrences():\n # by default multiple occurrences are counted as longs\n test_data = ['aaabc', 'abbde']\n vect = HashingVectorizer(analyzer='char', non_negative=True,\n norm=None)\n X = vect.transform(test_data)\n assert_equal(np.max(X[0:1].data), 3)\n assert_equal(np.max(X[1:2].data), 2)\n assert_equal(X.dtype, np.float64)\n\n # using boolean features, we can fetch the binary occurrence info\n # instead.\n vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,\n norm=None)\n X = vect.transform(test_data)\n assert_equal(np.max(X.data), 1)\n assert_equal(X.dtype, np.float64)\n\n # check the ability to change the dtype\n vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,\n norm=None, dtype=np.float64)\n X = vect.transform(test_data)\n assert_equal(X.dtype, np.float64)\n\n\n@pytest.mark.parametrize('Vectorizer', (CountVectorizer, TfidfVectorizer))\ndef test_vectorizer_inverse_transform(Vectorizer):\n # raw documents\n data = ALL_FOOD_DOCS\n vectorizer = Vectorizer()\n transformed_data = vectorizer.fit_transform(data)\n inversed_data = vectorizer.inverse_transform(transformed_data)\n analyze = vectorizer.build_analyzer()\n for doc, inversed_terms in zip(data, inversed_data):\n terms = np.sort(np.unique(analyze(doc)))\n inversed_terms = np.sort(np.unique(inversed_terms))\n assert_array_equal(terms, inversed_terms)\n\n # Test that inverse_transform also works with numpy arrays\n transformed_data = transformed_data.toarray()\n inversed_data2 = vectorizer.inverse_transform(transformed_data)\n for terms, terms2 in zip(inversed_data, inversed_data2):\n assert_array_equal(np.sort(terms), np.sort(terms2))\n\n\n@pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22\n@pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22\ndef test_count_vectorizer_pipeline_grid_selection():\n # raw documents\n data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS\n\n # label junk food as -1, the others as +1\n target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)\n\n # split the dataset for model development and final evaluation\n train_data, test_data, target_train, target_test = train_test_split(\n data, target, test_size=.2, random_state=0)\n\n pipeline = Pipeline([('vect', CountVectorizer()),\n ('svc', LinearSVC())])\n\n parameters = {\n 'vect__ngram_range': [(1, 1), (1, 2)],\n 'svc__loss': ('hinge', 'squared_hinge')\n }\n\n # find the best parameters for both the feature extraction and the\n # classifier\n grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)\n\n # Check that the best model found by grid search is 100% correct on the\n # held out evaluation set.\n pred = grid_search.fit(train_data, target_train).predict(test_data)\n assert_array_equal(pred, target_test)\n\n # on this toy dataset bigram representation which is used in the last of\n # the grid_search is considered the best estimator since they all converge\n # to 100% accuracy models\n assert_equal(grid_search.best_score_, 1.0)\n best_vectorizer = grid_search.best_estimator_.named_steps['vect']\n assert_equal(best_vectorizer.ngram_range, (1, 1))\n\n\n@pytest.mark.filterwarnings('ignore: The default of the 
`iid`') # 0.22\n@pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22\ndef test_vectorizer_pipeline_grid_selection():\n # raw documents\n data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS\n\n # label junk food as -1, the others as +1\n target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)\n\n # split the dataset for model development and final evaluation\n train_data, test_data, target_train, target_test = train_test_split(\n data, target, test_size=.1, random_state=0)\n\n pipeline = Pipeline([('vect', TfidfVectorizer()),\n ('svc', LinearSVC())])\n\n parameters = {\n 'vect__ngram_range': [(1, 1), (1, 2)],\n 'vect__norm': ('l1', 'l2'),\n 'svc__loss': ('hinge', 'squared_hinge'),\n }\n\n # find the best parameters for both the feature extraction and the\n # classifier\n grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)\n\n # Check that the best model found by grid search is 100% correct on the\n # held out evaluation set.\n pred = grid_search.fit(train_data, target_train).predict(test_data)\n assert_array_equal(pred, target_test)\n\n # on this toy dataset bigram representation which is used in the last of\n # the grid_search is considered the best estimator since they all converge\n # to 100% accuracy models\n assert_equal(grid_search.best_score_, 1.0)\n best_vectorizer = grid_search.best_estimator_.named_steps['vect']\n assert_equal(best_vectorizer.ngram_range, (1, 1))\n assert_equal(best_vectorizer.norm, 'l2')\n assert_false(best_vectorizer.fixed_vocabulary_)\n\n\ndef test_vectorizer_pipeline_cross_validation():\n # raw documents\n data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS\n\n # label junk food as -1, the others as +1\n target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)\n\n pipeline = Pipeline([('vect', TfidfVectorizer()),\n ('svc', LinearSVC())])\n\n cv_scores = cross_val_score(pipeline, data, target, cv=3)\n assert_array_equal(cv_scores, [1., 1., 1.])\n\n\n@fails_if_pypy\n@ignore_warnings(category=DeprecationWarning)\ndef test_vectorizer_unicode():\n # tests that the count vectorizer works with cyrillic.\n document = (\n \"\\xd0\\x9c\\xd0\\xb0\\xd1\\x88\\xd0\\xb8\\xd0\\xbd\\xd0\\xbd\\xd0\\xbe\\xd0\"\n \"\\xb5 \\xd0\\xbe\\xd0\\xb1\\xd1\\x83\\xd1\\x87\\xd0\\xb5\\xd0\\xbd\\xd0\\xb8\\xd0\"\n \"\\xb5 \\xe2\\x80\\x94 \\xd0\\xbe\\xd0\\xb1\\xd1\\x88\\xd0\\xb8\\xd1\\x80\\xd0\\xbd\"\n \"\\xd1\\x8b\\xd0\\xb9 \\xd0\\xbf\\xd0\\xbe\\xd0\\xb4\\xd1\\x80\\xd0\\xb0\\xd0\\xb7\"\n \"\\xd0\\xb4\\xd0\\xb5\\xd0\\xbb \\xd0\\xb8\\xd1\\x81\\xd0\\xba\\xd1\\x83\\xd1\\x81\"\n \"\\xd1\\x81\\xd1\\x82\\xd0\\xb2\\xd0\\xb5\\xd0\\xbd\\xd0\\xbd\\xd0\\xbe\\xd0\\xb3\"\n \"\\xd0\\xbe \\xd0\\xb8\\xd0\\xbd\\xd1\\x82\\xd0\\xb5\\xd0\\xbb\\xd0\\xbb\\xd0\"\n \"\\xb5\\xd0\\xba\\xd1\\x82\\xd0\\xb0, \\xd0\\xb8\\xd0\\xb7\\xd1\\x83\\xd1\\x87\"\n \"\\xd0\\xb0\\xd1\\x8e\\xd1\\x89\\xd0\\xb8\\xd0\\xb9 \\xd0\\xbc\\xd0\\xb5\\xd1\\x82\"\n \"\\xd0\\xbe\\xd0\\xb4\\xd1\\x8b \\xd0\\xbf\\xd0\\xbe\\xd1\\x81\\xd1\\x82\\xd1\\x80\"\n \"\\xd0\\xbe\\xd0\\xb5\\xd0\\xbd\\xd0\\xb8\\xd1\\x8f \\xd0\\xb0\\xd0\\xbb\\xd0\\xb3\"\n \"\\xd0\\xbe\\xd1\\x80\\xd0\\xb8\\xd1\\x82\\xd0\\xbc\\xd0\\xbe\\xd0\\xb2, \\xd1\\x81\"\n \"\\xd0\\xbf\\xd0\\xbe\\xd1\\x81\\xd0\\xbe\\xd0\\xb1\\xd0\\xbd\\xd1\\x8b\\xd1\\x85 \"\n \"\\xd0\\xbe\\xd0\\xb1\\xd1\\x83\\xd1\\x87\\xd0\\xb0\\xd1\\x82\\xd1\\x8c\\xd1\\x81\\xd1\"\n \"\\x8f.\")\n\n vect = CountVectorizer()\n X_counted = vect.fit_transform([document])\n assert_equal(X_counted.shape, (1, 15))\n\n vect = HashingVectorizer(norm=None, non_negative=True)\n X_hashed = 
vect.transform([document])\n assert_equal(X_hashed.shape, (1, 2 ** 20))\n\n # No collisions on such a small dataset\n assert_equal(X_counted.nnz, X_hashed.nnz)\n\n # When norm is None and non_negative, the tokens are counted up to\n # collisions\n assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))\n\n\ndef test_tfidf_vectorizer_with_fixed_vocabulary():\n # non regression smoke test for inheritance issues\n vocabulary = ['pizza', 'celeri']\n vect = TfidfVectorizer(vocabulary=vocabulary)\n X_1 = vect.fit_transform(ALL_FOOD_DOCS)\n X_2 = vect.transform(ALL_FOOD_DOCS)\n assert_array_almost_equal(X_1.toarray(), X_2.toarray())\n assert vect.fixed_vocabulary_\n\n\ndef test_pickling_vectorizer():\n instances = [\n HashingVectorizer(),\n HashingVectorizer(norm='l1'),\n HashingVectorizer(binary=True),\n HashingVectorizer(ngram_range=(1, 2)),\n CountVectorizer(),\n CountVectorizer(preprocessor=strip_tags),\n CountVectorizer(analyzer=lazy_analyze),\n CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),\n CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),\n TfidfVectorizer(),\n TfidfVectorizer(analyzer=lazy_analyze),\n TfidfVectorizer().fit(JUNK_FOOD_DOCS),\n ]\n\n for orig in instances:\n s = pickle.dumps(orig)\n copy = pickle.loads(s)\n assert_equal(type(copy), orig.__class__)\n assert_equal(copy.get_params(), orig.get_params())\n if IS_PYPY and isinstance(orig, HashingVectorizer):\n continue\n else:\n assert_array_equal(\n copy.fit_transform(JUNK_FOOD_DOCS).toarray(),\n orig.fit_transform(JUNK_FOOD_DOCS).toarray())\n\n\ndef test_countvectorizer_vocab_sets_when_pickling():\n # ensure that vocabulary of type set is coerced to a list to\n # preserve iteration ordering after deserialization\n rng = np.random.RandomState(0)\n vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',\n 'salad', 'sparkling', 'tomato', 'water'])\n for x in range(0, 100):\n vocab_set = set(rng.choice(vocab_words, size=5, replace=False))\n cv = CountVectorizer(vocabulary=vocab_set)\n unpickled_cv = pickle.loads(pickle.dumps(cv))\n cv.fit(ALL_FOOD_DOCS)\n unpickled_cv.fit(ALL_FOOD_DOCS)\n assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())\n\n\ndef test_countvectorizer_vocab_dicts_when_pickling():\n rng = np.random.RandomState(0)\n vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',\n 'salad', 'sparkling', 'tomato', 'water'])\n for x in range(0, 100):\n vocab_dict = dict()\n words = rng.choice(vocab_words, size=5, replace=False)\n for y in range(0, 5):\n vocab_dict[words[y]] = y\n cv = CountVectorizer(vocabulary=vocab_dict)\n unpickled_cv = pickle.loads(pickle.dumps(cv))\n cv.fit(ALL_FOOD_DOCS)\n unpickled_cv.fit(ALL_FOOD_DOCS)\n assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())\n\n\ndef test_stop_words_removal():\n # Ensure that deleting the stop_words_ attribute doesn't affect transform\n\n fitted_vectorizers = (\n TfidfVectorizer().fit(JUNK_FOOD_DOCS),\n CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),\n CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)\n )\n\n for vect in fitted_vectorizers:\n vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()\n\n vect.stop_words_ = None\n stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()\n\n delattr(vect, 'stop_words_')\n stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()\n\n assert_array_equal(stop_None_transform, vect_transform)\n assert_array_equal(stop_del_transform, vect_transform)\n\n\ndef test_pickling_transformer():\n 
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)\n orig = TfidfTransformer().fit(X)\n s = pickle.dumps(orig)\n copy = pickle.loads(s)\n assert_equal(type(copy), orig.__class__)\n assert_array_equal(\n copy.fit_transform(X).toarray(),\n orig.fit_transform(X).toarray())\n\n\ndef test_transformer_idf_setter():\n X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)\n orig = TfidfTransformer().fit(X)\n copy = TfidfTransformer()\n copy.idf_ = orig.idf_\n assert_array_equal(\n copy.transform(X).toarray(),\n orig.transform(X).toarray())\n\n\ndef test_tfidf_vectorizer_setter():\n orig = TfidfVectorizer(use_idf=True)\n orig.fit(JUNK_FOOD_DOCS)\n copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=True)\n copy.idf_ = orig.idf_\n assert_array_equal(\n copy.transform(JUNK_FOOD_DOCS).toarray(),\n orig.transform(JUNK_FOOD_DOCS).toarray())\n\n\ndef test_tfidfvectorizer_invalid_idf_attr():\n vect = TfidfVectorizer(use_idf=True)\n vect.fit(JUNK_FOOD_DOCS)\n copy = TfidfVectorizer(vocabulary=vect.vocabulary_, use_idf=True)\n expected_idf_len = len(vect.idf_)\n invalid_idf = [1.0] * (expected_idf_len + 1)\n assert_raises(ValueError, setattr, copy, 'idf_', invalid_idf)\n\n\ndef test_non_unique_vocab():\n vocab = ['a', 'b', 'c', 'a', 'a']\n vect = CountVectorizer(vocabulary=vocab)\n assert_raises(ValueError, vect.fit, [])\n\n\n@fails_if_pypy\ndef test_hashingvectorizer_nan_in_docs():\n # np.nan can appear when using pandas to load text fields from a csv file\n # with missing values.\n message = \"np.nan is an invalid document, expected byte or unicode string.\"\n exception = ValueError\n\n def func():\n hv = HashingVectorizer()\n hv.fit_transform(['hello world', np.nan, 'hello hello'])\n\n assert_raise_message(exception, message, func)\n\n\ndef test_tfidfvectorizer_binary():\n # Non-regression test: TfidfVectorizer used to ignore its \"binary\" param.\n v = TfidfVectorizer(binary=True, use_idf=False, norm=None)\n assert v.binary\n\n X = v.fit_transform(['hello world', 'hello hello']).toarray()\n assert_array_equal(X.ravel(), [1, 1, 1, 0])\n X2 = v.transform(['hello world', 'hello hello']).toarray()\n assert_array_equal(X2.ravel(), [1, 1, 1, 0])\n\n\ndef test_tfidfvectorizer_export_idf():\n vect = TfidfVectorizer(use_idf=True)\n vect.fit(JUNK_FOOD_DOCS)\n assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)\n\n\ndef test_vectorizer_vocab_clone():\n vect_vocab = TfidfVectorizer(vocabulary=[\"the\"])\n vect_vocab_clone = clone(vect_vocab)\n vect_vocab.fit(ALL_FOOD_DOCS)\n vect_vocab_clone.fit(ALL_FOOD_DOCS)\n assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)\n\n\n@pytest.mark.parametrize('Vectorizer',\n (CountVectorizer, TfidfVectorizer, HashingVectorizer))\ndef test_vectorizer_string_object_as_input(Vectorizer):\n message = (\"Iterable over raw text documents expected, \"\n \"string object received.\")\n vec = Vectorizer()\n assert_raise_message(\n ValueError, message, vec.fit_transform, \"hello world!\")\n assert_raise_message(ValueError, message, vec.fit, \"hello world!\")\n assert_raise_message(ValueError, message, vec.transform, \"hello world!\")\n\n\n@pytest.mark.parametrize(\"X_dtype\", [np.float32, np.float64])\ndef test_tfidf_transformer_type(X_dtype):\n X = sparse.rand(10, 20000, dtype=X_dtype, random_state=42)\n X_trans = TfidfTransformer().fit_transform(X)\n assert X_trans.dtype == X.dtype\n\n\ndef test_tfidf_transformer_sparse():\n X = sparse.rand(10, 20000, dtype=np.float64, random_state=42)\n X_csc = sparse.csc_matrix(X)\n X_csr = sparse.csr_matrix(X)\n\n X_trans_csc = 
TfidfTransformer().fit_transform(X_csc)\n X_trans_csr = TfidfTransformer().fit_transform(X_csr)\n assert_allclose_dense_sparse(X_trans_csc, X_trans_csr)\n assert X_trans_csc.format == X_trans_csr.format\n\n\n@pytest.mark.parametrize(\n \"vectorizer_dtype, output_dtype, warning_expected\",\n [(np.int32, np.float64, True),\n (np.int64, np.float64, True),\n (np.float32, np.float32, False),\n (np.float64, np.float64, False)]\n)\ndef test_tfidf_vectorizer_type(vectorizer_dtype, output_dtype,\n warning_expected):\n X = np.array([\"numpy\", \"scipy\", \"sklearn\"])\n vectorizer = TfidfVectorizer(dtype=vectorizer_dtype)\n\n warning_msg_match = \"'dtype' should be used.\"\n warning_cls = UserWarning\n expected_warning_cls = warning_cls if warning_expected else None\n with pytest.warns(expected_warning_cls,\n match=warning_msg_match) as record:\n X_idf = vectorizer.fit_transform(X)\n if expected_warning_cls is None:\n relevant_warnings = [w for w in record\n if isinstance(w, warning_cls)]\n assert len(relevant_warnings) == 0\n assert X_idf.dtype == output_dtype\n\n\n@pytest.mark.parametrize(\"vec\", [\n HashingVectorizer(ngram_range=(2, 1)),\n CountVectorizer(ngram_range=(2, 1)),\n TfidfVectorizer(ngram_range=(2, 1))\n ])\ndef test_vectorizers_invalid_ngram_range(vec):\n # vectorizers could be initialized with invalid ngram range\n # test for raising error message\n invalid_range = vec.ngram_range\n message = (\"Invalid value for ngram_range=%s \"\n \"lower boundary larger than the upper boundary.\"\n % str(invalid_range))\n if isinstance(vec, HashingVectorizer):\n pytest.xfail(reason='HashingVectorizer not supported on PyPy')\n\n assert_raise_message(\n ValueError, message, vec.fit, [\"good news everyone\"])\n assert_raise_message(\n ValueError, message, vec.fit_transform, [\"good news everyone\"])\n\n if isinstance(vec, HashingVectorizer):\n assert_raise_message(\n ValueError, message, vec.transform, [\"good news everyone\"])\n\n\ndef _check_stop_words_consistency(estimator):\n stop_words = estimator.get_stop_words()\n tokenize = estimator.build_tokenizer()\n preprocess = estimator.build_preprocessor()\n return estimator._check_stop_words_consistency(stop_words, preprocess,\n tokenize)\n\n\n@fails_if_pypy\ndef test_vectorizer_stop_words_inconsistent():\n if PY2:\n lstr = \"[u'and', u'll', u've']\"\n else:\n lstr = \"['and', 'll', 've']\"\n message = ('Your stop_words may be inconsistent with your '\n 'preprocessing. Tokenizing the stop words generated '\n 'tokens %s not in stop_words.' 
% lstr)\n for vec in [CountVectorizer(),\n TfidfVectorizer(), HashingVectorizer()]:\n vec.set_params(stop_words=[\"you've\", \"you\", \"you'll\", 'AND'])\n assert_warns_message(UserWarning, message, vec.fit_transform,\n ['hello world'])\n # reset stop word validation\n del vec._stop_words_id\n assert _check_stop_words_consistency(vec) is False\n\n # Only one warning per stop list\n assert_no_warnings(vec.fit_transform, ['hello world'])\n assert _check_stop_words_consistency(vec) is None\n\n # Test caching of inconsistency assessment\n vec.set_params(stop_words=[\"you've\", \"you\", \"you'll\", 'blah', 'AND'])\n assert_warns_message(UserWarning, message, vec.fit_transform,\n ['hello world'])\n\n\n@skip_if_32bit\ndef test_countvectorizer_sort_features_64bit_sparse_indices():\n \"\"\"\n Check that CountVectorizer._sort_features preserves the dtype of its sparse\n feature matrix.\n\n This test is skipped on 32bit platforms, see:\n https://github.com/scikit-learn/scikit-learn/pull/11295\n for more details.\n \"\"\"\n\n X = sparse.csr_matrix((5, 5), dtype=np.int64)\n\n # force indices and indptr to int64.\n INDICES_DTYPE = np.int64\n X.indices = X.indices.astype(INDICES_DTYPE)\n X.indptr = X.indptr.astype(INDICES_DTYPE)\n\n vocabulary = {\n \"scikit-learn\": 0,\n \"is\": 1,\n \"great!\": 2\n }\n\n Xs = CountVectorizer()._sort_features(X, vocabulary)\n\n assert INDICES_DTYPE == Xs.indices.dtype\n\n\n@fails_if_pypy\n@pytest.mark.parametrize('Estimator',\n [CountVectorizer, TfidfVectorizer, HashingVectorizer])\ndef test_stop_word_validation_custom_preprocessor(Estimator):\n data = [{'text': 'some text'}]\n\n vec = Estimator()\n assert _check_stop_words_consistency(vec) is True\n\n vec = Estimator(preprocessor=lambda x: x['text'],\n stop_words=['and'])\n assert _check_stop_words_consistency(vec) == 'error'\n # checks are cached\n assert _check_stop_words_consistency(vec) is None\n vec.fit_transform(data)\n\n class CustomEstimator(Estimator):\n def build_preprocessor(self):\n return lambda x: x['text']\n\n vec = CustomEstimator(stop_words=['and'])\n assert _check_stop_words_consistency(vec) == 'error'\n\n vec = Estimator(tokenizer=lambda doc: re.compile(r'\\w{1,}')\n .findall(doc),\n stop_words=['and'])\n assert _check_stop_words_consistency(vec) is True\n", "\"\"\"Kernels for Gaussian process regression and classification.\n\nThe kernels in this module allow kernel-engineering, i.e., they can be\ncombined via the \"+\" and \"*\" operators or be exponentiated with a scalar\nvia \"**\". These sum and product expressions can also contain scalar values,\nwhich are automatically converted to a constant kernel.\n\nAll kernels allow (analytic) gradient-based hyperparameter optimization.\nThe space of hyperparameters can be specified by giving lower und upper\nboundaries for the value of each hyperparameter (the search space is thus\nrectangular). 
Instead of specifying bounds, hyperparameters can also be\ndeclared to be \"fixed\", which causes these hyperparameters to be excluded from\noptimization.\n\"\"\"\n\n# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>\n# License: BSD 3 clause\n\n# Note: this module is strongly inspired by the kernel module of the george\n# package.\n\nfrom abc import ABCMeta, abstractmethod\nfrom collections import namedtuple\nimport math\n\nimport numpy as np\nfrom scipy.special import kv, gamma\nfrom scipy.spatial.distance import pdist, cdist, squareform\n\nfrom ..metrics.pairwise import pairwise_kernels\nfrom ..externals import six\nfrom ..base import clone\nfrom ..utils.fixes import signature\n\n\ndef _check_length_scale(X, length_scale):\n length_scale = np.squeeze(length_scale).astype(float)\n if np.ndim(length_scale) > 1:\n raise ValueError(\"length_scale cannot be of dimension greater than 1\")\n if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]:\n raise ValueError(\"Anisotropic kernel must have the same number of \"\n \"dimensions as data (%d!=%d)\"\n % (length_scale.shape[0], X.shape[1]))\n return length_scale\n\n\nclass Hyperparameter(namedtuple('Hyperparameter',\n ('name', 'value_type', 'bounds',\n 'n_elements', 'fixed'))):\n \"\"\"A kernel hyperparameter's specification in form of a namedtuple.\n\n .. versionadded:: 0.18\n\n Attributes\n ----------\n name : string\n The name of the hyperparameter. Note that a kernel using a\n hyperparameter with name \"x\" must have the attributes self.x and\n self.x_bounds\n\n value_type : string\n The type of the hyperparameter. Currently, only \"numeric\"\n hyperparameters are supported.\n\n bounds : pair of floats >= 0 or \"fixed\"\n The lower and upper bound on the parameter. If n_elements>1, a pair\n of 1d array with n_elements each may be given alternatively. If\n the string \"fixed\" is passed as bounds, the hyperparameter's value\n cannot be changed.\n\n n_elements : int, default=1\n The number of elements of the hyperparameter value. Defaults to 1,\n which corresponds to a scalar hyperparameter. n_elements > 1\n corresponds to a hyperparameter which is vector-valued,\n such as, e.g., anisotropic length-scales.\n\n fixed : bool, default: None\n Whether the value of this hyperparameter is fixed, i.e., cannot be\n changed during hyperparameter tuning. If None is passed, the \"fixed\" is\n derived based on the given bounds.\n\n \"\"\"\n # A raw namedtuple is very memory efficient as it packs the attributes\n # in a struct to get rid of the __dict__ of attributes in particular it\n # does not copy the string for the keys on each instance.\n # By deriving a namedtuple class just to introduce the __init__ method we\n # would also reintroduce the __dict__ on the instance. By telling the\n # Python interpreter that this subclass uses static __slots__ instead of\n # dynamic attributes. Furthermore we don't need any additional slot in the\n # subclass so we set __slots__ to the empty tuple.\n __slots__ = ()\n\n def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):\n if not isinstance(bounds, six.string_types) or bounds != \"fixed\":\n bounds = np.atleast_2d(bounds)\n if n_elements > 1: # vector-valued parameter\n if bounds.shape[0] == 1:\n bounds = np.repeat(bounds, n_elements, 0)\n elif bounds.shape[0] != n_elements:\n raise ValueError(\"Bounds on %s should have either 1 or \"\n \"%d dimensions. 
Given are %d\"\n % (name, n_elements, bounds.shape[0]))\n\n if fixed is None:\n fixed = isinstance(bounds, six.string_types) and bounds == \"fixed\"\n return super(Hyperparameter, cls).__new__(\n cls, name, value_type, bounds, n_elements, fixed)\n\n # This is mainly a testing utility to check that two hyperparameters\n # are equal.\n def __eq__(self, other):\n return (self.name == other.name and\n self.value_type == other.value_type and\n np.all(self.bounds == other.bounds) and\n self.n_elements == other.n_elements and\n self.fixed == other.fixed)\n\n\nclass Kernel(six.with_metaclass(ABCMeta)):\n \"\"\"Base class for all kernels.\n\n .. versionadded:: 0.18\n \"\"\"\n\n def get_params(self, deep=True):\n \"\"\"Get parameters of this kernel.\n\n Parameters\n ----------\n deep : boolean, optional\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : mapping of string to any\n Parameter names mapped to their values.\n \"\"\"\n params = dict()\n\n # introspect the constructor arguments to find the model parameters\n # to represent\n cls = self.__class__\n init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n init_sign = signature(init)\n args, varargs = [], []\n for parameter in init_sign.parameters.values():\n if (parameter.kind != parameter.VAR_KEYWORD and\n parameter.name != 'self'):\n args.append(parameter.name)\n if parameter.kind == parameter.VAR_POSITIONAL:\n varargs.append(parameter.name)\n\n if len(varargs) != 0:\n raise RuntimeError(\"scikit-learn kernels should always \"\n \"specify their parameters in the signature\"\n \" of their __init__ (no varargs).\"\n \" %s doesn't follow this convention.\"\n % (cls, ))\n for arg in args:\n params[arg] = getattr(self, arg, None)\n return params\n\n def set_params(self, **params):\n \"\"\"Set the parameters of this kernel.\n\n The method works on simple kernels as well as on nested kernels.\n The latter have parameters of the form ``<component>__<parameter>``\n so that it's possible to update each component of a nested object.\n\n Returns\n -------\n self\n \"\"\"\n if not params:\n # Simple optimisation to gain speed (inspect is slow)\n return self\n valid_params = self.get_params(deep=True)\n for key, value in six.iteritems(params):\n split = key.split('__', 1)\n if len(split) > 1:\n # nested objects case\n name, sub_name = split\n if name not in valid_params:\n raise ValueError('Invalid parameter %s for kernel %s. '\n 'Check the list of available parameters '\n 'with `kernel.get_params().keys()`.' %\n (name, self))\n sub_object = valid_params[name]\n sub_object.set_params(**{sub_name: value})\n else:\n # simple objects case\n if key not in valid_params:\n raise ValueError('Invalid parameter %s for kernel %s. '\n 'Check the list of available parameters '\n 'with `kernel.get_params().keys()`.' 
%\n (key, self.__class__.__name__))\n setattr(self, key, value)\n return self\n\n def clone_with_theta(self, theta):\n \"\"\"Returns a clone of self with given hyperparameters theta.\n\n Parameters\n ----------\n theta : array, shape (n_dims,)\n The hyperparameters\n \"\"\"\n cloned = clone(self)\n cloned.theta = theta\n return cloned\n\n @property\n def n_dims(self):\n \"\"\"Returns the number of non-fixed hyperparameters of the kernel.\"\"\"\n return self.theta.shape[0]\n\n @property\n def hyperparameters(self):\n \"\"\"Returns a list of all hyperparameter specifications.\"\"\"\n r = []\n for attr in dir(self):\n if attr.startswith(\"hyperparameter_\"):\n r.append(getattr(self, attr))\n return r\n\n @property\n def theta(self):\n \"\"\"Returns the (flattened, log-transformed) non-fixed hyperparameters.\n\n Note that theta are typically the log-transformed values of the\n kernel's hyperparameters as this representation of the search space\n is more amenable for hyperparameter search, as hyperparameters like\n length-scales naturally live on a log-scale.\n\n Returns\n -------\n theta : array, shape (n_dims,)\n The non-fixed, log-transformed hyperparameters of the kernel\n \"\"\"\n theta = []\n params = self.get_params()\n for hyperparameter in self.hyperparameters:\n if not hyperparameter.fixed:\n theta.append(params[hyperparameter.name])\n if len(theta) > 0:\n return np.log(np.hstack(theta))\n else:\n return np.array([])\n\n @theta.setter\n def theta(self, theta):\n \"\"\"Sets the (flattened, log-transformed) non-fixed hyperparameters.\n\n Parameters\n ----------\n theta : array, shape (n_dims,)\n The non-fixed, log-transformed hyperparameters of the kernel\n \"\"\"\n params = self.get_params()\n i = 0\n for hyperparameter in self.hyperparameters:\n if hyperparameter.fixed:\n continue\n if hyperparameter.n_elements > 1:\n # vector-valued parameter\n params[hyperparameter.name] = np.exp(\n theta[i:i + hyperparameter.n_elements])\n i += hyperparameter.n_elements\n else:\n params[hyperparameter.name] = np.exp(theta[i])\n i += 1\n\n if i != len(theta):\n raise ValueError(\"theta has not the correct number of entries.\"\n \" Should be %d; given are %d\"\n % (i, len(theta)))\n self.set_params(**params)\n\n @property\n def bounds(self):\n \"\"\"Returns the log-transformed bounds on the theta.\n\n Returns\n -------\n bounds : array, shape (n_dims, 2)\n The log-transformed bounds on the kernel's hyperparameters theta\n \"\"\"\n bounds = []\n for hyperparameter in self.hyperparameters:\n if not hyperparameter.fixed:\n bounds.append(hyperparameter.bounds)\n if len(bounds) > 0:\n return np.log(np.vstack(bounds))\n else:\n return np.array([])\n\n def __add__(self, b):\n if not isinstance(b, Kernel):\n return Sum(self, ConstantKernel(b))\n return Sum(self, b)\n\n def __radd__(self, b):\n if not isinstance(b, Kernel):\n return Sum(ConstantKernel(b), self)\n return Sum(b, self)\n\n def __mul__(self, b):\n if not isinstance(b, Kernel):\n return Product(self, ConstantKernel(b))\n return Product(self, b)\n\n def __rmul__(self, b):\n if not isinstance(b, Kernel):\n return Product(ConstantKernel(b), self)\n return Product(b, self)\n\n def __pow__(self, b):\n return Exponentiation(self, b)\n\n def __eq__(self, b):\n if type(self) != type(b):\n return False\n params_a = self.get_params()\n params_b = b.get_params()\n for key in set(list(params_a.keys()) + list(params_b.keys())):\n if np.any(params_a.get(key, None) != params_b.get(key, None)):\n return False\n return True\n\n def __repr__(self):\n return 
\"{0}({1})\".format(self.__class__.__name__,\n \", \".join(map(\"{0:.3g}\".format, self.theta)))\n\n @abstractmethod\n def __call__(self, X, Y=None, eval_gradient=False):\n \"\"\"Evaluate the kernel.\"\"\"\n\n @abstractmethod\n def diag(self, X):\n \"\"\"Returns the diagonal of the kernel k(X, X).\n\n The result of this method is identical to np.diag(self(X)); however,\n it can be evaluated more efficiently since only the diagonal is\n evaluated.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Returns\n -------\n K_diag : array, shape (n_samples_X,)\n Diagonal of kernel k(X, X)\n \"\"\"\n\n @abstractmethod\n def is_stationary(self):\n \"\"\"Returns whether the kernel is stationary. \"\"\"\n\n\nclass NormalizedKernelMixin(object):\n \"\"\"Mixin for kernels which are normalized: k(X, X)=1.\n\n .. versionadded:: 0.18\n \"\"\"\n\n def diag(self, X):\n \"\"\"Returns the diagonal of the kernel k(X, X).\n\n The result of this method is identical to np.diag(self(X)); however,\n it can be evaluated more efficiently since only the diagonal is\n evaluated.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Returns\n -------\n K_diag : array, shape (n_samples_X,)\n Diagonal of kernel k(X, X)\n \"\"\"\n return np.ones(X.shape[0])\n\n\nclass StationaryKernelMixin(object):\n \"\"\"Mixin for kernels which are stationary: k(X, Y)= f(X-Y).\n\n .. versionadded:: 0.18\n \"\"\"\n\n def is_stationary(self):\n \"\"\"Returns whether the kernel is stationary. \"\"\"\n return True\n\n\nclass CompoundKernel(Kernel):\n \"\"\"Kernel which is composed of a set of other kernels.\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n kernels : list of Kernel objects\n The other kernels\n \"\"\"\n\n def __init__(self, kernels):\n self.kernels = kernels\n\n def get_params(self, deep=True):\n \"\"\"Get parameters of this kernel.\n\n Parameters\n ----------\n deep : boolean, optional\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : mapping of string to any\n Parameter names mapped to their values.\n \"\"\"\n return dict(kernels=self.kernels)\n\n @property\n def theta(self):\n \"\"\"Returns the (flattened, log-transformed) non-fixed hyperparameters.\n\n Note that theta are typically the log-transformed values of the\n kernel's hyperparameters as this representation of the search space\n is more amenable for hyperparameter search, as hyperparameters like\n length-scales naturally live on a log-scale.\n\n Returns\n -------\n theta : array, shape (n_dims,)\n The non-fixed, log-transformed hyperparameters of the kernel\n \"\"\"\n return np.hstack([kernel.theta for kernel in self.kernels])\n\n @theta.setter\n def theta(self, theta):\n \"\"\"Sets the (flattened, log-transformed) non-fixed hyperparameters.\n\n Parameters\n ----------\n theta : array, shape (n_dims,)\n The non-fixed, log-transformed hyperparameters of the kernel\n \"\"\"\n k_dims = self.k1.n_dims\n for i, kernel in enumerate(self.kernels):\n kernel.theta = theta[i * k_dims:(i + 1) * k_dims]\n\n @property\n def bounds(self):\n \"\"\"Returns the log-transformed bounds on the theta.\n\n Returns\n -------\n bounds : array, shape (n_dims, 2)\n The log-transformed bounds on the kernel's hyperparameters theta\n \"\"\"\n return np.vstack([kernel.bounds for kernel in self.kernels])\n\n def __call__(self, X, Y=None, eval_gradient=False):\n 
\"\"\"Return the kernel k(X, Y) and optionally its gradient.\n\n Note that this compound kernel returns the results of all simple kernel\n stacked along an additional axis.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Y : array, shape (n_samples_Y, n_features), (optional, default=None)\n Right argument of the returned kernel k(X, Y). If None, k(X, X)\n if evaluated instead.\n\n eval_gradient : bool (optional, default=False)\n Determines whether the gradient with respect to the kernel\n hyperparameter is determined.\n\n Returns\n -------\n K : array, shape (n_samples_X, n_samples_Y, n_kernels)\n Kernel k(X, Y)\n\n K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)\n The gradient of the kernel k(X, X) with respect to the\n hyperparameter of the kernel. Only returned when eval_gradient\n is True.\n \"\"\"\n if eval_gradient:\n K = []\n K_grad = []\n for kernel in self.kernels:\n K_single, K_grad_single = kernel(X, Y, eval_gradient)\n K.append(K_single)\n K_grad.append(K_grad_single[..., np.newaxis])\n return np.dstack(K), np.concatenate(K_grad, 3)\n else:\n return np.dstack([kernel(X, Y, eval_gradient)\n for kernel in self.kernels])\n\n def __eq__(self, b):\n if type(self) != type(b) or len(self.kernels) != len(b.kernels):\n return False\n return np.all([self.kernels[i] == b.kernels[i]\n for i in range(len(self.kernels))])\n\n def is_stationary(self):\n \"\"\"Returns whether the kernel is stationary. \"\"\"\n return np.all([kernel.is_stationary() for kernel in self.kernels])\n\n def diag(self, X):\n \"\"\"Returns the diagonal of the kernel k(X, X).\n\n The result of this method is identical to np.diag(self(X)); however,\n it can be evaluated more efficiently since only the diagonal is\n evaluated.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Returns\n -------\n K_diag : array, shape (n_samples_X, n_kernels)\n Diagonal of kernel k(X, X)\n \"\"\"\n return np.vstack([kernel.diag(X) for kernel in self.kernels]).T\n\n\nclass KernelOperator(Kernel):\n \"\"\"Base class for all kernel operators.\n\n .. 
versionadded:: 0.18\n \"\"\"\n\n def __init__(self, k1, k2):\n self.k1 = k1\n self.k2 = k2\n\n def get_params(self, deep=True):\n \"\"\"Get parameters of this kernel.\n\n Parameters\n ----------\n deep : boolean, optional\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : mapping of string to any\n Parameter names mapped to their values.\n \"\"\"\n params = dict(k1=self.k1, k2=self.k2)\n if deep:\n deep_items = self.k1.get_params().items()\n params.update(('k1__' + k, val) for k, val in deep_items)\n deep_items = self.k2.get_params().items()\n params.update(('k2__' + k, val) for k, val in deep_items)\n\n return params\n\n @property\n def hyperparameters(self):\n \"\"\"Returns a list of all hyperparameter.\"\"\"\n r = []\n for hyperparameter in self.k1.hyperparameters:\n r.append(Hyperparameter(\"k1__\" + hyperparameter.name,\n hyperparameter.value_type,\n hyperparameter.bounds,\n hyperparameter.n_elements))\n for hyperparameter in self.k2.hyperparameters:\n r.append(Hyperparameter(\"k2__\" + hyperparameter.name,\n hyperparameter.value_type,\n hyperparameter.bounds,\n hyperparameter.n_elements))\n return r\n\n @property\n def theta(self):\n \"\"\"Returns the (flattened, log-transformed) non-fixed hyperparameters.\n\n Note that theta are typically the log-transformed values of the\n kernel's hyperparameters as this representation of the search space\n is more amenable for hyperparameter search, as hyperparameters like\n length-scales naturally live on a log-scale.\n\n Returns\n -------\n theta : array, shape (n_dims,)\n The non-fixed, log-transformed hyperparameters of the kernel\n \"\"\"\n return np.append(self.k1.theta, self.k2.theta)\n\n @theta.setter\n def theta(self, theta):\n \"\"\"Sets the (flattened, log-transformed) non-fixed hyperparameters.\n\n Parameters\n ----------\n theta : array, shape (n_dims,)\n The non-fixed, log-transformed hyperparameters of the kernel\n \"\"\"\n k1_dims = self.k1.n_dims\n self.k1.theta = theta[:k1_dims]\n self.k2.theta = theta[k1_dims:]\n\n @property\n def bounds(self):\n \"\"\"Returns the log-transformed bounds on the theta.\n\n Returns\n -------\n bounds : array, shape (n_dims, 2)\n The log-transformed bounds on the kernel's hyperparameters theta\n \"\"\"\n if self.k1.bounds.size == 0:\n return self.k2.bounds\n if self.k2.bounds.size == 0:\n return self.k1.bounds\n return np.vstack((self.k1.bounds, self.k2.bounds))\n\n def __eq__(self, b):\n if type(self) != type(b):\n return False\n return (self.k1 == b.k1 and self.k2 == b.k2) \\\n or (self.k1 == b.k2 and self.k2 == b.k1)\n\n def is_stationary(self):\n \"\"\"Returns whether the kernel is stationary. \"\"\"\n return self.k1.is_stationary() and self.k2.is_stationary()\n\n\nclass Sum(KernelOperator):\n \"\"\"Sum-kernel k1 + k2 of two kernels k1 and k2.\n\n The resulting kernel is defined as\n k_sum(X, Y) = k1(X, Y) + k2(X, Y)\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n k1 : Kernel object\n The first base-kernel of the sum-kernel\n\n k2 : Kernel object\n The second base-kernel of the sum-kernel\n\n \"\"\"\n\n def __call__(self, X, Y=None, eval_gradient=False):\n \"\"\"Return the kernel k(X, Y) and optionally its gradient.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Y : array, shape (n_samples_Y, n_features), (optional, default=None)\n Right argument of the returned kernel k(X, Y). 
If None, k(X, X)\n if evaluated instead.\n\n eval_gradient : bool (optional, default=False)\n Determines whether the gradient with respect to the kernel\n hyperparameter is determined.\n\n Returns\n -------\n K : array, shape (n_samples_X, n_samples_Y)\n Kernel k(X, Y)\n\n K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)\n The gradient of the kernel k(X, X) with respect to the\n hyperparameter of the kernel. Only returned when eval_gradient\n is True.\n \"\"\"\n if eval_gradient:\n K1, K1_gradient = self.k1(X, Y, eval_gradient=True)\n K2, K2_gradient = self.k2(X, Y, eval_gradient=True)\n return K1 + K2, np.dstack((K1_gradient, K2_gradient))\n else:\n return self.k1(X, Y) + self.k2(X, Y)\n\n def diag(self, X):\n \"\"\"Returns the diagonal of the kernel k(X, X).\n\n The result of this method is identical to np.diag(self(X)); however,\n it can be evaluated more efficiently since only the diagonal is\n evaluated.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Returns\n -------\n K_diag : array, shape (n_samples_X,)\n Diagonal of kernel k(X, X)\n \"\"\"\n return self.k1.diag(X) + self.k2.diag(X)\n\n def __repr__(self):\n return \"{0} + {1}\".format(self.k1, self.k2)\n\n\nclass Product(KernelOperator):\n \"\"\"Product-kernel k1 * k2 of two kernels k1 and k2.\n\n The resulting kernel is defined as\n k_prod(X, Y) = k1(X, Y) * k2(X, Y)\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n k1 : Kernel object\n The first base-kernel of the product-kernel\n\n k2 : Kernel object\n The second base-kernel of the product-kernel\n\n \"\"\"\n\n def __call__(self, X, Y=None, eval_gradient=False):\n \"\"\"Return the kernel k(X, Y) and optionally its gradient.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Y : array, shape (n_samples_Y, n_features), (optional, default=None)\n Right argument of the returned kernel k(X, Y). If None, k(X, X)\n if evaluated instead.\n\n eval_gradient : bool (optional, default=False)\n Determines whether the gradient with respect to the kernel\n hyperparameter is determined.\n\n Returns\n -------\n K : array, shape (n_samples_X, n_samples_Y)\n Kernel k(X, Y)\n\n K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)\n The gradient of the kernel k(X, X) with respect to the\n hyperparameter of the kernel. Only returned when eval_gradient\n is True.\n \"\"\"\n if eval_gradient:\n K1, K1_gradient = self.k1(X, Y, eval_gradient=True)\n K2, K2_gradient = self.k2(X, Y, eval_gradient=True)\n return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],\n K2_gradient * K1[:, :, np.newaxis]))\n else:\n return self.k1(X, Y) * self.k2(X, Y)\n\n def diag(self, X):\n \"\"\"Returns the diagonal of the kernel k(X, X).\n\n The result of this method is identical to np.diag(self(X)); however,\n it can be evaluated more efficiently since only the diagonal is\n evaluated.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Returns\n -------\n K_diag : array, shape (n_samples_X,)\n Diagonal of kernel k(X, X)\n \"\"\"\n return self.k1.diag(X) * self.k2.diag(X)\n\n def __repr__(self):\n return \"{0} * {1}\".format(self.k1, self.k2)\n\n\nclass Exponentiation(Kernel):\n \"\"\"Exponentiate kernel by given exponent.\n\n The resulting kernel is defined as\n k_exp(X, Y) = k(X, Y) ** exponent\n\n .. 
versionadded:: 0.18\n\n Parameters\n ----------\n kernel : Kernel object\n The base kernel\n\n exponent : float\n The exponent for the base kernel\n\n \"\"\"\n def __init__(self, kernel, exponent):\n self.kernel = kernel\n self.exponent = exponent\n\n def get_params(self, deep=True):\n \"\"\"Get parameters of this kernel.\n\n Parameters\n ----------\n deep : boolean, optional\n If True, will return the parameters for this estimator and\n contained subobjects that are estimators.\n\n Returns\n -------\n params : mapping of string to any\n Parameter names mapped to their values.\n \"\"\"\n params = dict(kernel=self.kernel, exponent=self.exponent)\n if deep:\n deep_items = self.kernel.get_params().items()\n params.update(('kernel__' + k, val) for k, val in deep_items)\n return params\n\n @property\n def hyperparameters(self):\n \"\"\"Returns a list of all hyperparameter.\"\"\"\n r = []\n for hyperparameter in self.kernel.hyperparameters:\n r.append(Hyperparameter(\"kernel__\" + hyperparameter.name,\n hyperparameter.value_type,\n hyperparameter.bounds,\n hyperparameter.n_elements))\n return r\n\n @property\n def theta(self):\n \"\"\"Returns the (flattened, log-transformed) non-fixed hyperparameters.\n\n Note that theta are typically the log-transformed values of the\n kernel's hyperparameters as this representation of the search space\n is more amenable for hyperparameter search, as hyperparameters like\n length-scales naturally live on a log-scale.\n\n Returns\n -------\n theta : array, shape (n_dims,)\n The non-fixed, log-transformed hyperparameters of the kernel\n \"\"\"\n return self.kernel.theta\n\n @theta.setter\n def theta(self, theta):\n \"\"\"Sets the (flattened, log-transformed) non-fixed hyperparameters.\n\n Parameters\n ----------\n theta : array, shape (n_dims,)\n The non-fixed, log-transformed hyperparameters of the kernel\n \"\"\"\n self.kernel.theta = theta\n\n @property\n def bounds(self):\n \"\"\"Returns the log-transformed bounds on the theta.\n\n Returns\n -------\n bounds : array, shape (n_dims, 2)\n The log-transformed bounds on the kernel's hyperparameters theta\n \"\"\"\n return self.kernel.bounds\n\n def __eq__(self, b):\n if type(self) != type(b):\n return False\n return (self.kernel == b.kernel and self.exponent == b.exponent)\n\n def __call__(self, X, Y=None, eval_gradient=False):\n \"\"\"Return the kernel k(X, Y) and optionally its gradient.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Y : array, shape (n_samples_Y, n_features), (optional, default=None)\n Right argument of the returned kernel k(X, Y). If None, k(X, X)\n if evaluated instead.\n\n eval_gradient : bool (optional, default=False)\n Determines whether the gradient with respect to the kernel\n hyperparameter is determined.\n\n Returns\n -------\n K : array, shape (n_samples_X, n_samples_Y)\n Kernel k(X, Y)\n\n K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)\n The gradient of the kernel k(X, X) with respect to the\n hyperparameter of the kernel. 
Only returned when eval_gradient\n is True.\n \"\"\"\n if eval_gradient:\n K, K_gradient = self.kernel(X, Y, eval_gradient=True)\n K_gradient *= \\\n self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)\n return K ** self.exponent, K_gradient\n else:\n K = self.kernel(X, Y, eval_gradient=False)\n return K ** self.exponent\n\n def diag(self, X):\n \"\"\"Returns the diagonal of the kernel k(X, X).\n\n The result of this method is identical to np.diag(self(X)); however,\n it can be evaluated more efficiently since only the diagonal is\n evaluated.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Returns\n -------\n K_diag : array, shape (n_samples_X,)\n Diagonal of kernel k(X, X)\n \"\"\"\n return self.kernel.diag(X) ** self.exponent\n\n def __repr__(self):\n return \"{0} ** {1}\".format(self.kernel, self.exponent)\n\n def is_stationary(self):\n \"\"\"Returns whether the kernel is stationary. \"\"\"\n return self.kernel.is_stationary()\n\n\nclass ConstantKernel(StationaryKernelMixin, Kernel):\n \"\"\"Constant kernel.\n\n Can be used as part of a product-kernel where it scales the magnitude of\n the other factor (kernel) or as part of a sum-kernel, where it modifies\n the mean of the Gaussian process.\n\n k(x_1, x_2) = constant_value for all x_1, x_2\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n constant_value : float, default: 1.0\n The constant value which defines the covariance:\n k(x_1, x_2) = constant_value\n\n constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)\n The lower and upper bound on constant_value\n\n \"\"\"\n def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):\n self.constant_value = constant_value\n self.constant_value_bounds = constant_value_bounds\n\n @property\n def hyperparameter_constant_value(self):\n return Hyperparameter(\n \"constant_value\", \"numeric\", self.constant_value_bounds)\n\n def __call__(self, X, Y=None, eval_gradient=False):\n \"\"\"Return the kernel k(X, Y) and optionally its gradient.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Y : array, shape (n_samples_Y, n_features), (optional, default=None)\n Right argument of the returned kernel k(X, Y). If None, k(X, X)\n if evaluated instead.\n\n eval_gradient : bool (optional, default=False)\n Determines whether the gradient with respect to the kernel\n hyperparameter is determined. Only supported when Y is None.\n\n Returns\n -------\n K : array, shape (n_samples_X, n_samples_Y)\n Kernel k(X, Y)\n\n K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)\n The gradient of the kernel k(X, X) with respect to the\n hyperparameter of the kernel. 
Only returned when eval_gradient\n is True.\n \"\"\"\n X = np.atleast_2d(X)\n if Y is None:\n Y = X\n elif eval_gradient:\n raise ValueError(\"Gradient can only be evaluated when Y is None.\")\n\n K = np.full((X.shape[0], Y.shape[0]), self.constant_value,\n dtype=np.array(self.constant_value).dtype)\n if eval_gradient:\n if not self.hyperparameter_constant_value.fixed:\n return (K, np.full((X.shape[0], X.shape[0], 1),\n self.constant_value,\n dtype=np.array(self.constant_value).dtype))\n else:\n return K, np.empty((X.shape[0], X.shape[0], 0))\n else:\n return K\n\n def diag(self, X):\n \"\"\"Returns the diagonal of the kernel k(X, X).\n\n The result of this method is identical to np.diag(self(X)); however,\n it can be evaluated more efficiently since only the diagonal is\n evaluated.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Returns\n -------\n K_diag : array, shape (n_samples_X,)\n Diagonal of kernel k(X, X)\n \"\"\"\n return np.full(X.shape[0], self.constant_value,\n dtype=np.array(self.constant_value).dtype)\n\n def __repr__(self):\n return \"{0:.3g}**2\".format(np.sqrt(self.constant_value))\n\n\nclass WhiteKernel(StationaryKernelMixin, Kernel):\n \"\"\"White kernel.\n\n The main use-case of this kernel is as part of a sum-kernel where it\n explains the noise-component of the signal. Tuning its parameter\n corresponds to estimating the noise-level.\n\n k(x_1, x_2) = noise_level if x_1 == x_2 else 0\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n noise_level : float, default: 1.0\n Parameter controlling the noise level\n\n noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)\n The lower and upper bound on noise_level\n\n \"\"\"\n def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):\n self.noise_level = noise_level\n self.noise_level_bounds = noise_level_bounds\n\n @property\n def hyperparameter_noise_level(self):\n return Hyperparameter(\n \"noise_level\", \"numeric\", self.noise_level_bounds)\n\n def __call__(self, X, Y=None, eval_gradient=False):\n \"\"\"Return the kernel k(X, Y) and optionally its gradient.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Y : array, shape (n_samples_Y, n_features), (optional, default=None)\n Right argument of the returned kernel k(X, Y). If None, k(X, X)\n if evaluated instead.\n\n eval_gradient : bool (optional, default=False)\n Determines whether the gradient with respect to the kernel\n hyperparameter is determined. Only supported when Y is None.\n\n Returns\n -------\n K : array, shape (n_samples_X, n_samples_Y)\n Kernel k(X, Y)\n\n K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)\n The gradient of the kernel k(X, X) with respect to the\n hyperparameter of the kernel. 
Only returned when eval_gradient\n is True.\n \"\"\"\n X = np.atleast_2d(X)\n if Y is not None and eval_gradient:\n raise ValueError(\"Gradient can only be evaluated when Y is None.\")\n\n if Y is None:\n K = self.noise_level * np.eye(X.shape[0])\n if eval_gradient:\n if not self.hyperparameter_noise_level.fixed:\n return (K, self.noise_level\n * np.eye(X.shape[0])[:, :, np.newaxis])\n else:\n return K, np.empty((X.shape[0], X.shape[0], 0))\n else:\n return K\n else:\n return np.zeros((X.shape[0], Y.shape[0]))\n\n def diag(self, X):\n \"\"\"Returns the diagonal of the kernel k(X, X).\n\n The result of this method is identical to np.diag(self(X)); however,\n it can be evaluated more efficiently since only the diagonal is\n evaluated.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Returns\n -------\n K_diag : array, shape (n_samples_X,)\n Diagonal of kernel k(X, X)\n \"\"\"\n return np.full(X.shape[0], self.noise_level,\n dtype=np.array(self.noise_level).dtype)\n\n def __repr__(self):\n return \"{0}(noise_level={1:.3g})\".format(self.__class__.__name__,\n self.noise_level)\n\n\nclass RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):\n \"\"\"Radial-basis function kernel (aka squared-exponential kernel).\n\n The RBF kernel is a stationary kernel. It is also known as the\n \"squared exponential\" kernel. It is parameterized by a length-scale\n parameter length_scale>0, which can either be a scalar (isotropic variant\n of the kernel) or a vector with the same number of dimensions as the inputs\n X (anisotropic variant of the kernel). The kernel is given by:\n\n k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)\n\n This kernel is infinitely differentiable, which implies that GPs with this\n kernel as covariance function have mean square derivatives of all orders,\n and are thus very smooth.\n\n .. versionadded:: 0.18\n\n Parameters\n -----------\n length_scale : float or array with shape (n_features,), default: 1.0\n The length scale of the kernel. If a float, an isotropic kernel is\n used. If an array, an anisotropic kernel is used where each dimension\n of l defines the length-scale of the respective feature dimension.\n\n length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)\n The lower and upper bound on length_scale\n\n \"\"\"\n def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):\n self.length_scale = length_scale\n self.length_scale_bounds = length_scale_bounds\n\n @property\n def anisotropic(self):\n return np.iterable(self.length_scale) and len(self.length_scale) > 1\n\n @property\n def hyperparameter_length_scale(self):\n if self.anisotropic:\n return Hyperparameter(\"length_scale\", \"numeric\",\n self.length_scale_bounds,\n len(self.length_scale))\n return Hyperparameter(\n \"length_scale\", \"numeric\", self.length_scale_bounds)\n\n def __call__(self, X, Y=None, eval_gradient=False):\n \"\"\"Return the kernel k(X, Y) and optionally its gradient.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Y : array, shape (n_samples_Y, n_features), (optional, default=None)\n Right argument of the returned kernel k(X, Y). If None, k(X, X)\n if evaluated instead.\n\n eval_gradient : bool (optional, default=False)\n Determines whether the gradient with respect to the kernel\n hyperparameter is determined. 
Only supported when Y is None.\n\n Returns\n -------\n K : array, shape (n_samples_X, n_samples_Y)\n Kernel k(X, Y)\n\n K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)\n The gradient of the kernel k(X, X) with respect to the\n hyperparameter of the kernel. Only returned when eval_gradient\n is True.\n \"\"\"\n X = np.atleast_2d(X)\n length_scale = _check_length_scale(X, self.length_scale)\n if Y is None:\n dists = pdist(X / length_scale, metric='sqeuclidean')\n K = np.exp(-.5 * dists)\n # convert from upper-triangular matrix to square matrix\n K = squareform(K)\n np.fill_diagonal(K, 1)\n else:\n if eval_gradient:\n raise ValueError(\n \"Gradient can only be evaluated when Y is None.\")\n dists = cdist(X / length_scale, Y / length_scale,\n metric='sqeuclidean')\n K = np.exp(-.5 * dists)\n\n if eval_gradient:\n if self.hyperparameter_length_scale.fixed:\n # Hyperparameter l kept fixed\n return K, np.empty((X.shape[0], X.shape[0], 0))\n elif not self.anisotropic or length_scale.shape[0] == 1:\n K_gradient = \\\n (K * squareform(dists))[:, :, np.newaxis]\n return K, K_gradient\n elif self.anisotropic:\n # We need to recompute the pairwise dimension-wise distances\n K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \\\n / (length_scale ** 2)\n K_gradient *= K[..., np.newaxis]\n return K, K_gradient\n else:\n return K\n\n def __repr__(self):\n if self.anisotropic:\n return \"{0}(length_scale=[{1}])\".format(\n self.__class__.__name__, \", \".join(map(\"{0:.3g}\".format,\n self.length_scale)))\n else: # isotropic\n return \"{0}(length_scale={1:.3g})\".format(\n self.__class__.__name__, np.ravel(self.length_scale)[0])\n\n\nclass Matern(RBF):\n \"\"\" Matern kernel.\n\n The class of Matern kernels is a generalization of the RBF and the\n absolute exponential kernel parameterized by an additional parameter\n nu. The smaller nu, the less smooth the approximated function is.\n For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5\n to the absolute exponential kernel. Important intermediate values are\n nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable\n functions).\n\n See Rasmussen and Williams 2006, pp84 for details regarding the\n different variants of the Matern kernel.\n\n .. versionadded:: 0.18\n\n Parameters\n -----------\n length_scale : float or array with shape (n_features,), default: 1.0\n The length scale of the kernel. If a float, an isotropic kernel is\n used. If an array, an anisotropic kernel is used where each dimension\n of l defines the length-scale of the respective feature dimension.\n\n length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)\n The lower and upper bound on length_scale\n\n nu : float, default: 1.5\n The parameter nu controlling the smoothness of the learned function.\n The smaller nu, the less smooth the approximated function is.\n For nu=inf, the kernel becomes equivalent to the RBF kernel and for\n nu=0.5 to the absolute exponential kernel. Important intermediate\n values are nu=1.5 (once differentiable functions) and nu=2.5\n (twice differentiable functions). Note that values of nu not in\n [0.5, 1.5, 2.5, inf] incur a considerably higher computational cost\n (appr. 10 times higher) since they require to evaluate the modified\n Bessel function. 
Furthermore, in contrast to l, nu is kept fixed to\n its initial value and not optimized.\n\n \"\"\"\n def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),\n nu=1.5):\n super(Matern, self).__init__(length_scale, length_scale_bounds)\n self.nu = nu\n\n def __call__(self, X, Y=None, eval_gradient=False):\n \"\"\"Return the kernel k(X, Y) and optionally its gradient.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Y : array, shape (n_samples_Y, n_features), (optional, default=None)\n Right argument of the returned kernel k(X, Y). If None, k(X, X)\n if evaluated instead.\n\n eval_gradient : bool (optional, default=False)\n Determines whether the gradient with respect to the kernel\n hyperparameter is determined. Only supported when Y is None.\n\n Returns\n -------\n K : array, shape (n_samples_X, n_samples_Y)\n Kernel k(X, Y)\n\n K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)\n The gradient of the kernel k(X, X) with respect to the\n hyperparameter of the kernel. Only returned when eval_gradient\n is True.\n \"\"\"\n X = np.atleast_2d(X)\n length_scale = _check_length_scale(X, self.length_scale)\n if Y is None:\n dists = pdist(X / length_scale, metric='euclidean')\n else:\n if eval_gradient:\n raise ValueError(\n \"Gradient can only be evaluated when Y is None.\")\n dists = cdist(X / length_scale, Y / length_scale,\n metric='euclidean')\n\n if self.nu == 0.5:\n K = np.exp(-dists)\n elif self.nu == 1.5:\n K = dists * math.sqrt(3)\n K = (1. + K) * np.exp(-K)\n elif self.nu == 2.5:\n K = dists * math.sqrt(5)\n K = (1. + K + K ** 2 / 3.0) * np.exp(-K)\n else: # general case; expensive to evaluate\n K = dists\n K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan\n tmp = (math.sqrt(2 * self.nu) * K)\n K.fill((2 ** (1. 
- self.nu)) / gamma(self.nu))\n K *= tmp ** self.nu\n K *= kv(self.nu, tmp)\n\n if Y is None:\n # convert from upper-triangular matrix to square matrix\n K = squareform(K)\n np.fill_diagonal(K, 1)\n\n if eval_gradient:\n if self.hyperparameter_length_scale.fixed:\n # Hyperparameter l kept fixed\n K_gradient = np.empty((X.shape[0], X.shape[0], 0))\n return K, K_gradient\n\n # We need to recompute the pairwise dimension-wise distances\n if self.anisotropic:\n D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \\\n / (length_scale ** 2)\n else:\n D = squareform(dists**2)[:, :, np.newaxis]\n\n if self.nu == 0.5:\n K_gradient = K[..., np.newaxis] * D \\\n / np.sqrt(D.sum(2))[:, :, np.newaxis]\n K_gradient[~np.isfinite(K_gradient)] = 0\n elif self.nu == 1.5:\n K_gradient = \\\n 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]\n elif self.nu == 2.5:\n tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]\n K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)\n else:\n # approximate gradient numerically\n def f(theta): # helper function\n return self.clone_with_theta(theta)(X, Y)\n return K, _approx_fprime(self.theta, f, 1e-10)\n\n if not self.anisotropic:\n return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]\n else:\n return K, K_gradient\n else:\n return K\n\n def __repr__(self):\n if self.anisotropic:\n return \"{0}(length_scale=[{1}], nu={2:.3g})\".format(\n self.__class__.__name__,\n \", \".join(map(\"{0:.3g}\".format, self.length_scale)),\n self.nu)\n else:\n return \"{0}(length_scale={1:.3g}, nu={2:.3g})\".format(\n self.__class__.__name__, np.ravel(self.length_scale)[0],\n self.nu)\n\n\nclass RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):\n \"\"\"Rational Quadratic kernel.\n\n The RationalQuadratic kernel can be seen as a scale mixture (an infinite\n sum) of RBF kernels with different characteristic length-scales. It is\n parameterized by a length-scale parameter length_scale>0 and a scale\n mixture parameter alpha>0. Only the isotropic variant where length_scale is\n a scalar is supported at the moment. The kernel given by:\n\n k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n length_scale : float > 0, default: 1.0\n The length scale of the kernel.\n\n alpha : float > 0, default: 1.0\n Scale mixture parameter\n\n length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)\n The lower and upper bound on length_scale\n\n alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)\n The lower and upper bound on alpha\n\n \"\"\"\n def __init__(self, length_scale=1.0, alpha=1.0,\n length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):\n self.length_scale = length_scale\n self.alpha = alpha\n self.length_scale_bounds = length_scale_bounds\n self.alpha_bounds = alpha_bounds\n\n @property\n def hyperparameter_length_scale(self):\n return Hyperparameter(\n \"length_scale\", \"numeric\", self.length_scale_bounds)\n\n @property\n def hyperparameter_alpha(self):\n return Hyperparameter(\"alpha\", \"numeric\", self.alpha_bounds)\n\n def __call__(self, X, Y=None, eval_gradient=False):\n \"\"\"Return the kernel k(X, Y) and optionally its gradient.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Y : array, shape (n_samples_Y, n_features), (optional, default=None)\n Right argument of the returned kernel k(X, Y). 
If None, k(X, X)\n if evaluated instead.\n\n eval_gradient : bool (optional, default=False)\n Determines whether the gradient with respect to the kernel\n hyperparameter is determined. Only supported when Y is None.\n\n Returns\n -------\n K : array, shape (n_samples_X, n_samples_Y)\n Kernel k(X, Y)\n\n K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)\n The gradient of the kernel k(X, X) with respect to the\n hyperparameter of the kernel. Only returned when eval_gradient\n is True.\n \"\"\"\n X = np.atleast_2d(X)\n if Y is None:\n dists = squareform(pdist(X, metric='sqeuclidean'))\n tmp = dists / (2 * self.alpha * self.length_scale ** 2)\n base = (1 + tmp)\n K = base ** -self.alpha\n np.fill_diagonal(K, 1)\n else:\n if eval_gradient:\n raise ValueError(\n \"Gradient can only be evaluated when Y is None.\")\n dists = cdist(X, Y, metric='sqeuclidean')\n K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \\\n ** -self.alpha\n\n if eval_gradient:\n # gradient with respect to length_scale\n if not self.hyperparameter_length_scale.fixed:\n length_scale_gradient = \\\n dists * K / (self.length_scale ** 2 * base)\n length_scale_gradient = length_scale_gradient[:, :, np.newaxis]\n else: # l is kept fixed\n length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))\n\n # gradient with respect to alpha\n if not self.hyperparameter_alpha.fixed:\n alpha_gradient = \\\n K * (-self.alpha * np.log(base)\n + dists / (2 * self.length_scale ** 2 * base))\n alpha_gradient = alpha_gradient[:, :, np.newaxis]\n else: # alpha is kept fixed\n alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))\n\n return K, np.dstack((alpha_gradient, length_scale_gradient))\n else:\n return K\n\n def __repr__(self):\n return \"{0}(alpha={1:.3g}, length_scale={2:.3g})\".format(\n self.__class__.__name__, self.alpha, self.length_scale)\n\n\nclass ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):\n r\"\"\"Exp-Sine-Squared kernel.\n\n The ExpSineSquared kernel allows modeling periodic functions. It is\n parameterized by a length-scale parameter length_scale>0 and a periodicity\n parameter periodicity>0. Only the isotropic variant where l is a scalar is\n supported at the moment. The kernel given by:\n\n k(x_i, x_j) =\n exp(-2 (sin(\\pi / periodicity * d(x_i, x_j)) / length_scale) ^ 2)\n\n .. 
versionadded:: 0.18\n\n Parameters\n ----------\n length_scale : float > 0, default: 1.0\n The length scale of the kernel.\n\n periodicity : float > 0, default: 1.0\n The periodicity of the kernel.\n\n length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)\n The lower and upper bound on length_scale\n\n periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)\n The lower and upper bound on periodicity\n\n \"\"\"\n def __init__(self, length_scale=1.0, periodicity=1.0,\n length_scale_bounds=(1e-5, 1e5),\n periodicity_bounds=(1e-5, 1e5)):\n self.length_scale = length_scale\n self.periodicity = periodicity\n self.length_scale_bounds = length_scale_bounds\n self.periodicity_bounds = periodicity_bounds\n\n @property\n def hyperparameter_length_scale(self):\n return Hyperparameter(\n \"length_scale\", \"numeric\", self.length_scale_bounds)\n\n @property\n def hyperparameter_periodicity(self):\n return Hyperparameter(\n \"periodicity\", \"numeric\", self.periodicity_bounds)\n\n def __call__(self, X, Y=None, eval_gradient=False):\n \"\"\"Return the kernel k(X, Y) and optionally its gradient.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Y : array, shape (n_samples_Y, n_features), (optional, default=None)\n Right argument of the returned kernel k(X, Y). If None, k(X, X)\n if evaluated instead.\n\n eval_gradient : bool (optional, default=False)\n Determines whether the gradient with respect to the kernel\n hyperparameter is determined. Only supported when Y is None.\n\n Returns\n -------\n K : array, shape (n_samples_X, n_samples_Y)\n Kernel k(X, Y)\n\n K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)\n The gradient of the kernel k(X, X) with respect to the\n hyperparameter of the kernel. Only returned when eval_gradient\n is True.\n \"\"\"\n X = np.atleast_2d(X)\n if Y is None:\n dists = squareform(pdist(X, metric='euclidean'))\n arg = np.pi * dists / self.periodicity\n sin_of_arg = np.sin(arg)\n K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)\n else:\n if eval_gradient:\n raise ValueError(\n \"Gradient can only be evaluated when Y is None.\")\n dists = cdist(X, Y, metric='euclidean')\n K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)\n / self.length_scale) ** 2)\n\n if eval_gradient:\n cos_of_arg = np.cos(arg)\n # gradient with respect to length_scale\n if not self.hyperparameter_length_scale.fixed:\n length_scale_gradient = \\\n 4 / self.length_scale**2 * sin_of_arg**2 * K\n length_scale_gradient = length_scale_gradient[:, :, np.newaxis]\n else: # length_scale is kept fixed\n length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))\n # gradient with respect to p\n if not self.hyperparameter_periodicity.fixed:\n periodicity_gradient = \\\n 4 * arg / self.length_scale**2 * cos_of_arg \\\n * sin_of_arg * K\n periodicity_gradient = periodicity_gradient[:, :, np.newaxis]\n else: # p is kept fixed\n periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))\n\n return K, np.dstack((length_scale_gradient, periodicity_gradient))\n else:\n return K\n\n def __repr__(self):\n return \"{0}(length_scale={1:.3g}, periodicity={2:.3g})\".format(\n self.__class__.__name__, self.length_scale, self.periodicity)\n\n\nclass DotProduct(Kernel):\n r\"\"\"Dot-Product kernel.\n\n The DotProduct kernel is non-stationary and can be obtained from linear\n regression by putting N(0, 1) priors on the coefficients of x_d (d = 1, . .\n . , D) and a prior of N(0, \\sigma_0^2) on the bias. 
The DotProduct kernel\n is invariant to a rotation of the coordinates about the origin, but not\n translations. It is parameterized by a parameter sigma_0^2. For\n sigma_0^2 =0, the kernel is called the homogeneous linear kernel, otherwise\n it is inhomogeneous. The kernel is given by\n\n k(x_i, x_j) = sigma_0 ^ 2 + x_i \\cdot x_j\n\n The DotProduct kernel is commonly combined with exponentiation.\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n sigma_0 : float >= 0, default: 1.0\n Parameter controlling the inhomogenity of the kernel. If sigma_0=0,\n the kernel is homogenous.\n\n sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)\n The lower and upper bound on l\n\n \"\"\"\n\n def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):\n self.sigma_0 = sigma_0\n self.sigma_0_bounds = sigma_0_bounds\n\n @property\n def hyperparameter_sigma_0(self):\n return Hyperparameter(\"sigma_0\", \"numeric\", self.sigma_0_bounds)\n\n def __call__(self, X, Y=None, eval_gradient=False):\n \"\"\"Return the kernel k(X, Y) and optionally its gradient.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Y : array, shape (n_samples_Y, n_features), (optional, default=None)\n Right argument of the returned kernel k(X, Y). If None, k(X, X)\n if evaluated instead.\n\n eval_gradient : bool (optional, default=False)\n Determines whether the gradient with respect to the kernel\n hyperparameter is determined. Only supported when Y is None.\n\n Returns\n -------\n K : array, shape (n_samples_X, n_samples_Y)\n Kernel k(X, Y)\n\n K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)\n The gradient of the kernel k(X, X) with respect to the\n hyperparameter of the kernel. Only returned when eval_gradient\n is True.\n \"\"\"\n X = np.atleast_2d(X)\n if Y is None:\n K = np.inner(X, X) + self.sigma_0 ** 2\n else:\n if eval_gradient:\n raise ValueError(\n \"Gradient can only be evaluated when Y is None.\")\n K = np.inner(X, Y) + self.sigma_0 ** 2\n\n if eval_gradient:\n if not self.hyperparameter_sigma_0.fixed:\n K_gradient = np.empty((K.shape[0], K.shape[1], 1))\n K_gradient[..., 0] = 2 * self.sigma_0 ** 2\n return K, K_gradient\n else:\n return K, np.empty((X.shape[0], X.shape[0], 0))\n else:\n return K\n\n def diag(self, X):\n \"\"\"Returns the diagonal of the kernel k(X, X).\n\n The result of this method is identical to np.diag(self(X)); however,\n it can be evaluated more efficiently since only the diagonal is\n evaluated.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Returns\n -------\n K_diag : array, shape (n_samples_X,)\n Diagonal of kernel k(X, X)\n \"\"\"\n return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2\n\n def is_stationary(self):\n \"\"\"Returns whether the kernel is stationary. 
\"\"\"\n return False\n\n def __repr__(self):\n return \"{0}(sigma_0={1:.3g})\".format(\n self.__class__.__name__, self.sigma_0)\n\n\n# adapted from scipy/optimize/optimize.py for functions with 2d output\ndef _approx_fprime(xk, f, epsilon, args=()):\n f0 = f(*((xk,) + args))\n grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)\n ei = np.zeros((len(xk), ), float)\n for k in range(len(xk)):\n ei[k] = 1.0\n d = epsilon * ei\n grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]\n ei[k] = 0.0\n return grad\n\n\nclass PairwiseKernel(Kernel):\n \"\"\"Wrapper for kernels in sklearn.metrics.pairwise.\n\n A thin wrapper around the functionality of the kernels in\n sklearn.metrics.pairwise.\n\n Note: Evaluation of eval_gradient is not analytic but numeric and all\n kernels support only isotropic distances. The parameter gamma is\n considered to be a hyperparameter and may be optimized. The other\n kernel parameters are set directly at initialization and are kept\n fixed.\n\n .. versionadded:: 0.18\n\n Parameters\n ----------\n gamma : float >= 0, default: 1.0\n Parameter gamma of the pairwise kernel specified by metric\n\n gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)\n The lower and upper bound on gamma\n\n metric : string, or callable, default: \"linear\"\n The metric to use when calculating kernel between instances in a\n feature array. If metric is a string, it must be one of the metrics\n in pairwise.PAIRWISE_KERNEL_FUNCTIONS.\n If metric is \"precomputed\", X is assumed to be a kernel matrix.\n Alternatively, if metric is a callable function, it is called on each\n pair of instances (rows) and the resulting value recorded. The callable\n should take two arrays from X as input and return a value indicating\n the distance between them.\n\n pairwise_kernels_kwargs : dict, default: None\n All entries of this dict (if any) are passed as keyword arguments to\n the pairwise kernel function.\n\n \"\"\"\n\n def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric=\"linear\",\n pairwise_kernels_kwargs=None):\n self.gamma = gamma\n self.gamma_bounds = gamma_bounds\n self.metric = metric\n self.pairwise_kernels_kwargs = pairwise_kernels_kwargs\n\n @property\n def hyperparameter_gamma(self):\n return Hyperparameter(\"gamma\", \"numeric\", self.gamma_bounds)\n\n def __call__(self, X, Y=None, eval_gradient=False):\n \"\"\"Return the kernel k(X, Y) and optionally its gradient.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Y : array, shape (n_samples_Y, n_features), (optional, default=None)\n Right argument of the returned kernel k(X, Y). If None, k(X, X)\n if evaluated instead.\n\n eval_gradient : bool (optional, default=False)\n Determines whether the gradient with respect to the kernel\n hyperparameter is determined. Only supported when Y is None.\n\n Returns\n -------\n K : array, shape (n_samples_X, n_samples_Y)\n Kernel k(X, Y)\n\n K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)\n The gradient of the kernel k(X, X) with respect to the\n hyperparameter of the kernel. 
Only returned when eval_gradient\n is True.\n \"\"\"\n pairwise_kernels_kwargs = self.pairwise_kernels_kwargs\n if self.pairwise_kernels_kwargs is None:\n pairwise_kernels_kwargs = {}\n\n X = np.atleast_2d(X)\n K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,\n filter_params=True,\n **pairwise_kernels_kwargs)\n if eval_gradient:\n if self.hyperparameter_gamma.fixed:\n return K, np.empty((X.shape[0], X.shape[0], 0))\n else:\n # approximate gradient numerically\n def f(gamma): # helper function\n return pairwise_kernels(\n X, Y, metric=self.metric, gamma=np.exp(gamma),\n filter_params=True, **pairwise_kernels_kwargs)\n return K, _approx_fprime(self.theta, f, 1e-10)\n else:\n return K\n\n def diag(self, X):\n \"\"\"Returns the diagonal of the kernel k(X, X).\n\n The result of this method is identical to np.diag(self(X)); however,\n it can be evaluated more efficiently since only the diagonal is\n evaluated.\n\n Parameters\n ----------\n X : array, shape (n_samples_X, n_features)\n Left argument of the returned kernel k(X, Y)\n\n Returns\n -------\n K_diag : array, shape (n_samples_X,)\n Diagonal of kernel k(X, X)\n \"\"\"\n # We have to fall back to slow way of computing diagonal\n return np.apply_along_axis(self, 1, X).ravel()\n\n def is_stationary(self):\n \"\"\"Returns whether the kernel is stationary. \"\"\"\n return self.metric in [\"rbf\"]\n\n def __repr__(self):\n return \"{0}(gamma={1}, metric={2})\".format(\n self.__class__.__name__, self.gamma, self.metric)\n", "# Remove this module in version 0.21\n\nfrom scipy.sparse.linalg import eigs as _eigs, eigsh as _eigsh, svds as _svds\n\nfrom .deprecation import deprecated\n\n\n@deprecated(\"sklearn.utils.arpack.eigs was deprecated in version 0.19 and \"\n \"will be removed in 0.21. Use scipy.sparse.linalg.eigs instead.\")\ndef eigs(A, *args, **kwargs):\n return _eigs(A, *args, **kwargs)\n\n\n@deprecated(\"sklearn.utils.arpack.eigsh was deprecated in version 0.19 and \"\n \"will be removed in 0.21. Use scipy.sparse.linalg.eigsh instead.\")\ndef eigsh(A, *args, **kwargs):\n return _eigsh(A, *args, **kwargs)\n\n\n@deprecated(\"sklearn.utils.arpack.svds was deprecated in version 0.19 and \"\n \"will be removed in 0.21. Use scipy.sparse.linalg.svds instead.\")\ndef svds(A, *args, **kwargs):\n return _svds(A, *args, **kwargs)\n" ]
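A minimal usage sketch for the kernel module listed above. It only relies on names and behavior defined in the listing, and assumes the module is installed in its standard location, sklearn.gaussian_process.kernels (scikit-learn of the ~0.18-0.20 era, matching the versionadded tags):

import numpy as np
from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel

# Compose kernels with the operators overloaded on the Kernel base class.
kernel = ConstantKernel(1.0) * RBF(length_scale=2.0) + WhiteKernel(noise_level=0.1)

X = np.random.RandomState(0).rand(5, 3)
K = kernel(X)              # 5x5 covariance matrix k(X, X)
K_diag = kernel.diag(X)    # same values as np.diag(K), computed more cheaply

print(kernel.theta)        # log-transformed free hyperparameters, shape (3,)
print(kernel.bounds)       # log-transformed optimization bounds, shape (3, 2)

# Passing "fixed" as bounds excludes a hyperparameter from theta entirely.
fixed = RBF(length_scale=2.0, length_scale_bounds="fixed")
assert fixed.theta.shape == (0,)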
[ [ "pandas.core.nanops.nanall", "pandas.core.nanops.nansum", "pandas.core.nanops.nansem", "pandas._libs.lib.is_scalar", "numpy.dtype", "numpy.concatenate", "pandas.core.algorithms.take", "pandas.core.nanops.nanmin", "pandas.core.nanops.nanmax", "pandas.core.dtypes.inference.is_array_like", "pandas.core.nanops.nanany", "pandas.core.nanops.nanvar", "pandas.unique", "pandas.util._validators.validate_fillna_kwargs", "pandas.core.dtypes.inference.is_list_like", "pandas.core.nanops.nankurt", "pandas.core.nanops.nanmedian", "numpy.result_type", "pandas.core.nanops.nanskew", "pandas.isna", "numpy.asarray", "numpy.errstate", "pandas.core.internals.arrays.extract_array", "pandas.core.nanops.nanprod", "pandas.core.nanops.nanmean", "pandas.core.nanops.nanstd" ], [ "sklearn.utils.testing.assert_allclose_dense_sparse", "sklearn.utils.testing.assert_false", "numpy.min", "sklearn.feature_extraction.text.CountVectorizer", "numpy.sort", "sklearn.utils.testing.clean_warning_registry", "sklearn.model_selection.cross_val_score", "sklearn.svm.LinearSVC", "numpy.max", "numpy.linalg.norm", "sklearn.utils.testing.SkipTest", "sklearn.feature_extraction.text.strip_accents_ascii", "numpy.testing.assert_array_almost_equal", "sklearn.utils.testing.ignore_warnings", "sklearn.utils.testing.assert_warns_message", "scipy.sparse.rand", "numpy.argmax", "sklearn.utils.testing.assert_less", "sklearn.base.clone", "scipy.sparse.csr_matrix", "sklearn.feature_extraction.text.strip_accents_unicode", "numpy.array", "scipy.sparse.csc_matrix", "sklearn.utils.testing.assert_no_warnings", "sklearn.utils.testing.assert_equal", "sklearn.utils.testing.assert_raises", "sklearn.feature_extraction.text.TfidfVectorizer", "sklearn.model_selection.train_test_split", "sklearn.feature_extraction.text.TfidfTransformer", "numpy.random.RandomState", "numpy.sum", "sklearn.utils.testing.assert_greater", "numpy.testing.assert_array_equal", "sklearn.utils.testing.assert_raise_message", "sklearn.utils.testing.assert_not_equal", "sklearn.feature_extraction.text.HashingVectorizer", "sklearn.model_selection.GridSearchCV", "numpy.unique" ], [ "numpy.repeat", "scipy.special.gamma", "scipy.special.kv", "numpy.exp", "numpy.finfo", "numpy.apply_along_axis", "numpy.cos", "numpy.inner", "numpy.concatenate", "numpy.sin", "numpy.empty", "numpy.log", "numpy.eye", "numpy.ndim", "numpy.append", "numpy.sqrt", "numpy.isfinite", "numpy.vstack", "numpy.atleast_2d", "numpy.array", "numpy.zeros", "scipy.spatial.distance.squareform", "numpy.einsum", "numpy.dstack", "numpy.hstack", "scipy.spatial.distance.cdist", "numpy.squeeze", "scipy.spatial.distance.pdist", "numpy.iterable", "numpy.fill_diagonal", "numpy.ones", "numpy.ravel", "numpy.all" ], [ "scipy.sparse.linalg.eigs", "scipy.sparse.linalg.svds", "scipy.sparse.linalg.eigsh" ] ]
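The Matern implementation above special-cases nu in {0.5, 1.5, 2.5} and falls back to the modified Bessel function otherwise; the two branches should agree where they overlap. A small standalone sanity check of that claim for nu=1.5, using the same scipy functions the module imports:

import numpy as np
from scipy.special import kv, gamma

d = np.array([0.3, 1.0, 2.5])   # distances already divided by length_scale
nu = 1.5

# Closed form used by the nu == 1.5 branch.
closed_form = (1.0 + np.sqrt(3) * d) * np.exp(-np.sqrt(3) * d)

# General Bessel-function branch, evaluated at the same nu.
tmp = np.sqrt(2 * nu) * d
bessel_form = (2 ** (1.0 - nu)) / gamma(nu) * tmp ** nu * kv(nu, tmp)

assert np.allclose(closed_form, bessel_form)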
itohamy/self-conditioned-gan
[ "f01156ea4d8f0c7cb7a75eb1be5d19c11fbdda50" ]
[ "metrics.py" ]
[ "import argparse\nimport os\nimport json\nfrom tqdm import tqdm\n\nimport numpy as np\nimport torch\n\nfrom gan_training.config import load_config\nfrom seeded_sampler import SeededSampler\n\n#from utils.classifiers.pytorch_playground.utee import misc\n#print555 = misc.logger.info\n\nparser = argparse.ArgumentParser('Computes numbers used in paper and caches them to a result files. Examples include FID, IS, reverse-KL, # modes, FSD, cluster NMI, Purity.')\nparser.add_argument('paths', nargs='+', type=str, help='list of configs for each experiment')\nparser.add_argument('--it', type=int, default=-1, help='If set, computes numbers only for that iteration')\nparser.add_argument('--every', type=int, default=-1, help='skips some checkpoints and only computes those whose iteration number are divisible by every')\nparser.add_argument('--fid', action='store_true', help='compute FID metric')\nparser.add_argument('--inception', action='store_true', help='compute IS metric')\nparser.add_argument('--modes', action='store_true', help='compute # modes and reverse-KL metric')\nparser.add_argument('--fsd', action='store_true', help='compute FSD metric')\nparser.add_argument('--cluster_metrics', action='store_true', help='compute clustering metrics (NMI, purity)')\nparser.add_argument('--device', type=int, default=1, help='device to run the metrics on (can run into OOM issues if same as main device)')\nargs = parser.parse_args()\n\ndevice = args.device\ndirs = list(args.paths)\n\nN = 50000\nBS = 100\n\ndatasets = ['imagenet', 'cifar', 'stacked_mnist', 'places']\n\ndataset_to_img = {\n 'places': 'output/places_gt_imgs.npz',\n 'imagenet': 'output/imagenet_gt_imgs.npz'}\n\n\ndef load_results(results_dir):\n results = []\n for results_file in ['fid_results.json', 'is_results.json', 'kl_results.json', 'nmodes_results.json', 'fsd_results.json', 'cluster_metrics.json']:\n results_file = os.path.join(results_dir, results_file)\n if not os.path.exists(results_file):\n with open(results_file, 'w') as f:\n f.write(json.dumps({}))\n with open(results_file) as f:\n results.append(json.load(f))\n return results\n\n\ndef get_dataset_from_path(path):\n for name in datasets:\n if name in path:\n print('Inferred dataset:', name)\n return name\n\n\ndef pt_to_np(imgs):\n '''normalizes pytorch image in [-1, 1] to [0, 255]'''\n return (imgs.permute(0, 2, 3, 1).mul_(0.5).add_(0.5).mul_(255)).clamp_(0, 255).numpy()\n\n\ndef sample(sampler):\n with torch.no_grad():\n samples = []\n for _ in tqdm(range(N // BS + 1)):\n x_real = sampler.sample(BS)[0].detach().cpu()\n x_real = [x.detach().cpu() for x in x_real]\n samples.extend(x_real)\n samples = torch.stack(samples[:N], dim=0)\n return pt_to_np(samples)\n\n\nroot = './'\n\nwhile len(dirs) > 0:\n path = dirs.pop()\n if os.path.isdir(path): # search down tree for config files\n for d1 in os.listdir(path):\n dirs.append(os.path.join(path, d1))\n else:\n if path.endswith('.yaml'):\n config = load_config(path, default_path='configs/default.yaml')\n outdir = config['training']['out_dir']\n\n if not os.path.exists(outdir) and config['pretrained'] == {}:\n print('Skipping', path, 'outdir', outdir)\n continue\n\n results_dir = os.path.join(outdir, 'results')\n checkpoint_dir = os.path.join(outdir, 'chkpts')\n os.makedirs(results_dir, exist_ok=True)\n\n fid_results, is_results, kl_results, nmodes_results, fsd_results, cluster_results = load_results(results_dir)\n\n checkpoint_files = os.listdir(checkpoint_dir) if os.path.exists(checkpoint_dir) else []\n if config['pretrained'] != {}:\n 
checkpoint_files = checkpoint_files + ['pretrained']\n\n for checkpoint in checkpoint_files:\n if (checkpoint.endswith('.pt') and checkpoint != 'model.pt') or checkpoint == 'pretrained':\n print('Computing for', checkpoint)\n if 'model' in checkpoint:\n # infer iteration number from checkpoint file w/o loading it\n if 'model_' in checkpoint:\n it = int(checkpoint.split('model_')[1].split('.pt')[0])\n else:\n continue\n if args.every != 0 and it % args.every != 0:\n continue\n # iteration 0 is often useless, skip it\n if it == 0 or args.it != -1 and it != args.it:\n continue\n elif checkpoint == 'pretrained':\n it = 'pretrained'\n it = str(it)\n\n clusterer_path = os.path.join(root, checkpoint_dir, f'clusterer{it}.pkl')\n # don't save samples for each iteration for disk space\n samples_path = os.path.join(outdir, 'results', 'samples.npz')\n\n targets = []\n if args.inception:\n targets = targets + [is_results]\n if args.fid:\n targets = targets + [fid_results]\n if args.modes:\n targets = targets + [kl_results, nmodes_results]\n if args.fsd:\n targets = targets + [fsd_results]\n\n if all([it in result for result in targets]):\n print('Already generated', it, path)\n else:\n sampler = SeededSampler(path,\n model_path=os.path.join(root, checkpoint_dir, checkpoint),\n clusterer_path=clusterer_path,\n pretrained=config['pretrained'])\n samples = sample(sampler)\n dataset_name = get_dataset_from_path(path)\n np.savez(samples_path, fake=samples, real=dataset_name)\n\n arguments = f'--samples {samples_path} --it {it} --results_dir {results_dir}'\n if args.fid and it not in fid_results:\n os.system(f'CUDA_VISIBLE_DEVICES={device} python gan_training/metrics/fid.py {arguments}')\n if args.inception and it not in is_results:\n os.system(f'CUDA_VISIBLE_DEVICES={device} python gan_training/metrics/tf_is/inception_score.py {arguments}')\n if args.modes and (it not in kl_results or it not in nmodes_results):\n os.system(f'CUDA_VISIBLE_DEVICES={device} python utils/get_empirical_distribution.py {arguments} --dataset {dataset_name}')\n if args.cluster_metrics and it not in cluster_results:\n os.system(f'CUDA_VISIBLE_DEVICES={device} python cluster_metrics.py {path} --model_it {it}')\n if args.fsd and it not in fsd_results:\n gt_path = dataset_to_img[dataset_name]\n os.system(f'CUDA_VISIBLE_DEVICES={device} python -m seeing.fsd {gt_path} {samples_path} --it {it} --results_dir {results_dir}')\n" ]
[ [ "torch.no_grad", "numpy.savez", "torch.stack" ] ]
YBrady/pands-problem-set
[ "06f4bfb7ad520ac8e1e7c15aa8fa1692b71711d4" ]
[ "problem-10.py" ]
[ "# Problem No 10\n# --------------------------------------------------------------------------------\n# Write a program that displays a plot of the functions x, x2 and 2x \n# in the range [0, 4].\n# --------------------------------------------------------------------------------\n#\n# Author: Yvonne Brady\n# Student ID: G00376355\n#\n# --------------------------------------------------------------------------------\n\n\n# Import the matplotlib to draw the plots \nimport matplotlib.pyplot as plt \n\n# Setting the x values \nx = range (4)\n\n# Plot #1: f(x) = x\ny1 = x\n# Plot #2: f(x) = x^2\ny2 = [x*x for x in range(4)]\n# Plot #3: f(x) = 2^x\ny3 = [2**x for x in range(4)]\n\n# Plotting the points and the legend \n# Plot #1: f(x) = x\nplt.plot(x, y1, label = \"f(x): x\")\n# Plot #2: f(x) = x^2\nplt.plot(x, y2, label = \"f(x): x^2\")\n# Plot #3: f(x) = 2^x\nplt.plot(x, y3, label = \"f(x): 2^x\") \n\n# Add a plot title\nplt.title(\"Problem #10 of problem set\")\n# Name the x axis\nplt.xlabel(\"x-axis\")\n# Name the y axis\nplt.ylabel(\"y-axis\")\n# Position the legend\nplt.legend(loc='upper left')\n\n# Show the plotted graphs \nplt.show() " ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
Satertek/adventofcode
[ "9282d99bb9f74e02217b24b07421f48103a12b47" ]
[ "aoc2020/day5.py" ]
[ "import numpy as np\n\ndef get_day5(infile=\"./aoc2020/day5_input.txt\", part2=False):\n with open(infile, 'r') as f:\n data = f.read().split(\"\\n\")\n seats = []\n for each in data:\n seats.append(\n [int(each[0:7].replace(\"F\",\"0\").replace(\"B\",\"1\"), 2), \n int(each[7: ].replace(\"L\",\"0\").replace(\"R\",\"1\"), 2)\n ])\n \n seat_id = []\n for row, column in seats:\n seat_id.append(\n row * 8 + column\n )\n if not part2:\n return np.max(seat_id)\n else:\n missing_values = set(np.arange(0,np.max(seat_id))).difference(set(seat_id))\n # Return seat number that is between two taken seats\n for each in missing_values:\n if each - 1 in seat_id and each + 1 in seat_id:\n return each\n\nif __name__ == \"__main__\":\n print(f\"Part 1: {get_day5()}\")\n print(f\"Part 2: {get_day5(part2=True)}\")" ]
[ [ "numpy.max" ] ]
nmonath/Prob-CBR
[ "94daa1b79123ca326bba357db7a50c296e4afc6b" ]
[ "prob_cbr/data/stream_utils.py" ]
[ "from collections import Counter\nimport logging\nimport numpy as np\nimport os\n\nfrom prob_cbr.data.data_utils import get_inv_relation, is_inv_relation\nlogger = logging.getLogger('stream_utils')\nlogger.setLevel(logging.INFO)\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\nformatter = logging.Formatter(\"[%(asctime)s \\t %(message)s]\",\n \"%Y-%m-%d %H:%M:%S\")\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\ndef read_triple_raw(file_path, dataset_name):\n \"\"\"\n Read triples and map them into ids.\n \"\"\"\n triples = []\n with open(file_path) as fin:\n for line in fin:\n h, r, t = line.strip().split('\\t')\n if not is_inv_relation(r, dataset_name):\n triples.append((h, r, t))\n return triples\n\n\nclass KBStream:\n def __init__(self, dataset_name, data_path, test_file_name=None,\n stream_init_proportion=0.5, n_stream_updates=10, seed=42):\n self.dataset_name = dataset_name\n self.data_path = data_path\n self.stream_init_proportion = stream_init_proportion\n self.n_stream_updates = n_stream_updates\n self.stream_rng = np.random.default_rng(seed)\n self.train_rng = np.random.default_rng(seed)\n\n self.entity_set, self.relation_set = set(), set()\n\n with open(os.path.join(self.data_path, 'entities.dict')) as fin:\n for line in fin:\n eid, entity = line.strip().split('\\t')\n self.entity_set.add(entity)\n\n with open(os.path.join(self.data_path, 'relations.dict')) as fin:\n for line in fin:\n rid, relation = line.strip().split('\\t')\n self.relation_set.add(relation)\n\n if test_file_name is None or test_file_name == '':\n test_file_name = 'test.txt'\n if dataset_name == 'nell':\n graph_file = 'full_graph.txt'\n else:\n graph_file = 'graph.txt'\n self.train_triples = read_triple_raw(os.path.join(self.data_path, graph_file), self.dataset_name)\n self.valid_triples = read_triple_raw(os.path.join(self.data_path, 'dev.txt'), self.dataset_name)\n self.test_triples = read_triple_raw(os.path.join(self.data_path, test_file_name), self.dataset_name)\n self.kb_state = {'entity2id': {}, 'relation2id': {},\n 'train_triples': [], 'valid_triples': [], 'test_triples': []}\n\n def get_max_num_entities(self):\n return len(self.entity_set)\n\n def get_max_num_relations(self):\n return 2*len(self.relation_set)\n\n def get_init_kb(self):\n # INIT\n # Sample 10% of the most common nodes (hubs)\n # Sample (stream_init_proportion - 10)% of the remaining nodes randomly\n node_usage_train = Counter([e for (e, _, _) in self.train_triples] + [e for (_, _, e) in self.train_triples])\n init_entities = [_ent for _ent, _ in node_usage_train.most_common(len(node_usage_train) // 10)]\n for _ent in init_entities:\n del node_usage_train[_ent]\n permutation = self.stream_rng.permutation(len(node_usage_train))\n usage_list = list(node_usage_train.most_common())\n sample_size = int(np.ceil(max(self.stream_init_proportion - 0.1, 0.0)*len(self.entity_set)))\n init_entities.extend([usage_list[j][0] for j in permutation[:sample_size]])\n assert len(init_entities) == len(set(init_entities))\n init_entities = set(init_entities)\n\n entity2id, relation2id = {}, {}\n id2entity, id2relation = {}, {}\n for eid, entity in enumerate(sorted(init_entities)):\n entity2id[entity] = eid\n id2entity[eid] = entity\n\n edge_coverage = {'train': 0, 'valid': 0, 'test': 0}\n init_train_triples, init_valid_triples, init_test_triples = [], [], []\n for edge in self.train_triples:\n e1, r, e2 = edge\n if e1 in init_entities and e2 in init_entities:\n if r not in relation2id:\n new_id = len(relation2id)\n relation2id[r] = new_id\n 
id2relation[new_id] = r\n new_id = len(relation2id)\n r_inv = get_inv_relation(r, self.dataset_name)\n relation2id[r_inv] = new_id\n id2relation[new_id] = r_inv\n init_train_triples.append((e1, r, e2))\n edge_coverage['train'] += 1\n\n for edge in self.valid_triples:\n e1, r, e2 = edge\n if e1 in init_entities and e2 in init_entities:\n if r not in relation2id:\n new_id = len(relation2id)\n relation2id[r] = new_id\n id2relation[new_id] = r\n new_id = len(relation2id)\n r_inv = get_inv_relation(r, self.dataset_name)\n relation2id[r_inv] = new_id\n id2relation[new_id] = r_inv\n init_valid_triples.append((e1, r, e2))\n edge_coverage['valid'] += 1\n\n for edge in self.test_triples:\n e1, r, e2 = edge\n if e1 in init_entities and e2 in init_entities:\n if r not in relation2id:\n new_id = len(relation2id)\n relation2id[r] = new_id\n id2relation[new_id] = r\n new_id = len(relation2id)\n r_inv = get_inv_relation(r, self.dataset_name)\n relation2id[r_inv] = new_id\n id2relation[new_id] = r_inv\n init_test_triples.append((e1, r, e2))\n edge_coverage['test'] += 1\n\n logger.info(f\"[STREAM] Init edge_coverage: \"\n f\"train: {edge_coverage['train']} ({edge_coverage['train'] / len(self.train_triples) * 100:0.2f}%) \"\n f\"valid: {edge_coverage['valid']} ({edge_coverage['valid'] / len(self.valid_triples) * 100:0.2f}%) \"\n f\"test: {edge_coverage['test']} ({edge_coverage['test'] / len(self.test_triples) * 100:0.2f}%)\")\n logger.info(f'[STREAM] Init entity_coverage:'\n f' {len(init_entities)} ({len(init_entities) / (len(self.entity_set)) * 100:0.2f}%)')\n\n self.kb_state['entity2id'] = entity2id.copy()\n self.kb_state['relation2id'] = relation2id.copy()\n self.kb_state['id2entity'] = id2entity.copy()\n self.kb_state['id2relation'] = id2relation.copy()\n self.kb_state['train_triples'] = init_train_triples.copy()\n self.kb_state['valid_triples'] = init_valid_triples.copy()\n self.kb_state['test_triples'] = init_test_triples.copy()\n\n # RotatE explicitly adds them in model\n rev_train_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in init_train_triples]\n rev_valid_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in init_valid_triples]\n rev_test_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in init_test_triples]\n init_train_triples = init_train_triples + rev_train_triples\n init_valid_triples = init_valid_triples + rev_valid_triples\n init_test_triples = init_test_triples + rev_test_triples\n\n return entity2id, id2entity, relation2id, id2relation, \\\n init_train_triples + init_valid_triples + init_test_triples,\\\n init_train_triples, init_valid_triples, init_test_triples\n\n def batch_generator(self):\n for step in range(self.n_stream_updates):\n logger.info(f'[STREAM] Generating batch {step + 1}...')\n entity2id, relation2id = self.kb_state['entity2id'], self.kb_state['relation2id']\n id2entity, id2relation = self.kb_state['id2entity'], self.kb_state['id2relation']\n curr_train_triples, curr_valid_triples, curr_test_triples = \\\n self.kb_state['train_triples'], self.kb_state['valid_triples'], self.kb_state['test_triples']\n new_train_triples, new_valid_triples, new_test_triples = [], [], []\n\n seen_entities = set(entity2id.keys())\n unseen_entities = sorted(self.entity_set.difference(seen_entities))\n permutation = self.stream_rng.permutation(len(unseen_entities))\n sample_size = int(np.ceil((1 - self.stream_init_proportion) / self.n_stream_updates * len(self.entity_set)))\n if step == self.n_stream_updates - 
1:\n sample_size = len(unseen_entities)\n new_entities = [unseen_entities[j] for j in permutation[:sample_size]]\n new_entities = set(new_entities)\n\n for entity in sorted(new_entities):\n if entity not in entity2id:\n new_id = len(entity2id)\n entity2id[entity] = new_id\n id2entity[new_id] = entity\n\n for edge in self.train_triples:\n e1, r, e2 = edge\n if e1 in seen_entities and e2 in seen_entities:\n continue\n if (e1 in new_entities or e1 in seen_entities) and (e2 in new_entities or e2 in seen_entities):\n if r not in relation2id:\n new_id = len(relation2id)\n relation2id[r] = new_id\n id2relation[new_id] = r\n new_id = len(relation2id)\n r_inv = get_inv_relation(r, self.dataset_name)\n relation2id[r_inv] = new_id\n id2relation[new_id] = r_inv\n new_train_triples.append((e1, r, e2))\n\n for edge in self.valid_triples:\n e1, r, e2 = edge\n if e1 in seen_entities and e2 in seen_entities:\n continue\n if (e1 in new_entities or e1 in seen_entities) and (e2 in new_entities or e2 in seen_entities):\n if r not in relation2id:\n new_id = len(relation2id)\n relation2id[r] = new_id\n id2relation[new_id] = r\n new_id = len(relation2id)\n r_inv = get_inv_relation(r, self.dataset_name)\n relation2id[r_inv] = new_id\n id2relation[new_id] = r_inv\n new_valid_triples.append((e1, r, e2))\n\n for edge in self.test_triples:\n e1, r, e2 = edge\n if e1 in seen_entities and e2 in seen_entities:\n continue\n if (e1 in new_entities or e1 in seen_entities) and (e2 in new_entities or e2 in seen_entities):\n if r not in relation2id:\n new_id = len(relation2id)\n relation2id[r] = new_id\n id2relation[new_id] = r\n new_id = len(relation2id)\n r_inv = get_inv_relation(r, self.dataset_name)\n relation2id[r_inv] = new_id\n id2relation[new_id] = r_inv\n new_test_triples.append((e1, r, e2))\n\n all_train_triples = new_train_triples + curr_train_triples\n all_valid_triples = new_valid_triples + curr_valid_triples\n all_test_triples = new_test_triples + curr_test_triples\n logger.info(f\"[STREAM] Batch edge_coverage: \"\n f\"train: {len(new_train_triples)} ({len(new_train_triples) / len(self.train_triples) * 100:0.2f}%) \"\n f\"valid: {len(new_valid_triples)} ({len(new_valid_triples) / len(self.valid_triples) * 100:0.2f}%) \"\n f\"test: {len(new_test_triples)} ({len(new_test_triples) / len(self.test_triples) * 100:0.2f}%)\")\n logger.info(f\"[STREAM] Total edge_coverage: \"\n f\"train: {len(all_train_triples)} ({len(all_train_triples) / len(self.train_triples) * 100:0.2f}%) \"\n f\"valid: {len(all_valid_triples)} ({len(all_valid_triples) / len(self.valid_triples) * 100:0.2f}%) \"\n f\"test: {len(all_test_triples)} ({len(all_test_triples) / len(self.test_triples) * 100:0.2f}%)\")\n logger.info(f'[STREAM] Total entity_coverage:'\n f' {len(entity2id)} ({len(entity2id) / (len(self.entity_set)) * 100:0.2f}%)')\n\n self.kb_state['entity2id'] = entity2id.copy()\n self.kb_state['relation2id'] = relation2id.copy()\n self.kb_state['id2entity'] = id2entity.copy()\n self.kb_state['id2relation'] = id2relation.copy()\n self.kb_state['train_triples'] = all_train_triples.copy()\n self.kb_state['valid_triples'] = all_valid_triples.copy()\n self.kb_state['test_triples'] = all_test_triples.copy()\n\n # RotatE explicitly adds them in model\n rev_train_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in all_train_triples]\n rev_valid_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in all_valid_triples]\n rev_test_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, 
e2) in all_test_triples]\n all_train_triples = all_train_triples + rev_train_triples\n all_valid_triples = all_valid_triples + rev_valid_triples\n all_test_triples = all_test_triples + rev_test_triples\n\n rev_valid_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in new_valid_triples]\n rev_test_triples = [(e2, get_inv_relation(r, self.dataset_name), e1) for (e1, r, e2) in new_test_triples]\n new_valid_triples = new_valid_triples + rev_valid_triples\n new_test_triples = new_test_triples + rev_test_triples\n\n yield entity2id, id2entity, relation2id, id2relation, \\\n all_train_triples + all_valid_triples + all_test_triples, \\\n all_train_triples, all_valid_triples, new_valid_triples, all_test_triples, new_test_triples\n" ]
[ [ "numpy.random.default_rng" ] ]
ZachT1711/language
[ "de84080fc8a239a7271aad1d447fcb38a895790b" ]
[ "language/nql/nql/dataset.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Dataset object, which makes it easier to train NQL.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport nql\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\n\n\ndef tuple_generator_builder(\n context,\n tuple_input,\n type_specs,\n normalize_outputs = True,\n field_separator = '\\t',\n entity_separator = ' || ',\n start_line = 0,\n end_line = None):\n \"\"\"Create iterator over tuples produced by parsing lines from a file.\n\n The lines are delimited by field_separator, with each being a different type\n of feature. By convention the last field is the desired output of the model\n given the first n-1 fields as input, and if normalize_outputs is\n True, then this field will be L1-normalized.\n\n The types of each field are given by the list of type_specs. The possible\n type_specs are\n\n 1) The string name of an entity type declared in context. In this case the\n corresponding part of 'lines' should be a set of entity names, of the provided\n type, separated by the 'entity_separator' string. This will be converted to a\n k-hot representation of that set of entities.\n\n 2) The python type str. In this case the corresponding part of 'lines' will\n be passed on as a tf.string.\n\n Args:\n context: a NeuralQueryContext\n tuple_input: Either a filename or an iterater over lines of data.\n type_specs: list of specifications for how to parse each tab-separated field\n of the line. 
The possible specifications are listed above.\n normalize_outputs: treat the last line as a label and L1-normalize it\n field_separator: string to separate fields\n entity_separator: string to separate entity names\n start_line: Begin returning values at this row.\n end_line: Stop returning values before this row.\n\n Returns:\n a function taking no arguments that returns an Iterable[Tuple[Any]]\n\n Raises:\n ValueError, for incorrectly formatted lines\n \"\"\"\n\n def tuple_generator():\n \"\"\"Closure produced by tuple_generator_builder.\"\"\"\n line_iter = tf.io.gfile.GFile(tuple_input) if isinstance(\n tuple_input, str) else tuple_input\n line_number = 0\n for line_number, line in enumerate(line_iter):\n if line_number < start_line:\n continue\n if end_line and line_number >= end_line:\n break\n line = line.strip('\\r\\n')\n parts = line.split(field_separator)\n if len(parts) != len(type_specs):\n raise ValueError('line (%d) does not have %d fields: %r' %\n (line_number, len(type_specs), line))\n buf = []\n try:\n for i in range(len(parts)):\n spec = type_specs[i]\n if isinstance(spec, str) and context.is_type(spec):\n parsed_val = k_hot_array_from_string_list(\n context, spec, parts[i].split(entity_separator))\n elif spec == str:\n parsed_val = parts[i]\n else:\n raise ValueError('illegal type_spec %r' % spec)\n buf.append(parsed_val)\n if normalize_outputs:\n buf_sum = np.sum(buf[-1])\n if buf_sum:\n buf[-1] /= buf_sum\n yield tuple(buf)\n except (nql.EntityNameError, nql.TypeNameError) as ex:\n tf.logging.warn('Problem %r on line (%d): %r', ex, line_number, line)\n\n return tuple_generator\n\n\ndef tuple_dataset(context,\n tuple_input,\n type_specs,\n normalize_outputs = True,\n field_separator = '\\t',\n entity_separator = ' || ',\n start_line = 0,\n end_line = None):\n \"\"\"Produce a dataset by parsing lines from a file.\n\n Lines are formatted as described in documents for tuple_generator_builder.\n\n Args:\n context: passed to tuple_generator_builder\n tuple_input: passed to tuple_generator_builder\n type_specs: passed to tuple_generator_builder\n normalize_outputs: passed to tuple_generator_builder\n field_separator: passed to tuple_generator_builder\n entity_separator: passed to tuple_generator_builder\n start_line: passed to tuple_generator_builder\n end_line: passed to tuple_generator_builder\n\n Returns:\n tf.Data.Dataset over tuples, with one component for tab-separated field\n \"\"\"\n\n return tf.data.Dataset.from_generator(\n tuple_generator_builder(context, tuple_input, type_specs,\n normalize_outputs, field_separator,\n entity_separator, start_line, end_line),\n tuple([spec_as_tf_type(spec) for spec in type_specs]),\n tuple([spec_as_shape(spec, context) for spec in type_specs]))\n\n\ndef spec_as_tf_type(spec):\n \"\"\"Convert a type_spec to a tf type.\n\n Args:\n spec: a single specification for tuple_generator_builder\n\n Returns:\n type specification required by tf.data.Dataset.from_generator\n \"\"\"\n if spec == str:\n return tf.string\n elif isinstance(spec, int):\n return tf.int32\n else:\n return tf.float32\n\n\ndef spec_as_shape(spec, context):\n \"\"\"Convert a type_spec to a tf shape.\n\n Args:\n spec: a single specification for tuple_generator_builder\n context: a NQL context\n\n Returns:\n tensor shape specification, as required by tf.data.Dataset.from_generator\n \"\"\"\n if spec == str:\n return tf.TensorShape([])\n elif isinstance(spec, int):\n return tf.TensorShape([spec])\n else:\n return tf.TensorShape([context.get_max_id(spec)])\n\n\n# 
GOOGLE_INTERNAL: TODO(b/124102056) Consider moving into nql.\ndef k_hot_array_from_string_list(context,\n typename,\n entity_names):\n \"\"\"Create a numpy array encoding a k-hot set.\n\n Args:\n context: a NeuralExpressionContext\n typename: type of entity_names\n entity_names: list of names of type typename\n\n Returns:\n A k-hot-array representation of the set of entity_names. For frozen\n dictionaries, unknown entity names are mapped to the unknown_id of their\n type or discarded if the unknown_value of the type is None. Unknown entity\n names will throw an nql.EntityNameException for non-frozen dictionaries.\n It is possible for this method to return an all-zeros array.\n \"\"\"\n # Empty string is not a valid entity_name.\n ids = [context.get_id(e, typename) for e in entity_names if e]\n # None is not a valid id.\n valid_ids = [x for x in ids if x is not None]\n max_id = context.get_max_id(typename)\n result = np.zeros((max_id,), dtype='float32')\n if valid_ids:\n result[valid_ids] = 1.\n return result\n\n\ndef placeholder_for_type(context,\n type_spec,\n name = None):\n \"\"\"Produce a Tensorflow placeholder for this type_spec.\n\n Args:\n context: a NeuralQueryContext\n type_spec: a single type_spec (see tuple_dataset)\n name: a name to use for the placeholder\n\n Returns:\n a Tensorflow placeholder\n\n Raises:\n ValueError, if the type_spec is invalid\n \"\"\"\n if type_spec == str:\n return tf.compat.v1.placeholder(tf.string, shape=[None], name=name)\n elif isinstance(type_spec, str) and context.is_type(type_spec):\n name = name or ('%s_ph' % type_spec)\n return context.placeholder(name, type_spec).tf\n else:\n raise ValueError('bad type spec %r' % type_spec)\n\n\ndef build_feature_dict_mapper(feature_names):\n \"\"\"Build a function for tf.data.Dataset.map.\n\n Args:\n feature_names: List of feature names.\n\n Returns:\n A function converting tuples into (dictionary of features, label).\n \"\"\"\n\n def mapper(*tuple_args):\n d = {}\n for i in range(len(feature_names)):\n d[feature_names[i]] = tuple_args[i]\n return d, tuple_args[-1]\n\n return mapper\n" ]
[ [ "numpy.zeros", "tensorflow.compat.v2.TensorShape", "tensorflow.compat.v2.logging.warn", "numpy.sum", "tensorflow.compat.v2.compat.v1.placeholder", "tensorflow.compat.v2.io.gfile.GFile" ] ]
jarbus/neural-mmo
[ "a9a7c34a1fb24fbf252e2958bdb869c213e580a3" ]
[ "jsuarez/extra/embyr_deprecated/embyr/client.py" ]
[ "from pdb import set_trace as T\nimport pygame, time\nimport numpy as np\n\nfrom forge.embyr import embyr\nfrom forge.embyr.modules import *\n\nclass Client(embyr.Container):\n def __init__(self, view, size, realm, step, dims, nAnim, **kwargs):\n super().__init__(size, **kwargs)\n self.W, self.H, self.side = dims\n self.realm, self.step = realm, step\n self.view, self.nAnim = view, nAnim\n\n offset = 16 * 8\n self.x, self.y, self.xVol, self.yVol = -offset, offset, 0, 0\n self.zoom, self.zoomVol = 1, 0\n\n self.count = 0\n self.frame = 0\n self.frames = []\n self.vals = []\n self.init = time.time()\n\n def setup(self):\n surf = self.view.renderTitle()\n self.blit(surf, (0, 0))\n self.frame += 1\n self.flip()\n\n def render(self, t):\n self.update()\n return\n if self.frame == 0:\n return self.setup()\n if self.frame == 1:\n time.sleep(2.5)\n self.update()\n\n def update(self):\n self.view.render(self.realm, None, None)\n return\n self.writeFrame()\n self.trans = self.renderOffsets(self.H, self.H)\n keyframe = self.count == 0\n if keyframe:\n self.step()\n\n self.surf = self.view.render(self.realm, self.trans, keyframe)\n\n self.count = (self.count + 1) % (self.nAnim+1)\n self.blit(self.surf, (0,0))\n self.flip()\n self.frame += 1\n\n def writeFrame(self):\n NFRAMES=1800\n return\n if self.frame < NFRAMES:\n print('Frame: ', len(self.frames))\n frame = pygame.surfarray.array3d(pygame.transform.rotate(self.surf, 90))\n frame = np.fliplr(frame)\n #frame = frame[:1024, 256:256+1024]\n frame = frame[:1024, 1024+256:1024+256+1024]\n self.frames.append(frame)\n #pygame.image.save(self.screen, 'resource/data/lrframe'+str(self.frame)+'.png')\n elif self.frame == NFRAMES:\n import imageio\n print('Saving MP4...')\n imageio.mimwrite('swordfrag.mp4', self.frames, fps = 30)\n print('Saved')\n\n def clipZoom(self, zoom):\n return np.clip(zoom, 1.0, 8.0)\n\n def renderOffsets(self, W, H):\n #Scale\n zoom = self.clipZoom(self.zoom + self.zoomVol)\n scaleX, scaleY = int(W*zoom), int(H*zoom)\n\n #Translate\n deltaX = self.x + self.xVol - scaleX/2 + W/2\n deltaY = -self.y - self.yVol - scaleY/2 + H/2\n return scaleX, scaleY, deltaX, deltaY\n\n def on_touch_down(self, touch):\n self.xStart, self.yStart = touch.pos\n\n def on_touch_up(self, touch):\n if touch.button == 'left':\n self.xVol, self.yVol= 0, 0\n xEnd, yEnd = touch.pos\n self.x += xEnd - self.xStart\n self.y += yEnd - self.yStart\n elif touch.button == 'right':\n self.zoom = self.clipZoom(self.zoom + self.zoomVol)\n self.zoomVol = 0\n\n def on_touch_move(self, touch):\n if touch.button == 'left':\n xEnd, yEnd = touch.pos\n self.xVol = xEnd - self.xStart\n self.yVol = yEnd - self.yStart\n elif touch.button == 'right':\n xEnd, yEnd = touch.pos\n delta = (xEnd - self.xStart)/2 - (yEnd - self.yStart)/2\n self.zoomVol = delta/100\n\n def on_key_down(self, *args):\n text = args[3]\n if text == 'i':\n #Toggle isometric\n trans = self.renderOffsets(self.H, self.H)\n self.view.toggleEnv(trans)\n elif text == 'p':\n T()\n elif text == '[':\n self.view.leftScreenshot()\n else:\n #Toggle overlay\n self.view.key(text)\n" ]
[ [ "numpy.fliplr", "numpy.clip" ] ]
khoih-prog/transformers
[ "77321481247787c97568c3b9f64b19e22351bab8" ]
[ "src/transformers/models/roberta/modeling_roberta.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch RoBERTa model.\"\"\"\n\nimport math\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.utils.checkpoint\nfrom packaging import version\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN, gelu\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_roberta import RobertaConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"roberta-base\"\n_CONFIG_FOR_DOC = \"RobertaConfig\"\n_TOKENIZER_FOR_DOC = \"RobertaTokenizer\"\n\nROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"roberta-base\",\n \"roberta-large\",\n \"roberta-large-mnli\",\n \"distilroberta-base\",\n \"roberta-base-openai-detector\",\n \"roberta-large-openai-detector\",\n # See all RoBERTa models at https://huggingface.co/models?filter=roberta\n]\n\n\nclass RobertaEmbeddings(nn.Module):\n \"\"\"\n Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.\n \"\"\"\n\n # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n if version.parse(torch.__version__) > version.parse(\"1.6.0\"):\n self.register_buffer(\n \"token_type_ids\",\n torch.zeros(self.position_ids.size(), dtype=torch.long),\n persistent=False,\n )\n\n # End copy\n self.padding_idx = config.pad_token_id\n 
self.position_embeddings = nn.Embedding(\n config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx\n )\n\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n if position_ids is None:\n if input_ids is not None:\n # Create the position ids from the input token ids. Any padded tokens remain padded.\n position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)\n else:\n position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)\n\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs\n # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves\n # issue #5664\n if token_type_ids is None:\n if hasattr(self, \"token_type_ids\"):\n buffered_token_type_ids = self.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n if self.position_embedding_type == \"absolute\":\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n \"\"\"\n We are provided embeddings directly. 
We cannot infer which are padded so just generate sequential position ids.\n\n Args:\n inputs_embeds: torch.Tensor\n\n Returns: torch.Tensor\n \"\"\"\n input_shape = inputs_embeds.size()[:-1]\n sequence_length = input_shape[1]\n\n position_ids = torch.arange(\n self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device\n )\n return position_ids.unsqueeze(0).expand(input_shape)\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta\nclass RobertaSelfAttention(nn.Module):\n def __init__(self, config, position_embedding_type=None):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.position_embedding_type = position_embedding_type or getattr(\n config, \"position_embedding_type\", \"absolute\"\n )\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n self.max_position_embeddings = config.max_position_embeddings\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n output_attentions: Optional[bool] = False,\n ) -> Tuple:\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = encoder_hidden_states is not None\n\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_layer = past_key_value[0]\n value_layer = past_key_value[1]\n attention_mask = encoder_attention_mask\n elif is_cross_attention:\n key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))\n value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = 
self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n seq_length = hidden_states.size()[1]\n position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\n position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\n distance = position_ids_l - position_ids_r\n positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\n positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\n\n if self.position_embedding_type == \"relative_key\":\n relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores\n elif self.position_embedding_type == \"relative_key_query\":\n relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.functional.softmax(attention_scores, dim=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n if self.is_decoder:\n outputs = outputs + (past_key_value,)\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfOutput\nclass RobertaSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, 
eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta\nclass RobertaAttention(nn.Module):\n def __init__(self, config, position_embedding_type=None):\n super().__init__()\n self.self = RobertaSelfAttention(config, position_embedding_type=position_embedding_type)\n self.output = RobertaSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n output_attentions: Optional[bool] = False,\n ) -> Tuple:\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate\nclass RobertaIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput\nclass RobertaOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta\nclass 
RobertaLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = RobertaAttention(config)\n self.is_decoder = config.is_decoder\n self.add_cross_attention = config.add_cross_attention\n if self.add_cross_attention:\n if not self.is_decoder:\n raise ValueError(f\"{self} should be used as a decoder model if cross attention is added\")\n self.crossattention = RobertaAttention(config, position_embedding_type=\"absolute\")\n self.intermediate = RobertaIntermediate(config)\n self.output = RobertaOutput(config)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n output_attentions: Optional[bool] = False,\n ) -> Tuple:\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n past_key_value=self_attn_past_key_value,\n )\n attention_output = self_attention_outputs[0]\n\n # if decoder, the last output is tuple of self-attn cache\n if self.is_decoder:\n outputs = self_attention_outputs[1:-1]\n present_key_value = self_attention_outputs[-1]\n else:\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n cross_attn_present_key_value = None\n if self.is_decoder and encoder_hidden_states is not None:\n if not hasattr(self, \"crossattention\"):\n raise ValueError(\n f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n )\n\n # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n cross_attention_outputs = self.crossattention(\n attention_output,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n cross_attn_past_key_value,\n output_attentions,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights\n\n # add cross-attn cache to positions 3,4 of present_key_value tuple\n cross_attn_present_key_value = cross_attention_outputs[-1]\n present_key_value = present_key_value + cross_attn_present_key_value\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n\n # if decoder, return the attn key/values as the last output\n if self.is_decoder:\n outputs = outputs + (present_key_value,)\n\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta\nclass RobertaEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([RobertaLayer(config) for _ in 
range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = False,\n output_hidden_states: Optional[bool] = False,\n return_dict: Optional[bool] = True,\n ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n\n next_decoder_cache = () if use_cache else None\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n past_key_value = past_key_values[i] if past_key_values is not None else None\n\n if self.gradient_checkpointing and self.training:\n\n if use_cache:\n logger.warning(\n \"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, past_key_value, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache += (layer_outputs[-1],)\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n next_decoder_cache,\n all_hidden_states,\n all_self_attentions,\n all_cross_attentions,\n ]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_decoder_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPooler\nclass RobertaPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass RobertaPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n 
config_class = RobertaConfig\n base_model_prefix = \"roberta\"\n supports_gradient_checkpointing = True\n\n # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, RobertaEncoder):\n module.gradient_checkpointing = value\n\n def update_keys_to_ignore(self, config, del_keys_to_ignore):\n \"\"\"Remove some keys from ignore list\"\"\"\n if not config.tie_word_embeddings:\n # must make a new list, or the class variable gets modified!\n self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]\n self._keys_to_ignore_on_load_missing = [\n k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore\n ]\n\n\nROBERTA_START_DOCSTRING = r\"\"\"\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`RobertaConfig`]): Model configuration class with all the parameters of the\n model. Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nROBERTA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`RobertaTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaModel(RobertaPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in *Attention is\n all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz\n Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set\n to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and\n `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.\n\n .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762\n\n \"\"\"\n\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = RobertaEmbeddings(config)\n self.encoder = RobertaEncoder(config)\n\n self.pooler = RobertaPooler(config) if add_pooling_layer else None\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n # Copied from transformers.models.bert.modeling_bert.BertModel.forward\n def forward(\n self,\n input_ids: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n token_type_ids: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:\n r\"\"\"\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n batch_size, seq_length = input_shape\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n\n if token_type_ids is None:\n if hasattr(self.embeddings, \"token_type_ids\"):\n buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n 
past_key_values_length=past_key_values_length,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.\"\"\", ROBERTA_START_DOCSTRING\n)\nclass RobertaForCausalLM(RobertaPreTrainedModel):\n _keys_to_ignore_on_save = [r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if not config.is_decoder:\n logger.warning(\"If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`\")\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.lm_head = RobertaLMHead(config)\n\n # The LM head weights require special treatment only when they are tied with the word embeddings\n self.update_keys_to_ignore(config, [\"lm_head.decoder.weight\"])\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:\n r\"\"\"\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in\n `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are\n ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig\n >>> import torch\n\n >>> tokenizer = RobertaTokenizer.from_pretrained(\"roberta-base\")\n >>> config = RobertaConfig.from_pretrained(\"roberta-base\")\n >>> config.is_decoder = True\n >>> model = RobertaForCausalLM.from_pretrained(\"roberta-base\", config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n\n lm_loss = None\n if labels is not None:\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=lm_loss,\n logits=prediction_scores,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n 
cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"past_key_values\": past}\n\n def _reorder_cache(self, past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n return reordered_past\n\n\n@add_start_docstrings(\"\"\"RoBERTa Model with a `language modeling` head on top.\"\"\", ROBERTA_START_DOCSTRING)\nclass RobertaForMaskedLM(RobertaPreTrainedModel):\n _keys_to_ignore_on_save = [r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.lm_head = RobertaLMHead(config)\n\n # The LM head weights require special treatment only when they are tied with the word embeddings\n self.update_keys_to_ignore(config, [\"lm_head.decoder.weight\"])\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n mask=\"<mask>\",\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, MaskedLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ...,\n config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the\n loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n kwargs (`Dict[str, any]`, optional, defaults to *{}*):\n Used to hide legacy arguments that have been deprecated.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass RobertaLMHead(nn.Module):\n \"\"\"Roberta Head for masked language modeling.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n self.decoder.bias = self.bias\n\n def forward(self, features, **kwargs):\n x = self.dense(features)\n x = gelu(x)\n x = self.layer_norm(x)\n\n # project back to size of vocabulary with bias\n x = self.decoder(x)\n\n return x\n\n def _tie_weights(self):\n # To tie those two weights if they get disconnected (on TPU or when the bias is resized)\n self.bias = self.decoder.bias\n\n\n@add_start_docstrings(\n \"\"\"\n RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. 
for GLUE tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForSequenceClassification(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.classifier = RobertaClassificationHead(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, SequenceClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = outputs[0]\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForMultipleChoice(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.roberta = RobertaModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, MultipleChoiceModelOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,\n num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See\n `input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n flat_inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.roberta(\n flat_input_ids,\n position_ids=flat_position_ids,\n token_type_ids=flat_token_type_ids,\n attention_mask=flat_attention_mask,\n head_mask=head_mask,\n inputs_embeds=flat_inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForTokenClassification(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TokenClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass RobertaClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, features, **kwargs):\n x = features[:, 0, :] # take <s> token (equiv. 
to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForQuestionAnswering(RobertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n start_positions: Optional[torch.LongTensor] = None,\n end_positions: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, QuestionAnsweringModelOutput]:\n r\"\"\"\n start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). 
Position outside of the sequence\n are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\ndef create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):\n \"\"\"\n Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\n are ignored. This is modified from fairseq's `utils.make_positions`.\n\n Args:\n x: torch.Tensor x:\n\n Returns: torch.Tensor\n \"\"\"\n # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.\n mask = input_ids.ne(padding_idx).int()\n incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask\n return incremental_indices.long() + padding_idx\n" ]
[ [ "torch.nn.Linear", "torch.zeros", "torch.nn.Dropout", "torch.nn.LayerNorm", "torch.cat", "torch.nn.MSELoss", "torch.arange", "torch.einsum", "torch.nn.Tanh", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.nn.BCEWithLogitsLoss", "torch.nn.functional.softmax", "torch.tanh", "torch.matmul", "torch.nn.Embedding", "torch.cumsum" ] ]
Archymade/English-Letter-Classifier
[ "0aefc5114a4dc48205c18e8e667407c86217af19" ]
[ "scripts/viz_utils.py" ]
[ "import os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.metrics import plot_confusion_matrix\n \n \n# Univariate visualization\ndef univariate_plot(data, path, save = True):\n \n ''' Plot the data univariately. '''\n \n for col in data.columns:\n plt.figure(figsize = (10, 8))\n sns.displot(data[col])\n\n plt.title(f'Distribution plot for Feature {col}')\n \n if save:\n plt.savefig(f'{path} - Feature {col}.png', dpi = 300)\n \n plt.show()\n plt.close('all')\n \n return None\n\n\ndef correlogram(data, path, palette = 'inferno', h = 10, w = 10, save = True):\n ''' Plot and save correlogram. '''\n \n plt.figure(figsize = (h, w))\n sns.pairplot(data = data, palette = palette)\n \n plt.title('Bivariate visual relationships in data')\n \n if save:\n plt.savefig(f'{path}.png', dpi = 300)\n \n plt.show()\n plt.close('all')\n \n return None\n\n \ndef get_correlation_map(data, path, save = True, h = 20, w = 10):\n ''' Visualize feature correlation. '''\n \n plt.figure(figsize = (h, w))\n sns.heatmap(data.corr(), annot = True, fmt = '.3g')\n plt.title('Feature collinearity heatmap')\n \n if save:\n plt.savefig(f'{path}.png', dpi = 300)\n \n plt.show(); plt.close('all')\n \n return None\n\n \ndef visualize_confusion_matrix(model, X, y, split, path, save = True):\n \"\"\" Display Confusion Matrix visually.\"\"\"\n\n fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (20, 20))\n plt.title(f'Confusion matrix for {split.upper()}', fontsize = 30, pad = 30)\n plot_confusion_matrix(model, X, y, ax = ax)\n \n if save:\n plt.savefig(os.path.join(path, f'{split}-confusion-matrix.png'), dpi = 300)\n \n plt.show()\n plt.close('all')\n\n return None\n\n\ndef class_distribution(data, path, save = True, h = 10, w = 10):\n ''' Visualize class distribution. '''\n \n plt.figure(figsize = (w, h))\n sns.countplot(x = data)\n \n plt.title('Class Distribution', pad = 20, fontsize = 20)\n \n plt.xlabel('Class Label', fontsize = 20)\n plt.ylabel('Class Population', fontsize = 20)\n \n if save:\n plt.savefig(f'{path}.png', dpi = 300)\n \n plt.show(); plt.close('all')\n \n return None\n\n\n\n" ]
[ [ "sklearn.metrics.plot_confusion_matrix", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
msc-acse/acse-9-independent-research-project-dekape
[ "d3d2236e47e8604803850c7cacceb826c7649bcb" ]
[ "fullwaveqc/geom.py" ]
[ "#!/usr/bin/env python\n# Deborah Pelacani Cruz\n# https://github.com/dekape\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nfrom collections import OrderedDict\nimport fullwaveqc.tools as tools\n\n\ndef boundarycalc(d, dx, fmin, vmax):\n \"\"\"\n Calculates the number of absorbing boundaries and number of cells from the source recommended for\n a Fullwave run without reflections and interferences. Number of absorbing boundaries computed as\n to guarantee it covers two of the largest wavelengths. Distance from source computed to guarantee\n one Fresnel zone between the source and the first receiver. The total number of padding cells above\n the source should therefore be nabsorb + ndist.\n\n Parameters\n ----------\n d: float\n distance between source and first receiver in units of distance (meters)\n dx: float\n size of model cell in units of distance (meters)\n fmin: float\n minimum frequency present in the wavelet (Hz)\n vmax: float\n maximum P-wave velocity expected from the models (m/s)\n\n Returns\n -------\n nabsorb: int\n number of absorbing boundary cells recommended for the specific problem\n ndist: int\n number of padded cells from the source to the start of the absorbing boundaries\n \"\"\"\n\n # Largest wavelength (in units of distance)\n lambda_max = vmax / fmin\n\n # Number of boundaries necessary to cover one Fresnel radius\n ndist = int(np.ceil(0.5 * np.sqrt(lambda_max * d) / dx))\n if ndist < 4:\n ndist = 4\n\n # Number of absorbing boundaries required to cover 2 of the largest wavelengths\n nabsorb = int(np.ceil(2 * lambda_max / dx))\n\n return nabsorb, ndist\n\n\ndef surveygeom(rcvgeopath, srcgeopath, src_list=[], plot=False, verbose=0):\n \"\"\"\n Retrieves and plots the in-line positions of the survey array as understood from SegyPrep.\n \n Parameters\n ----------\n rcvgeopath: str\n path to file <PROJECT_NAME>-Receivers.geo generated by SegyPrep\n srcgeopath: str\n path to file <PROJECT_NAME>-Sources.geo generated by SegyPrep\n src_list: list, optional\n list of source/shot numbers to retrieve and plot. Empty list returns all positions. Default: []\n plot: bool, optional\n If true will plot the shots and receivers of src_list. Default:False\n verbose: bool, optional\n If true will print to console the steps of this function. 
Default: False\n\n Returns\n -------\n src_ret: list\n list of all in-line positions from the sources in src_list\n rcv_ret: list\n list of numpy.arrays, for each source in src_list, with its array of in-line\n receiver positions\n\n \"\"\"\n # Set verbose\n verbose_print = tools.set_verbose(verbose)\n\n # Store source positions\n verbose_print(str(datetime.datetime.now()) + \" \\t Reading source locations ...\\r\")\n srcx, srcy = [], []\n\n with open(srcgeopath) as srcgeo:\n for i, line in enumerate(srcgeo):\n if i != 0:\n srcx.append(float(line.split()[1]))\n\n # Store receiver positions\n verbose_print(str(datetime.datetime.now()) + \" \\t Reading receiver locations ...\\r\")\n rcvx, rcvy = [], []\n with open(rcvgeopath) as rcvgeo:\n for i, line in enumerate(rcvgeo):\n if i != 0:\n rcvx.append(float(line.split()[1]))\n rcvx = np.array(rcvx)\n\n # Rearrange receivers list by source, by identifying high jumps in position\n verbose_print(str(datetime.datetime.now()) + \" \\t Rearranging receiver by sources ...\\r\")\n rcvx_2 = []\n src_index = [0]\n for i in range(1, np.size(rcvx)):\n if rcvx[i-1] > rcvx[i]:\n src_index.append(i)\n src_index.append(-1)\n for i in range(0, len(src_index)-1):\n rcvx_2.append(np.array(rcvx[src_index[i]: src_index[i+1]]))\n\n verbose_print(str(datetime.datetime.now()) + \" \\t Plotting ... \")\n\n # Filtering sources and receivers to src_list for plotting and returning\n src_ret = []\n rcv_ret = []\n\n # If list of sources not given, create list with all sources, and store all sources and receivers to return\n if len(src_list) == 0:\n src_list = [i for i in range(1, len(srcx) + 1)]\n src_ret = srcx\n rcv_ret = rcvx_2\n else:\n for i in src_list:\n # Dealing with negative numbers in the list\n if i > 0:\n i -= 1\n if i < 0:\n i = len(srcx) + i\n src_ret.append(srcx[i])\n rcv_ret.append(rcvx_2[i])\n\n # Plot every source in list\n if plot:\n figure, ax = plt.subplots(1, 1)\n figure.set_size_inches(15.5, 7.5)\n for i in range(0, len(src_ret)):\n if src_list[i] < 0:\n src_pos = len(srcx) + src_list[i]\n else:\n src_pos = src_list[i]\n ax.scatter(src_ret[i], src_pos + 1, c=\"y\", label=\"Source\", marker=\"*\", s=155)\n ax.scatter(rcv_ret[i], np.zeros_like(rcv_ret[i]) + src_pos + 1, c=\"r\", label=\"Receiver\", marker=11, s=40)\n\n ax.set_xlim(0, )\n ax.set(xlabel=\"Lateral offset (m)\", ylabel=\"Shot Number\")\n ax.grid()\n\n # Configuring legend so that it doesn't repeat the labels\n handles, labels = plt.gca().get_legend_handles_labels()\n by_label = OrderedDict(zip(labels, handles))\n plt.legend(by_label.values(), by_label.keys())\n ax.legend(by_label.values(), by_label.keys(), loc=\"best\")\n\n plt.show()\n\n return src_ret, rcv_ret\n" ]
[ [ "numpy.array", "numpy.ceil", "numpy.zeros_like", "matplotlib.pyplot.subplots", "numpy.size", "numpy.sqrt", "matplotlib.pyplot.show", "matplotlib.pyplot.gca" ] ]
tobyglei/streamlit
[ "5ce9eeee09abb1ce34da8ffe2075881139ea7b7d" ]
[ "lib/streamlit/delta_generator.py" ]
[ "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Allows us to create and absorb changes (aka Deltas) to elements.\"\"\"\nfrom typing import Optional, Iterable, List\n\nfrom streamlit import caching\nfrom streamlit import cursor\nfrom streamlit import type_util\nfrom streamlit.cursor import Cursor\nfrom streamlit.proto.BlockPath_pb2 import BlockPath\nfrom streamlit.report_thread import get_report_ctx\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.errors import NoSessionContext\nfrom streamlit.proto import Block_pb2\nfrom streamlit.proto import BlockPath_pb2\nfrom streamlit.proto import ForwardMsg_pb2\nfrom streamlit.logger import get_logger\n\nfrom streamlit.elements.utils import NoValue\nfrom streamlit.elements.balloons import BalloonsMixin\nfrom streamlit.elements.button import ButtonMixin\nfrom streamlit.elements.markdown import MarkdownMixin\nfrom streamlit.elements.text import TextMixin\nfrom streamlit.elements.alert import AlertMixin\nfrom streamlit.elements.json import JsonMixin\nfrom streamlit.elements.doc_string import HelpMixin\nfrom streamlit.elements.exception_proto import ExceptionMixin\nfrom streamlit.elements.data_frame_proto import DataFrameMixin\nfrom streamlit.elements.altair import AltairMixin\nfrom streamlit.elements.bokeh_chart import BokehMixin\nfrom streamlit.elements.graphviz_chart import GraphvizMixin\nfrom streamlit.elements.plotly_chart import PlotlyMixin\nfrom streamlit.elements.vega_lite import VegaLiteMixin\nfrom streamlit.elements.deck_gl_json_chart import PydeckMixin\nfrom streamlit.elements.map import MapMixin\nfrom streamlit.elements.iframe_proto import IframeMixin\nfrom streamlit.elements.media_proto import MediaMixin\nfrom streamlit.elements.checkbox import CheckboxMixin\nfrom streamlit.elements.multiselect import MultiSelectMixin\nfrom streamlit.elements.radio import RadioMixin\nfrom streamlit.elements.selectbox import SelectboxMixin\nfrom streamlit.elements.text_widgets import TextWidgetsMixin\nfrom streamlit.elements.time_widgets import TimeWidgetsMixin\nfrom streamlit.elements.progress import ProgressMixin\nfrom streamlit.elements.empty import EmptyMixin\nfrom streamlit.elements.number_input import NumberInputMixin\nfrom streamlit.elements.color_picker import ColorPickerMixin\nfrom streamlit.elements.file_uploader import FileUploaderMixin\nfrom streamlit.elements.select_slider import SelectSliderMixin\nfrom streamlit.elements.slider import SliderMixin\nfrom streamlit.elements.image_proto import ImageMixin\nfrom streamlit.elements.pyplot import PyplotMixin\nfrom streamlit.elements.write import WriteMixin\n\nLOGGER = get_logger(__name__)\n\n# Save the type built-in for when we override the name \"type\".\n_type = type\n\nMAX_DELTA_BYTES = 14 * 1024 * 1024 # 14MB\n\n# List of Streamlit commands that perform a Pandas \"melt\" operation on\n# input dataframes.\nDELTAS_TYPES_THAT_MELT_DATAFRAMES = (\"line_chart\", \"area_chart\", \"bar_chart\")\n\n\nclass DeltaGenerator(\n AlertMixin,\n 
AltairMixin,\n BalloonsMixin,\n BokehMixin,\n ButtonMixin,\n CheckboxMixin,\n ColorPickerMixin,\n DataFrameMixin,\n EmptyMixin,\n ExceptionMixin,\n FileUploaderMixin,\n GraphvizMixin,\n HelpMixin,\n IframeMixin,\n ImageMixin,\n MarkdownMixin,\n MapMixin,\n MediaMixin,\n MultiSelectMixin,\n NumberInputMixin,\n PlotlyMixin,\n ProgressMixin,\n PydeckMixin,\n PyplotMixin,\n RadioMixin,\n SelectboxMixin,\n SelectSliderMixin,\n SliderMixin,\n JsonMixin,\n TextMixin,\n TextWidgetsMixin,\n TimeWidgetsMixin,\n VegaLiteMixin,\n WriteMixin,\n):\n \"\"\"Creator of Delta protobuf messages.\n\n Parameters\n ----------\n container: BlockPath_pb2.BlockPath.ContainerValue or None\n The root container for this DeltaGenerator. If None, this is a null\n DeltaGenerator which doesn't print to the app at all (useful for\n testing).\n\n cursor: cursor.Cursor or None\n This is either:\n - None: if this is the running DeltaGenerator for a top-level\n container (MAIN or SIDEBAR)\n - RunningCursor: if this is the running DeltaGenerator for a\n non-top-level container (created with dg.container())\n - LockedCursor: if this is a locked DeltaGenerator returned by some\n other DeltaGenerator method. E.g. the dg returned in dg =\n st.text(\"foo\").\n\n parent: DeltaGenerator\n To support the `with dg` notation, DGs are arranged as a tree. Each DG\n remembers its own parent, and the root of the tree is the main DG.\n\n block_type: None or \"vertical\" or \"horizontal\" or \"column\" or \"expandable\"\n If this is a block DG, we track its type to prevent nested columns/expanders\n\n \"\"\"\n\n # The pydoc below is for user consumption, so it doesn't talk about\n # DeltaGenerator constructor parameters (which users should never use). For\n # those, see above.\n def __init__(\n self,\n container: Optional[\"BlockPath.ContainerValue\"] = BlockPath.MAIN,\n cursor: Optional[Cursor] = None,\n parent: Optional[\"DeltaGenerator\"] = None,\n block_type: Optional[str] = None,\n ):\n \"\"\"Inserts or updates elements in Streamlit apps.\n\n As a user, you should never initialize this object by hand. Instead,\n DeltaGenerator objects are initialized for you in two places:\n\n 1) When you call `dg = st.foo()` for some method \"foo\", sometimes `dg`\n is a DeltaGenerator object. You can call methods on the `dg` object to\n update the element `foo` that appears in the Streamlit app.\n\n 2) This is an internal detail, but `st.sidebar` itself is a\n DeltaGenerator. That's why you can call `st.sidebar.foo()` to place\n an element `foo` inside the sidebar.\n\n \"\"\"\n # Whether this DeltaGenerator is nested in the main area or sidebar.\n # No relation to `st.beta_container()`.\n self._container = container\n\n # NOTE: You should never use this directly! Instead, use self._cursor,\n # which is a computed property that fetches the right cursor.\n self._provided_cursor = cursor\n\n self._parent = parent\n self._block_type = block_type\n\n # Change the module of all mixin'ed functions to be st.delta_generator,\n # instead of the original module (e.g. 
st.elements.markdown)\n for mixin in self.__class__.__bases__:\n for (name, func) in mixin.__dict__.items():\n if callable(func):\n func.__module__ = self.__module__\n\n def __enter__(self):\n # with block started\n ctx = get_report_ctx()\n if ctx:\n ctx.dg_stack.append(self)\n\n def __exit__(self, type, value, traceback):\n # with block ended\n ctx = get_report_ctx()\n if ctx:\n ctx.dg_stack.pop()\n\n # Re-raise any exceptions\n return False\n\n @property\n def _active_dg(self) -> \"DeltaGenerator\":\n \"\"\"Return the DeltaGenerator that's currently 'active'.\n If we are the main DeltaGenerator, and are inside a `with` block that\n creates a container, our active_dg is that container. Otherwise,\n our active_dg is self.\n \"\"\"\n if self == self._main_dg:\n # We're being invoked via an `st.foo` pattern - use the current\n # `with` dg (aka the top of the stack).\n ctx = get_report_ctx()\n if ctx and len(ctx.dg_stack) > 0:\n return ctx.dg_stack[-1]\n\n # We're being invoked via an `st.sidebar.foo` pattern - ignore the\n # current `with` dg.\n return self\n\n @property\n def _main_dg(self) -> \"DeltaGenerator\":\n \"\"\"Return this DeltaGenerator's root - that is, the top-level ancestor\n DeltaGenerator that we belong to (this generally means the st._main\n DeltaGenerator).\n \"\"\"\n return self._parent._main_dg if self._parent else self\n\n def __getattr__(self, name):\n import streamlit as st\n\n streamlit_methods = [\n method_name for method_name in dir(st) if callable(getattr(st, method_name))\n ]\n\n def wrapper(*args, **kwargs):\n if name in streamlit_methods:\n if self._container == BlockPath_pb2.BlockPath.SIDEBAR:\n message = (\n \"Method `%(name)s()` does not exist for \"\n \"`st.sidebar`. Did you mean `st.%(name)s()`?\" % {\"name\": name}\n )\n else:\n message = (\n \"Method `%(name)s()` does not exist for \"\n \"`DeltaGenerator` objects. Did you mean \"\n \"`st.%(name)s()`?\" % {\"name\": name}\n )\n else:\n message = \"`%(name)s()` is not a valid Streamlit command.\" % {\n \"name\": name\n }\n\n raise StreamlitAPIException(message)\n\n return wrapper\n\n @property\n def _parent_block_types(self) -> Iterable[str]:\n \"\"\"Iterate all the block types used by this DeltaGenerator and all\n its ancestor DeltaGenerators.\n \"\"\"\n current_dg: Optional[DeltaGenerator] = self\n while current_dg is not None:\n if current_dg._block_type is not None:\n yield current_dg._block_type\n current_dg = current_dg._parent\n\n @property\n def _cursor(self) -> Optional[Cursor]:\n \"\"\"Return our Cursor. 
This will be None if we're not running in a\n ReportThread - e.g., if we're running a \"bare\" script outside of\n Streamlit.\n \"\"\"\n if self._provided_cursor is None:\n return cursor.get_container_cursor(self._container)\n else:\n return self._provided_cursor\n\n @property\n def _is_top_level(self) -> bool:\n return self._provided_cursor is None\n\n def _get_coordinates(self) -> str:\n \"\"\"Returns the element's 4-component location as string like \"M.(1,2).3\".\n\n This function uniquely identifies the element's position in the front-end,\n which allows (among other potential uses) the MediaFileManager to maintain\n session-specific maps of MediaFile objects placed with their \"coordinates\".\n\n This way, users can (say) use st.image with a stream of different images,\n and Streamlit will expire the older images and replace them in place.\n \"\"\"\n # Switch to the active DeltaGenerator, in case we're in a `with` block.\n self = self._active_dg\n container = self._container # Proto index of container (e.g. MAIN=1)\n\n if self._cursor:\n path_str = str(self._cursor.path)\n index_str = str(self._cursor.index) # index - element's own position\n else:\n # Case in which we have started up in headless mode.\n path_str = \"(,)\"\n index_str = \"\"\n\n return f\"{container}.{path_str}.{index_str}\"\n\n def _enqueue(\n self,\n delta_type,\n element_proto,\n return_value=None,\n last_index=None,\n element_width=None,\n element_height=None,\n ):\n \"\"\"Create NewElement delta, fill it, and enqueue it.\n\n Parameters\n ----------\n delta_type: string\n The name of the streamlit method being called\n element_proto: proto\n The actual proto in the NewElement type e.g. Alert/Button/Slider\n return_value: any or None\n The value to return to the calling script (for widgets)\n element_width : int or None\n Desired width for the element\n element_height : int or None\n Desired height for the element\n\n Returns\n -------\n DeltaGenerator\n A DeltaGenerator that can be used to modify the newly-created\n element.\n\n \"\"\"\n # Switch to the active DeltaGenerator, in case we're in a `with` block.\n self = self._active_dg\n # Warn if we're called from within an @st.cache function\n caching.maybe_show_cached_st_function_warning(self, delta_type)\n\n # Some elements have a method.__name__ != delta_type in proto.\n # This really matters for line_chart, bar_chart & area_chart,\n # since add_rows() relies on method.__name__ == delta_type\n # TODO: Fix for all elements (or the cache warning above will be wrong)\n proto_type = delta_type\n if proto_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES:\n proto_type = \"vega_lite_chart\"\n\n # Copy the marshalled proto into the overall msg proto\n msg = ForwardMsg_pb2.ForwardMsg()\n msg_el_proto = getattr(msg.delta.new_element, proto_type)\n msg_el_proto.CopyFrom(element_proto)\n\n # Only enqueue message and fill in metadata if there's a container.\n msg_was_enqueued = False\n if self._container and self._cursor:\n msg.metadata.parent_block.container = self._container\n msg.metadata.parent_block.path[:] = self._cursor.path\n msg.metadata.delta_id = self._cursor.index\n\n if element_width is not None:\n msg.metadata.element_dimension_spec.width = element_width\n if element_height is not None:\n msg.metadata.element_dimension_spec.height = element_height\n\n _enqueue_message(msg)\n msg_was_enqueued = True\n\n if msg_was_enqueued:\n # Get a DeltaGenerator that is locked to the current element\n # position.\n new_cursor = (\n self._cursor.get_locked_cursor(\n 
delta_type=delta_type, last_index=last_index\n )\n if self._cursor is not None\n else None\n )\n\n output_dg = DeltaGenerator(\n container=self._container,\n cursor=new_cursor,\n parent=self,\n )\n else:\n # If the message was not enqueued, just return self since it's a\n # no-op from the point of view of the app.\n output_dg = self\n\n return _value_or_dg(return_value, output_dg)\n\n def beta_container(self) -> \"DeltaGenerator\":\n \"\"\"Insert a multi-element container.\n\n Inserts an invisible container into your app that can be used to hold\n multiple elements. This allows you to, for example, insert multiple\n elements into your app out of order.\n\n To add elements to the returned container, you can use \"with\" notation\n (preferred) or just call methods directly on the returned object. See\n examples below.\n\n Examples\n --------\n\n Inserting elements using \"with\" notation:\n\n >>> with st.beta_container():\n ... st.write(\"This is inside the container\")\n ...\n ... # You can call any Streamlit command, including custom components:\n ... st.bar_chart(np.random.randn(50, 3))\n ...\n >>> st.write(\"This is outside the container\")\n\n .. output ::\n https://static.streamlit.io/0.66.0-Wnid/index.html?id=Qj8PY3v3L8dgVjjQCreHux\n height: 420px\n\n Inserting elements out of order:\n\n >>> container = st.beta_container()\n >>> container.write(\"This is inside the container\")\n >>> st.write(\"This is outside the container\")\n >>>\n >>> # Now insert some more in the container\n >>> container.write(\"This is inside too\")\n\n .. output ::\n https://static.streamlit.io/0.66.0-Wnid/index.html?id=GsFVF5QYT3Ljr6jQjErPqL\n \"\"\"\n return self._block()\n\n # TODO: Enforce that columns are not nested or in Sidebar\n def beta_columns(self, spec) -> List[\"DeltaGenerator\"]:\n \"\"\"Insert containers laid out as side-by-side columns.\n\n Inserts a number of multi-element containers laid out side-by-side and\n returns a list of container objects.\n\n To add elements to the returned containers, you can use \"with\" notation\n (preferred) or just call methods directly on the returned object. See\n examples below.\n\n .. warning::\n Currently, you may not put columns inside another column.\n\n Parameters\n ----------\n spec : int or list of numbers\n If an int\n Specifies the number of columns to insert, and all columns\n have equal width.\n\n If a list of numbers\n Creates a column for each number, and each\n column's width is proportional to the number provided. Numbers can\n be ints or floats, but they must be positive.\n\n For example, `st.beta_columns([3, 1, 2])` creates 3 columns where\n the first column is 3 times the width of the second, and the last\n column is 2 times that width.\n\n Returns\n -------\n list of containers\n A list of container objects.\n\n Examples\n --------\n\n You can use `with` notation to insert any element into a column:\n\n >>> col1, col2, col3 = st.beta_columns(3)\n >>>\n >>> with col1:\n ... st.header(\"A cat\")\n ... st.image(\"https://static.streamlit.io/examples/cat.jpg\", use_column_width=True)\n ...\n >>> with col2:\n ... st.header(\"A dog\")\n ... st.image(\"https://static.streamlit.io/examples/dog.jpg\", use_column_width=True)\n ...\n >>> with col3:\n ... st.header(\"An owl\")\n ... st.image(\"https://static.streamlit.io/examples/owl.jpg\", use_column_width=True)\n\n .. 
output ::\n https://static.streamlit.io/0.66.0-Wnid/index.html?id=VW45Va5XmSKed2ayzf7vYa\n height: 550px\n\n Or you can just call methods directly in the returned objects:\n\n >>> col1, col2 = st.beta_columns([3, 1])\n >>> data = np.random.randn(10, 1)\n >>>\n >>> col1.subheader(\"A wide column with a chart\")\n >>> col1.line_chart(data)\n >>>\n >>> col2.subheader(\"A narrow column with the data\")\n >>> col2.write(data)\n\n .. output ::\n https://static.streamlit.io/0.66.0-Wnid/index.html?id=XSQ6VkonfGcT2AyNYMZN83\n height: 400px\n\n \"\"\"\n weights = spec\n weights_exception = StreamlitAPIException(\n \"The input argument to st.beta_columns must be either a \"\n + \"positive integer or a list of positive numeric weights. \"\n + \"See [documentation](https://docs.streamlit.io/en/stable/api.html#streamlit.beta_columns) \"\n + \"for more information.\"\n )\n\n if isinstance(weights, int):\n # If the user provided a single number, expand into equal weights.\n # E.g. (1,) * 3 => (1, 1, 1)\n # NOTE: A negative/zero spec will expand into an empty tuple.\n weights = (1,) * weights\n\n if len(weights) == 0 or any(weight <= 0 for weight in weights):\n raise weights_exception\n\n def column_proto(weight):\n col_proto = Block_pb2.Block()\n col_proto.column.weight = weight\n col_proto.allow_empty = True\n return col_proto\n\n horiz_proto = Block_pb2.Block()\n horiz_proto.horizontal.total_weight = sum(weights)\n row = self._block(horiz_proto)\n return [row._block(column_proto(w)) for w in weights]\n\n # Internal block element, to hide the 'layout' param from our users.\n def _block(self, block_proto=Block_pb2.Block()) -> \"DeltaGenerator\":\n # Switch to the active DeltaGenerator, in case we're in a `with` block.\n self = self._active_dg\n\n # Prevent nested columns & expanders by checking all parents.\n block_type = block_proto.WhichOneof(\"type\")\n # Convert the generator to a list, so we can use it multiple times.\n parent_block_types = frozenset(self._parent_block_types)\n if block_type == \"column\" and block_type in parent_block_types:\n raise StreamlitAPIException(\n \"Columns may not be nested inside other columns.\"\n )\n if block_type == \"expandable\" and block_type in parent_block_types:\n raise StreamlitAPIException(\n \"Expanders may not be nested inside other expanders.\"\n )\n\n if self._container is None or self._cursor is None:\n return self\n\n msg = ForwardMsg_pb2.ForwardMsg()\n msg.metadata.parent_block.container = self._container\n msg.metadata.parent_block.path[:] = self._cursor.path\n msg.metadata.delta_id = self._cursor.index\n msg.delta.add_block.CopyFrom(block_proto)\n\n # Normally we'd return a new DeltaGenerator that uses the locked cursor\n # below. But in this case we want to return a DeltaGenerator that uses\n # a brand new cursor for this new block we're creating.\n block_cursor = cursor.RunningCursor(\n path=self._cursor.path + (self._cursor.index,)\n )\n block_dg = DeltaGenerator(\n container=self._container,\n cursor=block_cursor,\n parent=self,\n block_type=block_type,\n )\n\n # Must be called to increment this cursor's index.\n self._cursor.get_locked_cursor(last_index=None)\n _enqueue_message(msg)\n\n return block_dg\n\n def beta_expander(self, label=None, expanded=False) -> \"DeltaGenerator\":\n \"\"\"Insert a multi-element container that can be expanded/collapsed.\n\n Inserts a container into your app that can be used to hold multiple elements\n and can be expanded or collapsed by the user. 
When collapsed, all that is\n visible is the provided label.\n\n To add elements to the returned container, you can use \"with\" notation\n (preferred) or just call methods directly on the returned object. See\n examples below.\n\n .. warning::\n Currently, you may not put expanders inside another expander.\n\n Parameters\n ----------\n label : str\n A string to use as the header for the expander.\n expanded : bool\n If True, initializes the expander in \"expanded\" state. Defaults to\n False (collapsed).\n\n Examples\n --------\n >>> st.line_chart({\"data\": [1, 5, 2, 6, 2, 1]})\n >>>\n >>> with st.beta_expander(\"See explanation\"):\n ... st.write(\\\"\\\"\\\"\n ... The chart above shows some numbers I picked for you.\n ... I rolled actual dice for these, so they're *guaranteed* to\n ... be random.\n ... \\\"\\\"\\\")\n ... st.image(\"https://static.streamlit.io/examples/dice.jpg\")\n\n .. output ::\n https://static.streamlit.io/0.66.0-2BLtg/index.html?id=7v2tgefVbW278gemvYrRny\n height: 750px\n\n \"\"\"\n if label is None:\n raise StreamlitAPIException(\"A label is required for an expander\")\n\n expandable_proto = Block_pb2.Block.Expandable()\n expandable_proto.expanded = expanded\n expandable_proto.label = label\n\n block_proto = Block_pb2.Block()\n block_proto.allow_empty = True\n block_proto.expandable.CopyFrom(expandable_proto)\n\n return self._block(block_proto=block_proto)\n\n def add_rows(self, data=None, **kwargs):\n \"\"\"Concatenate a dataframe to the bottom of the current one.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,\n or None\n Table to concat. Optional.\n\n **kwargs : pandas.DataFrame, numpy.ndarray, Iterable, dict, or None\n The named dataset to concat. Optional. You can only pass in 1\n dataset (including the one in the data parameter).\n\n Example\n -------\n >>> df1 = pd.DataFrame(\n ... np.random.randn(50, 20),\n ... columns=('col %d' % i for i in range(20)))\n ...\n >>> my_table = st.table(df1)\n >>>\n >>> df2 = pd.DataFrame(\n ... np.random.randn(50, 20),\n ... columns=('col %d' % i for i in range(20)))\n ...\n >>> my_table.add_rows(df2)\n >>> # Now the table shown in the Streamlit app contains the data for\n >>> # df1 followed by the data for df2.\n\n You can do the same thing with plots. For example, if you want to add\n more data to a line chart:\n\n >>> # Assuming df1 and df2 from the example above still exist...\n >>> my_chart = st.line_chart(df1)\n >>> my_chart.add_rows(df2)\n >>> # Now the chart shown in the Streamlit app contains the data for\n >>> # df1 followed by the data for df2.\n\n And for plots whose datasets are named, you can pass the data with a\n keyword argument where the key is the name:\n\n >>> my_chart = st.vega_lite_chart({\n ... 'mark': 'line',\n ... 'encoding': {'x': 'a', 'y': 'b'},\n ... 'datasets': {\n ... 'some_fancy_name': df1, # <-- named dataset\n ... },\n ... 'data': {'name': 'some_fancy_name'},\n ... 
}),\n >>> my_chart.add_rows(some_fancy_name=df2) # <-- name used as keyword\n\n \"\"\"\n if self._container is None or self._cursor is None:\n return self\n\n if not self._cursor.is_locked:\n raise StreamlitAPIException(\"Only existing elements can `add_rows`.\")\n\n # Accept syntax st.add_rows(df).\n if data is not None and len(kwargs) == 0:\n name = \"\"\n # Accept syntax st.add_rows(foo=df).\n elif len(kwargs) == 1:\n name, data = kwargs.popitem()\n # Raise error otherwise.\n else:\n raise StreamlitAPIException(\n \"Wrong number of arguments to add_rows().\"\n \"Command requires exactly one dataset\"\n )\n\n # When doing add_rows on an element that does not already have data\n # (for example, st.line_chart() without any args), call the original\n # st.foo() element with new data instead of doing an add_rows().\n if (\n self._cursor.props[\"delta_type\"] in DELTAS_TYPES_THAT_MELT_DATAFRAMES\n and self._cursor.props[\"last_index\"] is None\n ):\n # IMPORTANT: This assumes delta types and st method names always\n # match!\n st_method_name = self._cursor.props[\"delta_type\"]\n st_method = getattr(self, st_method_name)\n st_method(data, **kwargs)\n return\n\n data, self._cursor.props[\"last_index\"] = _maybe_melt_data_for_add_rows(\n data, self._cursor.props[\"delta_type\"], self._cursor.props[\"last_index\"]\n )\n\n msg = ForwardMsg_pb2.ForwardMsg()\n msg.metadata.parent_block.container = self._container\n msg.metadata.parent_block.path[:] = self._cursor.path\n msg.metadata.delta_id = self._cursor.index\n\n import streamlit.elements.data_frame_proto as data_frame_proto\n\n data_frame_proto.marshall_data_frame(data, msg.delta.add_rows.data)\n\n if name:\n msg.delta.add_rows.name = name\n msg.delta.add_rows.has_name = True\n\n _enqueue_message(msg)\n\n return self\n\n\ndef _maybe_melt_data_for_add_rows(data, delta_type, last_index):\n import pandas as pd\n import streamlit.elements.data_frame_proto as data_frame_proto\n\n # For some delta types we have to reshape the data structure\n # otherwise the input data and the actual data used\n # by vega_lite will be different and it will throw an error.\n if delta_type in DELTAS_TYPES_THAT_MELT_DATAFRAMES:\n if not isinstance(data, pd.DataFrame):\n data = type_util.convert_anything_to_df(data)\n\n if type(data.index) is pd.RangeIndex:\n old_step = _get_pandas_index_attr(data, \"step\")\n\n # We have to drop the predefined index\n data = data.reset_index(drop=True)\n\n old_stop = _get_pandas_index_attr(data, \"stop\")\n\n if old_step is None or old_stop is None:\n raise StreamlitAPIException(\n \"'RangeIndex' object has no attribute 'step'\"\n )\n\n start = last_index + old_step\n stop = last_index + old_step + old_stop\n\n data.index = pd.RangeIndex(start=start, stop=stop, step=old_step)\n last_index = stop - 1\n\n index_name = data.index.name\n if index_name is None:\n index_name = \"index\"\n\n data = pd.melt(data.reset_index(), id_vars=[index_name])\n\n return data, last_index\n\n\ndef _get_pandas_index_attr(data, attr):\n return getattr(data.index, attr, None)\n\n\ndef _value_or_dg(value, dg):\n \"\"\"Return either value, or None, or dg.\n\n This is needed because Widgets have meaningful return values. This is\n unlike other elements, which always return None. Then we internally replace\n that None with a DeltaGenerator instance.\n\n However, sometimes a widget may want to return None, and in this case it\n should not be replaced by a DeltaGenerator. 
So we have a special NoValue\n object that gets replaced by None.\n\n \"\"\"\n if value is NoValue:\n return None\n if value is None:\n return dg\n return value\n\n\ndef _enqueue_message(msg):\n \"\"\"Enqueues a ForwardMsg proto to send to the app.\"\"\"\n ctx = get_report_ctx()\n\n if ctx is None:\n raise NoSessionContext()\n\n ctx.enqueue(msg)\n" ]
[ [ "pandas.RangeIndex" ] ]
rgayler/VSA_altitude_hold
[ "648e64806942a050b00bc41d9f533ee84823df39" ]
[ "python/vsa/__init__.py" ]
[ "'''\nVSA functions\n\nCopyright (c) 2021 Ross W. Gayler and Simon D. Levy\n\nMIT License\n'''\n\nimport numpy as np\n\n# ---- mk_sample_spec --------------------------------------------------------\n\n# function to make a sampling specification for adding VSA vectors\n\n\ndef mk_sample_spec(\n dim, # integer - dimensionality of VSA vectors\n sample_wt, # numeric vector - VSA vector sampling weights\n seed=None # integer - seed for random number generator\n): # returns VSA vector, the weighted sum (sampled) of the argument vectors\n\n # if seed is set the sampling specification vector is fixed\n # otherwise it is randomised\n np.random.seed(seed)\n\n return np.random.choice(len(sample_wt), size=dim, replace=True,\n p=sample_wt)\n\n# ---- mag -------------------------------------------------------------------\n\n# function to calculate the magnitude of a VSA vector\n# Allow for the possibility that the vector might not be bipolar\n\n\ndef mag(\n v1 # numeric - VSA vector (not necessarily bipolar)\n ): # value # numeric - magnitude of the VSA vector\n\n # No numerical analysis considerations\n return np.sqrt(np.sum(v1*v1))\n\n\n# ---- mk_atom_bipolar -------------------------------------------------------\n\n# function to make an atomic VSA vector\n\n\ndef mk_atom_bipolar(\n dim, # dimensionality of VSA vector\n seed=None # seed for random number generator\n ): # value # one randomly selected VSA vector of dimension dim\n\n # if seed is set the the vector is fixed\n # otherwise it is randomised\n np.random.seed(seed)\n\n # Construct a random bipolar vector\n return 2 * (np.random.random(dim) > 0.5) - 1\n\n# ---- multiply --------------------------------------------------------------\n\n# function to multiply an arbitrary number of VSA vectors\n\n\ndef multiply(\n vectors # >= 2 VSA vectors of identical dimension as arguments to multiply\n ): # returns the weighted sum (sampled) of the argument vectors\n\n result = np.ones(len(vectors[0]))\n\n for v in vectors:\n result *= v\n\n return result\n\n# ---- mk_scalar_encoder_spline_spec -----------------------------------------\n\n# function to make the specification for a piecewise linear spline encoder\n\n\ndef mk_scalar_encoder_spline_spec(\n dim, # dimensionality of VSA vectors\n knots, # numeric vector - scalar knot locations (in increasing order)\n seed=None # integer - seed for random number generator\n ): # returns dictionary for linear spline encoder specification\n\n # set the seed\n np.random.seed(seed)\n\n # generate VSA atoms corresponding to each of the knots\n return {\n 'knots_scalar': knots,\n 'knots_vsa': [mk_atom_bipolar(dim) for _ in knots]\n }\n\n# ---- dotprod ---------------------------------------------------------------\n\n# function to calculate the dot product of two VSA vectors\n# Allow for the possibility that the vectors might not be bipolar\n\n\ndef dotprod(\n v1, v2 # VSA vectors of identical dimension (not necessarily bipolar)\n ): # returns cosine similarity of the VSA vectors\n\n # No numerical analysis considerations\n return np.sum(v1*v2)\n\n# ---- decode_scalar_spline --------------------------------------------------\n\n# function to encode a scalar numeric value to a VSA vector\n# This function uses a linear interpolation spline to interpolate between a\n# sequence of VSA vectors corresponding to the spline knots\n\n\ndef decode_scalar_spline(\n v, # numeric - VSA vector (not necessarily bipolar)\n spline_spec, # spline spec from mk_scalar_encoder_spline_spec()\n zero_thresh=4 # zero threshold (in standard 
deviations)\n ): # returns scalar value decoded from v\n\n # get the dot product of the encoded scalar with each of the knot vectors\n dotprods = np.array(list(map(lambda w: dotprod(v, w),\n spline_spec['knots_vsa'])))\n\n # set dot products below the zero threshold to 0\n zero_thresh = zero_thresh * np.sqrt(len(v) * 0.5)\n dotprods[dotprods < zero_thresh] = 0\n\n # normalise the dot products\n dotprods = dotprods / np.sum(dotprods)\n\n # return the weighted sum of the knot scalars\n return np.sum(dotprods * spline_spec['knots_scalar'])\n\n# ---- add -------------------------------------------------------------------\n\n# function to add (weighted sum) an arbitrary number of VSA vectors given as\n# arguments. Weighted add is implemented as weighted sampling from the source\n# vectors. If sample_spec is given it specifies which argument vector is the\n# source for each element of the output vector. If sample_wt is given the\n# sample specification is generated randomly. If neither sample_spec nor\n# sample_wt is given then sample_wt is constructed with equal weight for each\n# argument vector.\n\n\ndef add(\n vectors,\n sample_spec=None, # source (argument VSA vector) for each element of result\n sample_wt=None, # numeric vector - argument vector sampling weights\n seed=None # integer - seed for random number generator\n ): # returns VSA vector, the weighted sum (sampled) of the argument vectors\n\n count = len(vectors)\n dim = len(vectors[0])\n\n if sample_spec is None:\n\n # sample spec not supplied - make a new random one\n # create a sampling weight vector if not supplied\n if sample_wt is None:\n # equal weighting for all source VSA vectors\n sample_wt = np.ones(count) / count\n\n # For each element of the result work out which source VSA vector to\n # sample\n sample_spec = mk_sample_spec(dim, sample_wt, seed)\n\n return np.array([vectors[k][j] for (j, k) in enumerate(sample_spec)])\n\n# ---- permute ---------------------------------------------------------------\n\n# function to apply the specified permutation to the VSA vector\n\n\ndef permute(\n v1, # numeric - VSA vector (not necessarily bipolar)\n perm # integer vector - specification of a permutation\n ): # returns permutation of input VSA vector\n\n # apply the permutation\n return [v1[k] for k in perm]\n\n# ---- mk_perm ---------------------------------------------------------------\n\n# function to make a permutation\n\n\ndef mk_perm(\n dim, # dimensionality of VSA vector\n seed=None # seed for random number generator\n ): # returns one randomly generated permutation specification\n # this is an integer vector of length dim\n\n # if seed is set the vector is fixed\n # otherwise it is randomised\n np.random.seed(seed)\n\n # Construct a random permutation of 1:dim\n return np.random.choice(dim, dim, False)\n\n# ---- mk_inv_perm -----------------------------------------------------------\n\n# function to make the inverse of a permutation\n\n\ndef mk_inv_perm(\n perm # integer vector - specification of a permutation\n ): # integer vector [length(perm)] - specification of inverse permutation\n\n # Invert the permutation\n return np.argsort(perm)\n\n# ---- cos_sim ---------------------------------------------------------------\n\n# function to calculate the cosine similarity of two VSA vectors\n# Allow for the possibility that the vectors might not be bipolar\n\n\ndef cos_sim(\n v1, v2 # VSA vectors of identical dimension (not necessarily bipolar)\n ): # value # numeric - cosine similarity of the VSA vectors\n\n return dotprod(v1, v2) / 
(mag(v1) * mag(v2))\n\n\n# ---- negate ----------------------------------------------------------------\n\n# Function to calculate the negation of a VSA vector\n# (Reverse the direction of the vector)\n# Allow for the possibility that the vector might not be bipolar\n\n\ndef negate(\n v1 # numeric - VSA vector (not necessarily bipolar)\n ): # value # negation of input VSA vector\n\n return -v1\n\n# ---- encode_scalar_spline --------------------------------------------------\n\n# function to encode a scalar numeric value to a VSA vector This function uses\n# a linear interpolation spline to interpolate between a sequence of VSA\n# vectors corresponding to the spline knots\n\n\ndef encode_scalar_spline(\n x, # numeric[1] - scalar value to be encoded\n spline_spec # data frame - spline spec from mk_scalar_encoder_spline_spec()\n ): # numeric # one VSA vector, the encoding of the scalar value\n\n knots_scalar = spline_spec['knots_scalar']\n\n # Map the scalar into a continuous index across the knots\n # Linearly interpolate the input scalar onto a scale in which knots\n # correspond to 0:(n-1)\n i = np.interp(x, knots_scalar, range(len(knots_scalar)))\n\n # Get the knot indices immediately above and below the index value\n i_lo = int(np.floor(i))\n i_hi = int(np.ceil(i))\n\n # Return the VSA vector corresponding to the index value\n if i_lo == i_hi: # check if index is on a knot\n # Exactly on a knot so return the corresponding knot VSA vector\n return spline_spec['knots_vsa'][int(i)]\n\n # Between two knots\n # Return the weighted sum of the corresponding knot VSA vectors\n i_offset = i - i_lo\n vecs = spline_spec['knots_vsa']\n return add([vecs[i_lo], vecs[i_hi]], sample_wt=(1 - i_offset, i_offset))\n" ]
[ [ "numpy.ceil", "numpy.random.choice", "numpy.random.seed", "numpy.sum", "numpy.ones", "numpy.argsort", "numpy.random.random", "numpy.floor" ] ]
aloui-mathias/ecoTeka-deepforest-demo
[ "54007af69a14d25bc8e48a02831260f4f2908ab6" ]
[ "script/functions.py" ]
[ "import json\nimport pyproj\nimport numpy\nimport pandas\nimport cv2\nfrom urllib.parse import unquote, urlencode\nfrom owslib.wmts import WebMapTileService\nfrom PIL import Image\nfrom matplotlib import pyplot\nfrom shapely import geometry\nfrom deepforest import main\nfrom qgis.core import (\n QgsApplication,\n QgsProject,\n QgsRectangle,\n QgsRasterLayer,\n QgsMapSettings,\n QgsMapRendererParallelJob\n)\nfrom qgis.PyQt.QtGui import QColor\nfrom qgis.PyQt.QtCore import QSize, QEventLoop\nfrom typing import List, Optional, Tuple\n\n\ndef get_polygons(geojson_path: str) -> List[List[List[float]]]:\n try:\n file = open(geojson_path, 'r', encoding=\"utf-8\")\n file.seek(0)\n geojson = json.loads(file.read())\n except:\n print(\n \"Using the default path, please check you are running \"\n + \"the script from ecoTeka-deepforest-demo folder.\"\n )\n raise\n file.close()\n polygons = []\n for feature in geojson['features']:\n polygons.append(feature['geometry']['coordinates'][0])\n return polygons\n\n\ndef save_polygon(polygon, name):\n output = {}\n points = []\n for coords in polygon:\n point = {}\n point[\"x\"] = str(coords[0])\n point[\"y\"] = str(coords[1])\n points.append(point)\n output[\"points\"] = points\n with open(name+\"-polygon.geojson\", \"w\") as file:\n json.dump(output, file)\n\n\ndef get_tile_coord_from_polygon(polygon):\n xmin = polygon[0][0]\n xmax = polygon[0][0]\n ymin = polygon[0][1]\n ymax = polygon[0][1]\n for point in polygon:\n xmin = point[0] if point[0] < xmin else xmin\n xmax = point[0] if point[0] > xmax else xmax\n ymin = point[1] if point[1] < ymin else ymin\n ymax = point[1] if point[1] > ymax else ymax\n return xmin, ymin, xmax, ymax\n\n\ndef convert_coord(x: float, y: float,\n input_epsg: int,\n output_epsg: int) -> Tuple[float]:\n\n input_crs = pyproj.CRS.from_epsg(input_epsg)\n output_crs = pyproj.CRS.from_epsg(output_epsg)\n\n proj = pyproj.Transformer.from_crs(input_crs, output_crs)\n\n if input_crs.is_geographic and not output_crs.is_geographic:\n coord = proj.transform(y, x)\n else:\n coord = proj.transform(x, y)\n\n if output_crs.is_geographic and not input_crs.is_geographic:\n return coord[1], coord[0]\n else:\n return coord[0], coord[1]\n\n\ndef get_ign_request() -> str:\n\n WMTS_URL_GETCAP = \"https://wxs.ign.fr/decouverte/geoportail/wmts?\"\\\n \"SERVICE%3DWMTS%26REQUEST%3DGetCapabilities\"\n WMTS = WebMapTileService(WMTS_URL_GETCAP)\n LAYER_NAME = \"ORTHOIMAGERY.ORTHOPHOTOS\"\n WMTS_LAYER = WMTS[LAYER_NAME]\n LAYER_TITLE = WMTS_LAYER.title\n WMTS_URL_PARAMS = {\n \"SERVICE\": \"WMTS\",\n \"VERSION\": \"1.0.0\",\n \"REQUEST\": \"GetCapabilities\",\n \"layers\": LAYER_NAME,\n \"crs\": \"EPSG:3857\",\n \"format\": \"image/jpeg\",\n \"styles\": \"normal\",\n \"tileMatrixSet\": \"PM\",\n \"tileMatrix\": \"21\",\n \"url\": WMTS_URL_GETCAP\n }\n WMTS_URL_FINAL = unquote(urlencode(WMTS_URL_PARAMS))\n\n return WMTS_URL_FINAL\n\n\ndef start_qgis():\n QGS = QgsApplication([], False)\n QGS.initQgis()\n return QGS\n\n\ndef end_qgis(QGS):\n QGS.exitQgis()\n\n\ndef render_image(\n request: str,\n xmin: float,\n ymin: float,\n xmax: float,\n ymax: float,\n path: str,\n high_resolution: bool) -> None:\n\n WMTS_LAYER = QgsRasterLayer(\n request, \"raster-layer\", \"wms\")\n if WMTS_LAYER.isValid():\n QgsProject.instance().addMapLayer(WMTS_LAYER)\n else:\n return WMTS_LAYER.error().message()\n extent = QgsRectangle(xmin, ymin, xmax, ymax)\n WMTS_LAYER.setExtent(extent)\n settings = QgsMapSettings()\n settings.setLayers([WMTS_LAYER])\n 
settings.setBackgroundColor(QColor(255, 255, 255))\n if high_resolution:\n res = 0.2\n else:\n res = 0.25\n settings.setOutputSize(QSize(\n int(extent.width() / res),\n int(extent.height() / res)\n ))\n settings.setExtent(WMTS_LAYER.extent())\n\n render = QgsMapRendererParallelJob(settings)\n\n def finished():\n img = render.renderedImage()\n img.save(path + \".tiff\", \"png\")\n\n render.finished.connect(finished)\n\n render.start()\n\n loop = QEventLoop()\n render.finished.connect(loop.quit)\n loop.exec_()\n\n del render\n\n return\n\n\ndef get_image(path: str) -> numpy.ndarray:\n image = Image.open(path + \".tiff\", 'r')\n numpy_rgba = numpy.array(image).astype('float32')\n return numpy_rgba[:, :, :3]\n\n\ndef convert_polygon(\n polygon: List[List[float]],\n image: numpy.ndarray,\n xmin: float,\n ymin: float,\n xmax: float,\n ymax: float) -> List:\n\n polygon_image = []\n width_ign = image.shape[1]\n height_ign = image.shape[0]\n\n for index in range(len(polygon)):\n coord = polygon[index]\n coord_ign = convert_coord(coord[0], coord[1], 4326, 3857)\n point = ((coord_ign[0] - xmin)*(width_ign/(xmax-xmin)),\n (ymax - coord_ign[1])*(height_ign/(ymax-ymin)))\n polygon_image.append(point)\n\n return numpy.array(polygon_image).astype('int32')\n\n\ndef predictions(\n image: numpy.ndarray,\n high_resolution: bool) -> pandas.DataFrame:\n\n model = main.deepforest()\n model.use_release()\n\n if high_resolution:\n patch_size = 800\n else:\n patch_size = 500\n\n return model.predict_tile(\n raster_path=None,\n image=image,\n patch_size=patch_size,\n patch_overlap=0.15,\n iou_threshold=0.15,\n return_plot=False,\n use_soft_nms=False,\n sigma=0.5,\n thresh=0.001)\n\n\ndef draw_box(\n image: numpy.ndarray,\n box: List,\n color: List,\n thickness: float = 2) -> None:\n\n b = numpy.array(box).astype(int)\n cv2.rectangle(\n img=image,\n pt1=(b[0], b[1]),\n pt2=(b[2], b[3]),\n color=color,\n thickness=thickness,\n lineType=cv2.LINE_AA\n )\n\n return\n\n\ndef draw_all_boxes(\n image: numpy.ndarray,\n boxes: pandas.DataFrame,\n color: List = [0, 0, 255]) -> None:\n\n for box in boxes[[\"xmin\", \"ymin\", \"xmax\", \"ymax\"]].values:\n draw_box(image, box, color)\n\n return\n\n\ndef save_image_predictions(\n path: str,\n image: numpy.ndarray,\n predictions: pandas.DataFrame,\n polygon: Optional[List[List[float]]] = None) -> None:\n\n image_copy = image.copy().astype('uint8')\n\n if polygon is None:\n boxes = predictions\n else:\n zone = geometry.Polygon(polygon)\n boxes = []\n if predictions is not None:\n for predicted_box in predictions.values:\n coord = predicted_box[:4]\n box_points = [\n [coord[0], coord[1]],\n [coord[0], coord[3]],\n [coord[2], coord[3]],\n [coord[2], coord[1]]\n ]\n box = geometry.Polygon(box_points)\n intersection_area = box.intersection(zone).area\n box_area = box.area\n ioa = intersection_area / box_area\n if ioa > 0.4:\n boxes.append(predicted_box)\n\n boxes = pandas.DataFrame(boxes, columns=predictions.columns)\n\n cv2.polylines(image_copy, [polygon], True, [255, 0, 0], thickness=10)\n\n if boxes is not None:\n draw_all_boxes(image_copy, boxes)\n print(str(len(boxes)) + \" predictions inside\")\n\n pyplot.imsave(path + \".png\", image_copy)\n\n return\n" ]
[ [ "matplotlib.pyplot.imsave", "pandas.DataFrame", "numpy.array" ] ]
milebril/Temporal-SBMC-extension
[ "57c56b73786e49d233facffde4ba80f212a00fa8" ]
[ "sbmc/losses.py" ]
[ "# encoding: utf-8\n# Sample-based Monte Carlo Denoising using a Kernel-Splatting Network\n# Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand\n# Siggraph 2019\n#\n# Copyright (c) 2019 Michaël Gharbi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Loss functions and metrics.\"\"\"\nimport torch as th\n\n\n__all__ = [\"RelativeMSE\", \"SMAPE\", \"TonemappedMSE\", \"TonemappedRelativeMSE\"]\n\n\nclass RelativeMSE(th.nn.Module):\n \"\"\"Relative Mean-Squared Error.\n\n :math:`0.5 * \\\\frac{(x - y)^2}{y^2 + \\epsilon}`\n\n Args:\n eps(float): small number to avoid division by 0.\n \"\"\"\n def __init__(self, eps=1e-2):\n super(RelativeMSE, self).__init__()\n self.eps = eps\n\n def forward(self, im, ref):\n \"\"\"Evaluate the metric.\n\n Args:\n im(th.Tensor): image.\n ref(th.Tensor): reference.\n \"\"\"\n mse = th.pow(im-ref, 2)\n loss = mse/(th.pow(ref, 2) + self.eps)\n loss = 0.5*th.mean(loss)\n return loss\n\n\nclass SMAPE(th.nn.Module):\n \"\"\"Symmetric Mean Absolute error.\n\n :math:`\\\\frac{|x - y|} {|x| + |y| + \\epsilon}`\n\n Args:\n eps(float): small number to avoid division by 0.\n \"\"\"\n\n def __init__(self, eps=1e-2):\n super(SMAPE, self).__init__()\n self.eps = eps\n\n def forward(self, im, ref):\n # NOTE: the denominator is used to scale the loss, but does not\n # contribute gradients, hence the '.detach()' call.\n loss = (th.abs(im-ref) / (\n self.eps + th.abs(im.detach()) + th.abs(ref.detach()))).mean()\n\n return loss\n\nclass TonemappedMSE(th.nn.Module):\n \"\"\"Mean-squared error on tonemaped images.\n\n Args:\n eps(float): small number to avoid division by 0.\n \"\"\"\n\n def __init__(self, eps=1e-2):\n super(TonemappedMSE, self).__init__()\n self.eps = eps # avoid division by zero\n\n def forward(self, im, ref):\n im = _tonemap(im)\n ref = _tonemap(ref)\n loss = th.pow(im-ref, 2)\n loss = 0.5*th.mean(loss)\n return loss\n\nclass TonemappedRelativeMSE(th.nn.Module):\n \"\"\"Relative mean-squared error on tonemaped images.\n\n Args:\n eps(float): small number to avoid division by 0.\n \"\"\"\n def __init__(self, eps=1e-2):\n super(TonemappedRelativeMSE, self).__init__()\n self.eps = eps # avoid division by zero\n\n def forward(self, im, ref):\n im = _tonemap(im)\n ref = _tonemap(ref)\n mse = th.pow(im-ref, 2)\n loss = mse/(th.pow(ref, 2) + self.eps)\n loss = 0.5*th.mean(loss)\n return loss\n\n\ndef _tonemap(im):\n \"\"\"Helper Reinhards tonemapper.\n\n Args:\n im(th.Tensor): image to tonemap.\n\n Returns:\n (th.Tensor) tonemaped image.\n \"\"\"\n im = th.clamp(im, min=0)\n return im / (1+im)\n" ]
[ [ "torch.abs", "torch.mean", "torch.clamp", "torch.pow" ] ]
chroneus/imgaug
[ "621a7e6a728250e818feccda9ccc6886242e196f" ]
[ "checks/check_heatmaps.py" ]
[ "from __future__ import print_function, division\n\nimport numpy as np\n\nimport imgaug as ia\nfrom imgaug import augmenters as iaa\n\n\ndef main():\n quokka = ia.data.quokka(size=0.5)\n h, w = quokka.shape[0:2]\n heatmap = np.zeros((h, w), dtype=np.float32)\n heatmap[70:120, 90:150] = 0.1\n heatmap[30:70, 50:65] = 0.5\n heatmap[20:50, 55:85] = 1.0\n heatmap[120:140, 0:20] = 0.75\n\n heatmaps = ia.HeatmapsOnImage(heatmap[..., np.newaxis], quokka.shape)\n\n print(\"Affine...\")\n aug = iaa.Affine(translate_px={\"x\": 20}, mode=\"constant\", cval=128)\n quokka_aug = aug.augment_image(quokka)\n heatmaps_aug = aug.augment_heatmaps([heatmaps])[0]\n heatmaps_drawn = heatmaps.draw_on_image(quokka)\n heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)\n\n ia.imshow(\n np.hstack([\n heatmaps_drawn[0],\n heatmaps_aug_drawn[0]\n ])\n )\n\n print(\"Affine with mode=edge...\")\n aug = iaa.Affine(translate_px={\"x\": 20}, mode=\"edge\")\n quokka_aug = aug.augment_image(quokka)\n heatmaps_aug = aug.augment_heatmaps([heatmaps])[0]\n heatmaps_drawn = heatmaps.draw_on_image(quokka)\n heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)\n\n ia.imshow(\n np.hstack([\n heatmaps_drawn[0],\n heatmaps_aug_drawn[0]\n ])\n )\n\n print(\"PiecewiseAffine...\")\n aug = iaa.PiecewiseAffine(scale=0.04)\n aug_det = aug.to_deterministic()\n quokka_aug = aug_det.augment_image(quokka)\n heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]\n heatmaps_drawn = heatmaps.draw_on_image(quokka)\n heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)\n\n ia.imshow(\n np.hstack([\n heatmaps_drawn[0],\n heatmaps_aug_drawn[0]\n ])\n )\n\n print(\"PerspectiveTransform...\")\n aug = iaa.PerspectiveTransform(scale=0.04)\n aug_det = aug.to_deterministic()\n quokka_aug = aug_det.augment_image(quokka)\n heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]\n heatmaps_drawn = heatmaps.draw_on_image(quokka)\n heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)\n\n ia.imshow(\n np.hstack([\n heatmaps_drawn[0],\n heatmaps_aug_drawn[0]\n ])\n )\n\n print(\"ElasticTransformation alpha=3, sig=0.5...\")\n aug = iaa.ElasticTransformation(alpha=3.0, sigma=0.5)\n aug_det = aug.to_deterministic()\n quokka_aug = aug_det.augment_image(quokka)\n heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]\n heatmaps_drawn = heatmaps.draw_on_image(quokka)\n heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)\n\n ia.imshow(\n np.hstack([\n heatmaps_drawn[0],\n heatmaps_aug_drawn[0]\n ])\n )\n\n print(\"ElasticTransformation alpha=10, sig=3...\")\n aug = iaa.ElasticTransformation(alpha=10.0, sigma=3.0)\n aug_det = aug.to_deterministic()\n quokka_aug = aug_det.augment_image(quokka)\n heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]\n heatmaps_drawn = heatmaps.draw_on_image(quokka)\n heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)\n\n ia.imshow(\n np.hstack([\n heatmaps_drawn[0],\n heatmaps_aug_drawn[0]\n ])\n )\n\n print(\"CopAndPad mode=constant...\")\n aug = iaa.CropAndPad(px=(-10, 10, 15, -15), pad_mode=\"constant\", pad_cval=128)\n aug_det = aug.to_deterministic()\n quokka_aug = aug_det.augment_image(quokka)\n heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]\n heatmaps_drawn = heatmaps.draw_on_image(quokka)\n heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)\n\n ia.imshow(\n np.hstack([\n heatmaps_drawn[0],\n heatmaps_aug_drawn[0]\n ])\n )\n\n print(\"CopAndPad mode=constant + percent...\")\n aug = iaa.CropAndPad(percent=(-0.05, 0.05, 0.1, -0.1), pad_mode=\"constant\", pad_cval=128)\n 
aug_det = aug.to_deterministic()\n quokka_aug = aug_det.augment_image(quokka)\n heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]\n heatmaps_drawn = heatmaps.draw_on_image(quokka)\n heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)\n\n ia.imshow(\n np.hstack([\n heatmaps_drawn[0],\n heatmaps_aug_drawn[0]\n ])\n )\n\n print(\"CropAndPad mode=edge...\")\n aug = iaa.CropAndPad(px=(-10, 10, 15, -15), pad_mode=\"edge\")\n aug_det = aug.to_deterministic()\n quokka_aug = aug_det.augment_image(quokka)\n heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]\n heatmaps_drawn = heatmaps.draw_on_image(quokka)\n heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)\n\n ia.imshow(\n np.hstack([\n heatmaps_drawn[0],\n heatmaps_aug_drawn[0]\n ])\n )\n\n print(\"Resize...\")\n aug = iaa.Resize(0.5, interpolation=\"nearest\")\n aug_det = aug.to_deterministic()\n quokka_aug = aug_det.augment_image(quokka)\n heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]\n heatmaps_drawn = heatmaps.draw_on_image(quokka)\n heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)\n\n ia.imshow(ia.draw_grid([heatmaps_drawn[0], heatmaps_aug_drawn[0]], cols=2))\n\n print(\"Alpha...\")\n aug = iaa.Alpha(0.7, iaa.Affine(rotate=20))\n aug_det = aug.to_deterministic()\n quokka_aug = aug_det.augment_image(quokka)\n heatmaps_aug = aug_det.augment_heatmaps([heatmaps])[0]\n heatmaps_drawn = heatmaps.draw_on_image(quokka)\n heatmaps_aug_drawn = heatmaps_aug.draw_on_image(quokka_aug)\n\n ia.imshow(\n np.hstack([\n heatmaps_drawn[0],\n heatmaps_aug_drawn[0]\n ])\n )\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.hstack", "numpy.zeros" ] ]
hturner08/openmc
[ "5e36cb2f5daf7ab9162734e927dd652c1118a5bd" ]
[ "openmc/deplete/results_list.py" ]
[ "import h5py\nimport numpy as np\n\nfrom .results import Results, _VERSION_RESULTS\nfrom openmc.checkvalue import check_filetype_version\n\n\nclass ResultsList(list):\n \"\"\"A list of openmc.deplete.Results objects\n\n Parameters\n ----------\n filename : str\n The filename to read from.\n\n \"\"\"\n def __init__(self, filename):\n super().__init__()\n with h5py.File(str(filename), \"r\") as fh:\n check_filetype_version(fh, 'depletion results', _VERSION_RESULTS[0])\n\n # Get number of results stored\n n = fh[\"number\"][...].shape[0]\n\n for i in range(n):\n self.append(Results.from_hdf5(fh, i))\n\n def get_atoms(self, mat, nuc):\n \"\"\"Get nuclide concentration over time from a single material\n\n Parameters\n ----------\n mat : str\n Material name to evaluate\n nuc : str\n Nuclide name to evaluate\n\n Returns\n -------\n time : numpy.ndarray\n Array of times in [s]\n concentration : numpy.ndarray\n Total number of atoms for specified nuclide\n\n \"\"\"\n time = np.empty_like(self, dtype=float)\n concentration = np.empty_like(self, dtype=float)\n\n # Evaluate value in each region\n for i, result in enumerate(self):\n time[i] = result.time[0]\n concentration[i] = result[0, mat, nuc]\n\n return time, concentration\n\n def get_reaction_rate(self, mat, nuc, rx):\n \"\"\"Get reaction rate in a single material/nuclide over time\n\n Parameters\n ----------\n mat : str\n Material name to evaluate\n nuc : str\n Nuclide name to evaluate\n rx : str\n Reaction rate to evaluate\n\n Returns\n -------\n time : numpy.ndarray\n Array of times in [s]\n rate : numpy.ndarray\n Array of reaction rates\n\n \"\"\"\n time = np.empty_like(self, dtype=float)\n rate = np.empty_like(self, dtype=float)\n\n # Evaluate value in each region\n for i, result in enumerate(self):\n time[i] = result.time[0]\n rate[i] = result.rates[0].get(mat, nuc, rx) * result[0, mat, nuc]\n\n return time, rate\n\n def get_eigenvalue(self):\n \"\"\"Evaluates the eigenvalue from a results list.\n\n Returns\n -------\n time : numpy.ndarray\n Array of times in [s]\n eigenvalue : numpy.ndarray\n k-eigenvalue at each time. Column 0\n contains the eigenvalue, while column\n 1 contains the associated uncertainty\n\n \"\"\"\n time = np.empty_like(self, dtype=float)\n eigenvalue = np.empty((len(self), 2), dtype=float)\n\n # Get time/eigenvalue at each point\n for i, result in enumerate(self):\n time[i] = result.time[0]\n eigenvalue[i] = result.k[0]\n\n return time, eigenvalue\n\n def get_depletion_time(self):\n \"\"\"Return an array of the average time to deplete a material\n\n ..note::\n\n Will have one fewer row than number of other methods,\n like :meth:`get_eigenvalues`, because no depletion\n is performed at the final transport stage\n\n Returns\n -------\n\n times : :class:`numpy.ndarray`\n Vector of average time to deplete a single material\n across all processes and materials.\n\n \"\"\"\n times = np.empty(len(self) - 1)\n # Need special logic because the predictor\n # writes EOS values for step i as BOS values\n # for step i+1\n # The first proc_time may be zero\n if self[0].proc_time > 0.0:\n items = self[:-1]\n else:\n items = self[1:]\n for ix, res in enumerate(items):\n times[ix] = res.proc_time\n return times\n" ]
[ [ "numpy.empty_like" ] ]
andrewli77/rllab-finetuning
[ "2dae9141d0fdc284d04f18931907131d66b43023" ]
[ "sandbox/finetuning/policies/categorical_mlp_policy.py" ]
[ "import lasagne.layers as L\nimport lasagne.nonlinearities as NL\nfrom contextlib import contextmanager\nimport numpy as np\n\nfrom rllab.core.lasagne_powered import LasagnePowered\nfrom rllab.core.network import MLP\nfrom rllab.core.serializable import Serializable\n\nfrom sandbox.finetuning.distributions.categorical import Categorical_oneAxis as Categorical\n# from sandbox.snn4hrl.distributions.categorical import from_index\n# from rllab.distributions.categorical import Categorical\n\nfrom rllab.misc import ext\nfrom rllab.misc.overrides import overrides\nfrom rllab.policies.base import StochasticPolicy\nfrom rllab.spaces import Discrete, Box\n\n\nclass CategoricalMLPPolicy(StochasticPolicy, LasagnePowered, Serializable):\n \"\"\"\n This class is made to run TRPO_snn for regular CategoricalMLPs (in order to get the bonus evaluators)\n \"\"\"\n\n def __init__(\n self,\n env_spec,\n latent_dim=0, # all this is fake\n latent_name='categorical',\n bilinear_integration=False,\n resample=False, # until here\n hidden_sizes=(32, 32),\n hidden_nonlinearity=NL.tanh,\n prob_network=None,\n ):\n \"\"\"\n :param env_spec: A spec for the mdp.\n :param hidden_sizes: list of sizes for the fully connected hidden layers\n :param hidden_nonlinearity: nonlinearity used for each hidden layer\n :param prob_network: manually specified network for this policy, other network params\n are ignored\n :return:\n \"\"\"\n # bullshit\n self.latent_dim = latent_dim ##could I avoid needing this self for the get_action?\n self.latent_name = latent_name\n self.bilinear_integration = bilinear_integration\n self.resample = resample\n self._set_std_to_0 = False\n\n Serializable.quick_init(self, locals())\n\n assert isinstance(env_spec.action_space, Discrete)\n\n if prob_network is None:\n prob_network = MLP(\n input_shape=(env_spec.observation_space.flat_dim,),\n output_dim=env_spec.action_space.n,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=NL.softmax,\n )\n\n self._l_prob = prob_network.output_layer\n self._l_obs = prob_network.input_layer\n self._f_prob = ext.compile_function([prob_network.input_layer.input_var], L.get_output(\n prob_network.output_layer))\n\n self._dist = Categorical(env_spec.action_space.n)\n # self._actionspace_minus_one = Discrete(5) # for remove action\n\n super(CategoricalMLPPolicy, self).__init__(env_spec)\n LasagnePowered.__init__(self, [prob_network.output_layer])\n # self.to_remove = None # for remove action\n\n @property\n def latent_space(self):\n return Box(low=-np.inf, high=np.inf, shape=(1,))\n\n @contextmanager\n def set_std_to_0(self):\n self._set_std_to_0 = True\n yield\n self._set_std_to_0 = False\n\n @overrides\n def dist_info_sym(self, obs_var, state_info_vars=None):\n return dict(prob=L.get_output(self._l_prob, {self._l_obs: obs_var}))\n\n @overrides\n def dist_info(self, obs, state_infos=None):\n return dict(prob=self._f_prob(obs))\n\n # The return value is a pair. The first item is a matrix (N, A), where each\n # entry corresponds to the action value taken. 
The second item is a vector\n # of length N, where each entry is the density value for that action, under\n # the current policy\n @overrides\n def get_action(self, observation):\n flat_obs = self.observation_space.flatten(observation)\n dist_info = dict((k, val[0]) for k, val in self.dist_info([flat_obs]).items()) # removing extra dim\n if self._set_std_to_0:\n action = np.argmax(dist_info['prob'])\n # index = np.argmax(dist_info['prob'])\n # action = from_index(index, dim=len(dist_info['prob']))\n else:\n # action = self._dist.sample(dist_info)\n action = self.action_space.weighted_sample(dist_info['prob'])\n return action, dict(dist_info)\n\n ######## copy used when removing actions\n # @overrides\n # def get_action(self, observation):\n # flat_obs = self.observation_space.flatten(observation)\n # dist_info = dict((k, val[0]) for k, val in self.dist_info([flat_obs]).items()) # removing extra dim\n # if self._set_std_to_0:\n # action = np.argmax(dist_info['prob'])\n # # index = np.argmax(dist_info['prob'])\n # # action = from_index(index, dim=len(dist_info['prob']))\n # else:\n # # action = self._dist.sample(dist_info)\n # if self.to_remove is not None:\n # indices = [x for x in range(6) if x != self.to_remove]\n # probs = dist_info['prob'][indices]\n # probs = probs/np.sum(probs)\n # action = self._actionspace_minus_one.weighted_sample(probs)\n # if action >= self.to_remove:\n # action += 1\n # else:\n # action = self.action_space.weighted_sample(dist_info['prob'])\n # return action, dict(dist_info, latents=np.array([]))\n\n def get_actions(self, observations):\n flat_obs = self.observation_space.flatten_n(observations)\n dist_infos = self.dist_info(flat_obs)\n if self._set_std_to_0:\n # indexes = [np.argmax(dist_info['prob']) for dist_info in dist_infos]\n # actions = from_index(indexes, dim=len(dist_infos[0]['prob']))\n actions = [np.argmax(dist_info['prob']) for dist_info in dist_infos]\n else:\n actions = list(map(self.action_space.weighted_sample, dist_infos['prob']))\n latents = np.array([[]] * len(actions)) # fake latents\n return actions, dict(**dist_infos, latents=latents)\n\n @property\n def distribution(self):\n return self._dist\n" ]
[ [ "numpy.argmax" ] ]
YikSanChan/jina
[ "d7ad9fde97f02db79233ba93400e0bda74597580" ]
[ "jina/drivers/score.py" ]
[ "__copyright__ = \"Copyright (c) 2020 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\n# lift the chunk-level topk to doc-level topk\nimport numpy as np\n\nfrom . import BaseExecutableDriver\nfrom .helper import pb_obj2dict\n\n\nclass BaseRankDriver(BaseExecutableDriver):\n \"\"\"Drivers inherited from this Driver will bind :meth:`craft` by default \"\"\"\n\n def __init__(self, executor: str = None, method: str = 'score', *args, **kwargs):\n super().__init__(executor, method, *args, **kwargs)\n\n\nclass Chunk2DocRankDriver(BaseRankDriver):\n \"\"\"Extract chunk-level score and use the executor to compute the doc-level score\n\n \"\"\"\n\n def __call__(self, *args, **kwargs):\n exec = self.exec\n\n for d in self.req.docs: # d is a query in this context, i.e. for each query, compute separately\n match_idx = []\n query_chunk_meta = {}\n match_chunk_meta = {}\n for c in d.chunks:\n for k in c.topk_results:\n match_idx.append((k.match_chunk.doc_id, k.match_chunk.chunk_id, c.chunk_id, k.score.value))\n query_chunk_meta[c.chunk_id] = pb_obj2dict(c, exec.required_keys)\n match_chunk_meta[k.match_chunk.chunk_id] = pb_obj2dict(k.match_chunk, exec.required_keys)\n\n # np.uint32 uses 32 bits. np.float32 uses 23 bit mantissa, so integer greater than 2^23 will have their\n # least significant bits truncated.\n if not match_idx:\n continue\n match_idx = np.array(match_idx, dtype=np.float64)\n\n doc_idx = self.exec_fn(match_idx, query_chunk_meta, match_chunk_meta)\n\n for _d in doc_idx:\n r = d.topk_results.add()\n r.match_doc.doc_id = int(_d[0])\n r.score.value = _d[1]\n r.score.op_name = exec.__class__.__name__\n" ]
[ [ "numpy.array" ] ]
avijit-chakroborty/ngraph-bridge
[ "ea6422491ec75504e78a63db029e7f74ec3479a5", "ea6422491ec75504e78a63db029e7f74ec3479a5" ]
[ "test/python/test_updateconfig.py", "test/python/test_slice.py" ]
[ "# ==============================================================================\n# Copyright 2019-2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"nGraph TensorFlow bridge update_config api test\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pytest\nimport os\n\nimport tensorflow as tf\ntf.compat.v1.disable_eager_execution()\nfrom tensorflow.core.protobuf import rewriter_config_pb2\n\nfrom common import NgraphTest\nimport ngraph_bridge\n\n\nclass TestUpdateConfig(NgraphTest):\n\n @pytest.mark.skipif(\n not ngraph_bridge.is_grappler_enabled(), reason='Only for Grappler')\n def test_update_config_adds_optimizer_only_once(self):\n\n # Helper function to count the number of occurances in a config\n def count_ng_optimizers(config):\n custom_opts = config.graph_options.rewrite_options.custom_optimizers\n count = 0\n for i in range(len(custom_opts)):\n if custom_opts[i].name == 'ngraph-optimizer':\n count += 1\n return count\n\n # allow_soft_placement is set just to simulate\n # a real world non-empty initial ConfigProto\n config = tf.compat.v1.ConfigProto(allow_soft_placement=True)\n assert count_ng_optimizers(config) == 0\n config_new_1 = ngraph_bridge.update_config(config)\n config_new_2 = ngraph_bridge.update_config(config_new_1)\n assert count_ng_optimizers(config) == count_ng_optimizers(\n config_new_1) == count_ng_optimizers(config_new_2) == 1\n", "# ==============================================================================\n# Copyright 2018-2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"nGraph TensorFlow bridge slice operation test\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pytest\nimport tensorflow as tf\ntf.compat.v1.disable_eager_execution()\n\nimport numpy as np\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\n\nfrom common import NgraphTest\n\n\nclass TestSliceOperations(NgraphTest):\n\n def test_slice(self):\n inp = np.random.rand(4, 4).astype(\"f\")\n slice_ts = []\n expected = []\n\n a = np.array([float(x) for x in inp.ravel(order=\"C\")])\n a.shape = (4, 4)\n\n x = 
tf.compat.v1.placeholder(dtype=dtypes.float32)\n slice_ts.append(array_ops.slice(x, [0, 0], [2, 2]))\n slice_ts.append(array_ops.slice(x, [0, 0], [-1, -1]))\n slice_ts.append(array_ops.slice(x, [2, 2], [-1, -1]))\n\n def run_test(sess):\n return sess.run(slice_ts, feed_dict={x: a})\n\n slice_vals = self.with_ngraph(run_test)\n\n expected.append(inp[:2, :2])\n expected.append(inp[:, :])\n expected.append(inp[2:, 2:])\n\n for v, e in zip(slice_vals, expected):\n np.testing.assert_array_equal(v, e)\n\n def test_strided_slice(self):\n inp = np.random.rand(4, 5).astype(\"f\")\n\n slice_ts = []\n expected = []\n a = np.array([float(x) for x in inp.ravel(order=\"C\")])\n a.shape = (4, 5)\n\n x = tf.compat.v1.placeholder(dtype=dtypes.float32)\n\n slice_ts.append(x[:])\n slice_ts.append(x[:, :])\n slice_ts.append(x[1:, :-2])\n slice_ts.append(x[::2, :-2])\n slice_ts.append(x[1, :])\n slice_ts.append(x[:, 1])\n slice_ts.append(x[1, 1])\n slice_ts.append(x[0])\n slice_ts.append(x[0][1])\n slice_ts.append(x[-1])\n\n # Various ways of representing identity slice\n slice_ts.append(x[:, :])\n slice_ts.append(x[::, ::])\n slice_ts.append(x[::1, ::1])\n\n # Reverse in each dimension independently\n slice_ts.append(x[::-1, :])\n slice_ts.append(x[:, ::-1])\n\n ## negative index tests i.e. n-2 in first component\n slice_ts.append(x[-2::-1, ::1])\n\n # degenerate by offering a forward interval with a negative stride\n slice_ts.append(x[0:-1:-1, :])\n # degenerate with a reverse interval with a positive stride\n slice_ts.append(x[-1:0, :])\n # empty interval in every dimension\n slice_ts.append(x[-1:0, 2:3:-1])\n slice_ts.append(x[2:2, 2:3:-1])\n # stride greater than range\n slice_ts.append(x[1:3:7, :])\n\n # ellipses and new axis\n slice_ts.append(x[:, tf.newaxis])\n slice_ts.append(x[...])\n slice_ts.append(x[1:2, ...])\n\n def run_test(sess):\n return sess.run(slice_ts, feed_dict={x: a})\n\n slice_vals = self.with_ngraph(run_test)\n\n expected.append(inp[:])\n expected.append(inp[:, :])\n expected.append(inp[1:, :-2])\n expected.append(inp[::2, :-2])\n expected.append(inp[1, :])\n expected.append(inp[:, 1])\n expected.append(inp[1, 1])\n expected.append(inp[0])\n expected.append(inp[0][1])\n expected.append(inp[-1])\n #TODO: support ellipses and new_axis correctly\n\n # Various ways of representing identity slice\n expected.append(inp[:, :])\n expected.append(inp[::, ::])\n expected.append(inp[::1, ::1])\n\n # Reverse in each dimension independently\n expected.append(inp[::-1, :])\n expected.append(inp[:, ::-1])\n\n ## negative index tests i.e. 
n-2 in first component\n expected.append(inp[-2::-1, ::1])\n\n # degenerate by offering a forward interval with a negative stride\n expected.append(inp[0:-1:-1, :])\n # degenerate with a reverse interval with a positive stride\n expected.append(inp[-1:0, :])\n # empty interval in every dimension\n expected.append(inp[-1:0, 2:3:-1])\n expected.append(inp[2:2, 2:3:-1])\n # stride greater than range\n expected.append(inp[1:3:7, :])\n\n # ellipses and new axis\n expected.append(inp[:, tf.newaxis])\n expected.append(inp[...])\n expected.append(inp[1:2, ...])\n\n for v, e in zip(slice_vals, expected):\n np.testing.assert_array_equal(v, e)\n\n def test_strided_slice_2(self):\n inp = np.random.rand(3, 2, 3).astype(\"f\")\n\n slice_ts = []\n expected = []\n a = np.array([float(x) for x in inp.ravel(order=\"C\")])\n a.shape = (3, 2, 3)\n\n x = tf.compat.v1.placeholder(dtype=dtypes.float32)\n\n slice_ts.append(x[0:2, 1:2, 2:1:-1])\n\n def run_test(sess):\n return sess.run(slice_ts, feed_dict={x: a})\n\n slice_vals = self.with_ngraph(run_test)\n\n expected.append(inp[0:2, 1:2, 2:1:-1])\n\n for v, e in zip(slice_vals, expected):\n np.testing.assert_array_equal(v, e)\n\n def test_strided_slice_3(self):\n inp = np.random.rand(3, 2, 3).astype(\"f\")\n\n slice_ts = []\n expected = []\n a = np.array([float(x) for x in inp.ravel(order=\"C\")])\n a.shape = (3, 2, 3)\n\n x = tf.compat.v1.placeholder(dtype=dtypes.float32)\n\n slice_ts.append(x[0:2, -1:3, 2:1:-1])\n\n def run_test(sess):\n return sess.run(slice_ts, feed_dict={x: a})\n\n slice_vals = self.with_ngraph(run_test)\n\n expected.append(inp[0:2, -1:3, 2:1:-1])\n\n for v, e in zip(slice_vals, expected):\n np.testing.assert_array_equal(v, e)\n\n def test_strided_slice_4(self):\n inp = np.random.rand(3, 2, 3).astype(\"f\")\n\n slice_ts = []\n expected = []\n a = np.array([float(x) for x in inp.ravel(order=\"C\")])\n a.shape = (3, 2, 3)\n\n x = tf.compat.v1.placeholder(dtype=dtypes.float32)\n\n slice_ts.append(x[0:1, -2:3, 3:0:-2])\n\n def run_test(sess):\n return sess.run(slice_ts, feed_dict={x: a})\n\n slice_vals = self.with_ngraph(run_test)\n\n expected.append(inp[0:1, -2:3, 3:0:-2])\n\n for v, e in zip(slice_vals, expected):\n np.testing.assert_array_equal(v, e)\n\n def test_strided_slice_5(self):\n a = [[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],\n [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]\n\n bar = tf.constant(2)\n bar2 = tf.constant(3)\n x = tf.compat.v1.placeholder(dtype=dtypes.float32)\n slice_ts = [\n x[..., bar:bar2], x[..., bar], x[..., 3], x[..., 2**64 // 2**63]\n ]\n\n def run_test(sess):\n return sess.run(slice_ts, feed_dict={x: a})\n\n slice_vals_ng = self.with_ngraph(run_test)\n slice_vals_tf = self.without_ngraph(run_test)\n\n for v, e in zip(slice_vals_ng, slice_vals_tf):\n np.testing.assert_array_equal(v, e)\n\n def test_strided_slice_zerodim(self):\n inp = np.random.rand(4, 0, 5).astype(\"f\")\n slice_ts = []\n expected = []\n\n a = np.array([float(x) for x in inp.ravel(order=\"C\")])\n a.shape = (4, 0, 5)\n\n x = tf.compat.v1.placeholder(dtype=dtypes.float32)\n\n #(slicing an empty dim by empty slice)\n slice_ts.append(x[1:2, 2:2, 1:2])\n #(slicing an empty dim by non empty slice)\n slice_ts.append(x[1:2, 1:2, 1:2])\n\n def run_test(sess):\n return sess.run(slice_ts, feed_dict={x: a})\n\n slice_vals = self.with_ngraph(run_test)\n\n expected.append(inp[1:2, 2:2, 1:2])\n expected.append(inp[1:2, 1:2, 1:2])\n\n for v, e in zip(slice_vals, expected):\n np.testing.assert_array_equal(v, e)\n\n def 
test_incorrect_strided_slice(self):\n inp = 0\n slice_ts = []\n\n x = tf.compat.v1.placeholder(dtype=dtypes.float32)\n\n #(slicing an empty dim by empty slice)\n slice_ts.append(x[1:1])\n\n def run_test(sess):\n return sess.run(slice_ts, feed_dict={x: inp})\n\n with pytest.raises(Exception) as excinfo:\n slice_vals = self.with_ngraph(run_test)\n assert excinfo.value.message\n" ]
[ [ "tensorflow.compat.v1.ConfigProto", "tensorflow.compat.v1.disable_eager_execution" ], [ "tensorflow.compat.v1.placeholder", "numpy.random.rand", "numpy.testing.assert_array_equal", "tensorflow.compat.v1.disable_eager_execution", "tensorflow.constant", "tensorflow.python.ops.array_ops.slice" ] ]
afeld/pandas
[ "9a1d87bc4633c24958254218aaf9762a845fd57d" ]
[ "pandas/conftest.py" ]
[ "\"\"\"\nThis file is very long and growing, but it was decided to not split it yet, as\nit's still manageable (2020-03-17, ~1.1k LoC). See gh-31989\n\nInstead of splitting it was decided to define sections here:\n- Configuration / Settings\n- Autouse fixtures\n- Common arguments\n- Missing values & co.\n- Classes\n- Indices\n- Series'\n- DataFrames\n- Operators & Operations\n- Data sets/files\n- Time zones\n- Dtypes\n- Misc\n\"\"\"\n\nfrom collections import abc\nfrom datetime import date, datetime, time, timedelta, timezone\nfrom decimal import Decimal\nimport operator\nimport os\n\nfrom dateutil.tz import tzlocal, tzutc\nimport hypothesis\nfrom hypothesis import strategies as st\nimport numpy as np\nimport pytest\nfrom pytz import FixedOffset, utc\n\nimport pandas.util._test_decorators as td\n\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype, IntervalDtype\n\nimport pandas as pd\nfrom pandas import DataFrame, Interval, Period, Series, Timedelta, Timestamp\nimport pandas._testing as tm\nfrom pandas.core import ops\nfrom pandas.core.indexes.api import Index, MultiIndex\n\n\n# ----------------------------------------------------------------\n# Configuration / Settings\n# ----------------------------------------------------------------\n# pytest\ndef pytest_configure(config):\n # Register marks to avoid warnings in pandas.test()\n # sync with setup.cfg\n config.addinivalue_line(\"markers\", \"single: mark a test as single cpu only\")\n config.addinivalue_line(\"markers\", \"slow: mark a test as slow\")\n config.addinivalue_line(\"markers\", \"network: mark a test as network\")\n config.addinivalue_line(\n \"markers\", \"db: tests requiring a database (mysql or postgres)\"\n )\n config.addinivalue_line(\"markers\", \"high_memory: mark a test as a high-memory only\")\n config.addinivalue_line(\"markers\", \"clipboard: mark a pd.read_clipboard test\")\n config.addinivalue_line(\n \"markers\", \"arm_slow: mark a test as slow for arm64 architecture\"\n )\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--skip-slow\", action=\"store_true\", help=\"skip slow tests\")\n parser.addoption(\"--skip-network\", action=\"store_true\", help=\"skip network tests\")\n parser.addoption(\"--skip-db\", action=\"store_true\", help=\"skip db tests\")\n parser.addoption(\n \"--run-high-memory\", action=\"store_true\", help=\"run high memory tests\"\n )\n parser.addoption(\"--only-slow\", action=\"store_true\", help=\"run only slow tests\")\n parser.addoption(\n \"--strict-data-files\",\n action=\"store_true\",\n help=\"Fail if a test is skipped for missing data file.\",\n )\n parser.addoption(\n \"--array-manager\",\n \"--am\",\n action=\"store_true\",\n help=\"Use the experimental ArrayManager as default data manager.\",\n )\n\n\ndef pytest_sessionstart(session):\n # Note: we need to set the option here and not in pytest_runtest_setup below\n # to ensure this is run before creating fixture data\n if session.config.getoption(\"--array-manager\"):\n pd.options.mode.data_manager = \"array\"\n\n\ndef pytest_runtest_setup(item):\n if \"slow\" in item.keywords and item.config.getoption(\"--skip-slow\"):\n pytest.skip(\"skipping due to --skip-slow\")\n\n if \"slow\" not in item.keywords and item.config.getoption(\"--only-slow\"):\n pytest.skip(\"skipping due to --only-slow\")\n\n if \"network\" in item.keywords and item.config.getoption(\"--skip-network\"):\n pytest.skip(\"skipping due to --skip-network\")\n\n if \"db\" in item.keywords and item.config.getoption(\"--skip-db\"):\n pytest.skip(\"skipping 
due to --skip-db\")\n\n if \"high_memory\" in item.keywords and not item.config.getoption(\n \"--run-high-memory\"\n ):\n pytest.skip(\"skipping high memory test since --run-high-memory was not set\")\n\n\n# Hypothesis\nhypothesis.settings.register_profile(\n \"ci\",\n # Hypothesis timing checks are tuned for scalars by default, so we bump\n # them from 200ms to 500ms per test case as the global default. If this\n # is too short for a specific test, (a) try to make it faster, and (b)\n # if it really is slow add `@settings(deadline=...)` with a working value,\n # or `deadline=None` to entirely disable timeouts for that test.\n deadline=500,\n suppress_health_check=(hypothesis.HealthCheck.too_slow,),\n)\nhypothesis.settings.load_profile(\"ci\")\n\n# Registering these strategies makes them globally available via st.from_type,\n# which is used for offsets in tests/tseries/offsets/test_offsets_properties.py\nfor name in \"MonthBegin MonthEnd BMonthBegin BMonthEnd\".split():\n cls = getattr(pd.tseries.offsets, name)\n st.register_type_strategy(\n cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans())\n )\n\nfor name in \"YearBegin YearEnd BYearBegin BYearEnd\".split():\n cls = getattr(pd.tseries.offsets, name)\n st.register_type_strategy(\n cls,\n st.builds(\n cls,\n n=st.integers(-5, 5),\n normalize=st.booleans(),\n month=st.integers(min_value=1, max_value=12),\n ),\n )\n\nfor name in \"QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd\".split():\n cls = getattr(pd.tseries.offsets, name)\n st.register_type_strategy(\n cls,\n st.builds(\n cls,\n n=st.integers(-24, 24),\n normalize=st.booleans(),\n startingMonth=st.integers(min_value=1, max_value=12),\n ),\n )\n\n\n# ----------------------------------------------------------------\n# Autouse fixtures\n# ----------------------------------------------------------------\n@pytest.fixture(autouse=True)\ndef configure_tests():\n \"\"\"\n Configure settings for all tests and test modules.\n \"\"\"\n pd.set_option(\"chained_assignment\", \"raise\")\n\n\n@pytest.fixture(autouse=True)\ndef add_imports(doctest_namespace):\n \"\"\"\n Make `np` and `pd` names available for doctests.\n \"\"\"\n doctest_namespace[\"np\"] = np\n doctest_namespace[\"pd\"] = pd\n\n\n# ----------------------------------------------------------------\n# Common arguments\n# ----------------------------------------------------------------\n@pytest.fixture(params=[0, 1, \"index\", \"columns\"], ids=lambda x: f\"axis {repr(x)}\")\ndef axis(request):\n \"\"\"\n Fixture for returning the axis numbers of a DataFrame.\n \"\"\"\n return request.param\n\n\naxis_frame = axis\n\n\n@pytest.fixture(params=[True, False, None])\ndef observed(request):\n \"\"\"\n Pass in the observed keyword to groupby for [True, False]\n This indicates whether categoricals should return values for\n values which are not in the grouper [False / None], or only values which\n appear in the grouper [True]. 
[None] is supported for future compatibility\n if we decide to change the default (and would need to warn if this\n parameter is not passed).\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[True, False, None])\ndef ordered(request):\n \"\"\"\n Boolean 'ordered' parameter for Categorical.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[\"first\", \"last\", False])\ndef keep(request):\n \"\"\"\n Valid values for the 'keep' parameter used in\n .duplicated or .drop_duplicates\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[\"left\", \"right\", \"both\", \"neither\"])\ndef closed(request):\n \"\"\"\n Fixture for trying all interval closed parameters.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[\"left\", \"right\", \"both\", \"neither\"])\ndef other_closed(request):\n \"\"\"\n Secondary closed fixture to allow parametrizing over all pairs of closed.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[None, \"gzip\", \"bz2\", \"zip\", \"xz\"])\ndef compression(request):\n \"\"\"\n Fixture for trying common compression types in compression tests.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[\"gzip\", \"bz2\", \"zip\", \"xz\"])\ndef compression_only(request):\n \"\"\"\n Fixture for trying common compression types in compression tests excluding\n uncompressed case.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef writable(request):\n \"\"\"\n Fixture that an array is writable.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[\"inner\", \"outer\", \"left\", \"right\"])\ndef join_type(request):\n \"\"\"\n Fixture for trying all types of join operations.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[\"nlargest\", \"nsmallest\"])\ndef nselect_method(request):\n \"\"\"\n Fixture for trying all nselect methods.\n \"\"\"\n return request.param\n\n\n# ----------------------------------------------------------------\n# Missing values & co.\n# ----------------------------------------------------------------\n@pytest.fixture(params=tm.NULL_OBJECTS, ids=str)\ndef nulls_fixture(request):\n \"\"\"\n Fixture for each null type in pandas.\n \"\"\"\n return request.param\n\n\nnulls_fixture2 = nulls_fixture # Generate cartesian product of nulls_fixture\n\n\n@pytest.fixture(params=[None, np.nan, pd.NaT])\ndef unique_nulls_fixture(request):\n \"\"\"\n Fixture for each null type in pandas, each null type exactly once.\n \"\"\"\n return request.param\n\n\n# Generate cartesian product of unique_nulls_fixture:\nunique_nulls_fixture2 = unique_nulls_fixture\n\n# ----------------------------------------------------------------\n# Classes\n# ----------------------------------------------------------------\n\n\n@pytest.fixture(params=[pd.DataFrame, pd.Series])\ndef frame_or_series(request):\n \"\"\"\n Fixture to parametrize over DataFrame and Series.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(\n params=[pd.Index, pd.Series], ids=[\"index\", \"series\"] # type: ignore[list-item]\n)\ndef index_or_series(request):\n \"\"\"\n Fixture to parametrize over Index and Series, made necessary by a mypy\n bug, giving an error:\n\n List item 0 has incompatible type \"Type[Series]\"; expected \"Type[PandasObject]\"\n\n See GH#29725\n \"\"\"\n return request.param\n\n\n# Generate cartesian product of index_or_series fixture:\nindex_or_series2 = index_or_series\n\n\n@pytest.fixture(\n params=[pd.Index, pd.Series, pd.array], ids=[\"index\", \"series\", \"array\"]\n)\ndef 
index_or_series_or_array(request):\n \"\"\"\n Fixture to parametrize over Index, Series, and ExtensionArray\n \"\"\"\n return request.param\n\n\n@pytest.fixture\ndef dict_subclass():\n \"\"\"\n Fixture for a dictionary subclass.\n \"\"\"\n\n class TestSubDict(dict):\n def __init__(self, *args, **kwargs):\n dict.__init__(self, *args, **kwargs)\n\n return TestSubDict\n\n\n@pytest.fixture\ndef non_dict_mapping_subclass():\n \"\"\"\n Fixture for a non-mapping dictionary subclass.\n \"\"\"\n\n class TestNonDictMapping(abc.Mapping):\n def __init__(self, underlying_dict):\n self._data = underlying_dict\n\n def __getitem__(self, key):\n return self._data.__getitem__(key)\n\n def __iter__(self):\n return self._data.__iter__()\n\n def __len__(self):\n return self._data.__len__()\n\n return TestNonDictMapping\n\n\n# ----------------------------------------------------------------\n# Indices\n# ----------------------------------------------------------------\n@pytest.fixture\ndef multiindex_year_month_day_dataframe_random_data():\n \"\"\"\n DataFrame with 3 level MultiIndex (year, month, day) covering\n first 100 business days from 2000-01-01 with random data\n \"\"\"\n tdf = tm.makeTimeDataFrame(100)\n ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()\n # use Int64Index, to make sure things work\n ymd.index = ymd.index.set_levels([lev.astype(\"i8\") for lev in ymd.index.levels])\n ymd.index.set_names([\"year\", \"month\", \"day\"], inplace=True)\n return ymd\n\n\n@pytest.fixture\ndef multiindex_dataframe_random_data():\n \"\"\"DataFrame with 2 level MultiIndex with random data\"\"\"\n index = MultiIndex(\n levels=[[\"foo\", \"bar\", \"baz\", \"qux\"], [\"one\", \"two\", \"three\"]],\n codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=[\"first\", \"second\"],\n )\n return DataFrame(\n np.random.randn(10, 3), index=index, columns=Index([\"A\", \"B\", \"C\"], name=\"exp\")\n )\n\n\ndef _create_multiindex():\n \"\"\"\n MultiIndex used to test the general functionality of this object\n \"\"\"\n\n # See Also: tests.multi.conftest.idx\n major_axis = Index([\"foo\", \"bar\", \"baz\", \"qux\"])\n minor_axis = Index([\"one\", \"two\"])\n\n major_codes = np.array([0, 0, 1, 2, 3, 3])\n minor_codes = np.array([0, 1, 0, 1, 0, 1])\n index_names = [\"first\", \"second\"]\n return MultiIndex(\n levels=[major_axis, minor_axis],\n codes=[major_codes, minor_codes],\n names=index_names,\n verify_integrity=False,\n )\n\n\ndef _create_mi_with_dt64tz_level():\n \"\"\"\n MultiIndex with a level that is a tzaware DatetimeIndex.\n \"\"\"\n # GH#8367 round trip with pickle\n return MultiIndex.from_product(\n [[1, 2], [\"a\", \"b\"], pd.date_range(\"20130101\", periods=3, tz=\"US/Eastern\")],\n names=[\"one\", \"two\", \"three\"],\n )\n\n\nindices_dict = {\n \"unicode\": tm.makeUnicodeIndex(100),\n \"string\": tm.makeStringIndex(100),\n \"datetime\": tm.makeDateIndex(100),\n \"datetime-tz\": tm.makeDateIndex(100, tz=\"US/Pacific\"),\n \"period\": tm.makePeriodIndex(100),\n \"timedelta\": tm.makeTimedeltaIndex(100),\n \"int\": tm.makeIntIndex(100),\n \"uint\": tm.makeUIntIndex(100),\n \"range\": tm.makeRangeIndex(100),\n \"float\": tm.makeFloatIndex(100),\n \"bool\": tm.makeBoolIndex(10),\n \"categorical\": tm.makeCategoricalIndex(100),\n \"interval\": tm.makeIntervalIndex(100),\n \"empty\": Index([]),\n \"tuples\": MultiIndex.from_tuples(zip([\"foo\", \"bar\", \"baz\"], [1, 2, 3])),\n \"mi-with-dt64tz-level\": _create_mi_with_dt64tz_level(),\n \"multi\": 
_create_multiindex(),\n \"repeats\": Index([0, 0, 1, 1, 2, 2]),\n}\n\n\n@pytest.fixture(params=indices_dict.keys())\ndef index(request):\n \"\"\"\n Fixture for many \"simple\" kinds of indices.\n\n These indices are unlikely to cover corner cases, e.g.\n - no names\n - no NaTs/NaNs\n - no values near implementation bounds\n - ...\n \"\"\"\n # copy to avoid mutation, e.g. setting .name\n return indices_dict[request.param].copy()\n\n\n# Needed to generate cartesian product of indices\nindex_fixture2 = index\n\n\n@pytest.fixture(\n params=[\n key for key in indices_dict if not isinstance(indices_dict[key], MultiIndex)\n ]\n)\ndef index_flat(request):\n \"\"\"\n index fixture, but excluding MultiIndex cases.\n \"\"\"\n key = request.param\n return indices_dict[key].copy()\n\n\n# Alias so we can test with cartesian product of index_flat\nindex_flat2 = index_flat\n\n\n@pytest.fixture(\n params=[\n key\n for key in indices_dict\n if key not in [\"int\", \"uint\", \"range\", \"empty\", \"repeats\"]\n and not isinstance(indices_dict[key], MultiIndex)\n ]\n)\ndef index_with_missing(request):\n \"\"\"\n Fixture for indices with missing values.\n\n Integer-dtype and empty cases are excluded because they cannot hold missing\n values.\n\n MultiIndex is excluded because isna() is not defined for MultiIndex.\n \"\"\"\n\n # GH 35538. Use deep copy to avoid elusive bug on np-dev\n # Azure pipeline that writes into indices_dict despite copy\n ind = indices_dict[request.param].copy(deep=True)\n vals = ind.values\n if request.param in [\"tuples\", \"mi-with-dt64tz-level\", \"multi\"]:\n # For setting missing values in the top level of MultiIndex\n vals = ind.tolist()\n vals[0] = (None,) + vals[0][1:]\n vals[-1] = (None,) + vals[-1][1:]\n return MultiIndex.from_tuples(vals)\n else:\n vals[0] = None\n vals[-1] = None\n return type(ind)(vals)\n\n\n# ----------------------------------------------------------------\n# Series'\n# ----------------------------------------------------------------\n@pytest.fixture\ndef empty_series():\n return pd.Series([], index=[], dtype=np.float64)\n\n\n@pytest.fixture\ndef string_series():\n \"\"\"\n Fixture for Series of floats with Index of unique strings\n \"\"\"\n s = tm.makeStringSeries()\n s.name = \"series\"\n return s\n\n\n@pytest.fixture\ndef object_series():\n \"\"\"\n Fixture for Series of dtype object with Index of unique strings\n \"\"\"\n s = tm.makeObjectSeries()\n s.name = \"objects\"\n return s\n\n\n@pytest.fixture\ndef datetime_series():\n \"\"\"\n Fixture for Series of floats with DatetimeIndex\n \"\"\"\n s = tm.makeTimeSeries()\n s.name = \"ts\"\n return s\n\n\ndef _create_series(index):\n \"\"\" Helper for the _series dict \"\"\"\n size = len(index)\n data = np.random.randn(size)\n return pd.Series(data, index=index, name=\"a\")\n\n\n_series = {\n f\"series-with-{index_id}-index\": _create_series(index)\n for index_id, index in indices_dict.items()\n}\n\n\n@pytest.fixture\ndef series_with_simple_index(index):\n \"\"\"\n Fixture for tests on series with changing types of indices.\n \"\"\"\n return _create_series(index)\n\n\n@pytest.fixture\ndef series_with_multilevel_index():\n \"\"\"\n Fixture with a Series with a 2-level MultiIndex.\n \"\"\"\n arrays = [\n [\"bar\", \"bar\", \"baz\", \"baz\", \"qux\", \"qux\", \"foo\", \"foo\"],\n [\"one\", \"two\", \"one\", \"two\", \"one\", \"two\", \"one\", \"two\"],\n ]\n tuples = zip(*arrays)\n index = MultiIndex.from_tuples(tuples)\n data = np.random.randn(8)\n ser = Series(data, index=index)\n ser[3] = np.NaN\n return 
ser\n\n\n_narrow_dtypes = [\n np.float16,\n np.float32,\n np.int8,\n np.int16,\n np.int32,\n np.uint8,\n np.uint16,\n np.uint32,\n]\n_narrow_series = {\n f\"{dtype.__name__}-series\": tm.makeFloatSeries(name=\"a\").astype(dtype)\n for dtype in _narrow_dtypes\n}\n\n\n@pytest.fixture(params=_narrow_series.keys())\ndef narrow_series(request):\n \"\"\"\n Fixture for Series with low precision data types\n \"\"\"\n # copy to avoid mutation, e.g. setting .name\n return _narrow_series[request.param].copy()\n\n\n_index_or_series_objs = {**indices_dict, **_series, **_narrow_series}\n\n\n@pytest.fixture(params=_index_or_series_objs.keys())\ndef index_or_series_obj(request):\n \"\"\"\n Fixture for tests on indexes, series and series with a narrow dtype\n copy to avoid mutation, e.g. setting .name\n \"\"\"\n return _index_or_series_objs[request.param].copy(deep=True)\n\n\n# ----------------------------------------------------------------\n# DataFrames\n# ----------------------------------------------------------------\n@pytest.fixture\ndef empty_frame():\n return DataFrame()\n\n\n@pytest.fixture\ndef int_frame():\n \"\"\"\n Fixture for DataFrame of ints with index of unique strings\n\n Columns are ['A', 'B', 'C', 'D']\n\n A B C D\n vpBeWjM651 1 0 1 0\n 5JyxmrP1En -1 0 0 0\n qEDaoD49U2 -1 1 0 0\n m66TkTfsFe 0 0 0 0\n EHPaNzEUFm -1 0 -1 0\n fpRJCevQhi 2 0 0 0\n OlQvnmfi3Q 0 0 -2 0\n ... .. .. .. ..\n uB1FPlz4uP 0 0 0 1\n EcSe6yNzCU 0 0 -1 0\n L50VudaiI8 -1 1 -2 0\n y3bpw4nwIp 0 -1 0 0\n H0RdLLwrCT 1 1 0 0\n rY82K0vMwm 0 0 0 0\n 1OPIUjnkjk 2 0 0 0\n\n [30 rows x 4 columns]\n \"\"\"\n return DataFrame(tm.getSeriesData()).astype(\"int64\")\n\n\n@pytest.fixture\ndef datetime_frame():\n \"\"\"\n Fixture for DataFrame of floats with DatetimeIndex\n\n Columns are ['A', 'B', 'C', 'D']\n\n A B C D\n 2000-01-03 -1.122153 0.468535 0.122226 1.693711\n 2000-01-04 0.189378 0.486100 0.007864 -1.216052\n 2000-01-05 0.041401 -0.835752 -0.035279 -0.414357\n 2000-01-06 0.430050 0.894352 0.090719 0.036939\n 2000-01-07 -0.620982 -0.668211 -0.706153 1.466335\n 2000-01-10 -0.752633 0.328434 -0.815325 0.699674\n 2000-01-11 -2.236969 0.615737 -0.829076 -1.196106\n ... ... ... ... ...\n 2000-02-03 1.642618 -0.579288 0.046005 1.385249\n 2000-02-04 -0.544873 -1.160962 -0.284071 -1.418351\n 2000-02-07 -2.656149 -0.601387 1.410148 0.444150\n 2000-02-08 -1.201881 -1.289040 0.772992 -1.445300\n 2000-02-09 1.377373 0.398619 1.008453 -0.928207\n 2000-02-10 0.473194 -0.636677 0.984058 0.511519\n 2000-02-11 -0.965556 0.408313 -1.312844 -0.381948\n\n [30 rows x 4 columns]\n \"\"\"\n return DataFrame(tm.getTimeSeriesData())\n\n\n@pytest.fixture\ndef float_frame():\n \"\"\"\n Fixture for DataFrame of floats with index of unique strings\n\n Columns are ['A', 'B', 'C', 'D'].\n\n A B C D\n P7GACiRnxd -0.465578 -0.361863 0.886172 -0.053465\n qZKh6afn8n -0.466693 -0.373773 0.266873 1.673901\n tkp0r6Qble 0.148691 -0.059051 0.174817 1.598433\n wP70WOCtv8 0.133045 -0.581994 -0.992240 0.261651\n M2AeYQMnCz -1.207959 -0.185775 0.588206 0.563938\n QEPzyGDYDo -0.381843 -0.758281 0.502575 -0.565053\n r78Jwns6dn -0.653707 0.883127 0.682199 0.206159\n ... ... ... ... 
...\n IHEGx9NO0T -0.277360 0.113021 -1.018314 0.196316\n lPMj8K27FA -1.313667 -0.604776 -1.305618 -0.863999\n qa66YMWQa5 1.110525 0.475310 -0.747865 0.032121\n yOa0ATsmcE -0.431457 0.067094 0.096567 -0.264962\n 65znX3uRNG 1.528446 0.160416 -0.109635 -0.032987\n eCOBvKqf3e 0.235281 1.622222 0.781255 0.392871\n xSucinXxuV -1.263557 0.252799 -0.552247 0.400426\n\n [30 rows x 4 columns]\n \"\"\"\n return DataFrame(tm.getSeriesData())\n\n\n@pytest.fixture\ndef mixed_type_frame():\n \"\"\"\n Fixture for DataFrame of float/int/string columns with RangeIndex\n Columns are ['a', 'b', 'c', 'float32', 'int32'].\n \"\"\"\n return DataFrame(\n {\n \"a\": 1.0,\n \"b\": 2,\n \"c\": \"foo\",\n \"float32\": np.array([1.0] * 10, dtype=\"float32\"),\n \"int32\": np.array([1] * 10, dtype=\"int32\"),\n },\n index=np.arange(10),\n )\n\n\n@pytest.fixture\ndef rand_series_with_duplicate_datetimeindex():\n \"\"\"\n Fixture for Series with a DatetimeIndex that has duplicates.\n \"\"\"\n dates = [\n datetime(2000, 1, 2),\n datetime(2000, 1, 2),\n datetime(2000, 1, 2),\n datetime(2000, 1, 3),\n datetime(2000, 1, 3),\n datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 4),\n datetime(2000, 1, 4),\n datetime(2000, 1, 5),\n ]\n\n return Series(np.random.randn(len(dates)), index=dates)\n\n\n# ----------------------------------------------------------------\n# Scalars\n# ----------------------------------------------------------------\n@pytest.fixture(\n params=[\n (Interval(left=0, right=5), IntervalDtype(\"int64\", \"right\")),\n (Interval(left=0.1, right=0.5), IntervalDtype(\"float64\", \"right\")),\n (Period(\"2012-01\", freq=\"M\"), \"period[M]\"),\n (Period(\"2012-02-01\", freq=\"D\"), \"period[D]\"),\n (\n Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n DatetimeTZDtype(tz=\"US/Eastern\"),\n ),\n (Timedelta(seconds=500), \"timedelta64[ns]\"),\n ]\n)\ndef ea_scalar_and_dtype(request):\n return request.param\n\n\n# ----------------------------------------------------------------\n# Operators & Operations\n# ----------------------------------------------------------------\n_all_arithmetic_operators = [\n \"__add__\",\n \"__radd__\",\n \"__sub__\",\n \"__rsub__\",\n \"__mul__\",\n \"__rmul__\",\n \"__floordiv__\",\n \"__rfloordiv__\",\n \"__truediv__\",\n \"__rtruediv__\",\n \"__pow__\",\n \"__rpow__\",\n \"__mod__\",\n \"__rmod__\",\n]\n\n\n@pytest.fixture(params=_all_arithmetic_operators)\ndef all_arithmetic_operators(request):\n \"\"\"\n Fixture for dunder names for common arithmetic operations.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(\n params=[\n operator.add,\n ops.radd,\n operator.sub,\n ops.rsub,\n operator.mul,\n ops.rmul,\n operator.truediv,\n ops.rtruediv,\n operator.floordiv,\n ops.rfloordiv,\n operator.mod,\n ops.rmod,\n operator.pow,\n ops.rpow,\n operator.eq,\n operator.ne,\n operator.lt,\n operator.le,\n operator.gt,\n operator.ge,\n operator.and_,\n ops.rand_,\n operator.xor,\n ops.rxor,\n operator.or_,\n ops.ror_,\n ]\n)\ndef all_binary_operators(request):\n \"\"\"\n Fixture for operator and roperator arithmetic, comparison, and logical ops.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(\n params=[\n operator.add,\n ops.radd,\n operator.sub,\n ops.rsub,\n operator.mul,\n ops.rmul,\n operator.truediv,\n ops.rtruediv,\n operator.floordiv,\n ops.rfloordiv,\n operator.mod,\n ops.rmod,\n operator.pow,\n ops.rpow,\n ]\n)\ndef all_arithmetic_functions(request):\n \"\"\"\n Fixture for operator and roperator arithmetic functions.\n\n Notes\n -----\n This includes divmod and 
rdivmod, whereas all_arithmetic_operators\n does not.\n \"\"\"\n return request.param\n\n\n_all_numeric_reductions = [\n \"sum\",\n \"max\",\n \"min\",\n \"mean\",\n \"prod\",\n \"std\",\n \"var\",\n \"median\",\n \"kurt\",\n \"skew\",\n]\n\n\n@pytest.fixture(params=_all_numeric_reductions)\ndef all_numeric_reductions(request):\n \"\"\"\n Fixture for numeric reduction names.\n \"\"\"\n return request.param\n\n\n_all_boolean_reductions = [\"all\", \"any\"]\n\n\n@pytest.fixture(params=_all_boolean_reductions)\ndef all_boolean_reductions(request):\n \"\"\"\n Fixture for boolean reduction names.\n \"\"\"\n return request.param\n\n\n_all_reductions = _all_numeric_reductions + _all_boolean_reductions\n\n\n@pytest.fixture(params=_all_reductions)\ndef all_reductions(request):\n \"\"\"\n Fixture for all (boolean + numeric) reduction names.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[\"__eq__\", \"__ne__\", \"__le__\", \"__lt__\", \"__ge__\", \"__gt__\"])\ndef all_compare_operators(request):\n \"\"\"\n Fixture for dunder names for common compare operations\n\n * >=\n * >\n * ==\n * !=\n * <\n * <=\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[\"__le__\", \"__lt__\", \"__ge__\", \"__gt__\"])\ndef compare_operators_no_eq_ne(request):\n \"\"\"\n Fixture for dunder names for compare operations except == and !=\n\n * >=\n * >\n * <\n * <=\n \"\"\"\n return request.param\n\n\n@pytest.fixture(\n params=[\"__and__\", \"__rand__\", \"__or__\", \"__ror__\", \"__xor__\", \"__rxor__\"]\n)\ndef all_logical_operators(request):\n \"\"\"\n Fixture for dunder names for common logical operations\n\n * |\n * &\n * ^\n \"\"\"\n return request.param\n\n\n# ----------------------------------------------------------------\n# Data sets/files\n# ----------------------------------------------------------------\n@pytest.fixture\ndef strict_data_files(pytestconfig):\n \"\"\"\n Returns the configuration for the test setting `--strict-data-files`.\n \"\"\"\n return pytestconfig.getoption(\"--strict-data-files\")\n\n\n@pytest.fixture\ndef datapath(strict_data_files):\n \"\"\"\n Get the path to a data file.\n\n Parameters\n ----------\n path : str\n Path to the file, relative to ``pandas/tests/``\n\n Returns\n -------\n path including ``pandas/tests``.\n\n Raises\n ------\n ValueError\n If the path doesn't exist and the --strict-data-files option is set.\n \"\"\"\n BASE_PATH = os.path.join(os.path.dirname(__file__), \"tests\")\n\n def deco(*args):\n path = os.path.join(BASE_PATH, *args)\n if not os.path.exists(path):\n if strict_data_files:\n raise ValueError(\n f\"Could not find file {path} and --strict-data-files is set.\"\n )\n else:\n pytest.skip(f\"Could not find {path}.\")\n return path\n\n return deco\n\n\n@pytest.fixture\ndef iris(datapath):\n \"\"\"\n The iris dataset as a DataFrame.\n \"\"\"\n return pd.read_csv(datapath(\"io\", \"data\", \"csv\", \"iris.csv\"))\n\n\n# ----------------------------------------------------------------\n# Time zones\n# ----------------------------------------------------------------\nTIMEZONES = [\n None,\n \"UTC\",\n \"US/Eastern\",\n \"Asia/Tokyo\",\n \"dateutil/US/Pacific\",\n \"dateutil/Asia/Singapore\",\n \"+01:15\",\n \"-02:15\",\n \"UTC+01:15\",\n \"UTC-02:15\",\n tzutc(),\n tzlocal(),\n FixedOffset(300),\n FixedOffset(0),\n FixedOffset(-300),\n timezone.utc,\n timezone(timedelta(hours=1)),\n timezone(timedelta(hours=-1), name=\"foo\"),\n]\nTIMEZONE_IDS = [repr(i) for i in 
TIMEZONES]\n\n\n@td.parametrize_fixture_doc(str(TIMEZONE_IDS))\n@pytest.fixture(params=TIMEZONES, ids=TIMEZONE_IDS)\ndef tz_naive_fixture(request):\n \"\"\"\n Fixture for trying timezones including default (None): {0}\n \"\"\"\n return request.param\n\n\n@td.parametrize_fixture_doc(str(TIMEZONE_IDS[1:]))\n@pytest.fixture(params=TIMEZONES[1:], ids=TIMEZONE_IDS[1:])\ndef tz_aware_fixture(request):\n \"\"\"\n Fixture for trying explicit timezones: {0}\n \"\"\"\n return request.param\n\n\n# Generate cartesian product of tz_aware_fixture:\ntz_aware_fixture2 = tz_aware_fixture\n\n\n@pytest.fixture(params=[\"utc\", \"dateutil/UTC\", utc, tzutc(), timezone.utc])\ndef utc_fixture(request):\n \"\"\"\n Fixture to provide variants of UTC timezone strings and tzinfo objects.\n \"\"\"\n return request.param\n\n\nutc_fixture2 = utc_fixture\n\n\n# ----------------------------------------------------------------\n# Dtypes\n# ----------------------------------------------------------------\n@pytest.fixture(params=tm.STRING_DTYPES)\ndef string_dtype(request):\n \"\"\"\n Parametrized fixture for string dtypes.\n\n * str\n * 'str'\n * 'U'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.BYTES_DTYPES)\ndef bytes_dtype(request):\n \"\"\"\n Parametrized fixture for bytes dtypes.\n\n * bytes\n * 'bytes'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.OBJECT_DTYPES)\ndef object_dtype(request):\n \"\"\"\n Parametrized fixture for object dtypes.\n\n * object\n * 'object'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.DATETIME64_DTYPES)\ndef datetime64_dtype(request):\n \"\"\"\n Parametrized fixture for datetime64 dtypes.\n\n * 'datetime64[ns]'\n * 'M8[ns]'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.TIMEDELTA64_DTYPES)\ndef timedelta64_dtype(request):\n \"\"\"\n Parametrized fixture for timedelta64 dtypes.\n\n * 'timedelta64[ns]'\n * 'm8[ns]'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.FLOAT_DTYPES)\ndef float_dtype(request):\n \"\"\"\n Parameterized fixture for float dtypes.\n\n * float\n * 'float32'\n * 'float64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.FLOAT_EA_DTYPES)\ndef float_ea_dtype(request):\n \"\"\"\n Parameterized fixture for float dtypes.\n\n * 'Float32'\n * 'Float64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.FLOAT_DTYPES + tm.FLOAT_EA_DTYPES)\ndef any_float_allowed_nullable_dtype(request):\n \"\"\"\n Parameterized fixture for float dtypes.\n\n * float\n * 'float32'\n * 'float64'\n * 'Float32'\n * 'Float64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.COMPLEX_DTYPES)\ndef complex_dtype(request):\n \"\"\"\n Parameterized fixture for complex dtypes.\n\n * complex\n * 'complex64'\n * 'complex128'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.SIGNED_INT_DTYPES)\ndef sint_dtype(request):\n \"\"\"\n Parameterized fixture for signed integer dtypes.\n\n * int\n * 'int8'\n * 'int16'\n * 'int32'\n * 'int64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.UNSIGNED_INT_DTYPES)\ndef uint_dtype(request):\n \"\"\"\n Parameterized fixture for unsigned integer dtypes.\n\n * 'uint8'\n * 'uint16'\n * 'uint32'\n * 'uint64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.ALL_INT_DTYPES)\ndef any_int_dtype(request):\n \"\"\"\n Parameterized fixture for any integer dtype.\n\n * int\n * 'int8'\n * 'uint8'\n * 'int16'\n * 'uint16'\n * 'int32'\n * 'uint32'\n * 'int64'\n * 'uint64'\n \"\"\"\n return 
request.param\n\n\n@pytest.fixture(params=tm.ALL_EA_INT_DTYPES)\ndef any_nullable_int_dtype(request):\n \"\"\"\n Parameterized fixture for any nullable integer dtype.\n\n * 'UInt8'\n * 'Int8'\n * 'UInt16'\n * 'Int16'\n * 'UInt32'\n * 'Int32'\n * 'UInt64'\n * 'Int64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.ALL_INT_DTYPES + tm.ALL_EA_INT_DTYPES)\ndef any_int_or_nullable_int_dtype(request):\n \"\"\"\n Parameterized fixture for any nullable integer dtype.\n\n * int\n * 'int8'\n * 'uint8'\n * 'int16'\n * 'uint16'\n * 'int32'\n * 'uint32'\n * 'int64'\n * 'uint64'\n * 'UInt8'\n * 'Int8'\n * 'UInt16'\n * 'Int16'\n * 'UInt32'\n * 'Int32'\n * 'UInt64'\n * 'Int64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.ALL_EA_INT_DTYPES + tm.FLOAT_EA_DTYPES)\ndef any_nullable_numeric_dtype(request):\n \"\"\"\n Parameterized fixture for any nullable integer dtype and\n any float ea dtypes.\n\n * 'UInt8'\n * 'Int8'\n * 'UInt16'\n * 'Int16'\n * 'UInt32'\n * 'Int32'\n * 'UInt64'\n * 'Int64'\n * 'Float32'\n * 'Float64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.SIGNED_EA_INT_DTYPES)\ndef any_signed_nullable_int_dtype(request):\n \"\"\"\n Parameterized fixture for any signed nullable integer dtype.\n\n * 'Int8'\n * 'Int16'\n * 'Int32'\n * 'Int64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.ALL_REAL_DTYPES)\ndef any_real_dtype(request):\n \"\"\"\n Parameterized fixture for any (purely) real numeric dtype.\n\n * int\n * 'int8'\n * 'uint8'\n * 'int16'\n * 'uint16'\n * 'int32'\n * 'uint32'\n * 'int64'\n * 'uint64'\n * float\n * 'float32'\n * 'float64'\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=tm.ALL_NUMPY_DTYPES)\ndef any_numpy_dtype(request):\n \"\"\"\n Parameterized fixture for all numpy dtypes.\n\n * bool\n * 'bool'\n * int\n * 'int8'\n * 'uint8'\n * 'int16'\n * 'uint16'\n * 'int32'\n * 'uint32'\n * 'int64'\n * 'uint64'\n * float\n * 'float32'\n * 'float64'\n * complex\n * 'complex64'\n * 'complex128'\n * str\n * 'str'\n * 'U'\n * bytes\n * 'bytes'\n * 'datetime64[ns]'\n * 'M8[ns]'\n * 'timedelta64[ns]'\n * 'm8[ns]'\n * object\n * 'object'\n \"\"\"\n return request.param\n\n\n# categoricals are handled separately\n_any_skipna_inferred_dtype = [\n (\"string\", [\"a\", np.nan, \"c\"]),\n (\"string\", [\"a\", pd.NA, \"c\"]),\n (\"bytes\", [b\"a\", np.nan, b\"c\"]),\n (\"empty\", [np.nan, np.nan, np.nan]),\n (\"empty\", []),\n (\"mixed-integer\", [\"a\", np.nan, 2]),\n (\"mixed\", [\"a\", np.nan, 2.0]),\n (\"floating\", [1.0, np.nan, 2.0]),\n (\"integer\", [1, np.nan, 2]),\n (\"mixed-integer-float\", [1, np.nan, 2.0]),\n (\"decimal\", [Decimal(1), np.nan, Decimal(2)]),\n (\"boolean\", [True, np.nan, False]),\n (\"boolean\", [True, pd.NA, False]),\n (\"datetime64\", [np.datetime64(\"2013-01-01\"), np.nan, np.datetime64(\"2018-01-01\")]),\n (\"datetime\", [pd.Timestamp(\"20130101\"), np.nan, pd.Timestamp(\"20180101\")]),\n (\"date\", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]),\n # The following two dtypes are commented out due to GH 23554\n # ('complex', [1 + 1j, np.nan, 2 + 2j]),\n # ('timedelta64', [np.timedelta64(1, 'D'),\n # np.nan, np.timedelta64(2, 'D')]),\n (\"timedelta\", [timedelta(1), np.nan, timedelta(2)]),\n (\"time\", [time(1), np.nan, time(2)]),\n (\"period\", [pd.Period(2013), pd.NaT, pd.Period(2018)]),\n (\"interval\", [pd.Interval(0, 1), np.nan, pd.Interval(0, 2)]),\n]\nids, _ = zip(*_any_skipna_inferred_dtype) # use inferred type as fixture-id\n\n\n@pytest.fixture(params=_any_skipna_inferred_dtype, 
ids=ids)\ndef any_skipna_inferred_dtype(request):\n \"\"\"\n Fixture for all inferred dtypes from _libs.lib.infer_dtype\n\n The covered (inferred) types are:\n * 'string'\n * 'empty'\n * 'bytes'\n * 'mixed'\n * 'mixed-integer'\n * 'mixed-integer-float'\n * 'floating'\n * 'integer'\n * 'decimal'\n * 'boolean'\n * 'datetime64'\n * 'datetime'\n * 'date'\n * 'timedelta'\n * 'time'\n * 'period'\n * 'interval'\n\n Returns\n -------\n inferred_dtype : str\n The string for the inferred dtype from _libs.lib.infer_dtype\n values : np.ndarray\n An array of object dtype that will be inferred to have\n `inferred_dtype`\n\n Examples\n --------\n >>> import pandas._libs.lib as lib\n >>>\n >>> def test_something(any_skipna_inferred_dtype):\n ... inferred_dtype, values = any_skipna_inferred_dtype\n ... # will pass\n ... assert lib.infer_dtype(values, skipna=True) == inferred_dtype\n \"\"\"\n inferred_dtype, values = request.param\n values = np.array(values, dtype=object) # object dtype to avoid casting\n\n # correctness of inference tested in tests/dtypes/test_inference.py\n return inferred_dtype, values\n\n\n# ----------------------------------------------------------------\n# Misc\n# ----------------------------------------------------------------\n@pytest.fixture\ndef ip():\n \"\"\"\n Get an instance of IPython.InteractiveShell.\n\n Will raise a skip if IPython is not installed.\n \"\"\"\n pytest.importorskip(\"IPython\", minversion=\"6.0.0\")\n from IPython.core.interactiveshell import InteractiveShell\n\n # GH#35711 make sure sqlite history file handle is not leaked\n from traitlets.config import Config # isort:skip\n\n c = Config()\n c.HistoryManager.hist_file = \":memory:\"\n\n return InteractiveShell(config=c)\n\n\n@pytest.fixture(params=[\"bsr\", \"coo\", \"csc\", \"csr\", \"dia\", \"dok\", \"lil\"])\ndef spmatrix(request):\n \"\"\"\n Yields scipy sparse matrix classes.\n \"\"\"\n from scipy import sparse\n\n return getattr(sparse, request.param + \"_matrix\")\n\n\n@pytest.fixture(\n params=[\n getattr(pd.offsets, o)\n for o in pd.offsets.__all__\n if issubclass(getattr(pd.offsets, o), pd.offsets.Tick)\n ]\n)\ndef tick_classes(request):\n \"\"\"\n Fixture for Tick based datetime offsets available for a time series.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[None, lambda x: x])\ndef sort_by_key(request):\n \"\"\"\n Simple fixture for testing keys in sorting methods.\n Tests None (no key) and the identity key.\n \"\"\"\n return request.param\n\n\n@pytest.fixture()\ndef fsspectest():\n pytest.importorskip(\"fsspec\")\n from fsspec import register_implementation\n from fsspec.implementations.memory import MemoryFileSystem\n from fsspec.registry import _registry as registry\n\n class TestMemoryFS(MemoryFileSystem):\n protocol = \"testmem\"\n test = [None]\n\n def __init__(self, **kwargs):\n self.test[0] = kwargs.pop(\"test\", None)\n super().__init__(**kwargs)\n\n register_implementation(\"testmem\", TestMemoryFS, clobber=True)\n yield TestMemoryFS()\n registry.pop(\"testmem\", None)\n TestMemoryFS.test[0] = None\n TestMemoryFS.store.clear()\n\n\n@pytest.fixture(\n params=[\n (\"foo\", None, None),\n (\"Egon\", \"Venkman\", None),\n (\"NCC1701D\", \"NCC1701D\", \"NCC1701D\"),\n ]\n)\ndef names(request):\n \"\"\"\n A 3-tuple of names, the first two for operands, the last for a result.\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[tm.setitem, tm.loc, tm.iloc])\ndef indexer_sli(request):\n \"\"\"\n Parametrize over __setitem__, loc.__setitem__, iloc.__setitem__\n \"\"\"\n 
return request.param\n\n\n@pytest.fixture(params=[tm.setitem, tm.iloc])\ndef indexer_si(request):\n \"\"\"\n Parametrize over __setitem__, iloc.__setitem__\n \"\"\"\n return request.param\n\n\n@pytest.fixture(params=[tm.setitem, tm.loc])\ndef indexer_sl(request):\n \"\"\"\n Parametrize over __setitem__, loc.__setitem__\n \"\"\"\n return request.param\n\n\n@pytest.fixture\ndef using_array_manager(request):\n \"\"\"\n Fixture to check if the array manager is being used.\n \"\"\"\n return pd.options.mode.data_manager == \"array\"\n" ]
[ [ "pandas._testing.makeTimedeltaIndex", "pandas._testing.getTimeSeriesData", "pandas.Timestamp", "pandas._testing.makeUnicodeIndex", "pandas._testing.makeDateIndex", "pandas.Timedelta", "pandas.set_option", "pandas.Interval", "pandas.core.indexes.api.Index", "pandas.DataFrame", "pandas._testing.makeFloatSeries", "pandas._testing.makeTimeDataFrame", "numpy.arange", "pandas._testing.makeUIntIndex", "pandas.core.indexes.api.MultiIndex.from_tuples", "pandas._testing.makeBoolIndex", "pandas.Period", "pandas._testing.makeFloatIndex", "numpy.array", "pandas._testing.makePeriodIndex", "pandas.core.dtypes.dtypes.IntervalDtype", "numpy.random.randn", "pandas._testing.makeStringIndex", "pandas._testing.makeCategoricalIndex", "pandas._testing.makeIntervalIndex", "pandas.core.dtypes.dtypes.DatetimeTZDtype", "numpy.datetime64", "pandas._testing.makeTimeSeries", "pandas._testing.makeObjectSeries", "pandas._testing.makeStringSeries", "pandas._testing.getSeriesData", "pandas.date_range", "pandas.core.indexes.api.MultiIndex", "pandas.Series", "pandas._testing.makeRangeIndex", "pandas._testing.makeIntIndex" ] ]
kinoute/google-research
[ "562c7c6ef959cb3cb382b1b660ccc45e8f5289c4", "562c7c6ef959cb3cb382b1b660ccc45e8f5289c4", "562c7c6ef959cb3cb382b1b660ccc45e8f5289c4", "4a59cab927579ea9722e43252c695de5da4eb5e2" ]
[ "mpi_extrapolation/render_sway.py", "summae/beam_search.py", "stacked_capsule_autoencoders/capsules/models/scae.py", "eim/small_problems_density_plot.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Script to run pretrained model and render sway camera path.\n\nFor CVPR 2019 paper:\nPushing the Boundaries of View Extrapolation with Multiplane Images\nPratul P. Srinivasan, Richard Tucker, Jonathan T. Barron, Ravi Ramamoorthi, Ren\nNg, Noah Snavely.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport glob\nimport os\nimport subprocess\n\nfrom absl import app\nfrom absl import flags\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\nfrom mpi_extrapolation.mpi import MPI\n\nflags.DEFINE_string(\"input_file\", default=\"\", help=\"Input batch filename\")\nflags.DEFINE_string(\n \"output_dir\",\n default=\"\",\n help=\"Directory to save MPI planes and renderings\")\nflags.DEFINE_string(\n \"model_dir\", default=\"\", help=\"Directory containing pretrained model\")\n\nFLAGS = flags.FLAGS\n\n# Model parameters\nnum_mpi_planes = 128\n# RealEstate dataset poses are scaled so depths lie between 1.0 and 100.0 meters\n# adjust depending on your data\nmin_depth = 1.0\nmax_depth = 100.0\n# Image dimensions for RealEstate dataset\nimage_height = 576\nimage_width = 1024\n# Patched inference parameters\n# edit based on GPU memory constraints and your image size\n# Recommend using largest patchsize possible\n# and decreasing outsize to reduce tiling artifacts\npatchsize = np.array([576, 384]) # patch size for inference\noutsize = np.array([576, 128]) # central portion of the patch to keep\n\n# Sway path parameters\nnum_frames = 128\ncrop = 20 # crop from MPI borders to avoid edgee artifacts in renderings\nmax_disp = 64.0 # maximum pixel disparity of closest plane (1.0 m) in sway\n\n\ndef main(argv):\n\n del argv # Unused.\n\n if FLAGS.input_file is None:\n raise ValueError(\"`input_file` must be defined\")\n if FLAGS.output_dir is None:\n raise ValueError(\"`output_dir` must be defined\")\n if FLAGS.model_dir is None:\n raise ValueError(\"`model_dir` must be defined\")\n\n checkpoint = FLAGS.model_dir + \"model.ckpt\"\n\n if not os.path.exists(FLAGS.output_dir):\n os.mkdir(FLAGS.output_dir)\n\n # Set up model\n model = MPI()\n\n # Load input batch\n inputs = np.load(FLAGS.input_file)\n\n # Compute plane depths\n mpi_planes = model.inv_depths(min_depth, max_depth, num_mpi_planes)\n\n # Format inputs, convert from numpy arrays to tensors\n # Change this if you are training with a dataset iterator\n in_src_images = tf.constant(inputs[\"src_images\"])\n in_ref_image = tf.constant(inputs[\"ref_image\"])\n in_ref_pose = tf.constant(inputs[\"ref_pose\"])\n # in_tgt_pose = tf.constant(inputs[\"tgt_pose\"]) # Unneeded for sway\n in_src_poses = tf.constant(inputs[\"src_poses\"])\n in_intrinsics = tf.constant(inputs[\"intrinsics\"])\n in_tgt_image = tf.constant(inputs[\"tgt_image\"])\n\n in_ref_image = tf.image.convert_image_dtype(in_ref_image, dtype=tf.float32)\n in_src_images = 
tf.image.convert_image_dtype(in_src_images, dtype=tf.float32)\n in_tgt_image = tf.image.convert_image_dtype(in_tgt_image, dtype=tf.float32)\n\n # Patched inference\n patch_ind = tf.placeholder(tf.int32, shape=(2))\n buffersize = (patchsize - outsize)//2\n\n # Set up graph\n outputs = model.infer_mpi(in_src_images,\n in_ref_image,\n in_ref_pose,\n in_src_poses,\n in_intrinsics,\n num_mpi_planes,\n mpi_planes,\n run_patched=True,\n patch_ind=patch_ind,\n patchsize=patchsize,\n outsize=outsize)\n\n # Define shapes to placate tensorflow\n outputs[\"rgba_layers\"].set_shape(\n (1, patchsize[0], patchsize[1], num_mpi_planes, 4))\n outputs[\"rgba_layers_refine\"].set_shape(\n (1, patchsize[0], patchsize[1], num_mpi_planes, 4))\n outputs[\"refine_input_mpi\"].set_shape(\n (1, patchsize[0], patchsize[1], num_mpi_planes, 4))\n outputs[\"stuff_behind\"].set_shape(\n (1, patchsize[0], patchsize[1], num_mpi_planes, 3))\n outputs[\"flow_vecs\"].set_shape(\n (1, patchsize[0], patchsize[1], num_mpi_planes, 2))\n\n # Patched inference for MPI (128 planes at 0.5MP res likely won't fit on GPU)\n saver = tf.train.Saver()\n with tf.Session() as sess:\n\n sess.run(tf.global_variables_initializer())\n\n if checkpoint is not None:\n print(\"Loading from checkpoint:\", checkpoint)\n saver.restore(sess, checkpoint)\n\n num_patches = [image_height // outsize[0], image_width // outsize[1]]\n print(\"patched inference with:\", num_patches, \"patches,\", \"buffersize:\",\n buffersize)\n out_rgba = None\n for r in range(num_patches[0]):\n out_row_rgba = None\n for c in range(num_patches[1]):\n patch_num = r*num_patches[1]+c\n print(\"running patch:\", patch_num)\n patch_ind_rc = np.array([r, c])\n patch_start = patch_ind_rc * outsize\n patch_end = patch_start + patchsize\n print(\"patch ind:\", patch_ind_rc, \"patch_start\", patch_start,\n \"patch_end\", patch_end)\n feed_dict = {\n patch_ind: patch_ind_rc,\n in_src_images: inputs[\"src_images\"],\n in_ref_image: inputs[\"ref_image\"],\n in_ref_pose: inputs[\"ref_pose\"],\n in_src_poses: inputs[\"src_poses\"],\n in_intrinsics: inputs[\"intrinsics\"]\n }\n outs = sess.run(outputs, feed_dict=feed_dict)\n outs_rgba_patch = outs[\"rgba_layers\"][:, buffersize[0]:buffersize[0] +\n outsize[0],\n buffersize[1]:buffersize[1] +\n outsize[1], :, :]\n outs_rgba_patch_refine = outs[\n \"rgba_layers_refine\"][:, buffersize[0]:buffersize[0] + outsize[0],\n buffersize[1]:buffersize[1] +\n outsize[1], :, :]\n outs_refine_input_mpi_patch = outs[\n \"refine_input_mpi\"][:, buffersize[0]:buffersize[0] + outsize[0],\n buffersize[1]:buffersize[1] + outsize[1], :, :]\n outs_stuff_behind_patch = outs[\n \"stuff_behind\"][:, buffersize[0]:buffersize[0] + outsize[0],\n buffersize[1]:buffersize[1] + outsize[1], :, :]\n outs_flow_vecs = outs[\"flow_vecs\"][:, buffersize[0]:buffersize[0] +\n outsize[0],\n buffersize[1]:buffersize[1] +\n outsize[1], :, :]\n\n if out_row_rgba is None:\n out_row_rgba = outs_rgba_patch\n out_row_rgba_refine = outs_rgba_patch_refine\n out_row_refine_input_mpi = outs_refine_input_mpi_patch\n out_row_stuff_behind = outs_stuff_behind_patch\n out_row_flow_vecs = outs_flow_vecs\n else:\n out_row_rgba = np.concatenate([out_row_rgba, outs_rgba_patch], 2)\n out_row_rgba_refine = np.concatenate(\n [out_row_rgba_refine, outs_rgba_patch_refine], 2)\n out_row_refine_input_mpi = np.concatenate(\n [out_row_refine_input_mpi, outs_refine_input_mpi_patch], 2)\n out_row_stuff_behind = np.concatenate(\n [out_row_stuff_behind, outs_stuff_behind_patch], 2)\n out_row_flow_vecs = 
np.concatenate(\n [out_row_flow_vecs, outs_flow_vecs], 2)\n\n if out_rgba is None:\n out_rgba = out_row_rgba\n out_rgba_refine = out_row_rgba_refine\n out_refine_input_mpi = out_row_refine_input_mpi\n out_stuff_behind = out_row_stuff_behind\n out_flow_vecs = out_row_flow_vecs\n else:\n out_rgba = np.concatenate([out_rgba, out_row_rgba], 1)\n out_rgba_refine = np.concatenate([out_rgba_refine, out_row_rgba_refine],\n 1)\n out_refine_input_mpi = np.concatenate(\n [out_refine_input_mpi, out_row_refine_input_mpi], 1)\n out_stuff_behind = np.concatenate(\n [out_stuff_behind, out_row_stuff_behind], 1)\n out_flow_vecs = np.concatenate([out_flow_vecs, out_row_flow_vecs], 1)\n\n outs[\"rgba_layers\"] = np.concatenate(\n [out_rgba[Ellipsis, :3] / 2.0 + 0.5, out_rgba[Ellipsis, 3:]], axis=-1)\n outs[\"rgba_layers_refine\"] = np.concatenate(\n [out_rgba_refine[Ellipsis, :3] / 2.0 + 0.5, out_rgba_refine[Ellipsis, 3:]],\n axis=-1)\n outs[\"refine_input_mpi\"] = np.concatenate([\n out_refine_input_mpi[Ellipsis, :3] / 2.0 + 0.5, out_refine_input_mpi[Ellipsis, 3:]\n ],\n axis=-1)\n outs[\"stuff_behind\"] = out_stuff_behind / 2.0 + 0.5\n outs[\"flow_vecs\"] = out_flow_vecs\n\n # Save MPI layers\n layers = outs[\"rgba_layers_refine\"]\n for i in range(layers.shape[3]):\n i_filename = FLAGS.output_dir + \"mpi_rgba_{:04d}.png\".format(i)\n plt.imsave(i_filename, layers[0, :, :, i, :])\n print(\"wrote layer:\", i)\n\n # Render example sway camera path\n mpi_placeholder = tf.placeholder(\n dtype=tf.float32,\n shape=[\n 1, layers.shape[1] - 2 * crop, layers.shape[2] - 2 * crop,\n layers.shape[3], 4\n ])\n tgt_pose_placeholder = tf.placeholder(dtype=tf.float32, shape=[1, 4, 4])\n intrinsics_placeholder = tf.placeholder(dtype=tf.float32, shape=[1, 3, 3])\n output_render, _ = model.mpi_render_view(mpi_placeholder,\n tgt_pose_placeholder, mpi_planes,\n intrinsics_placeholder)\n\n # Compute sway path poses\n max_trans = max_disp / inputs[\"intrinsics\"][\n 0, 0, 0] # Maximum camera translation to satisfy max_disp parameter\n output_poses = []\n for i in range(num_frames):\n i_trans = max_trans * np.sin(2.0 * np.pi * float(i) / float(num_frames))\n i_pose = np.concatenate([\n np.concatenate(\n [np.eye(3), np.array([i_trans, 0.0, 0.0])[:, np.newaxis]], axis=1),\n np.array([0.0, 0.0, 0.0, 1.0])[np.newaxis, :]\n ],\n axis=0)[np.newaxis, :, :]\n output_poses.append(i_pose)\n\n # Render sway path\n output_render_list = []\n with tf.Session() as sess:\n for i in range(num_frames):\n print(\"Rendering pose:\", i, \"of:\", num_frames)\n i_output = sess.run(\n output_render,\n feed_dict={\n mpi_placeholder:\n outs[\"rgba_layers_refine\"][:, crop:-crop, crop:-crop, :, :],\n tgt_pose_placeholder:\n output_poses[i],\n intrinsics_placeholder:\n inputs[\"intrinsics\"]\n })\n output_render_list.append(i_output)\n\n for i in range(len(output_render_list)):\n plt.imsave(FLAGS.output_dir + \"tmp_{:03d}.png\".format(i),\n output_render_list[i][0, :, :, :])\n\n # Save sway path to video (requires FFMPEG)\n subprocess.call([\n \"ffmpeg\", \"-i\", FLAGS.output_dir + \"tmp_%03d.png\",\n FLAGS.output_dir + \"sway.mp4\"\n ])\n for f in glob.glob(FLAGS.output_dir + \"tmp*.png\"):\n os.remove(f)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n", "# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# 
Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Beam search to find the translated sequence with the highest probability.\n\n Source implementation from:\n\n https://github.com/tensorflow/models/tree/master/official/transformer/model\n\nitself based on implementation from Tensor2Tensor:\n\n https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/beam_search.py\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef inf(dtype):\n \"\"\"Returns a value close to infinity, but is still finite in `dtype`.\n\n This is useful to get a very large value that is still zero when multiplied by\n zero. The floating-point \"Inf\" value is NaN when multiplied by zero.\n\n Args:\n dtype: A dtype. The returned value will be finite when casted to this dtype.\n\n Returns:\n A very large value.\n \"\"\"\n if dtype == \"float32\":\n return 1e7\n elif dtype == \"float16\":\n # Disable no-member lint error, as the linter thinks np.float16 does not\n # exist for some reason.\n return np.finfo(np.float16).max # pylint: disable=no-member\n else:\n raise AssertionError(\"Invalid dtype: %s\" % dtype)\n\n\nclass _StateKeys(object):\n \"\"\"Keys to dictionary storing the state of the beam search loop.\"\"\"\n\n # Variable storing the loop index.\n CUR_INDEX = \"CUR_INDEX\"\n\n # Top sequences that are alive for each batch item. Alive sequences are ones\n # that have not generated an EOS token. Sequences that reach EOS are marked as\n # finished and moved to the FINISHED_SEQ tensor.\n # Has shape [batch_size, beam_size, CUR_INDEX + 1]\n ALIVE_SEQ = \"ALIVE_SEQ\"\n # Log probabilities of each alive sequence. Shape [batch_size, beam_size]\n ALIVE_LOG_PROBS = \"ALIVE_LOG_PROBS\"\n # Dictionary of cached values for each alive sequence. The cache stores\n # the encoder output, attention bias, and the decoder attention output from\n # the previous iteration.\n ALIVE_CACHE = \"ALIVE_CACHE\"\n\n # Top finished sequences for each batch item.\n # Has shape [batch_size, beam_size, CUR_INDEX + 1]. Sequences that are\n # shorter than CUR_INDEX + 1 are padded with 0s.\n FINISHED_SEQ = \"FINISHED_SEQ\"\n # Scores for each finished sequence. Score = log probability / length norm\n # Shape [batch_size, beam_size]\n FINISHED_SCORES = \"FINISHED_SCORES\"\n # Flags indicating which sequences in the finished sequences are finished.\n # At the beginning, all of the sequences in FINISHED_SEQ are filler values.\n # True -> finished sequence, False -> filler. Shape [batch_size, beam_size]\n FINISHED_FLAGS = \"FINISHED_FLAGS\"\n\n\nclass SequenceBeamSearch(object):\n \"\"\"Implementation of beam search loop.\"\"\"\n\n def __init__(self,\n symbols_to_logits_fn,\n vocab_size,\n batch_size,\n beam_size,\n alpha,\n max_decode_length,\n eos_id,\n padded_decode,\n dtype=tf.float32):\n \"\"\"Initialize sequence beam search.\n\n Args:\n symbols_to_logits_fn: A function to provide logits, which is the\n interface to the Transformer model. 
The passed in arguments are:\n ids -> A tensor with shape [batch_size * beam_size, index].\n index -> A scalar.\n cache -> A nest dictionary of tensors [batch_size * beam_size, ...].\n The function must return a tuple of logits and the updated cache:\n logits -> A tensor with shape [batch * beam_size, vocab_size].\n updated cache -> A nested dictionary with the same structure as the\n input cache.\n vocab_size: An integer, the size of the vocabulary, used for topk\n computation.\n batch_size: An integer, the decode batch size.\n beam_size: An integer, number of beams for beam search.\n alpha: A float, defining the strength of length normalization.\n max_decode_length: An integer, the maximum number of steps to decode\n a sequence.\n eos_id: An integer. ID of end of sentence token.\n padded_decode: A bool, indicating if max_sequence_length padding is used\n for beam search.\n dtype: A tensorflow data type used for score computation. The default is\n tf.float32.\n \"\"\"\n self.symbols_to_logits_fn = symbols_to_logits_fn\n self.vocab_size = vocab_size\n self.batch_size = batch_size\n self.beam_size = beam_size\n self.alpha = alpha\n self.max_decode_length = max_decode_length\n self.eos_id = eos_id\n self.padded_decode = padded_decode\n self.dtype = tf.as_dtype(dtype)\n\n def search(self, initial_ids, initial_cache):\n \"\"\"Beam search for sequences with highest scores.\"\"\"\n state, state_shapes = self._create_initial_state(initial_ids, initial_cache)\n\n finished_state = tf.while_loop(\n self._continue_search, self._search_step, loop_vars=[state],\n shape_invariants=[state_shapes], parallel_iterations=1, back_prop=False)\n finished_state = finished_state[0]\n\n alive_seq = finished_state[_StateKeys.ALIVE_SEQ]\n alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS]\n finished_seq = finished_state[_StateKeys.FINISHED_SEQ]\n finished_scores = finished_state[_StateKeys.FINISHED_SCORES]\n finished_flags = finished_state[_StateKeys.FINISHED_FLAGS]\n\n # Account for corner case where there are no finished sequences for a\n # particular batch item. In that case, return alive sequences for that batch\n # item.\n finished_seq = tf.where(\n tf.reduce_any(finished_flags, 1), finished_seq, alive_seq)\n finished_scores = tf.where(\n tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs)\n return finished_seq, finished_scores\n\n def _create_initial_state(self, initial_ids, initial_cache):\n \"\"\"Return initial state dictionary and its shape invariants.\n\n Args:\n initial_ids: initial ids to pass into the symbols_to_logits_fn.\n int tensor with shape [batch_size, 1]\n initial_cache: dictionary storing values to be passed into the\n symbols_to_logits_fn.\n\n Returns:\n state and shape invariant dictionaries with keys from _StateKeys\n \"\"\"\n for key, value in initial_cache.items():\n for inner_value in tf.nest.flatten(value):\n if inner_value.dtype != self.dtype:\n raise TypeError(\n \"initial_cache element for key '%s' has dtype %s that does not \"\n \"match SequenceBeamSearch's dtype of %s. 
Value: %s\" %\n (key, value.dtype.name, self.dtype.name, inner_value))\n\n # Current loop index (starts at 0)\n cur_index = tf.constant(0)\n\n # Create alive sequence with shape [batch_size, beam_size, 1]\n alive_seq = _expand_to_beam_size(initial_ids, self.beam_size)\n alive_seq = tf.expand_dims(alive_seq, axis=2)\n if self.padded_decode:\n alive_seq = tf.tile(alive_seq, [1, 1, self.max_decode_length + 1])\n\n # Create tensor for storing initial log probabilities.\n # Assume initial_ids are prob 1.0\n initial_log_probs = tf.constant(\n [[0.] + [-float(\"inf\")] * (self.beam_size - 1)], dtype=self.dtype)\n alive_log_probs = tf.tile(initial_log_probs, [self.batch_size, 1])\n\n # Expand all values stored in the dictionary to the beam size, so that each\n # beam has a separate cache.\n alive_cache = tf.nest.map_structure(\n lambda t: _expand_to_beam_size(t, self.beam_size), initial_cache)\n\n # Initialize tensor storing finished sequences with filler values.\n finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32)\n\n # Set scores of the initial finished seqs to negative infinity.\n finished_scores = tf.ones([self.batch_size, self.beam_size],\n dtype=self.dtype) * -inf(self.dtype)\n\n # Initialize finished flags with all False values.\n finished_flags = tf.zeros([self.batch_size, self.beam_size], tf.bool)\n\n # Create state dictionary\n state = {\n _StateKeys.CUR_INDEX: cur_index,\n _StateKeys.ALIVE_SEQ: alive_seq,\n _StateKeys.ALIVE_LOG_PROBS: alive_log_probs,\n _StateKeys.ALIVE_CACHE: alive_cache,\n _StateKeys.FINISHED_SEQ: finished_seq,\n _StateKeys.FINISHED_SCORES: finished_scores,\n _StateKeys.FINISHED_FLAGS: finished_flags\n }\n\n # Create state invariants for each value in the state dictionary. Each\n # dimension must be a constant or None. A None dimension means either:\n # 1) the dimension's value is a tensor that remains the same but may\n # depend on the input sequence to the model (e.g. batch size).\n # 2) the dimension may have different values on different iterations.\n if self.padded_decode:\n state_shape_invariants = {\n _StateKeys.CUR_INDEX:\n tf.TensorShape([]),\n _StateKeys.ALIVE_SEQ:\n tf.TensorShape(\n [self.batch_size, self.beam_size,\n self.max_decode_length + 1]),\n _StateKeys.ALIVE_LOG_PROBS:\n tf.TensorShape([self.batch_size, self.beam_size]),\n _StateKeys.ALIVE_CACHE:\n tf.nest.map_structure(_get_shape, alive_cache),\n _StateKeys.FINISHED_SEQ:\n tf.TensorShape(\n [self.batch_size, self.beam_size,\n self.max_decode_length + 1]),\n _StateKeys.FINISHED_SCORES:\n tf.TensorShape([self.batch_size, self.beam_size]),\n _StateKeys.FINISHED_FLAGS:\n tf.TensorShape([self.batch_size, self.beam_size])\n }\n else:\n state_shape_invariants = {\n _StateKeys.CUR_INDEX:\n tf.TensorShape([]),\n _StateKeys.ALIVE_SEQ:\n tf.TensorShape([None, self.beam_size, None]),\n _StateKeys.ALIVE_LOG_PROBS:\n tf.TensorShape([None, self.beam_size]),\n _StateKeys.ALIVE_CACHE:\n tf.nest.map_structure(_get_shape_keep_last_dim, alive_cache),\n _StateKeys.FINISHED_SEQ:\n tf.TensorShape([None, self.beam_size, None]),\n _StateKeys.FINISHED_SCORES:\n tf.TensorShape([None, self.beam_size]),\n _StateKeys.FINISHED_FLAGS:\n tf.TensorShape([None, self.beam_size])\n }\n\n return state, state_shape_invariants\n\n def _continue_search(self, state):\n \"\"\"Return whether to continue the search loop.\n\n The loops should terminate when\n 1) when decode length has been reached, or\n 2) when the worst score in the finished sequences is better than the best\n score in the alive sequences (i.e. 
the finished sequences are provably\n unchanging)\n\n Args:\n state: A dictionary with the current loop state.\n\n Returns:\n Bool tensor with value True if loop should continue, False if loop should\n terminate.\n \"\"\"\n i = state[_StateKeys.CUR_INDEX]\n alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]\n finished_scores = state[_StateKeys.FINISHED_SCORES]\n finished_flags = state[_StateKeys.FINISHED_FLAGS]\n\n not_at_max_decode_length = tf.less(i, self.max_decode_length)\n\n # Calculate largest length penalty (the larger penalty, the better score).\n max_length_norm = _length_normalization(self.alpha, self.max_decode_length,\n dtype=self.dtype)\n # Get the best possible scores from alive sequences.\n best_alive_scores = alive_log_probs[:, 0] / max_length_norm\n\n # Compute worst score in finished sequences for each batch element\n finished_scores *= tf.cast(finished_flags,\n self.dtype) # set filler scores to zero\n lowest_finished_scores = tf.reduce_min(finished_scores, axis=1)\n\n # If there are no finished sequences in a batch element, then set the lowest\n # finished score to -INF for that element.\n finished_batches = tf.reduce_any(finished_flags, 1)\n lowest_finished_scores += ((1.0 -\n tf.cast(finished_batches, self.dtype)) *\n -inf(self.dtype))\n\n worst_finished_score_better_than_best_alive_score = tf.reduce_all(\n tf.greater(lowest_finished_scores, best_alive_scores)\n )\n\n return tf.logical_and(\n not_at_max_decode_length,\n tf.logical_not(worst_finished_score_better_than_best_alive_score)\n )\n\n def _search_step(self, state):\n \"\"\"Beam search loop body.\n\n Grow alive sequences by a single ID. Sequences that have reached the EOS\n token are marked as finished. The alive and finished sequences with the\n highest log probabilities and scores are returned.\n\n A sequence's finished score is calculated by dividing the log probability\n by the length normalization factor. Without length normalization, the\n search is more likely to return shorter sequences.\n\n Args:\n state: A dictionary with the current loop state.\n\n Returns:\n new state dictionary.\n \"\"\"\n # Grow alive sequences by one token.\n new_seq, new_log_probs, new_cache = self._grow_alive_seq(state)\n # Collect top beam_size alive sequences\n alive_state = self._get_new_alive_state(new_seq, new_log_probs, new_cache)\n\n # Combine newly finished sequences with existing finished sequences, and\n # collect the top k scoring sequences.\n finished_state = self._get_new_finished_state(state, new_seq, new_log_probs)\n\n # Increment loop index and create new state dictionary\n new_state = {_StateKeys.CUR_INDEX: state[_StateKeys.CUR_INDEX] + 1}\n new_state.update(alive_state)\n new_state.update(finished_state)\n return [new_state]\n\n def _grow_alive_seq(self, state):\n \"\"\"Grow alive sequences by one token, and collect top 2*beam_size sequences.\n\n 2*beam_size sequences are collected because some sequences may have reached\n the EOS token. 
2*beam_size ensures that at least beam_size sequences are\n still alive.\n\n Args:\n state: A dictionary with the current loop state.\n Returns:\n Tuple of\n (Top 2*beam_size sequences [batch_size, 2 * beam_size, cur_index + 1],\n Scores of returned sequences [batch_size, 2 * beam_size],\n New alive cache, for each of the 2 * beam_size sequences)\n \"\"\"\n i = state[_StateKeys.CUR_INDEX]\n alive_seq = state[_StateKeys.ALIVE_SEQ]\n alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]\n alive_cache = state[_StateKeys.ALIVE_CACHE]\n\n beams_to_keep = 2 * self.beam_size\n\n # Get logits for the next candidate IDs for the alive sequences. Get the new\n # cache values at the same time.\n if self.padded_decode:\n flat_ids = tf.reshape(\n tf.slice(alive_seq, [0, 0, i], [self.batch_size, self.beam_size, 1]),\n [self.batch_size * self.beam_size, -1])\n else:\n flat_ids = _flatten_beam_dim(alive_seq) # [batch_size * beam_size]\n flat_cache = tf.nest.map_structure(_flatten_beam_dim, alive_cache)\n\n flat_logits, flat_cache = self.symbols_to_logits_fn(flat_ids, i, flat_cache)\n\n # Unflatten logits to shape [batch_size, beam_size, vocab_size]\n logits = _unflatten_beam_dim(flat_logits, self.batch_size, self.beam_size)\n new_cache = tf.nest.map_structure(\n lambda t: _unflatten_beam_dim(t, self.batch_size, self.beam_size),\n flat_cache)\n\n # Convert logits to normalized log probs\n candidate_log_probs = _log_prob_from_logits(logits)\n\n # Calculate new log probabilities if each of the alive sequences were\n # extended # by the the candidate IDs.\n # Shape [batch_size, beam_size, vocab_size]\n log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)\n\n # Each batch item has beam_size * vocab_size candidate sequences. For each\n # batch item, get the k candidates with the highest log probabilities.\n flat_log_probs = tf.reshape(log_probs,\n [-1, self.beam_size * self.vocab_size])\n topk_log_probs, topk_indices = tf.nn.top_k(flat_log_probs, k=beams_to_keep)\n\n # Extract the alive sequences that generate the highest log probabilities\n # after being extended.\n topk_beam_indices = topk_indices // self.vocab_size\n topk_seq, new_cache = _gather_beams(\n [alive_seq, new_cache], topk_beam_indices, self.batch_size,\n beams_to_keep)\n\n # Append the most probable IDs to the topk sequences\n topk_ids = topk_indices % self.vocab_size\n if self.padded_decode:\n topk_seq = tf.transpose(topk_seq, perm=[2, 0, 1])\n topk_seq = tf.tensor_scatter_nd_update(topk_seq, [i + 1], topk_ids)\n topk_seq = tf.transpose(topk_seq, perm=[1, 2, 0])\n else:\n topk_ids = tf.expand_dims(topk_ids, axis=2)\n topk_seq = tf.concat([topk_seq, topk_ids], axis=2)\n return topk_seq, topk_log_probs, new_cache\n\n def _get_new_alive_state(self, new_seq, new_log_probs, new_cache):\n \"\"\"Gather the top k sequences that are still alive.\n\n Args:\n new_seq: New sequences generated by growing the current alive sequences\n int32 tensor with shape [batch_size, 2 * beam_size, cur_index + 1]\n new_log_probs: Log probabilities of new sequences\n float32 tensor with shape [batch_size, beam_size]\n new_cache: Dict of cached values for each sequence.\n\n Returns:\n Dictionary with alive keys from _StateKeys:\n {Top beam_size sequences that are still alive (don't end with eos_id)\n Log probabilities of top alive sequences\n Dict cache storing decoder states for top alive sequences}\n \"\"\"\n # To prevent finished sequences from being considered, set log probs to -inf\n new_finished_flags = tf.equal(new_seq[:, :, -1], self.eos_id)\n 
new_log_probs += tf.cast(new_finished_flags, self.dtype) * -inf(self.dtype)\n\n top_alive_seq, top_alive_log_probs, top_alive_cache = _gather_topk_beams(\n [new_seq, new_log_probs, new_cache], new_log_probs, self.batch_size,\n self.beam_size)\n\n return {\n _StateKeys.ALIVE_SEQ: top_alive_seq,\n _StateKeys.ALIVE_LOG_PROBS: top_alive_log_probs,\n _StateKeys.ALIVE_CACHE: top_alive_cache\n }\n\n def _get_new_finished_state(self, state, new_seq, new_log_probs):\n \"\"\"Combine new and old finished sequences, and gather the top k sequences.\n\n Args:\n state: A dictionary with the current loop state.\n new_seq: New sequences generated by growing the current alive sequences\n int32 tensor with shape [batch_size, beam_size, i + 1]\n new_log_probs: Log probabilities of new sequences\n float32 tensor with shape [batch_size, beam_size]\n\n Returns:\n Dictionary with finished keys from _StateKeys:\n {Top beam_size finished sequences based on score,\n Scores of finished sequences,\n Finished flags of finished sequences}\n \"\"\"\n i = state[_StateKeys.CUR_INDEX]\n finished_seq = state[_StateKeys.FINISHED_SEQ]\n finished_scores = state[_StateKeys.FINISHED_SCORES]\n finished_flags = state[_StateKeys.FINISHED_FLAGS]\n\n # First append a column of 0-ids to finished_seq to increment the length.\n # New shape of finished_seq: [batch_size, beam_size, i + 1]\n if not self.padded_decode:\n finished_seq = tf.concat([\n finished_seq,\n tf.zeros([self.batch_size, self.beam_size, 1], tf.int32)\n ],\n axis=2)\n\n # Calculate new seq scores from log probabilities.\n length_norm = _length_normalization(self.alpha, i + 1, dtype=self.dtype)\n new_scores = new_log_probs / length_norm\n\n # Set the scores of the still-alive seq in new_seq to large negative values.\n new_finished_flags = tf.equal(new_seq[:, :, -1], self.eos_id)\n new_scores += ((1. - tf.cast(new_finished_flags, self.dtype)) *\n -inf(self.dtype))\n\n # Combine sequences, scores, and flags.\n finished_seq = tf.concat([finished_seq, new_seq], axis=1)\n finished_scores = tf.concat([finished_scores, new_scores], axis=1)\n finished_flags = tf.concat([finished_flags, new_finished_flags], axis=1)\n\n # Return the finished sequences with the best scores.\n top_finished_seq, top_finished_scores, top_finished_flags = (\n _gather_topk_beams([finished_seq, finished_scores, finished_flags],\n finished_scores, self.batch_size, self.beam_size))\n\n return {\n _StateKeys.FINISHED_SEQ: top_finished_seq,\n _StateKeys.FINISHED_SCORES: top_finished_scores,\n _StateKeys.FINISHED_FLAGS: top_finished_flags\n }\n\n\ndef sequence_beam_search(\n symbols_to_logits_fn, initial_ids, initial_cache, vocab_size, beam_size,\n alpha, max_decode_length, eos_id, padded_decode=False):\n \"\"\"Search for sequence of subtoken ids with the largest probability.\n\n Args:\n symbols_to_logits_fn: A function that takes in ids, index, and cache as\n arguments. The passed in arguments will have shape:\n ids -> A tensor with shape [batch_size * beam_size, index].\n index -> A scalar.\n cache -> A nested dictionary of tensors [batch_size * beam_size, ...].\n The function must return a tuple of logits and new cache:\n logits -> A tensor with shape [batch * beam_size, vocab_size].\n new cache -> A nested dictionary with the same shape/structure as the\n inputted cache.\n initial_ids: An int32 tensor with shape [batch_size]. 
Starting ids for\n each batch item.\n initial_cache: A dictionary, containing starting decoder variables\n information.\n vocab_size: An integer, the size of the vocabulary, used for topk\n computation.\n beam_size: An integer, the number of beams.\n alpha: A float, defining the strength of length normalization.\n max_decode_length: An integer, the maximum length to decoded a sequence.\n eos_id: An integer, ID of eos token, used to determine when a sequence has\n finished.\n padded_decode: A bool, indicating if max_sequence_length padding is used\n for beam search.\n\n Returns:\n Top decoded sequences [batch_size, beam_size, max_decode_length]\n sequence scores [batch_size, beam_size]\n \"\"\"\n batch_size = (\n initial_ids.shape.as_list()[0] if padded_decode else\n tf.shape(initial_ids)[0])\n sbs = SequenceBeamSearch(symbols_to_logits_fn, vocab_size, batch_size,\n beam_size, alpha, max_decode_length, eos_id,\n padded_decode)\n return sbs.search(initial_ids, initial_cache)\n\n\ndef _log_prob_from_logits(logits):\n return logits - tf.reduce_logsumexp(logits, axis=2, keepdims=True)\n\n\ndef _length_normalization(alpha, length, dtype=tf.float32):\n \"\"\"Return length normalization factor.\"\"\"\n return tf.pow(((5. + tf.cast(length, dtype)) / 6.), alpha)\n\n\ndef _expand_to_beam_size(tensor, beam_size):\n \"\"\"Tiles a given tensor by beam_size.\n\n Args:\n tensor: tensor to tile [batch_size, ...]\n beam_size: How much to tile the tensor by.\n\n Returns:\n Tiled tensor [batch_size, beam_size, ...]\n \"\"\"\n tensor = tf.expand_dims(tensor, axis=1)\n tile_dims = [1] * tensor.shape.ndims\n tile_dims[1] = beam_size\n\n return tf.tile(tensor, tile_dims)\n\n\ndef _shape_list(tensor):\n \"\"\"Return a list of the tensor's shape, and ensure no None values in list.\"\"\"\n # Get statically known shape (may contain None's for unknown dimensions)\n shape = tensor.get_shape().as_list()\n\n # Ensure that the shape values are not None\n dynamic_shape = tf.shape(tensor)\n for i in range(len(shape)): # pylint: disable=consider-using-enumerate\n if shape[i] is None:\n shape[i] = dynamic_shape[i]\n return shape\n\n\ndef _get_shape_keep_last_dim(tensor):\n shape_list = _shape_list(tensor)\n\n # Only the last\n for i in range(len(shape_list) - 1):\n shape_list[i] = None\n\n if isinstance(shape_list[-1], tf.Tensor):\n shape_list[-1] = None\n return tf.TensorShape(shape_list)\n\n\ndef _get_shape(tensor):\n \"\"\"Return the shape of the input tensor.\"\"\"\n return tf.TensorShape(_shape_list(tensor))\n\n\ndef _flatten_beam_dim(tensor):\n \"\"\"Reshapes first two dimensions in to single dimension.\n\n Args:\n tensor: Tensor to reshape of shape [A, B, ...]\n\n Returns:\n Reshaped tensor of shape [A*B, ...]\n \"\"\"\n shape = _shape_list(tensor)\n shape[0] *= shape[1]\n shape.pop(1) # Remove beam dim\n return tf.reshape(tensor, shape)\n\n\ndef _unflatten_beam_dim(tensor, batch_size, beam_size):\n \"\"\"Reshapes first dimension back to [batch_size, beam_size].\n\n Args:\n tensor: Tensor to reshape of shape [batch_size*beam_size, ...]\n batch_size: Tensor, original batch size.\n beam_size: int, original beam size.\n\n Returns:\n Reshaped tensor of shape [batch_size, beam_size, ...]\n \"\"\"\n shape = _shape_list(tensor)\n new_shape = [batch_size, beam_size] + shape[1:]\n return tf.reshape(tensor, new_shape)\n\n\ndef _gather_beams(nested, beam_indices, batch_size, new_beam_size):\n \"\"\"Gather beams from nested structure of tensors.\n\n Each tensor in nested represents a batch of beams, where beam refers to a\n single 
search state (beam search involves searching through multiple states\n in parallel).\n\n This function is used to gather the top beams, specified by\n beam_indices, from the nested tensors.\n\n Args:\n nested: Nested structure (tensor, list, tuple or dict) containing tensors\n with shape [batch_size, beam_size, ...].\n beam_indices: int32 tensor with shape [batch_size, new_beam_size]. Each\n value in beam_indices must be between [0, beam_size), and are not\n necessarily unique.\n batch_size: int size of batch\n new_beam_size: int number of beams to be pulled from the nested tensors.\n\n Returns:\n Nested structure containing tensors with shape\n [batch_size, new_beam_size, ...]\n \"\"\"\n # Computes the i'th coodinate that contains the batch index for gather_nd.\n # Batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..].\n batch_pos = tf.range(batch_size * new_beam_size) // new_beam_size\n batch_pos = tf.reshape(batch_pos, [batch_size, new_beam_size])\n\n # Create coordinates to be passed to tf.gather_nd. Stacking creates a tensor\n # with shape [batch_size, beam_size, 2], where the last dimension contains\n # the (i, j) gathering coordinates.\n coordinates = tf.stack([batch_pos, beam_indices], axis=2)\n\n return tf.nest.map_structure(\n lambda state: tf.gather_nd(state, coordinates), nested)\n\n\ndef _gather_topk_beams(nested, score_or_log_prob, batch_size, beam_size):\n \"\"\"Gather top beams from nested structure.\"\"\"\n _, topk_indexes = tf.nn.top_k(score_or_log_prob, k=beam_size)\n return _gather_beams(nested, topk_indexes, batch_size, beam_size)\n", "# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Capsule autoencoder implementation.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport sonnet as snt\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom stacked_capsule_autoencoders.capsules import capsule as _capsule\nfrom stacked_capsule_autoencoders.capsules import math_ops\nfrom stacked_capsule_autoencoders.capsules import plot\nfrom stacked_capsule_autoencoders.capsules import probe\nfrom stacked_capsule_autoencoders.capsules import tensor_ops\nfrom stacked_capsule_autoencoders.capsules.data import preprocess\nfrom stacked_capsule_autoencoders.capsules.models import Model\nfrom stacked_capsule_autoencoders.capsules.tensor_ops import make_brodcastable\n\ntfd = tfp.distributions\n\n\nclass ImageCapsule(snt.AbstractModule):\n \"\"\"Capsule decoder for constellations.\"\"\"\n\n def __init__(self, n_caps, n_caps_dims, n_votes, **capsule_kwargs):\n \"\"\"Builds the module.\n\n Args:\n n_caps: int, number of capsules.\n n_caps_dims: int, number of capsule coordinates.\n n_votes: int, number of votes generated by each capsule.\n **capsule_kwargs: kwargs passed to capsule layer.\n \"\"\"\n super(ImageCapsule, self).__init__()\n self._n_caps = n_caps\n self._n_caps_dims = n_caps_dims\n self._n_votes = n_votes\n 
self._capsule_kwargs = capsule_kwargs\n\n def _build(self, h, x, presence=None):\n \"\"\"Builds the module.\n\n Args:\n h: Tensor of encodings of shape [B, n_enc_dims].\n x: Tensor of inputs of shape [B, n_points, n_input_dims]\n presence: Tensor of shape [B, n_points, 1] or None; if it exists, it\n indicates which input points exist.\n\n Returns:\n A bunch of stuff.\n \"\"\"\n batch_size = int(x.shape[0])\n\n capsule = _capsule.CapsuleLayer(self._n_caps, self._n_caps_dims,\n self._n_votes, **self._capsule_kwargs)\n\n res = capsule(h)\n vote_shape = [batch_size, self._n_caps, self._n_votes, 6]\n res.vote = tf.reshape(res.vote[Ellipsis, :-1, :], vote_shape)\n\n votes, scale, vote_presence_prob = res.vote, res.scale, res.vote_presence\n\n likelihood = _capsule.CapsuleLikelihood(votes, scale, vote_presence_prob)\n ll_res = likelihood(x, presence)\n res.update(ll_res._asdict())\n\n caps_presence_prob = tf.reduce_max(\n tf.reshape(vote_presence_prob,\n [batch_size, self._n_caps, self._n_votes]), 2)\n\n res.caps_presence_prob = caps_presence_prob\n return res\n\n\nclass ImageAutoencoder(Model):\n \"\"\"Capsule autoencoder.\"\"\"\n\n def __init__(\n self,\n primary_encoder,\n primary_decoder,\n encoder,\n decoder,\n input_key,\n label_key=None,\n n_classes=None,\n dynamic_l2_weight=0.,\n caps_ll_weight=0.,\n vote_type='soft',\n pres_type='enc',\n img_summaries=False,\n stop_grad_caps_inpt=False,\n stop_grad_caps_target=False,\n prior_sparsity_loss_type='kl',\n prior_within_example_sparsity_weight=0.,\n prior_between_example_sparsity_weight=0.,\n prior_within_example_constant=0.,\n posterior_sparsity_loss_type='kl',\n posterior_within_example_sparsity_weight=0.,\n posterior_between_example_sparsity_weight=0.,\n primary_caps_sparsity_weight=0.,\n weight_decay=0.,\n feed_templates=True,\n prep='none',\n ):\n\n super(ImageAutoencoder, self).__init__()\n self._primary_encoder = primary_encoder\n self._primary_decoder = primary_decoder\n self._encoder = encoder\n self._decoder = decoder\n self._input_key = input_key\n self._label_key = label_key\n self._n_classes = n_classes\n\n self._dynamic_l2_weight = dynamic_l2_weight\n self._caps_ll_weight = caps_ll_weight\n self._vote_type = vote_type\n self._pres_type = pres_type\n self._img_summaries = img_summaries\n\n self._stop_grad_caps_inpt = stop_grad_caps_inpt\n self._stop_grad_caps_target = stop_grad_caps_target\n self._prior_sparsity_loss_type = prior_sparsity_loss_type\n self._prior_within_example_sparsity_weight = prior_within_example_sparsity_weight\n self._prior_between_example_sparsity_weight = prior_between_example_sparsity_weight\n self._prior_within_example_constant = prior_within_example_constant\n self._posterior_sparsity_loss_type = posterior_sparsity_loss_type\n self._posterior_within_example_sparsity_weight = posterior_within_example_sparsity_weight\n self._posterior_between_example_sparsity_weight = posterior_between_example_sparsity_weight\n self._primary_caps_sparsity_weight = primary_caps_sparsity_weight\n self._weight_decay = weight_decay\n self._feed_templates = feed_templates\n\n self._prep = prep\n\n\n def _img(self, data, prep='none'):\n\n img = data[self._input_key]\n if prep == 'sobel':\n img = preprocess.normalized_sobel_edges(img)\n\n return img\n\n def _label(self, data):\n return data.get(self._label_key, None)\n\n def _build(self, data):\n\n input_x = self._img(data, False)\n target_x = self._img(data, prep=self._prep)\n batch_size = int(input_x.shape[0])\n\n primary_caps = self._primary_encoder(input_x)\n pres = 
primary_caps.presence\n\n expanded_pres = tf.expand_dims(pres, -1)\n pose = primary_caps.pose\n input_pose = tf.concat([pose, 1. - expanded_pres], -1)\n\n input_pres = pres\n if self._stop_grad_caps_inpt:\n input_pose = tf.stop_gradient(input_pose)\n input_pres = tf.stop_gradient(pres)\n\n target_pose, target_pres = pose, pres\n if self._stop_grad_caps_target:\n target_pose = tf.stop_gradient(target_pose)\n target_pres = tf.stop_gradient(target_pres)\n\n # skip connection from the img to the higher level capsule\n if primary_caps.feature is not None:\n input_pose = tf.concat([input_pose, primary_caps.feature], -1)\n\n # try to feed presence as a separate input\n # and if that works, concatenate templates to poses\n # this is necessary for set transformer\n n_templates = int(primary_caps.pose.shape[1])\n templates = self._primary_decoder.make_templates(n_templates,\n primary_caps.feature)\n\n try:\n if self._feed_templates:\n inpt_templates = templates\n if self._stop_grad_caps_inpt:\n inpt_templates = tf.stop_gradient(inpt_templates)\n\n if inpt_templates.shape[0] == 1:\n inpt_templates = snt.TileByDim([0], [batch_size])(inpt_templates)\n inpt_templates = snt.BatchFlatten(2)(inpt_templates)\n pose_with_templates = tf.concat([input_pose, inpt_templates], -1)\n else:\n pose_with_templates = input_pose\n\n h = self._encoder(pose_with_templates, input_pres)\n\n except TypeError:\n h = self._encoder(input_pose)\n\n res = self._decoder(h, target_pose, target_pres)\n res.primary_presence = primary_caps.presence\n\n if self._vote_type == 'enc':\n primary_dec_vote = primary_caps.pose\n elif self._vote_type == 'soft':\n primary_dec_vote = res.soft_winner\n elif self._vote_type == 'hard':\n primary_dec_vote = res.winner\n else:\n raise ValueError('Invalid vote_type=\"{}\"\".'.format(self._vote_type))\n\n if self._pres_type == 'enc':\n primary_dec_pres = pres\n elif self._pres_type == 'soft':\n primary_dec_pres = res.soft_winner_pres\n elif self._pres_type == 'hard':\n primary_dec_pres = res.winner_pres\n else:\n raise ValueError('Invalid pres_type=\"{}\"\".'.format(self._pres_type))\n\n res.bottom_up_rec = self._primary_decoder(\n primary_caps.pose,\n primary_caps.presence,\n template_feature=primary_caps.feature,\n img_embedding=primary_caps.img_embedding)\n\n res.top_down_rec = self._primary_decoder(\n res.winner,\n primary_caps.presence,\n template_feature=primary_caps.feature,\n img_embedding=primary_caps.img_embedding)\n\n rec = self._primary_decoder(\n primary_dec_vote,\n primary_dec_pres,\n template_feature=primary_caps.feature,\n img_embedding=primary_caps.img_embedding)\n\n tile = snt.TileByDim([0], [res.vote.shape[1]])\n tiled_presence = tile(primary_caps.presence)\n\n tiled_feature = primary_caps.feature\n if tiled_feature is not None:\n tiled_feature = tile(tiled_feature)\n\n tiled_img_embedding = tile(primary_caps.img_embedding)\n\n res.top_down_per_caps_rec = self._primary_decoder(\n snt.MergeDims(0, 2)(res.vote),\n snt.MergeDims(0, 2)(res.vote_presence) * tiled_presence,\n template_feature=tiled_feature,\n img_embedding=tiled_img_embedding)\n\n res.templates = templates\n res.template_pres = pres\n res.used_templates = rec.transformed_templates\n\n res.rec_mode = rec.pdf.mode()\n res.rec_mean = rec.pdf.mean()\n\n res.mse_per_pixel = tf.square(target_x - res.rec_mode)\n res.mse = math_ops.flat_reduce(res.mse_per_pixel)\n\n res.rec_ll_per_pixel = rec.pdf.log_prob(target_x)\n res.rec_ll = math_ops.flat_reduce(res.rec_ll_per_pixel)\n\n n_points = int(res.posterior_mixing_probs.shape[1])\n 
mass_explained_by_capsule = tf.reduce_sum(res.posterior_mixing_probs, 1)\n\n (res.posterior_within_sparsity_loss,\n res.posterior_between_sparsity_loss) = _capsule.sparsity_loss(\n self._posterior_sparsity_loss_type,\n mass_explained_by_capsule / n_points,\n num_classes=self._n_classes)\n\n (res.prior_within_sparsity_loss,\n res.prior_between_sparsity_loss) = _capsule.sparsity_loss(\n self._prior_sparsity_loss_type,\n res.caps_presence_prob,\n num_classes=self._n_classes,\n within_example_constant=self._prior_within_example_constant)\n\n label = self._label(data)\n if label is not None:\n res.posterior_cls_xe, res.posterior_cls_acc = probe.classification_probe(\n mass_explained_by_capsule,\n label,\n self._n_classes,\n labeled=data.get('labeled', None))\n res.prior_cls_xe, res.prior_cls_acc = probe.classification_probe(\n res.caps_presence_prob,\n label,\n self._n_classes,\n labeled=data.get('labeled', None))\n\n res.best_cls_acc = tf.maximum(res.prior_cls_acc, res.posterior_cls_acc)\n\n res.primary_caps_l1 = math_ops.flat_reduce(res.primary_presence)\n\n\n if self._weight_decay > 0.0:\n decay_losses_list = []\n for var in tf.trainable_variables():\n if 'w:' in var.name or 'weights:' in var.name:\n decay_losses_list.append(tf.nn.l2_loss(var))\n res.weight_decay_loss = tf.reduce_sum(decay_losses_list)\n else:\n res.weight_decay_loss = 0.0\n\n\n return res\n\n def _loss(self, data, res):\n\n loss = (-res.rec_ll - self._caps_ll_weight * res.log_prob +\n self._dynamic_l2_weight * res.dynamic_weights_l2 +\n self._primary_caps_sparsity_weight * res.primary_caps_l1 +\n self._posterior_within_example_sparsity_weight *\n res.posterior_within_sparsity_loss -\n self._posterior_between_example_sparsity_weight *\n res.posterior_between_sparsity_loss +\n self._prior_within_example_sparsity_weight *\n res.prior_within_sparsity_loss -\n self._prior_between_example_sparsity_weight *\n res.prior_between_sparsity_loss +\n self._weight_decay * res.weight_decay_loss\n )\n\n try:\n loss += res.posterior_cls_xe + res.prior_cls_xe\n except AttributeError:\n pass\n\n return loss\n\n def _report(self, data, res):\n reports = super(ImageAutoencoder, self)._report(data, res)\n\n n_caps = self._decoder._n_caps # pylint:disable=protected-access\n\n is_from_capsule = res.is_from_capsule\n ones = tf.ones_like(is_from_capsule)\n capsule_one_hot = tf.one_hot((is_from_capsule + ones),\n depth=n_caps + 1)[Ellipsis, 1:]\n\n num_per_group = tf.reduce_sum(capsule_one_hot, 1)\n num_per_group_per_batch = tf.reduce_mean(tf.to_float(num_per_group), 0)\n\n reports.update({\n 'votes_per_capsule_{}'.format(k): v\n for k, v in enumerate(tf.unstack(num_per_group_per_batch))\n })\n\n label = self._label(data)\n\n\n return reports\n\n def _plot(self, data, res, name=None):\n\n img = self._img(data)\n label = self._label(data)\n if label is not None:\n label_one_hot = tf.one_hot(label, depth=self._n_classes)\n\n _render_activations = functools.partial( # pylint:disable=invalid-name\n plot.render_activations,\n height=int(img.shape[1]),\n pixels_per_caps=3,\n cmap='viridis')\n\n mass_explained_by_capsule = tf.reduce_sum(res.posterior_mixing_probs, 1)\n normalized_mass_expplained_by_capsule = mass_explained_by_capsule / tf.reduce_max(\n mass_explained_by_capsule, -1, keepdims=True) # pylint:disable=line-too-long\n\n posterior_caps_activation = _render_activations(\n normalized_mass_expplained_by_capsule) # pylint:disable=line-too-long\n prior_caps_activation = _render_activations(res.caps_presence_prob)\n\n is_from_capsule = 
snt.BatchApply(_render_activations)(\n res.posterior_mixing_probs)\n\n green = res.top_down_rec\n rec_red = res.rec_mode\n rec_green = green.pdf.mode()\n\n flat_per_caps_rec = res.top_down_per_caps_rec.pdf.mode()\n shape = res.vote.shape[:2].concatenate(flat_per_caps_rec.shape[1:])\n per_caps_rec = tf.reshape(flat_per_caps_rec, shape)\n per_caps_rec = plot.concat_images(\n tf.unstack(per_caps_rec, axis=1), 1, vertical=False)\n one_image = tf.reduce_mean(\n self._img(data, self._prep), axis=-1, keepdims=True)\n one_rec = tf.reduce_mean(rec_red, axis=-1, keepdims=True)\n diff = tf.concat([one_image, one_rec, tf.zeros_like(one_image)], -1)\n\n used_templates = tf.reduce_mean(res.used_templates, axis=-1, keepdims=True)\n green_templates = tf.reduce_mean(\n green.transformed_templates, axis=-1, keepdims=True)\n templates = tf.concat(\n [used_templates, green_templates,\n tf.zeros_like(used_templates)], -1)\n\n templates = tf.concat(\n [templates,\n tf.ones_like(templates[:, :, :, :1]), is_from_capsule], 3)\n\n all_imgs = [\n img, rec_red, rec_green, diff, prior_caps_activation,\n tf.zeros_like(rec_red[:, :, :1]), posterior_caps_activation,\n per_caps_rec\n ] + list(tf.unstack(templates, axis=1))\n\n for i, img in enumerate(all_imgs):\n if img.shape[-1] == 1:\n all_imgs[i] = tf.image.grayscale_to_rgb(img)\n\n img_with_templates = plot.concat_images(all_imgs, 1, vertical=False)\n\n def render_corr(x, y):\n corr = abs(plot.correlation(x, y))\n rendered_corr = tf.expand_dims(_render_activations(corr), 0)\n return plot.concat_images(\n tf.unstack(rendered_corr, axis=1), 3, vertical=False)\n\n if label is not None:\n\n posterior_label_corr = render_corr(normalized_mass_expplained_by_capsule,\n label_one_hot)\n prior_label_corr = render_corr(res.caps_presence_prob, label_one_hot)\n label_corr = plot.concat_images([prior_label_corr, posterior_label_corr],\n 3,\n vertical=True)\n else:\n label_corr = tf.zeros_like(img)\n\n n_examples = min(int(shape[0]), 16)\n plot_params = dict(\n img_with_templates=dict(\n grid_height=n_examples,\n zoom=3.,\n ))\n\n templates = res.templates\n if len(templates.shape) == 5:\n if templates.shape[0] == 1:\n templates = tf.squeeze(templates, 0)\n\n else:\n templates = templates[:n_examples]\n templates = plot.concat_images(\n tf.unstack(templates, axis=1), 1, vertical=False)\n plot_params['templates'] = dict(grid_height=n_examples)\n\n plot_dict = dict(\n templates=templates,\n img_with_templates=img_with_templates[:n_examples],\n label_corr=label_corr,\n )\n\n return plot_dict, plot_params\n\n\n", "# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Plot density from model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport math\nimport os\nimport numpy as np\nfrom six.moves import range\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom eim.models import base\nfrom eim.models import his\nfrom eim.models import 
lars\nfrom eim.models import nis\nfrom eim.models import rejection_sampling\nimport eim.small_problems_dists as dists\ntfd = tfp.distributions\n\ntf.logging.set_verbosity(tf.logging.INFO)\ntf.app.flags.DEFINE_enum(\n \"algo\", \"lars\", [\"lars\", \"nis\", \"his\", \"density\", \"rejection_sampling\"],\n \"The algorithm to run. Density draws the targeted density\")\ntf.app.flags.DEFINE_enum(\"target\", dists.NINE_GAUSSIANS_DIST,\n dists.TARGET_DISTS, \"Distribution to draw data from.\")\ntf.app.flags.DEFINE_float(\"proposal_variance\", 1.0,\n \"Variance for proposal distribution\")\ntf.app.flags.DEFINE_string(\n \"energy_fn_sizes\", \"20,20\",\n \"List of hidden layer sizes for energy function as as comma \"\n \"separated list.\")\ntf.app.flags.DEFINE_integer(\n \"his_t\", 5, \"Number of steps for hamiltonian importance sampling.\")\ntf.app.flags.DEFINE_float(\"his_stepsize\", 1e-2,\n \"Stepsize for hamiltonian importance sampling.\")\ntf.app.flags.DEFINE_float(\"his_alpha\", 0.995,\n \"Alpha for hamiltonian importance sampling.\")\ntf.app.flags.DEFINE_boolean(\"his_learn_stepsize\", False,\n \"Allow HIS to learn the stepsize\")\ntf.app.flags.DEFINE_boolean(\"his_learn_alpha\", False,\n \"Allow HIS to learn alpha.\")\ntf.app.flags.DEFINE_integer(\"K\", 1024,\n \"The number of samples for NIS and LARS.\")\ntf.app.flags.DEFINE_integer(\"num_bins\", 500,\n \"Number of points per axis when plotting density.\")\ntf.app.flags.DEFINE_integer(\"num_samples\", 10000000,\n \"Number of samples to use when plotting density.\")\ntf.app.flags.DEFINE_integer(\"batch_size\", 100000, \"The batch size.\")\ntf.app.flags.DEFINE_string(\"logdir\", \"/tmp/lars\",\n \"Directory for summaries and checkpoints.\")\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef make_sample_density_summary(session,\n data,\n max_samples_per_batch=100000,\n num_samples=1000000,\n num_bins=100):\n \"\"\"Plot approximate density based on samples.\"\"\"\n bounds = (-2, 2)\n num_batches = int(math.ceil(num_samples / float(max_samples_per_batch)))\n hist = None\n for i in range(num_batches):\n tf.logging.info(\"Processing batch %d / %d of samples for density image.\" %\n (i + 1, num_batches))\n s = session.run(data)\n if hist is None:\n hist = np.histogram2d(\n s[:, 0], s[:, 1], bins=num_bins, range=[bounds, bounds])[0]\n else:\n hist += np.histogram2d(\n s[:, 0], s[:, 1], bins=num_bins, range=[bounds, bounds])[0]\n with tf.io.gfile.GFile(os.path.join(FLAGS.logdir, \"density\"), \"w\") as out:\n np.save(out, hist)\n tf.logging.info(\"Density image saved to %s\" %\n os.path.join(FLAGS.logdir, \"density.npy\"))\n\n\ndef reduce_logavgexp(input_tensor, axis=None, keepdims=None, name=None):\n dims = tf.shape(input_tensor)\n if axis is not None:\n dims = tf.gather(dims, axis)\n denominator = tf.reduce_prod(dims)\n return (tf.reduce_logsumexp(\n input_tensor, axis=axis, keepdims=keepdims, name=name) -\n tf.log(tf.to_float(denominator)))\n\n\ndef make_density_summary(log_density_fn, num_bins=100):\n \"\"\"Plot density.\"\"\"\n if FLAGS.target == dists.NINE_GAUSSIANS_DIST or FLAGS.target == dists.TWO_RINGS_DIST:\n bounds = (-2, 2)\n elif FLAGS.target == dists.CHECKERBOARD_DIST:\n bounds = (0, 1)\n\n x = tf.range(\n bounds[0], bounds[1], delta=(bounds[1] - bounds[0]) / float(num_bins))\n grid_x, grid_y = tf.meshgrid(x, x, indexing=\"ij\")\n grid_xy = tf.stack([grid_x, grid_y], axis=-1)\n\n log_z = log_density_fn(grid_xy)\n log_bigz = reduce_logavgexp(log_z)\n z = tf.exp(log_z - log_bigz)\n\n plot = tf.reshape(z, [num_bins, num_bins])\n return plot\n\n\ndef 
main(unused_argv):\n g = tf.Graph()\n with g.as_default():\n energy_fn_layers = [\n int(x.strip()) for x in FLAGS.energy_fn_sizes.split(\",\")\n ]\n if FLAGS.algo == \"density\":\n target = dists.get_target_distribution(FLAGS.target)\n plot = make_density_summary(target.log_prob, num_bins=FLAGS.num_bins)\n with tf.train.SingularMonitoredSession(\n checkpoint_dir=FLAGS.logdir) as sess:\n plot = sess.run(plot)\n with tf.io.gfile.GFile(os.path.join(FLAGS.logdir, \"density\"),\n \"w\") as out:\n np.save(out, plot)\n elif FLAGS.algo == \"lars\":\n tf.logging.info(\"Running LARS\")\n proposal = base.get_independent_normal([2], FLAGS.proposal_variance)\n model = lars.SimpleLARS(\n K=FLAGS.K, data_dim=[2], accept_fn_layers=energy_fn_layers,\n proposal=proposal)\n plot = make_density_summary(\n lambda x: tf.squeeze(model.accept_fn(x)) + model.proposal.log_prob(x),\n num_bins=FLAGS.num_bins)\n with tf.train.SingularMonitoredSession(\n checkpoint_dir=FLAGS.logdir) as sess:\n plot = sess.run(plot)\n with tf.io.gfile.GFile(os.path.join(FLAGS.logdir, \"density\"),\n \"w\") as out:\n np.save(out, plot)\n else:\n proposal = base.get_independent_normal([2], FLAGS.proposal_variance)\n if FLAGS.algo == \"nis\":\n tf.logging.info(\"Running NIS\")\n model = nis.NIS(\n K=FLAGS.K, data_dim=[2], energy_hidden_sizes=energy_fn_layers,\n proposal=proposal)\n elif FLAGS.algo == \"his\":\n tf.logging.info(\"Running HIS\")\n model = his.FullyConnectedHIS(\n T=FLAGS.his_t,\n data_dim=[2],\n energy_hidden_sizes=energy_fn_layers,\n q_hidden_sizes=energy_fn_layers,\n init_step_size=FLAGS.his_stepsize,\n learn_stepsize=FLAGS.his_learn_stepsize,\n init_alpha=FLAGS.his_alpha,\n learn_temps=FLAGS.his_learn_alpha,\n proposal=proposal)\n elif FLAGS.algo == \"rejection_sampling\":\n model = rejection_sampling.RejectionSampling(\n T=FLAGS.K, data_dim=[2], energy_hidden_sizes=energy_fn_layers,\n proposal=proposal)\n samples = model.sample(FLAGS.batch_size)\n with tf.train.SingularMonitoredSession(\n checkpoint_dir=FLAGS.logdir) as sess:\n make_sample_density_summary(\n sess,\n samples,\n max_samples_per_batch=FLAGS.batch_size,\n num_samples=FLAGS.num_samples,\n num_bins=FLAGS.num_bins)\n\n\nif __name__ == \"__main__\":\n tf.app.run(main)\n" ]
[ [ "numpy.concatenate", "numpy.array", "matplotlib.pyplot.imsave", "tensorflow.image.convert_image_dtype", "tensorflow.Session", "numpy.load", "tensorflow.train.Saver", "numpy.eye", "tensorflow.constant", "tensorflow.placeholder", "tensorflow.global_variables_initializer" ], [ "tensorflow.reduce_min", "tensorflow.ones", "tensorflow.logical_not", "tensorflow.reshape", "numpy.finfo", "tensorflow.stack", "tensorflow.tile", "tensorflow.greater", "tensorflow.cast", "tensorflow.shape", "tensorflow.concat", "tensorflow.less", "tensorflow.while_loop", "tensorflow.TensorShape", "tensorflow.transpose", "tensorflow.nest.flatten", "tensorflow.constant", "tensorflow.reduce_logsumexp", "tensorflow.zeros", "tensorflow.range", "tensorflow.expand_dims", "tensorflow.gather_nd", "tensorflow.nest.map_structure", "tensorflow.nn.top_k", "tensorflow.as_dtype", "tensorflow.reduce_any", "tensorflow.equal", "tensorflow.tensor_scatter_nd_update", "tensorflow.slice" ], [ "tensorflow.trainable_variables", "tensorflow.concat", "tensorflow.expand_dims", "tensorflow.ones_like", "tensorflow.reshape", "tensorflow.nn.l2_loss", "tensorflow.reduce_max", "tensorflow.zeros_like", "tensorflow.squeeze", "tensorflow.reduce_sum", "tensorflow.to_float", "tensorflow.stop_gradient", "tensorflow.maximum", "tensorflow.one_hot", "tensorflow.reduce_mean", "tensorflow.square", "tensorflow.unstack", "tensorflow.image.grayscale_to_rgb" ], [ "tensorflow.exp", "tensorflow.reshape", "tensorflow.stack", "tensorflow.to_float", "tensorflow.shape", "tensorflow.app.flags.DEFINE_enum", "tensorflow.logging.info", "numpy.save", "tensorflow.app.run", "tensorflow.reduce_logsumexp", "tensorflow.train.SingularMonitoredSession", "tensorflow.logging.set_verbosity", "tensorflow.reduce_prod", "numpy.histogram2d", "tensorflow.app.flags.DEFINE_integer", "tensorflow.app.flags.DEFINE_string", "tensorflow.Graph", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.meshgrid", "tensorflow.app.flags.DEFINE_float", "tensorflow.gather" ] ]
hamedomidvar/associativeconv
[ "9930915abd3625871354df676865fc44eb92abf3" ]
[ "Implementations/CIFAR10/models/densenet.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\n#############################\nCODE_SIZE = 72\nSLICE_SHAPE = [12,12,3,3]\n#############################\n\nclass Bottleneck(nn.Module):\n def __init__(self, in_planes, growth_rate, CSG, reg_mode=False):\n super(Bottleneck, self).__init__()\n self.reg_mode = reg_mode\n\n self.CSG = CSG\n\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)\n\n self.bn2 = nn.BatchNorm2d(4*growth_rate)\n\n ###############################################################\n #### The following covolutional layer is replaced by our CSG generated kernel\n #self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)\n\n ###############################################################\n ## Here we set things up for the CSG including defining a code matrix for each layer\n self.filter_size2 = 3\n self.in_filters2 = 4*growth_rate\n self.out_filters2 = growth_rate\n self.num_slices2 = int(np.ceil(self.in_filters2/SLICE_SHAPE[0])*np.ceil(self.out_filters2/SLICE_SHAPE[1]))\n self.code2 = torch.nn.Parameter(torch.randn([self.num_slices2]+[CODE_SIZE]))\n self.kernel2 = None\n self.kernel2_defined = False\n\n def forward(self, x):\n\n out = self.conv1(F.relu(self.bn1(x)))\n\n #########################################\n ## Updating the kernel\n self.kernel2 = self.CSG(self.code2)\n self.kernel2 = self.kernel2.view(int(np.ceil(self.out_filters2/SLICE_SHAPE[0])*SLICE_SHAPE[0]),int(np.ceil(self.in_filters2/SLICE_SHAPE[1])*SLICE_SHAPE[1]),3,3)\n self.kernel2 = self.kernel2[:self.out_filters2, :self.in_filters2, :self.filter_size2, :self.filter_size2]\n self.kernel2_defined = True\n\n\n ###########################################\n ### This is replaced by our kernel\n #out = self.conv2(F.relu(self.bn2(out)))\n\n\n ###########################################\n ## Convolution with our kernel\n out = F.conv2d(F.relu(self.bn2(out)),self.kernel2,padding=1)\n out = torch.cat([out,x], 1)\n return out\n\n\nclass BottleneckOrg(nn.Module):\n def __init__(self, in_planes, growth_rate, CSG, reg_mode=False):\n super(BottleneckOrg, self).__init__()\n\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)\n\n self.bn2 = nn.BatchNorm2d(4*growth_rate)\n self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)\n\n\n def forward(self, x):\n\n out = self.conv1(F.relu(self.bn1(x)))\n out = self.conv2(F.relu(self.bn2(out)))\n out = torch.cat([out,x], 1)\n return out\n\nclass Transition(nn.Module):\n def __init__(self, in_planes, out_planes, reg_mode, last = False):\n super(Transition, self).__init__()\n\n self.last = last\n\n self.reg_mode = reg_mode\n\n self.bn = nn.BatchNorm2d(in_planes)\n if not last:\n self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)\n\n\n def forward(self, x):\n out = F.relu(self.bn(x))\n if not self.last:\n out = self.conv(out)\n #out = F.conv2d(out,self.kernel1,padding=1)\n out = F.avg_pool2d(out, 2)\n else:\n out = F.avg_pool2d(out, 8)\n return out\n\n\nclass DenseNet(nn.Module):\n def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10, reg_mode = False, org = False):\n super(DenseNet, self).__init__()\n\n self.growth_rate = growth_rate\n\n num_planes = 2*growth_rate\n self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)\n\n 
#################################################\n if not org:\n #############################################\n ## Here is where the CSG is defined.\n self.CSG = torch.nn.Linear(CODE_SIZE,np.prod(SLICE_SHAPE),bias=False)\n \n #### BINARY CSG:\n # nn.init.kaiming_normal_(self.CSG.weight)\n # self.CSG.weight.data = torch.sign(self.CSG.weight.data)*0.5\n # self.CSG.weight.requires_grad_(False)\n\n else:\n self.CSG = None\n\n\n #################################################\n ## The following is based on densenet\n\n self.dense1 = self._make_dense_layers(block, num_planes, nblocks, reg_mode=reg_mode)\n num_planes += nblocks*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans1 = Transition(num_planes, out_planes, reg_mode=reg_mode)\n num_planes = out_planes\n\n self.dense2 = self._make_dense_layers(block, num_planes, nblocks, reg_mode=reg_mode)\n num_planes += nblocks*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans2 = Transition(num_planes, out_planes, reg_mode=reg_mode)\n num_planes = out_planes\n\n self.dense3 = self._make_dense_layers(block, num_planes, nblocks, reg_mode=reg_mode)\n num_planes += nblocks*growth_rate\n out_planes = int(math.floor(num_planes*reduction))\n self.trans3 = Transition(num_planes, out_planes, reg_mode=reg_mode, last = True)\n\n self.linear = nn.Linear(num_planes, num_classes)\n\n def _make_dense_layers(self, block, in_planes, nblock, reg_mode=False):\n layers = []\n for i in range(nblock):\n ###################################################\n ## We merely pass the reference of CSG to all blocks\n layers.append(block(in_planes, self.growth_rate, self.CSG, reg_mode=reg_mode))\n in_planes += self.growth_rate\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.trans1(self.dense1(out))\n out = self.trans2(self.dense2(out))\n out = self.trans3(self.dense3(out))\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\ndef densenet_cifar(reg_mode=False):\n # L = 40 --> (40-4)/3 = 12 --> Using Bottleneck --> 12/2 = 6 = nblock\n return DenseNet(Bottleneck, 6, growth_rate=48, reg_mode=reg_mode)\n\ndef densenet_cifar_org(reg_mode=False):\n return DenseNet(BottleneckOrg, 6, growth_rate=48, reg_mode=reg_mode, org = True)\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.nn.functional.avg_pool2d", "numpy.ceil", "torch.nn.Sequential", "torch.nn.BatchNorm2d", "torch.nn.Conv2d", "numpy.prod", "torch.randn" ] ]
minus31/BlazeFace
[ "bddfb3261868b1a888408898c3de9bf6e12a372d" ]
[ "implementation/dataloader.py" ]
[ "import cv2\nimport pickle\nimport glob \nimport os \nimport numpy as np \n\n\nIM_EXTENSIONS = ['png', 'jpg', 'bmp']\n\n\ndef read_img(img_path, img_shape=(128,128)):\n \"\"\"\n load image file and divide by 255.\n \"\"\"\n img = cv2.imread(img_path)\n img = cv2.resize(img, img_shape)\n img /= 255.\n\n return img\n\n\ndef dataloader(dataset_dir, label_path, batch_size=32, img_shape=(128, 128)):\n\n \"\"\"\n data loader\n\n return image, [class_label, class_and_location_label]\n \"\"\"\n \n img_files = glob.glob(dataset_dir)\n img_files = [f for f in img_files if f[-3:] in IM_EXTENSIONS]\n\n with open(label_path, \"rb\") as f:\n labels = pickle.load(f)\n \n numofData = len(img_files)# endwiths(png,jpg ...)\n data_idx = np.arange(numofData)\n \n while True:\n batch_idx = np.random.choice(data_idx, size=batch_size, replace=False)\n \n batch_img = []\n batch_label = []\n batch_label_cls = []\n \n for i in batch_idx:\n \n img = read_img(img_files[i], img_shape=img_shape)\n label = labels[i]\n \n batch_img.append(img)\n batch_label.append(label)\n batch_label_cls.append(label[0:1])\n \n yield np.array(batch_img, dtype=np.float32), \n [np.array(batch_label_cls, dtype=np.float32), np.array(batch_label, dtype=np.float32)]\n\n\nif __name__ == \"__main__\":\n pass\n\n" ]
[ [ "numpy.array", "numpy.arange", "numpy.random.choice" ] ]
dawidvdh/programmers-introduction-to-mathematics
[ "2345a118f055bb7f98140ee58d5332c6691e1fc1" ]
[ "secret-sharing/plot_collisions.py" ]
[ "if __name__ == \"__main__\":\n import matplotlib as mpl\n mpl.use('TkAgg')\n\n import numpy as np\n import matplotlib.pyplot as plt\n from interpolate import *\n\n # Create a figure of size 8x6 points, 80 dots per inch\n plt.figure(figsize=(8, 8), dpi=80)\n\n # starting from these base points, we compute different polynomials\n # passing through these two points with a whatever decoded \"secret\" we wish.\n points = [(2., 1083.), (5., 6609.)]\n\n f = interpolate(points + [(0., 533.)])\n x = np.linspace(-1.0, 6.0, 256, endpoint=True)\n y = np.array([f.evaluateAt(p) for p in x])\n plt.plot(x, y, color=\"black\", linewidth=3.0, linestyle=\"-\")\n\n f = interpolate(points + [(0., 2000.)])\n y = np.array([f.evaluateAt(p) for p in x])\n plt.plot(x, y, color=\"black\", linewidth=3.0, linestyle=\"-.\")\n\n f = interpolate(points + [(0., 5000.)])\n y = np.array([f.evaluateAt(p) for p in x])\n plt.plot(x, y, color=\"black\", linewidth=3.0, linestyle=\":\")\n\n f = interpolate(points + [(0., 7500.)])\n y = np.array([f.evaluateAt(p) for p in x])\n plt.plot(x, y, color=\"black\", linewidth=3.0, linestyle=\"--\")\n\n plt.scatter(*zip(*points), zorder=10)\n\n p0 = points[0]\n plt.annotate(\n \"({}, {})\".format(*p0),\n xy=(p0[0], p0[1]),\n xytext=(p0[0], p0[1] - 1000),\n arrowprops=dict(facecolor='black', shrink=0.01))\n\n p1 = points[1]\n plt.annotate(\n \"({}, {})\".format(*p1),\n xy=(p1[0], p1[1]),\n xytext=(p1[0] - 1.5, p1[1] + 500),\n arrowprops=dict(facecolor='black', shrink=0.01))\n\n # Set labels\n plt.xlim(0, 6)\n plt.ylim(-200, 8000)\n ax = plt.gca()\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n\n # Save the figure to the output SVG file\n plt.savefig(\"polynomials-perfect-secrecy.svg\")\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.xlim", "matplotlib.pyplot.gca", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "numpy.linspace" ] ]
slimgroup/dfno
[ "3751ceddee14b949503300f6570ff56bac34c6e6" ]
[ "tests/gradient_test_torch.py" ]
[ "from gradient_test import gradient_test\n\nimport gc\nimport torch\nimport torch.nn as nn\n\ninput_shape = (16, 16)\nf = nn.Sequential(\n nn.Linear(16, 16, dtype=torch.float64),\n nn.Linear(16, 16, dtype=torch.float64)\n)\n\n# Initialize lazy parameters\nwith torch.no_grad():\n x = torch.rand(*input_shape, dtype=torch.float64, device=torch.device('cpu'))\n y = f(x)\n del x\n del y\n gc.collect()\n\n# Run test\nall_ok = True\nfor r in gradient_test(f, input_shape):\n print(str(r))\n all_ok = all_ok and r.converged[0] and r.converged[1]\n\nif all_ok:\n print(f'passed gradcheck')\nelse:\n print(f'failed gradcheck')\n" ]
[ [ "torch.nn.Linear", "torch.device", "torch.no_grad" ] ]
apple35932003/IVS-Caffe
[ "54bb78daab04a8188d0df9ef5b8f4855082e0da5" ]
[ "lib/datasets/imdb.py" ]
[ "# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nimport os\nimport os.path as osp\nimport PIL\nfrom utils.cython_bbox import bbox_overlaps\nimport numpy as np\nimport scipy.sparse\nfrom fast_rcnn.config import cfg\n\nclass imdb(object):\n \"\"\"Image database.\"\"\"\n\n def __init__(self, name):\n self._name = name\n self._num_classes = 0\n self._classes = []\n self._image_index = []\n self._obj_proposer = 'selective_search'\n self._roidb = None\n self._roidb_handler = self.default_roidb\n # Use this dict for storing dataset specific config options\n self.config = {}\n\n @property\n def name(self):\n return self._name\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def classes(self):\n return self._classes\n\n @property\n def image_index(self):\n return self._image_index\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n\n @roidb_handler.setter\n def roidb_handler(self, val):\n self._roidb_handler = val\n\n def set_proposal_method(self, method):\n method = eval('self.' + method + '_roidb')\n self.roidb_handler = method\n\n @property\n def roidb(self):\n # A roidb is a list of dictionaries, each with the following keys:\n # boxes\n # gt_overlaps\n # gt_classes\n # flipped\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))\n if not os.path.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n\n @property\n def num_images(self):\n return len(self.image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def default_roidb(self):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n def _get_widths(self):\n return [PIL.Image.open(self.image_path_at(i)).size[0]\n for i in xrange(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in xrange(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = widths[i] - oldx2 - 1\n boxes[:, 2] = widths[i] - oldx1 - 1\n #TODO remove the following 3 line and solve 1-based 0-based annotation\n for b in range(len(boxes)):\n if boxes[b][2]< boxes[b][0]:\n boxes[b][0] = 0\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes' : boxes,\n 'gt_overlaps' : self.roidb[i]['gt_overlaps'],\n 'gt_classes' : self.roidb[i]['gt_classes'],\n 'flipped' : True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n def evaluate_recall(self, candidate_boxes=None, thresholds=None,\n area='all', limit=None):\n \"\"\"Evaluate detection proposal recall metrics.\n\n Returns:\n results: dictionary of results with keys\n 'ar': average recall\n 'recalls': vector recalls at each IoU overlap threshold\n 'thresholds': vector of IoU overlap thresholds\n 'gt_overlaps': vector of all ground-truth overlaps\n \"\"\"\n # Record max overlap 
value for each gt box\n # Return vector of overlap values\n areas = { 'all': 0, 'small': 1, 'medium': 2, 'large': 3,\n '96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}\n area_ranges = [ [0**2, 1e5**2], # all\n [0**2, 32**2], # small\n [32**2, 96**2], # medium\n [96**2, 1e5**2], # large\n [96**2, 128**2], # 96-128\n [128**2, 256**2], # 128-256\n [256**2, 512**2], # 256-512\n [512**2, 1e5**2], # 512-inf\n ]\n assert areas.has_key(area), 'unknown area range: {}'.format(area)\n area_range = area_ranges[areas[area]]\n gt_overlaps = np.zeros(0)\n num_pos = 0\n for i in xrange(self.num_images):\n # Checking for max_overlaps == 1 avoids including crowd annotations\n # (...pretty hacking :/)\n max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)\n gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &\n (max_gt_overlaps == 1))[0]\n gt_boxes = self.roidb[i]['boxes'][gt_inds, :]\n gt_areas = self.roidb[i]['seg_areas'][gt_inds]\n valid_gt_inds = np.where((gt_areas >= area_range[0]) &\n (gt_areas <= area_range[1]))[0]\n gt_boxes = gt_boxes[valid_gt_inds, :]\n num_pos += len(valid_gt_inds)\n\n if candidate_boxes is None:\n # If candidate_boxes is not supplied, the default is to use the\n # non-ground-truth boxes from this roidb\n non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]\n boxes = self.roidb[i]['boxes'][non_gt_inds, :]\n else:\n boxes = candidate_boxes[i]\n if boxes.shape[0] == 0:\n continue\n if limit is not None and boxes.shape[0] > limit:\n boxes = boxes[:limit, :]\n\n overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n\n _gt_overlaps = np.zeros((gt_boxes.shape[0]))\n for j in xrange(gt_boxes.shape[0]):\n # find which proposal box maximally covers each gt box\n argmax_overlaps = overlaps.argmax(axis=0)\n # and get the iou amount of coverage for each gt box\n max_overlaps = overlaps.max(axis=0)\n # find which gt box is 'best' covered (i.e. 
'best' = most iou)\n gt_ind = max_overlaps.argmax()\n gt_ovr = max_overlaps.max()\n assert(gt_ovr >= 0)\n # find the proposal box that covers the best covered gt box\n box_ind = argmax_overlaps[gt_ind]\n # record the iou coverage of this gt box\n _gt_overlaps[j] = overlaps[box_ind, gt_ind]\n assert(_gt_overlaps[j] == gt_ovr)\n # mark the proposal box and the gt box as used\n overlaps[box_ind, :] = -1\n overlaps[:, gt_ind] = -1\n # append recorded iou coverage level\n gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))\n\n gt_overlaps = np.sort(gt_overlaps)\n if thresholds is None:\n step = 0.05\n thresholds = np.arange(0.5, 0.95 + 1e-5, step)\n recalls = np.zeros_like(thresholds)\n # compute recall for each iou threshold\n for i, t in enumerate(thresholds):\n recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)\n # ar = 2 * np.trapz(recalls, thresholds)\n ar = recalls.mean()\n return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,\n 'gt_overlaps': gt_overlaps}\n\n def create_roidb_from_box_list(self, box_list, gt_roidb):\n assert len(box_list) == self.num_images, \\\n 'Number of boxes must match number of ground-truth images'\n roidb = []\n for i in xrange(self.num_images):\n boxes = box_list[i]\n num_boxes = boxes.shape[0]\n overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)\n\n if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:\n gt_boxes = gt_roidb[i]['boxes']\n gt_classes = gt_roidb[i]['gt_classes']\n gt_overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n argmaxes = gt_overlaps.argmax(axis=1)\n maxes = gt_overlaps.max(axis=1)\n I = np.where(maxes > 0)[0]\n overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n roidb.append({\n 'boxes' : boxes,\n 'gt_classes' : np.zeros((num_boxes,), dtype=np.int32),\n 'gt_overlaps' : overlaps,\n 'flipped' : False,\n 'seg_areas' : np.zeros((num_boxes,), dtype=np.float32),\n })\n return roidb\n\n @staticmethod\n def merge_roidbs(a, b):\n assert len(a) == len(b)\n for i in xrange(len(a)):\n a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))\n a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],\n b[i]['gt_classes']))\n a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],\n b[i]['gt_overlaps']])\n a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],\n b[i]['seg_areas']))\n return a\n\n def competition_mode(self, on):\n \"\"\"Turn competition mode on or off.\"\"\"\n pass\n" ]
[ [ "numpy.zeros_like", "numpy.zeros", "numpy.where", "numpy.arange", "numpy.sort", "numpy.hstack", "numpy.vstack" ] ]
Aid91/Deep_Q_Learning_ATARI
[ "3e35f0bd21120774b48a918267055fb6f8a7d90e" ]
[ "dqn/pong_dqn_conv.py" ]
[ "\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport numpy.random as rd\nimport gym\nimport cv2\n\nimport matplotlib.pyplot as plt\nfrom tf_tools import variable_summaries\nfrom replay_memory import *\n\n\nenv = gym.make(\"Pong-v0\")\n\nmini_batch_size = 32\nreplay_memory_size = 100000\nreplay_memory = ReplayMemory(replay_memory_size)\n\nshould_render = False\ndouble_q_learning = False\n\n# 0-> no movement, 2->UP, 3->DOWN\nn_action = 3\naction_list = [0, 2, 3]\n\nprint_per_episode = 1\nn_train_trials = 4000\nn_test_trials = 10\ntrial_duration = 10000\ngamma = 0.99\nlearning_rate = 0.00025\n\n\nstate_holder = tf.placeholder(\"float\", [None, 84, 84, 4])\ntarget_placeholder = tf.placeholder(tf.float32, shape=(None,))\nactions_placeholder = tf.placeholder(tf.float32, shape=(None, n_action))\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev = 0.01)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.01, shape = shape)\n return tf.Variable(initial)\n\ndef conv2d(x, W, stride):\n return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = \"VALID\")\n\nwith tf.name_scope('conv_target_Q_network'):\n # network weights\n W_conv_tar1 = weight_variable([8, 8, 4, 32])\n b_conv_tar1 = bias_variable([32])\n\n W_conv_tar2 = weight_variable([4, 4, 32, 64])\n b_conv_tar2 = bias_variable([64])\n\n W_conv_tar3 = weight_variable([3, 3, 64, 64])\n b_conv_tar3 = bias_variable([64])\n\n W_fc_tar1 = weight_variable([7*7*64, 512])\n b_fc_tar1 = bias_variable([512])\n\n W_fc_tar2 = weight_variable([512, n_action])\n b_fc_tar2 = bias_variable([n_action])\n\n # hidden layers\n h_conv1 = tf.nn.relu(conv2d(state_holder, W_conv_tar1, 4) + b_conv_tar1)\n\n h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv_tar2, 2) + b_conv_tar2)\n\n h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv_tar3, 1) + b_conv_tar3)\n\n h_conv3_flat = tf.reshape(h_conv3, [-1, 7*7*64])\n\n h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc_tar1) + b_fc_tar1)\n\n # output layer\n target_Q= tf.matmul(h_fc1, W_fc_tar2) + b_fc_tar2\n\nwith tf.name_scope('conv_Q_network'):\n # network weights\n W_conv1 = weight_variable([8, 8, 4, 32])\n b_conv1 = bias_variable([32])\n\n W_conv2 = weight_variable([4, 4, 32, 64])\n b_conv2 = bias_variable([64])\n\n W_conv3 = weight_variable([3, 3, 64, 64])\n b_conv3 = bias_variable([64])\n\n W_fc1 = weight_variable([7 * 7 * 64, 512])\n b_fc1 = bias_variable([512])\n\n W_fc2 = weight_variable([512, n_action])\n b_fc2 = bias_variable([n_action])\n\n # hidden layers\n h_conv1 = tf.nn.relu(conv2d(state_holder, W_conv1, 4) + b_conv1)\n\n h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2, 2) + b_conv2)\n\n h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 1) + b_conv3)\n\n h_conv3_flat = tf.reshape(h_conv3, [-1, 7 * 7 * 64])\n\n h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)\n\n # output layer\n Q= tf.matmul(h_fc1, W_fc2) + b_fc2\n\n # defining the placeholders and ops for copying the parameters for Q to target-Q network\n W_update_placeholder = tf.placeholder(W_conv1.dtype, shape=W_conv1.get_shape())\n W_update_placeholder_op = W_conv_tar1.assign(W_update_placeholder)\n\n w_update_placeholder = tf.placeholder(W_conv3.dtype, shape=W_conv3.get_shape())\n w_update_placeholder_op = W_conv_tar3.assign(w_update_placeholder)\n\n b_update_placeholder = tf.placeholder(b_conv3.dtype, shape=b_conv3.get_shape())\n b_update_placeholder_op = b_conv_tar3.assign(b_update_placeholder)\n\n bhid_update_placeholder = tf.placeholder(b_conv1.dtype, 
shape=b_conv1.get_shape())\n bhid_update_placeholder_op = b_conv_tar1.assign(bhid_update_placeholder)\n\n W2_update_placeholder = tf.placeholder(W_conv2.dtype, shape=W_conv2.get_shape())\n W2_update_placeholder_op = W_conv_tar2.assign(W2_update_placeholder)\n\n b2_update_placeholder = tf.placeholder(b_conv2.dtype, shape=b_conv2.get_shape())\n b2_update_placeholder_op = b_conv_tar2.assign(b2_update_placeholder)\n\n W_fc1_update_placeholder = tf.placeholder(W_fc1.dtype, shape=W_fc1.get_shape())\n W_fc1_update_placeholder_op = W_fc_tar1.assign(W_fc1_update_placeholder)\n\n b_fc1_update_placeholder = tf.placeholder(b_fc1.dtype, shape=b_fc1.get_shape())\n b_fc1_update_placeholder_op = b_fc_tar1.assign(b_fc1_update_placeholder)\n\n W_fc2_update_placeholder = tf.placeholder(W_fc2.dtype, shape=W_fc2.get_shape())\n W_fc2_update_placeholder_op = W_fc_tar2.assign(W_fc2_update_placeholder)\n\n b_fc2_update_placeholder = tf.placeholder(b_fc2.dtype, shape=b_fc2.get_shape())\n b_fc2_update_placeholder_op = b_fc_tar2.assign(b_fc2_update_placeholder)\n\n variable_summaries(Q, '/Q_values')\n\n max_Q = tf.reduce_max(Q, reduction_indices=1)\n variable_summaries(max_Q, '/max_Q')\n\n acc_episode_reward_placeholder = tf.placeholder(dtype=tf.float32)\n variable_summaries(acc_episode_reward_placeholder, '/acc_episode_reward')\n\n\nwith tf.name_scope('loss'):\n prediction = tf.reduce_sum(tf.mul(Q, actions_placeholder), 1)\n error = tf.reduce_mean((tf.square(target_placeholder - prediction)))\n variable_summaries(prediction, '/prediction')\n variable_summaries(target_placeholder, '/target')\n variable_summaries(error, '/error')\n\nwith tf.name_scope('train'):\n # We define the optimizer to use the RMSProp optimizer, and ask it to minimize our loss\n training_step = tf.train.RMSPropOptimizer(learning_rate=learning_rate, decay=0.99, epsilon=1e-6).minimize(error)\n\n# train step used in the test without experience replay\n# defines sampling and generating actions, states and targets\ndef train_step_no_experience(state,action,reward,next_state,done):\n targets = []\n stats = []\n actns = []\n\n target = 0\n if done:\n target += reward\n else:\n Q_values = sess.run(target_Q, feed_dict={state_holder: next_state.reshape(1, 84, 84, 4)})\n val = np.max(Q_values[0, :])\n target += reward + gamma * val\n\n action_t = np.zeros(n_action)\n action_t[action] = 1\n\n actns.append(action_t)\n stats.append(state)\n targets.append(target)\n\n return actns, stats, targets\n\n# train step used in the real run with experience replay\n# defines sampling and generating actions, states and targets\ndef train_step():\n targets = []\n stats = []\n actns = []\n\n if replay_memory.length() > mini_batch_size:\n states, actions, rewards, next_states, terminals = replay_memory.sample(mini_batch_size)\n\n\n for i in range(len(terminals)):\n target = 0\n if terminals[i]:\n target += rewards[i]\n else:\n Q_values = sess.run(target_Q, feed_dict={state_holder: next_states[i].reshape(1,84,84,4)})\n val = np.max(Q_values[0, :])\n target += rewards[i] + gamma * val\n\n action_t = np.zeros(n_action)\n action_t[actions[i]] = 1\n\n actns.append(action_t)\n stats.append(states[i])\n targets.append(target)\n\n return actns, stats, targets\n\n# definition of epsilon-greedy policy\ndef policy(state):\n '''\n This implements an epsilon-greedy policy:\n - with probability epsilon take a random action\n - otherwise take the action that maximizes Q(s,a) with s the current state\n\n :param state: current (preprocessed) state\n :return: index of the chosen action\n '''\n if rd.rand() < 
epsilon:\n return rd.randint(0, n_action)\n\n Q_values = sess.run(Q,\n feed_dict={state_holder: state.reshape(1,84,84,4)\n })\n val = np.max(Q_values[0, :])\n max_indices = np.where(Q_values[0, :] == val)[0]\n return rd.choice(max_indices)\n\nsess = tf.Session() # FOR NOW everything is symbolic, this object has to be called to compute each value of Q\n\n# SUMMARIES\nmerged = tf.merge_all_summaries()\nsuffix = time.strftime('%Y-%m-%d--%H-%M-%S')\ntrain_writer = tf.train.SummaryWriter('tensorboard/pong/{}'.format(suffix) + '/train', sess.graph)\ntest_writer = tf.train.SummaryWriter('tensorboard/pong/{}'.format(suffix) + '/test')\n# Start\n\nsess.run(tf.initialize_all_variables())\n\nsingle_episode_rewards = []\nepisode_rewards_list = []\nepisode_steps_list = []\n\nstep = 0\nepisode_no = 0\n\nreward_list = []\nerr_list = []\nval_list = []\nerr_sum_list = []\n\nstart_learning = 100000\nepsilon_start = 1.\nepsilon_end = .1\n\nC = 10000\nannealing_period = 500000\nupdate_frequency = 4\nacc_t = 0\n\nfor k in range(n_train_trials + n_test_trials):\n\n observation = env.reset() # Init the state\n error_list = []\n\n # preprocess the initial observation by scaling it to 84x84x1 and binarizing it\n # stack the last 4 frames of the same image at the beginning\n obs = cv2.cvtColor(cv2.resize(observation, (84, 84)), cv2.COLOR_BGR2GRAY)\n ret, obs = cv2.threshold(obs, 100, 255, cv2.THRESH_BINARY)\n processed_observation = np.stack((obs, obs, obs, obs), axis=2)\n\n for t in range(trial_duration): # upper bound on the number of time steps per episode\n\n acc_t += 1\n step += 1\n\n epsilon = (epsilon_end +\n max(0., (epsilon_start - epsilon_end)\n * (annealing_period - max(0., acc_t - start_learning)) / annealing_period))\n\n if acc_t % 500 == 0:\n print ('T: %d' % acc_t)\n print ('Epsilon: %.10f' % epsilon)\n\n\n if should_render: env.render()\n\n action = policy(processed_observation) # Choose the next action epsilon-greedily\n\n observation, reward, done, info = env.step(action_list[action]) # Take the action\n\n # preprocess the new observation by scaling it to 84x84x1 and binarizing it\n obs = cv2.cvtColor(cv2.resize(observation, (84, 84)), cv2.COLOR_BGR2GRAY)\n _, obs = cv2.threshold(obs, 100, 255, cv2.THRESH_BINARY)\n obs = np.reshape(obs, (84, 84, 1))\n processed_new_observation = np.append(processed_observation[:, :, 1:],obs, axis=2)\n\n replay_memory.add(state=processed_observation, action=action, reward=reward, next_state=processed_new_observation, is_terminal=done)\n\n processed_observation = processed_new_observation\n\n err = 0\n if acc_t > start_learning:\n\n if acc_t % 500 == 0:\n print ('STARTED TRAINING')\n\n\n if t % update_frequency == 0:\n\n #actions, states, targets = train_step_no_experience(state=processed_new_observation, action=action, done=done, next_state=processed_new_observation, reward=reward)\n\n actions, states, targets = train_step()\n\n # Perform one step of gradient descent\n summary, _ = sess.run([merged, training_step], feed_dict={\n state_holder: np.array(states),\n target_placeholder: np.array(targets),\n actions_placeholder: np.array(actions),\n acc_episode_reward_placeholder: np.sum(single_episode_rewards)\n })\n\n train_writer.add_summary(summary, acc_t)\n\n # Compute the Error for monitoring\n err = sess.run(error, feed_dict={\n target_placeholder: np.array(targets),\n state_holder: np.array(states),\n actions_placeholder: np.array(actions),\n })\n\n # copy the parameters of the Q network to target-Q network\n if acc_t % C == 0:\n 
sess.run(W_update_placeholder_op, {W_update_placeholder: W_conv1.eval(sess)})\n sess.run(w_update_placeholder_op, {w_update_placeholder: W_conv3.eval(sess)})\n sess.run(b_update_placeholder_op, {b_update_placeholder: b_conv3.eval(sess)})\n sess.run(bhid_update_placeholder_op, {bhid_update_placeholder: b_conv1.eval(sess)})\n sess.run(W2_update_placeholder_op, {W2_update_placeholder: W_conv2.eval(sess)})\n sess.run(b2_update_placeholder_op, {b2_update_placeholder: b_conv2.eval(sess)})\n\n sess.run(W_fc1_update_placeholder_op, {W_fc1_update_placeholder: W_fc1.eval(sess)})\n sess.run(b_fc1_update_placeholder_op, {b_fc1_update_placeholder: b_fc1.eval(sess)})\n sess.run(W_fc2_update_placeholder_op, {W_fc2_update_placeholder: W_fc2.eval(sess)})\n sess.run(b_fc2_update_placeholder_op, {b_fc2_update_placeholder: b_fc2.eval(sess)})\n\n err_list.append(err)\n single_episode_rewards.append(reward)\n\n\n if done:\n # Done with episode. Reset stuff.\n\n episode_no += 1\n\n err_sum_list.append(np.mean(err_list))\n episode_rewards_list.append(np.sum(single_episode_rewards))\n episode_steps_list.append(step) # record the episode length before resetting the counter\n step = 0\n\n single_episode_rewards = []\n err_list = []\n\n if episode_no % print_per_episode == 0:\n print('ERROR {}'.format(np.mean(err_sum_list)))\n\n print(\"Average REWARDS in last {} episodes before episode {}\".format(print_per_episode, episode_no),\n np.mean(episode_rewards_list[(episode_no - print_per_episode):episode_no]), '+-',\n np.std(episode_rewards_list[(episode_no - print_per_episode):episode_no])\n )\n print(\"Average STEPS in last {} episodes before episode {}\".format(print_per_episode, episode_no),\n np.mean(episode_steps_list[(episode_no - print_per_episode):episode_no]), '+-',\n np.std(episode_steps_list[(episode_no - print_per_episode):episode_no])\n )\n break\n\nplt.figure()\nax = plt.subplot(121)\nax.plot(range(len(episode_rewards_list)), episode_rewards_list)\nax.set_title(\"Training rewards\")\nax.set_xlabel('Episode number')\nax.set_ylabel('Episode reward')\nplt.show()" ]
[ [ "numpy.random.choice", "tensorflow.nn.conv2d", "numpy.random.rand", "tensorflow.matmul", "tensorflow.merge_all_summaries", "tensorflow.reshape", "numpy.mean", "tensorflow.mul", "numpy.where", "numpy.max", "tensorflow.Variable", "tensorflow.constant", "numpy.random.randint", "numpy.append", "matplotlib.pyplot.subplot", "numpy.array", "tensorflow.initialize_all_variables", "numpy.zeros", "numpy.reshape", "tensorflow.train.RMSPropOptimizer", "tensorflow.Session", "matplotlib.pyplot.figure", "tensorflow.truncated_normal", "numpy.std", "numpy.stack", "tensorflow.placeholder", "tensorflow.name_scope", "matplotlib.pyplot.show", "numpy.sum", "tensorflow.reduce_max", "tensorflow.square" ] ]
domschl/ml-indie-tools
[ "70a458a9e385acc883b979f3db8e6d391fdb863b" ]
[ "src/ml_indie_tools/keras_custom_layers.py" ]
[ "import tensorflow as tf\ntry:\n # the endless shuffle of keras modules\n import tensorflow.keras as keras\n from tensorflow.keras import layers\n print(\"Using TF-Keras version:\", keras.__version__)\nexcept ImportError:\n import keras\n import keras.layers as layers\n print(\"Using Keras version:\", keras.__version__)\nimport numpy as np\nimport math\n\n\n# sphinx-doc hickup: a member named `call` seems to cause all kinds of sphinx-hickup\n# error starting with non-existing line-12 docstrings, if automatic :member: doc\n# is activated in index.rst.\n\nclass ResidualBlock(layers.Layer):\n \"\"\" Residual Block layer for Keras\n\n The residual block consists of two fully connected layers with units neurons \n followed by two BatchNorms and ReLUs:\n\n .. code-block:: none\n\n # ┌──────────────────────────────────────────────────┐\n # │ ┌─────┐ ┌──┐ ┌────┐ ┌─────┐ ┌──┐ ┌────┐ ▼\n # ──┴─►│Dense│─►│BN│─►│ReLU│───►│Dense│─►│BN│─►│ReLU│─ + ─► highway=True\n # └─────┘ └──┘ └────┘ └─────┘ └──┘ └────┘\n #\n # ┌──────────────────────────────────────────┐\n # │ ┌─────┐ ┌──┐ ┌────┐ ┌─────┐ ┌──┐ ▼ ┌────┐\n # ──┴─►│Dense│─►│BN│─►│ReLU│───►│Dense│─►│BN│─ + ─►│ReLU│─► highway=False\n # └─────┘ └──┘ └────┘ └─────┘ └──┘ └────┘\n\n The additive residual connection either bridges all layers (highway), or \n connects just before the last ReLU.\n\n :param units: Positive integer, number of hidden units.\n :param highway: Boolean, whether to use highway connection or not.\n \"\"\"\n def __init__(self, units, highway=False, **kwargs):\n self.units=units\n self.highway=highway\n super(ResidualBlock, self).__init__(**kwargs)\n self.dense1 = layers.Dense(self.units)\n self.bn1 = layers.BatchNormalization()\n self.relu = layers.ReLU()\n self.dense2 = layers.Dense(self.units)\n self.bn2 = layers.BatchNormalization()\n self.relu2 = layers.ReLU()\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'units': self.units,\n 'highway': self.highway\n })\n return config\n\n def call(self, inputs): # This member name kills sphinx's autodoc for members! Beware!\n x=self.dense1(inputs)\n x=self.bn1(x)\n x=self.relu(x)\n x=self.dense2(x)\n x=self.bn2(x)\n if self.highway:\n x=self.relu2(x)\n x=x+inputs\n else:\n x=x+inputs\n x=self.relu2(x)\n return x\n\nclass ResidualDense(layers.Layer):\n \"\"\" Residual Dense layer for Keras\n\n The residual dense layer consists of a fully connected layer followed by BatchNorm and ReLU:\n\n .. 
code-block:: none\n\n # ┌─────────────────────────┐\n # │ ┌─────┐ ┌──┐ ┌────┐ ▼\n # ──┴─►│Dense│─►│BN│─►│ReLU│─ + ─►\n # └─────┘ └──┘ └────┘\n\n :param units: Positive integer, number of hidden units.\n :param regularizer: Positive float, regularization strength for the Dense layer.\n \"\"\"\n def __init__(self, units, regularizer=0, **kwargs):\n self.units=units\n self.regularizer=regularizer\n super(ResidualDense, self).__init__(**kwargs)\n if self.regularizer != 0:\n self.dense1 = layers.Dense(self.units, \n kernel_regularizer=keras.regularizers.l2(self.regularizer))\n else:\n self.dense1 = layers.Dense(self.units) \n self.bn1 = layers.BatchNormalization()\n self.relu = layers.ReLU()\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'units': self.units,\n 'regularizer': self.regularizer\n })\n return config\n\n def call(self, inputs):\n x=self.dense1(inputs)\n x=self.relu(x)\n x=self.bn1(x)\n x=x+inputs\n return x\n\nclass ResidualDenseStack(layers.Layer):\n \"\"\" Residual Dense layer for Keras\n\n The residual dense layer stack consists of `layer_count` :class:`ResidualDense` layers.\n\n .. code-block:: none\n\n # ┌─────────── n ─────────────┐ n = layer_count repetitions\n # ┌─────────────────────────┐\n # │ ┌─────┐ ┌──┐ ┌────┐ ▼\n # ──┴─►│Dense│─►│BN│─►│ReLU│─ + ─►\n # └─────┘ └──┘ └────┘\n\n :param units: Positive integer, number of hidden units.\n :param layer_count: Positive integer, number of layer-blocks, each a `ResidualDense` block.\n :param regularizer: Positive float, regularization strength for the Dense layer.\n \"\"\"\n def __init__(self, units, layer_count, regularizer=0, **kwargs):\n self.units=units\n self.layer_count=layer_count\n self.regularizer=regularizer\n\n super(ResidualDenseStack, self).__init__(**kwargs)\n self.rd=[]\n for _ in range(0, self.layer_count):\n self.rd.append(ResidualDense(self.units, regularizer=self.regularizer))\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'units': self.units,\n 'layers': self.layer_count,\n 'regularizer': self.regularizer\n })\n return config\n\n def call(self, inputs):\n x=self.rd[0](inputs)\n for i in range(1, self.layer_count):\n x=self.rd[i](x)\n return x\n\nclass ParallelResidualDenseStacks(layers.Layer):\n \"\"\" Parallel Residual Dense Stacks layer for Keras\n\n The parallel residual dense layer stacks consist of `stacks` count parallel\n :class:`ResidualDenseStack`, each of which consists of `layer_count` :class:`ResidualDense` \n layers. The output of all parallel stacks is concatenated and scaled down to `units` units.\n\n .. code-block:: none\n\n # ┌─────────── n ─────────────┐ n = layer_count repetitions\n # ┌─────────────────────────┐ \n # │ ┌─────┐ ┌──┐ ┌────┐ ▼ ┌──────┐ \n # ┌─────┴─►│Dense│─►│BN│─►│ReLU│─ + ─► │ │ \n # │ └─────┘ └──┘ └────┘ │ │ \n # │ │ │\n # │ ┌─────────── n ─────────────┐ │ │\n # │ ┌─────────────────────────┐ │ │ \n # │ │ ┌─────┐ ┌──┐ ┌────┐ ▼ │concat│ ┌─────┐ ┌────┐ \n # ├─────┴─►│Dense│─►│BN│─►│ReLU│─ + ─► │ │ ─►│Dense│─►│ReLU│─► \n # ──┤ └─────┘ └──┘ └────┘ │ │ └─────┘ └────┘ \n # │ . │ │ scale down to\n # │ . `stacks` reps │ │ `units`.\n # │ . 
│ │\n # │ ┌─────────── n ─────────────┐ │ │\n # │ ┌─────────────────────────┐ │ │ \n # │ │ ┌─────┐ ┌──┐ ┌────┐ ▼ │ │ \n # └─────┴─►│Dense│─►│BN│─►│ReLU│─ + ─► │ │ \n # └─────┘ └──┘ └────┘ └──────┘ \n\n :param units: Positive integer, number of hidden units.\n :param layer_count: Positive integer, number of layer-blocks, each a `ResidualDense` block.\n :param stacks: Positive integer, number of parallel stacks.\n :param regularizer: Positive float, regularization strength for the Dense layer.\n \"\"\"\n def __init__(self, units, layer_count, stacks, dispatch, regularizer=0, **kwargs):\n super(ParallelResidualDenseStacks, self).__init__(**kwargs)\n self.units=units\n self.layer_count=layer_count\n self.stacks=stacks\n self.dispatch=dispatch\n self.regularizer=regularizer\n\n if self.dispatch is True:\n self.scale = layers.Dense(units*stacks, activation=None)\n else:\n self.scale = layers.Dense(units, activation=None)\n\n self.rds=[]\n for _ in range(0, self.stacks):\n self.rds.append(ResidualDenseStack(self.units, self.layer_count, \n regularizer=self.regularizer))\n self.rescale_relu = layers.ReLU()\n self.concat = layers.Concatenate()\n if self.regularizer != 0:\n self.rescale = layers.Dense(self.units, \n kernel_regularizer=keras.regularizers.l2(self.regularizer))\n else:\n self.rescale = layers.Dense(self.units)\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'units': self.units,\n 'layers': self.layer_count,\n 'stacks': self.stacks,\n 'dispatch': self.dispatch,\n 'regularizer': self.regularizer\n })\n return config\n\n def call(self, inputs):\n xa=[]\n # Scale up\n x=self.scale(inputs)\n for i in range(0, self.stacks):\n if self.dispatch:\n xa.append(self.rds[i](x[:,i*self.units:(i+1)*self.units]))\n else:\n xa.append(self.rds[i](x))\n x=self.concat(xa)\n x=self.rescale(x)\n x=self.rescale_relu(x)\n return x\n\nclass SelfAttention(layers.Layer):\n \"\"\" Self-attention layer for Keras\n\n The self-attention layer learns three matrices (key :math:`W_k`, query :math:`W_q`, value :math:`W_v`)\n that provide context-information for the :math:`input`.\n Input is mutiplied with all three matrices, then :math:`W_k` and :math:`W_q` are multiplied,\n scaled down by :math:`\\\\sqrt{\\\\dim{input}[-1]}` and normalized, either by LayerNorm,\n BatchNorm or Softmax or not at all. The result is then multiplied with :math:`W_v`, and, if hidden\n dimension of the :math:`W_{x_i}` matrices is different from input units last dimension, \n rescaled by a final dense matrix multiply. Output has same shape as input.\n\n .. code-block:: none\n\n # \n # ┌──┐ \n # ┌► │Wk│───┐ ┌─────┐\n # │ └──┘ │ │Scale│\n # │ ┌──┐ × ─►│Norm │─┐ (opt.)\n # ─┼─►│Wq│───┘ └─────┘ │ ┌─────┐\n # │ └──┘ │ │Scale│──►\n # │ ┌──┐ × ─►│Dense│\n # └► │Wv│───────────────┘ └─────┘\n # └──┘\n #\n\n :param units: Positive integer, number of hidden units. 
The matrices :math:`W_{x_i}` are of shape :math:`hs \\\\times hs`.\n :param norm: either 'batchnorm', 'layernorm', 'softmax', or None\n \"\"\"\n def __init__(self, units=None, norm=None, **kwargs):\n super(SelfAttention, self).__init__(**kwargs)\n self.units = units\n self.norm = norm\n if self.norm==\"layernorm\":\n self.norm = layers.LayerNormalization(axis=-1)\n elif self.norm==\"batchnorm\":\n self.norm = layers.BatchNormalization()\n elif self.norm==\"softmax\":\n self.norm = layers.Softmax()\n elif self.norm==None or self.norm == \"none\":\n self.norm = None\n else:\n raise ValueError(\"Unknown norm: {}\".format(self.norm))\n self.pm = layers.Permute((2,1))\n\n def build(self, input_shape):\n self.fact = math.sqrt(input_shape[-1])\n if self.units is None:\n dim2 = input_shape[-1]\n else:\n dim2 = self.units\n self.scale = self.add_weight(shape=(dim2, input_shape[-1]),\n initializer=\"random_normal\", name='w1', trainable=True)\n self.w_keys = self.add_weight(shape=(input_shape[-1], dim2),\n initializer=\"random_normal\", name='w2', trainable=True)\n self.w_queries = self.add_weight(shape=(input_shape[-1], dim2),\n initializer=\"random_normal\", name='w3', trainable=True)\n self.w_values = self.add_weight(shape=(input_shape[-1], dim2),\n initializer=\"random_normal\", name='w4', trainable=True)\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'units': self.units,\n 'norm': self.norm\n })\n return config \n\n def call(self, inputs):\n vk = tf.matmul(inputs, self.w_keys)\n vq = tf.matmul(inputs, self.w_queries)\n vv = tf.matmul(inputs, self.w_values)\n kq = tf.matmul(vk, vq, transpose_b=True)\n kqs = kq/self.fact\n if self.norm is not None:\n sn = self.norm(kqs)\n else:\n sn = kqs\n out = tf.matmul(sn, self.pm(vv), transpose_b=True)\n\n if self.units is not None:\n out = tf.matmul(out, self.scale)\n return out\n\nclass MultiHeadSelfAttention(layers.Layer):\n \"\"\" Multi-head self-attention layer for Keras\n\n The multi-head self-attention layer concatenates the output of `heads` :class:`SelfAttention`\n layers. Each of the self-attention layers has an additive residual connection.\n If `mh_normalize` is True, the concatenated output is normalized.\n After scaling down to the number of units, the output is then passed through a\n ReLU and Dense layer again with residual connection.\n Finally, optional normalization and a final optional ReLU is applied. \n Output has same shape as input.\n\n .. code-block:: none\n\n # ┌──────────────┐\n # │ ┌────────┐ ▼ ┌──────┐ ┌────┐\n # ┌─┴─►│SelfAtt.│─ + ─►│ │ │ │\n # │ └────────┘ │ │ │ │\n # │ ┌──────────────┐ │ │ │ │ ┌───────────────────┐ ┌────┐ \n # ─┤ │ ┌────────┐ ▼ │ │ │Opt.│ ┌─────┐ │ ┌────┐ ┌─────┐ ▼ │Opt │ \n # ├─┴─►│SelfAtt.│─ + ─►│ │─►│Norm│─►│Scale│─┴─►│ReLU│─►│Dense│─ + ─►│Norm│─►\n # │ └────────┘ │concat│ │ │ └─────┘ └────┘ └─────┘ └────┘ \n # │ . │ or │ │ │\n # │ . head │ relu │ │ │\n # │ . 
reps │ +add │ │ │\n # │ ┌──────────────┐ │ │ │ │\n # │ │ ┌────────┐ ▼ │ │ │ │\n # └─┴─►│SelfAtt.│─ + ─►│ │ │ │\n # └────────┘ └──────┘ └────┘\n \n :param units: Positive integer `hs`, number of hidden units.\n :param heads: Positive integer, number of self-attention heads.\n :param mh_normalize: Boolean, whether to normalize the output of the multi-head self-attention.\n :param norm: either 'batchnorm', 'layernorm, or 'softmax', the normalization used within each self-attention head.\n :param join_heads_by_add: on true heads are added after additional relu-nonlin, instead of concatenated (original all-you-need).\n \"\"\"\n def __init__(self, heads, units=None, norm=None, mh_normalize=True,\n final_relu=False, join_heads_by_add=False, **kwargs):\n super(MultiHeadSelfAttention, self).__init__(**kwargs)\n self.heads=heads\n self.units = units\n self.norm = norm\n self.mh_normalize = mh_normalize\n self.final_relu = final_relu\n self.mhsa=[]\n for _ in range(0,self.heads):\n self.mhsa.append(SelfAttention(units=self.units, norm=self.norm))\n self.join_heads_by_add = join_heads_by_add\n if self.join_heads_by_add is False:\n self.cc = layers.Concatenate(axis=1)\n if self.mh_normalize is True:\n self.ln1 = layers.LayerNormalization()\n self.ln2 = layers.LayerNormalization()\n self.relu1 = layers.ReLU()\n self.relu2 = layers.ReLU()\n self.pm = layers.Permute((2,1))\n\n def build(self, input_shape):\n if self.join_heads_by_add is False:\n self.w_heads = self.add_weight(shape=(self.heads * input_shape[-1], input_shape[-1]),\n initializer=\"random_normal\", name='w5concat', trainable=True)\n else:\n self.w_heads = self.add_weight(shape=(input_shape[-1], input_shape[-1]),\n initializer=\"random_normal\", name='w5add', trainable=True)\n self.lin = self.add_weight(shape=(input_shape[-1], input_shape[-1]),\n initializer=\"random_normal\", name='w6', trainable=True)\n \n def get_config(self):\n config = super().get_config()\n config.update({\n 'heads': self.heads,\n 'units': self.units,\n 'norm': self.norm,\n 'mh_normalize': self.mh_normalize,\n 'final_relu': self.final_relu,\n 'join_heads_by_add': self.join_heads_by_add\n })\n return config\n\n def call(self, inputs):\n xa=[]\n for i in range(0, self.heads):\n xa.append(self.pm(self.mhsa[i](inputs)+inputs))\n if self.join_heads_by_add is True:\n for i in range(len(xa)):\n if i==0:\n x=self.relu2(xa[i])\n else:\n x=x+self.relu2(xa[i])\n x=self.pm(x)\n else:\n x=self.pm(self.cc(xa))\n if self.mh_normalize is True:\n x = self.ln1(x)\n xt = tf.matmul(x, self.w_heads)\n x = self.relu1(xt)\n x = tf.matmul(x, self.lin) + xt\n if self.mh_normalize is True:\n x = self.ln2(x)\n return x\n\nclass PositionalEncoding(layers.Layer):\n \"\"\" Positional encoding layer.\n\n adds sinusoid of different frequencies to the input. 
Can be used to add sequence-information to input\n data for transformers or attention layers.\n\n :param amplitude: float, amplitude of the encoding, default=1.0.\n :param trainable: boolean, whether the weights of the layer are trainable, default=False.\n \"\"\"\n def __init__(self, amplitude=1.0, trainable=False, **kwargs):\n super(PositionalEncoding, self).__init__(**kwargs)\n self.amplitude = amplitude\n self.trainable = trainable\n\n # positional encoding taken from: https://www.tensorflow.org/text/tutorials/transformer\n @staticmethod\n def _get_angles(pos, i, d_model):\n angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))\n return pos * angle_rates\n\n def _positional_encoding(self, position, d_model):\n angle_rads = PositionalEncoding._get_angles(np.arange(position)[:, np.newaxis],\n np.arange(d_model)[np.newaxis, :],\n d_model)\n # apply sin to even indices in the array; 2i\n angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])\n # apply cos to odd indices in the array; 2i+1\n angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])\n pos_encoding = angle_rads[np.newaxis, ...] * self.amplitude\n return tf.cast(pos_encoding, dtype=tf.float32)\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'amplitude': self.amplitude,\n 'trainable': self.trainable,\n })\n return config\n\n def build(self, input_shape):\n self.pe = self._positional_encoding(input_shape[1], input_shape[2])\n\n def call(self, inputs):\n return tf.add(inputs, self.pe)\n\n" ]
[ [ "numpy.sin", "tensorflow.matmul", "numpy.float32", "numpy.arange", "numpy.cos", "tensorflow.add", "tensorflow.cast" ] ]
jselvam11/funsor
[ "c54b2c5dbc0185bd4ed727d780596ab2da883d2b" ]
[ "test/test_memoize.py" ]
[ "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\nimport pytest\n\nimport funsor.ops as ops\nfrom funsor.cnf import BACKEND_TO_EINSUM_BACKEND, BACKEND_TO_LOGSUMEXP_BACKEND\nfrom funsor.einsum import einsum, naive_plated_einsum\nfrom funsor.interpreter import interpretation, reinterpret\nfrom funsor.memoize import memoize\nfrom funsor.tensor import numeric_array\nfrom funsor.terms import reflect\nfrom funsor.testing import make_einsum_example, xfail_param\nfrom funsor.util import get_backend\n\n\nEINSUM_EXAMPLES = [\n (\"a,b->\", ''),\n (\"ab,a->\", ''),\n (\"a,a->\", ''),\n (\"a,a->a\", ''),\n (\"ab,bc,cd->da\", ''),\n (\"ab,cd,bc->da\", ''),\n (\"a,a,a,ab->ab\", ''),\n ('i->', 'i'),\n (',i->', 'i'),\n ('ai->', 'i'),\n (',ai,abij->', 'ij'),\n ('a,ai,bij->', 'ij'),\n ('ai,abi,bci,cdi->', 'i'),\n ('aij,abij,bcij->', 'ij'),\n ('a,abi,bcij,cdij->', 'ij'),\n]\n\n\ndef backend_to_einsum_backends(backend):\n backends = [BACKEND_TO_EINSUM_BACKEND[get_backend()],\n BACKEND_TO_LOGSUMEXP_BACKEND[get_backend()]]\n return backends[:1]\n\n\n@pytest.mark.parametrize('equation,plates', EINSUM_EXAMPLES)\n@pytest.mark.parametrize('backend', backend_to_einsum_backends(get_backend()))\n@pytest.mark.parametrize('einsum_impl,same_lazy', [\n (einsum, True),\n (einsum, xfail_param(False, reason=\"nested interpreters?\")),\n (naive_plated_einsum, True),\n (naive_plated_einsum, False)\n])\ndef test_einsum_complete_sharing(equation, plates, backend, einsum_impl, same_lazy):\n inputs, outputs, sizes, operands, funsor_operands = make_einsum_example(equation)\n\n with interpretation(reflect):\n lazy_expr1 = einsum_impl(equation, *funsor_operands, backend=backend, plates=plates)\n lazy_expr2 = lazy_expr1 if same_lazy else \\\n einsum_impl(equation, *funsor_operands, backend=backend, plates=plates)\n\n with memoize():\n expr1 = reinterpret(lazy_expr1)\n expr2 = reinterpret(lazy_expr2)\n expr3 = reinterpret(lazy_expr1)\n\n assert expr1 is expr2\n assert expr1 is not expr3\n\n\n@pytest.mark.parametrize('equation,plates', EINSUM_EXAMPLES)\n@pytest.mark.parametrize('backend', backend_to_einsum_backends(get_backend()))\n@pytest.mark.parametrize('einsum_impl,same_lazy', [\n (einsum, True),\n (einsum, xfail_param(False, reason=\"nested interpreters?\")),\n (naive_plated_einsum, True),\n (naive_plated_einsum, False)\n])\ndef test_einsum_complete_sharing_reuse_cache(equation, plates, backend, einsum_impl, same_lazy):\n inputs, outputs, sizes, operands, funsor_operands = make_einsum_example(equation)\n\n with interpretation(reflect):\n lazy_expr1 = einsum_impl(equation, *funsor_operands, backend=backend, plates=plates)\n lazy_expr2 = lazy_expr1 if same_lazy else \\\n einsum_impl(equation, *funsor_operands, backend=backend, plates=plates)\n\n cache = {}\n with memoize(cache) as cache:\n expr1 = reinterpret(lazy_expr1)\n\n with memoize(cache):\n expr2 = reinterpret(lazy_expr2)\n\n expr3 = reinterpret(lazy_expr1)\n\n assert expr1 is expr2\n assert expr1 is not expr3\n\n\n@pytest.mark.parametrize('check_sample', [\n False, xfail_param(True, reason=\"Joint.sample cannot directly be memoized in this way yet\")])\n@pytest.mark.skipif(get_backend() == \"numpy\", reason=\"there is no numpy distributions backend\")\ndef test_memoize_sample(check_sample):\n if get_backend() == \"jax\":\n from funsor.jax.distributions import Normal\n else:\n from funsor.torch.distributions import Normal\n\n rng_keys = (None, None, None) if get_backend() == \"torch\" \\\n else np.array([[0, 1], [0, 
2], [0, 3]], dtype=np.uint32)\n\n with memoize():\n m, s = numeric_array(0.), numeric_array(1.)\n j1 = Normal(m, s, 'x')\n j2 = Normal(m, s, 'x')\n x1 = j1.sample(frozenset({'x'}), rng_key=rng_keys[0])\n x12 = j1.sample(frozenset({'x'}), rng_key=rng_keys[1])\n x2 = j2.sample(frozenset({'x'}), rng_key=rng_keys[2])\n\n # this assertion now passes\n assert j1 is j2\n\n # these assertions fail because sample is not memoized\n if check_sample:\n assert x1 is x12\n assert x1 is x2\n\n\n@pytest.mark.parametrize(\"eqn1,eqn2\", [(\"ab,bc,cd->d\", \"de,ef,fg->\")])\n@pytest.mark.parametrize(\"einsum_impl1\", [naive_plated_einsum, xfail_param(einsum, reason=\"nested interpreters?\")])\n@pytest.mark.parametrize(\"einsum_impl2\", [naive_plated_einsum, xfail_param(einsum, reason=\"nested interpreters?\")])\n@pytest.mark.parametrize('backend1', backend_to_einsum_backends(get_backend()))\n@pytest.mark.parametrize('backend2', backend_to_einsum_backends(get_backend()))\ndef test_nested_einsum_complete_sharing(eqn1, eqn2, einsum_impl1, einsum_impl2, backend1, backend2):\n\n inputs1, outputs1, sizes1, operands1, funsor_operands1 = make_einsum_example(eqn1, sizes=(3,))\n inputs2, outputs2, sizes2, operands2, funsor_operands2 = make_einsum_example(eqn2, sizes=(3,))\n\n with memoize():\n output1_1 = einsum_impl1(eqn1, *funsor_operands1, backend=backend1)\n output2_1 = einsum_impl2(outputs1[0] + \",\" + eqn2, *([output1_1] + funsor_operands2), backend=backend2)\n\n output1_2 = einsum_impl1(eqn1, *funsor_operands1, backend=backend1)\n output2_2 = einsum_impl2(outputs1[0] + \",\" + eqn2, *([output1_2] + funsor_operands2), backend=backend2)\n\n assert output1_1 is output1_2\n assert output2_1 is output2_2\n\n\ndef test_nested_complete_sharing_direct():\n\n inputs, outputs, sizes, operands, funsor_operands = make_einsum_example(\"ab,bc,cd->d\")\n ab, bc, cd = funsor_operands\n\n # avoids the complicated internal interpreter usage of the nested optimized einsum tests above\n with interpretation(reflect):\n c1 = (ab * bc).reduce(ops.add, frozenset({\"a\", \"b\"}))\n d1 = (c1 * cd).reduce(ops.add, frozenset({\"c\"}))\n\n # this does not trigger a second alpha-renaming\n c2 = (ab * bc).reduce(ops.add, frozenset({\"a\", \"b\"}))\n d2 = (c2 * cd).reduce(ops.add, frozenset({\"c\"}))\n\n with memoize():\n assert reinterpret(c1) is reinterpret(c2)\n assert reinterpret(d1) is reinterpret(d2)\n" ]
[ [ "numpy.array" ] ]
tmkkk/fcn
[ "e2d60fd5d54fd69f2b1d8280fe870f9af8cfda50" ]
[ "src/models.py" ]
[ "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.applications.vgg16 import VGG16\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\nfrom tensorflow.keras.initializers import Zeros\n\n\ndef build_fcn32s(nb_classes, target_size=(None, None)):\n inputs = Input(shape=(*target_size, 3))\n vgg = VGG16(weights='imagenet', include_top=False, input_tensor=inputs, input_shape=(*target_size, 3))\n x = Conv2D(4096, (7, 7), activation='relu', padding='same')(vgg.output)\n x = Dropout(0.5)(x)\n x = Conv2D(4096, (1, 1), activation='relu', padding='same')(x)\n x = Dropout(0.5)(x)\n x = Conv2D(nb_classes, (1, 1), padding='same', kernel_initializer='he_normal')(x)\n x = Conv2DTranspose(nb_classes, (64, 64), strides=(32, 32), use_bias=False, padding='same', activation='softmax', name='fcn32s-transpose')(x)\n\n model = Model(inputs=inputs, outputs=x)\n return model\n\n\ndef build_fcn16s(nb_classes, target_size=(None, None)):\n inputs = Input(shape=(*target_size, 3))\n vgg = VGG16(weights='imagenet', include_top=False, input_tensor=inputs, input_shape=(*target_size, 3))\n x = Conv2D(4096, (7, 7), activation='relu', padding='same')(vgg.output)\n x = Dropout(0.5)(x)\n x = Conv2D(4096, (1, 1), activation='relu', padding='same')(x)\n x = Dropout(0.5)(x)\n x = Conv2D(nb_classes, (1, 1), padding='same', kernel_initializer='he_normal')(x)\n\n x = Conv2DTranspose(nb_classes, (4, 4), strides=(2, 2), use_bias=False, padding='same', activation='relu', name='fcn16s-transpose-first')(x)\n\n skip_con = Conv2D(nb_classes, (1, 1), strides=(1, 1), padding='same', bias_initializer=Zeros(), kernel_initializer=Zeros(), name='fcn16s-skip-con')(vgg.get_layer(name=\"block4_pool\").output)\n x = Add()([x, skip_con])\n x = Conv2DTranspose(nb_classes, (32, 32), strides=(16, 16), use_bias=False, padding='same', activation='softmax', name='fcn16s-transpose-second')(x)\n\n model = Model(inputs=inputs, outputs=x)\n return model\n\n" ]
[ [ "tensorflow.keras.initializers.Zeros", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.Input", "tensorflow.keras.applications.vgg16.VGG16", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Conv2DTranspose" ] ]
magnificent1208/mmdetection
[ "ce53f9afb4e6cd9343c02fb8218e54411f58c2b7" ]
[ ".history/mmdet/models/dense_heads/center_triplets_head_20200812191524.py" ]
[ "from math import ceil, log\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmcv.cnn import normal_init, ConvModule, bias_init_with_prob\nfrom mmcv.ops import CornerPool\n\nimport numpy as np\nimport cv2\nimport math\n\nfrom mmdet.core import multi_apply, multiclass_nms, distance2bbox, force_fp32\nfrom ..builder import build_loss, HEADS\nfrom ..utils import gaussian_radius, gen_gaussian_target\nfrom .corner_head import CornerHead\n\n\nINF = 1e8\n\n\nclass BiCascadeCornerPool(nn.Module):\n \"\"\"BiCascadial Corner Pooling Module (TopLeft, BottomRight, etc.)\n\n Args:\n in_channels (int): Input channels of module.\n out_channels (int): Output channels of module.\n feat_channels (int): Feature channels of module.\n directions (list[str]): Directions of two CornerPools.\n norm_cfg (dict): Dictionary to construct and config norm layer.\n \"\"\"\n\n def __init__(self,\n in_channels,\n direction,\n feat_channels=128,\n out_channels=128,\n norm_cfg=dict(type='BN', requires_grad=True)):\n super(CascadeCornerPool, self).__init__()\n\n self.pool1 = CornerPool(direction[0])\n self.pool2 = CornerPool(direction[1])\n # pool1 conv\n self.look1_conv = ConvModule(\n in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)\n self.direct1_conv = ConvModule(\n in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)\n self.aftconcat1_conv = ConvModule(\n feat_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg, act_cfg=None) \n # pool2 conv\n self.look2_conv = ConvModule(\n in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)\n self.direct2_conv = ConvModule(\n in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)\n self.aftconcat2_conv = ConvModule(\n feat_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg, act_cfg=None)\n # main and direct conv\n self.aftconcat_conv = ConvModule(\n feat_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg, act_cfg=None)\n self.direct_conv = ConvModule(\n feat_channels, feat_channels, 1, padding=1, norm_cfg=norm_cfg, act_cfg=None)\n self.out_conv = ConvModule(\n feat_channels, out_channels, 3, paddng=1, norm_cfg=norm_cfg, act_cfg=None)\n\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n \"\"\"Forward features from the upstream network.\n\n Args:\n x (tensor): Input feature of BiCornerPool.\n\n Returns:\n conv2 (tensor): Output feature of BiCornerPool.\n \"\"\"\n # pool 1\n look_1 = self.look1_conv(x)\n look_1 = self.pool2(look_1)\n direct_1 = self.direct1_conv(x)\n main_1 = self.aftconcat1_conv(look_1 + direct_1)\n main_1 = self.pool1(main_1)\n\n # pool 2\n look_2 = self.look2_conv(x)\n look_2 = self.pool1(look_2)\n direct_2 = self.direct2_conv(x)\n main_2 = self.aftconcat2_conv(look_2 + direct_2)\n main_2 = self.pool2(main_2)\n\n # pool1 + pool2\n main = self.aftconcat_conv(main_1 + main_2)\n direct = self.direct_conv(x)\n\n result = self.relu(main + direct)\n result = self.out_conv(result)\n return result\n\n\nclass CenterPool(nn.Module):\n \"\"\"Center Pooling Module. 
Pools in all four directions.\n\n    Args:\n        in_channels (int): Input channels of module.\n        out_channels (int): Output channels of module.\n        feat_channels (int): Feature channels of module.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n    \"\"\"\n    def __init__(self,\n                 in_channels,\n                 feat_channels=128,\n                 out_channels=128,\n                 norm_cfg=dict(type='BN', requires_grad=True)):\n        super(CenterPool, self).__init__()\n        self.direction1_conv = ConvModule(\n            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)\n        self.direction2_conv = ConvModule(\n            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)\n\n        self.aftpool_conv = ConvModule(\n            feat_channels,\n            out_channels,\n            3,\n            padding=1,\n            norm_cfg=norm_cfg,\n            act_cfg=None)\n\n        self.conv1 = ConvModule(\n            in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)\n        self.conv2 = ConvModule(\n            in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)\n\n        self.left_pool = CornerPool('left')\n        self.right_pool = CornerPool('right')\n        self.top_pool = CornerPool('top')\n        self.bottom_pool = CornerPool('bottom')\n\n        self.relu = nn.ReLU(inplace=True)\n\n    def forward(self, x):\n        direction1_conv = self.direction1_conv(x)\n        direction2_conv = self.direction2_conv(x)\n        direction1_feat = self.right_pool(self.left_pool(direction1_conv))\n        direction2_feat = self.bottom_pool(self.top_pool(direction2_conv))\n        aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat)\n        conv1 = self.conv1(x)\n        relu = self.relu(aftpool_conv + conv1)\n        conv2 = self.conv2(relu)\n        return conv2\n\n\n@HEADS.register_module\nclass CenterHead(CornerHead):\n    \"\"\"Head of CenterNet (keypoint triplets). Author: Chou maggie\n\n    Args:\n        num_classes (int): Number of detection classes (including background).\n        in_channels (int): Number of input feature channels.\n        num_feat_levels (int): Number of feature levels from the backbone.\n            2 for HourglassNet-104 (Default)\n            1 for HourglassNet-52\n        corner_emb_channels (int): Channel of embedding vector. Default: 1.\n\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 num_feat_levels=2,\n                 corner_emb_channels=1,\n                 train_cfg=None,\n                 test_cfg=None,\n                 loss_heatmap=dict(\n                     type='GaussianFocalLoss',\n                     alpha=2.0,\n                     gamma=4.0,\n                     loss_weight=1),\n                 loss_embedding=dict(\n                     type='AssociativeEmbeddingLoss',\n                     pull_weight=0.25,\n                     push_weight=0.25),\n                 loss_offset=dict(\n                     type='SmoothL1Loss', beta=1.0, loss_weight=1)):\n        # Skip CornerHead.__init__: every attribute it would set is\n        # (re-)initialized below, before self._init_layers() is called.\n        super(CornerHead, self).__init__()\n\n        self.num_classes = num_classes\n        # self.cls_out_channels = num_classes\n        self.in_channels = in_channels\n        self.feat_channels = in_channels\n        self.corner_emb_channels = corner_emb_channels\n        self.with_corner_emb = self.corner_emb_channels > 0\n        self.corner_offset_channels = 2\n        # TODO: set center offset channels\n        self.center_offset_channels = 2\n        self.num_feat_levels = num_feat_levels\n        self.loss_heatmap = build_loss(\n            loss_heatmap) if loss_heatmap is not None else None\n        self.loss_embedding = build_loss(\n            loss_embedding) if loss_embedding is not None else None\n        self.loss_offset = build_loss(\n            loss_offset) if loss_offset is not None else None\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n        self._init_layers()\n\n    def _init_corner_kpt_layers(self):\n        \"\"\"Initialize corner keypoint layers.\n\n        Including corner heatmap branch and corner offset branch. 
Each branch\n        has two parts: prefix `tl_` for top-left and `br_` for bottom-right.\n        \"\"\"\n        self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList()\n        self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList()\n        self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList()\n\n        for _ in range(self.num_feat_levels):\n            self.tl_pool.append(\n                BiCascadeCornerPool(\n                    self.in_channels, ['top', 'left'],\n                    out_channels=self.in_channels))\n            self.br_pool.append(\n                BiCascadeCornerPool(\n                    self.in_channels, ['bottom', 'right'],\n                    out_channels=self.in_channels))\n\n            self.tl_heat.append(\n                self._make_layers(\n                    out_channels=self.num_classes,\n                    in_channels=self.in_channels))\n            self.br_heat.append(\n                self._make_layers(\n                    out_channels=self.num_classes,\n                    in_channels=self.in_channels))\n\n            self.tl_off.append(\n                self._make_layers(\n                    out_channels=self.corner_offset_channels,\n                    in_channels=self.in_channels))\n            self.br_off.append(\n                self._make_layers(\n                    out_channels=self.corner_offset_channels,\n                    in_channels=self.in_channels))\n\n    def _init_center_kpt_layers(self):\n        \"\"\"Initialize center keypoint layers. Use CenterPool.\n\n        Including center heatmap branch and center offset branch.\n        \"\"\"\n        self.center_pool = nn.ModuleList()\n        self.center_heat = nn.ModuleList()\n        self.center_off = nn.ModuleList()\n\n        for _ in range(self.num_feat_levels):\n            self.center_pool.append(\n                CenterPool(\n                    self.in_channels,\n                    out_channels=self.in_channels))\n            self.center_heat.append(\n                self._make_layers(\n                    out_channels=self.num_classes,\n                    in_channels=self.in_channels))\n            self.center_off.append(\n                self._make_layers(\n                    out_channels=self.center_offset_channels,\n                    in_channels=self.in_channels))\n\n    def _init_layers(self):\n        \"\"\"Initialize layers for CenterHead.\n        \"\"\"\n        self._init_corner_kpt_layers()\n        if self.with_corner_emb:\n            self._init_corner_emb_layers()\n        self._init_center_kpt_layers()\n\n    def init_weights(self):\n        \"\"\"Initialize weights of the head.\n        \"\"\"\n        bias_init = bias_init_with_prob(0.1)\n        for i in range(self.num_feat_levels):\n            self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)\n            self.br_heat[i][-1].conv.bias.data.fill_(bias_init)\n            self.center_heat[i][-1].conv.bias.data.fill_(bias_init)\n\n    # TODO: add center information to forward output\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of corner heatmaps, offset heatmaps and\n            embedding heatmaps.\n                - tl_heats (list[Tensor]): Top-left corner heatmaps for all\n                  levels, each is a 4D-tensor, the channels number is\n                  num_classes.\n                - br_heats (list[Tensor]): Bottom-right corner heatmaps for all\n                  levels, each is a 4D-tensor, the channels number is\n                  num_classes.\n                - tl_embs (list[Tensor] | list[None]): Top-left embedding\n                  heatmaps for all levels, each is a 4D-tensor or None.\n                  If not None, the channels number is corner_emb_channels.\n                - br_embs (list[Tensor] | list[None]): Bottom-right embedding\n                  heatmaps for all levels, each is a 4D-tensor or None.\n                  If not None, the channels number is corner_emb_channels.\n                - tl_offs (list[Tensor]): Top-left offset heatmaps for all\n                  levels, each is a 4D-tensor. The channels number is\n                  corner_offset_channels.\n                - br_offs (list[Tensor]): Bottom-right offset heatmaps for all\n                  levels, each is a 4D-tensor. 
The channels number is\n                  corner_offset_channels.\n        \"\"\"\n        lvl_ind = list(range(self.num_feat_levels))\n        return multi_apply(self.forward_single, feats, lvl_ind)\n\n    def forward_single(self, x, lvl_ind, return_pool=False):\n        \"\"\"Forward feature of a single level.\n\n        Args:\n            x (Tensor): Feature of a single level.\n            lvl_ind (int): Level index of current feature.\n            return_pool (bool): Return corner pool feature or not.\n\n        Returns:\n            tuple[Tensor]: A tuple of CornerHead's output for current feature\n            level. Containing the following Tensors:\n\n                - tl_heat (Tensor): Predicted top-left corner heatmap.\n                - br_heat (Tensor): Predicted bottom-right corner heatmap.\n                - tl_emb (Tensor | None): Predicted top-left embedding heatmap.\n                  None for `self.with_corner_emb == False`.\n                - br_emb (Tensor | None): Predicted bottom-right embedding\n                  heatmap. None for `self.with_corner_emb == False`.\n                - tl_off (Tensor): Predicted top-left offset heatmap.\n                - br_off (Tensor): Predicted bottom-right offset heatmap.\n                - tl_pool (Tensor): Top-left corner pool feature. Not must\n                  have.\n                - br_pool (Tensor): Bottom-right corner pool feature. Not must\n                  have.\n        \"\"\"\n        tl_pool = self.tl_pool[lvl_ind](x)\n        tl_heat = self.tl_heat[lvl_ind](tl_pool)\n        br_pool = self.br_pool[lvl_ind](x)\n        br_heat = self.br_heat[lvl_ind](br_pool)\n\n        tl_emb, br_emb = None, None\n        if self.with_corner_emb:\n            tl_emb = self.tl_emb[lvl_ind](tl_pool)\n            br_emb = self.br_emb[lvl_ind](br_pool)\n\n        tl_off = self.tl_off[lvl_ind](tl_pool)\n        br_off = self.br_off[lvl_ind](br_pool)\n\n        center_pool = self.center_pool[lvl_ind](x)\n        center_heat = self.center_heat[lvl_ind](center_pool)\n        # The offset branch consumes the pooled feature, as for the corners.\n        center_off = self.center_off[lvl_ind](center_pool)\n\n        result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off]\n        if return_pool:\n            result_list.append(tl_pool)\n            result_list.append(br_pool)\n            result_list.append(center_pool)\n\n        result_list.append(center_heat)\n        result_list.append(center_off)\n\n        return result_list\n\n    def get_targets(self,\n                    gt_bboxes,\n                    gt_labels,\n                    feat_shape,\n                    img_shape,\n                    with_corner_emb=False,\n                    with_guiding_shift=False,\n                    with_centripetal_shift=False):\n        \"\"\"Generate corner and center targets.\n\n        Including corner heatmap, corner offset, center heatmap, center offset.\n\n        Optional: corner embedding, corner guiding shift, centripetal shift.\n\n        For CenterNet, we generate corner heatmap, corner offset, corner\n        embedding and center heatmap, center offset from this function.\n\n        For CentripetalNet, we generate corner heatmap, corner offset, guiding\n        shift and centripetal shift from this function.\n\n        Args:\n            gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each\n                has shape (num_gt, 4).\n            gt_labels (list[Tensor]): Ground truth labels of each box, each has\n                shape (num_gt,).\n            feat_shape (list[int]): Shape of output feature,\n                [batch, channel, height, width].\n            img_shape (list[int]): Shape of input image,\n                [height, width, channel].\n            with_corner_emb (bool): Generate corner embedding target or not.\n                Default: False.\n            with_guiding_shift (bool): Generate guiding shift target or not.\n                Default: False.\n            with_centripetal_shift (bool): Generate centripetal shift target or\n                not. Default: False.\n\n        Returns:\n            dict: Ground truth of corner heatmap, corner offset, corner\n            embedding, guiding shift and centripetal shift. 
Containing the\n following keys:\n\n - topleft_heatmap (Tensor): Ground truth top-left corner\n heatmap.\n - bottomright_heatmap (Tensor): Ground truth bottom-right\n corner heatmap.\n - topleft_offset (Tensor): Ground truth top-left corner offset.\n - bottomright_offset (Tensor): Ground truth bottom-right corner\n offset.\n - corner_embedding (list[list[list[int]]]): Ground truth corner\n embedding. Not must have.\n - topleft_guiding_shift (Tensor): Ground truth top-left corner\n guiding shift. Not must have.\n - bottomright_guiding_shift (Tensor): Ground truth bottom-right\n corner guiding shift. Not must have.\n - topleft_centripetal_shift (Tensor): Ground truth top-left\n corner centripetal shift. Not must have.\n - bottomright_centripetal_shift (Tensor): Ground truth\n bottom-right corner centripetal shift. Not must have.\n \"\"\"\n batch_size, _, height, width = feat_shape\n img_h, img_w = img_shape[:2]\n\n width_ratio = float(width / img_w)\n height_ratio = float(height / img_h)\n\n gt_tl_heatmap = gt_bboxes[-1].new_zeros(\n [batch_size, self.num_classes, height, width])\n gt_br_heatmap = gt_bboxes[-1].new_zeros(\n [batch_size, self.num_classes, height, width])\n gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])\n gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])\n\n gt_center_heatmap = gt_bboxes[-1].new_zeros(\n [batch_size, self.num_classes, height, width])\n gt_center_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])\n\n if with_corner_emb:\n match = []\n\n for batch_id in range(batch_size):\n # Ground truth of corner embedding per image is a list of coord set\n corner_match = []\n for box_id in range(len(gt_labels[batch_id])):\n left, top, right, bottom = gt_bboxes[batch_id][box_id]\n center_x = (left + right) / 2.0\n center_y = (top + bottom) / 2.0\n label = gt_labels[batch_id][box_id]\n\n # Use coords in the feature level to generate ground truth\n scale_left = left * width_ratio\n scale_right = right * width_ratio\n scale_top = top * height_ratio\n scale_bottom = bottom * height_ratio\n scale_center_x = center_x * width_ratio\n scale_center_y = center_y * height_ratio\n\n # Int coords on feature map/ground truth tensor\n left_idx = int(min(scale_left, width - 1))\n right_idx = int(min(scale_right, width - 1))\n top_idx = int(min(scale_top, height - 1))\n bottom_idx = int(min(scale_bottom, height - 1))\n center_x_idx = int(scale_center_x)\n center_y_idx = int(scale_center_y)\n\n # Generate gaussian heatmap\n scale_box_width = ceil(scale_right - scale_left)\n scale_box_height = ceil(scale_bottom - scale_top)\n radius = gaussian_radius((scale_box_height, scale_box_width),\n min_overlap=0.3)\n radius = max(0, int(radius))\n gt_tl_heatmap[batch_id, label] = gen_gaussian_target(\n gt_tl_heatmap[batch_id, label], [left_idx, top_idx],\n radius)\n gt_br_heatmap[batch_id, label] = gen_gaussian_target(\n gt_br_heatmap[batch_id, label], [right_idx, bottom_idx],\n radius)\n gt_center_heatmap[batch_id, label] = gen_gaussian_target(\n gt_center_heatmap[batch_id, label], [center_x_idx, center_y_idx],\n radius)\n\n # Generate corner offset\n left_offset = scale_left - left_idx\n top_offset = scale_top - top_idx\n right_offset = scale_right - right_idx\n bottom_offset = scale_bottom - bottom_idx\n center_x_offset = scale_center_x - center_x_idx\n center_y_offset = scale_center_y - center_y_idx\n\n gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset\n gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset\n 
gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset\n                gt_br_offset[batch_id, 1, bottom_idx, right_idx] = bottom_offset\n                # Offset maps are indexed [batch, channel, y, x], as for the corners above\n                gt_center_offset[batch_id, 0, center_y_idx, center_x_idx] = center_x_offset\n                gt_center_offset[batch_id, 1, center_y_idx, center_x_idx] = center_y_offset\n\n                # Generate corner embedding\n                if with_corner_emb:\n                    corner_match.append([[top_idx, left_idx],\n                                         [bottom_idx, right_idx]])\n                # Generate guiding shift\n                if with_guiding_shift:\n                    gt_tl_guiding_shift[batch_id, 0, top_idx,\n                                        left_idx] = scale_center_x - left_idx\n                    gt_tl_guiding_shift[batch_id, 1, top_idx,\n                                        left_idx] = scale_center_y - top_idx\n                    gt_br_guiding_shift[batch_id, 0, bottom_idx,\n                                        right_idx] = right_idx - scale_center_x\n                    gt_br_guiding_shift[\n                        batch_id, 1, bottom_idx,\n                        right_idx] = bottom_idx - scale_center_y\n                # Generate centripetal shift\n                if with_centripetal_shift:\n                    gt_tl_centripetal_shift[batch_id, 0, top_idx,\n                                            left_idx] = log(scale_center_x -\n                                                            scale_left)\n                    gt_tl_centripetal_shift[batch_id, 1, top_idx,\n                                            left_idx] = log(scale_center_y -\n                                                            scale_top)\n                    gt_br_centripetal_shift[batch_id, 0, bottom_idx,\n                                            right_idx] = log(scale_right -\n                                                             scale_center_x)\n                    gt_br_centripetal_shift[batch_id, 1, bottom_idx,\n                                            right_idx] = log(scale_bottom -\n                                                             scale_center_y)\n\n            if with_corner_emb:\n                match.append(corner_match)\n\n        target_result = dict(\n            topleft_heatmap=gt_tl_heatmap,\n            topleft_offset=gt_tl_offset,\n            bottomright_heatmap=gt_br_heatmap,\n            bottomright_offset=gt_br_offset,\n            center_heatmap=gt_center_heatmap,\n            center_offset=gt_center_offset)\n\n        if with_corner_emb:\n            target_result.update(corner_embedding=match)\n        if with_guiding_shift:\n            target_result.update(\n                topleft_guiding_shift=gt_tl_guiding_shift,\n                bottomright_guiding_shift=gt_br_guiding_shift)\n        if with_centripetal_shift:\n            target_result.update(\n                topleft_centripetal_shift=gt_tl_centripetal_shift,\n                bottomright_centripetal_shift=gt_br_centripetal_shift)\n\n        return target_result\n\n    def loss(self,\n             tl_heats,\n             br_heats,\n             tl_embs,\n             br_embs,\n             tl_offs,\n             br_offs,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             center_heats,\n             center_offs,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            tl_heats (list[Tensor]): Top-left corner heatmaps for each level\n                with shape (N, num_classes, H, W).\n            br_heats (list[Tensor]): Bottom-right corner heatmaps for each\n                level with shape (N, num_classes, H, W).\n            tl_embs (list[Tensor]): Top-left corner embeddings for each level\n                with shape (N, corner_emb_channels, H, W).\n            br_embs (list[Tensor]): Bottom-right corner embeddings for each\n                level with shape (N, corner_emb_channels, H, W).\n            tl_offs (list[Tensor]): Top-left corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            br_offs (list[Tensor]): Bottom-right corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [left, top, right, bottom] format.\n            gt_labels (list[Tensor]): Class indices corresponding to each box.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            center_heats (list[Tensor]): Center heatmaps for each level with\n                shape (N, num_classes, H, W).\n            center_offs (list[Tensor]): Center offsets for each level with\n                shape (N, center_offset_channels, H, W).\n            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components. 
Containing the\n            following losses:\n\n                - corner_det_loss (list[Tensor]): Corner keypoint losses of all\n                  feature levels.\n                - pull_loss (list[Tensor]): Part one of AssociativeEmbedding\n                  losses of all feature levels.\n                - push_loss (list[Tensor]): Part two of AssociativeEmbedding\n                  losses of all feature levels.\n                - corner_off_loss (list[Tensor]): Corner offset losses of all\n                  feature levels.\n                - center_det_loss (list[Tensor]): Center keypoint losses of all\n                  feature levels.\n                - center_off_loss (list[Tensor]): Center offset losses of all\n                  feature levels.\n        \"\"\"\n        targets = self.get_targets(\n            gt_bboxes,\n            gt_labels,\n            tl_heats[-1].shape,\n            img_metas[0]['pad_shape'],\n            with_corner_emb=self.with_corner_emb)\n        mlvl_targets = [targets for _ in range(self.num_feat_levels)]\n\n        corner_det_losses, pull_losses, push_losses, corner_off_losses, center_det_loss, center_off_loss = multi_apply(self.loss_single,\n            tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs, center_heats, center_offs, mlvl_targets)\n\n        loss_dict = dict(corner_det_loss=corner_det_losses,\n                         corner_off_loss=corner_off_losses,\n                         center_det_loss=center_det_loss,\n                         center_off_loss=center_off_loss)\n        if self.with_corner_emb:\n            loss_dict.update(pull_loss=pull_losses, push_loss=push_losses)\n        return loss_dict\n\n    def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,\n                    center_hmp, center_off, targets):\n        \"\"\"Compute losses for single level.\n\n        Args:\n            tl_hmp (Tensor): Top-left corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            br_hmp (Tensor): Bottom-right corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            tl_emb (Tensor): Top-left corner embedding for current level with\n                shape (N, corner_emb_channels, H, W).\n            br_emb (Tensor): Bottom-right corner embedding for current level\n                with shape (N, corner_emb_channels, H, W).\n            tl_off (Tensor): Top-left corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            br_off (Tensor): Bottom-right corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            center_hmp (Tensor): Center heatmap for current level with\n                shape (N, num_classes, H, W).\n            center_off (Tensor): Center offset for current level with\n                shape (N, center_offset_channels, H, W).\n            targets (dict): Corner target generated by `get_targets`.\n\n        Returns:\n            tuple[torch.Tensor]: Losses of the head's different branches\n            containing the following losses:\n\n                - corner_det_loss (Tensor): Corner keypoint loss.\n                - pull_loss (Tensor): Part one of AssociativeEmbedding loss.\n                - push_loss (Tensor): Part two of AssociativeEmbedding loss.\n                - corner_off_loss (Tensor): Corner offset loss.\n                - center_det_loss (Tensor): Center keypoint loss.\n                - center_off_loss (Tensor): Center offset loss.\n        \"\"\"\n        gt_tl_hmp = targets['topleft_heatmap']\n        gt_br_hmp = targets['bottomright_heatmap']\n        gt_tl_off = targets['topleft_offset']\n        gt_br_off = targets['bottomright_offset']\n        gt_center_hmp = targets['center_heatmap']\n        gt_center_off = targets['center_offset']\n        gt_embedding = targets['corner_embedding']\n\n        # Detection loss for corner\n        tl_det_loss = self.loss_heatmap(\n            tl_hmp.sigmoid(),\n            gt_tl_hmp,\n            avg_factor=max(1, gt_tl_hmp.eq(1).sum()))\n        br_det_loss = self.loss_heatmap(\n            br_hmp.sigmoid(),\n            gt_br_hmp,\n            avg_factor=max(1, gt_br_hmp.eq(1).sum()))\n        corner_det_loss = (tl_det_loss + br_det_loss) / 2.0\n\n        # Detection loss for center\n        center_det_loss = self.loss_heatmap(\n            center_hmp.sigmoid(),\n            gt_center_hmp,\n            avg_factor=max(1, gt_center_hmp.eq(1).sum()))\n\n        # AssociativeEmbedding loss\n        if self.with_corner_emb and self.loss_embedding is not None:\n            pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb,\n                                                       gt_embedding)\n        else:\n            pull_loss, push_loss = None, None\n\n        # Offset loss\n        # We only compute the offset loss at the 
real corner position.\n # The value of real corner would be 1 in heatmap ground truth.\n # The mask is computed in class agnostic mode and its shape is\n # batch * 1 * width * height.\n tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(\n gt_tl_hmp)\n br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(\n gt_br_hmp)\n tl_off_loss = self.loss_offset(\n tl_off,\n gt_tl_off,\n tl_off_mask,\n avg_factor=max(1, tl_off_mask.sum()))\n br_off_loss = self.loss_offset(\n br_off,\n gt_br_off,\n br_off_mask,\n avg_factor=max(1, br_off_mask.sum()))\n \n center_off_mask = gt_center_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(\n gt_center_hmp)\n center_off_loss = self.loss_offset(\n center_off,\n gt_center_off,\n center_off_mask,\n avg_factor=max(1, center_off_mask.sum()))\n \n corner_off_loss = (tl_off_loss + br_off_loss) / 2.0\n\n return corner_det_loss, pull_loss, push_loss, corner_off_loss, center_det_loss, center_off_loss\n\n def get_bboxes(self,\n tl_heats,\n br_heats,\n tl_embs,\n br_embs,\n tl_offs,\n br_offs,\n img_metas,\n center_heats,\n center_offs,\n rescale=False,\n with_nms=True):\n \"\"\"Transform network output for a batch into bbox predictions.\n\n Args:\n tl_heats (list[Tensor]): Top-left corner heatmaps for each level\n with shape (N, num_classes, H, W).\n br_heats (list[Tensor]): Bottom-right corner heatmaps for each\n level with shape (N, num_classes, H, W).\n tl_embs (list[Tensor]): Top-left corner embeddings for each level\n with shape (N, corner_emb_channels, H, W).\n br_embs (list[Tensor]): Bottom-right corner embeddings for each\n level with shape (N, corner_emb_channels, H, W).\n tl_offs (list[Tensor]): Top-left corner offsets for each level\n with shape (N, corner_offset_channels, H, W).\n br_offs (list[Tensor]): Bottom-right corner offsets for each level\n with shape (N, corner_offset_channels, H, W).\n img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n center_heats (list[Tensor]): Center heatmaps for each level with\n shape (N, num_classes, H, W).\n center_offs (list[Tensor]): Center offsets for each level with\n shape (N, 2, H, W).\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n with_nms (bool): If True, do nms before returning boxes.\n Default: True.\n \"\"\" \n assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == center_heats[-1].shape[0] == len(img_metas)\n result_list = []\n for img_id in range(len(img_metas)):\n result_list.append(\n self._get_bboxes_single(\n tl_heats[-1][img_id:img_id + 1, :],\n br_heats[-1][img_id:img_id + 1, :],\n tl_embs[-1][img_id:img_id + 1, :],\n br_embs[-1][img_id:img_id + 1, :],\n tl_offs[-1][img_id:img_id + 1, :],\n br_offs[-1][img_id:img_id + 1, :],\n img_metas[img_id],\n center_heats[-1][img_id:img_id + 1, :],\n center_offs[-1][img_id:img_id + 1, :],\n rescale=rescale,\n with_nms=with_nms))\n\n return result_list\n\n def _get_bboxes_single(self,\n tl_heat,\n br_heat,\n tl_emb,\n br_emb,\n tl_off,\n br_off,\n img_meta,\n ct_heat,\n ct_off,\n rescale=False,\n with_nms=True):\n \"\"\"Transform outputs for a single batch item into bbox predictions.\n\n Args:\n tl_heat (Tensor): Top-left corner heatmap for current level with\n shape (N, num_classes, H, W).\n br_heat (Tensor): Bottom-right corner heatmap for current level\n with shape (N, num_classes, H, W).\n tl_emb (Tensor): Top-left corner embedding for current level with\n shape (N, corner_emb_channels, H, W).\n br_emb (Tensor): Bottom-right corner embedding for current level\n with shape (N, corner_emb_channels, H, W).\n tl_off (Tensor): Top-left corner offset for current level with\n shape (N, corner_offset_channels, H, W).\n br_off (Tensor): Bottom-right corner 
offset for current level with\n shape (N, corner_offset_channels, H, W).\n img_meta (dict): Meta information of current image, e.g.,\n image size, scaling factor, etc.\n ct_heat (Tensor): Center heatmap for current level with shape\n (N, num_classes, H, W).\n ct_off (Tensor): Center offset for current level with shape\n (N, 2, H, W).\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n with_nms (bool): If True, do nms before returning boxes.\n Default: True.\n \"\"\"\n if isinstance(img_meta, (list, tuple)):\n img_meta = img_meta[0]\n\n batch_detections, _ = self.decode_heatmap(\n tl_heat=tl_heat.sigmoid(),\n br_heat=br_heat.sigmoid(),\n tl_off=tl_off,\n br_off=br_off,\n ct_heat=ct_heat,\n ct_off=ct_off,\n tl_emb=tl_emb,\n br_emb=br_emb,\n img_meta=img_meta,\n k=self.test_cfg.corner_topk,\n kernel=self.test_cfg.local_maximum_kernel,\n distance_threshold=self.test_cfg.distance_threshold)\n\n # decode_heatmap packs [bboxes, scores, tl_scores, br_scores, clses]\n # along the last dimension; unpack the fields needed here.\n batch_bboxes = batch_detections[..., :4]\n batch_scores = batch_detections[..., 4]\n batch_clses = batch_detections[..., -1]\n\n if rescale:\n batch_bboxes /= img_meta['scale_factor']\n\n bboxes = batch_bboxes.reshape([-1, 4])\n scores = batch_scores.reshape([-1, 1])\n clses = batch_clses.reshape([-1, 1])\n\n idx = scores.argsort(dim=0, descending=True)\n bboxes = bboxes[idx].view([-1, 4])\n scores = scores[idx].view(-1)\n clses = clses[idx].view(-1)\n\n detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1)\n keepinds = (detections[:, -1] > -0.1)\n detections = detections[keepinds]\n labels = clses[keepinds]\n\n if with_nms:\n detections, labels = self._bboxes_nms(detections, labels,\n self.test_cfg)\n\n return detections, labels\n\n def decode_heatmap(self,\n tl_heat,\n br_heat,\n tl_off,\n br_off,\n ct_heat,\n ct_off,\n tl_emb=None,\n br_emb=None,\n tl_centripetal_shift=None,\n br_centripetal_shift=None,\n img_meta=None,\n k=100,\n kernel=3,\n distance_threshold=0.5,\n num_dets=1000):\n \"\"\"Transform outputs for a single batch item into raw bbox predictions.\n\n Args:\n tl_heat (Tensor): Top-left corner heatmap for current level with\n shape (N, num_classes, H, W).\n br_heat (Tensor): Bottom-right corner heatmap for current level\n with shape (N, num_classes, H, W).\n tl_off (Tensor): Top-left corner offset for current level with\n shape (N, corner_offset_channels, H, W).\n br_off (Tensor): Bottom-right corner offset for current level with\n shape (N, corner_offset_channels, H, W).\n ct_heat (Tensor): Center heatmap for current level with shape\n (N, num_classes, H, W).\n ct_off (Tensor): Center offset for current level with shape\n (N, 2, H, W).\n tl_emb (Tensor | None): Top-left corner embedding for current\n level with shape (N, corner_emb_channels, H, W).\n br_emb (Tensor | None): Bottom-right corner embedding for current\n level with shape (N, corner_emb_channels, H, W).\n tl_centripetal_shift (Tensor | None): Top-left centripetal shift\n for current level with shape (N, 2, H, W).\n br_centripetal_shift (Tensor | None): Bottom-right centripetal\n shift for current level with shape (N, 2, H, W).\n img_meta (dict): Meta information of current image, e.g.,\n image size, scaling factor, etc.\n k (int): Get top k corner keypoints from heatmap.\n kernel (int): Max pooling kernel for extracting local maximum pixels.\n distance_threshold (float): Distance threshold. 
Top-left and\n bottom-right corner keypoints with feature distance less than\n the threshold will be regarded as keypoints from the same object.\n num_dets (int): Num of raw boxes before doing nms.\n\n Returns:\n tuple[torch.Tensor]: Decoded output of CornerHead, containing the\n following Tensors:\n\n - detections (Tensor): Bboxes, scores, tl_scores, br_scores and\n classes of each box, packed along the last dimension with\n shape (batch, num_dets, 8).\n - center (Tensor): X/y coords, class and score of each center\n keypoint, with shape (batch, k, 4).\n \"\"\"\n with_embedding = tl_emb is not None and br_emb is not None\n with_centripetal_shift = (\n tl_centripetal_shift is not None\n and br_centripetal_shift is not None)\n assert with_embedding + with_centripetal_shift == 1\n batch, _, height, width = tl_heat.size()\n inp_h, inp_w, _ = img_meta['pad_shape']\n\n # perform nms on heatmaps\n # 1. Use a max-pooling kernel (3x3 by default) to keep only the locally\n # maximal points as candidates, reducing computation.\n # 2. Run topk on each heatmap to get the k most likely corners.\n # 3. Pair every tl corner with every br corner and compute the matching\n # score and features of each resulting box.\n tl_heat = self._local_maximum(tl_heat, kernel=kernel)\n br_heat = self._local_maximum(br_heat, kernel=kernel)\n\n tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = self._topk(tl_heat, k=k)\n br_scores, br_inds, br_clses, br_ys, br_xs = self._topk(br_heat, k=k)\n\n tl_ys = tl_ys.view(batch, k, 1).expand(batch, k, k)\n tl_xs = tl_xs.view(batch, k, 1).expand(batch, k, k)\n br_ys = br_ys.view(batch, 1, k).expand(batch, k, k)\n br_xs = br_xs.view(batch, 1, k).expand(batch, k, k)\n\n tl_off = self._transpose_and_gather_feat(tl_off, tl_inds)\n tl_off = tl_off.view(batch, k, 1, 2)\n br_off = self._transpose_and_gather_feat(br_off, br_inds)\n br_off = br_off.view(batch, 1, k, 2)\n\n tl_xs = tl_xs + tl_off[..., 0]\n tl_ys = tl_ys + tl_off[..., 1]\n br_xs = br_xs + br_off[..., 0]\n br_ys = br_ys + br_off[..., 1]\n\n if with_centripetal_shift:\n tl_centripetal_shift = self._transpose_and_gather_feat(\n tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp()\n br_centripetal_shift = self._transpose_and_gather_feat(\n br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp()\n\n tl_ctxs = tl_xs + tl_centripetal_shift[..., 0]\n tl_ctys = tl_ys + tl_centripetal_shift[..., 1]\n br_ctxs = br_xs - br_centripetal_shift[..., 0]\n br_ctys = br_ys - br_centripetal_shift[..., 1]\n\n # all possible boxes based on top k corners (ignoring class)\n tl_xs *= (inp_w / width)\n tl_ys *= (inp_h / height)\n br_xs *= (inp_w / width)\n br_ys *= (inp_h / height)\n\n if with_centripetal_shift:\n tl_ctxs *= (inp_w / width)\n tl_ctys *= (inp_h / height)\n br_ctxs *= (inp_w / width)\n br_ctys *= (inp_h / height)\n\n x_off = img_meta['border'][2]\n y_off = img_meta['border'][0]\n\n tl_xs -= x_off\n tl_ys -= y_off\n br_xs -= x_off\n br_ys -= y_off\n\n # Zero out coordinates that are not positive\n tl_xs *= tl_xs.gt(0.0).type_as(tl_xs)\n tl_ys *= tl_ys.gt(0.0).type_as(tl_ys)\n br_xs *= br_xs.gt(0.0).type_as(br_xs)\n br_ys *= br_ys.gt(0.0).type_as(br_ys)\n\n bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3) # shape (batch, k, k, 4)\n area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs()\n\n # Compute the actual center keypoint positions, laid out to match bboxes\n # TODO: verify that this stays consistent with the corners above and\n # maps back to the right positions in the original image\n ct_heat = self._local_maximum(ct_heat, kernel=kernel)\n ct_scores, ct_inds, ct_clses, ct_ys, ct_xs = self._topk(ct_heat, k=k)\n # TODO: check whether this should follow the official CenterNet\n # implementation on GitHub\n # ct_ys = ct_ys.view(batch, k, 1)\n # ct_xs = ct_xs.view(batch, k, 1)\n ct_ys = ct_ys.view(batch, 1, k).expand(batch, k, k)\n ct_xs = ct_xs.view(batch, 1, k).expand(batch, k, k)\n ct_off = self._transpose_and_gather_feat(ct_off, ct_inds)\n # ct_off = ct_off.view(batch, k, 1, 2)\n ct_off = ct_off.view(batch, 1, k, 2)\n ct_xs = ct_xs + ct_off[..., 0]\n ct_ys = ct_ys + ct_off[..., 1]\n\n if with_centripetal_shift:\n print('This branch is broken; revisit line 976 of center_triplets_head')\n \n ct_xs *= (inp_w / width)\n ct_ys *= (inp_h / height)\n ct_xs -= x_off\n ct_ys -= y_off\n ct_xs *= ct_xs.gt(0.0).type_as(ct_xs) \n ct_ys *= ct_ys.gt(0.0).type_as(ct_ys)\n\n if with_centripetal_shift:\n tl_ctxs -= x_off\n tl_ctys -= y_off\n br_ctxs -= x_off\n br_ctys -= y_off\n\n tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs)\n tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys)\n br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs)\n br_ctys *= br_ctys.gt(0.0).type_as(br_ctys)\n\n ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys),\n dim=3)\n area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs()\n\n rcentral = torch.zeros_like(ct_bboxes)\n # magic nums from paper section 4.1\n mu = torch.ones_like(area_bboxes) / 2.4\n mu[area_bboxes > 3500] = 1 / 2.1 # large bboxes have a smaller mu\n\n bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2\n bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2\n rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] -\n bboxes[..., 0]) / 2\n rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] -\n bboxes[..., 1]) / 2\n rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] -\n bboxes[..., 0]) / 2\n rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] -\n bboxes[..., 1]) / 2\n area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) *\n (rcentral[..., 3] - rcentral[..., 1])).abs()\n dists = area_ct_bboxes / area_rcentral\n\n tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | (\n ct_bboxes[..., 0] >= rcentral[..., 2])\n tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | (\n ct_bboxes[..., 1] >= rcentral[..., 3])\n br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | (\n ct_bboxes[..., 2] >= rcentral[..., 2])\n br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | (\n ct_bboxes[..., 3] >= rcentral[..., 3])\n\n if with_embedding:\n tl_emb = self._transpose_and_gather_feat(tl_emb, tl_inds)\n tl_emb = tl_emb.view(batch, k, 1)\n br_emb = self._transpose_and_gather_feat(br_emb, br_inds)\n br_emb = br_emb.view(batch, 1, k)\n dists = torch.abs(tl_emb - br_emb)\n\n tl_scores = tl_scores.view(batch, k, 1).expand(batch, k, k)\n br_scores = br_scores.view(batch, 1, k).expand(batch, k, k)\n\n scores = (tl_scores + br_scores) / 2 # scores for all possible boxes\n\n # tl and br should have same class\n tl_clses = tl_clses.view(batch, k, 1).expand(batch, k, k)\n br_clses = br_clses.view(batch, 1, k).expand(batch, k, k)\n cls_inds = (tl_clses != br_clses)\n\n # reject boxes based on distances\n dist_inds = dists > distance_threshold\n\n # reject boxes based on widths and heights\n width_inds = (br_xs <= tl_xs)\n height_inds = (br_ys <= tl_ys)\n\n scores[cls_inds] = -1\n scores[width_inds] = -1\n scores[height_inds] = -1\n scores[dist_inds] = -1\n if with_centripetal_shift:\n scores[tl_ctx_inds] = -1\n scores[tl_cty_inds] = -1\n scores[br_ctx_inds] = -1\n scores[br_cty_inds] = -1\n\n scores = scores.view(batch, -1)\n scores, inds = torch.topk(scores, num_dets)\n scores = scores.unsqueeze(2)\n\n bboxes = bboxes.view(batch, -1, 4)\n bboxes = self._gather_feat(bboxes, inds)\n\n clses = tl_clses.contiguous().view(batch, -1, 1)\n clses = self._gather_feat(clses, inds).float()\n\n tl_scores = tl_scores.contiguous().view(batch, -1, 1)\n tl_scores = self._gather_feat(tl_scores, inds).float()\n br_scores = br_scores.contiguous().view(batch, -1, 1)\n br_scores = self._gather_feat(br_scores, inds).float()\n\n ct_xs = ct_xs[:, 0, :]\n ct_ys = ct_ys[:, 0, :]\n\n 
center = torch.cat([ct_xs.unsqueeze(2), ct_ys.unsqueeze(2), ct_clses.float().unsqueeze(2), \n ct_scores.unsqueeze(2)], dim=2)\n detections = torch.cat([bboxes, scores, tl_scores, br_scores, clses], dim=2)\n\n return detections, center\n # return bboxes, scores, clses\n\n def _create_ct_bboxes(self, bboxes, n=3):\n \"\"\"Generate the central-region box of each bbox.\n\n Args:\n bboxes (Tensor [batch, k, k, 4]): Corner coordinates of all\n default candidate boxes.\n n (int): The central region covers 1/n of the whole box along\n each side. Default: 3.\n \n Returns:\n ct_bboxes (Tensor [batch, k, k, 4]): Corner coordinates of the\n computed central-region boxes.\n \"\"\"\n batch, k, k_b, c = bboxes.shape\n assert (k == k_b and c == 4), 'Unexpected bbox shape; check line 1077 of _center_triplets_head'\n\n ct_bboxes = torch.zeros_like(bboxes)\n ct_bboxes[..., 0] += ((n+1)*bboxes[..., 0] + (n-1)*bboxes[..., 2]) / (2*n)\n ct_bboxes[..., 1] += ((n+1)*bboxes[..., 1] + (n-1)*bboxes[..., 3]) / (2*n)\n ct_bboxes[..., 2] += ((n-1)*bboxes[..., 0] + (n+1)*bboxes[..., 2]) / (2*n)\n ct_bboxes[..., 3] += ((n-1)*bboxes[..., 1] + (n+1)*bboxes[..., 3]) / (2*n)\n\n return ct_bboxes\n\n def _local_maximum(self, heat, kernel=3):\n \"\"\"Extract local maximum pixels with the given kernel.\n Similar to NMS.\n\n Args:\n heat (Tensor): Target heatmap.\n kernel (int): Kernel size of max pooling. Default: 3.\n\n Returns:\n heat (Tensor): A heatmap where local maximum pixels keep their\n own values and all other positions are 0.\n \"\"\"\n \n pad = (kernel - 1) // 2\n hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)\n keep = (hmax == heat).float()\n return heat * keep\n \n def _topk(self, scores, k=20):\n \"\"\"Get top k positions from heatmap.\n\n Args:\n scores (Tensor): Target heatmap with shape\n [batch, num_classes, height, width].\n k (int): Target number. Default: 20.\n\n Returns:\n tuple[torch.Tensor]: Scores, indexes, categories and coords of\n topk keypoint. Containing following Tensors:\n\n - topk_scores (Tensor): Max scores of each topk keypoint.\n - topk_inds (Tensor): Indexes of each topk keypoint.\n - topk_clses (Tensor): Categories of each topk keypoint.\n - topk_ys (Tensor): Y-coord of each topk keypoint.\n - topk_xs (Tensor): X-coord of each topk keypoint.\n \"\"\"\n batch, _, height, width = scores.size()\n topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)\n topk_clses = (topk_inds / (height * width)).int()\n topk_inds = topk_inds % (height * width)\n topk_ys = (topk_inds / width).int().float()\n topk_xs = (topk_inds % width).int().float()\n return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs\n\n def _transpose_and_gather_feat(self, feat, ind):\n \"\"\"Transpose and gather feature according to index.\n\n Args:\n feat (Tensor): Target feature map.\n ind (Tensor): Target coord index.\n\n Returns:\n feat (Tensor): Transposed and gathered feature.\n \"\"\"\n feat = feat.permute(0, 2, 3, 1).contiguous()\n feat = feat.view(feat.size(0), -1, feat.size(3))\n feat = self._gather_feat(feat, ind)\n return feat\n \n def _gather_feat(self, feat, ind, mask=None):\n \"\"\"Gather feature according to index.\n\n Args:\n feat (Tensor): Target feature map.\n ind (Tensor): Target coord index.\n mask (Tensor | None): Mask of featuremap. Default: None.\n\n Returns:\n feat (Tensor): Gathered feature.\n \"\"\"\n dim = feat.size(2)\n ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)\n feat = feat.gather(1, ind)\n if mask is not None:\n mask = mask.unsqueeze(2).expand_as(feat)\n feat = feat[mask]\n feat = feat.view(-1, dim)\n return feat\n\n" ]
[ [ "torch.cat", "torch.stack", "torch.nn.ModuleList", "torch.nn.ReLU", "torch.abs", "torch.ones_like", "torch.zeros_like", "torch.nn.functional.max_pool2d", "torch.topk" ] ]
yxyang/fast_and_efficient
[ "1d284337461ee826497bfc811b2461aa0ffcd27f" ]
[ "src/convex_mpc_controller/locomotion_controller.py" ]
[ "\"\"\"A model based controller framework.\"\"\"\nfrom absl import logging\n\nfrom datetime import datetime\nimport enum\nimport ml_collections\nimport numpy as np\nimport os\nimport pickle\nimport pybullet\nfrom pybullet_utils import bullet_client\nimport threading\nimport time\nfrom typing import Tuple\n\nfrom src.convex_mpc_controller import com_velocity_estimator\nfrom src.convex_mpc_controller import offset_gait_generator\nfrom src.convex_mpc_controller import raibert_swing_leg_controller\nfrom src.convex_mpc_controller import torque_stance_leg_controller_mpc\nfrom src.convex_mpc_controller.gait_configs import crawl, trot, flytrot\nfrom src.robots import a1\nfrom src.robots import a1_robot\nfrom src.robots.motors import MotorCommand\nfrom src.robots.motors import MotorControlMode\n\n\nclass ControllerMode(enum.Enum):\n DOWN = 1\n STAND = 2\n WALK = 3\n TERMINATE = 4\n\n\nclass GaitType(enum.Enum):\n CRAWL = 1\n TROT = 2\n FLYTROT = 3\n\n\ndef get_sim_conf():\n config = ml_collections.ConfigDict()\n config.timestep: float = 0.002\n config.action_repeat: int = 1\n config.reset_time_s: float = 3.\n config.num_solver_iterations: int = 30\n config.init_position: Tuple[float, float, float] = (0., 0., 0.32)\n config.init_rack_position: Tuple[float, float, float] = [0., 0., 1]\n config.on_rack: bool = False\n return config\n\n\nclass LocomotionController(object):\n \"\"\"Generates the quadruped locomotion.\n\n The actual effect of this controller depends on the composition of each\n individual subcomponent.\n\n \"\"\"\n def __init__(self,\n use_real_robot: bool = False,\n show_gui: bool = False,\n logdir: str = 'logs/'):\n \"\"\"Initializes the class.\n\n Args:\n robot: A robot instance.\n gait_generator: Generates the leg swing/stance pattern.\n state_estimator: Estimates the state of the robot (e.g. 
center of mass\n position or velocity that may not be observable from sensors).\n swing_leg_controller: Generates motor actions for swing legs.\n stance_leg_controller: Generates motor actions for stance legs.\n clock: A real or fake clock source.\n \"\"\"\n self._use_real_robot = use_real_robot\n self._show_gui = show_gui\n self._setup_robot_and_controllers()\n self.reset_robot()\n self.reset_controllers()\n self._reset_time = self._clock()\n self._time_since_reset = 0\n self._logs = []\n self._logdir = logdir\n\n self._mode = ControllerMode.DOWN\n self.set_controller_mode(ControllerMode.STAND)\n self._gait = None\n self._desired_gait = GaitType.CRAWL\n self._handle_gait_switch()\n self.run_thread = threading.Thread(target=self.run)\n self.run_thread.start()\n\n def _setup_robot_and_controllers(self):\n # Construct robot\n if self._show_gui and not self._use_real_robot:\n p = bullet_client.BulletClient(connection_mode=pybullet.GUI)\n else:\n p = bullet_client.BulletClient(connection_mode=pybullet.DIRECT)\n\n p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)\n p.setAdditionalSearchPath('src/data')\n\n self.pybullet_client = p\n p.setPhysicsEngineParameter(numSolverIterations=30)\n p.setTimeStep(0.002)\n p.setGravity(0, 0, -9.8)\n p.setPhysicsEngineParameter(enableConeFriction=0)\n self.ground_id = p.loadURDF('plane.urdf')\n\n # Construct robot class:\n if self._use_real_robot:\n self._robot = a1_robot.A1Robot(\n pybullet_client=p,\n sim_conf=get_sim_conf(),\n motor_control_mode=MotorControlMode.HYBRID)\n else:\n self._robot = a1.A1(pybullet_client=p,\n sim_conf=get_sim_conf(),\n motor_control_mode=MotorControlMode.HYBRID)\n\n if self._show_gui and not self._use_real_robot:\n p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)\n self._clock = lambda: self._robot.time_since_reset\n\n self._gait_generator = offset_gait_generator.OffsetGaitGenerator(\n self._robot, [0., np.pi, np.pi, 0.])\n\n desired_speed, desired_twisting_speed = (0., 0.), 0.\n\n self._state_estimator = com_velocity_estimator.COMVelocityEstimator(\n self._robot, velocity_window_size=60, ground_normal_window_size=10)\n\n self._swing_controller = \\\n raibert_swing_leg_controller.RaibertSwingLegController(\n self._robot,\n self._gait_generator,\n self._state_estimator,\n desired_speed=desired_speed,\n desired_twisting_speed=desired_twisting_speed,\n desired_height=self._robot.mpc_body_height,\n foot_landing_clearance=0.01,\n foot_height=0.1,\n use_raibert_heuristic=True)\n\n mpc_friction_coef = 0.4\n self._stance_controller = \\\n torque_stance_leg_controller_mpc.TorqueStanceLegController(\n self._robot,\n self._gait_generator,\n self._state_estimator,\n desired_speed=(desired_speed[0], desired_speed[1]),\n desired_twisting_speed=desired_twisting_speed,\n desired_body_height=self._robot.mpc_body_height,\n body_mass=self._robot.mpc_body_mass,\n body_inertia=self._robot.mpc_body_inertia,\n friction_coeffs=np.ones(4) * mpc_friction_coef)\n\n @property\n def swing_leg_controller(self):\n return self._swing_controller\n\n @property\n def stance_leg_controller(self):\n return self._stance_controller\n\n @property\n def gait_generator(self):\n return self._gait_generator\n\n @property\n def state_estimator(self):\n return self._state_estimator\n\n @property\n def time_since_reset(self):\n return self._time_since_reset\n\n def reset_robot(self):\n self._robot.reset(hard_reset=False)\n if self._show_gui and not self._use_real_robot:\n self.pybullet_client.configureDebugVisualizer(\n self.pybullet_client.COV_ENABLE_RENDERING, 
1)\n\n def reset_controllers(self):\n # Resetting other components\n self._reset_time = self._clock()\n self._time_since_reset = 0\n self._gait_generator.reset()\n self._state_estimator.reset(self._time_since_reset)\n self._swing_controller.reset(self._time_since_reset)\n self._stance_controller.reset(self._time_since_reset)\n\n def update(self):\n self._time_since_reset = self._clock() - self._reset_time\n self._gait_generator.update()\n self._state_estimator.update(self._gait_generator.desired_leg_state)\n self._swing_controller.update(self._time_since_reset)\n future_contact_estimate = self._gait_generator.get_estimated_contact_states(\n torque_stance_leg_controller_mpc.PLANNING_HORIZON_STEPS,\n torque_stance_leg_controller_mpc.PLANNING_TIMESTEP)\n self._stance_controller.update(self._time_since_reset,\n future_contact_estimate)\n\n def get_action(self):\n \"\"\"Returns the control outputs (e.g. positions/torques) for all motors.\"\"\"\n swing_action = self._swing_controller.get_action()\n stance_action, qp_sol = self._stance_controller.get_action()\n\n actions = []\n for joint_id in range(self._robot.num_motors):\n if joint_id in swing_action:\n actions.append(swing_action[joint_id])\n else:\n assert joint_id in stance_action\n actions.append(stance_action[joint_id])\n\n vectorized_action = MotorCommand(\n desired_position=[action.desired_position for action in actions],\n kp=[action.kp for action in actions],\n desired_velocity=[action.desired_velocity for action in actions],\n kd=[action.kd for action in actions],\n desired_extra_torque=[\n action.desired_extra_torque for action in actions\n ])\n\n return vectorized_action, dict(qp_sol=qp_sol)\n\n def _get_stand_action(self):\n return MotorCommand(\n desired_position=self._robot.motor_group.init_positions,\n kp=self._robot.motor_group.kps,\n desired_velocity=0,\n kd=self._robot.motor_group.kds,\n desired_extra_torque=0)\n\n def _handle_mode_switch(self):\n if self._mode == self._desired_mode:\n return\n self._mode = self._desired_mode\n if self._desired_mode == ControllerMode.DOWN:\n logging.info(\"Entering joint damping mode.\")\n self._flush_logging()\n elif self._desired_mode == ControllerMode.STAND:\n logging.info(\"Standing up.\")\n self.reset_robot()\n else:\n logging.info(\"Walking.\")\n self.reset_controllers()\n self._start_logging()\n\n def _start_logging(self):\n self._logs = []\n\n def _update_logging(self, action, qp_sol):\n frame = dict(\n desired_speed=(self._swing_controller.desired_speed,\n self._swing_controller.desired_twisting_speed),\n timestamp=self._time_since_reset,\n base_rpy=self._robot.base_orientation_rpy,\n motor_angles=self._robot.motor_angles,\n base_vel=self._robot.motor_velocities,\n base_vels_body_frame=self._state_estimator.com_velocity_body_frame,\n base_rpy_rate=self._robot.base_rpy_rate,\n motor_vels=self._robot.motor_velocities,\n motor_torques=self._robot.motor_torques,\n contacts=self._robot.foot_contacts,\n desired_grf=qp_sol,\n robot_action=action,\n gait_generator_phase=self._gait_generator.current_phase.copy(),\n gait_generator_state=self._gait_generator.leg_state,\n ground_orientation=self._state_estimator.\n ground_orientation_world_frame,\n )\n self._logs.append(frame)\n\n def _flush_logging(self):\n if not os.path.exists(self._logdir):\n os.makedirs(self._logdir)\n filename = 'log_{}.pkl'.format(\n datetime.now().strftime('%Y_%m_%d_%H_%M_%S'))\n pickle.dump(self._logs, open(os.path.join(self._logdir, filename), 'wb'))\n logging.info(\"Data logged to: {}\".format(\n 
os.path.join(self._logdir, filename)))\n\n def _handle_gait_switch(self):\n if self._gait == self._desired_gait:\n return\n if self._desired_gait == GaitType.CRAWL:\n logging.info(\"Switched to Crawling gait.\")\n self._gait_config = crawl.get_config()\n elif self._desired_gait == GaitType.TROT:\n logging.info(\"Switched to Trotting gait.\")\n self._gait_config = trot.get_config()\n else:\n logging.info(\"Switched to Fly-Trotting gait.\")\n self._gait_config = flytrot.get_config()\n\n self._gait = self._desired_gait\n self._gait_generator.gait_params = self._gait_config.gait_parameters\n self._swing_controller.foot_height = self._gait_config.foot_clearance_max\n self._swing_controller.foot_landing_clearance = \\\n self._gait_config.foot_clearance_land\n\n def run(self):\n logging.info(\"Low level thread started...\")\n while True:\n self._handle_mode_switch()\n self._handle_gait_switch()\n self.update()\n if self._mode == ControllerMode.DOWN:\n time.sleep(0.1)\n elif self._mode == ControllerMode.STAND:\n action = self._get_stand_action()\n self._robot.step(action)\n time.sleep(0.001)\n elif self._mode == ControllerMode.WALK:\n action, qp_sol = self.get_action()\n self._robot.step(action)\n self._update_logging(action, qp_sol)\n else:\n logging.info(\"Running loop terminated, exiting...\")\n break\n\n # Camera setup:\n if self._show_gui:\n self.pybullet_client.resetDebugVisualizerCamera(\n cameraDistance=1.0,\n cameraYaw=30 + self._robot.base_orientation_rpy[2] / np.pi * 180,\n cameraPitch=-30,\n cameraTargetPosition=self._robot.base_position,\n )\n\n def set_controller_mode(self, mode):\n self._desired_mode = mode\n\n def set_gait(self, gait):\n self._desired_gait = gait\n\n @property\n def is_safe(self):\n if self.mode != ControllerMode.WALK:\n return True\n rot_mat = np.array(\n self._robot.pybullet_client.getMatrixFromQuaternion(\n self._state_estimator.com_orientation_quat_ground_frame)).reshape(\n (3, 3))\n up_vec = rot_mat[2, 2]\n base_height = self._robot.base_position[2]\n return up_vec > 0.85 and base_height > 0.18\n\n @property\n def mode(self):\n return self._mode\n\n def set_desired_speed(self, desired_lin_speed_ratio,\n desired_rot_speed_ratio):\n desired_lin_speed = (\n self._gait_config.max_forward_speed * desired_lin_speed_ratio[0],\n self._gait_config.max_side_speed * desired_lin_speed_ratio[1],\n 0,\n )\n desired_rot_speed = \\\n self._gait_config.max_rot_speed * desired_rot_speed_ratio\n self._swing_controller.desired_speed = desired_lin_speed\n self._swing_controller.desired_twisting_speed = desired_rot_speed\n self._stance_controller.desired_speed = desired_lin_speed\n self._stance_controller.desired_twisting_speed = desired_rot_speed\n\n def set_gait_parameters(self, gait_parameters):\n raise NotImplementedError()\n\n def set_qp_weight(self, qp_weight):\n raise NotImplementedError()\n\n def set_mpc_mass(self, mpc_mass):\n raise NotImplementedError()\n\n def set_mpc_inertia(self, mpc_inertia):\n raise NotImplementedError()\n\n def set_mpc_foot_friction(self, mpc_foot_friction):\n raise NotImplementedError()\n\n def set_foot_landing_clearance(self, foot_landing_clearance):\n raise NotImplementedError()\n" ]
[ [ "numpy.ones" ] ]
Mario-Kart-Felix/python-neo
[ "951c97cf9eb56f5489da88940de920329e0f4c1b" ]
[ "neo/test/iotest/test_nixio.py" ]
[ "# Copyright (c) 2016, German Neuroinformatics Node (G-Node)\n# Achilleas Koutsou <achilleas.k@gmail.com>\n#\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted under the terms of the BSD License. See\n# LICENSE file in the root of the Project.\n\"\"\"\nTests for NixIO\n\"\"\"\n\nimport os\nimport shutil\n\ntry:\n from collections.abc import Iterable\nexcept ImportError:\n from collections import Iterable\nfrom datetime import date, time, datetime\n\nfrom tempfile import mkdtemp\nfrom itertools import chain\nimport unittest\nimport string\nimport numpy as np\nimport quantities as pq\n\nfrom neo.core import (Block, Segment, AnalogSignal,\n IrregularlySampledSignal, SpikeTrain,\n Event, Epoch, ImageSequence, Group, ChannelView)\nfrom neo.test.iotest.common_io_test import BaseTestIO\nfrom neo.io.nixio import (NixIO, create_quantity, units_to_string, neover,\n dt_from_nix, dt_to_nix, DATETIMEANNOTATION)\nfrom neo.io.nixio_fr import NixIO as NixIO_lazy\nfrom neo.io.proxyobjects import (AnalogSignalProxy, SpikeTrainProxy,\n EventProxy, EpochProxy)\n\ntry:\n import nixio as nix\n\n HAVE_NIX = True\nexcept ImportError:\n HAVE_NIX = False\n\ntry:\n from unittest import mock\n\n SKIPMOCK = False\nexcept ImportError:\n SKIPMOCK = True\n\n\n@unittest.skipUnless(HAVE_NIX, \"Requires NIX\")\nclass NixIOTest(unittest.TestCase):\n io = None\n tempdir = None\n filename = None\n\n def compare_blocks(self, neoblocks, nixblocks):\n for neoblock, nixblock in zip(neoblocks, nixblocks):\n self.compare_attr(neoblock, nixblock)\n self.assertEqual(len(neoblock.segments),\n len([grp for grp in nixblock.groups if grp.type == \"neo.segment\"]))\n self.assertEqual(len(neoblock.groups),\n len([grp for grp in nixblock.groups if grp.type == \"neo.group\"]))\n for idx, neoseg in enumerate(neoblock.segments):\n nixgrp = nixblock.groups[neoseg.annotations[\"nix_name\"]]\n self.compare_segment_group(neoseg, nixgrp)\n self.check_refs(neoblock, nixblock)\n\n def check_refs(self, neoblock, nixblock):\n \"\"\"\n Checks whether the references between objects that are not nested are\n mapped correctly (e.g., SpikeTrains referenced by a Unit).\n\n :param neoblock: A Neo block\n :param nixblock: The corresponding NIX block\n \"\"\"\n\n # Events and Epochs must reference all Signals in the Group (NIX only)\n for nixgroup in nixblock.groups:\n nixevep = list(mt for mt in nixgroup.multi_tags\n if mt.type in [\"neo.event\", \"neo.epoch\"])\n nixsigs = list(da.name for da in nixgroup.data_arrays\n if da.type in [\"neo.analogsignal\",\n \"neo.irregularlysampledsignal\"])\n for nee in nixevep:\n for ns in nixsigs:\n self.assertIn(ns, nee.references)\n\n def compare_segment_group(self, neoseg, nixgroup):\n self.compare_attr(neoseg, nixgroup)\n neo_signals = neoseg.analogsignals + neoseg.irregularlysampledsignals \\\n + neoseg.imagesequences\n self.compare_signals_das(neo_signals, nixgroup.data_arrays)\n neo_eests = neoseg.epochs + neoseg.events + neoseg.spiketrains\n self.compare_eests_mtags(neo_eests, nixgroup.multi_tags)\n\n def compare_signals_das(self, neosignals, data_arrays):\n totalsignals = 0\n for sig in neosignals:\n dalist = list()\n nixname = sig.annotations[\"nix_name\"]\n for da in data_arrays:\n if da.metadata.name == nixname:\n dalist.append(da)\n nsig = np.shape(sig)[-1]\n totalsignals += nsig\n self.assertEqual(nsig, len(dalist))\n self.compare_signal_dalist(sig, dalist)\n self.assertEqual(totalsignals, len(data_arrays))\n\n def 
compare_signal_dalist(self, neosig, nixdalist):\n \"\"\"\n Check if a Neo Analog or IrregularlySampledSignal matches a list of\n NIX DataArrays.\n\n :param neosig: Neo Analog or IrregularlySampledSignal\n :param nixdalist: List of DataArrays\n \"\"\"\n nixmd = nixdalist[0].metadata\n self.assertTrue(all(nixmd == da.metadata for da in nixdalist))\n neounit = neosig.units\n if isinstance(neosig, AnalogSignalProxy):\n neosig = neosig.load()\n for sig, da in zip(np.transpose(neosig), nixdalist):\n self.compare_attr(neosig, da)\n daquant = create_quantity(da[:], da.unit)\n np.testing.assert_almost_equal(sig.view(pq.Quantity), daquant)\n nixunit = create_quantity(1, da.unit)\n self.assertEqual(neounit, nixunit)\n\n if isinstance(neosig, AnalogSignal):\n timedim = da.dimensions[0]\n self.assertEqual(timedim.dimension_type,\n nix.DimensionType.Sample)\n neosp = neosig.sampling_period\n nixsp = create_quantity(timedim.sampling_interval,\n timedim.unit)\n self.assertEqual(neosp, nixsp)\n tsunit = timedim.unit\n if \"t_start.units\" in da.metadata.props:\n tsunit = da.metadata[\"t_start.units\"]\n neots = neosig.t_start\n nixts = create_quantity(timedim.offset, tsunit)\n self.assertEqual(neots, nixts)\n elif isinstance(neosig, IrregularlySampledSignal):\n timedim = da.dimensions[0]\n self.assertEqual(timedim.dimension_type,\n nix.DimensionType.Range)\n np.testing.assert_almost_equal(neosig.times.magnitude,\n timedim.ticks)\n self.assertEqual(timedim.unit,\n units_to_string(neosig.times.units))\n elif isinstance(neosig, ImageSequence):\n rate = da.metadata[\"sampling_rate\"]\n unit = da.metadata.props[\"sampling_rate\"].unit\n sampling_rate = create_quantity(rate, unit)\n neosr = neosig.sampling_rate\n self.assertEqual(sampling_rate, neosr)\n scale = da.metadata[\"spatial_scale\"]\n unit = da.metadata.props[\"spatial_scale\"].unit\n spatial_scale = create_quantity(scale, unit)\n neosps = neosig.spatial_scale\n self.assertEqual(spatial_scale, neosps)\n\n def compare_eests_mtags(self, eestlist, mtaglist):\n self.assertEqual(len(eestlist), len(mtaglist))\n for eest in eestlist:\n if isinstance(eest, (EventProxy, EpochProxy, SpikeTrainProxy)):\n eest = eest.load()\n mtag = mtaglist[eest.annotations[\"nix_name\"]]\n if isinstance(eest, Epoch):\n self.compare_epoch_mtag(eest, mtag)\n elif isinstance(eest, Event):\n self.compare_event_mtag(eest, mtag)\n elif isinstance(eest, SpikeTrain):\n self.compare_spiketrain_mtag(eest, mtag)\n else:\n self.fail(\"Stray object\")\n\n def compare_epoch_mtag(self, epoch, mtag):\n self.assertEqual(mtag.type, \"neo.epoch\")\n self.compare_attr(epoch, mtag)\n pos = mtag.positions\n posquant = create_quantity(pos[:], pos.unit)\n ext = mtag.extents\n extquant = create_quantity(ext[:], ext.unit)\n np.testing.assert_almost_equal(epoch.as_quantity(), posquant)\n np.testing.assert_almost_equal(epoch.durations, extquant)\n for neol, nixl in zip(epoch.labels,\n mtag.positions.dimensions[0].labels):\n self.assertEqual(neol, nixl)\n\n def compare_event_mtag(self, event, mtag):\n self.assertEqual(mtag.type, \"neo.event\")\n self.compare_attr(event, mtag)\n pos = mtag.positions\n posquant = create_quantity(pos[:], pos.unit)\n np.testing.assert_almost_equal(event.as_quantity(), posquant)\n for neol, nixl in zip(event.labels,\n mtag.positions.dimensions[0].labels):\n self.assertEqual(neol, nixl)\n\n def compare_spiketrain_mtag(self, spiketrain, mtag):\n self.assertEqual(mtag.type, \"neo.spiketrain\")\n self.compare_attr(spiketrain, mtag)\n pos = mtag.positions\n posquant = 
create_quantity(pos[:], pos.unit)\n np.testing.assert_almost_equal(spiketrain.as_quantity(), posquant)\n if len(mtag.features):\n neowfs = spiketrain.waveforms\n nixwfs = mtag.features[0].data\n self.assertEqual(np.shape(neowfs), np.shape(nixwfs))\n for nixwf, neowf in zip(nixwfs, neowfs):\n for nixrow, neorow in zip(nixwf, neowf):\n for nixv, neov in zip(nixrow, neorow):\n self.assertEqual(create_quantity(nixv, nixwfs.unit),\n neov)\n self.assertEqual(nixwfs.dimensions[0].dimension_type,\n nix.DimensionType.Set)\n self.assertEqual(nixwfs.dimensions[1].dimension_type,\n nix.DimensionType.Set)\n self.assertEqual(nixwfs.dimensions[2].dimension_type,\n nix.DimensionType.Sample)\n\n def compare_attr(self, neoobj, nixobj):\n if isinstance(neoobj, (AnalogSignal, IrregularlySampledSignal,\n ImageSequence)):\n nix_name = \".\".join(nixobj.name.split(\".\")[:-1])\n else:\n nix_name = nixobj.name\n\n self.assertEqual(neoobj.annotations[\"nix_name\"], nix_name)\n self.assertEqual(neoobj.description, nixobj.definition)\n if hasattr(neoobj, \"rec_datetime\") and neoobj.rec_datetime:\n self.assertEqual(neoobj.rec_datetime,\n datetime.fromtimestamp(nixobj.created_at))\n if hasattr(neoobj, \"file_datetime\") and neoobj.file_datetime:\n nixdt = dt_from_nix(nixobj.metadata[\"file_datetime\"],\n DATETIMEANNOTATION)\n assert neoobj.file_datetime == nixdt\n self.assertEqual(neoobj.file_datetime, nixdt)\n if neoobj.annotations:\n nixmd = nixobj.metadata\n for k, v, in neoobj.annotations.items():\n if k == \"nix_name\":\n continue\n if isinstance(v, pq.Quantity):\n nixunit = nixmd.props[str(k)].unit\n self.assertEqual(nixunit, units_to_string(v.units))\n nixvalue = nixmd[str(k)]\n if isinstance(nixvalue, Iterable):\n nixvalue = np.array(nixvalue)\n np.testing.assert_almost_equal(nixvalue, v.magnitude)\n else:\n self.assertEqual(nixmd[str(k)], v,\n \"Property value mismatch: {}\".format(k))\n if hasattr(neoobj, 'array_annotations'):\n if neoobj.array_annotations:\n nixmd = nixobj.metadata\n for k, v, in neoobj.array_annotations.items():\n if k in ['labels', 'durations']:\n continue\n if isinstance(v, pq.Quantity):\n nixunit = nixmd.props[str(k)].unit\n self.assertEqual(nixunit, units_to_string(v.units))\n nixvalue = nixmd[str(k)]\n if isinstance(nixvalue, Iterable):\n nixvalue = np.array(nixvalue)\n np.testing.assert_almost_equal(nixvalue, v.magnitude)\n if isinstance(v, np.ndarray):\n self.assertTrue(np.all(v == nixmd[str(k)]))\n else:\n msg = \"Property value mismatch: {}\".format(k)\n self.assertEqual(nixmd[str(k)], v, msg)\n\n @classmethod\n def create_full_nix_file(cls, filename):\n nixfile = nix.File.open(filename, nix.FileMode.Overwrite)\n\n nix_block_a = nixfile.create_block(cls.rword(10), \"neo.block\")\n nix_block_a.definition = cls.rsentence(5, 10)\n nix_block_b = nixfile.create_block(cls.rword(10), \"neo.block\")\n nix_block_b.definition = cls.rsentence(3, 3)\n\n nix_block_a.metadata = nixfile.create_section(\n nix_block_a.name, nix_block_a.name + \".metadata\"\n )\n nix_block_a.metadata[\"neo_name\"] = cls.rword(5)\n\n nix_block_b.metadata = nixfile.create_section(\n nix_block_b.name, nix_block_b.name + \".metadata\"\n )\n nix_block_b.metadata[\"neo_name\"] = cls.rword(5)\n\n nix_blocks = [nix_block_a, nix_block_b]\n\n for blk in nix_blocks:\n for ind in range(3):\n group = blk.create_group(cls.rword(), \"neo.segment\")\n group.definition = cls.rsentence(10, 15)\n\n group_md = blk.metadata.create_section(\n group.name, group.name + \".metadata\"\n )\n group.metadata = group_md\n\n blk = 
nix_blocks[0]\n group = blk.groups[0]\n allspiketrains = list()\n allsignalgroups = list()\n\n # analogsignals\n for n in range(5):\n siggroup = list()\n asig_name = \"{}_asig{}\".format(cls.rword(10), n)\n asig_definition = cls.rsentence(5, 5)\n asig_md = group.metadata.create_section(asig_name,\n asig_name + \".metadata\")\n\n arr_ann_name, arr_ann_val = 'anasig_arr_ann', cls.rquant(10, pq.uV)\n asig_md.create_property(arr_ann_name,\n arr_ann_val.magnitude.flatten())\n asig_md.props[arr_ann_name].unit = str(arr_ann_val.dimensionality)\n asig_md.props[arr_ann_name].type = 'ARRAYANNOTATION'\n\n for idx in range(10):\n da_asig = blk.create_data_array(\n \"{}.{}\".format(asig_name, idx),\n \"neo.analogsignal\",\n data=cls.rquant(100, 1)\n )\n da_asig.definition = asig_definition\n da_asig.unit = \"mV\"\n\n da_asig.metadata = asig_md\n\n timedim = da_asig.append_sampled_dimension(0.01)\n timedim.unit = \"ms\"\n timedim.label = \"time\"\n timedim.offset = 10\n da_asig.append_set_dimension()\n group.data_arrays.append(da_asig)\n siggroup.append(da_asig)\n asig_md[\"t_start.dim\"] = \"ms\"\n allsignalgroups.append(siggroup)\n # imagesequence\n for n in range(5):\n imgseqgroup = list()\n imgseq_name = \"{}_imgs{}\".format(cls.rword(10), n)\n imgseq_definition = cls.rsentence(5, 5)\n imgseq_md = group.metadata.create_section(imgseq_name,\n imgseq_name + \".metadata\")\n\n arr_ann_name, arr_ann_val = 'imgseq_arr_ann', cls.rquant(10, pq.V)\n imgseq_md.create_property(arr_ann_name,\n arr_ann_val.magnitude.flatten())\n imgseq_md.props[arr_ann_name].unit = str(arr_ann_val.dimensionality)\n imgseq_md.props[arr_ann_name].type = 'ARRAYANNOTATION'\n\n for idx in range(10):\n da_imgseq = blk.create_data_array(\n \"{}.{}\".format(imgseq_name, idx),\n \"neo.imagesequence\",\n data=cls.rquant((20, 10), 1)\n )\n da_imgseq.definition = imgseq_definition\n da_imgseq.unit = \"mV\"\n\n da_imgseq.metadata = imgseq_md\n imgseq_md[\"sampling_rate\"] = 10\n imgseq_md.props[\"sampling_rate\"].unit = units_to_string(pq.V)\n imgseq_md[\"spatial_scale\"] = 10\n imgseq_md.props[\"spatial_scale\"].unit = units_to_string(pq.micrometer)\n\n group.data_arrays.append(da_imgseq)\n imgseqgroup.append(da_imgseq)\n\n allsignalgroups.append(imgseqgroup)\n # irregularlysampledsignals\n for n in range(2):\n siggroup = list()\n isig_name = \"{}_isig{}\".format(cls.rword(10), n)\n isig_definition = cls.rsentence(12, 12)\n isig_md = group.metadata.create_section(isig_name,\n isig_name + \".metadata\")\n isig_times = cls.rquant(200, 1, True)\n arr_ann_name, arr_ann_val = 'irrsig_arr_ann', cls.rquant(7, pq.uV)\n isig_md.create_property(arr_ann_name,\n arr_ann_val.magnitude.flatten())\n isig_md.props[arr_ann_name].unit = str(arr_ann_val.dimensionality)\n isig_md.props[arr_ann_name].type = 'ARRAYANNOTATION'\n for idx in range(7):\n da_isig = blk.create_data_array(\n \"{}.{}\".format(isig_name, idx),\n \"neo.irregularlysampledsignal\",\n data=cls.rquant(200, 1)\n )\n da_isig.definition = isig_definition\n da_isig.unit = \"mV\"\n\n da_isig.metadata = isig_md\n\n timedim = da_isig.append_range_dimension(isig_times)\n timedim.unit = \"s\"\n timedim.label = \"time\"\n da_isig.append_set_dimension()\n group.data_arrays.append(da_isig)\n siggroup.append(da_isig)\n allsignalgroups.append(siggroup)\n # SpikeTrains with Waveforms\n for n in range(4):\n stname = \"{}-st{}\".format(cls.rword(20), n)\n times = cls.rquant(40, 1, True)\n times_da = blk.create_data_array(\n \"{}.times\".format(stname),\n \"neo.spiketrain.times\",\n data=times\n )\n 
times_da.unit = \"ms\"\n mtag_st = blk.create_multi_tag(stname, \"neo.spiketrain\", times_da)\n group.multi_tags.append(mtag_st)\n mtag_st.definition = cls.rsentence(20, 30)\n mtag_st_md = group.metadata.create_section(\n mtag_st.name, mtag_st.name + \".metadata\"\n )\n mtag_st.metadata = mtag_st_md\n mtag_st_md.create_property(\"t_stop\", times[-1] + 1.0)\n\n arr_ann_name, arr_ann_val = 'st_arr_ann', cls.rquant(40, pq.uV)\n mtag_st_md.create_property(arr_ann_name,\n arr_ann_val.magnitude.flatten())\n mtag_st_md.props[arr_ann_name].unit = str(arr_ann_val.dimensionality)\n mtag_st_md.props[arr_ann_name].type = 'ARRAYANNOTATION'\n\n waveforms = cls.rquant((10, 8, 5), 1)\n wfname = \"{}.waveforms\".format(mtag_st.name)\n wfda = blk.create_data_array(wfname, \"neo.waveforms\",\n data=waveforms)\n wfda.unit = \"mV\"\n mtag_st.create_feature(wfda, nix.LinkType.Indexed)\n wfda.append_set_dimension() # spike dimension\n wfda.append_set_dimension() # channel dimension\n wftimedim = wfda.append_sampled_dimension(0.1)\n wftimedim.unit = \"ms\"\n wftimedim.label = \"time\"\n wfda.metadata = mtag_st_md.create_section(\n wfname, \"neo.waveforms.metadata\"\n )\n wfda.metadata.create_property(\"left_sweep\",\n [20] * 5)\n allspiketrains.append(mtag_st)\n\n # Epochs\n for n in range(3):\n epname = \"{}-ep{}\".format(cls.rword(5), n)\n times = cls.rquant(5, 1, True)\n times_da = blk.create_data_array(\n \"{}.times\".format(epname),\n \"neo.epoch.times\",\n data=times\n )\n times_da.unit = \"s\"\n\n extents = cls.rquant(5, 1)\n extents_da = blk.create_data_array(\n \"{}.durations\".format(epname),\n \"neo.epoch.durations\",\n data=extents\n )\n extents_da.unit = \"s\"\n mtag_ep = blk.create_multi_tag(\n epname, \"neo.epoch\", times_da\n )\n mtag_ep.metadata = group.metadata.create_section(\n epname, epname + \".metadata\"\n )\n group.multi_tags.append(mtag_ep)\n mtag_ep.definition = cls.rsentence(2)\n mtag_ep.extents = extents_da\n\n arr_ann_name, arr_ann_val = 'ep_arr_ann', cls.rquant(5, pq.uV)\n mtag_ep.metadata.create_property(arr_ann_name,\n arr_ann_val.magnitude.flatten())\n mtag_ep.metadata.props[arr_ann_name].unit = str(arr_ann_val.dimensionality)\n mtag_ep.metadata.props[arr_ann_name].type = 'ARRAYANNOTATION'\n\n label_dim = mtag_ep.positions.append_set_dimension()\n label_dim.labels = cls.rsentence(5).split(\" \")\n # reference all signals in the group\n for siggroup in allsignalgroups:\n mtag_ep.references.extend(siggroup)\n\n # Events\n for n in range(2):\n evname = \"{}-ev{}\".format(cls.rword(5), n)\n times = cls.rquant(5, 1, True)\n times_da = blk.create_data_array(\n \"{}.times\".format(evname),\n \"neo.event.times\",\n data=times\n )\n times_da.unit = \"s\"\n\n mtag_ev = blk.create_multi_tag(\n evname, \"neo.event\", times_da\n )\n mtag_ev.metadata = group.metadata.create_section(\n evname, evname + \".metadata\"\n )\n group.multi_tags.append(mtag_ev)\n mtag_ev.definition = cls.rsentence(2)\n\n arr_ann_name, arr_ann_val = 'ev_arr_ann',\\\n cls.rquant(5, pq.uV)\n mtag_ev.metadata.create_property(arr_ann_name,\n arr_ann_val.magnitude.flatten())\n mtag_ev.metadata.props[arr_ann_name].unit = str(arr_ann_val.dimensionality)\n mtag_ev.metadata.props[arr_ann_name].type = 'ARRAYANNOTATION'\n\n label_dim = mtag_ev.positions.append_set_dimension()\n label_dim.labels = cls.rsentence(5).split(\" \")\n # reference all signals in the group\n for siggroup in allsignalgroups:\n mtag_ev.references.extend(siggroup)\n\n # CHX\n nixchx = blk.create_source(cls.rword(10),\n \"neo.channelindex\")\n nixchx.metadata 
= nix_blocks[0].metadata.create_section(\n nixchx.name, \"neo.channelindex.metadata\"\n )\n chantype = \"neo.channelindex\"\n # 3 channels\n for idx, chan in enumerate([2, 5, 9]):\n channame = \"{}.ChannelIndex{}\".format(nixchx.name, idx)\n nixrc = nixchx.create_source(channame, chantype)\n nixrc.definition = cls.rsentence(13)\n nixrc.metadata = nixchx.metadata.create_section(\n nixrc.name, \"neo.channelindex.metadata\"\n )\n nixrc.metadata.create_property(\"index\", chan)\n nixrc.metadata.create_property(\"channel_id\", chan + 1)\n dims = cls.rquant(3, 1)\n coordprop = nixrc.metadata.create_property(\"coordinates\", dims)\n coordprop.unit = \"pm\"\n\n nunits = 1\n stsperunit = np.array_split(allspiketrains, nunits)\n for idx in range(nunits):\n unitname = \"{}-unit{}\".format(cls.rword(5), idx)\n nixunit = nixchx.create_source(unitname, \"neo.unit\")\n nixunit.metadata = nixchx.metadata.create_section(\n unitname, unitname + \".metadata\"\n )\n nixunit.definition = cls.rsentence(4, 10)\n for st in stsperunit[idx]:\n st.sources.append(nixchx)\n st.sources.append(nixunit)\n\n # pick a few signal groups to reference this CHX\n rand_idxs = np.random.choice(range(len(allsignalgroups)), 5, False)\n randsiggroups = [allsignalgroups[idx] for idx in rand_idxs]\n for siggroup in randsiggroups:\n for sig in siggroup:\n sig.sources.append(nixchx)\n\n return nixfile\n\n @staticmethod\n def rdate():\n return datetime(year=np.random.randint(1980, 2020),\n month=np.random.randint(1, 13),\n day=np.random.randint(1, 29))\n\n @classmethod\n def populate_dates(cls, obj):\n obj.file_datetime = cls.rdate()\n obj.rec_datetime = cls.rdate()\n\n @staticmethod\n def rword(n=10):\n return \"\".join(np.random.choice(list(string.ascii_letters), n))\n\n @classmethod\n def rsentence(cls, n=3, maxwl=10):\n return \" \".join(cls.rword(np.random.randint(1, maxwl))\n for _ in range(n))\n\n @classmethod\n def rdict(cls, nitems):\n rd = dict()\n for _ in range(nitems):\n key = cls.rword()\n value = cls.rword() if np.random.choice((0, 1)) \\\n else np.random.uniform()\n rd[key] = value\n return rd\n\n @staticmethod\n def rquant(shape, unit, incr=False):\n try:\n dim = len(shape)\n except TypeError:\n dim = 1\n if incr and dim > 1:\n raise TypeError(\"Shape of quantity array may only be \"\n \"one-dimensional when incremental values are \"\n \"requested.\")\n arr = np.random.random(shape)\n if incr:\n arr = np.array(np.cumsum(arr))\n return arr * unit\n\n @classmethod\n def create_all_annotated(cls):\n times = cls.rquant(10, pq.s, incr=True)\n times_ann = {cls.rword(6): cls.rquant(10, pq.ms)}\n signal = cls.rquant((10, 10), pq.V)\n signal_ann = {cls.rword(6): cls.rquant(10, pq.uV)}\n blk = Block()\n blk.annotate(**cls.rdict(3))\n cls.populate_dates(blk)\n\n seg = Segment()\n seg.annotate(**cls.rdict(4))\n cls.populate_dates(seg)\n blk.segments.append(seg)\n\n asig = AnalogSignal(signal=signal, sampling_rate=pq.Hz,\n array_annotations=signal_ann)\n asig.annotate(**cls.rdict(2))\n seg.analogsignals.append(asig)\n\n isig = IrregularlySampledSignal(times=times, signal=signal,\n time_units=pq.s,\n array_annotations=signal_ann)\n isig.annotate(**cls.rdict(2))\n seg.irregularlysampledsignals.append(isig)\n\n epoch = Epoch(times=times, durations=times,\n array_annotations=times_ann)\n epoch.annotate(**cls.rdict(4))\n seg.epochs.append(epoch)\n\n event = Event(times=times, array_annotations=times_ann)\n event.annotate(**cls.rdict(4))\n seg.events.append(event)\n\n spiketrain = SpikeTrain(times=times, t_stop=10 * pq.s,\n units=pq.s, 
array_annotations=times_ann)\n d = cls.rdict(6)\n d[\"quantity\"] = pq.Quantity(10, \"mV\")\n d[\"qarray\"] = pq.Quantity(range(10), \"mA\")\n spiketrain.annotate(**d)\n seg.spiketrains.append(spiketrain)\n\n chx = Group(name=\"achx\", index=[1, 2], channel_ids=[0, 10])\n chx.annotate(**cls.rdict(5))\n blk.groups.append(chx)\n\n unit = Group()\n unit.annotate(**cls.rdict(2))\n chx.add(unit)\n\n return blk\n\n\n@unittest.skipUnless(HAVE_NIX, \"Requires NIX\")\nclass NixIOWriteTest(NixIOTest):\n def setUp(self):\n self.tempdir = mkdtemp(prefix=\"nixiotest\")\n self.filename = os.path.join(self.tempdir, \"testnixio.nix\")\n self.writer = NixIO(self.filename, \"ow\")\n self.io = self.writer\n self.reader = nix.File.open(self.filename, nix.FileMode.ReadOnly)\n\n def tearDown(self):\n self.writer.close()\n self.reader.close()\n shutil.rmtree(self.tempdir)\n\n def write_and_compare(self, blocks, use_obj_names=False):\n self.writer.write_all_blocks(blocks, use_obj_names)\n self.compare_blocks(blocks, self.reader.blocks)\n self.compare_blocks(self.writer.read_all_blocks(), self.reader.blocks)\n self.compare_blocks(blocks, self.reader.blocks)\n\n def test_block_write(self):\n block = Block(name=self.rword(),\n description=self.rsentence())\n self.write_and_compare([block])\n\n block.annotate(**self.rdict(5))\n self.write_and_compare([block])\n\n def test_segment_write(self):\n block = Block(name=self.rword())\n segment = Segment(name=self.rword(), description=self.rword())\n block.segments.append(segment)\n self.write_and_compare([block])\n\n segment.annotate(**self.rdict(2))\n self.write_and_compare([block])\n\n def test_signals_write(self):\n block = Block()\n seg = Segment()\n block.segments.append(seg)\n\n asig = AnalogSignal(signal=self.rquant((19, 15), pq.mV),\n sampling_rate=pq.Quantity(10, \"Hz\"))\n seg.analogsignals.append(asig)\n self.write_and_compare([block])\n\n anotherblock = Block(\"ir signal block\")\n seg = Segment(\"ir signal seg\")\n anotherblock.segments.append(seg)\n irsig = IrregularlySampledSignal(\n signal=np.random.random((20, 30)),\n times=self.rquant(20, pq.ms, incr=True),\n units=pq.A\n )\n seg.irregularlysampledsignals.append(irsig)\n self.write_and_compare([block, anotherblock])\n\n block.segments[0].analogsignals.append(\n AnalogSignal(signal=[10.0, 1.0, 3.0], units=pq.S,\n sampling_period=pq.Quantity(3, \"s\"),\n dtype=np.double, name=\"signal42\",\n description=\"this is an analogsignal\",\n t_start=45 * pq.ms),\n )\n self.write_and_compare([block, anotherblock])\n\n block.segments[0].irregularlysampledsignals.append(\n IrregularlySampledSignal(times=np.sort(np.random.random(10)),\n signal=np.random.random((10, 13)),\n units=\"mV\", time_units=\"s\",\n dtype=np.float32,\n name=\"some sort of signal\",\n description=\"the signal is described\")\n )\n self.write_and_compare([block, anotherblock])\n\n def test_imagesequence_write(self):\n block = Block()\n seg = Segment()\n block.segments.append(seg)\n\n imgseq = ImageSequence(image_data=self.rquant((19, 10, 15), 1),\n sampling_rate=pq.Quantity(10, \"Hz\"),\n spatial_scale=pq.Quantity(10, \"micrometer\"),\n units=pq.V)\n seg.imagesequences.append(imgseq)\n self.write_and_compare([block])\n\n def test_signals_compound_units(self):\n block = Block()\n seg = Segment()\n block.segments.append(seg)\n\n units = pq.CompoundUnit(\"1/30000*V\")\n srate = pq.Quantity(10, pq.CompoundUnit(\"1.0/10 * Hz\"))\n asig = AnalogSignal(signal=self.rquant((10, 23), units),\n sampling_rate=srate)\n seg.analogsignals.append(asig)\n\n 
self.write_and_compare([block])\n\n anotherblock = Block(\"ir signal block\")\n seg = Segment(\"ir signal seg\")\n anotherblock.segments.append(seg)\n irsig = IrregularlySampledSignal(\n signal=np.random.random((20, 3)),\n times=self.rquant(20, pq.CompoundUnit(\"0.1 * ms\"), True),\n units=pq.CompoundUnit(\"10 * V / s\")\n )\n seg.irregularlysampledsignals.append(irsig)\n self.write_and_compare([block, anotherblock])\n\n block.segments[0].analogsignals.append(\n AnalogSignal(signal=[10.0, 1.0, 3.0], units=pq.S,\n sampling_period=pq.Quantity(3, \"s\"),\n dtype=np.double, name=\"signal42\",\n description=\"this is an analogsignal\",\n t_start=45 * pq.CompoundUnit(\"3.14 * s\")),\n )\n self.write_and_compare([block, anotherblock])\n\n times = self.rquant(10, pq.CompoundUnit(\"3 * year\"), True)\n block.segments[0].irregularlysampledsignals.append(\n IrregularlySampledSignal(times=times,\n signal=np.random.random((10, 3)),\n units=\"mV\", dtype=float,\n name=\"some sort of signal\",\n description=\"the signal is described\")\n )\n\n self.write_and_compare([block, anotherblock])\n\n def test_imagesequence_compound_units(self):\n block = Block()\n seg = Segment()\n block.segments.append(seg)\n\n units = pq.CompoundUnit(\"1/30000*V\")\n srate = pq.Quantity(10, pq.CompoundUnit(\"1.0/10 * Hz\"))\n size = pq.Quantity(10, pq.CompoundUnit(\"1.0/10 * micrometer\"))\n imgseq = ImageSequence(image_data=self.rquant((10, 20, 10), units),\n sampling_rate=srate, spatial_scale=size)\n seg.imagesequences.append(imgseq)\n\n self.write_and_compare([block])\n\n def test_epoch_write(self):\n block = Block()\n seg = Segment()\n block.segments.append(seg)\n\n epoch = Epoch(times=[1, 1, 10, 3] * pq.ms,\n durations=[3, 3, 3, 1] * pq.ms,\n labels=np.array([\"one\", \"two\", \"three\", \"four\"], dtype='U'),\n name=\"test epoch\", description=\"an epoch for testing\")\n\n seg.epochs.append(epoch)\n self.write_and_compare([block])\n\n def test_event_write(self):\n block = Block()\n seg = Segment()\n block.segments.append(seg)\n\n event = Event(times=np.arange(0, 30, 10) * pq.s,\n labels=np.array([\"0\", \"1\", \"2\"], dtype='U'),\n name=\"event name\",\n description=\"event description\")\n seg.events.append(event)\n self.write_and_compare([block])\n\n def test_spiketrain_write(self):\n block = Block()\n seg = Segment()\n block.segments.append(seg)\n\n spiketrain = SpikeTrain(times=[3, 4, 5] * pq.s, t_stop=10.0,\n name=\"spikes!\", description=\"sssssspikes\")\n seg.spiketrains.append(spiketrain)\n self.write_and_compare([block])\n\n waveforms = self.rquant((3, 5, 10), pq.mV)\n spiketrain = SpikeTrain(times=[1, 1.1, 1.2] * pq.ms, t_stop=1.5 * pq.s,\n name=\"spikes with wf\",\n description=\"spikes for waveform test\",\n waveforms=waveforms)\n\n seg.spiketrains.append(spiketrain)\n self.write_and_compare([block])\n\n spiketrain.left_sweep = np.random.random(10) * pq.ms\n self.write_and_compare([block])\n\n spiketrain.left_sweep = pq.Quantity(-10, \"ms\")\n self.write_and_compare([block])\n\n def test_group_write(self):\n signals = [\n AnalogSignal(np.random.random(size=(1000, 5)) * pq.mV,\n sampling_period=1 * pq.ms, name=\"sig1\"),\n AnalogSignal(np.random.random(size=(1000, 3)) * pq.mV,\n sampling_period=1 * pq.ms, name=\"sig2\"),\n ]\n spiketrains = [\n SpikeTrain([0.1, 54.3, 76.6, 464.2], units=pq.ms,\n t_stop=1000.0 * pq.ms, t_start=0.0 * pq.ms),\n SpikeTrain([30.1, 154.3, 276.6, 864.2], units=pq.ms,\n t_stop=1000.0 * pq.ms, t_start=0.0 * pq.ms),\n SpikeTrain([120.1, 454.3, 576.6, 764.2], units=pq.ms,\n t_stop=1000.0 
* pq.ms, t_start=0.0 * pq.ms),\n ]\n epochs = [\n Epoch(times=[0, 500], durations=[100, 100], units=pq.ms, labels=[\"A\", \"B\"])\n ]\n\n seg = Segment(name=\"seg1\")\n seg.analogsignals.extend(signals)\n seg.spiketrains.extend(spiketrains)\n seg.epochs.extend(epochs)\n for obj in chain(signals, spiketrains, epochs):\n obj.segment = seg\n\n views = [ChannelView(index=np.array([0, 3, 4]), obj=signals[0], name=\"view_of_sig1\")]\n groups = [\n Group(objects=(signals[0:1] + spiketrains[0:2] + epochs + views), name=\"group1\"),\n Group(objects=(signals[1:2] + spiketrains[1:] + epochs), name=\"group2\")\n ]\n\n block = Block(name=\"block1\")\n block.segments.append(seg)\n block.groups.extend(groups)\n for obj in chain([seg], groups):\n obj.block = block\n\n self.write_and_compare([block])\n\n def test_group_write_nested(self):\n signals = [\n AnalogSignal(np.random.random(size=(1000, 5)) * pq.mV,\n sampling_period=1 * pq.ms, name=\"sig1\"),\n AnalogSignal(np.random.random(size=(1000, 3)) * pq.mV,\n sampling_period=1 * pq.ms, name=\"sig2\"),\n ]\n spiketrains = [\n SpikeTrain([0.1, 54.3, 76.6, 464.2], units=pq.ms,\n t_stop=1000.0 * pq.ms, t_start=0.0 * pq.ms),\n SpikeTrain([30.1, 154.3, 276.6, 864.2], units=pq.ms,\n t_stop=1000.0 * pq.ms, t_start=0.0 * pq.ms),\n SpikeTrain([120.1, 454.3, 576.6, 764.2], units=pq.ms,\n t_stop=1000.0 * pq.ms, t_start=0.0 * pq.ms),\n ]\n epochs = [\n Epoch(times=[0, 500], durations=[100, 100], units=pq.ms, labels=[\"A\", \"B\"])\n ]\n\n seg = Segment(name=\"seg1\")\n seg.analogsignals.extend(signals)\n seg.spiketrains.extend(spiketrains)\n seg.epochs.extend(epochs)\n for obj in chain(signals, spiketrains, epochs):\n obj.segment = seg\n\n views = [ChannelView(index=np.array([0, 3, 4]), obj=signals[0], name=\"view_of_sig1\")]\n\n subgroup = Group(objects=(signals[0:1] + views), name=\"subgroup\")\n groups = [\n Group(objects=([subgroup] + spiketrains[0:2] + epochs), name=\"group1\"),\n Group(objects=(signals[1:2] + spiketrains[1:] + epochs), name=\"group2\")\n ]\n\n block = Block(name=\"block1\")\n block.segments.append(seg)\n block.groups.extend(groups)\n for obj in chain([seg], groups):\n obj.block = block\n\n self.write_and_compare([block])\n\n def test_metadata_structure_write(self):\n neoblk = self.create_all_annotated()\n self.io.write_block(neoblk)\n blk = self.io.nix_file.blocks[0]\n\n blkmd = blk.metadata\n self.assertEqual(blk.name, blkmd.name)\n\n grp = blk.groups[0] # segment\n self.assertIn(grp.name, blkmd.sections)\n\n grpmd = blkmd.sections[grp.name]\n for da in grp.data_arrays: # signals\n name = \".\".join(da.name.split(\".\")[:-1])\n self.assertIn(name, grpmd.sections)\n for mtag in grp.multi_tags: # spiketrains, events, and epochs\n self.assertIn(mtag.name, grpmd.sections)\n\n self.write_and_compare([neoblk])\n\n def test_anonymous_objects_write(self):\n nblocks = 2\n nsegs = 2\n nanasig = 4\n nimgseq = 4\n nirrseg = 2\n nepochs = 3\n nevents = 4\n nspiketrains = 3\n nchx = 5\n nunits = 10\n\n times = self.rquant(1, pq.s)\n signal = self.rquant(1, pq.V)\n\n blocks = []\n for blkidx in range(nblocks):\n blk = Block()\n blocks.append(blk)\n for segidx in range(nsegs):\n seg = Segment()\n blk.segments.append(seg)\n for anaidx in range(nanasig):\n seg.analogsignals.append(AnalogSignal(signal=signal,\n sampling_rate=pq.Hz))\n for imgseqdx in range(nimgseq):\n seg.imagesequences.append(ImageSequence(image_data=self.rquant(\n (10, 20, 10), pq.V),\n sampling_rate=pq.Hz,\n spatial_scale=pq.micrometer))\n for irridx in range(nirrseg):\n 
seg.irregularlysampledsignals.append(\n IrregularlySampledSignal(times=times,\n signal=signal,\n time_units=pq.s)\n )\n for epidx in range(nepochs):\n seg.epochs.append(Epoch(times=times, durations=times))\n for evidx in range(nevents):\n seg.events.append(Event(times=times))\n for stidx in range(nspiketrains):\n seg.spiketrains.append(SpikeTrain(times=times,\n t_stop=times[-1] + pq.s,\n units=pq.s))\n for chidx in range(nchx):\n chx = Group(index=[1, 2],\n channel_ids=[11, 22])\n blk.groups.append(chx)\n for unidx in range(nunits):\n unit = Group()\n chx.add(unit)\n self.writer.write_all_blocks(blocks)\n self.compare_blocks(blocks, self.reader.blocks)\n\n with self.assertRaises(ValueError):\n self.writer.write_all_blocks(blocks, use_obj_names=True)\n\n def test_name_objects_write(self):\n nblocks = 2\n nsegs = 2\n nanasig = 4\n nimgseq = 2\n nirrseg = 2\n nepochs = 3\n nevents = 4\n nspiketrains = 3\n nchx = 5\n nunits = 10\n\n times = self.rquant(1, pq.s)\n signal = self.rquant(1, pq.V)\n blocks = []\n for blkidx in range(nblocks):\n blk = Block(name=\"block{}\".format(blkidx))\n blocks.append(blk)\n for segidx in range(nsegs):\n seg = Segment(name=\"seg{}\".format(segidx))\n blk.segments.append(seg)\n for anaidx in range(nanasig):\n asig = AnalogSignal(\n name=\"{}:as{}\".format(seg.name, anaidx),\n signal=signal, sampling_rate=pq.Hz\n )\n seg.analogsignals.append(asig)\n # imagesequence\n for imgseqdx in range(nimgseq):\n imseq = ImageSequence(\n name=\"{}:imgs{}\".format(seg.name, imgseqdx),\n image_data=np.random.rand(20, 10, 10), units=pq.mV,\n sampling_rate=pq.Hz, spatial_scale=pq.micrometer\n )\n seg.imagesequences.append(imseq)\n for irridx in range(nirrseg):\n isig = IrregularlySampledSignal(\n name=\"{}:is{}\".format(seg.name, irridx),\n times=times,\n signal=signal,\n time_units=pq.s\n )\n seg.irregularlysampledsignals.append(isig)\n for epidx in range(nepochs):\n seg.epochs.append(\n Epoch(name=\"{}:ep{}\".format(seg.name, epidx),\n times=times, durations=times)\n )\n for evidx in range(nevents):\n seg.events.append(\n Event(name=\"{}:ev{}\".format(seg.name, evidx),\n times=times)\n )\n for stidx in range(nspiketrains):\n seg.spiketrains.append(\n SpikeTrain(name=\"{}:st{}\".format(seg.name, stidx),\n times=times,\n t_stop=times[-1] + pq.s,\n units=pq.s)\n )\n for chidx in range(nchx):\n chx = Group(name=\"chx{}\".format(chidx),\n index=[1, 2],\n channel_ids=[11, 22])\n blk.groups.append(chx)\n for unidx in range(nunits):\n unit = Group(name=\"chx{}-unit{}\".format(chidx, unidx))\n chx.add(unit)\n\n # put guard on _generate_nix_name\n if not SKIPMOCK:\n nixgenmock = mock.Mock(name=\"_generate_nix_name\",\n wraps=self.io._generate_nix_name)\n self.io._generate_nix_name = nixgenmock\n self.writer.write_block(blocks[0], use_obj_names=True)\n self.compare_blocks([blocks[0]], self.reader.blocks)\n self.compare_blocks(self.writer.read_all_blocks(), self.reader.blocks)\n self.compare_blocks(blocks, self.reader.blocks)\n if not SKIPMOCK:\n nixgenmock.assert_not_called()\n\n self.write_and_compare(blocks, use_obj_names=True)\n if not SKIPMOCK:\n nixgenmock.assert_not_called()\n\n self.assertEqual(self.reader.blocks[0].name, \"block0\")\n\n blocks[0].name = blocks[1].name # name conflict\n with self.assertRaises(ValueError):\n self.writer.write_all_blocks(blocks, use_obj_names=True)\n blocks[0].name = \"new name\"\n self.assertEqual(blocks[0].segments[1].spiketrains[1].name, \"seg1:st1\")\n st0 = blocks[0].segments[0].spiketrains[0].name\n blocks[0].segments[0].spiketrains[1].name = 
st0 # name conflict\n with self.assertRaises(ValueError):\n self.writer.write_all_blocks(blocks, use_obj_names=True)\n with self.assertRaises(ValueError):\n self.writer.write_block(blocks[0], use_obj_names=True)\n if not SKIPMOCK:\n nixgenmock.assert_not_called()\n\n def test_name_conflicts(self):\n # anon block\n blk = Block()\n with self.assertRaises(ValueError):\n self.io.write_block(blk, use_obj_names=True)\n\n # two anon blocks\n blocks = [Block(), Block()]\n with self.assertRaises(ValueError):\n self.io.write_all_blocks(blocks, use_obj_names=True)\n\n # same name blocks\n blocks = [Block(name=\"one\"), Block(name=\"one\")]\n with self.assertRaises(ValueError):\n self.io.write_all_blocks(blocks, use_obj_names=True)\n\n # one block, two same name segments\n blk = Block(\"new\")\n seg = Segment(\"I am the segment\", a=\"a annoation\")\n blk.segments.append(seg)\n seg = Segment(\"I am the segment\", a=\"b annotation\")\n blk.segments.append(seg)\n with self.assertRaises(ValueError):\n self.io.write_block(blk, use_obj_names=True)\n\n times = self.rquant(1, pq.s)\n signal = self.rquant(1, pq.V)\n # name conflict: analog + irregular signals\n seg.analogsignals.append(\n AnalogSignal(name=\"signal\", signal=signal, sampling_rate=pq.Hz)\n )\n seg.imagesequences.append(\n ImageSequence(name='signal',\n image_data=self.rquant((10, 20, 10), pq.V),\n sampling_rate=pq.Hz,\n spatial_scale=pq.micrometer))\n\n seg.irregularlysampledsignals.append(\n IrregularlySampledSignal(name=\"signal\", signal=signal, times=times)\n )\n blk = Block(name=\"Signal conflict Block\")\n blk.segments.append(seg)\n with self.assertRaises(ValueError):\n self.io.write_block(blk, use_obj_names=True)\n\n # name conflict: event + spiketrain\n blk = Block(name=\"Event+SpikeTrain conflict Block\")\n seg = Segment(name=\"Event+SpikeTrain conflict Segment\")\n blk.segments.append(seg)\n seg.events.append(Event(name=\"TimeyStuff\", times=times))\n seg.spiketrains.append(SpikeTrain(name=\"TimeyStuff\", times=times,\n t_stop=pq.s))\n with self.assertRaises(ValueError):\n self.io.write_block(blk, use_obj_names=True)\n\n # make spiketrain anon\n blk.segments[0].spiketrains[0].name = None\n with self.assertRaises(ValueError):\n self.io.write_block(blk, use_obj_names=True)\n\n # name conflict in groups\n blk = Block(name=\"Group conflict Block\")\n blk.groups.append(Group(name=\"chax\", index=[1]))\n blk.groups.append(Group(name=\"chax\", index=[2]))\n with self.assertRaises(ValueError):\n self.io.write_block(blk, use_obj_names=True)\n\n # name conflict in sub-groups\n blk = Block(name=\"unitconf\")\n chx = Group(name=\"ok\", index=[100])\n blk.groups.append(chx)\n chx.add(Group(name=\"IHAVEATWIN\"))\n chx.add(Group(name=\"IHAVEATWIN\"))\n with self.assertRaises(ValueError):\n self.io.write_block(blk, use_obj_names=True)\n\n def test_multiref_write(self):\n blk = Block(\"blk1\")\n signal = AnalogSignal(name=\"sig1\", signal=[0, 1, 2], units=\"mV\",\n sampling_period=pq.Quantity(1, \"ms\"))\n othersignal = IrregularlySampledSignal(name=\"i1\", signal=[0, 0, 0],\n units=\"mV\", times=[1, 2, 3],\n time_units=\"ms\")\n imgseq = ImageSequence(name=\"img1\", image_data=self.rquant((10, 20, 10), pq.mV),\n frame_duration=pq.Quantity(1, \"ms\"),\n spatial_scale=pq.meter)\n event = Event(name=\"Evee\", times=[0.3, 0.42], units=\"year\")\n epoch = Epoch(name=\"epoche\", times=[0.1, 0.2] * pq.min,\n durations=[0.5, 0.5] * pq.min)\n st = SpikeTrain(name=\"the train of spikes\", times=[0.1, 0.2, 10.3],\n t_stop=11, units=\"us\")\n\n for idx in 
range(3):\n segname = \"seg\" + str(idx)\n seg = Segment(segname)\n blk.segments.append(seg)\n seg.analogsignals.append(signal)\n seg.imagesequences.append(imgseq)\n seg.irregularlysampledsignals.append(othersignal)\n seg.events.append(event)\n seg.epochs.append(epoch)\n seg.spiketrains.append(st)\n\n chidx = Group(index=[10, 20, 29])\n seg = blk.segments[0]\n st = SpikeTrain(name=\"choochoo\", times=[10, 11, 80], t_stop=1000,\n units=\"s\")\n seg.spiketrains.append(st)\n blk.groups.append(chidx)\n for idx in range(6):\n unit = Group(name=\"unit\" + str(idx))\n chidx.add(unit)\n unit.add(st)\n\n self.writer.write_block(blk)\n self.compare_blocks([blk], self.reader.blocks)\n\n # NOTE: storing data objects that are not within a segment is currently\n # disallowed. Leaving this test commented out until this policy\n # is properly discussed.\n # def test_no_segment_write(self):\n # # Tests storing AnalogSignal, IrregularlySampledSignal, and SpikeTrain\n # # objects in the secondary (Group) substructure without them\n # # being attached to a Segment.\n # blk = Block(\"segmentless block\")\n # signal = AnalogSignal(name=\"sig1\", signal=[0, 1, 2], units=\"mV\",\n # sampling_period=pq.Quantity(1, \"ms\"))\n # othersignal = IrregularlySampledSignal(name=\"i1\", signal=[0, 0, 0],\n # units=\"mV\", times=[1, 2, 3],\n # time_units=\"ms\")\n # sta = SpikeTrain(name=\"the train of spikes\", times=[0.1, 0.2, 10.3],\n # t_stop=11, units=\"us\")\n # stb = SpikeTrain(name=\"the train of spikes b\", times=[1.1, 2.2, 10.1],\n # t_stop=100, units=\"ms\")\n\n # chidx = Group(index=[8, 13, 21])\n # blk.groups.append(chidx)\n # chidx.add(signal)\n # chidx.add(othersignal)\n\n # unit = Group()\n # chidx.add(unit)\n # unit.add(sta, stb)\n # self.writer.write_block(blk)\n # self.writer.close()\n\n # self.compare_blocks([blk], self.reader.blocks)\n\n # reader = NixIO(self.filename, \"ro\")\n # blk = reader.read_block(neoname=\"segmentless block\")\n # chx = blk.groups[0]\n # self.assertEqual(len(chx.analogsignals), 1)\n # self.assertEqual(len(chx.irregularlysampledsignals), 1)\n # self.assertEqual(len(chx.units[0].spiketrains), 2)\n\n def test_rewrite_refs(self):\n\n def checksignalcounts(fname):\n with NixIO(fname, \"ro\") as r:\n blk = r.read_block()\n chidx = blk.groups[0]\n seg = blk.segments[0]\n self.assertEqual(len(chidx.analogsignals), 2)\n self.assertEqual(len(chidx.groups[0].spiketrains), 3)\n self.assertEqual(len(seg.analogsignals), 3)\n self.assertEqual(len(seg.spiketrains), 4)\n\n blk = Block()\n seg = Segment()\n blk.segments.append(seg)\n\n # Group replacing previous ChannelIndex\n chidx = Group(index=[1])\n blk.groups.append(chidx)\n\n # Two signals in Group\n for idx in range(2):\n asigchx = AnalogSignal(signal=[idx], units=\"mV\",\n sampling_rate=pq.Hz)\n chidx.add(asigchx)\n seg.analogsignals.append(asigchx)\n\n # Group replacing previous Unit\n unit = Group()\n chidx.add(unit)\n\n # Three SpikeTrains on Unit\n for idx in range(3):\n st = SpikeTrain([idx], units=\"ms\", t_stop=40)\n unit.add(st)\n seg.spiketrains.append(st)\n\n # One signal in Segment but not in Group\n asigseg = AnalogSignal(signal=[2], units=\"uA\",\n sampling_rate=pq.Hz)\n seg.analogsignals.append(asigseg)\n\n # One spiketrain in Segment but not in Group\n stseg = SpikeTrain([10], units=\"ms\", t_stop=40)\n seg.spiketrains.append(stseg)\n\n # Write, compare, and check counts\n self.writer.write_block(blk)\n self.compare_blocks([blk], self.reader.blocks)\n self.assertEqual(len(chidx.analogsignals), 2)\n 
self.assertEqual(len(seg.analogsignals), 3)\n self.assertEqual(len(chidx.groups[0].spiketrains), 3)\n self.assertEqual(len(seg.spiketrains), 4)\n\n # Check counts with separate reader\n checksignalcounts(self.filename)\n\n # Write again and check counts\n secondwrite = os.path.join(self.tempdir, \"testnixio-2.nix\")\n with NixIO(secondwrite, \"ow\") as w:\n w.write_block(blk)\n\n self.compare_blocks([blk], self.reader.blocks)\n\n # Read back and check counts\n scndreader = nix.File.open(secondwrite, mode=nix.FileMode.ReadOnly)\n self.compare_blocks([blk], scndreader.blocks)\n checksignalcounts(secondwrite)\n\n def test_to_value(self):\n section = self.io.nix_file.create_section(\"Metadata value test\",\n \"Test\")\n writeprop = self.io._write_property\n\n # quantity\n qvalue = pq.Quantity(10, \"mV\")\n writeprop(section, \"qvalue\", qvalue)\n self.assertEqual(section[\"qvalue\"], 10)\n self.assertEqual(section.props[\"qvalue\"].unit, \"mV\")\n\n # datetime\n dt = self.rdate()\n writeprop(section, \"dt\", dt)\n self.assertEqual(section[\"dt\"], dt_to_nix(dt)[0])\n\n # string\n randstr = self.rsentence()\n writeprop(section, \"randstr\", randstr)\n self.assertEqual(section[\"randstr\"], randstr)\n\n # bytes\n bytestring = b\"bytestring\"\n writeprop(section, \"randbytes\", bytestring)\n self.assertEqual(section[\"randbytes\"], bytestring.decode())\n\n # iterables\n randlist = np.random.random(10).tolist()\n writeprop(section, \"randlist\", randlist)\n self.assertEqual(randlist, section[\"randlist\"])\n\n randarray = np.random.random(10)\n writeprop(section, \"randarray\", randarray)\n np.testing.assert_almost_equal(randarray, section[\"randarray\"])\n\n # numpy item\n npval = np.float64(2398)\n writeprop(section, \"npval\", npval)\n self.assertEqual(npval, section[\"npval\"])\n\n # number\n val = 42\n writeprop(section, \"val\", val)\n self.assertEqual(val, section[\"val\"])\n\n # empty string (gets stored as empty list)\n writeprop(section, \"emptystring\", \"\")\n self.assertEqual(list(), section[\"emptystring\"])\n\n def test_annotations_special_cases(self):\n # Special cases for annotations: empty list, list of strings,\n # multidimensional lists/arrays\n # These are handled differently on read, so we test them on a block\n # instead of just checking the property writer method\n # empty value\n\n # empty list\n wblock = Block(\"block with empty list\", an_empty_list=list())\n self.writer.write_block(wblock)\n rblock = self.writer.read_block(neoname=\"block with empty list\")\n self.assertEqual(rblock.annotations[\"an_empty_list\"], list())\n\n # empty tuple (gets read out as list)\n wblock = Block(\"block with empty tuple\", an_empty_tuple=tuple())\n self.writer.write_block(wblock)\n rblock = self.writer.read_block(neoname=\"block with empty tuple\")\n self.assertEqual(rblock.annotations[\"an_empty_tuple\"], list())\n\n # list of strings\n losval = [\"one\", \"two\", \"one million\"]\n wblock = Block(\"block with list of strings\",\n los=losval)\n self.writer.write_block(wblock)\n rblock = self.writer.read_block(neoname=\"block with list of strings\")\n self.assertEqual(rblock.annotations[\"los\"], losval)\n\n # TODO: multi dimensional value (GH Issue #501)\n\n def test_empty_array_annotations(self):\n wblock = Block(\"block with spiketrain\")\n wseg = Segment()\n wseg.spiketrains = [SpikeTrain(times=[] * pq.s, t_stop=1 * pq.s,\n array_annotations={'empty': []})]\n wblock.segments = [wseg]\n self.writer.write_block(wblock)\n try:\n rblock = self.writer.read_block(neoname=\"block with 
spiketrain\")\n except Exception as exc:\n self.fail('The following exception was raised when'\n + ' reading the block with an empty array annotation:\\n'\n + str(exc))\n rst = rblock.segments[0].spiketrains[0]\n self.assertEqual(len(rst.array_annotations), 1)\n self.assertIn('empty', rst.array_annotations.keys())\n self.assertEqual(len(rst.array_annotations['empty']), 0)\n\n def test_write_proxyobjects(self):\n\n def generate_complete_block():\n block = Block()\n seg = Segment()\n block.segments.append(seg)\n\n # add spiketrain\n waveforms = self.rquant((3, 5, 10), pq.mV)\n spiketrain = SpikeTrain(times=[1, 1.1, 1.2] * pq.ms,\n t_stop=1.5 * pq.s,\n name=\"spikes with wf\",\n description=\"spikes for waveform test\",\n waveforms=waveforms)\n seg.spiketrains.append(spiketrain)\n # add imagesequence\n imgseq = ImageSequence(name=\"img1\",\n image_data=self.rquant((10, 20, 10), pq.mV),\n frame_duration=pq.Quantity(1, \"ms\"),\n spatial_scale=pq.meter)\n\n seg.imagesequences.append(imgseq)\n # add signals\n asig = AnalogSignal(signal=self.rquant((19, 15), pq.mV),\n sampling_rate=pq.Quantity(10, \"Hz\"))\n seg.analogsignals.append(asig)\n irsig = IrregularlySampledSignal(signal=np.random.random((20, 30)),\n times=self.rquant(20, pq.ms, True),\n units=pq.A)\n seg.irregularlysampledsignals.append(irsig)\n\n # add events and epochs\n epoch = Epoch(times=[1, 1, 10, 3] * pq.ms,\n durations=[3, 3, 3, 1] * pq.ms,\n labels=np.array([\"one\", \"two\", \"three\", \"four\"]),\n name=\"test epoch\", description=\"an epoch for testing\")\n seg.epochs.append(epoch)\n event = Event(times=np.arange(0, 30, 10) * pq.s,\n labels=np.array([\"0\", \"1\", \"2\"]),\n name=\"event name\",\n description=\"event description\")\n seg.events.append(event)\n\n # add channel index and unit\n channel = Group(index=[0], channel_names=['mychannelname'],\n channel_ids=[4],\n name=['testname'])\n block.groups.append(channel)\n unit = Group(name='myunit', description='blablabla',\n file_origin='fileA.nix',\n myannotation='myannotation')\n channel.add(unit)\n unit.add(spiketrain)\n\n # make sure everything is linked properly\n block.create_relationship()\n\n return block\n\n block = generate_complete_block()\n\n basename, ext = os.path.splitext(self.filename)\n filename2 = basename + '-2.' 
+ ext\n\n # writing block to file 1\n with NixIO(filename2, 'ow') as io:\n io.write_block(block)\n\n # reading data as lazy objects from file 1\n with NixIO_lazy(filename2) as io:\n block_lazy = io.read_block(lazy=True)\n\n self.write_and_compare([block_lazy])\n\n def test_annotation_types(self):\n annotations = {\n \"somedate\": self.rdate(),\n \"now\": datetime.now(),\n \"today\": date.today(),\n \"sometime\": time(13, 37, 42),\n \"somequantity\": self.rquant(10, pq.ms),\n \"somestring\": self.rsentence(3),\n \"npfloat\": np.float64(10),\n \"nparray\": np.array([1, 2, 400]),\n \"emptystr\": \"\",\n }\n wblock = Block(\"annotation_block\", **annotations)\n self.writer.write_block(wblock)\n rblock = self.writer.read_block(neoname=\"annotation_block\")\n for k in annotations:\n orig = annotations[k]\n readval = rblock.annotations[k]\n if isinstance(orig, np.ndarray):\n np.testing.assert_almost_equal(orig, readval)\n else:\n self.assertEqual(annotations[k], rblock.annotations[k])\n\n\n@unittest.skipUnless(HAVE_NIX, \"Requires NIX\")\nclass NixIOReadTest(NixIOTest):\n nixfile = None\n nix_blocks = None\n\n @classmethod\n def setUpClass(cls):\n cls.tempdir = mkdtemp(prefix=\"nixiotest\")\n cls.filename = os.path.join(cls.tempdir, \"testnixio.nix\")\n if HAVE_NIX:\n cls.nixfile = cls.create_full_nix_file(cls.filename)\n\n def setUp(self):\n self.io = NixIO(self.filename, \"ro\")\n\n @classmethod\n def tearDownClass(cls):\n if HAVE_NIX:\n cls.nixfile.close()\n shutil.rmtree(cls.tempdir)\n\n def tearDown(self):\n self.io.close()\n\n def test_all_read(self):\n neo_blocks = self.io.read_all_blocks()\n nix_blocks = self.io.nix_file.blocks\n self.compare_blocks(neo_blocks, nix_blocks)\n\n def test_iter_read(self):\n blocknames = [blk.name for blk in self.nixfile.blocks]\n for blk, nixname in zip(self.io.iter_blocks(), blocknames):\n self.assertEqual(blk.annotations[\"nix_name\"], nixname)\n\n def test_nix_name_read(self):\n for nixblock in self.nixfile.blocks:\n nixname = nixblock.name\n neoblock = self.io.read_block(nixname=nixname)\n self.assertEqual(neoblock.annotations[\"nix_name\"], nixname)\n\n def test_index_read(self):\n for idx, nixblock in enumerate(self.nixfile.blocks):\n neoblock = self.io.read_block(index=idx)\n self.assertEqual(neoblock.annotations[\"nix_name\"], nixblock.name)\n self.assertEqual(neoblock.annotations[\"nix_name\"],\n self.nixfile.blocks[idx].name)\n\n def test_auto_index_read(self):\n for nixblock in self.nixfile.blocks:\n neoblock = self.io.read_block() # don't specify index\n self.assertEqual(neoblock.annotations[\"nix_name\"], nixblock.name)\n\n # No more blocks - should return None\n self.assertIs(self.io.read_block(), None)\n self.assertIs(self.io.read_block(), None)\n self.assertIs(self.io.read_block(), None)\n\n with NixIO(self.filename, \"ro\") as nf:\n neoblock = nf.read_block(index=1)\n self.assertEqual(self.nixfile.blocks[1].name,\n neoblock.annotations[\"nix_name\"])\n\n neoblock = nf.read_block() # should start again from 0\n self.assertEqual(self.nixfile.blocks[0].name,\n neoblock.annotations[\"nix_name\"])\n\n def test_neo_name_read(self):\n for nixblock in self.nixfile.blocks:\n neoname = nixblock.metadata[\"neo_name\"]\n neoblock = self.io.read_block(neoname=neoname)\n self.assertEqual(neoblock.annotations[\"nix_name\"], nixblock.name)\n\n def test_array_annotations_read(self):\n for bl in self.io.read_all_blocks():\n nix_block = self.nixfile.blocks[bl.annotations['nix_name']]\n for seg in bl.segments:\n\n for anasig in seg.analogsignals:\n da = 
nix_block.data_arrays[anasig.annotations['nix_name'] + '.0']\n self.assertIn('anasig_arr_ann', da.metadata)\n self.assertIn('anasig_arr_ann', anasig.array_annotations)\n nix_ann = da.metadata['anasig_arr_ann']\n neo_ann = anasig.array_annotations['anasig_arr_ann']\n self.assertTrue(np.all(nix_ann == neo_ann.magnitude))\n self.assertEqual(da.metadata.props['anasig_arr_ann'].unit,\n units_to_string(neo_ann.units))\n for irrsig in seg.irregularlysampledsignals:\n da = nix_block.data_arrays[irrsig.annotations['nix_name'] + '.0']\n self.assertIn('irrsig_arr_ann', da.metadata)\n self.assertIn('irrsig_arr_ann', irrsig.array_annotations)\n nix_ann = da.metadata['irrsig_arr_ann']\n neo_ann = irrsig.array_annotations['irrsig_arr_ann']\n self.assertTrue(np.all(nix_ann == neo_ann.magnitude))\n self.assertEqual(da.metadata.props['irrsig_arr_ann'].unit,\n units_to_string(neo_ann.units))\n for imgseq in seg.imagesequences:\n da = nix_block.data_arrays[imgseq.annotations['nix_name'] + '.0']\n self.assertIn('imgseq_arr_ann', da.metadata)\n self.assertIn('imgseq_arr_ann', imgseq.array_annotations)\n nix_ann = da.metadata['imgseq_arr_ann']\n neo_ann = imgseq.array_annotations['imgseq_arr_ann']\n self.assertTrue(np.all(nix_ann == neo_ann.magnitude))\n self.assertEqual(da.metadata.props['imgseq_arr_ann'].unit,\n units_to_string(neo_ann.units))\n for ev in seg.events:\n da = nix_block.multi_tags[ev.annotations['nix_name']]\n self.assertIn('ev_arr_ann', da.metadata)\n self.assertIn('ev_arr_ann', ev.array_annotations)\n nix_ann = da.metadata['ev_arr_ann']\n neo_ann = ev.array_annotations['ev_arr_ann']\n self.assertTrue(np.all(nix_ann == neo_ann.magnitude))\n self.assertEqual(da.metadata.props['ev_arr_ann'].unit,\n units_to_string(neo_ann.units))\n for ep in seg.epochs:\n da = nix_block.multi_tags[ep.annotations['nix_name']]\n self.assertIn('ep_arr_ann', da.metadata)\n self.assertIn('ep_arr_ann', ep.array_annotations)\n nix_ann = da.metadata['ep_arr_ann']\n neo_ann = ep.array_annotations['ep_arr_ann']\n self.assertTrue(np.all(nix_ann == neo_ann.magnitude))\n self.assertEqual(da.metadata.props['ep_arr_ann'].unit,\n units_to_string(neo_ann.units))\n for st in seg.spiketrains:\n da = nix_block.multi_tags[st.annotations['nix_name']]\n self.assertIn('st_arr_ann', da.metadata)\n self.assertIn('st_arr_ann', st.array_annotations)\n nix_ann = da.metadata['st_arr_ann']\n neo_ann = st.array_annotations['st_arr_ann']\n self.assertTrue(np.all(nix_ann == neo_ann.magnitude))\n self.assertEqual(da.metadata.props['st_arr_ann'].unit,\n units_to_string(neo_ann.units))\n\n def test_read_blocks_are_writable(self):\n filename = os.path.join(self.tempdir, \"testnixio_out.nix\")\n writer = NixIO(filename, \"ow\")\n\n blocks = self.io.read_all_blocks()\n\n try:\n writer.write_all_blocks(blocks)\n except Exception as exc:\n self.fail('The following exception was raised when'\n + ' writing the blocks loaded with NixIO:\\n'\n + str(exc))\n\n\n@unittest.skipUnless(HAVE_NIX, \"Requires NIX\")\nclass NixIOContextTests(NixIOTest):\n def setUp(self):\n self.tempdir = mkdtemp(prefix=\"nixiotest\")\n self.filename = os.path.join(self.tempdir, \"testnixio.nix\")\n\n def tearDown(self):\n shutil.rmtree(self.tempdir)\n\n def test_context_write(self):\n neoblock = Block(name=self.rword(), description=self.rsentence())\n with NixIO(self.filename, \"ow\") as iofile:\n iofile.write_block(neoblock)\n\n nixfile = nix.File.open(self.filename, nix.FileMode.ReadOnly)\n self.compare_blocks([neoblock], nixfile.blocks)\n nixfile.close()\n\n 
neoblock.annotate(**self.rdict(5))\n with NixIO(self.filename, \"rw\") as iofile:\n iofile.write_block(neoblock)\n nixfile = nix.File.open(self.filename, nix.FileMode.ReadOnly)\n self.compare_blocks([neoblock], nixfile.blocks)\n nixfile.close()\n\n def test_context_read(self):\n nixfile = nix.File.open(self.filename, nix.FileMode.Overwrite)\n name_one = self.rword()\n name_two = self.rword()\n nixfile.create_block(name_one, \"neo.block\")\n nixfile.create_block(name_two, \"neo.block\")\n nixfile.close()\n\n with NixIO(self.filename, \"ro\") as iofile:\n blocks = iofile.read_all_blocks()\n\n self.assertEqual(blocks[0].annotations[\"nix_name\"], name_one)\n self.assertEqual(blocks[1].annotations[\"nix_name\"], name_two)\n\n\n@unittest.skipUnless(HAVE_NIX, \"Requires NIX\")\nclass NixIOVerTests(NixIOTest):\n def setUp(self):\n self.tempdir = mkdtemp(prefix=\"nixiotest\")\n self.filename = os.path.join(self.tempdir, \"testnixio.nix\")\n\n def tearDown(self):\n shutil.rmtree(self.tempdir)\n\n def test_new_file(self):\n with NixIO(self.filename, \"ow\") as iofile:\n self.assertEqual(iofile._file_version, neover)\n\n nixfile = nix.File.open(self.filename, nix.FileMode.ReadOnly)\n filever = nixfile.sections[\"neo\"][\"version\"]\n self.assertEqual(filever, neover)\n nixfile.close()\n\n def test_oldfile_nover(self):\n nixfile = nix.File.open(self.filename, nix.FileMode.Overwrite)\n nixfile.close()\n with NixIO(self.filename, \"ro\") as iofile:\n self.assertEqual(iofile._file_version, '0.5.2') # compat version\n\n nixfile = nix.File.open(self.filename, nix.FileMode.ReadOnly)\n self.assertNotIn(\"neo\", nixfile.sections)\n nixfile.close()\n\n with NixIO(self.filename, \"rw\") as iofile:\n self.assertEqual(iofile._file_version, '0.5.2') # compat version\n\n # section should have been created now\n nixfile = nix.File.open(self.filename, nix.FileMode.ReadOnly)\n self.assertIn(\"neo\", nixfile.sections)\n self.assertEqual(nixfile.sections[\"neo\"][\"version\"], '0.5.2')\n nixfile.close()\n\n def test_file_with_ver(self):\n someversion = '0.100.10'\n nixfile = nix.File.open(self.filename, nix.FileMode.Overwrite)\n filemd = nixfile.create_section(\"neo\", \"neo.metadata\")\n filemd[\"version\"] = someversion\n nixfile.close()\n\n with NixIO(self.filename, \"ro\") as iofile:\n self.assertEqual(iofile._file_version, someversion)\n\n with NixIO(self.filename, \"rw\") as iofile:\n self.assertEqual(iofile._file_version, someversion)\n\n with NixIO(self.filename, \"ow\") as iofile:\n self.assertEqual(iofile._file_version, neover)\n\n\n@unittest.skipUnless(HAVE_NIX, \"Requires NIX\")\nclass CommonTests(BaseTestIO, unittest.TestCase):\n ioclass = NixIO\n read_and_write_is_bijective = False\n entities_to_download = []\n entities_to_est = []\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.random.choice", "numpy.random.rand", "numpy.testing.assert_almost_equal", "numpy.shape", "numpy.float64", "numpy.random.uniform", "numpy.transpose", "numpy.random.randint", "numpy.arange", "numpy.cumsum", "numpy.all", "numpy.random.random", "numpy.array_split" ] ]
mmuu1987/MyFluid
[ "406f8bcadc6607b3a87383e85864bf5a5556f231" ]
[ "python/RiemannSolver/Burgers1dGodunov.py" ]
[ "import numpy as np\n#Godunov Scheme for 1d Burgers equation\n#介绍:https://zhuanlan.zhihu.com/p/331771977\nnmax = 8\ntmax = 100\ndx = 1.0 / nmax\ndt = 1.0 / tmax\nU = np.zeros((tmax,nmax))\nF = np.zeros((tmax,nmax + 1))\nfor i in range(0,nmax):\n if(i < 2):\n U[0,i] = 2\n else:\n U[0,i] = 1\n\ncfl = 0.8\ndx = 0.1\ndt = 0.01\n\ndef Flux(uL,uR):# Godunov\n FL = 0.5 * uL * uL\n FR = 0.5 * uR * uR\n s = 0.5*(uL + uR)\n if (uL < uR):\n if (uL > 0.0): #对应第一种情况\n return FL\n elif (uR < 0.0): #对应第一种情况\n return FR\n else: #对应第三种情况\n return 0.0\n else:\n if (s > 0.0): #对应第四种情况\n return FL\n else: #对应第五种情况\n return FR \n\nfor k in range(0,tmax-1):\n for i in range(1,nmax):\n uL = U[k,i-1]\n uR = U[k,i]\n F[k,i] = Flux(uL,uR)\n \n if(U[k,0] < 0.0):\n uL = 2.0 * U[k,0] - U[k,1]\n else:\n uL = U[k,0]\n uR = U[k,0]\n F[k,0] = Flux(uL,uR)\n \n if(U[k,nmax-1] > 0.0):\n uR = 2.0 * U[k,nmax-1] - U[k,nmax-2]\n else:\n uR = U[k,nmax-1]\n uL = U[k,nmax-1]\n F[k,nmax] = Flux(uL,uR)\n \n for i in range(0,nmax):\n U[k+1,i] = U[k,i] - dt/dx * (F[k,i+1] - F[k,i])" ]
[ [ "numpy.zeros" ] ]
vdike/pyHalo
[ "c8d55cfbce2b0b9cb9c5520a2ec345a75ab502df" ]
[ "pyHalo/Halos/HaloModels/coreTNFW.py" ]
[ "from pyHalo.Halos.halo_base import Halo\nfrom pyHalo.Halos.HaloModels.TNFW import TNFWFieldHalo, TNFWSubhalo\nfrom lenstronomy.LensModel.Profiles.tnfw import TNFW\nimport numpy as np\n\n\nclass coreTNFWBase(Halo):\n \"\"\"\n The main class for a cored NFW field halo profile\n\n See the base class in Halos/halo_base.py for the required routines for any instance of a Halo class\n \"\"\"\n\n def __init__(self, mass, x, y, r3d, mdef, z,\n sub_flag, lens_cosmo_instance, args, unique_tag, tnfw_class):\n\n \"\"\"\n See documentation in base class (Halos/halo_base.py)\n\n \"\"\"\n self._tnfw_lenstronomy = TNFW()\n self._tnfw = tnfw_class\n self._lens_cosmo = lens_cosmo_instance\n\n super(coreTNFWBase, self).__init__(mass, x, y, r3d, mdef, z, sub_flag,\n lens_cosmo_instance, args, unique_tag)\n\n @property\n def lenstronomy_ID(self):\n \"\"\"\n See documentation in base class (Halos/halo_base.py)\n \"\"\"\n return ['NumericalAlpha']\n\n @property\n def params_physical(self):\n \"\"\"\n See documentation in base class (Halos/halo_base.py)\n \"\"\"\n if not hasattr(self, '_params_physical'):\n [concentration, rt, core_density] = self.profile_args\n rhos, rs, r200 = self._lens_cosmo.NFW_params_physical(self.mass, concentration, self.z)\n self._params_physical = {'rhos': rhos, 'rs': rs, 'r200': r200, 'r_trunc': rt,\n 'rc_over_rs': min(1., rhos/core_density)}\n\n return self._params_physical\n\n @property\n def central_density(self):\n \"\"\"\n Computes the central density of the cored profile using the user-specified class \"SIDM_rhocentral_function\"\n \"\"\"\n z_eval = self._tnfw.z_eval\n profile_args_tnfw = self._tnfw.profile_args\n median_concentration = self._lens_cosmo.NFW_concentration(self.mass,\n z_eval,\n self._args['mc_model'],\n self._args['mc_mdef'],\n self._args['log_mc'],\n False,\n 0.,\n self._args['kwargs_suppression'],\n self._args['suppression_model'])\n\n c = profile_args_tnfw[0]\n delta_c_over_c = (c - median_concentration)/c\n cross_section_type = self._args['cross_section_type']\n kwargs_cross_section = self._args['kwargs_cross_section']\n args_function = (self.mass, self.z, delta_c_over_c, cross_section_type, kwargs_cross_section)\n function_rho = self._args['SIDM_rhocentral_function']\n rho_central = function_rho(*args_function)\n\n return rho_central\n\n @property\n def lenstronomy_params(self):\n \"\"\"\n See documentation in base class (Halos/halo_base.py)\n \"\"\"\n if not hasattr(self, '_kwargs_lenstronomy'):\n\n [concentration, rt, rho_central] = self.profile_args\n rhos, rs, _ = self.lens_cosmo.NFW_params_physical(self.mass, concentration, self.z)\n\n rhos_mpc, rs_mpc = rhos * 1000 ** 3, rs / 1000\n Rs_angle, theta_Rs = self._lens_cosmo.nfw_physical2angle_fromNFWparams(rhos_mpc, rs_mpc, self.z)\n\n numerical_deflection_class = self._args['numerical_deflection_angle_class']\n\n beta_norm = 0.0025 * Rs_angle\n x_match = 10.\n r_trunc_norm = x_match * Rs_angle\n alpha_norm, _ = numerical_deflection_class(x_match * Rs_angle, 0., Rs_angle, beta_norm, r_trunc_norm, norm=1.)\n alpha_tnfw, _ = self._tnfw_lenstronomy.derivatives(x_match * Rs_angle, 0., Rs=Rs_angle,\n alpha_Rs=theta_Rs,\n r_trunc=r_trunc_norm)\n\n norm = alpha_tnfw / alpha_norm\n Rs_angle = np.round(Rs_angle, 10)\n\n beta = min(1., rhos / rho_central)\n\n r_trunc_arcsec = rt / self._lens_cosmo.cosmo.kpc_proper_per_asec(self.z)\n\n self._kwargs_lenstronomy = [{'Rs': Rs_angle, 'r_core': beta * Rs_angle,\n 'center_x': self.x, 'center_y': self.y, 'norm': norm,\n 'r_trunc': r_trunc_arcsec}]\n\n return 
self._kwargs_lenstronomy, self._args['numerical_deflection_angle_class']\n\n @property\n def profile_args(self):\n \"\"\"\n See documentation in base class (Halos/halo_base.py)\n \"\"\"\n if not hasattr(self, '_profile_args'):\n profile_args_tnfw = self._tnfw.profile_args\n core_density = self.central_density\n self._profile_args = (profile_args_tnfw[0], profile_args_tnfw[1], core_density)\n\n return self._profile_args\n\nclass coreTNFWFieldHalo(coreTNFWBase):\n \"\"\"\n Describes a cored TNFW profile in the field\n \"\"\"\n def __init__(self, mass, x, y, r3d, mdef, z,\n sub_flag, lens_cosmo_instance, args, unique_tag):\n\n tnfw_class = TNFWFieldHalo(mass, x, y, r3d, mdef, z,\n sub_flag, lens_cosmo_instance, args, unique_tag)\n super(coreTNFWFieldHalo, self).__init__(mass, x, y, r3d, mdef, z,\n sub_flag, lens_cosmo_instance, args, unique_tag, tnfw_class)\n\n @classmethod\n def fromTNFW(cls, tnfw_halo, kwargs_new):\n \"\"\"\n Creates the profile class from an instance of a TNFWSubhalo\n :param tnfw_halo: an instance of TNFWSubhalo\n :param kwargs_new: new keyword arguments required to constrct the coreTNFW profile\n :return: instance of coreTNFW\n \"\"\"\n new_halo = coreTNFWFieldHalo(tnfw_halo.mass, tnfw_halo.x, tnfw_halo.y, tnfw_halo.r3d, 'coreTNFW',\n tnfw_halo.z, False, tnfw_halo.lens_cosmo, kwargs_new, tnfw_halo.unique_tag)\n profile_args = tnfw_halo.profile_args\n new_halo._profile_args = (profile_args[0], profile_args[1], new_halo.central_density)\n\n return new_halo\n\nclass coreTNFWSubhalo(coreTNFWBase):\n \"\"\"\n Describes a cored TNFW subhalo\n \"\"\"\n def __init__(self, mass, x, y, r3d, mdef, z,\n sub_flag, lens_cosmo_instance, args, unique_tag):\n\n tnfw_class = TNFWSubhalo(mass, x, y, r3d, mdef, z,\n sub_flag, lens_cosmo_instance, args, unique_tag)\n super(coreTNFWSubhalo, self).__init__(mass, x, y, r3d, mdef, z,\n sub_flag, lens_cosmo_instance, args, unique_tag, tnfw_class)\n\n @classmethod\n def fromTNFW(cls, tnfw_halo, kwargs_new):\n \"\"\"\n Creates the profile class from an instance of a TNFWFieldHalo\n :param tnfw_halo: an instance of TNFWFieldHalo\n :param kwargs_new: new keyword arguments required to constrct the coreTNFW profile\n :return: instance of coreTNFW\n \"\"\"\n new_halo = coreTNFWSubhalo(tnfw_halo.mass, tnfw_halo.x, tnfw_halo.y, tnfw_halo.r3d, 'coreTNFW',\n tnfw_halo.z, True, tnfw_halo.lens_cosmo, kwargs_new, tnfw_halo.unique_tag)\n profile_args = tnfw_halo.profile_args\n new_halo._profile_args = (profile_args[0], profile_args[1], new_halo.central_density)\n\n return new_halo\n" ]
[ [ "numpy.round" ] ]
manojkrishnan2490/gpt-2
[ "f597167237a635e11fa95ea5d10d76a77db487de" ]
[ "encode.py" ]
[ "#!/usr/bin/env python3\n# Usage:\n# PYTHONPATH=src ./encode.py <file|directory|glob> /path/to/output.npy\n# PYTHONPATH=src ./train --dataset /path/to/output.npy\n\nimport argparse\nimport numpy as np\n\nimport encoder\nfrom load_dataset import load_dataset\n\nparser = argparse.ArgumentParser(\n description='Pre-encode text files into tokenized training set.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--model', metavar='MODEL', type=str, default='124M', help='Pretrained model name')\nparser.add_argument('--models_dir', metavar='PATH', type=str, default='models', help='Path to models directory')\nparser.add_argument('--combine', metavar='CHARS', type=int, default=50000, help='Concatenate files with <|endoftext|> separator into chunks of this minimum size')\nparser.add_argument('--encoding', type=str, default='utf-8', help='Set the encoding for reading and writing files.')\nparser.add_argument('in_text', metavar='PATH', type=str, help='Input file, directory, or glob pattern (utf-8 text).')\nparser.add_argument('out_npy', metavar='OUT.npy', type=str, help='Output file path')\n\ndef main():\n args = parser.parse_args()\n enc = encoder.get_encoder(args.model, models_dir=args.models_dir)\n print('Reading files')\n chunks = load_dataset(enc, args.in_text, args.combine, encoding=args.encoding)\n print('Writing', args.out_npy)\n np.save(args.out_npy, *chunks)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.save" ] ]
luisvalesilva/chr_size_bias_in_ssDNA_chip-chip
[ "becc79c553d3da9a7781e6d84b067b0b408467a8" ]
[ "helpers.py" ]
[ "\"\"\" Helper functions \"\"\"\n\n# Standard library\nimport time\nimport os\nimport logging\n\n# Third party libraries\nimport pandas as pd\nfrom scipy.stats import ttest_ind\n\n\ndef array_file_names(path):\n \"\"\"\n Given a path to a folder (array library), returns dictionary\n containing each directory (experiment folder) as a key and a\n list of array data files (analysis of array containing Log2Ratio\n data) as its value.\n \"\"\"\n array_dict = {}\n # Get all directory names in the path (array database)\n for d in os.listdir(path):\n if os.path.isdir(os.path.join(path, d)):\n # Get all array file names (ignore dirs, hidden files, and file names with and extension)\n ffs = []\n for f in os.listdir(os.path.join(path, d)):\n if os.path.isfile(os.path.join(path, d, f)) and not f[0] == '.' and not os.path.splitext(f)[1]:\n ffs.append(f)\n array_dict[d] = ffs\n \n return array_dict\n\n\ndef list_array_paths(path, array_dict):\n \"\"\"\n Given a dictionary containing each directory (experiment folder) as a\n key and a list of array data files (analysis of array containing Log2Ratio\n data) as its value (i.e., the output of 'find_arrays'), returns a list of\n full paths to array files.\n \"\"\"\n array_path_list = []\n \n for key_folder in array_dict:\n for array_value in array_dict[key_folder]:\n array_path_list.append(path + key_folder + \"/\" + array_value)\n \n return array_path_list\n\n\ndef avrg_array_signal(array_df, signal_column, exp_folder, array_file_name):\n \"\"\"\n Given a pandas dataframe containing an array analysis,\n calculates and returns a pandas data frame containing\n total length covered by probes, total signal (typically\n ratio after converted back fromLog2Ratio), and\n total signal/total length for each chromosome.\n \"\"\"\n # Loop over all chromosomes in turn\n d = []\n for chr_num in array_df['chr'].unique():\n chr_data = array_df.loc[(array_df.chr == chr_num), :]\n # Calculate total signal\n total_signal = array_df.loc[(array_df.chr == chr_num), signal_column].sum()\n mean_signal = array_df.loc[(array_df.chr == chr_num), signal_column].mean()\n # Loop over all rows for each chromosome and calculate\n # sum of probe lengths\n total_length = 0\n for _, row in array_df.loc[(array_df.chr == chr_num), :].iterrows():\n # This length is usualy constant, so I could also just\n # calculate it first and then count probes and multiply by length\n total_length += row['end'] - (row['start'] - 1)\n \n d.append({'exp_folder': exp_folder, 'array': array_file_name,\n 'chr': chr_num, 'total_length': total_length,\n 'total_signal': total_signal,\n 'mean_signal': mean_signal})\n \n return pd.DataFrame(d)\n\n\ndef small_vs_large(chr_df):\n \"\"\"\n Given a pandas dataframe containing mean signal per chromosome ('chr_df),\n calculates and returns the ratio between mean signal for small versus\n large chromosomes (TO-DO: add t-test / Mann-Whitney test P value calc.).\n \"\"\"\n # Get means and std\n small = [1, 3, 6]\n small_data = chr_df.loc[chr_df.chr.isin(small), 'mean_signal']\n large_data = chr_df.loc[~chr_df.chr.isin(small), 'mean_signal']\n \n small_avrg = small_data.mean()\n small_std = small_data.std()\n large_avrg = large_data.mean()\n large_std = large_data.std()\n ratio = small_avrg / large_avrg\n \n # Student's t-test (do small chromosomes show higher ssDNA signal?)\n two_tailed_p_val = ttest_ind(small_data, large_data)\n \n #return small_avrg, small_std, large_avrg, large_std, ratio\n return {'exp_folder': chr_df['exp_folder'][0], # names repeated in all 16 lines\n 
'array': chr_df['array'][0], # names repeated in all 16 lines\n 'small_chr_avrg': small_avrg, 'small_chr_sd': small_std,\n 'large_chr_avrg': large_avrg, 'large_chr_sd': large_std,\n 'ratio_small_vs_large': ratio, 'ttest_2-tail_p_val': two_tailed_p_val[1]}\n\n\n\ndef print_elapsed_time(start_time):\n \"\"\"\n Computes and prints time elapsed since a provided start time.\n\n Keyword arguments:\n :param start_time: Start time (start_time = time.time()) to compute\n elapsed time from (no default)\n :return: Prints elapsed time since 'start_time'\n \"\"\"\n elapsed_time = time.time() - start_time\n \n print(\"\\n---\")\n if elapsed_time < 60:\n print(\"Completed in {:2.1f} sec.\".format(elapsed_time))\n elif 60 < elapsed_time < 3600:\n print(\"Completed in {:2.1f} min.\".format(elapsed_time / 60))\n else:\n print(\"Completed in {:2.1f} hr.\".format(elapsed_time / 3600))\n" ]
[ [ "pandas.DataFrame", "scipy.stats.ttest_ind" ] ]
baoguangsheng/g-transformer
[ "928f08f3391f589f8a89f1db9ff1fb6981a9443d" ]
[ "fairseq/models/fairseq_encoder.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport torch.nn as nn\nfrom typing import Dict, List, NamedTuple, Optional\nfrom torch import Tensor\n\nEncoderOut = NamedTuple(\n \"EncoderOut\",\n [\n (\"encoder_out\", Tensor), # T x B x C\n (\"encoder_tags\", Tensor), # B x T\n (\"encoder_padding_mask\", Optional[Tensor]), # B x T\n (\"encoder_embedding\", Optional[Tensor]), # B x T x C\n (\"encoder_states\", Optional[List[Tensor]]), # List[T x B x C]\n (\"encoder_attn\", Optional[Dict[str, Tensor]]), # Dict[B x T]\n (\"src_tokens\", Optional[Tensor]), # B x T\n (\"src_lengths\", Optional[Tensor]), # B x 1\n ],\n)\n\n\nclass FairseqEncoder(nn.Module):\n \"\"\"Base class for encoders.\"\"\"\n\n def __init__(self, dictionary):\n super().__init__()\n self.dictionary = dictionary\n\n def forward(self, src_tokens, src_lengths=None, **kwargs):\n \"\"\"\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (LongTensor): lengths of each source sentence of shape\n `(batch)`\n \"\"\"\n raise NotImplementedError\n\n def forward_torchscript(self, net_input: Dict[str, Tensor]):\n \"\"\"A TorchScript-compatible version of forward.\n\n Encoders which use additional arguments may want to override\n this method for TorchScript compatibility.\n \"\"\"\n if torch.jit.is_scripting():\n return self.forward(\n src_tokens=net_input[\"src_tokens\"],\n src_lengths=net_input[\"src_lengths\"],\n )\n else:\n return self.forward_non_torchscript(net_input)\n\n @torch.jit.unused\n def forward_non_torchscript(self, net_input: Dict[str, Tensor]):\n encoder_input = {\n k: v\n for k, v in net_input.items()\n if k != \"prev_output_tokens\"\n }\n return self.forward(**encoder_input)\n\n def reorder_encoder_out(self, encoder_out, new_order):\n \"\"\"\n Reorder encoder output according to `new_order`.\n\n Args:\n encoder_out: output from the ``forward()`` method\n new_order (LongTensor): desired order\n\n Returns:\n `encoder_out` rearranged according to `new_order`\n \"\"\"\n raise NotImplementedError\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n return 1e6 # an arbitrary large number\n\n def upgrade_state_dict(self, state_dict):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n return state_dict\n" ]
[ [ "torch.jit.is_scripting" ] ]
burgerkingeater/io
[ "f2de208f474d6ba4926e2c7f9e901e102ca5c254" ]
[ "tensorflow_io/core/python/ops/hdf5_io_tensor_ops.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"HDF5IOTensor\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow_io.core.python.ops import core_ops\nfrom tensorflow_io.core.python.ops import io_tensor_ops\n\n\nclass BaseHDF5GraphIOTensor:\n \"\"\"BaseHDF5GraphIOTensor\"\"\"\n\n # =============================================================================\n # Constructor (private)\n # =============================================================================\n def __init__(self, filename, component, shape, dtype, internal=False):\n with tf.name_scope(\"BaseHDF5GraphIOTensor\"):\n assert internal\n self._filename = filename\n self._component = component\n self._shape = shape\n self._dtype = dtype\n super().__init__()\n\n # =============================================================================\n # Accessors\n # =============================================================================\n\n @property\n def shape(self):\n \"\"\"Returns the `TensorShape` that represents the shape of the tensor.\"\"\"\n return self._shape\n\n @property\n def dtype(self):\n \"\"\"Returns the `dtype` of elements in the tensor.\"\"\"\n return self._dtype\n\n # =============================================================================\n # String Encoding\n # =============================================================================\n def __repr__(self):\n return \"<{}: shape={}, dtype={}>\".format(\n self.__class__.__name__, self.shape, self.dtype\n )\n\n # =============================================================================\n # Tensor Type Conversions\n # =============================================================================\n\n def to_tensor(self):\n \"\"\"Converts this `IOTensor` into a `tf.Tensor`.\n\n Args:\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A `Tensor` with value obtained from this `IOTensor`.\n \"\"\"\n return core_ops.io_hdf5_readable_read(\n input=self._filename,\n shared=self._filename,\n component=self._component,\n shape=self._shape,\n start=0,\n stop=-1,\n dtype=self._dtype,\n container=\"HDF5IOTensor\",\n )\n\n # =============================================================================\n # Indexing and slicing\n # =============================================================================\n def __getitem__(self, key):\n \"\"\"Returns the specified piece of this IOTensor.\"\"\"\n # always convert to tuple to process\n if not isinstance(key, tuple):\n key = tuple([key])\n # get the start and stop of each element\n indices = [\n (k.start, k.stop) if isinstance(k, slice) else (k, k + 1) for k in key\n ]\n # get the start and stop, and use 0 (start) and -1 (stop) if needed\n indices = list(zip(*indices))\n start = [0 if e is None else e for e in indices[0]]\n stop = [-1 if e is None else e for e in indices[1]]\n\n item = core_ops.io_hdf5_readable_read(\n input=self._filename,\n 
shared=self._filename,\n component=self._component,\n shape=self._shape,\n start=start,\n stop=stop,\n dtype=self._dtype,\n container=\"HDF5IOTensor\",\n )\n\n # in case certain dimension is not slice, then this dimension will need to\n # collapse as `0`, otherwise `:` or `slice(None, None, None)`\n indices = [slice(None) if isinstance(k, slice) else 0 for k in key]\n\n return item.__getitem__(indices)\n\n def __len__(self):\n \"\"\"Returns the total number of items of this IOTensor.\"\"\"\n return self._shape[0]\n\n\nclass HDF5IOTensor(\n io_tensor_ops._CollectionIOTensor\n): # pylint: disable=protected-access\n \"\"\"HDF5IOTensor\"\"\"\n\n # =============================================================================\n # Constructor (private)\n # =============================================================================\n def __init__(self, filename, spec=None, internal=False):\n with tf.name_scope(\"HDF5IOTensor\"):\n columns, shapes, dtypes = core_ops.io_hdf5_readable_info(\n filename, shared=filename, container=\"HDF5IOTensor\"\n )\n if tf.executing_eagerly():\n columns = tf.unstack(columns)\n shapes = [\n tf.boolean_mask(shape, tf.math.greater_equal(shape, 0))\n for shape in tf.unstack(shapes)\n ]\n dtypes = [tf.as_dtype(dtype.numpy()) for dtype in tf.unstack(dtypes)]\n entries = [\n tf.TensorSpec(shape, dtype, column)\n for (shape, dtype, column) in zip(shapes, dtypes, columns)\n ]\n else:\n assert spec is not None\n\n entries = spec.items()\n\n def f(column, columns, shapes):\n shape = tf.boolean_mask(shapes, tf.math.equal(columns, column))[0]\n shape = tf.boolean_mask(shape, tf.math.greater_equal(shape, 0))\n return shape\n\n shapes = [f(column, columns, shapes) for column, _ in entries]\n dtypes = [\n entry if isinstance(entry, tf.dtypes.DType) else entry.dtype\n for _, entry in entries\n ]\n columns = [column for column, _ in entries]\n\n entries = [\n tf.TensorSpec(None, dtype, column)\n for (dtype, column) in zip(dtypes, columns)\n ]\n\n def g(entry, shape):\n return BaseHDF5GraphIOTensor(\n filename, entry.name, shape, entry.dtype, internal=True\n )\n\n elements = [g(entry, shape) for (entry, shape) in zip(entries, shapes)]\n spec = tuple(entries)\n super().__init__(spec, columns, elements, internal=internal)\n" ]
[ [ "tensorflow.TensorSpec", "tensorflow.math.greater_equal", "tensorflow.executing_eagerly", "tensorflow.name_scope", "tensorflow.unstack", "tensorflow.math.equal" ] ]
MarcCote/Theano
[ "f0d293161a624ccf10c60ee8405a92e7d321151a", "f0d293161a624ccf10c60ee8405a92e7d321151a" ]
[ "theano/sandbox/cuda/fftconv.py", "theano/d3viz/tests/test_d3viz.py" ]
[ "from __future__ import absolute_import, print_function, division\n\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\nfrom theano.misc.pycuda_init import pycuda_available\nfrom theano.sandbox.cuda import cuda_available, GpuOp\nfrom theano.ifelse import ifelse\n\nif cuda_available:\n from theano.sandbox.cuda import (basic_ops, CudaNdarrayType,\n CudaNdarray)\nif pycuda_available:\n import pycuda.gpuarray\n\ntry:\n import scikits.cuda\n from scikits.cuda import fft, cublas\n scikits.cuda.misc.init()\n scikits_cuda_available = True\nexcept (ImportError, Exception):\n scikits_cuda_available = False\n\n\n# TODO: investigate the effect of enabling fastmath on FFT performance\n# (how can it be enabled?).\n\n# base class for shared code between scikits.cuda-based ops\nclass ScikitsCudaOp(GpuOp):\n def __eq__(self, other):\n return type(self) == type(other)\n\n def __hash__(self):\n return hash(type(self))\n\n def __str__(self):\n return self.__class__.__name__\n\n def output_type(self, inp):\n raise NotImplementedError\n\n def make_node(self, inp):\n inp = basic_ops.gpu_contiguous(\n basic_ops.as_cuda_ndarray_variable(inp))\n\n assert inp.dtype == \"float32\"\n\n return theano.Apply(self, [inp], [self.output_type(inp)()])\n\n def make_thunk(self, node, storage_map, _, _2, impl=None):\n if not scikits_cuda_available:\n raise RuntimeError(\n \"scikits.cuda is needed for all GPU fft implementation,\"\n \" including fftconv.\")\n\n\nclass CuFFTOp(ScikitsCudaOp):\n def output_type(self, inp):\n # add one extra dim for real/imag\n return CudaNdarrayType(\n broadcastable=[False] * (inp.type.ndim + 1))\n\n def make_thunk(self, node, storage_map, _, _2, impl=None):\n super(CuFFTOp, self).make_thunk(node, storage_map, _, _2)\n\n from theano.misc.pycuda_utils import to_gpuarray\n inputs = [storage_map[v] for v in node.inputs]\n outputs = [storage_map[v] for v in node.outputs]\n\n plan_input_shape = [None]\n plan = [None]\n\n def thunk():\n input_shape = inputs[0][0].shape\n\n # construct output shape\n output_shape = list(input_shape)\n # DFT of real input is symmetric, no need to store\n # redundant coefficients\n output_shape[-1] = output_shape[-1] // 2 + 1\n # extra dimension with length 2 for real/imag\n output_shape += [2]\n output_shape = tuple(output_shape)\n\n z = outputs[0]\n\n # only allocate if there is no previous allocation of the\n # right size.\n if z[0] is None or z[0].shape != output_shape:\n z[0] = CudaNdarray.zeros(output_shape)\n\n input_pycuda = to_gpuarray(inputs[0][0])\n # I thought we'd need to change the type on output_pycuda\n # so it is complex64, but as it turns out scikits.cuda.fft\n # doesn't really care either way and treats the array as\n # if it is complex64 anyway.\n output_pycuda = to_gpuarray(z[0])\n\n # only initialise plan if necessary\n if plan[0] is None or plan_input_shape[0] != input_shape:\n plan_input_shape[0] = input_shape\n plan[0] = fft.Plan(input_shape[1:], np.float32, np.complex64,\n batch=input_shape[0])\n\n fft.fft(input_pycuda, output_pycuda, plan[0])\n\n thunk.inputs = inputs\n thunk.outputs = outputs\n thunk.lazy = False\n\n return thunk\n\n\nclass CuIFFTOp(ScikitsCudaOp):\n def output_type(self, inp):\n # remove extra real/imag dim\n return CudaNdarrayType(\n broadcastable=[False] * (inp.type.ndim - 1))\n\n def make_thunk(self, node, storage_map, _, _2, impl=None):\n super(CuIFFTOp, self).make_thunk(node, storage_map, _, _2)\n\n from theano.misc.pycuda_utils import to_gpuarray\n inputs = [storage_map[v] for v in node.inputs]\n outputs = 
[storage_map[v] for v in node.outputs]\n\n plan_input_shape = [None]\n plan = [None]\n\n def thunk():\n input_shape = inputs[0][0].shape\n\n # construct output shape\n # chop off the extra length-2 dimension for real/imag\n output_shape = list(input_shape[:-1])\n # restore full signal length\n output_shape[-1] = (output_shape[-1] - 1) * 2\n output_shape = tuple(output_shape)\n\n z = outputs[0]\n\n # only allocate if there is no previous allocation of the\n # right size.\n if z[0] is None or z[0].shape != output_shape:\n z[0] = CudaNdarray.zeros(output_shape)\n\n input_pycuda = to_gpuarray(inputs[0][0])\n # input_pycuda is a float32 array with an extra dimension,\n # but will be interpreted by scikits.cuda as a complex64\n # array instead.\n output_pycuda = to_gpuarray(z[0])\n\n # only initialise plan if necessary\n if plan[0] is None or plan_input_shape[0] != input_shape:\n plan_input_shape[0] = input_shape\n plan[0] = fft.Plan(output_shape[1:], np.complex64, np.float32,\n batch=output_shape[0])\n\n fft.ifft(input_pycuda, output_pycuda, plan[0])\n # strangely enough, enabling rescaling here makes it run\n # very, very slowly. so do this rescaling manually\n # afterwards!\n\n thunk.inputs = inputs\n thunk.outputs = outputs\n thunk.lazy = False\n\n return thunk\n\n\ndef to_complex_gpuarray(x, copyif=False):\n \"\"\"\n Adapted version of theano.misc.pycuda_utils.to_gpuarray that takes\n an array with an extra trailing dimension of length 2 for\n real/imaginary parts, and turns it into a complex64 PyCUDA\n GPUArray.\n\n \"\"\"\n if not isinstance(x, CudaNdarray):\n raise ValueError(\"We can transfer only CudaNdarray \"\n \"to pycuda.gpuarray.GPUArray\")\n else:\n # Check if trailing dimension has length 2\n assert x.shape[-1] == 2\n\n # check if dtype is float32\n assert x.dtype == 'float32'\n\n # Check if it is c contiguous\n size = 1\n c_contiguous = True\n for i in range(x.ndim - 1, -1, -1):\n if x.shape[i] == 1:\n continue\n if x._strides[i] != size:\n c_contiguous = False\n break\n size *= x.shape[i]\n if not c_contiguous:\n if copyif:\n x = x.copy()\n else:\n raise ValueError(\"We were asked to not copy memory, \"\n \"but the memory is not c contiguous.\")\n\n # Now x is always c contiguous\n px = pycuda.gpuarray.GPUArray(x.shape[:-1], np.complex64, base=x,\n gpudata=x.gpudata)\n return px\n\n\ndef bptrs(a):\n \"\"\"\n Pointer array when input represents a batch of matrices.\n\n Taken from scikits.cuda tests/test_cublas.py.\n\n \"\"\"\n return pycuda.gpuarray.arange(a.ptr, a.ptr + a.shape[0] * a.strides[0],\n a.strides[0], dtype=cublas.ctypes.c_void_p)\n\n\ndef sc_complex_dot_batched(bx_gpu, by_gpu, bc_gpu, transa='N', transb='N',\n handle=None):\n \"\"\"\n Uses cublasCgemmBatched to compute a bunch of complex dot products\n in parallel.\n\n \"\"\"\n if handle is None:\n handle = scikits.cuda.misc._global_cublas_handle\n\n assert len(bx_gpu.shape) == 3\n assert len(by_gpu.shape) == 3\n assert len(bc_gpu.shape) == 3\n assert bx_gpu.dtype == np.complex64\n assert by_gpu.dtype == np.complex64\n assert bc_gpu.dtype == np.complex64\n\n # Get the shapes of the arguments\n bx_shape = bx_gpu.shape\n by_shape = by_gpu.shape\n\n # Perform matrix multiplication for 2D arrays:\n alpha = np.complex64(1.0)\n beta = np.complex64(0.0)\n\n transa = transa.lower()\n transb = transb.lower()\n\n if transb in ['t', 'c']:\n N, m, k = by_shape\n elif transb in ['n']:\n N, k, m = by_shape\n else:\n raise ValueError('invalid value for transb')\n\n if transa in ['t', 'c']:\n N2, l, n = bx_shape\n elif transa in 
['n']:\n N2, n, l = bx_shape\n else:\n raise ValueError('invalid value for transa')\n\n if l != k:\n raise ValueError('objects are not aligned')\n\n if N != N2:\n raise ValueError('batch sizes are not the same')\n\n if transb == 'n':\n lda = max(1, m)\n else:\n lda = max(1, k)\n\n if transa == 'n':\n ldb = max(1, k)\n else:\n ldb = max(1, n)\n\n ldc = max(1, m)\n\n # construct pointer arrays needed for cublasCgemmBatched\n bx_arr = bptrs(bx_gpu)\n by_arr = bptrs(by_gpu)\n bc_arr = bptrs(bc_gpu)\n\n cublas.cublasCgemmBatched(handle, transb, transa, m, n, k, alpha,\n by_arr.gpudata, lda, bx_arr.gpudata, ldb,\n beta, bc_arr.gpudata, ldc, N)\n\n\nclass BatchedComplexDotOp(ScikitsCudaOp):\n \"\"\"\n This version uses cublasCgemmBatched under the hood, instead of\n doing multiple cublasCgemm calls.\n\n \"\"\"\n\n def make_node(self, inp1, inp2):\n inp1 = basic_ops.gpu_contiguous(\n basic_ops.as_cuda_ndarray_variable(inp1))\n inp2 = basic_ops.gpu_contiguous(\n basic_ops.as_cuda_ndarray_variable(inp2))\n\n assert inp1.dtype == \"float32\"\n assert inp2.dtype == \"float32\"\n assert inp1.ndim == 4 # (batch, a, b, real/imag)\n assert inp2.ndim == 4\n\n return theano.Apply(self, [inp1, inp2], [self.output_type(inp1)()])\n\n def output_type(self, inp):\n return CudaNdarrayType(broadcastable=[False] * inp.type.ndim)\n\n def make_thunk(self, node, storage_map, _, _2, impl=None):\n super(BatchedComplexDotOp, self).make_thunk(node, storage_map, _, _2)\n\n inputs = [storage_map[v] for v in node.inputs]\n outputs = [storage_map[v] for v in node.outputs]\n\n def thunk():\n bx = inputs[0]\n by = inputs[1]\n\n input_shape_x = bx[0].shape # (batch, a, b, 2)\n input_shape_y = by[0].shape # (batch, b, c, 2)\n\n output_shape = (input_shape_x[0], input_shape_x[1],\n input_shape_y[2], 2) # (batch, a, c, 2)\n\n bz = outputs[0]\n\n # only allocate if there is no previous allocation of the\n # right size.\n if bz[0] is None or bz[0].shape != output_shape:\n bz[0] = CudaNdarray.zeros(output_shape)\n\n input_bx_pycuda = to_complex_gpuarray(bx[0])\n input_by_pycuda = to_complex_gpuarray(by[0])\n output_b_pycuda = to_complex_gpuarray(bz[0])\n\n # fancy native batched version\n sc_complex_dot_batched(input_bx_pycuda, input_by_pycuda,\n output_b_pycuda)\n\n thunk.inputs = inputs\n thunk.outputs = outputs\n thunk.lazy = False\n\n return thunk\n\n\ncufft = CuFFTOp()\ncuifft = CuIFFTOp()\nbatched_complex_dot = BatchedComplexDotOp()\n\n\ndef mult_and_reduce(input_fft_v, filters_fft_v, input_shape=None,\n filter_shape=None):\n \"\"\"\n\n Parameters\n ----------\n input_fft_v\n It's (b, ic, i0, i1//2 + 1, 2).\n filters_fft_v\n It's (oc, ic, i0, i1//2 + 1, 2).\n\n \"\"\"\n if input_shape is None:\n input_shape = input_fft_v.shape # symbolic\n\n if filter_shape is None:\n filter_shape = filters_fft_v.shape # symbolic\n\n b, ic, i0, i1_f, _ = input_shape\n oc = filter_shape[0]\n\n # reshape to flatten the dimensions that are multiplied elemwise\n input_r = input_fft_v.reshape((b, ic, i0 * i1_f, 2))\n filters_r = filters_fft_v.reshape((oc, ic, i0 * i1_f, 2))\n\n # shuffle for batched dot product\n input_s = input_r.dimshuffle(2, 0, 1, 3) # (i0 * i1_f, b, ic, 2)\n filters_s = filters_r.dimshuffle(2, 1, 0, 3) # (i0 * i1_f, ic, oc, 2)\n\n output_s = batched_complex_dot(input_s, filters_s)\n\n # shuffle again\n output_r = output_s.dimshuffle(1, 2, 0, 3)\n\n # reshape to unflatten\n output = output_r.reshape((b, oc, i0, i1_f, 2))\n\n return output\n\n\ndef conv2d_fft(input, filters, image_shape=None, filter_shape=None,\n 
border_mode='valid', pad_last_dim=False):\n \"\"\"\n Perform a convolution through fft.\n\n Only support input which will be even on the last dimension\n (width). All other dimensions can be anything and the filters can\n have an even or odd width.\n\n If you must use input which has an odd width, you can either pad\n it or use the `pad_last_dim` argument which will do it for you and\n take care to strip the padding before returning. Don't use this\n argument if you are not sure the input is odd since the padding is\n unconditional and will make even input odd, thus leading to\n problems.\n\n On valid mode the filters must be smaller than the input.\n\n Parameters\n ----------\n input\n (b, ic, i0, i1).\n filters\n (oc, ic, f0, f1).\n border_mode : {'valid', 'full'}\n pad_last_dim\n Unconditionally pad the last dimension of the input\n to to turn it from odd to even. Will strip the\n padding before returning the result.\n\n \"\"\"\n # use symbolic shapes to compute shape info at runtime if not specified\n if image_shape is None:\n image_shape = input.shape\n\n if filter_shape is None:\n filter_shape = filters.shape\n\n # batch size, input channels, input dim 0, input dim 1\n b, ic, i0, i1 = image_shape\n # output channels, input channels, filter dim 0, filter dim 1\n oc, ic_, f0, f1 = filter_shape\n\n # pad filters/image to output shape\n if border_mode == 'valid':\n o0 = i0\n if pad_last_dim:\n o1 = i1 + 1\n input_padded = T.zeros((b, ic, o0, o1), dtype='float32')\n input_padded = T.set_subtensor(input_padded[:, :, :i0, :i1],\n input)\n else:\n o1 = i1\n input_padded = input\n\n filters_padded = T.zeros((oc, ic, o0, o1), dtype='float32')\n filters_padded = T.set_subtensor(filters_padded[:, :, :f0, :f1],\n filters)\n\n elif border_mode == 'full':\n\n # In this particular case, the values of (o0, o1) represent\n # the dimensions of the work buffer more than the actual dimensions\n # of the desired output.\n o0 = i0 + 2 * (f0 - 1)\n o1 = i1 + 2 * (f1 - 1)\n\n if pad_last_dim:\n o1 = o1 + 1\n\n # We line up the filters and the images in a way\n # such that the filters are tightly placed against the\n # top-left of the array, and the images intersect with\n # them on one pixel. 
The top-left pixel of the images\n # is the bottom-right pixel of the filters when we\n # do the layout here.\n\n filters_padded = T.zeros((oc, ic, o0, o1), dtype='float32')\n filters_padded = T.set_subtensor(filters_padded[:, :, :f0, :f1],\n filters)\n\n input_padded = T.zeros((b, ic, o0, o1), dtype='float32')\n input_padded = T.set_subtensor(input_padded[:, :, (f0 - 1):(f0 - 1 + i0), (f1 - 1):(f1 - 1 + i1)],\n input)\n else:\n raise ValueError('invalid mode')\n\n input_padded = T.opt.Assert(\"in conv2d_fft: width is not even\")(\n input_padded, T.eq(o1 % 2, 0))\n\n # reshape for FFT\n input_flat = input_padded.reshape((b * ic, o0, o1))\n filters_flat = filters_padded.reshape((oc * ic, o0, o1))\n\n # perform FFT\n input_fft_flat = cufft(input_flat) # (b * ic, o0, o1//2 + 1, 2)\n filters_fft_flat = cufft(filters_flat) # (oc * ic, o0, o1//2 + 1, 2)\n\n # unfold ic dimension\n input_fft_v_shape = (b, ic, o0, o1 // 2 + 1, 2)\n filters_fft_v_shape = (oc, ic, o0, o1 // 2 + 1, 2)\n input_fft_v = input_fft_flat.reshape(input_fft_v_shape)\n filters_fft_v = filters_fft_flat.reshape(filters_fft_v_shape)\n\n # (b, oc, o0, o1//2 + 1, 2)\n output_fft_s = mult_and_reduce(input_fft_v, filters_fft_v,\n input_shape=input_fft_v_shape,\n filter_shape=filters_fft_v_shape)\n\n # reshape for IFFT\n output_fft_flat = output_fft_s.reshape((b * oc, o0, o1 // 2 + 1, 2))\n\n # perform IFFT\n output_flat = cuifft(output_fft_flat) # (b * oc, o0, o1)\n\n # reshape\n output_circ = output_flat.reshape((b, oc, o0, o1)) # circular!\n\n # Now we extract the region of interest.\n # We just cut it out from the output_circ\n # array that was used for the computation.\n # We do not need to handle pad_last_dim in a\n # special way because we specify explicitly here\n # how much values are expected.\n if border_mode == 'valid':\n output = output_circ[:, :, (f0 - 1):(f0 - 1 + i0 - f0 + 1),\n (f1 - 1):(f1 - 1 + i1 - f1 + 1)]\n elif border_mode == 'full':\n output = output_circ[:, :, (f0 - 1):(f0 - 1 + i0 + f0 - 1),\n (f1 - 1):(f1 - 1 + i1 + f1 - 1)]\n else:\n raise ValueError('invalid mode')\n\n # Rescale manually. This is just a factor that comes in during the\n # trip through FFT and inverse FFT.\n output = (1.0 / T.cast(o0 * o1, 'float32')) * output\n\n # output should now be the result of a batched valid convolution\n # of the input with the filters.\n return basic_ops.as_cuda_ndarray_variable(output)\n\n\ndef conv3d_fft(input, filters, image_shape=None, filter_shape=None,\n border_mode='valid', pad_last_dim=False):\n \"\"\"\n Perform a convolution through fft.\n\n Only supports input whose shape is even on the last dimension.\n All other dimensions can be anything and the filters can\n have an even or odd last dimension.\n\n The semantics associated with the last three dimensions\n are not important as long as they are in the same order between\n the inputs and the filters. For example, when the convolution\n is done on a sequence of images, they could be either\n (duration, height, width) or (height, width, duration).\n\n If you must use input which has an odd width, you can either pad\n it or use the `pad_last_dim` argument which will do it for you and\n take care to strip the padding before returning. 
pad_last_dim checks\n that the last dimension is odd before the actual paddding\n\n On valid mode the filters must be smaller than the input.\n\n Parameters\n ----------\n input\n (b, ic, i0, i1, i2).\n filters\n (oc, ic, f0, f1, i2).\n border_mode : {'valid', 'full'}.\n pad_last_dim\n Unconditionally pad the last dimension of the input\n to to turn it from odd to even. Will strip the\n padding before returning the result.\n\n \"\"\"\n # use symbolic shapes to compute shape info at runtime if not specified\n if image_shape is None:\n image_shape = input.shape\n\n if filter_shape is None:\n filter_shape = filters.shape\n\n # batch size, input channels, input dim 0, input dim 1\n b, ic, i0, i1, i2 = image_shape\n # output channels, input channels, filter dim 0, filter dim 1\n oc, ic_, f0, f1, f2 = filter_shape\n\n # Check that the last dimension is odd\n is_odd = T.eq(T.mod(input.shape[4], 2), 1)\n\n # pad filters/image to output shape\n if border_mode == 'valid':\n o0 = i0\n o1 = i1\n o2 = i2\n input_padded = input\n if pad_last_dim:\n o2 = ifelse(is_odd, o2 + 1, o2)\n input_padded = T.zeros((b, ic, o0, o1, o2), dtype='float32')\n input_padded = T.set_subtensor(input_padded[:, :, :i0, :i1, :i2],\n input)\n filters_padded = T.zeros((oc, ic, o0, o1, o2), dtype='float32')\n filters_padded = T.set_subtensor(filters_padded[:, :, :f0, :f1, :f2],\n filters)\n\n elif border_mode == 'full':\n\n # In this particular case, the values of (o0, o1) represent\n # the dimensions of the work buffer more than the actual dimensions\n # of the desired output.\n o0 = i0 + 2 * (f0 - 1)\n o1 = i1 + 2 * (f1 - 1)\n o2 = i2 + 2 * (f2 - 1)\n\n if pad_last_dim:\n o2 = ifelse(is_odd, o2 + 1, o2)\n\n # We line up the filters and the images in a way\n # such that the filters are tightly placed against the\n # top-left of the array, and the images intersect with\n # them on one pixel. 
The top-left pixel of the images\n # is the bottom-right pixel of the filters when we\n # do the layout here.\n\n filters_padded = T.zeros((oc, ic, o0, o1, o2), dtype='float32')\n filters_padded = T.set_subtensor(filters_padded[:, :, :f0, :f1, :f2],\n filters)\n\n input_padded = T.zeros((b, ic, o0, o1, o2), dtype='float32')\n input_padded = T.set_subtensor(input_padded[:, :, (f0 - 1):(f0 - 1 + i0), (f1 - 1):(f1 - 1 + i1), (f2 - 1):(f2 - 1 + i2)],\n input)\n else:\n raise ValueError('invalid mode')\n\n # reshape for FFT\n input_flat = input_padded.reshape((b * ic, o0, o1, o2))\n filters_flat = filters_padded.reshape((oc * ic, o0, o1, o2))\n\n # perform FFT\n input_fft_flat = cufft(input_flat) # (b * ic, o0, o1, o2//2 + 1, 2)\n filters_fft_flat = cufft(filters_flat) # (oc * ic, o0, o1, o2//2 + 1, 2)\n\n # Unfold ic dimension.\n # We have to collapse two dimensions together\n # in order to reuse the same `mult_and_reduce`.\n # This explains the o0 * 01 instead of just keeping\n # the two dimensions intact.\n input_fft_v_shape = (b, ic, o0 * o1, o2 // 2 + 1, 2)\n filters_fft_v_shape = (oc, ic, o0 * o1, o2 // 2 + 1, 2)\n\n input_fft_v = input_fft_flat.reshape(input_fft_v_shape)\n filters_fft_v = filters_fft_flat.reshape(filters_fft_v_shape)\n\n # (b, oc, o0 * o1, o2//2 + 1, 2)\n output_fft_s = mult_and_reduce(input_fft_v, filters_fft_v,\n input_shape=input_fft_v_shape,\n filter_shape=filters_fft_v_shape)\n # output_fft_s = input_fft_v\n\n # reshape for IFFT\n output_fft_flat = output_fft_s.reshape((b * oc, o0, o1, o2 // 2 + 1, 2))\n\n # perform IFFT\n output_flat = cuifft(output_fft_flat) # (b * oc, o0, o1, o2)\n\n # reshape\n output_circ = output_flat.reshape((b, oc, o0, o1, o2)) # circular!\n\n # Now we extract the region of interest.\n # We just cut it out from the output_circ\n # array that was used for the computation.\n # We do not need to handle pad_last_dim in a\n # special way because we specify explicitly here\n # how much values are expected.\n if border_mode == 'valid':\n output = output_circ[:, :, (f0 - 1):(f0 - 1 + i0 - f0 + 1),\n (f1 - 1):(f1 - 1 + i1 - f1 + 1),\n (f2 - 1):(f2 - 1 + i2 - f2 + 1)]\n elif border_mode == 'full':\n output = output_circ[:, :, (f0 - 1):(f0 - 1 + i0 + f0 - 1),\n (f1 - 1):(f1 - 1 + i1 + f1 - 1),\n (f2 - 1):(f2 - 1 + i2 + f2 - 1)]\n else:\n raise ValueError('invalid mode')\n # output = output_circ[:, :, :, :, :]\n\n # Rescale manually. 
This is just a factor that comes in during the\n # trip through FFT and inverse FFT.\n output = (1.0 / T.cast(o0 * o1 * o2, 'float32')) * output\n\n # output should now be the result of a batched valid convolution\n # of the input with the filters.\n return basic_ops.as_cuda_ndarray_variable(output)\n", "from __future__ import absolute_import, print_function, division\n\nimport numpy as np\nimport os.path as pt\nimport tempfile\nimport unittest\nimport filecmp\n\nimport theano as th\nimport theano.d3viz as d3v\nfrom theano.d3viz.tests import models\n\nfrom nose.plugins.skip import SkipTest\nfrom theano.d3viz.formatting import pydot_imported, pydot_imported_msg\nif not pydot_imported:\n raise SkipTest('pydot not available: ' + pydot_imported_msg)\n\n\nclass TestD3Viz(unittest.TestCase):\n\n def setUp(self):\n self.rng = np.random.RandomState(0)\n self.data_dir = pt.join('data', 'test_d3viz')\n\n def check(self, f, reference=None, verbose=False):\n tmp_dir = tempfile.mkdtemp()\n html_file = pt.join(tmp_dir, 'index.html')\n if verbose:\n print(html_file)\n d3v.d3viz(f, html_file)\n assert pt.getsize(html_file) > 0\n if reference:\n assert filecmp.cmp(html_file, reference)\n\n def test_mlp(self):\n m = models.Mlp()\n f = th.function(m.inputs, m.outputs)\n self.check(f)\n\n def test_mlp_profiled(self):\n m = models.Mlp()\n profile = th.compile.profiling.ProfileStats(False)\n f = th.function(m.inputs, m.outputs, profile=profile)\n x_val = self.rng.normal(0, 1, (1000, m.nfeatures))\n f(x_val)\n self.check(f)\n\n def test_ofg(self):\n m = models.Ofg()\n f = th.function(m.inputs, m.outputs)\n self.check(f)\n\n def test_ofg_nested(self):\n m = models.OfgNested()\n f = th.function(m.inputs, m.outputs)\n self.check(f)\n\n def test_ofg_simple(self):\n m = models.OfgSimple()\n f = th.function(m.inputs, m.outputs)\n self.check(f)\n" ]
[ [ "numpy.complex64" ], [ "numpy.random.RandomState" ] ]
asharron/banditproblem
[ "25987b4c751ed871cb9512c3efa9e6efa5c35584" ]
[ "incremental.py" ]
[ "#This file uses the incremental approach to calculating\n# Q values instead of redoing the q equation for each entry in\n# the table\n\nimport numpy as np\nimport random\n\nrandomint = random.randint(1,51) #How many actions we will have between 1 - 50\nrewards = np.random.rand(1,randomint) #Rewards for each action set randomly\nbest_action = max(rewards[0]) #Find the best reward\n\nprint(\"Total choices are \", randomint, \"!\")\n\n#Table for holding our progress so far, Col index is the action number\n# Row 1: Total reward for each action\n# Row 2: Total number of times called\n# Row 3: Old Q value\ntable = np.zeros((3,randomint))\n\ne_rate = .5 #Exploration rate\ntotal_reward = 0 \n\n#This is the equation for determining the value of an action\ndef q_equation(total_reward, total_called):\n return total_reward * total_called\n\n\n#Loop for any number of times to allow the agent to try and pick\nfor attempt in range(10000):\n\n #Do a random choice for e_rate % of the time\n if (random.random() > e_rate):\n #Equation is action = argmax(Qt(a))\n #Get the current action by greedily picking the max of the q_equation applied to\n # each reward and value in the table\n curr_action = max([(qvalue,index) for index, qvalue in\\\n enumerate(table[2])])\n else:\n #Pick a random action\n ran_action = (random.randint(1,randomint) - 1)\n curr_action = (0,ran_action)\n\n curr_reward = rewards[0][curr_action[1]] #Grab reward from rewards table \n\n #Update the table\n table[0][curr_action[1]] = curr_reward #Just remember the nth reward\n table[1][curr_action[1]] += 1 #update number of times called\n #The new q value equation is:\n # Qn+1 = Qn + 1/n[Rn - Qn]\n # OR\n # Qn+1 = Qn + a[Rn - Qn]\n # ^ If using a fixed n timestep ratio\n old_q = table[2][curr_action[1]] #Helper for indexing\n #First equation\n table[2][curr_action[1]] = old_q + (1/table[1][curr_action[1]]) * \\\n (curr_reward - old_q) #Update the q value for this action\n #Second equation\n #table[2][curr_action[1]] = old_q + (1/table[1][curr_action[1]]) * \\\n #(curr_reward - old_q) #Update the q value for this action\n\n #Update total reward\n total_reward += curr_reward\n\n#Grab what the machine thinks is the best option overall\nguess_best = max([(q_equation(tReward,tCalls),index) for index,\\\n (tReward,tCalls) in \\\n enumerate(zip(table[0],table[1]))])\n\nprint(\"Total reward was \", total_reward, \"!\")\nprint(\"Our AI thinks that the best choice is \", guess_best[1]+1, \"!\")\n\nif rewards[0][guess_best[1]] == best_action:\n print(\"Our AI was correct!\")\nelse:\n print(\"Dang, it was wrong...\")\n \n" ]
[ [ "numpy.random.rand", "numpy.zeros" ] ]
LetteraUnica/unipi_lab_courses
[ "5bec20184cba3ca0e61831b193c407dba04651c1", "5bec20184cba3ca0e61831b193c407dba04651c1" ]
[ "Lab 1/5) Piano Inclinato/theta 2 lineare.py", "Lab 2/Relazione primo semestre/Diodo con led/analisis diodo con led.py" ]
[ "import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef regression_line (datix, datiy, n_dati) : #Calcola la linea di miglior fit\n ux = np.average(datix) #Formule prese da http://mathworld.wolfram.com/LeastSquaresFitting.html \n uy = np.average(datiy)\n sxx = -n_dati * ux**2\n sxy = -n_dati * ux * uy\n syy = -n_dati * uy**2\n \n i = 0\n while i < n_dati :\n sxx = sxx + datix[i]**2\n sxy = sxy + datix[i] * datiy[i]\n syy = syy + datiy[i]**2\n i = i + 1\n \n b = sxy / sxx\n a = uy - b * ux\n s = np.sqrt((syy - sxy**2 / sxx) / (n_dati - 2))\n delta_b = s / np.sqrt(sxx)\n delta_a = s * np.sqrt(1 / n_dati + ux**2 / sxx)\n return np.array([b, a, delta_b, delta_a])\n\ndati = np.genfromtxt(\"C:/Users/Lorenzo/Desktop/Relazioni fisica/Piano Inclinato/Dati theta2.txt\", unpack = True, skip_header = 1)\n\ntempi = dati[2:]\nlist_medie = np.array([])\nlist_errori = np.array([])\nfor tempo in tempi :\n list_medie = np.append(list_medie, np.average(tempo))\n list_errori = np.append(list_errori, np.std(tempo))\n \nlist_medie_2 = list_medie**2\nlist_errori_2 = list_errori * 2 * list_medie\ndistanze = dati[0][:4]\ndistanze_errore = np.ones(4) * 0.1\n\nbest_fit_1 = regression_line(distanze, list_medie_2[::3], 4)\nx = np.linspace(0, 100, 1000)\ny = best_fit_1[0] * x + best_fit_1[1]\nplt.figure()\nplt.xlabel(\"l [cm]\")\nplt.ylabel(\"t^2 [s^2]\")\nplt.title(\"theta 2 sfera 1\")\nplt.plot(x, y)\nplt.errorbar(distanze, list_medie_2[::3], yerr = list_errori_2[::3], xerr = distanze_errore, fmt = 'o')\nplt.show()\n\nbest_fit_2 = regression_line(distanze, list_medie_2[1::3], 4)\ny = best_fit_2[0] * x + best_fit_2[1]\nplt.figure()\nplt.xlabel(\"l [cm]\")\nplt.ylabel(\"t^2 [s^2]\")\nplt.title(\"theta 2 sfera 2\")\nplt.plot(x, y)\nplt.errorbar(distanze, list_medie_2[1::3], yerr = list_errori_2[1::3], xerr = distanze_errore, fmt = 'o')\nplt.show()\n\nbest_fit_3 = regression_line(distanze, list_medie_2[2::3], 4)\ny = best_fit_3[0] * x + best_fit_3[1]\nplt.figure()\nplt.xlabel(\"l [cm]\")\nplt.ylabel(\"t^2 [s^2]\")\nplt.title(\"theta 2 sfera 3\")\nplt.plot(x, y)\nplt.errorbar(distanze, list_medie_2[2::3], yerr = list_errori_2[2::3], xerr = distanze_errore, fmt = 'o')\nplt.show()\n\nprint(best_fit_1)\nprint(best_fit_2)\nprint(best_fit_3)", "import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize\n\ndef shock(V, I0, nVt) :\n return I0*(np.exp(V/nVt)-1)\n \ndef derivata(V, I0, nVt) :\n return (I0/nVt)*np.exp(V/nVt)\n\nV1_digit, V2_digit = np.genfromtxt(\"diodo2.txt\", unpack=True)\n\nVref = 5.08 # Da cambiare\ndVref = 0.03\nRd = 371\ndRd = 5\nX = Vref/1023\ndX = dVref/1023\n\nV1 = V1_digit*X\ndV1 = X*1 + V1*dX\nV2 = V2_digit*X\ndV2 = X*1 + V2*dX\n\nI = ((V1-V2)/Rd)*1000\ndI = ((dV1+dV2)/Rd + (V1-V2)*dRd/(Rd**2))*1000\n\nndof = len(V1)-2\ndI_eff = dI\nval = (1, 0.05)\nfor i in range(10):\n popt, pcov = scipy.optimize.curve_fit(shock, V2, I, val, dI_eff, absolute_sigma = False)\n chi_2 = np.sum(((I - shock(V2,*popt))/dI_eff)**2)\n print(chi_2)\n dI_eff = np.sqrt(((derivata(V2,*popt))*dV2)**2 + dI**2)\n\nV2_out = np.array([])\nI_out = np.array([])\ndI_eff_out = np.array([])\ndifferenza = (I-shock(V2, *popt))/dI_eff\nfor i in range(len(V2)) :\n if(abs(differenza[i])<1) :\n V2_out = np.append(V2_out, V2[i])\n I_out = np.append(I_out, I[i])\n dI_eff_out = np.append(dI_eff_out, dI_eff[i])\n\nprint(\"\\nPORCODDIO RICORDA DI METTERE LA CAZZO DI ABS_SIGMA CHE CE LA SCORDIAMO SEMPRE... 
PORCA MADONNA\\n\")\nprint(\"chi2/ndof =\",chi_2,\"/\",ndof,\"=\",chi_2/ndof)\nprint(\"I0=\", popt[0], \"+-\", pcov[0][0]**0.5)\nprint(\"nVt=\", popt[1], \"+-\", pcov[1][1]**0.5)\nprint(\"Cov normalizzata\", pcov[1][0]/(pcov[0][0]*pcov[1][1])**0.5, \"\\n\")\nprint(\"chi2 senza outliers\", np.sum(((I_out - shock(V2_out,*popt))/dI_eff_out)**2), \"ndof\", len(I_out)-2)\nprint(\"\\nPORCODDIO RICORDA DI METTERE LA CAZZO DI ABS_SIGMA CHE CE LA SCORDIAMO SEMPRE... PORCA MADONNA\\n\")\n\nV2[0]+=0.01\nt = np.linspace(0, 1.73, 4000)\nplt.figure()\nplt.title(\"Grafico I-V diodo\")\nplt.xlabel(\"ddp [V]\")\nplt.ylabel(\"I [mA]\")\nplt.errorbar(V2, I, dI, dV2, fmt = '.', label = \"Data\")\nplt.plot(t, shock(t,*popt), label = \"Fit\", color = 'red')\nplt.legend()\nplt.show()\n\nplt.figure()\nplt.title(\"Residui normalizzati\")\nplt.xlabel(\"ddp [V]\")\nplt.errorbar(V2_out, (I_out-shock(V2_out, *popt))/dI_eff_out, fmt=\".\")\nplt.plot(t, t*0, color = 'red')\nplt.show()" ]
[ [ "numpy.array", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.xlabel", "numpy.genfromtxt", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "numpy.ones", "matplotlib.pyplot.figure", "numpy.std", "matplotlib.pyplot.ylabel", "numpy.sqrt", "numpy.average", "matplotlib.pyplot.show", "numpy.linspace" ], [ "numpy.array", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "numpy.genfromtxt", "matplotlib.pyplot.legend", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "numpy.exp", "matplotlib.pyplot.ylabel", "numpy.append", "matplotlib.pyplot.show", "numpy.linspace" ] ]
SpaceNetChallenge/SpaceNet_Optimized_Routing_Solutions
[ "3fbc215de6b05904a5b54b2c7cde7e61074ae38d", "3fbc215de6b05904a5b54b2c7cde7e61074ae38d", "3fbc215de6b05904a5b54b2c7cde7e61074ae38d", "3fbc215de6b05904a5b54b2c7cde7e61074ae38d", "3fbc215de6b05904a5b54b2c7cde7e61074ae38d" ]
[ "xd_xd/aa/road_networks/skeletonize.py", "xd_xd/aa/road_networks/wkt_to_graph.py", "selim_sef/data_tools/generate_spacenet_dataset.py", "selim_sef/models/unet.py", "schapke/cresi/05_wkt_to_G.py" ]
[ "# -*- coding: utf-8 -*-\nfrom logging import getLogger\nfrom pathlib import Path\nimport os\nimport json\nimport time\nimport random\nimport argparse\nimport logging\nfrom itertools import tee\nfrom collections import OrderedDict #, defaultdict\nfrom multiprocessing import Pool, cpu_count\n\nfrom scipy.spatial.distance import (\n pdist,\n squareform,\n)\nfrom skimage.morphology import (\n skeletonize,\n remove_small_objects,\n remove_small_holes,\n)\nfrom matplotlib.pylab import plt\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nimport tqdm\nimport skimage.io\nimport cv2\n\nfrom aa.cresi.other_tools import sknw\n\n\nlogger = getLogger('aa')\n\nlinestring = \"LINESTRING {}\"\n\n\ndef clean_sub_graphs(G_, min_length=150, max_nodes_to_skip=100,\n weight='length_pix', verbose=True,\n super_verbose=False):\n '''Remove subgraphs with a max path length less than min_length,\n if the subgraph has more than max_noxes_to_skip, don't check length\n (this step great improves processing time)'''\n\n if len(G_.nodes()) == 0:\n return G_\n\n # print (\"Running clean_sub_graphs...\")\n sub_graphs = list(nx.connected_component_subgraphs(G_))\n bad_nodes = []\n\n for G_sub in sub_graphs:\n # don't check length if too many nodes in subgraph\n if len(G_sub.nodes()) > max_nodes_to_skip:\n continue\n else:\n all_lengths = dict(nx.all_pairs_dijkstra_path_length(G_sub, weight=weight))\n # get all lenghts\n lens = []\n #for u,v in all_lengths.iteritems():\n for u in all_lengths.keys():\n v = all_lengths[u]\n #for uprime, vprime in v.iteritems():\n for uprime in v.keys():\n vprime = v[uprime]\n lens.append(vprime)\n max_len = np.max(lens)\n if max_len < min_length:\n bad_nodes.extend(G_sub.nodes())\n\n # remove bad_nodes\n G_.remove_nodes_from(bad_nodes)\n\n return G_\n\n\n# From road_raster.py\n###############################################################################\ndef dl_post_process_pred(mask, glob_thresh=80, kernel_size=9,\n min_area=2000, contour_smoothing=0.001,\n adapt_kernel=85, adapt_const=-3,\n outplot_file='', dpi=500, use_glob_thresh=False,\n kernel_open=19, verbose=False):\n '''Refine mask file and return both refined mask and skeleton'''\n\n t0 = time.time()\n kernel_blur = kernel_size #9\n kernel_close = kernel_size #9\n #kernel_open = kernel_size #9\n\n kernel_close = np.ones((kernel_close,kernel_close), np.uint8)\n kernel_open = np.ones((kernel_open, kernel_open), np.uint8)\n\n blur = cv2.medianBlur(mask, kernel_blur)\n\n # global thresh\n glob_thresh_arr = cv2.threshold(blur, glob_thresh, 1, cv2.THRESH_BINARY)[1]\n glob_thresh_arr_smooth = cv2.medianBlur(glob_thresh_arr, kernel_blur)\n\n t1 = time.time()\n # print (\"Time to compute open(), close(), and get thresholds:\", t1-t0, \"seconds\")\n\n if use_glob_thresh:\n mask_thresh = glob_thresh_arr_smooth\n else:\n adapt_thresh = cv2.adaptiveThreshold(mask,1,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n cv2.THRESH_BINARY,adapt_kernel, adapt_const)\n # resmooth\n adapt_thresh_smooth = cv2.medianBlur(adapt_thresh, kernel_blur)\n\n mask_thresh = adapt_thresh_smooth\n\n closing = cv2.morphologyEx(mask_thresh, cv2.MORPH_CLOSE, kernel_close)\n opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel_open)\n # try on bgRemoved?\n\n t2 = time.time()\n\n # set output\n if contour_smoothing < 0:\n final_mask = opening\n else:\n # contours\n # remove small items\n contours, cont_plot, hole_idxs = get_contours_complex(opening, \n min_thresh=glob_thresh, \n min_area=min_area, \n contour_smoothing=contour_smoothing)\n\n # for some reason 
contours don't extend to the edge, so clip the edge\n # and resize\n mask_filt_raw = get_mask(mask_thresh, cont_plot, hole_idxs=hole_idxs)\n shape_tmp = mask_filt_raw.shape\n mask_filt1 = 200 * cv2.resize(mask_filt_raw[2:-2, 2:-2], shape_tmp).astype(np.uint8)\n # thresh and resmooth\n mask_filt = cv2.GaussianBlur(mask_filt1, (kernel_blur, kernel_blur), 0)\n #mask_filt = cv2.threshold(mask_filt2, glob_thresh, 1, cv2.THRESH_BINARY)\n final_mask = mask_filt\n\n t3 = time.time()\n # print (\"Time to smooth contours:\", t3-t2, \"seconds\")\n\n # skeletonize\n #medial = medial_axis(final_mask)\n #medial_int = medial.astype(np.uint8)\n medial_int = medial_axis(final_mask).astype(np.uint8)\n # print (\"Time to compute medial_axis:\", time.time() - t3, \"seconds\")\n # print (\"Time to run dl_post_process_pred():\", time.time() - t0, \"seconds\")\n\n return final_mask, medial_int\n\n\ndef cv2_skeletonize(img):\n \"\"\" OpenCV function to return a skeletonized version of img, a Mat object\n https://gist.github.com/jsheedy/3913ab49d344fac4d02bcc887ba4277d\"\"\"\n # hat tip to http://felix.abecassis.me/2011/09/opencv-morphological-skeleton/\n\n img = img.copy() # don't clobber original\n skel = img.copy()\n\n skel[:,:] = 0\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))\n\n while True:\n eroded = cv2.morphologyEx(img, cv2.MORPH_ERODE, kernel)\n temp = cv2.morphologyEx(eroded, cv2.MORPH_DILATE, kernel)\n temp = cv2.subtract(img, temp)\n skel = cv2.bitwise_or(skel, temp)\n img[:,:] = eroded[:,:]\n if cv2.countNonZero(img) == 0:\n break\n\n return skel\n\n\ndef pairwise(iterable):\n \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)\n\n\ndef remove_sequential_duplicates(seq):\n # todo\n res = [seq[0]]\n for elem in seq[1:]:\n if elem == res[-1]:\n continue\n res.append(elem)\n return res\n\n\ndef remove_duplicate_segments(seq):\n seq = remove_sequential_duplicates(seq)\n segments = set()\n split_seg = []\n res = []\n for idx, (s, e) in enumerate(pairwise(seq)):\n if (s, e) not in segments and (e, s) not in segments:\n segments.add((s, e))\n segments.add((e, s))\n else:\n split_seg.append(idx+1)\n for idx, v in enumerate(split_seg):\n if idx == 0:\n res.append(seq[:v])\n if idx == len(split_seg) - 1:\n res.append(seq[v:])\n else:\n s = seq[split_seg[idx-1]:v]\n if len(s) > 1:\n res.append(s)\n if not len(split_seg):\n res.append(seq)\n return res\n\n\ndef flatten(l):\n return [item for sublist in l for item in sublist]\n\n\ndef get_angle(p0, p1=np.array([0, 0]), p2=None):\n \"\"\" compute angle (in degrees) for p0p1p2 corner\n Inputs:\n p0,p1,p2 - points in the form of [x,y]\n \"\"\"\n if p2 is None:\n p2 = p1 + np.array([1, 0])\n v0 = np.array(p0) - np.array(p1) \n v1 = np.array(p2) - np.array(p1)\n\n angle = np.math.atan2(np.linalg.det([v0,v1]),np.dot(v0,v1))\n return np.degrees(angle)\n\n\ndef preprocess(img, thresh, img_mult=255, hole_size=300,\n cv2_kernel_close=7, cv2_kernel_open=7, verbose=True):\n '''\n http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.remove_small_holes\n hole_size in remove_small_objects is the maximum area, in pixels of the\n hole\n '''\n\n # sometimes get a memory error with this approach\n if img.size < 10000000000:\n # if verbose:\n # print(\"Run preprocess() with skimage\")\n img = (img > (img_mult * thresh)).astype(np.bool)\n remove_small_objects(img, hole_size, in_place=True)\n remove_small_holes(img, hole_size, in_place=True)\n # img = cv2.dilate(img.astype(np.uint8), np.ones((7, 
7)))\n\n # cv2 is generally far faster and more memory efficient (though less\n # effective)\n else:\n # if verbose:\n # print(\"Run preprocess() with cv2\")\n\n #from road_raster.py, dl_post_process_pred() function\n kernel_close = np.ones((cv2_kernel_close, cv2_kernel_close), np.uint8)\n kernel_open = np.ones((cv2_kernel_open, cv2_kernel_open), np.uint8)\n kernel_blur = cv2_kernel_close\n\n # global thresh\n #mask_thresh = (img > (img_mult * thresh))#.astype(np.bool)\n blur = cv2.medianBlur( (img * img_mult).astype(np.uint8), kernel_blur)\n glob_thresh_arr = cv2.threshold(blur, thresh, 1, cv2.THRESH_BINARY)[1]\n glob_thresh_arr_smooth = cv2.medianBlur(glob_thresh_arr, kernel_blur)\n mask_thresh = glob_thresh_arr_smooth\n\n # opening and closing\n # http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html\n #gradient = cv2.morphologyEx(mask_thresh, cv2.MORPH_GRADIENT, kernel)\n closing = cv2.morphologyEx(mask_thresh, cv2.MORPH_CLOSE, kernel_close)\n opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel_open)\n img = opening.astype(np.bool)\n #img = opening\n\n return img\n\n\ndef graph2lines(G):\n node_lines = []\n edges = list(G.edges())\n if len(edges) < 1:\n return []\n prev_e = edges[0][1]\n current_line = list(edges[0])\n added_edges = {edges[0]}\n for s, e in edges[1:]:\n if (s, e) in added_edges:\n continue\n if s == prev_e:\n current_line.append(e)\n else:\n node_lines.append(current_line)\n current_line = [s, e]\n added_edges.add((s, e))\n prev_e = e\n if current_line:\n node_lines.append(current_line)\n return node_lines\n\n\ndef visualize(img, G, vertices):\n plt.imshow(img, cmap='gray')\n\n # draw edges by pts\n for (s, e) in G.edges():\n vals = flatten([[v] for v in G[s][e].values()])\n for val in vals:\n ps = val.get('pts', [])\n plt.plot(ps[:, 1], ps[:, 0], 'green')\n\n # draw node by o\n node, nodes = G.node(), G.nodes\n # deg = G.degree\n # ps = np.array([node[i]['o'] for i in nodes])\n ps = np.array(vertices)\n plt.plot(ps[:, 1], ps[:, 0], 'r.')\n\n # title and show\n plt.title('Build Graph')\n plt.show()\n\n\ndef line_points_dist(line1, pts):\n return np.cross(\n line1[1] - line1[0],\n pts - line1[0]\n ) / np.linalg.norm(line1[1] - line1[0])\n\n\ndef remove_small_terminal(G):\n deg = dict(G.degree())\n terminal_points = [i for i, d in deg.items() if d == 1]\n edges = list(G.edges())\n for s, e in edges:\n if s == e:\n sum_len = 0\n vals = flatten([[v] for v in G[s][s].values()])\n for ix, val in enumerate(vals):\n sum_len += len(val['pts'])\n if sum_len < 3:\n G.remove_edge(s, e)\n continue\n vals = flatten([[v] for v in G[s][e].values()])\n for ix, val in enumerate(vals):\n if s in terminal_points and val.get('weight', 0) < 10:\n G.remove_node(s)\n if e in terminal_points and val.get('weight', 0) < 10:\n G.remove_node(e)\n return\n\n\ndef add_small_segments(G,\n terminal_points,\n terminal_lines,\n dist1=20,\n dist2=100,\n angle1=20,\n angle2=160):\n node = G.node\n term = [node[t]['o'] for t in terminal_points]\n dists = squareform(pdist(term))\n possible = np.argwhere((dists > 0) & (dists < dist1))\n good_pairs = []\n for s, e in possible:\n if s > e:\n continue\n s, e = terminal_points[s], terminal_points[e]\n\n if G.has_edge(s, e):\n continue\n good_pairs.append((s, e))\n\n possible2 = np.argwhere((dists > dist1) & (dists < dist2))\n for s, e in possible2:\n if s > e:\n continue\n s, e = terminal_points[s], terminal_points[e]\n if G.has_edge(s, e):\n continue\n l1 = terminal_lines[s]\n l2 = terminal_lines[e]\n 
d = line_points_dist(l1, l2[0])\n\n if abs(d) > dist1:\n continue\n angle = get_angle(l1[1] - l1[0], np.array((0, 0)), l2[1] - l2[0])\n if (-1*angle1 < angle < angle1) or (angle < -1*angle2) or (angle > angle2):\n good_pairs.append((s, e))\n\n dists = {}\n for s, e in good_pairs:\n s_d, e_d = [G.node[s]['o'], G.node[e]['o']]\n dists[(s, e)] = np.linalg.norm(s_d - e_d)\n\n dists = OrderedDict(sorted(dists.items(), key=lambda x: x[1]))\n\n wkt = []\n added = set()\n for s, e in dists.keys():\n if s not in added and e not in added:\n added.add(s)\n added.add(e)\n s_d, e_d = G.node[s]['o'], G.node[e]['o']\n line_strings = [\"{1:.1f} {0:.1f}\".format(*c.tolist()) for c in [s_d, e_d]]\n line = '(' + \", \".join(line_strings) + ')'\n wkt.append(linestring.format(line))\n return wkt\n\n\ndef add_direction_change_nodes(pts, s, e, s_coord, e_coord):\n if len(pts) > 3:\n ps = pts.reshape(pts.shape[0], 1, 2).astype(np.int32)\n approx = 2\n ps = cv2.approxPolyDP(ps, approx, False)\n ps = np.squeeze(ps, 1)\n st_dist = np.linalg.norm(ps[0] - s_coord)\n en_dist = np.linalg.norm(ps[-1] - s_coord)\n if st_dist > en_dist:\n s, e = e, s\n s_coord, e_coord = e_coord, s_coord\n ps[0] = s_coord\n ps[-1] = e_coord\n else:\n ps = np.array([s_coord, e_coord], dtype=np.int32)\n return ps\n\n\ndef make_skeleton(img_loc,\n thresh,\n debug,\n fix_borders,\n replicate=5,\n clip=2,\n img_mult=255,\n hole_size=300,\n cv2_kernel_close=7,\n cv2_kernel_open=7,\n use_medial_axis=False,\n num_classes=1,\n skeleton_band='all'):\n '''\n Extract a skeleton from a mask.\n skeleton_band is the index of the band of the mask to use for\n skeleton extractionk, set to string 'all' to use all bands\n '''\n\n # print (\"Executing make_skeleton...\")\n t0 = time.time()\n #replicate = 5\n #clip = 2\n rec = replicate + clip\n\n # read in data\n if num_classes == 1:\n try:\n img = cv2.imread(img_loc, cv2.IMREAD_GRAYSCALE)\n except:\n img = skimage.io.imread(img_loc, as_gray=True).astype(np.uint8)#[::-1]\n\n else:\n # ensure 8bit?\n img_tmp = skimage.io.imread(img_loc).astype(np.uint8)\n #img_tmp = skimage.io.imread(img_loc)\n # we want skimage to read in (channels, h, w) for multi-channel\n # assume less than 20 channels\n if img_tmp.shape[0] > 20:\n img_full = np.moveaxis(img_tmp, 0, -1)\n else:\n img_full = img_tmp\n # select the desired band for skeleton extraction\n # if < 0, sum all bands\n if type(skeleton_band) == str: #skeleton_band < 0:\n img = np.sum(img_full, axis=0).astype(np.int8)\n else:\n img = img_full[skeleton_band, :, :]\n\n # potentially keep only subset of data\n shape0 = img.shape\n\n if fix_borders:\n img = cv2.copyMakeBorder(img, replicate, replicate, replicate,\n replicate, cv2.BORDER_REPLICATE)\n img_copy = None\n if debug:\n if fix_borders:\n img_copy = np.copy(img[replicate:-replicate,replicate:-replicate])\n else:\n img_copy = np.copy(img)\n\n t1 = time.time()\n img = preprocess(img, thresh, img_mult=img_mult, hole_size=hole_size,\n cv2_kernel_close=cv2_kernel_close,\n cv2_kernel_open=cv2_kernel_open)\n t2 = time.time()\n if not np.any(img):\n return None, None\n\n if not use_medial_axis:\n ske = skeletonize(img).astype(np.uint16)\n t3 = time.time()\n\n else:\n ske = skimage.morphology.medial_axis(img).astype(np.uint16)\n t3 = time.time()\n\n if fix_borders:\n ske = ske[rec:-rec, rec:-rec]\n ske = cv2.copyMakeBorder(ske, clip, clip, clip, clip, cv2.BORDER_CONSTANT, value=0)\n t4 = time.time()\n\n t1 = time.time()\n return img, ske\n\n\ndef build_graph_wkt(img_loc, out_ske_file, out_gpickle='', thresh=0.3,\n 
debug=False, add_small=True, fix_borders=True,\n skel_replicate=5, skel_clip=2, min_subgraph_length_pix=150,\n img_mult=255, hole_size=300, cv2_kernel_close=7, cv2_kernel_open=7,\n num_classes=1,\n skeleton_band='all',\n verbose=False):\n\n # create skeleton\n img_copy, ske = make_skeleton(img_loc,\n thresh,\n debug,\n fix_borders,\n replicate=skel_replicate,\n clip=skel_clip,\n img_mult=img_mult,\n hole_size=hole_size,\n cv2_kernel_close=cv2_kernel_close,\n cv2_kernel_open=cv2_kernel_open,\n skeleton_band=skeleton_band,\n num_classes=num_classes)\n if ske is None:\n return [linestring.format(\"EMPTY\")]\n # save to file\n if out_ske_file:\n cv2.imwrite(out_ske_file, ske.astype(np.uint8)*255)\n\n # create graph\n if np.max(ske.shape) > 32767:\n assert False\n else:\n G = sknw.build_sknw(ske, multi=True)\n remove_small_terminal(G)\n if len(G.edges()) == 0:\n return [linestring.format(\"EMPTY\")]\n\n if verbose:\n node_tmp= list(G.nodes())[-1]\n edge_tmp = list(G.edges())[-1]\n\n t01 = time.time()\n G = clean_sub_graphs(G, min_length=min_subgraph_length_pix,\n max_nodes_to_skip=100,\n weight='weight', verbose=verbose,\n super_verbose=False)\n t02 = time.time()\n # save G\n if len(out_gpickle) > 0:\n nx.write_gpickle(G, out_gpickle)\n\n node_lines = graph2lines(G)\n if not node_lines:\n return [linestring.format(\"EMPTY\")]\n\n node = G.node\n deg = dict(G.degree())\n wkt = []\n terminal_points = [i for i, d in deg.items() if d == 1]\n\n # refine wkt\n # print (\"Refine wkt...\")\n terminal_lines = {}\n vertices = []\n for i,w in enumerate(node_lines):\n if ((i % 10000) == 0) and (i > 0):\n print (\" \", i, \"/\", len(node_lines))\n coord_list = []\n additional_paths = []\n for s, e in pairwise(w):\n vals = flatten([[v] for v in G[s][e].values()])\n for ix, val in enumerate(vals):\n\n s_coord, e_coord = node[s]['o'], node[e]['o']\n pts = val.get('pts', [])\n if s in terminal_points:\n terminal_lines[s] = (s_coord, e_coord)\n if e in terminal_points:\n terminal_lines[e] = (e_coord, s_coord)\n\n ps = add_direction_change_nodes(pts, s, e, s_coord, e_coord)\n\n if len(ps.shape) < 2 or len(ps) < 2:\n continue\n\n if len(ps) == 2 and np.all(ps[0] == ps[1]):\n continue\n\n line_strings = [\"{1:.1f} {0:.1f}\".format(*c.tolist()) for c in ps]\n if ix == 0:\n coord_list.extend(line_strings)\n else:\n additional_paths.append(line_strings)\n\n vertices.append(ps)\n\n if not len(coord_list):\n continue\n segments = remove_duplicate_segments(coord_list)\n for coord_list in segments:\n if len(coord_list) > 1:\n line = '(' + \", \".join(coord_list) + ')'\n wkt.append(linestring.format(line))\n for line_strings in additional_paths:\n line = \", \".join(line_strings)\n line_rev = \", \".join(reversed(line_strings))\n for s in wkt:\n if line in s or line_rev in s:\n break\n else:\n wkt.append(linestring.format('(' + line + ')'))\n\n if add_small and len(terminal_points) > 1:\n wkt.extend(add_small_segments(G, terminal_points, terminal_lines))\n\n if debug:\n vertices = flatten(vertices)\n visualize(img_copy, G, vertices)\n\n if not wkt:\n return [linestring.format(\"EMPTY\")]\n\n return wkt\n\n\ndef _build_graph_wkt_iterable(args):\n (\n imfile,\n im_prefix,\n indir,\n spacenet_naming_convention,\n out_ske_dir,\n out_gdir,\n thresh,\n debug,\n add_small,\n fix_borders,\n skel_replicate,\n skel_clip,\n img_mult,\n hole_size,\n cv2_kernel_close,\n cv2_kernel_open,\n min_subgraph_length_pix,\n num_classes,\n skeleton_band,\n ) = args\n\n t1 = time.time()\n img_loc = os.path.join(indir, imfile)\n\n if 
spacenet_naming_convention:\n im_root = 'AOI' + imfile.split('AOI')[-1].split('.')[0]\n else:\n im_root = imfile.split('.')[0]\n if len(im_prefix) > 0:\n im_root = im_root.split(im_prefix)[-1]\n\n if out_ske_dir:\n out_ske_file = os.path.join(out_ske_dir, imfile)\n else:\n out_ske_file = ''\n\n if len(out_gdir) > 0:\n out_gpickle = os.path.join(out_gdir, imfile.split('.')[0] + '.gpickle')\n else:\n out_gpickle = ''\n\n # create wkt list\n wkt_list = build_graph_wkt(img_loc, out_ske_file,\n out_gpickle=out_gpickle, thresh=thresh,\n debug=debug, add_small=add_small, fix_borders=fix_borders,\n skel_replicate=skel_replicate, skel_clip=skel_clip,\n img_mult=img_mult, hole_size=hole_size,\n cv2_kernel_close=cv2_kernel_close, cv2_kernel_open=cv2_kernel_open,\n min_subgraph_length_pix=min_subgraph_length_pix,\n num_classes=num_classes,\n skeleton_band=skeleton_band)\n return (im_root, wkt_list)\n\n\ndef build_wkt_dir(indir, outfile, out_ske_dir, out_gdir='', thresh=0.3,\n im_prefix='',\n debug=False, add_small=True, fix_borders=True,\n skel_replicate=5, skel_clip=2,\n img_mult=255,\n hole_size=300, cv2_kernel_close=7, cv2_kernel_open=7,\n min_subgraph_length_pix=50,\n spacenet_naming_convention=False,\n num_classes=1,\n skeleton_band='all'):\n '''Execute built_graph_wkt for an entire folder\n Split image name on AOI, keep only name after AOI. This is necessary for\n scoring'''\n\n all_data = []\n im_files = np.sort([z for z in os.listdir(indir) if z.endswith('.tif')])\n nfiles = len(im_files)\n\n print(indir, nfiles)\n\n args_list = []\n for i, imfile in tqdm.tqdm(enumerate(im_files), total=nfiles):\n args = (\n imfile,\n im_prefix,\n indir,\n spacenet_naming_convention,\n out_ske_dir,\n out_gdir,\n thresh,\n debug,\n add_small,\n fix_borders,\n skel_replicate,\n skel_clip,\n img_mult,\n hole_size,\n cv2_kernel_close,\n cv2_kernel_open,\n min_subgraph_length_pix,\n num_classes,\n skeleton_band,\n )\n args_list.append(args)\n\n with Pool(cpu_count()) as p:\n data = list(tqdm.tqdm(\n iterable=p.imap_unordered(_build_graph_wkt_iterable, args_list),\n total=len(args_list)))\n\n for im_root, wkt_list in sorted(data):\n for v in wkt_list:\n all_data.append((im_root, v))\n\n # save to csv\n df = pd.DataFrame(all_data, columns=['ImageId', 'WKT_Pix'])\n df.to_csv(outfile, index=False)\n\n return df\n\n\ndef run_skeletonize(conf):\n spacenet_naming_convention = False # True\n\n preds_dirname = conf.modelname.replace('_th06', '')\n print('preds', preds_dirname)\n\n im_dir = \"{}{}/{}/\".format(\n \"/wdata\", \"/working/sp5r2/models/preds\", preds_dirname)\n im_prefix = ''\n\n if conf.num_folds > 1:\n im_dir = im_dir + \"merged_test\"\n else:\n im_dir = im_dir + \"fold0_test\"\n im_prefix = 'fold0_'\n\n os.makedirs(im_dir, exist_ok=True)\n\n # outut csv file\n outfile_csv = \"{}/working/sp5r2/models/wkt/{}/wkt_submission_nospeed.csv\".format(\n \"/wdata\", conf.modelname)\n Path(outfile_csv).parent.mkdir(parents=True, exist_ok=True)\n\n # output ske\n out_ske_dir = \"{}/working/sp5r2/models/ske/{}\".format(\n \"/wdata\", conf.modelname)\n Path(out_ske_dir).mkdir(parents=True, exist_ok=True)\n\n # output pkl\n out_gdir = \"{}/working/sp5r2/models/sknw_gpickle/{}\".format(\n \"/wdata\", conf.modelname)\n Path(out_gdir).mkdir(parents=True, exist_ok=True)\n\n thresh = conf.skeleton_thresh\n min_subgraph_length_pix = conf.min_subgraph_length_pix\n\n debug=False\n add_small=True\n fix_borders=True\n skel_replicate=5\n skel_clip=2\n img_mult=255\n hole_size=300\n cv2_kernel_close=7\n cv2_kernel_open=7\n\n 
logger.info(\"Building wkts...\")\n t0 = time.time()\n df = build_wkt_dir(im_dir, outfile_csv, out_ske_dir, out_gdir, thresh,\n debug=debug,\n add_small=add_small,\n fix_borders=fix_borders,\n skel_replicate=skel_replicate,\n skel_clip=skel_clip,\n img_mult=img_mult,\n hole_size=hole_size,\n min_subgraph_length_pix=min_subgraph_length_pix,\n cv2_kernel_close=cv2_kernel_close,\n cv2_kernel_open=cv2_kernel_open,\n skeleton_band=conf.skeleton_band,\n num_classes=conf.num_classes,\n im_prefix=im_prefix,\n spacenet_naming_convention=spacenet_naming_convention)\n\n t1 = time.time()\n logger.info(\"len df: {}\".format(len(df)))\n logger.info(\"outfile: {}\".format(outfile_csv))\n logger.info(\"Total time to run build_wkt_dir: {} seconds\".format(t1-t0))\n", "import os\nimport time\nimport utm\n\nimport numpy as np\nimport networkx as nx\nimport osmnx as ox\nimport shapely\nfrom shapely.geometry import mapping, Point, LineString\nfrom osgeo import gdal, ogr, osr\nimport matplotlib.pyplot as plt\n\n\ndef wkt_to_graph(wkt_list, im_file, conf, out_graph_file):\n min_subgraph_length_pix = 300\n verbose = False\n super_verbose = False\n make_plots = False\n save_shapefiles = False\n pickle_protocol = 4\n\n if (len(wkt_list) == 0) or (wkt_list[0] == 'LINESTRING EMPTY'):\n return None\n\n try:\n G = wkt_to_G(wkt_list,\n im_file=im_file,\n min_subgraph_length_pix=min_subgraph_length_pix,\n verbose=super_verbose)\n if len(G.nodes()) == 0:\n return None\n except Exception as e:\n print('Exception in wkt_to_G: {}, {}'.format(\n str(e), out_graph_file))\n return None\n\n node = list(G.nodes())[-1]\n if verbose:\n print(node, 'random node props:', G.nodes[node])\n\n # print an edge\n edge_tmp = list(G.edges())[-1]\n if verbose:\n print (edge_tmp, \"random edge props:\", G.edges([edge_tmp[0], edge_tmp[1]])) #G.edge[edge_tmp[0]][edge_tmp[1]])\n\n nx.write_gpickle(G, out_graph_file, protocol=pickle_protocol)\n\n # save shapefile as well?\n if save_shapefiles:\n ox.save_graph_shapefile(G,\n filename=image_id.split('.')[0] ,\n folder=graph_dir, encoding='utf-8')\n\n # plot, if desired\n if make_plots:\n outfile_plot = 'debug_ox.png'\n if verbose:\n print (\"Plotting graph...\")\n print (\"outfile_plot:\", outfile_plot)\n ox.plot_graph(G, fig_height=9, fig_width=9, \n #save=True, filename=outfile_plot, margin=0.01)\n )\n #plt.tight_layout()\n plt.savefig(outfile_plot, dpi=400)\n\n\ndef wkt_to_G(wkt_list,\n im_file=None,\n min_subgraph_length_pix=30,\n simplify_graph=True,\n verbose=False):\n if verbose:\n print (\"Running wkt_list_to_nodes_edges()...\")\n node_loc_dic, edge_dic = wkt_list_to_nodes_edges(wkt_list)\n\n if verbose:\n print (\"Creating G...\")\n G0 = nodes_edges_to_G(node_loc_dic, edge_dic)\n if verbose:\n print (\" len(G.nodes():\", len(G0.nodes()))\n print (\" len(G.edges():\", len(G0.edges()))\n\n if verbose:\n print (\"Clean out short subgraphs\")\n G0 = clean_sub_graphs(G0, min_length=min_subgraph_length_pix,\n max_nodes_to_skip=30,\n weight='length_pix', verbose=False,\n super_verbose=False)\n\n if len(G0) == 0:\n return G0\n\n # geo coords\n if im_file:\n if verbose:\n print (\"Running get_node_geo_coords()...\")\n G1 = get_node_geo_coords(G0, im_file, verbose=verbose)\n\n if verbose:\n print (\"Running get_edge_geo_coords()...\")\n G1 = get_edge_geo_coords(G1, im_file, verbose=verbose)\n\n if verbose:\n print (\"projecting graph...\")\n G_projected = ox.project_graph(G1)\n\n Gout = G_projected #G_simp\n else:\n Gout = G0\n\n if simplify_graph:\n if verbose:\n print (\"Simplifying graph\")\n G0 = 
ox.simplify_graph(Gout.to_directed())\n G0 = G0.to_undirected()\n Gout = ox.project_graph(G0)\n if verbose:\n print (\"Merge 'geometry' linestrings...\")\n keys_tmp = ['geometry_pix', 'geometry_latlon_wkt', 'geometry_utm_wkt']\n for key_tmp in keys_tmp:\n if verbose:\n print (\"Merge\", key_tmp, \"...\")\n for i,(u,v,attr_dict) in enumerate(Gout.edges(data=True)):\n if (i % 10000) == 0:\n if verbose:\n print (i, u , v)\n geom = attr_dict[key_tmp]\n #print (i, u, v, \"geom:\", geom)\n #print (\" type(geom):\", type(geom))\n\n if type(geom) == list:\n # check if the list items are wkt strings, if so, create\n # linestrigs\n if (type(geom[0]) == str):# or (type(geom_pix[0]) == unicode):\n geom = [shapely.wkt.loads(ztmp) for ztmp in geom]\n # merge geoms\n #geom = shapely.ops.linemerge(geom)\n #attr_dict[key_tmp] = geom\n attr_dict[key_tmp] = shapely.ops.linemerge(geom)\n elif type(geom) == str:\n attr_dict[key_tmp] = shapely.wkt.loads(geom)\n else:\n pass\n\n # assign 'geometry' tag to geometry_utm_wkt\n for i,(u,v,attr_dict) in enumerate(Gout.edges(data=True)):\n if verbose:\n print (\"Create 'geometry' field in edges...\")\n #geom_pix = attr_dict[key_tmp]\n line = attr_dict['geometry_utm_wkt']\n if type(line) == str:# or type(line) == unicode:\n attr_dict['geometry'] = shapely.wkt.loads(line)\n else:\n attr_dict['geometry'] = attr_dict['geometry_utm_wkt']\n # update wkt_pix?\n #print (\"attr_dict['geometry_pix':\", attr_dict['geometry_pix'])\n attr_dict['wkt_pix'] = attr_dict['geometry_pix'].wkt\n\n # update 'length_pix'\n attr_dict['length_pix'] = np.sum([attr_dict['length_pix']])\n\n\n Gout = ox.project_graph(Gout)\n\n if verbose:\n # get a few stats (and set to graph properties)\n print(\"Number of nodes: {}\".format(len(Gout.nodes())))\n print(\"Number of edges: {}\".format(len(Gout.edges())))\n #print (\"Number of nodes:\", len(Gout.nodes()))\n #print (\"Number of edges:\", len(Gout.edges()))\n Gout.graph['N_nodes'] = len(Gout.nodes())\n Gout.graph['N_edges'] = len(Gout.edges())\n\n # get total length of edges\n tot_meters = 0\n for i,(u,v,attr_dict) in enumerate(Gout.edges(data=True)):\n tot_meters += attr_dict['length']\n if verbose:\n print (\"Length of edges (km):\", tot_meters/1000)\n Gout.graph['Tot_edge_km'] = tot_meters/1000\n\n if verbose:\n print (\"G.graph:\", Gout.graph)\n\n return Gout\n\n\ndef wkt_list_to_nodes_edges(wkt_list):\n '''Convert wkt list to nodes and edges\n Make an edge between each node in linestring. 
Since one linestring\n may contain multiple edges, this is the safest approach'''\n\n node_loc_set = set() # set of edge locations\n node_loc_dic = {} # key = node idx, val = location\n node_loc_dic_rev = {} # key = location, val = node idx\n edge_loc_set = set() # set of edge locations\n edge_dic = {} # edge properties\n node_iter = 0\n edge_iter = 0\n\n for i,lstring in enumerate(wkt_list):\n # get lstring properties\n shape = shapely.wkt.loads(lstring)\n xs, ys = shape.coords.xy\n length_orig = shape.length\n\n # iterate through coords in line to create edges between every point\n for j,(x,y) in enumerate(zip(xs, ys)):\n loc = (x,y)\n # for first item just make node, not edge\n if j == 0:\n # if not yet seen, create new node\n if loc not in node_loc_set:\n node_loc_set.add(loc)\n node_loc_dic[node_iter] = loc\n node_loc_dic_rev[loc] = node_iter\n node = node_iter\n node_iter += 1\n\n # if not first node in edge, retrieve previous node and build edge\n else:\n prev_loc = (xs[j-1], ys[j-1])\n #print (\"prev_loc:\", prev_loc)\n prev_node = node_loc_dic_rev[prev_loc]\n\n # if new, create new node\n if loc not in node_loc_set:\n node_loc_set.add(loc)\n node_loc_dic[node_iter] = loc\n node_loc_dic_rev[loc] = node_iter\n node = node_iter\n node_iter += 1\n # if seen before, retrieve node properties\n else:\n node = node_loc_dic_rev[loc]\n\n # add edge, which is start_node to end_node\n edge_loc = (loc, prev_loc)\n edge_loc_rev = (prev_loc, loc)\n # shouldn't be duplicate edges, so break if we see one\n if (edge_loc in edge_loc_set) or (edge_loc_rev in edge_loc_set):\n print (\"Oops, edge already seen, returning:\", edge_loc)\n return\n\n # get distance to prev_loc and current loc\n proj_prev = shape.project(Point(prev_loc))\n proj = shape.project(Point(loc))\n # edge length is the diffence of the two projected lengths\n # along the linestring\n edge_length = abs(proj - proj_prev)\n # make linestring\n line_out = LineString([prev_loc, loc])\n line_out_wkt = line_out.wkt\n\n edge_props = {'start': prev_node,\n 'start_loc_pix': prev_loc,\n 'end': node,\n 'end_loc_pix': loc,\n 'length_pix': edge_length,\n 'wkt_pix': line_out_wkt,\n 'geometry_pix': line_out,\n 'osmid': i}\n #print (\"edge_props\", edge_props)\n edge_loc_set.add(edge_loc)\n edge_dic[edge_iter] = edge_props\n edge_iter += 1\n\n return node_loc_dic, edge_dic\n\n\ndef nodes_edges_to_G(node_loc_dic, edge_dic, name='glurp'):\n '''Take output of wkt_list_to_nodes_edges(wkt_list) and create networkx\n graph'''\n\n G = nx.MultiDiGraph()\n # set graph crs and name\n G.graph = {'name': name,\n 'crs': {'init': 'epsg:4326'}\n }\n\n # add nodes\n #for key,val in node_loc_dic.iteritems():\n for key in node_loc_dic.keys():\n val = node_loc_dic[key]\n attr_dict = {'osmid': key,\n 'x_pix': val[0],\n 'y_pix': val[1]}\n G.add_node(key, **attr_dict)\n\n # add edges\n #for key,val in edge_dic.iteritems():\n for key in edge_dic.keys():\n val = edge_dic[key]\n attr_dict = val\n u = attr_dict['start']\n v = attr_dict['end']\n #attr_dict['osmid'] = str(i)\n\n #print (\"nodes_edges_to_G:\", u, v, \"attr_dict:\", attr_dict)\n if type(attr_dict['start_loc_pix']) == list:\n return\n\n G.add_edge(u, v, **attr_dict)\n\n ## always set edge key to zero? 
(for nx 1.X)\n ## THIS SEEMS NECESSARY FOR OSMNX SIMPLIFY COMMAND\n #G.add_edge(u, v, key=0, attr_dict=attr_dict)\n ##G.add_edge(u, v, key=key, attr_dict=attr_dict)\n\n #G1 = ox.simplify_graph(G)\n G2 = G.to_undirected()\n return G2\n\n\ndef clean_sub_graphs(G_,\n min_length=150,\n max_nodes_to_skip=30,\n weight='length_pix',\n verbose=True,\n super_verbose=False):\n '''Remove subgraphs with a max path length less than min_length,\n if the subgraph has more than max_noxes_to_skip, don't check length\n (this step great improves processing time)'''\n\n if len(list(G_.nodes())) == 0:\n return G_\n\n if verbose:\n print (\"Running clean_sub_graphs...\")\n sub_graphs = list(nx.connected_component_subgraphs(G_))\n bad_nodes = []\n if verbose:\n print (\" len(G_.nodes()):\", len(list(G_.nodes())) )\n print (\" len(G_.edges()):\", len(list(G_.edges())) )\n if super_verbose:\n print (\"G_.nodes:\", G_.nodes())\n edge_tmp = G_.edges()[np.random.randint(len(G_.edges()))]\n print (edge_tmp, \"G.edge props:\", G_.edge[edge_tmp[0]][edge_tmp[1]])\n\n for G_sub in sub_graphs:\n # don't check length if too many nodes in subgraph\n if len(G_sub.nodes()) > max_nodes_to_skip:\n continue\n\n else:\n all_lengths = dict(nx.all_pairs_dijkstra_path_length(G_sub, weight=weight))\n if super_verbose:\n print (\" \\nGs.nodes:\", G_sub.nodes() )\n print (\" all_lengths:\", all_lengths )\n # get all lenghts\n lens = []\n #for u,v in all_lengths.iteritems():\n for u in all_lengths.keys():\n v = all_lengths[u]\n #for uprime, vprime in v.iteritems():\n for uprime in v.keys():\n vprime = v[uprime]\n lens.append(vprime)\n if super_verbose:\n print (\" u, v\", u,v )\n print (\" uprime, vprime:\", uprime, vprime )\n max_len = np.max(lens)\n if super_verbose:\n print (\" Max length of path:\", max_len)\n if max_len < min_length:\n bad_nodes.extend(G_sub.nodes())\n if super_verbose:\n print (\" appending to bad_nodes:\", G_sub.nodes())\n\n # remove bad_nodes\n G_.remove_nodes_from(bad_nodes)\n if verbose:\n print (\" num bad_nodes:\", len(bad_nodes))\n #print (\"bad_nodes:\", bad_nodes)\n print (\" len(G'.nodes()):\", len(G_.nodes()))\n print (\" len(G'.edges()):\", len(G_.edges()))\n if super_verbose:\n print (\" G_.nodes:\", G_.nodes())\n\n return G_\n\n\ndef pixelToGeoCoord(xPix,\n yPix,\n inputRaster,\n sourceSR='',\n geomTransform='',\n targetSR=''):\n '''from spacenet geotools'''\n # If you want to garuntee lon lat output, specify TargetSR otherwise, geocoords will be in image geo reference\n # targetSR = osr.SpatialReference()\n # targetSR.ImportFromEPSG(4326)\n # Transform can be performed at the polygon level instead of pixel level\n\n if targetSR =='':\n performReprojection=False\n targetSR = osr.SpatialReference()\n targetSR.ImportFromEPSG(4326)\n else:\n performReprojection=True\n\n if geomTransform=='':\n srcRaster = gdal.Open(inputRaster)\n geomTransform = srcRaster.GetGeoTransform()\n\n source_sr = osr.SpatialReference()\n source_sr.ImportFromWkt(srcRaster.GetProjectionRef())\n\n geom = ogr.Geometry(ogr.wkbPoint)\n xOrigin = geomTransform[0]\n yOrigin = geomTransform[3]\n pixelWidth = geomTransform[1]\n pixelHeight = geomTransform[5]\n\n xCoord = (xPix * pixelWidth) + xOrigin\n yCoord = (yPix * pixelHeight) + yOrigin\n geom.AddPoint(xCoord, yCoord)\n\n\n if performReprojection:\n if sourceSR=='':\n srcRaster = gdal.Open(inputRaster)\n sourceSR = osr.SpatialReference()\n sourceSR.ImportFromWkt(srcRaster.GetProjectionRef())\n coord_trans = osr.CoordinateTransformation(sourceSR, targetSR)\n 
geom.Transform(coord_trans)\n\n return (geom.GetX(), geom.GetY())\n\n\ndef get_node_geo_coords(G, im_file, verbose=False):\n nn = len(G.nodes())\n for i,(n,attr_dict) in enumerate(G.nodes(data=True)):\n if verbose:\n print (\"node:\", n)\n if (i % 1000) == 0:\n if verbose:\n print (\"node\", i, \"/\", nn)\n x_pix, y_pix = attr_dict['x_pix'], attr_dict['y_pix']\n lon, lat = pixelToGeoCoord(x_pix, y_pix, im_file)\n [utm_east, utm_north, utm_zone, utm_letter] =\\\n utm.from_latlon(lat, lon)\n attr_dict['lon'] = lon\n attr_dict['lat'] = lat\n attr_dict['utm_east'] = utm_east\n attr_dict['utm_zone'] = utm_zone\n attr_dict['utm_letter'] = utm_letter\n attr_dict['utm_north'] = utm_north\n attr_dict['x'] = lon\n attr_dict['y'] = lat\n if verbose:\n print (\" \", n, attr_dict)\n\n return G\n\n\ndef convert_pix_lstring_to_geo(wkt_lstring, im_file):\n '''Convert linestring in pixel coords to geo coords'''\n shape = wkt_lstring #shapely.wkt.loads(lstring)\n x_pixs, y_pixs = shape.coords.xy\n coords_latlon = []\n coords_utm = []\n for (x,y) in zip (x_pixs, y_pixs):\n lon, lat = pixelToGeoCoord(x, y, im_file)\n [utm_east, utm_north, utm_zone, utm_letter] = utm.from_latlon(lat, lon)\n coords_utm.append([utm_east, utm_north])\n coords_latlon.append([lon, lat])\n\n lstring_latlon = LineString([Point(z) for z in coords_latlon])\n lstring_utm = LineString([Point(z) for z in coords_utm])\n\n return lstring_latlon, lstring_utm, utm_zone, utm_letter\n\n\ndef get_edge_geo_coords(G,\n im_file,\n remove_pix_geom=True,\n verbose=False):\n ne = len(list(G.edges()))\n for i,(u,v,attr_dict) in enumerate(G.edges(data=True)):\n if verbose:\n print (\"edge:\", u,v)\n if (i % 1000) == 0:\n if verbose:\n print (\"edge\", i, \"/\", ne)\n geom_pix = attr_dict['geometry_pix']\n lstring_latlon, lstring_utm, utm_zone, utm_letter = convert_pix_lstring_to_geo(geom_pix, im_file)\n attr_dict['geometry_latlon_wkt'] = lstring_latlon.wkt\n attr_dict['geometry_utm_wkt'] = lstring_utm.wkt\n attr_dict['length_latlon'] = lstring_latlon.length\n attr_dict['length_utm'] = lstring_utm.length\n attr_dict['length'] = lstring_utm.length\n attr_dict['utm_zone'] = utm_zone\n attr_dict['utm_letter'] = utm_letter\n if verbose:\n print (\" attr_dict:\", attr_dict)\n\n # geometry screws up osmnx.simplify function\n if remove_pix_geom:\n #attr_dict['geometry_wkt'] = lstring_latlon.wkt\n attr_dict['geometry_pix'] = geom_pix.wkt\n\n return G\n", "import argparse\nimport glob\nimport os\n\nimport skimage.io\nos.environ[\"MKL_NUM_THREADS\"] = \"1\"\nos.environ[\"NUMEXPR_NUM_THREADS\"] = \"1\"\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nfrom scipy.spatial.distance import euclidean\n\nfrom functools import partial\nfrom multiprocessing.pool import Pool\n\nimport cv2\nimport pandas as pd\n\nimport numpy as np\nimport pygeoif\nimport tqdm\n\n\ndef stretch_8bit(bands, lower_percent=1, higher_percent=99):\n out = np.zeros_like(bands).astype(np.float32)\n for i in range(bands.shape[-1]):\n a = 0\n b = 1\n band = bands[:, :, i].flatten()\n filtered = band[band > 0]\n if len(filtered) == 0:\n continue\n c = np.percentile(filtered, lower_percent)\n d = np.percentile(filtered, higher_percent)\n t = a + (bands[:, :, i] - c) * (b - a) / (d - c)\n t[t < a] = a\n t[t > b] = b\n out[:, :, i] = t\n return out.astype(np.float32)\n\n\ndef convert_image(tif_image, dataset_version):\n if dataset_version > 3:\n return tif_image\n else:\n return stretch_8bit(tif_image) * 255\n\n\ndef create_mask(lines, thickness=16):\n mask = np.zeros((1300, 1300))\n for line in lines:\n wkt_pix 
= line[\"wkt_pix\"]\n if \"EMPTY\" not in wkt_pix:\n line = wkt_pix\n points = pygeoif.from_wkt(line).coords\n for i in range(1, len(points)):\n pt1 = (int(points[i - 1][0]), int(points[i - 1][1]))\n pt2 = (int(points[i][0]), int(points[i][1]))\n cv2.line(mask, pt1, pt2, (1,), thickness=thickness)\n return mask * 255\n\n\ndef create_speed_mask(lines, thickness=16):\n max_speed = 35 # mps\n\n mask = np.zeros((1300, 1300))\n for line in lines:\n wkt_pix = line[\"wkt_pix\"]\n length_m = line[\"length_m\"]\n travel_time_s = line[\"travel_time_s\"]\n if \"EMPTY\" not in wkt_pix:\n speed = 9. if travel_time_s == 0 else length_m / travel_time_s\n speed_normalized = int(255 * speed / max_speed)\n\n line = wkt_pix\n wkt = pygeoif.from_wkt(line)\n points = wkt.coords\n for i in range(1, len(points)):\n pt1 = (int(points[i - 1][0]), int(points[i - 1][1]))\n pt2 = (int(points[i][0]), int(points[i][1]))\n cv2.line(mask, pt1, pt2, (speed_normalized,), thickness=thickness)\n return mask\n\n\ndef write_mask(id_to_lines, out_dir):\n id, lines = id_to_lines\n mask = create_mask(lines)\n cv2.imwrite(os.path.join(out_dir, \"{}.png\".format(id)), mask)\n\n\ndef write_speed_mask(id_to_lines, out_dir):\n id, lines = id_to_lines\n mask = create_speed_mask(lines)\n cv2.imwrite(os.path.join(out_dir, \"{}.png\".format(id)), mask)\n\n\ndef write_image(img_path, out_dir, dataset_version):\n img = skimage.io.imread(img_path)\n img = convert_image(img, dataset_version)\n img_id = os.path.splitext(os.path.basename(img_path))[0]\n cv2.imwrite(os.path.join(out_dir, \"{}.png\".format(img_id)), img[..., ::-1])\n\ndef write_junctions(id_to_lines, out_dir):\n id, lines = id_to_lines\n mask = create_mask_junctions(lines)\n cv2.imwrite(os.path.join(out_dir, \"{}.png\".format(id)), mask)\n\n\ndef ds_point(p):\n return (p[0] // 5, p[1] // 5)\n\n\ndef create_mask_junctions(lines, thickness=16):\n mask = np.zeros((1300, 1300))\n point_map = {}\n for line in lines:\n wkt_pix = line[\"wkt_pix\"]\n if \"EMPTY\" not in wkt_pix:\n line = wkt_pix\n points = pygeoif.from_wkt(line).coords\n for i in range(1, len(points)):\n pt1 = (int(points[i - 1][0]), int(points[i - 1][1]))\n pt2 = (int(points[i][0]), int(points[i][1]))\n point_map[ds_point(pt1)] = point_map.get(ds_point(pt1), 0) + 1\n point_map[ds_point(pt2)] = point_map.get(ds_point(pt2), 0) + 1\n\n for line in lines:\n wkt_pix = line[\"wkt_pix\"]\n if \"EMPTY\" not in wkt_pix:\n line = wkt_pix\n points = pygeoif.from_wkt(line).coords\n for i in range(1, len(points)):\n pt1 = (int(points[i - 1][0]), int(points[i - 1][1]))\n pt2 = (int(points[i][0]), int(points[i][1]))\n pt2_ori = pt2\n if point_map[ds_point(pt1)] < 3 and point_map[ds_point(pt2)] < 3:\n continue\n if point_map[ds_point(pt1)] >= 3:\n distance = euclidean(pt1, pt2)\n if distance > 32:\n frac = 32 / distance\n xc = pt1[0] + (pt2[0] - pt1[0]) * frac\n yc = pt1[1] + (pt2[1] - pt1[1]) * frac\n pt2 = (int(xc), int(yc))\n cv2.line(mask, pt1, pt2, (1,), thickness=thickness)\n pt2 = pt2_ori\n if point_map[ds_point(pt2)] >= 3:\n tmp = pt1\n pt1 = pt2\n pt2 = tmp\n distance = euclidean(pt1, pt2)\n if distance > 32:\n frac = 32 / distance\n xc = pt1[0] + (pt2[0] - pt1[0]) * frac\n yc = pt1[1] + (pt2[1] - pt1[1]) * frac\n pt2 = (int(xc), int(yc))\n cv2.line(mask, pt1, pt2, (1,), thickness=thickness)\n\n return mask * 255\n\ndef process_dataset(dataset_path, output_dir, workers=12):\n junction_mask_dir = os.path.join(output_dir, \"junction_masks\")\n os.makedirs(junction_mask_dir, exist_ok=True)\n csvs = 
glob.glob(os.path.join(dataset_path, \"*simp.csv\"))\n id_to_data = {}\n for csv in csvs:\n rows = pd.read_csv(csv).values\n for row in rows:\n imageid, wkt_pix, length_m, travel_time_s = row\n lines = id_to_data.get(imageid, [])\n lines.append({\n \"wkt_pix\": wkt_pix,\n \"length_m\": length_m,\n \"travel_time_s\": travel_time_s,\n })\n id_to_data[imageid] = lines\n print(len(id_to_data))\n with Pool(processes=workers) as p:\n with tqdm.tqdm(total=len(id_to_data)) as pbar:\n for _ in tqdm.tqdm(enumerate(p.imap_unordered(partial(write_junctions, out_dir=junction_mask_dir), id_to_data.items()))):\n pbar.update()\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"Prepare masks\")\n arg = parser.add_argument\n arg('--data-dirs', nargs='+')\n arg('--out-dir', type=str, default=\"/wdata\")\n args = parser.parse_args()\n for data_dir in args.data_dirs:\n process_dataset(data_dir, output_dir=args.out_dir)", "import os\nimport sys\nfrom functools import partial\n\nfrom pretrainedmodels import inceptionresnetv2\nfrom torch.nn import Dropout2d, UpsamplingBilinear2d, Sequential\nfrom torch.nn.functional import bilinear, upsample_bilinear\nfrom torch.utils import model_zoo\n\nfrom models import resnet\nfrom models.densenet import densenet121, densenet169, densenet161\nfrom models.dpn import dpn92, dpn131, dpn107, dpn92_mc\nfrom models.irv import InceptionResNetV2\nfrom models.resnet import resnext50_32x4d\nfrom models.senet import se_resnext50_32x4d, se_resnext101_32x4d, senet154, SCSEModule\nimport torch.hub\n\n\ndef load_resnext101():\n return torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x8d_wsl')\n\n\nencoder_params = {\n\n 'seresnext50':\n {'filters': [64, 256, 512, 1024, 2048],\n 'decoder_filters': [96, 192, 256, 256],\n 'init_op': se_resnext50_32x4d,\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth'},\n 'senet154':\n {'filters': [128, 256, 512, 1024, 2048],\n 'decoder_filters': [64, 128, 256, 256],\n 'init_op': senet154,\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth'},\n 'seresnext50_fat':\n {'filters': [64, 256, 512, 1024, 2048],\n 'decoder_filters': [96, 192, 256, 512],\n 'last_upsample': 64,\n 'init_op': se_resnext50_32x4d,\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth'},\n 'seresnext101':\n {'filters': [64, 256, 512, 1024, 2048],\n 'decoder_filters': [64, 128, 256, 256],\n 'last_upsample': 64,\n 'init_op': se_resnext101_32x4d,\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth'},\n 'dpn92':\n {'filters': [64, 336, 704, 1552, 2688],\n 'decoder_filters': [64, 128, 256, 256],\n 'last_upsample': 64,\n 'init_op': dpn92,\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn92_extra-b040e4a9b.pth'},\n 'dpn92_mc':\n {'filters': [64, 336, 704, 1552, 2688],\n 'decoder_filters': [64, 128, 256, 256],\n 'last_upsample': 64,\n 'init_op': partial(dpn92_mc, num_channels=8),\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn92_extra-b040e4a9b.pth'},\n 'resnet34':\n {'filters': [64, 64, 128, 256, 512],\n 'decoder_filters': [64, 128, 256, 512],\n 'last_upsample': 64,\n 'init_op': resnet.resnet34,\n 'url': resnet.model_urls['resnet34']},\n 'resnet101':\n {'filters': [64, 256, 512, 1024, 2048],\n 'decoder_filters': [96, 192, 256, 256],\n 'last_upsample': 64,\n 'init_op': resnet.resnet101,\n 'url': resnet.model_urls['resnet101']},\n 'resnext101':\n {'filters': [64, 256, 512, 1024, 2048],\n 
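# Hedged gloss (inferred from how EncoderDecoder consumes these entries\n # below): 'filters' lists the encoder feature widths per stage,\n # 'decoder_filters' the matching decoder stage widths, 'last_upsample'\n # the width of the final upsampling block, and a 'url' of None means no\n # pretrained checkpoint is pulled for this backbone.\n 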
'decoder_filters': [96, 192, 256, 256],\n 'last_upsample': 64,\n 'init_op': load_resnext101,\n 'url': None},\n 'resnext50':\n {'filters': [64, 256, 512, 1024, 2048],\n 'decoder_filters': [64, 128, 256, 256],\n 'last_upsample': 64,\n 'init_op': resnext50_32x4d,\n 'url': \"https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth\"},\n 'dpn131':\n {'filters': [128, 352, 832, 1984, 2688],\n 'init_op': dpn131,\n 'last_upsample': 64,\n 'decoder_filters': [64, 128, 256, 256],\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn131-71dfe43e0.pth'},\n 'dpn107':\n {'filters': [128, 376, 1152, 2432, 2688],\n 'init_op': dpn107,\n 'last_upsample': 64,\n 'decoder_filters': [64, 128, 256, 256],\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn107_extra-1ac7121e2.pth'},\n 'resnet50':\n {'filters': [64, 256, 512, 1024, 2048],\n 'decoder_filters': [64, 128, 256, 256],\n 'last_upsample': 64,\n 'init_op': resnet.resnet50,\n 'url': resnet.model_urls['resnet50']},\n 'densenet121':\n {'filters': [64, 256, 512, 1024, 1024],\n 'decoder_filters': [64, 128, 256, 256],\n 'last_upsample': 64,\n 'url': None,\n 'init_op': densenet121},\n 'densenet169':\n {'filters': [64, 256, 512, 1280, 1664],\n 'decoder_filters': [64, 128, 256, 256],\n 'last_upsample': 64,\n 'url': None,\n 'init_op': densenet169},\n 'densenet161':\n {'filters': [96, 384, 768, 2112, 2208],\n 'decoder_filters': [64, 128, 256, 256],\n 'last_upsample': 64,\n 'url': None,\n 'init_op': densenet161},\n 'inceptionresnetv2':\n {'filters': [64, 192, 320, 1088, 1536],\n 'decoder_filters': [64, 128, 256, 256],\n 'last_upsample': 64,\n 'url': None,\n 'init_op': inceptionresnetv2},\n 'inceptionresnetv2mc':\n {'filters': [64, 192, 320, 1088, 1536],\n 'decoder_filters': [64, 128, 256, 256],\n 'last_upsample': 64,\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',\n 'init_op': partial(InceptionResNetV2, num_channels=8)},\n\n}\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass BasicConvAct(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=1, dilation=1, activation=nn.ReLU, bias=True):\n super().__init__()\n padding = int((kernel_size - 1) / 2) * dilation\n self.op = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, dilation=dilation,\n bias=bias)\n self.use_act = activation is not None\n if self.use_act:\n self.act = activation()\n\n def forward(self, x):\n x = self.op(x)\n if self.use_act:\n x = self.act(x)\n return x\n\n\nclass Conv1x1(BasicConvAct):\n def __init__(self, in_channels, out_channels, dilation=1, bias=True):\n super().__init__(in_channels, out_channels, kernel_size=1, dilation=dilation, activation=None, bias=bias)\n\n\nclass Conv3x3(BasicConvAct):\n def __init__(self, in_channels, out_channels, dilation=1):\n super().__init__(in_channels, out_channels, kernel_size=3, dilation=dilation, activation=None)\n\n\nclass ConvReLu1x1(BasicConvAct):\n def __init__(self, in_channels, out_channels, dilation=1):\n super().__init__(in_channels, out_channels, kernel_size=1, dilation=dilation, activation=nn.ReLU)\n\n\nclass ConvReLu3x3(BasicConvAct):\n def __init__(self, in_channels, out_channels, dilation=1):\n super().__init__(in_channels, out_channels, kernel_size=3, dilation=dilation, activation=nn.ReLU)\n\n\nclass BasicUpBlock(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=3, activation=nn.ReLU, mode='nearest'):\n super().__init__()\n padding = int((kernel_size - 1) / 2) * 1\n self.op = 
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, dilation=1)\n self.use_act = activation is not None\n self.mode = mode\n if self.use_act:\n self.act = activation()\n\n def forward(self, x):\n x = F.upsample(x, scale_factor=2, mode=self.mode)\n x = self.op(x)\n if self.use_act:\n x = self.act(x)\n return x\n\n\nclass AbstractModel(nn.Module):\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n m.weight.data = nn.init.kaiming_normal_(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def initialize_encoder(self, model, model_url, num_channels_changed=False):\n if os.path.isfile(model_url):\n pretrained_dict = torch.load(model_url)\n else:\n pretrained_dict = model_zoo.load_url(model_url)\n if 'state_dict' in pretrained_dict:\n pretrained_dict = pretrained_dict['state_dict']\n pretrained_dict = {k.replace(\"module.\", \"\"): v for k, v in pretrained_dict.items()}\n model_dict = model.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n if num_channels_changed:\n model.state_dict()[self.first_layer_params_names[0] + '.weight'][:, :3, ...] = pretrained_dict[self.first_layer_params_names[0] + '.weight' ].data\n skip_layers = self.first_layer_params_names\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if\n not any(k.startswith(s) for s in skip_layers)}\n model.load_state_dict(pretrained_dict, strict=False)\n\n @property\n def first_layer_params_names(self):\n return ['conv1.conv']\n\n\nclass EncoderDecoder(AbstractModel):\n def __init__(self, num_classes, num_channels=3, encoder_name='resnet34'):\n if not hasattr(self, 'first_layer_stride_two'):\n self.first_layer_stride_two = False\n if not hasattr(self, 'decoder_block'):\n self.decoder_block = UnetDecoderBlock\n if not hasattr(self, 'bottleneck_type'):\n self.bottleneck_type = ConvBottleneck\n if not hasattr(self, 'use_bilinear_4x'):\n self.use_bilinear_4x = False\n\n self.filters = encoder_params[encoder_name]['filters']\n self.decoder_filters = encoder_params[encoder_name].get('decoder_filters', self.filters[:-1])\n self.last_upsample_filters = encoder_params[encoder_name].get('last_upsample', self.decoder_filters[0] // 2)\n\n super().__init__()\n\n self.num_channels = num_channels\n self.num_classes = num_classes\n self.bottlenecks = nn.ModuleList([self.bottleneck_type(self.filters[-i - 2] + f, f) for i, f in\n enumerate(reversed(self.decoder_filters[:]))])\n\n self.decoder_stages = nn.ModuleList([self.get_decoder(idx) for idx in range(0, len(self.decoder_filters))])\n\n if self.first_layer_stride_two:\n # self.last_upsample = self.decoder_block(self.decoder_filters[0], self.last_upsample_filters,\n # self.last_upsample_filters)\n # TODO: make it configurable\n self.last_upsample = UpsamplingBilinear2d(scale_factor=2)\n if self.use_bilinear_4x:\n self.final = self.make_final_classifier(self.decoder_filters[1], num_classes)\n else:\n self.final = self.make_final_classifier(\n self.last_upsample_filters if self.first_layer_stride_two else self.decoder_filters[0], num_classes)\n self._initialize_weights()\n self.dropout = Dropout2d(p=0.25)\n encoder = encoder_params[encoder_name]['init_op']()\n self.encoder_stages = nn.ModuleList([self.get_encoder(encoder, idx) for idx in range(len(self.filters))])\n if encoder_params[encoder_name]['url'] is not None:\n 
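# Hedged note: pretrained weights load only when a checkpoint URL (or a\n # local file path) is configured; with num_channels != 3,\n # initialize_encoder() copies the pretrained RGB kernels into the first\n # three input channels and leaves the extra channels at their fresh\n # initialization (see AbstractModel.initialize_encoder above).\n 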
self.initialize_encoder(encoder, encoder_params[encoder_name]['url'], num_channels != 3)\n\n # noinspection PyCallingNonCallable\n def forward(self, x):\n # Encoder\n enc_results = []\n for stage in self.encoder_stages:\n # x = self.dropout(x)\n x = stage(x)\n enc_results.append(torch.cat(x, dim=1) if isinstance(x, tuple) else x.clone())\n bottlenecks = self.bottlenecks\n if self.use_bilinear_4x:\n bottlenecks = bottlenecks[:-1]\n for idx, bottleneck in enumerate(bottlenecks):\n rev_idx = - (idx + 1)\n x = self.decoder_stages[rev_idx](x)\n x = bottleneck(x, enc_results[rev_idx - 1])\n if self.use_bilinear_4x:\n x = self.dropout(x)\n\n if not self.use_bilinear_4x and self.first_layer_stride_two:\n x = self.last_upsample(x)\n\n f = self.final(x)\n\n if self.use_bilinear_4x:\n f = upsample_bilinear(f, scale_factor=4)\n\n return f\n\n def get_decoder(self, layer):\n in_channels = self.filters[layer + 1] if layer + 1 == len(self.decoder_filters) else self.decoder_filters[\n layer + 1]\n return self.decoder_block(in_channels, self.decoder_filters[layer], self.decoder_filters[max(layer, 0)])\n\n def make_final_classifier(self, in_filters, num_classes):\n return nn.Sequential(\n nn.Conv2d(in_filters, num_classes, 1, padding=0)\n )\n\n def get_encoder(self, encoder, layer):\n raise NotImplementedError\n\n @property\n def first_layer_params(self):\n return _get_layers_params([self.encoder_stages[0]])\n\n @property\n def first_layer_params_names(self):\n raise NotImplementedError\n\n @property\n def layers_except_first_params(self):\n layers = get_slice(self.encoder_stages, 1, -1) + [self.bottlenecks, self.decoder_stages, self.final]\n return _get_layers_params(layers)\n\n\ndef _get_layers_params(layers):\n return sum((list(l.parameters()) for l in layers), [])\n\n\ndef get_slice(features, start, end):\n if end == -1:\n end = len(features)\n return [features[i] for i in range(start, end)]\n\n\nclass ConvBottleneck(nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, padding=1),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, dec, enc):\n x = torch.cat([dec, enc], dim=1)\n return self.seq(x)\n\n\nclass UnetDecoderBlock(nn.Module):\n def __init__(self, in_channels, middle_channels, out_channels):\n super().__init__()\n self.layer = nn.Sequential(\n nn.Upsample(scale_factor=2),\n nn.Conv2d(in_channels, out_channels, 3, padding=1),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n return self.layer(x)\n\n\nclass SCSeResneXt(EncoderDecoder):\n\n def __init__(self, seg_classes, backbone_arch, reduction=2, mode='concat'):\n if not hasattr(self, 'bottleneck_type'):\n self.bottleneck_type = partial(ConvSCSEBottleneckNoBn, reduction=reduction, mode=mode)\n self.first_layer_stride_two = True\n self.concat_scse = mode == 'concat'\n\n super().__init__(seg_classes, 3, backbone_arch)\n self.last_upsample = self.decoder_block(\n self.decoder_filters[0] * 2 if self.concat_scse else self.decoder_filters[0],\n self.last_upsample_filters,\n self.last_upsample_filters)\n\n def calc_dec_filters(self, d_filters):\n return d_filters * 2 if self.concat_scse else d_filters\n\n def forward(self, x):\n enc_results = []\n for stage in self.encoder_stages:\n x = stage(x)\n enc_results.append(x.clone())\n dec_results = []\n\n for idx, bottleneck in enumerate(self.bottlenecks):\n rev_idx = - (idx + 1)\n x = self.decoder_stages[rev_idx](x)\n x = bottleneck(x, enc_results[rev_idx - 1])\n dec_results.append(x)\n\n if 
self.first_layer_stride_two:\n x = self.last_upsample(x)\n\n mask = self.final(x)\n return mask\n\n def get_decoder(self, layer):\n in_channels = self.filters[layer + 1] if layer + 1 == len(self.decoder_filters) else self.decoder_filters[\n layer + 1]\n if self.concat_scse and layer + 1 < len(self.decoder_filters):\n in_channels *= 2\n\n return self.decoder_block(in_channels, self.decoder_filters[layer], self.decoder_filters[max(layer, 0)])\n\n def get_encoder(self, encoder, layer):\n if layer == 0:\n return encoder.layer0\n elif layer == 1:\n return nn.Sequential(\n encoder.pool,\n encoder.layer1)\n elif layer == 2:\n return encoder.layer2\n elif layer == 3:\n return encoder.layer3\n elif layer == 4:\n return encoder.layer4\n\n\nclass ConvSCSEBottleneckNoBn(nn.Module):\n def __init__(self, in_channels, out_channels, reduction=2, mode='concat'):\n print(\"bottleneck \", in_channels, out_channels)\n super().__init__()\n self.seq = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, padding=1),\n nn.ReLU(inplace=True),\n SCSEModule(out_channels, reduction=reduction, mode=mode)\n )\n\n def forward(self, dec, enc):\n x = torch.cat([dec, enc], dim=1)\n return self.seq(x)\n\n\nclass Resnet(EncoderDecoder):\n def __init__(self, seg_classes, backbone_arch):\n self.first_layer_stride_two = True\n super().__init__(seg_classes, 3, backbone_arch)\n\n def get_encoder(self, encoder, layer):\n if layer == 0:\n return nn.Sequential(\n encoder.conv1,\n encoder.bn1,\n encoder.relu)\n elif layer == 1:\n return nn.Sequential(\n encoder.maxpool,\n encoder.layer1)\n elif layer == 2:\n return encoder.layer2\n elif layer == 3:\n return encoder.layer3\n elif layer == 4:\n return encoder.layer4\n\n\nclass ResneXt(EncoderDecoder):\n def __init__(self, seg_classes, backbone_arch):\n self.use_bilinear_4x = True\n super().__init__(seg_classes, 3, backbone_arch)\n\n def get_encoder(self, encoder, layer):\n if layer == 0:\n return nn.Sequential(\n encoder.conv1,\n encoder.bn1,\n encoder.relu)\n elif layer == 1:\n return nn.Sequential(\n encoder.maxpool,\n encoder.layer1)\n elif layer == 2:\n return encoder.layer2\n elif layer == 3:\n return encoder.layer3\n elif layer == 4:\n return encoder.layer4\n\n\nclass DPNUnet(EncoderDecoder):\n def __init__(self, seg_classes, backbone_arch='dpn92', num_channels=3):\n self.first_layer_stride_two = True\n super().__init__(seg_classes, num_channels, backbone_arch)\n\n def get_encoder(self, encoder, layer):\n if layer == 0:\n return nn.Sequential(\n encoder.blocks['conv1_1'].conv, # conv\n encoder.blocks['conv1_1'].bn, # bn\n encoder.blocks['conv1_1'].act, # relu\n )\n elif layer == 1:\n return nn.Sequential(\n encoder.blocks['conv1_1'].pool, # maxpool\n *[b for k, b in encoder.blocks.items() if k.startswith('conv2_')]\n )\n elif layer == 2:\n return nn.Sequential(*[b for k, b in encoder.blocks.items() if k.startswith('conv3_')])\n elif layer == 3:\n return nn.Sequential(*[b for k, b in encoder.blocks.items() if k.startswith('conv4_')])\n elif layer == 4:\n return nn.Sequential(*[b for k, b in encoder.blocks.items() if k.startswith('conv5_')])\n\n @property\n def first_layer_params_names(self):\n return ['features.conv1_1.conv']\n\nclass DensenetUnet(EncoderDecoder):\n def __init__(self, seg_classes, backbone_arch='dpn92'):\n self.first_layer_stride_two = True\n super().__init__(seg_classes, 3, backbone_arch)\n\n def get_encoder(self, encoder, layer):\n if layer == 0:\n return nn.Sequential(\n encoder.features.conv0, # conv\n encoder.features.norm0, # bn\n encoder.features.relu0 
# relu\n )\n elif layer == 1:\n return nn.Sequential(encoder.features.pool0, encoder.features.denseblock1)\n elif layer == 2:\n return nn.Sequential(encoder.features.transition1, encoder.features.denseblock2)\n elif layer == 3:\n return nn.Sequential(encoder.features.transition2, encoder.features.denseblock3)\n elif layer == 4:\n return nn.Sequential(encoder.features.transition3, encoder.features.denseblock4, encoder.features.norm5,\n nn.ReLU())\n\n\nclass SEUnet(EncoderDecoder):\n def __init__(self, seg_classes, backbone_arch='senet154'):\n self.first_layer_stride_two = True\n super().__init__(seg_classes, 3, backbone_arch)\n\n def get_encoder(self, encoder, layer):\n if layer == 0:\n return encoder.layer0\n elif layer == 1:\n return nn.Sequential(\n encoder.pool,\n encoder.layer1)\n elif layer == 2:\n return encoder.layer2\n elif layer == 3:\n return encoder.layer3\n elif layer == 4:\n return encoder.layer4\n\n\nclass IRV2Unet(EncoderDecoder):\n def __init__(self, seg_classes, backbone_arch='inceptionresnetv2', num_channels=3):\n self.first_layer_stride_two = True\n super().__init__(seg_classes, num_channels, backbone_arch)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n if m.kernel_size == (3, 3):\n m.padding = (1, 1)\n if isinstance(m, nn.MaxPool2d):\n m.padding = (1, 1)\n\n def get_encoder(self, encoder, layer):\n if layer == 0:\n return Sequential(encoder.conv2d_1a, encoder.conv2d_2a, encoder.conv2d_2b)\n elif layer == 1:\n return nn.Sequential(\n encoder.maxpool_3a,\n encoder.conv2d_3b,\n encoder.conv2d_4a\n )\n elif layer == 2:\n return nn.Sequential(\n encoder.maxpool_5a,\n encoder.mixed_5b,\n encoder.repeat\n )\n elif layer == 3:\n return nn.Sequential(\n encoder.mixed_6a,\n encoder.repeat_1,\n )\n elif layer == 4:\n return nn.Sequential(\n encoder.mixed_7a,\n encoder.repeat_2,\n encoder.block8,\n encoder.conv2d_7b,\n )\n @property\n def first_layer_params_names(self):\n return ['conv2d_1a.conv']\n\n def initialize_encoder(self, model, model_url, num_channels_changed=False):\n del model.last_linear\n super().initialize_encoder(model, model_url, num_channels_changed)\n\n\nsetattr(sys.modules[__name__], 'scse_unet', partial(SCSeResneXt))\nsetattr(sys.modules[__name__], 'se_unet', partial(SEUnet))\nsetattr(sys.modules[__name__], 'scse_unet_addition', partial(SCSeResneXt, reduction=16, mode='addition'))\nsetattr(sys.modules[__name__], 'resnet_unet', partial(Resnet))\nsetattr(sys.modules[__name__], 'resnext_unet', partial(ResneXt))\nsetattr(sys.modules[__name__], 'dpn_unet', partial(DPNUnet))\nsetattr(sys.modules[__name__], 'dpn_unet_mc', partial(DPNUnet, num_channels=8))\nsetattr(sys.modules[__name__], 'irv_unet_mc', partial(IRV2Unet, num_channels=8))\nsetattr(sys.modules[__name__], 'densenet_unet', partial(DensenetUnet))\nsetattr(sys.modules[__name__], 'irv_unet', partial(IRV2Unet))\n\n__all__ = ['scse_unet',\n 'scse_unet_addition',\n 'resnet_unet',\n 'resnext_unet',\n 'dpn_unet',\n 'dpn_unet_mc',\n 'se_unet',\n 'densenet_unet',\n 'irv_unet',\n 'irv_unet_mc',\n ]\n\nif __name__ == '__main__':\n import numpy as np\n\n d = DPNUnet(3, num_channels=8, backbone_arch='dpn92_mc')\n d.eval()\n with torch.no_grad():\n images = torch.from_numpy(np.zeros((2, 8, 256, 256), dtype=\"float32\"))\n i = d(images)\n print(d)\n print(i.size())\n", "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 8 00:10:40 2018\n\n@author: avanetten\n\nRead in a list of wkt linestrings, render to networkx graph\n\"\"\"\n\nfrom __future__ import print_function\nimport 
os\nimport utm\nimport shapely.wkt\nimport shapely.ops\nfrom shapely.geometry import mapping, Point, LineString\nimport fiona\nimport networkx as nx\nimport osmnx as ox\nfrom osgeo import gdal, ogr, osr\nimport argparse\nimport json\nimport pandas as pd\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport logging\n# import cv2\n\nfrom utils import make_logger\n#from json.config import Config\nfrom jsons.config import Config\n\nlogger1 = None\n\n\n###############################################################################\n# from apls.py\n###############################################################################\ndef clean_sub_graphs(G_, min_length=150, max_nodes_to_skip=30,\n weight='length_pix', verbose=False,\n super_verbose=False):\n '''Remove subgraphs with a max path length less than min_length;\n if the subgraph has more than max_nodes_to_skip nodes, don't check length \n (this step greatly improves processing time)'''\n \n if len(list(G_.nodes())) == 0:\n return G_\n \n print (\"Running clean_sub_graphs...\")\n sub_graphs = list(nx.connected_component_subgraphs(G_))\n bad_nodes = []\n if verbose:\n print (\" len(G_.nodes()):\", len(list(G_.nodes())) )\n print (\" len(G_.edges()):\", len(list(G_.edges())) )\n if super_verbose:\n print (\"G_.nodes:\", G_.nodes())\n edge_tmp = G_.edges()[np.random.randint(len(G_.edges()))]\n print (edge_tmp, \"G.edge props:\", G_.edge[edge_tmp[0]][edge_tmp[1]])\n\n for G_sub in sub_graphs:\n # don't check length if too many nodes in subgraph\n if len(G_sub.nodes()) > max_nodes_to_skip:\n continue\n \n else:\n all_lengths = dict(nx.all_pairs_dijkstra_path_length(G_sub, weight=weight))\n if super_verbose:\n print (\" \\nGs.nodes:\", G_sub.nodes() )\n print (\" all_lengths:\", all_lengths )\n # get all lengths\n lens = []\n #for u,v in all_lengths.iteritems():\n for u in all_lengths.keys():\n v = all_lengths[u]\n #for uprime, vprime in v.iteritems():\n for uprime in v.keys():\n vprime = v[uprime]\n lens.append(vprime)\n if super_verbose:\n print (\" u, v\", u,v )\n print (\" uprime, vprime:\", uprime, vprime )\n max_len = np.max(lens)\n if super_verbose:\n print (\" Max length of path:\", max_len)\n if max_len < min_length:\n bad_nodes.extend(G_sub.nodes())\n if super_verbose:\n print (\" appending to bad_nodes:\", G_sub.nodes())\n\n # remove bad_nodes\n G_.remove_nodes_from(bad_nodes)\n if verbose:\n print (\" num bad_nodes:\", len(bad_nodes))\n #print (\"bad_nodes:\", bad_nodes)\n print (\" len(G'.nodes()):\", len(G_.nodes()))\n print (\" len(G'.edges()):\", len(G_.edges()))\n if super_verbose:\n print (\" G_.nodes:\", G_.nodes())\n \n return G_\n\n\n###############################################################################\ndef remove_short_edges(G_, min_spur_length_m=100, verbose=False):\n \"\"\"Remove unconnected edges shorter than the desired length\"\"\"\n if verbose:\n print(\"Remove short edges\")\n deg_list = list(G_.degree)\n # iterate through list\n bad_nodes = []\n for i, (n, deg) in enumerate(deg_list):\n # if verbose and (i % 500) == 0:\n # print(n, deg)\n # check if node has only one neighbor\n if deg == 1:\n # get edge\n edge = list(G_.edges(n))\n u, v = edge[0]\n # get edge length\n edge_props = G_.get_edge_data(u, v, 0)\n length = edge_props['length']\n # edge_props = G_.edges([u, v])\n\n if length < min_spur_length_m:\n bad_nodes.append(n)\n if verbose:\n print(i, \"/\", len(list(G_.nodes())),\n \"n, deg, u, v, length:\", n, deg, u, v, length)\n\n if verbose:\n print(\"bad_nodes:\", bad_nodes)\n 
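# Hedged worked example: with min_spur_length_m=5, a degree-1 node whose\n # single incident edge is only 3 m long lands in bad_nodes, and removing\n # that node below drops its dangling spur edge along with it.\n 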
G_.remove_nodes_from(bad_nodes)\n if verbose:\n print(\"num remaining nodes:\", len(list(G_.nodes())))\n return G_\n\n\n###############################################################################\ndef wkt_list_to_nodes_edges(wkt_list):\n '''Convert wkt list to nodes and edges\n Make an edge between each node in linestring. Since one linestring\n may contain multiple edges, this is the safest approach'''\n \n node_loc_set = set() # set of node locations\n node_loc_dic = {} # key = node idx, val = location\n node_loc_dic_rev = {} # key = location, val = node idx\n edge_loc_set = set() # set of edge locations\n edge_dic = {} # edge properties\n node_iter = 0\n edge_iter = 0\n \n for i,lstring in enumerate(wkt_list):\n # get lstring properties\n shape = shapely.wkt.loads(lstring)\n xs, ys = shape.coords.xy\n length_orig = shape.length\n \n # iterate through coords in line to create edges between every point\n for j,(x,y) in enumerate(zip(xs, ys)):\n loc = (x,y)\n # for first item just make node, not edge\n if j == 0:\n # if not yet seen, create new node\n if loc not in node_loc_set:\n node_loc_set.add(loc)\n node_loc_dic[node_iter] = loc\n node_loc_dic_rev[loc] = node_iter\n node = node_iter\n node_iter += 1\n \n # if not first node in edge, retrieve previous node and build edge\n else:\n prev_loc = (xs[j-1], ys[j-1])\n #print (\"prev_loc:\", prev_loc)\n prev_node = node_loc_dic_rev[prev_loc]\n\n # if new, create new node\n if loc not in node_loc_set:\n node_loc_set.add(loc)\n node_loc_dic[node_iter] = loc\n node_loc_dic_rev[loc] = node_iter\n node = node_iter\n node_iter += 1\n # if seen before, retrieve node properties\n else:\n node = node_loc_dic_rev[loc]\n\n \n # add edge, which is start_node to end_node\n edge_loc = (loc, prev_loc)\n edge_loc_rev = (prev_loc, loc)\n # there shouldn't be duplicate edges, so return early if we see one\n if (edge_loc in edge_loc_set) or (edge_loc_rev in edge_loc_set):\n print (\"Oops, edge already seen, returning:\", edge_loc)\n return\n \n # get distance to prev_loc and current loc\n proj_prev = shape.project(Point(prev_loc))\n proj = shape.project(Point(loc))\n # edge length is the difference of the two projected lengths\n # along the linestring\n edge_length = abs(proj - proj_prev)\n # make linestring\n line_out = LineString([prev_loc, loc])\n line_out_wkt = line_out.wkt\n \n edge_props = {'start': prev_node,\n 'start_loc_pix': prev_loc,\n 'end': node,\n 'end_loc_pix': loc,\n 'length_pix': edge_length,\n 'wkt_pix': line_out_wkt,\n 'geometry_pix': line_out,\n 'osmid': i}\n #print (\"edge_props\", edge_props)\n \n edge_loc_set.add(edge_loc)\n edge_dic[edge_iter] = edge_props\n edge_iter += 1\n\n return node_loc_dic, edge_dic\n \n\n############################################################################### \ndef wkt_list_to_nodes_edges_sloppy(wkt_list):\n '''Convert wkt list to nodes and edges\n Assumes each linestring corresponds to a unique edge\n Since this is not always the case, this function fails if a linestring\n contains multiple edges'''\n \n node_loc_set = set() # set of node locations\n node_loc_dic = {} # key = node idx, val = location\n node_loc_dic_rev = {} # key = location, val = node idx\n edge_dic = {} # edge properties\n node_iter = 0\n edge_iter = 0\n \n for lstring in wkt_list:\n # get lstring properties\n shape = shapely.wkt.loads(lstring)\n x, y = shape.coords.xy\n length = shape.length\n \n # set start node\n start_loc = (x[0], y[0])\n # if new, create new node\n if start_loc not in node_loc_set:\n node_loc_set.add(start_loc)\n 
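# Hedged note: unlike the strict variant above, only the linestring's two\n # endpoints become nodes here; interior vertices are ignored, which is\n # why this version is labeled sloppy.\n 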
node_loc_dic[node_iter] = start_loc\n node_loc_dic_rev[start_loc] = node_iter\n start_node = node_iter\n node_iter += 1\n # if seen before, retrieve node properties\n else:\n start_node = node_loc_dic_rev[start_loc]\n \n # set end node (just like start node)\n end_loc = (x[-1], y[-1])\n # if new, create new node\n if end_loc not in node_loc_set:\n node_loc_set.add(end_loc)\n node_loc_dic[node_iter] = end_loc\n node_loc_dic_rev[end_loc] = node_iter\n end_node = node_iter\n node_iter += 1\n # if seen before, retrieve node properties\n else:\n end_node = node_loc_dic_rev[end_loc]\n \n \n # add edge, which is start_node to end_node\n edge_props = {'start': start_node,\n 'start_loc_pix': start_loc,\n 'end': end_node,\n 'end_loc_pix': end_loc,\n 'length_pix': length,\n 'wkt_pix': lstring,\n 'geometry_pix': shape}\n\n edge_dic[edge_iter] = edge_props\n edge_iter += 1\n\n return node_loc_dic, edge_dic\n \n\n###############################################################################\ndef nodes_edges_to_G(node_loc_dic, edge_dic, name='glurp'):\n '''Take output of wkt_list_to_nodes_edges(wkt_list) and create networkx \n graph'''\n \n G = nx.MultiDiGraph()\n # set graph crs and name\n G.graph = {'name': name,\n 'crs': {'init': 'epsg:4326'}\n }\n \n # add nodes\n #for key,val in node_loc_dic.iteritems():\n for key in node_loc_dic.keys():\n val = node_loc_dic[key]\n attr_dict = {'osmid': key,\n 'x_pix': val[0],\n 'y_pix': val[1]}\n G.add_node(key, **attr_dict)\n \n # add edges\n #for key,val in edge_dic.iteritems():\n for key in edge_dic.keys():\n val = edge_dic[key]\n attr_dict = val\n u = attr_dict['start']\n v = attr_dict['end']\n #attr_dict['osmid'] = str(i)\n \n #print (\"nodes_edges_to_G:\", u, v, \"attr_dict:\", attr_dict)\n if type(attr_dict['start_loc_pix']) == list:\n return\n \n G.add_edge(u, v, **attr_dict)\n \n ## always set edge key to zero? 
(for nx 1.X)\n ## THIS SEEMS NECESSARY FOR OSMNX SIMPLIFY COMMAND\n #G.add_edge(u, v, key=0, attr_dict=attr_dict)\n ##G.add_edge(u, v, key=key, attr_dict=attr_dict)\n \n #G1 = ox.simplify_graph(G)\n \n G2 = G.to_undirected()\n \n return G2\n\n###############################################################################\ndef wkt_to_shp(wkt_list, shp_file):\n '''Take output of build_graph_wkt() and render the list of linestrings\n into a shapefile\n # https://gis.stackexchange.com/questions/52705/how-to-write-shapely-geometries-to-shapefiles\n '''\n \n # Define a linestring feature geometry with one attribute\n schema = {\n 'geometry': 'LineString',\n 'properties': {'id': 'int'},\n }\n \n # Write a new shapefile\n with fiona.open(shp_file, 'w', 'ESRI Shapefile', schema) as c:\n for i,line in enumerate(wkt_list):\n shape = shapely.wkt.loads(line)\n c.write({\n 'geometry': mapping(shape),\n 'properties': {'id': i},\n })\n \n return\n\n###############################################################################\ndef shp_to_G(shp_file):\n '''Ingest G from shapefile\n DOES NOT APPEAR TO WORK CORRECTLY'''\n \n G = nx.read_shp(shp_file)\n \n return G\n \n\n###############################################################################\ndef pixelToGeoCoord(xPix, yPix, inputRaster, sourceSR='', geomTransform='', targetSR=''):\n '''from spacenet geotools'''\n # If you want to guarantee lon/lat output, specify targetSR; otherwise geocoords will be in the image geo reference\n # targetSR = osr.SpatialReference()\n # targetSR.ImportFromEPSG(4326)\n # Transform can be performed at the polygon level instead of pixel level\n\n if targetSR =='':\n performReprojection=False\n targetSR = osr.SpatialReference()\n targetSR.ImportFromEPSG(4326)\n else:\n performReprojection=True\n\n if geomTransform=='':\n srcRaster = gdal.Open(inputRaster)\n geomTransform = srcRaster.GetGeoTransform()\n\n source_sr = osr.SpatialReference()\n source_sr.ImportFromWkt(srcRaster.GetProjectionRef())\n\n geom = ogr.Geometry(ogr.wkbPoint)\n xOrigin = geomTransform[0]\n yOrigin = geomTransform[3]\n pixelWidth = geomTransform[1]\n pixelHeight = geomTransform[5]\n\n xCoord = (xPix * pixelWidth) + xOrigin\n yCoord = (yPix * pixelHeight) + yOrigin\n geom.AddPoint(xCoord, yCoord)\n\n if performReprojection:\n if sourceSR=='':\n srcRaster = gdal.Open(inputRaster)\n sourceSR = osr.SpatialReference()\n sourceSR.ImportFromWkt(srcRaster.GetProjectionRef())\n coord_trans = osr.CoordinateTransformation(sourceSR, targetSR)\n geom.Transform(coord_trans)\n\n return (geom.GetX(), geom.GetY())\n\n###############################################################################\ndef get_node_geo_coords(G, im_file, verbose=False):\n \n nn = len(G.nodes())\n for i,(n,attr_dict) in enumerate(G.nodes(data=True)):\n if verbose:\n print (\"node:\", n)\n # if (i % 1000) == 0:\n # print (\"node\", i, \"/\", nn, attr_dict)\n x_pix, y_pix = attr_dict['x_pix'], attr_dict['y_pix']\n \n targetSR = osr.SpatialReference()\n targetSR.ImportFromEPSG(4326)\n lon, lat = pixelToGeoCoord(x_pix, y_pix, im_file, targetSR=targetSR)\n \n # fix zone\n if i == 0:\n [utm_east, utm_north, utm_zone, utm_letter] =\\\n utm.from_latlon(lat, lon)\n else:\n [utm_east, utm_north, _, _] = utm.from_latlon(lat, lon,\n force_zone_number=utm_zone, force_zone_letter=utm_letter)\n \n if lat > 90:\n print(n, attr_dict)\n return\n attr_dict['lon'] = lon\n attr_dict['lat'] = lat\n attr_dict['utm_east'] = utm_east\n attr_dict['utm_zone'] = utm_zone\n attr_dict['utm_letter'] = utm_letter\n 
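# Hedged example: utm.from_latlon returns (easting_m, northing_m, zone,\n # band), so a Las Vegas AOI point maps to roughly zone 11, band 'S'.\n 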
attr_dict['utm_north'] = utm_north \n attr_dict['x'] = lon\n attr_dict['y'] = lat\n\n if (i % 1000) == 0:\n print (\"node\", i, \"/\", nn, attr_dict)\n\n if verbose:\n print (\" \", n, attr_dict)\n\n return G\n\n\n###############################################################################\ndef convert_pix_lstring_to_geo(wkt_lstring, im_file, \n utm_zone=None, utm_letter=None, verbose=False):\n '''Convert linestring in pixel coords to geo coords\n If the zone or letter changes in the middle of the line, it's all screwed up, so\n force zone and letter based on the first point\n (latitude, longitude, force_zone_number=None, force_zone_letter=None)\n Or just force utm zone and letter explicitly\n '''\n shape = wkt_lstring #shapely.wkt.loads(lstring)\n x_pixs, y_pixs = shape.coords.xy\n coords_latlon = []\n coords_utm = []\n for i,(x,y) in enumerate(zip(x_pixs, y_pixs)):\n \n targetSR = osr.SpatialReference()\n targetSR.ImportFromEPSG(4326)\n lon, lat = pixelToGeoCoord(x, y, im_file, targetSR=targetSR)\n\n if utm_zone and utm_letter:\n [utm_east, utm_north, _, _] = utm.from_latlon(lat, lon,\n force_zone_number=utm_zone, force_zone_letter=utm_letter)\n else:\n [utm_east, utm_north, utm_zone, utm_letter] = utm.from_latlon(lat, lon)\n \n# # If the zone or letter changes in the middle of the line, it's all screwed up, so\n# # force zone and letter based on the first point\n# if i == 0:\n# [utm_east, utm_north, utm_zone, utm_letter] = utm.from_latlon(lat, lon)\n# else:\n# [utm_east, utm_north, _, _] = utm.from_latlon(lat, lon,\n# force_zone_number=utm_zone, force_zone_letter=utm_letter)\n if verbose:\n print(\"lat lon, utm_east, utm_north, utm_zone, utm_letter]\",\n [lat, lon, utm_east, utm_north, utm_zone, utm_letter])\n coords_utm.append([utm_east, utm_north])\n coords_latlon.append([lon, lat])\n \n lstring_latlon = LineString([Point(z) for z in coords_latlon])\n lstring_utm = LineString([Point(z) for z in coords_utm])\n \n return lstring_latlon, lstring_utm, utm_zone, utm_letter \n\n\n###############################################################################\ndef get_edge_geo_coords(G, im_file, remove_pix_geom=True,\n verbose=False):\n \n ne = len(list(G.edges()))\n for i,(u,v,attr_dict) in enumerate(G.edges(data=True)):\n if verbose:\n print (\"edge:\", u,v)\n print (\" attr_dict_init:\", attr_dict)\n\n # if (i % 1000) == 0:\n # print (\"edge\", i, \"/\", ne)\n geom_pix = attr_dict['geometry_pix']\n \n # fix utm zone and letter to first item seen\n if i == 0:\n lstring_latlon, lstring_utm, utm_zone, utm_letter = convert_pix_lstring_to_geo(geom_pix, im_file)\n else: \n lstring_latlon, lstring_utm, _, _ \\\n = convert_pix_lstring_to_geo(geom_pix, im_file,\n utm_zone=utm_zone,\n utm_letter=utm_letter)\n # lstring_latlon, lstring_utm, utm_zone, utm_letter = convert_pix_lstring_to_geo(geom_pix, im_file)\n attr_dict['geometry_latlon_wkt'] = lstring_latlon.wkt\n attr_dict['geometry_utm_wkt'] = lstring_utm.wkt\n attr_dict['length_latlon'] = lstring_latlon.length\n attr_dict['length_utm'] = lstring_utm.length\n attr_dict['length'] = lstring_utm.length\n attr_dict['utm_zone'] = utm_zone\n attr_dict['utm_letter'] = utm_letter\n if verbose:\n print (\" attr_dict_final:\", attr_dict)\n \n # geometry screws up osmnx.simplify function\n if remove_pix_geom:\n #attr_dict['geometry_wkt'] = lstring_latlon.wkt\n attr_dict['geometry_pix'] = geom_pix.wkt\n \n # ensure utm length isn't excessive\n if lstring_utm.length > 5000:\n #print(u, v, \"edge length too long!:\", attr_dict)\n return\n \n return 
G\n\n################################################################################\n#def get_xy_geo_coords(xs_pix, ys_pix, im_file):\n# \n# dict_list = []\n# for (x_pix,y_pix) in zip(xs_pix,ys_pix):\n# attr_dict = {}\n# lon, lat = pixelToGeoCoord(x_pix, y_pix, im_file)\n# [utm_east, utm_north, utm_zone, utm_letter] =\\\n# utm.from_latlon(lat, lon)\n# attr_dict['lon'] = lon\n# attr_dict['lat'] = lat\n# attr_dict['utm_east'] = utm_east\n# attr_dict['utm_zone'] = utm_zone\n# attr_dict['utm_letter'] = utm_letter\n# attr_dict['utm_north'] = utm_north \n# attr_dict['x'] = lon\n# attr_dict['y'] = lat\n# #print \" \", n, attr_dict\n# return dict_list\n\n\n###############################################################################\ndef wkt_to_G(wkt_list, im_file=None, min_subgraph_length_pix=30, \n min_spur_length_m=5, simplify_graph=True, verbose=False):\n '''Run the full pipeline: build the pixel graph from wkt_list, attach\n geo coordinates, project, prune short spurs, and optionally simplify'''\n\n t0 = time.time()\n #print (\"Running wkt_list_to_nodes_edges()...\")\n node_loc_dic, edge_dic = wkt_list_to_nodes_edges(wkt_list)\n t1 = time.time()\n #print (\"Time to run wkt_list_to_nodes_edges():\", t1 - t0, \"seconds\")\n \n #print (\"node_loc_dic:\", node_loc_dic)\n #print (\"edge_dic:\", edge_dic)\n \n #print (\"Creating G...\")\n G0 = nodes_edges_to_G(node_loc_dic, edge_dic) \n #print (\" len(G.nodes():\", len(G0.nodes()))\n #print (\" len(G.edges():\", len(G0.edges()))\n #for edge_tmp in G0.edges():\n # print (\"\\n 0 wkt_to_G():\", edge_tmp, G0.edge[edge_tmp[0]][edge_tmp[1]])\n \n \n t2 = time.time()\n #print (\"Time to run nodes_edges_to_G():\", t2-t1, \"seconds\")\n \n #print (\"Clean out short subgraphs\")\n G0 = clean_sub_graphs(G0, min_length=min_subgraph_length_pix, \n max_nodes_to_skip=30,\n weight='length_pix', verbose=True,\n super_verbose=False)\n t3 = time.time()\n #print (\"Time to run clean_sub_graphs():\", t3-t2, \"seconds\")\n\n if len(G0) == 0 or len(G0.edges()) == 0:\n return G0\n \n# print (\"Simplifying graph\")\n# G0 = ox.simplify_graph(G0.to_directed())\n# G0 = G0.to_undirected()\n# #G0 = ox.project_graph(G0)\n# #G_p_init = create_edge_linestrings(G_p_init, remove_redundant=True, verbose=False)\n# t3 = time.time()\n# print (\" len(G.nodes():\", len(G0.nodes()))\n# print (\" len(G.edges():\", len(G0.edges()))\n# print (\"Time to run simplify graph:\", t30 - t3, \"seconds\")\n \n #for edge_tmp in G0.edges():\n # print (\"\\n 1 wkt_to_G():\", edge_tmp, G0.edge[edge_tmp[0]][edge_tmp[1]])\n \n #edge_tmp = G0.edges()[5]\n #print (edge_tmp, \"G0.edge props:\", G0.edge[edge_tmp[0]][edge_tmp[1]])\n\n \n # geo coords\n if im_file:\n #print (\"Running get_node_geo_coords()...\")\n G1 = get_node_geo_coords(G0, im_file, verbose=verbose)\n t4 = time.time()\n #print (\"Time to run get_node_geo_coords():\", t4-t3, \"seconds\")\n\n #print (\"Running get_edge_geo_coords()...\")\n G1 = get_edge_geo_coords(G1, im_file, verbose=verbose)\n t5 = time.time()\n #print (\"Time to run get_edge_geo_coords():\", t5-t4, \"seconds\")\n\n #print(\"pre projection...\")\n node = list(G1.nodes())[-1]\n #print(node, \"random node props:\", G1.nodes[node])\n # print an edge\n edge_tmp = list(G1.edges())[-1]\n #print(edge_tmp, \"random edge props:\", G1.get_edge_data(edge_tmp[0], edge_tmp[1]))\n\n #print (\"projecting graph...\")\n G_projected = ox.project_graph(G1)\n \n #print(\"post projection...\")\n node = list(G_projected.nodes())[-1]\n #print(node, \"random node props:\", G_projected.nodes[node])\n # print an edge\n edge_tmp = list(G_projected.edges())[-1]\n #print(edge_tmp, \"random edge props:\", 
G_projected.get_edge_data(edge_tmp[0], edge_tmp[1]))\n\n t6 = time.time()\n #print (\"Time to project graph:\", t6-t5, \"seconds\")\n\n # simplify\n #G_simp = ox.simplify_graph(G_projected.to_directed())\n #ox.plot_graph(G_projected)\n #G1.edge[19][22]\n \n Gout = G_projected #G_simp\n \n else:\n Gout = G0\n\n\n ###########################################################################\n # remove short edges\n t31 = time.time()\n Gout = remove_short_edges(Gout, min_spur_length_m=min_spur_length_m)\n t32 = time.time()\n #print(\"Time to remove_short_edges():\", t32 - t31, \"seconds\")\n ###########################################################################\n \n if simplify_graph:\n #print (\"Simplifying graph\")\n t7 = time.time()\n G0 = ox.simplify_graph(Gout.to_directed())\n Gout = G0.to_undirected()\n #Gout = ox.project_graph(G0)\n \n t8 = time.time()\n #print (\"Time to run simplify graph:\", t8-t7, \"seconds\")\n # When the simplify function combines edges, it concatenates multiple\n # edge properties into a list. This means that 'geometry_pix' is now\n # a list of geoms. Convert this to a single linestring with\n # shapely.ops.linemerge\n print (\"Merge 'geometry' linestrings...\")\n keys_tmp = ['geometry_pix', 'geometry_latlon_wkt', 'geometry_utm_wkt']\n for key_tmp in keys_tmp:\n #print (\"Merge\", key_tmp, \"...\")\n for i,(u,v,attr_dict) in enumerate(Gout.edges(data=True)):\n # if (i % 10000) == 0:\n # print (i, u , v)\n geom = attr_dict[key_tmp]\n #print (i, u, v, \"geom:\", geom)\n #print (\" type(geom):\", type(geom))\n \n if type(geom) == list:\n # check if the list items are wkt strings, if so, create\n # linestrings\n if (type(geom[0]) == str):# or (type(geom_pix[0]) == unicode):\n geom = [shapely.wkt.loads(ztmp) for ztmp in geom]\n # merge geoms\n #geom = shapely.ops.linemerge(geom)\n #attr_dict[key_tmp] = geom\n attr_dict[key_tmp] = shapely.ops.linemerge(geom)\n elif type(geom) == str:\n attr_dict[key_tmp] = shapely.wkt.loads(geom)\n else:\n pass\n\n # assign 'geometry' tag to geometry_utm_wkt\n for i,(u,v,attr_dict) in enumerate(Gout.edges(data=True)):\n if verbose:\n print (\"Create 'geometry' field in edges...\")\n #geom_pix = attr_dict[key_tmp]\n line = attr_dict['geometry_utm_wkt'] \n if type(line) == str:# or type(line) == unicode:\n attr_dict['geometry'] = shapely.wkt.loads(line) \n else:\n attr_dict['geometry'] = attr_dict['geometry_utm_wkt'] \n # update wkt_pix?\n #print (\"attr_dict['geometry_pix':\", attr_dict['geometry_pix'])\n attr_dict['wkt_pix'] = attr_dict['geometry_pix'].wkt\n \n # update 'length_pix'\n attr_dict['length_pix'] = np.sum([attr_dict['length_pix']])\n \n # Gout = ox.project_graph(Gout) \n \n # get a few stats (and set to graph properties)\n logger1.info(\"Number of nodes: {}\".format(len(Gout.nodes())))\n logger1.info(\"Number of edges: {}\".format(len(Gout.edges())))\n #print (\"Number of nodes:\", len(Gout.nodes()))\n #print (\"Number of edges:\", len(Gout.edges()))\n Gout.graph['N_nodes'] = len(Gout.nodes())\n Gout.graph['N_edges'] = len(Gout.edges())\n \n # get total length of edges\n tot_meters = 0\n for i,(u,v,attr_dict) in enumerate(Gout.edges(data=True)):\n tot_meters += attr_dict['length'] \n #print (\"Length of edges (km):\", tot_meters/1000)\n Gout.graph['Tot_edge_km'] = tot_meters/1000\n\n #print (\"G.graph:\", Gout.graph)\n \n t7 = time.time()\n #print (\"Total time to run wkt_to_G():\", t7-t0, \"seconds\")\n \n #for edge_tmp in Gout.edges():\n # print (\"\\n 2 wkt_to_G():\", edge_tmp, Gout.edge[edge_tmp[0]][edge_tmp[1]])\n\n \n 
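# Hedged usage sketch (im_path is an assumed GeoTIFF path, not from the\n # original source):\n # G = wkt_to_G(['LINESTRING (0 0, 100 100)'], im_file=im_path)\n # print(len(G.nodes()), len(G.edges())) # nodes carry lon/lat/UTM attrs\n 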
return Gout\n\n################################################################################\n#def csv_to_wkt_list(df_wkt, im_root):\n# '''Return wkt_list for all rows in df corresponding to im_root'''\n# \n# df_filt = df_wkt[df_wkt['ImageId'] == im_root]\n \n\ndef main():\n \n global logger1 \n \n # min_subgraph_length_pix = 300\n min_spur_length_m = 0.001 # default = 5\n local = False #True\n verbose = False\n super_verbose = False\n make_plots = False #True\n save_shapefiles = True #False\n pickle_protocol = 4 # 4 is most recent, python 2.7 can't read 4\n \n # local\n if local:\n albu_path = '/Users/avanetten/Documents/cosmiq/apls/albu_inference_mod'\n path_images = '/Users/avanetten/Documents/cosmiq/spacenet/data/spacenetv2/AOI_2_Vegas_Test/400m/RGB-PanSharpen'\n res_root_dir = os.path.join(albu_path, 'results/2m_4fold_512_30e_d0.2_g0.2_AOI_2_Vegas_Test')\n csv_file = os.path.join(res_root_dir, 'wkt_submission.csv')\n graph_dir = os.path.join(res_root_dir, 'graphs')\n log_file = os.path.join(res_root_dir, 'wkt_to_G.log')\n #os.makedirs(graph_dir, exist_ok=True)\n try:\n os.makedirs(graph_dir)\n except:\n pass\n \n # deployed on dev box\n else:\n parser = argparse.ArgumentParser()\n parser.add_argument('config_path')\n args = parser.parse_args()\n with open(args.config_path, 'r') as f:\n cfg = json.load(f)\n config = Config(**cfg)\n \n # output files\n res_root_dir = os.path.join(config.path_results_root, config.test_results_dir)\n path_images = os.path.join(config.path_data_root, config.test_data_refined_dir)\n csv_file = os.path.join(res_root_dir, config.wkt_submission)\n graph_dir = os.path.join(res_root_dir, config.graph_dir)\n log_file = os.path.join(res_root_dir, 'wkt_to_G.log')\n os.makedirs(graph_dir, exist_ok=True)\n\n min_subgraph_length_pix = config.min_subgraph_length_pix\n min_spur_length_m = config.min_spur_length_m\n\n console, logger1 = make_logger.make_logger(log_file, logger_name='log')\n# ###############################################################################\n# # https://docs.python.org/3/howto/logging-cookbook.html#logging-to-multiple-destinations\n# # set up logging to file - see previous section for more details\n# logging.basicConfig(level=logging.DEBUG,\n# format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n# datefmt='%m-%d %H:%M',\n# filename=log_file,\n# filemode='w')\n# # define a Handler which writes INFO messages or higher to the sys.stderr\n# console = logging.StreamHandler()\n# console.setLevel(logging.INFO)\n# # set a format which is simpler for console use\n# formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n# #formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n# # tell the handler to use this format\n# console.setFormatter(formatter)\n# # add the handler to the root logger\n# logging.getLogger('').addHandler(console)\n# logger1 = logging.getLogger('log')\n# logger1.info(\"log file: {x}\".format(x=log_file))\n# ############################################################################### \n \n \n# csv_file = os.path.join(res_root_dir, 'merged_wkt_list.csv')\n# graph_dir = os.path.join(res_root_dir, 'graphs')\n# #os.makedirs(graph_dir, exist_ok=True)\n# try:\n# os.makedirs(graph_dir)\n# except:\n# pass\n\n # read in wkt list\n logger1.info(\"df_wkt at: {}\".format(csv_file))\n #print (\"df_wkt at:\", csv_file)\n df_wkt = pd.read_csv(csv_file)\n # columns=['ImageId', 'WKT_Pix'])\n\n # iterate through image ids and create graphs\n t0 = time.time()\n image_ids = 
np.sort(np.unique(df_wkt['ImageId']))\n #print(\"image_ids:\", image_ids)\n print(\"len image_ids:\", len(image_ids))\n\n for i,image_id in enumerate(image_ids):\n \n #if image_id != 'AOI_2_Vegas_img586':\n # continue\n out_file = os.path.join(graph_dir, image_id.split('.')[0] + '.gpickle')\n \n logger1.info(\"\\n{x} / {y}, {z}\".format(x=i+1, y=len(image_ids), z=image_id))\n #print (\"\\n\")\n #print (i, \"/\", len(image_ids), image_id)\n \n # for geo referencing, im_file should be the raw image\n if config.num_channels == 3:\n im_file = os.path.join(path_images, 'RGB-PanSharpen_' + image_id + '.tif')\n else:\n im_file = os.path.join(path_images, 'MUL-PanSharpen_' + image_id + '.tif') \n #im_file = os.path.join(path_images, image_id)\n if not os.path.exists(im_file):\n im_file = os.path.join(path_images, image_id + '.tif')\n if not os.path.exists(im_file):\n f = [f for f in os.listdir(path_images) if image_id in f][0]\n im_file = os.path.join(path_images, f)\n #print('im_file:', im_file)\n \n # filter \n df_filt = df_wkt['WKT_Pix'][df_wkt['ImageId'] == image_id]\n wkt_list = df_filt.values\n #wkt_list = [z[1] for z in df_filt_vals]\n \n # print a few values\n logger1.info(\"\\n{x} / {y}, num linestrings: {z}\".format(x=i+1, y=len(image_ids), z=len(wkt_list)))\n #print (\"\\n\", i, \"/\", len(image_ids), \"num linestrings:\", len(wkt_list))\n if verbose:\n print (\"image_file:\", im_file)\n print (\" wkt_list[:2]\", wkt_list[:2])\n \n if (len(wkt_list) == 0) or (wkt_list[0] == 'LINESTRING EMPTY'):\n G = nx.MultiDiGraph()\n nx.write_gpickle(G, out_file, protocol=pickle_protocol)\n continue\n \n # create graph\n t1 = time.time()\n G = wkt_to_G(wkt_list, im_file=im_file, \n min_subgraph_length_pix=min_subgraph_length_pix,\n min_spur_length_m=min_spur_length_m,\n verbose=super_verbose)\n t2 = time.time()\n if verbose:\n logger1.info(\"Time to create graph: {} seconds\".format(t2-t1))\n #print (\"Time to create graph:\", t2-t1, \"seconds\")\n \n if len(G.nodes()) == 0 or len(G.edges()) == 0:\n nx.write_gpickle(G, out_file, protocol=pickle_protocol)\n continue\n \n # print a node\n node = list(G.nodes())[-1]\n #print (node, \"random node props:\", G.nodes[node])\n # print an edge\n edge_tmp = list(G.edges())[-1]\n #print (edge_tmp, \"random edge props:\", G.edges([edge_tmp[0], edge_tmp[1]])) #G.edge[edge_tmp[0]][edge_tmp[1]])\n print (edge_tmp, \"random edge props:\", G.get_edge_data(edge_tmp[0], edge_tmp[1]))\n\n # save graph\n logger1.info(\"Saving graph to directory: {}\".format(graph_dir))\n #print (\"Saving graph to directory:\", graph_dir)\n nx.write_gpickle(G, out_file, protocol=pickle_protocol)\n \n # save shapefile as well?\n if save_shapefiles:\n logger1.info(\"Saving shapefile to directory: {}\".format(graph_dir))\n try:\n ox.save_graph_shapefile(G, filename=image_id.split('.')[0] , folder=graph_dir, encoding='utf-8')\n except:\n print(\"Cannot save shapefile...\")\n #out_file2 = os.path.join(graph_dir, image_id.split('.')[0] + '.graphml')\n #ox.save_graphml(G, image_id.split('.')[0] + '.graphml', folder=graph_dir)\n\n # plot, if desired\n if make_plots:\n print (\"Plotting graph...\")\n outfile_plot = os.path.join(graph_dir, image_id)\n print (\"outfile_plot:\", outfile_plot)\n ox.plot_graph(G, fig_height=9, fig_width=9, \n #save=True, filename=outfile_plot, margin=0.01)\n )\n #plt.tight_layout()\n plt.savefig(outfile_plot, dpi=400)\n \n #if i > 30:\n # break\n \n tf = time.time()\n logger1.info(\"Time to run wkt_to_G.py: {} seconds\".format(tf - t0))\n #print (\"Time to run 
wkt_to_G.py:\", tf - t0, \"seconds\")\n\n# # test...\n#\n#\n# im_dir = '/Users/avanetten/Documents/cosmiq/spacenet/data/spacenetv2/AOI_2_Vegas_Test/400m/RGB-PanSharpen'\n# im_root = 'RGB-PanSharpen_AOI_2_Vegas_img1005.tif'\n# im_file = os.path.join(im_dir, im_root)\n# #im = cv2.imread(im_file, 3)\n# \n# # AOI_2_Vegas_img1005\n# wkt_list = [\"LINESTRING (648.0 2.0, 651.0 192.0)\",\n# \"LINESTRING (1252.0 2.0, 1252.0 162.0, 1261.0 375.0, 1264.0 586.0)\",\n# \"LINESTRING (651.0 192.0, 293.0 195.0)\",\n# \"LINESTRING (651.0 192.0, 661.0 316.0)\",\n# \"LINESTRING (685.0 301.0, 683.0 329.0, 691.0 456.0, 692.0 593.0)\",\n# \"LINESTRING (632.0 312.0, 630.0 444.0, 633.0 593.0)\",\n# \"LINESTRING (1264.0 586.0, 1200.0 592.0)\",\n# \"LINESTRING (1264.0 586.0, 1296.0 588.0)\",\n# \"LINESTRING (1200.0 592.0, 1138.0 589.0, 692.0 593.0)\",\n# \"LINESTRING (633.0 593.0, 692.0 593.0)\",\n# \"LINESTRING (692.0 593.0, 688.0 774.0, 683.0 858.0, 678.0 885.0, 653.0 915.0)\",\n# \"LINESTRING (1200.0 592.0, 1203.0 753.0)\",\n# \"LINESTRING (633.0 593.0, 629.0 598.0, 539.0 604.0, 417.0 621.0, 255.0 637.0, 2.0 654.0)\",\n# \"LINESTRING (629.0 598.0, 628.0 889.0, 624.0 902.0)\",\n# \"LINESTRING (1203.0 753.0, 934.0 756.0, 912.0 759.0, 892.0 770.0, 887.0 782.0, 885.0 807.0, 876.0 1219.0)\",\n# \"LINESTRING (1203.0 753.0, 1210.0 840.0, 1216.0 1111.0, 1215.0 1199.0, 1210.0 1216.0, 1198.0 1223.0, 1162.0 1228.0, 1006.0 1229.0, 932.0 1227.0, 876.0 1219.0)\",\n# \"LINESTRING (2.0 814.0, 160.0 806.0, 314.0 795.0, 392.0 792.0, 421.0 794.0, 428.0 799.0, 435.0 822.0, 440.0 899.0, 447.0 908.0)\",\n# \"LINESTRING (624.0 902.0, 618.0 896.0, 609.0 894.0, 494.0 894.0, 469.0 898.0, 458.0 908.0)\",\n# \"LINESTRING (624.0 902.0, 626.0 912.0)\",\n# \"LINESTRING (447.0 908.0, 458.0 908.0)\",\n# \"LINESTRING (447.0 908.0, 439.0 924.0, 432.0 1126.0, 429.0 1139.0, 422.0 1148.0, 395.0 1153.0, 2.0 1161.0)\",\n# \"LINESTRING (458.0 908.0, 468.0 918.0, 500.0 922.0, 594.0 924.0, 614.0 922.0, 626.0 912.0, 653.0 915.0, 659.0 923.0, 654.0 1168.0, 656.0 1224.0)\",\n# \"LINESTRING (876.0 1219.0, 656.0 1224.0, 653.0 1296.0)\"] \n#\n# \n# # Execute\n# G = wkt_to_G(wkt_list, im_file=im_file, verbose=False)\n# \n## node_loc_dic, edge_dic = wkt_list_to_nodes_edges(wkt_list)\n## G3 = nodes_edges_to_G(node_loc_dic, edge_dic) \n## G4 = get_node_geo_coords(G3, im_file)\n## G4 = get_edge_geo_coords(G4, im_file)\n## G4.edge[19][22]\n## G_projected = ox.project_graph(G4)\n## ox.plot_graph(G_projected)\n# \n# \n# # check ox downloads\n# G = ox.graph_from_bbox(37.79, 37.78, -122.41, -122.43, network_type='drive')\n# G.nodes() \n# G.node[65327144]\n# G_projected = ox.project_graph(G)\n# G.node[65327144]\n# G.edges()\n# G.edge[65327144][65352337]\n# a = G.edge[65327144][65352337][0]['geometry']\n# \n# \n# \n# ## https://bitbucket.org/gallipoli/nx_spatial/src\n# #import nx_spatial as ns \n# #z = shapely.wkt.loads(wkt_list[1])\n# #shp_file = '/Users/avanetten/Documents/cosmiq/apls/albu_inference_mod/results/my_shp2.shp'\n# \n# #wkt_to_shp(wkt_list, shp_file)\n# #G2 = ns.read_shp(shp_file) \n# #nx.draw(G2)\n# ##ox.plot_graph(G2)\n# #\n# #G = shp_to_G(shp_file)\n# #nx.draw(G)\n# ##ox.plot_graph(G)\n# #\n# # \n# #\n# #s = wkt_list[0]\n# #\n# ## Convert to a shapely.geometry.polygon.Polygon object\n# #g1 = shapely.wkt.loads(s)\n# #\n# #g2 = geojson.Feature(geometry=g1, properties={})\n \n \n###############################################################################\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.dot", "numpy.copy", "matplotlib.pylab.plt.title", "numpy.max", "numpy.linalg.norm", "pandas.DataFrame", "numpy.cross", "numpy.array", "numpy.linalg.det", "matplotlib.pylab.plt.imshow", "numpy.argwhere", "numpy.squeeze", "scipy.spatial.distance.pdist", "numpy.sum", "numpy.ones", "numpy.degrees", "numpy.any", "matplotlib.pylab.plt.show", "numpy.all", "numpy.moveaxis", "matplotlib.pylab.plt.plot" ], [ "numpy.max", "matplotlib.pyplot.savefig", "numpy.sum" ], [ "numpy.zeros_like", "scipy.spatial.distance.euclidean", "numpy.zeros", "numpy.percentile", "pandas.read_csv" ], [ "torch.cat", "torch.nn.functional.upsample", "torch.nn.UpsamplingBilinear2d", "torch.nn.Sequential", "numpy.zeros", "torch.no_grad", "torch.nn.init.kaiming_normal_", "torch.utils.model_zoo.load_url", "torch.nn.ReLU", "torch.nn.Upsample", "torch.nn.Conv2d", "torch.load", "torch.nn.functional.upsample_bilinear", "torch.nn.Dropout2d", "torch.hub.load" ], [ "numpy.max", "numpy.sum", "matplotlib.pyplot.savefig", "pandas.read_csv", "numpy.unique" ] ]
yuanxiangyuee/ins_seg_HRSIs
[ "d716a5d7726bb016a4f6c1032ef3e3ad108d00bb" ]
[ "carafe.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Function\nfrom torch.nn.modules.module import Module\n\n\ndef xavier_init(module, gain=1, bias=0, distribution='normal'):\n assert distribution in ['uniform', 'normal']\n if distribution == 'uniform':\n nn.init.xavier_uniform_(module.weight, gain=gain)\n else:\n nn.init.xavier_normal_(module.weight, gain=gain)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)\n\n\ndef normal_init(module, mean=0, std=1, bias=0):\n nn.init.normal_(module.weight, mean, std)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)\n\n\nclass CARAFEFunction(Function):\n\n @staticmethod\n def forward(ctx, features, masks, kernel_size, group_size, scale_factor):\n assert scale_factor >= 1\n assert masks.size(1) == kernel_size * kernel_size * group_size\n assert masks.size(-1) == features.size(-1) * scale_factor\n assert masks.size(-2) == features.size(-2) * scale_factor\n assert features.size(1) % group_size == 0\n assert (kernel_size - 1) % 2 == 0 and kernel_size >= 1\n ctx.kernel_size = kernel_size\n ctx.group_size = group_size\n ctx.scale_factor = scale_factor\n ctx.feature_size = features.size()\n ctx.mask_size = masks.size()\n\n n, c, h, w = features.size()\n output = features.new_zeros((n, c, h * scale_factor, w * scale_factor))\n routput = features.new_zeros(output.size(), requires_grad=False)\n rfeatures = features.new_zeros(features.size(), requires_grad=False)\n rmasks = masks.new_zeros(masks.size(), requires_grad=False)\n if features.is_cuda:\n carafe_ext.forward(features, rfeatures, masks, rmasks, kernel_size,\n group_size, scale_factor, routput, output)\n else:\n raise NotImplementedError\n\n if features.requires_grad or masks.requires_grad:\n ctx.save_for_backward(features, masks, rfeatures)\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n assert grad_output.is_cuda\n\n features, masks, rfeatures = ctx.saved_tensors\n kernel_size = ctx.kernel_size\n group_size = ctx.group_size\n scale_factor = ctx.scale_factor\n\n rgrad_output = torch.zeros_like(grad_output, requires_grad=False)\n rgrad_input_hs = torch.zeros_like(grad_output, requires_grad=False)\n rgrad_input = torch.zeros_like(features, requires_grad=False)\n rgrad_masks = torch.zeros_like(masks, requires_grad=False)\n grad_input = torch.zeros_like(features, requires_grad=False)\n grad_masks = torch.zeros_like(masks, requires_grad=False)\n carafe_ext.backward(grad_output.contiguous(), rfeatures, masks,\n kernel_size, group_size, scale_factor,\n rgrad_output, rgrad_input_hs, rgrad_input,\n rgrad_masks, grad_input, grad_masks)\n return grad_input, grad_masks, None, None, None, None\n\n\ncarafe = CARAFEFunction.apply\n\n\nclass CARAFEPack(nn.Module):\n \"\"\" A unified package of CARAFE upsampler that contains:\n 1) channel compressor 2) content encoder 3) CARAFE op\n\n Official implementation of ICCV 2019 paper\n CARAFE: Content-Aware ReAssembly of FEatures\n Please refer to https://arxiv.org/abs/1905.02188 for more details.\n\n Args:\n channels (int): input feature channels\n scale_factor (int): upsample ratio\n up_kernel (int): kernel size of CARAFE op\n up_group (int): group size of CARAFE op\n encoder_kernel (int): kernel size of content encoder\n encoder_dilation (int): dilation of content encoder\n compressed_channels (int): output channels of channels compressor\n\n Returns:\n upsampled feature map\n \"\"\"\n\n def __init__(self,\n 
channels,\n scale_factor,\n up_kernel=5,\n up_group=1,\n encoder_kernel=3,\n encoder_dilation=1,\n compressed_channels=64):\n super(CARAFEPack, self).__init__()\n self.channels = channels\n self.scale_factor = scale_factor\n self.up_kernel = up_kernel\n self.up_group = up_group\n self.encoder_kernel = encoder_kernel\n self.encoder_dilation = encoder_dilation\n self.compressed_channels = compressed_channels\n self.channel_compressor = nn.Conv2d(channels, self.compressed_channels,\n 1)\n self.content_encoder = nn.Conv2d(\n self.compressed_channels,\n self.up_kernel * self.up_kernel * self.up_group *\n self.scale_factor * self.scale_factor,\n self.encoder_kernel,\n padding=int((self.encoder_kernel - 1) * self.encoder_dilation / 2),\n dilation=self.encoder_dilation,\n groups=1)\n self.init_weights()\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n normal_init(self.content_encoder, std=0.001)\n\n def kernel_normalizer(self, mask):\n mask = F.pixel_shuffle(mask, self.scale_factor)\n n, mask_c, h, w = mask.size()\n mask_channel = int(mask_c / (self.up_kernel * self.up_kernel))\n mask = mask.view(n, mask_channel, -1, h, w)\n\n mask = F.softmax(mask, dim=2)\n mask = mask.view(n, mask_c, h, w).contiguous()\n\n return mask\n\n def feature_reassemble(self, x, mask):\n x = carafe(x, mask, self.up_kernel, self.up_group, self.scale_factor)\n return x\n\n def forward(self, x):\n compressed_x = self.channel_compressor(x)\n mask = self.content_encoder(compressed_x)\n mask = self.kernel_normalizer(mask)\n\n x = self.feature_reassemble(x, mask)\n return x" ]
[ [ "torch.nn.init.constant_", "torch.nn.functional.pixel_shuffle", "torch.nn.init.xavier_uniform_", "torch.nn.init.normal_", "torch.nn.Conv2d", "torch.nn.functional.softmax", "torch.zeros_like", "torch.nn.init.xavier_normal_" ] ]
kmes007/EducationProject
[ "14cd4a20341792d240e37f8f73bd795f74fa4fc3" ]
[ "app.py" ]
[ "\n# import sqlalchemy\n# from sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n# import plotly\n# import plotly.graph_objs as go\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom flask import Flask, jsonify, render_template, url_for, flash, redirect\nimport pandas as pd\n\nengine = create_engine(\"sqlite:///db/Performance.sqlite\")\n\n\nconn = engine.connect()\nsession=Session(engine)\n\n# Define the app as a flask app\napp= Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.Performance'\n\n# Define the home\n@app.route(\"/\")\ndef index():\n \"\"\"Return the homepage.\"\"\"\n return render_template(\"index.html\")\n\n@app.route('/scores')\ndef scores():\n scores_df = pd.read_sql(\"SELECT * FROM scores\", engine)\n scores_df['Mean Scale Score'] = pd.to_numeric(scores_df['Mean Scale Score'], errors='coerce')\n# # return scores_df.to_json(None, 'records')\nreturn (scores_df.groupby('State/District/School')['Mean Scale Score'].mean()).to_json(None, 'split')\n# # return jsonify(['score1', 'score2'])\n# #Change just to trigger restart\n\n@app.route('/retention')\ndef retention():\n retention_df = pd.read_sql(\"SELECT * FROM retention\", engine)\n return (retention_df(\"Job Classification\")['Mean'].mean()).to_json(None, 'split')\n\n\n# @app.route('budget')\n\n# @app.route('salary')\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n \n \n" ]
[ [ "pandas.read_sql", "pandas.to_numeric" ] ]
DanielTisza/spectralcamera
[ "4fef93b3b4cd8f83e016070f1c0d68aa0cff5102" ]
[ "python/capture_images/led_set_f.py" ]
[ "# ----------------------------------------------------------------------------\n#\tled_set_f.py\n#\n#\tCopyright 2021 Daniel Tisza\n#\tMIT License\n#\n#\tAcquiring LED set F wavelengths radiance and reflectance\n#\n# ----------------------------------------------------------------------------\n\nimport fpipy as fp\nimport matplotlib\n\nfrom camazing import CameraList\nfrom spectracular.fpi_driver import detectFPIDevices, createFPIDevice\nfrom spectracular.hsi import HSI\n\nfrom LEDDriver import detect_LED_devices, LEDDriver, LEDException\n\nLED_IDS = [\n # ( VID, PID) (and the same in decimal)\n ('1FC9', '0083'), (8137, 131),\n ]\n\"\"\"Known VID:PID pairs of LED devices.\"\"\"\nLED_HWIDS = [\n # Strings returned by read_hardware_id\n '1000e016 aefba123 580267dd f5001982',\n\t'10025018 af28a028 5a66a511 f5001983'\n]\n\nledportdevice = detect_LED_devices()\nprint(ledportdevice[0])\n\nled = LEDDriver('COM10')\nprint(led)\n\nled.open()\n\nprint('Turning off LEDs')\nled.L(0)\n\n\ncameras = CameraList()\nprint(cameras)\n\ncamera = cameras[0]\ncamera.initialize()\n\t\t\nFPI_IDS = [\n # ( VID, PID) (and the same in decimal)\n ('1FC9', '0083'), (8137, 131),\n ]\n\"\"\"Known VID:PID pairs of FPI devices.\"\"\"\nFPI_HWIDS = [\n # Strings returned by read_hardware_id\n\t 'd02b012 af380065 5b5bbeab f50019c1'\n]\n\nfpi = createFPIDevice(detectFPIDevices(FPI_IDS, FPI_HWIDS)[0].device)\nprint(fpi)\n\nhsi = HSI(camera, fpi)\nprint(hsi)\nhsi.read_calibration_file('led_set_f_calib.txt')\n\ncamera[\"TriggerMode\"].value = \"On\"\ncamera[\"TriggerSource\"].value = \"Software\"\ncamera[\"ExposureAuto\"].value = \"Off\"\ncamera[\"PixelFormat\"].value = \"BayerGB12\"\n# camera[\"ExposureTime\"].value = 100000\n# camera[\"ExposureTime\"].value = 60000\ncamera[\"ExposureTime\"].value = 25000\n# camera[\"ExposureTime\"].value = 10000\ncamera[\"BalanceWhiteAuto\"].value = \"Off\"\ncamera[\"Gamma\"].value = 1\ncamera[\"Gain\"].value = 1.9382002601\ncamera[\"GainAuto\"].value = \"Off\"\n\ninput(\"Put the lens cap on\")\nhsi.take_dark_reference()\nprint(hsi.dataset.dark)\n\ninput(\"Take the lens cap off and set white reference\")\n\nprint('Turning on LEDs')\n\n# VNIR1 and VNIR2\n# \n# 775.054537\n# 818.8148698\n#\n# 000111100000111100000111100\n# * Reverse for LED control:\n# 001111000001111000001111000\n#\nled.L(0b001111000001111000001111000)\nprint('Capturing white reference')\nwhite_raw = hsi.capture_cube()\n\ninput(\"Set image (only for radiance)\")\n\nprint('Capturing cube')\nraw = hsi.capture_cube()\nprint(raw)\n\nprint('Turning off LEDs')\nled.L(0)\n\nprint('Calculating radiance')\nrad = fp.raw_to_radiance(raw, keep_variables=['dark'])\nprint(rad)\nprint(rad['radiance'])\n\nprint('Calculating white radiance')\nrad['white'] = fp.raw_to_radiance(white_raw, keep_variables = []).radiance\nprint(rad['white'])\n\nprint('Calculating reflectance')\nrad['reflectance'] = rad.radiance / rad.white\nprint(rad['reflectance'])\n\n# reflectance = fp.radiance_to_reflectance(rad, white_raw, keep_variables=[])\n# print(reflectance)\n\nprint('Extracting single frame from cube and saving to PNG')\ntest = rad[\"radiance\"]\n\nprint('Radiance data')\ntestdata = test.data\nprint(testdata)\n\nprint('White data')\nwhitedata = rad['white'].data\nprint(whitedata)\n\nprint('Reflectance data')\nreflectdata = rad['reflectance'].data\nprint(reflectdata)\n\nprint (\"Wavelengths\")\nwavelengths = rad[\"wavelength\"].data\nprint(wavelengths)\n\nprint (\"Wavelengths count\")\nwavelengthCount = len(wavelengths)\nprint(wavelengthCount)\n\n# 
Multiple peaks result in multiple of single calib file row count\nimagelastindex = wavelengthCount\n\n#\n# Save radiance images\n#\nprint('Start saving radiance images')\nfor x in range(0, imagelastindex):\n\n\twavelengthValue = wavelengths[x]\n\twavelengthStr = str(wavelengthValue)\n\twavelengthReplacedStr = wavelengthStr.replace(\".\", \"p\")\n\tprint('Saving wavelength: ' + wavelengthStr)\n\t\n\trad1 = testdata[:,:,x]\n\tmatplotlib.image.imsave('rad_' + wavelengthReplacedStr + 'nm_' + str(x) + '.png', rad1)\n\n\twhite1 = whitedata[:,:,x]\n\t# matplotlib.image.imsave('white_' + wavelengthReplacedStr + 'nm_' + str(x) + '.png', white1)\n\t\n\tref1 = reflectdata[:,:,x]\n\tmatplotlib.image.imsave('refl_' + wavelengthReplacedStr + 'nm_' + str(x) + '.png', ref1, vmin=0,vmax=1)\n\n\n\nimport matplotlib.pyplot as plt\nplt.gray()\n\n#\n# Save raw images and demosaic images\n#\nprint('Start saving raw data')\nfor x in range(1, 2):\n\n\t# Raw data values\n\tdn1 = raw.dn.isel(index=x)\n\tmatplotlib.image.imsave('raw_' + str(x) + '.png', dn1)\n\n\t# Demosaic to get three colour channels\n\tdm1 = fp.demosaic(dn1, 'BayerGB', 'bilinear')\n\tdm1_red = dm1[:,:,0]\n\tdm1_green = dm1[:,:,1]\n\tdm1_blue = dm1[:,:,2]\n\n\tmatplotlib.image.imsave('raw_' + str(x) + '_demosaic_red.png', dm1_red)\n\tmatplotlib.image.imsave('raw_' + str(x) + '_demosaic_green.png', dm1_green)\n\tmatplotlib.image.imsave('raw_' + str(x) + '_demosaic_blue.png', dm1_blue)\n\n" ]
[ [ "matplotlib.pyplot.gray" ] ]
mbp28/determinantal-point-processes
[ "e3cebb9209ab47de41c35a0d9ef605d608ec9d98" ]
[ "sampling/sample_dpp.py" ]
[ "import numpy as np\nfrom scipy.linalg import orth\n\ndef sample_dpp(vals, vecs, k=0, one_hot=False):\n \"\"\"\n This function expects \n \n Arguments: \n vals: NumPy 1D Array of Eigenvalues of Kernel Matrix\n vecs: Numpy 2D Array of Eigenvectors of Kernel Matrix\n\n \"\"\"\n n = vecs.shape[0] # number of items in ground set\n \n # k-DPP\n if k:\n index = sample_k(vals, k) # sample_k, need to return index\n\n # Sample set size\n else:\n index = (np.random.rand(n) < (vals / (vals + 1)))\n k = np.sum(index)\n \n # Check for empty set\n if not k:\n return np.zeros(n) if one_hot else np.empty(0)\n \n # Check for full set\n if k == n:\n return np.ones(n) if one_hot else np.arange(k, dtype=float) \n \n V = vecs[:, index]\n\n # Sample a set of k items \n items = list()\n\n for i in range(k):\n p = np.sum(V**2, axis=1)\n p = np.cumsum(p / np.sum(p)) # item cumulative probabilities\n item = (np.random.rand() <= p).argmax()\n items.append(item)\n \n # Delete one eigenvector not orthogonal to e_item and find new basis\n j = (np.abs(V[item, :]) > 0).argmax() \n Vj = V[:, j]\n V = orth(V - (np.outer(Vj,(V[item, :] / Vj[item])))) \n \n items.sort()\n sample = np.array(items, dtype=float) \n\n if one_hot:\n sample = np.zeros(n)\n sample[items] = np.ones(k)\n \n return sample " ]
[ [ "numpy.array", "numpy.random.rand", "numpy.empty", "numpy.zeros", "numpy.sum", "numpy.ones", "numpy.arange", "numpy.abs", "numpy.outer" ] ]
BlueBrain/vascpy
[ "65fbf17528ae76c1f08cac9db4a8979ddf99e834" ]
[ "tests/point_graph/test_features.py" ]
[ "import pandas as pd\nimport pytest\nfrom numpy import testing as npt\n\nfrom vascpy.point_graph import features as _feat\nfrom vascpy.point_vasculature import PointVasculature\n\n\n@pytest.fixture\ndef node_properties():\n return pd.DataFrame(\n {\n \"x\": [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0],\n \"y\": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0],\n \"z\": [2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0],\n \"diameter\": [3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0],\n \"property1\": [3.0, 1.0, 4.0, 1.0, 4.0, 5.0, 6.0, 8.0, 9.0, 0.0],\n }\n )\n\n\n@pytest.fixture\ndef edge_properties():\n return pd.DataFrame(\n {\n \"start_node\": [0, 1, 2, 3, 4, 5, 6, 7, 8],\n \"end_node\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\n \"type\": [1, 0, 1, 0, 1, 0, 1, 0, 1],\n \"property1\": [9, 8, 7, 6, 5, 4, 3, 2, 1],\n }\n )\n\n\n@pytest.fixture\ndef point_vasculature(node_properties, edge_properties):\n return PointVasculature(node_properties, edge_properties)\n\n\ndef test_segment_volumes(point_vasculature):\n segment_volumes = _feat.segment_volumes(point_vasculature)\n npt.assert_allclose(\n segment_volumes,\n [\n 16.77764414,\n 27.66044034,\n 41.26393559,\n 57.5881299,\n 76.63302325,\n 98.39861565,\n 122.8849071,\n 150.0918976,\n 180.0195872,\n ],\n )\n\n\ndef test_segment_surface_areas(point_vasculature):\n segment_areas = _feat.segment_lateral_areas(point_vasculature)\n npt.assert_allclose(\n segment_areas,\n [\n 19.82255347,\n 25.48614018,\n 31.14972689,\n 36.8133136,\n 42.4769003,\n 48.14048701,\n 53.80407372,\n 59.46766042,\n 65.13124713,\n ],\n )\n\n\ndef test_segment_lengths(point_vasculature):\n segment_lengths = _feat.segment_lengths(point_vasculature)\n npt.assert_allclose(segment_lengths, [1.73205081] * 9)\n" ]
[ [ "pandas.DataFrame", "numpy.testing.assert_allclose" ] ]
winxos/NAO
[ "02ef8609cfdcd6c36aba527757dfe0d2c0fb863e" ]
[ "python/py_udp_cv3_photo/py_udp_server_cv3_v2.py" ]
[ "# -*- coding: utf-8 -*-\n'''\npython udp server get remote image.\n\nwinxos 2015-07-19\n'''\nimport cv2\nimport numpy as np\nimport socket\nimport os\nimport stat # get file size\nimport struct # pack binary\nimport platform\n\nDEBUG = False\nMAX_PACK_SIZE = 1024\nport = 9100\nserver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # DGRAM -> UDP\nif platform.platform()[0:5] == \"Linux\":\n import fcntl\n ip = socket.inet_ntoa(\n fcntl.ioctl(server.fileno(), 0X8915, struct.pack('256s', 'eth0'[:15]))[20:24])\n server.bind((ip, port))\nelse:\n server.bind(('192.168.0.100', port))\n\n\ndef send_file(filename, addr):\n filesize = os.stat(filename)[stat.ST_SIZE]\n print(\"%s size: %d Bytes\" % (filename, filesize))\n f = open(filename, \"rb\")\n chList = []\n for i in range(0, filesize):\n (ch,) = struct.unpack(\"B\", f.read(1))\n chList.append(ch)\n server.sendto(\"bin\", addr)\n packSize = 0\n string = \"\"\n for i in range(0, filesize):\n packSize = packSize + 1\n string = string + struct.pack(\"B\", chList[i])\n if (MAX_PACK_SIZE == packSize or i == filesize - 1):\n server.sendto(string, addr)\n packSize = 0\n string = \"\"\n server.sendto(\"end\", addr)\n\n\ndef send_cv_img(img, addr):\n server.sendto(\"ndarray\", addr)\n packSize = 0\n filesize = len(img)\n server.sendto(str(filesize), addr)\n string = \"\"\n for i in range(0, filesize):\n packSize = packSize + 1\n string = string + img[i]\n if (MAX_PACK_SIZE == packSize or i == filesize - 1):\n server.sendto(string, addr)\n packSize = 0\n string = \"\"\n server.sendto(\"end\", addr)\n\n\ncam = cv2.VideoCapture(0)\n\ndatas = \"\"\nstate = 0\nimg_len = 0\nwhile True:\n ret, frame = cam.read()\n data, addr = server.recvfrom(1024)\n print(data, addr)\n if data == \"img\":\n encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 50] # quality\n f, buf = cv2.imencode('.jpg', frame, encode_param)\n buf_data = np.array(buf).tostring()\n send_cv_img(buf_data, addr)\n" ]
[ [ "numpy.array" ] ]
teubert/hybridq
[ "128292bddf6b7d51ea4a576ddda5cab77c014496" ]
[ "hybridq/extras/random.py" ]
[ "\"\"\"\nAuthor: Salvatore Mandra (salvatore.mandra@nasa.gov)\n\nCopyright © 2021, United States Government, as represented by the Administrator\nof the National Aeronautics and Space Administration. All rights reserved.\n\nThe HybridQ: A Hybrid Simulator for Quantum Circuits platform is licensed under\nthe Apache License, Version 2.0 (the \"License\"); you may not use this file\nexcept in compliance with the License. You may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0.\n\nUnless required by applicable law or agreed to in writing, software distributed\nunder the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\nCONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\n\nfrom __future__ import annotations\nfrom hybridq.gate.utils import get_available_gates, get_clifford_gates\nfrom hybridq.gate import Gate, MatrixGate\nfrom hybridq.circuit import Circuit\nimport numpy as np\n\n\ndef get_random_indexes(n_qubits: int, *, use_random_indexes: bool = False):\n # Initialize\n indexes = []\n\n # Randomize indexes\n if use_random_indexes:\n\n # Add strings\n indexes = []\n while len(indexes) < n_qubits // 3:\n indexes += [\n ''.join(\n np.random.choice(list('abcdefghijklmnopqrstuvwxyz'),\n size=20))\n for _ in range(n_qubits // 3 - len(indexes))\n ]\n\n # Add tuples\n while len(indexes) < n_qubits:\n indexes += [\n tuple(x) for x in np.unique(np.random.randint(\n -2**32 + 1, 2**32 - 1, size=(n_qubits - len(indexes), 2)),\n axis=0)\n ]\n\n # Random permutation\n indexes = [indexes[i] for i in np.random.permutation(n_qubits)]\n\n # Use sequential\n else:\n indexes = np.arange(n_qubits)\n\n # Return indexes\n return indexes\n\n\ndef get_random_gate(randomize_power: bool = True,\n use_clifford_only: bool = False,\n use_unitary_only: bool = True):\n \"\"\"\n Generate random gate.\n \"\"\"\n # Get available gates\n avail_gates = get_clifford_gates(\n ) if use_clifford_only else get_available_gates()\n\n # Add random matrices\n if not use_unitary_only:\n avail_gates = avail_gates + ('RANDOM_MATRIX',)\n\n # Get random gate\n gate_name = np.random.choice(avail_gates)\n\n # Generate a random matrix\n if gate_name == 'RANDOM_MATRIX':\n # Get random number of qubits\n n_qubits = np.random.choice(range(1, 3))\n\n # Get random matrix\n M = 2 * np.random.random(\n (2**n_qubits, 2**n_qubits)).astype('complex') - 1\n M += 1j * (2 * np.random.random((2**n_qubits, 2**n_qubits)) - 1)\n M /= 2\n\n # Get gate\n gate = MatrixGate(M)\n\n # Generate named gate\n else:\n gate = Gate(gate_name)\n\n # Apply random parameters if present\n if gate.provides('params'):\n gate._set_params(np.random.random(size=gate.n_params))\n\n # Apply random power\n gate = gate**(2 * np.random.random() - 1 if randomize_power else 1)\n\n # Apply conjugation if supported\n if gate.provides('conj') and np.random.random() < 0.5:\n gate._conj()\n\n # Apply transposition if supported\n if gate.provides('T') and np.random.random() < 0.5:\n gate._T()\n\n # Convert to MatrixGate half of the times\n gate = gate if gate.name == 'MATRIX' or np.random.random(\n ) < 0.5 else MatrixGate(gate.matrix())\n\n # Return gate\n return gate\n\n\ndef get_rqc(n_qubits: int,\n n_gates: int,\n *,\n indexes: list[int] = None,\n randomize_power: bool = True,\n use_clifford_only: bool = False,\n use_unitary_only: bool = True,\n use_random_indexes: bool = False):\n \"\"\"\n Generate random quantum circuit.\n \"\"\"\n\n # 
Initialize circuit\n circuit = Circuit()\n\n # If not provided, generate indexes\n indexes = get_random_indexes(n_qubits,\n use_random_indexes=use_random_indexes\n ) if indexes is None else list(indexes)\n\n # Check that size is correct\n assert (len(indexes) == n_qubits)\n\n # Add random gates\n for _ in range(n_gates):\n gate = get_random_gate(randomize_power=randomize_power,\n use_unitary_only=use_unitary_only,\n use_clifford_only=use_clifford_only)\n gate.on([\n indexes[i]\n for i in np.random.choice(n_qubits, gate.n_qubits, replace=False)\n ],\n inplace=True)\n circuit.append(gate)\n\n # Return rqc\n return circuit\n" ]
[ [ "numpy.random.random", "numpy.arange", "numpy.random.choice", "numpy.random.permutation" ] ]
sharanramjee/adala-optimizer
[ "0d3417d41ac05622f8faf55bb2233296eb871e76" ]
[ "language_penntreebank/models/model.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom embed_regularize import embedded_dropout\nfrom locked_dropout import LockedDropout\nfrom weight_drop import WeightDrop\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, dropouth=0.5, dropouti=0.5, dropoute=0.1, wdrop=0, tie_weights=False):\n super(RNNModel, self).__init__()\n self.lockdrop = LockedDropout()\n self.idrop = nn.Dropout(dropouti)\n self.hdrop = nn.Dropout(dropouth)\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n assert rnn_type in ['LSTM', 'QRNN', 'GRU'], 'RNN type is not supported'\n if rnn_type == 'LSTM':\n self.rnns = [torch.nn.LSTM(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else (ninp if tie_weights else nhid), 1, dropout=0) for l in range(nlayers)]\n if wdrop:\n self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop) for rnn in self.rnns]\n if rnn_type == 'GRU':\n self.rnns = [torch.nn.GRU(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else ninp, 1, dropout=0) for l in range(nlayers)]\n if wdrop:\n self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop) for rnn in self.rnns]\n elif rnn_type == 'QRNN':\n from torchqrnn import QRNNLayer\n self.rnns = [QRNNLayer(input_size=ninp if l == 0 else nhid, hidden_size=nhid if l != nlayers - 1 else (ninp if tie_weights else nhid), save_prev_x=True, zoneout=0, window=2 if l == 0 else 1, output_gate=True) for l in range(nlayers)]\n for rnn in self.rnns:\n rnn.linear = WeightDrop(rnn.linear, ['weight'], dropout=wdrop)\n print(self.rnns)\n self.rnns = torch.nn.ModuleList(self.rnns)\n self.decoder = nn.Linear(nhid, ntoken)\n\n # Optionally tie weights as in:\n # \"Using the Output Embedding to Improve Language Models\" (Press & Wolf 2016)\n # https://arxiv.org/abs/1608.05859\n # and\n # \"Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling\" (Inan et al. 
2016)\n # https://arxiv.org/abs/1611.01462\n if tie_weights:\n #if nhid != ninp:\n # raise ValueError('When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n\n self.init_weights()\n\n self.rnn_type = rnn_type\n self.ninp = ninp\n self.nhid = nhid\n self.nlayers = nlayers\n self.dropout = dropout\n self.dropouti = dropouti\n self.dropouth = dropouth\n self.dropoute = dropoute\n self.tie_weights = tie_weights\n\n def reset(self):\n if self.rnn_type == 'QRNN': [r.reset() for r in self.rnns]\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, input, hidden, return_h=False):\n emb = embedded_dropout(self.encoder, input, dropout=self.dropoute if self.training else 0)\n #emb = self.idrop(emb)\n\n emb = self.lockdrop(emb, self.dropouti)\n\n raw_output = emb\n new_hidden = []\n #raw_output, hidden = self.rnn(emb, hidden)\n raw_outputs = []\n outputs = []\n for l, rnn in enumerate(self.rnns):\n if isinstance(rnn, torch.nn.GRU) or isinstance(rnn, torch.nn.LSTM):\n rnn.flatten_parameters()\n current_input = raw_output\n raw_output, new_h = rnn(raw_output, hidden[l])\n new_hidden.append(new_h)\n raw_outputs.append(raw_output)\n if l != self.nlayers - 1:\n #self.hdrop(raw_output)\n raw_output = self.lockdrop(raw_output, self.dropouth)\n outputs.append(raw_output)\n hidden = new_hidden\n\n output = self.lockdrop(raw_output, self.dropout)\n outputs.append(output)\n\n result = output.view(output.size(0)*output.size(1), output.size(2))\n if return_h:\n return result, hidden, raw_outputs, outputs\n return result, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n if self.rnn_type == 'LSTM':\n return [(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else (self.ninp if self.tie_weights else self.nhid)).zero_(),\n weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else (self.ninp if self.tie_weights else self.nhid)).zero_())\n for l in range(self.nlayers)]\n elif self.rnn_type == 'QRNN' or self.rnn_type == 'GRU':\n return [weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else (self.ninp if self.tie_weights else self.nhid)).zero_()\n for l in range(self.nlayers)]\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.LSTM", "torch.nn.GRU", "torch.nn.ModuleList", "torch.nn.Embedding" ] ]
CAVED123/-navbot
[ "5c5711e034516017326bed98a1c66880b6165d9d" ]
[ "tools/draw_success_rate.py" ]
[ "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport matplotlib.pyplot as plt\n\nplt.rcParams['font.sans-serif'] = ['Times New Roman'] # 如果要显示中文字体,则在此处设为:SimHei\nplt.rcParams['axes.unicode_minus'] = False # 显示负号\nplt.figure(figsize=(10, 6))\nplt.grid(linestyle=\"--\") # 设置背景网格线为虚线\nax = plt.gca()\nax.spines['top'].set_visible(False) # 去掉上边框\nax.spines['right'].set_visible(False) # 去掉右边框\n\nrecord_path = '../materials/record/'\n\n\ndef main():\n f = open(record_path + 'PPO_nav1.txt', 'r')\n lines = f.readlines()\n success = []\n successes = []\n\n for i in range(len(lines)):\n if i < 100:\n success.append(0)\n else:\n for j in range(100):\n line = lines[i-j] # eg: '[432.1290540951935, 248, True]'\n data = line.split()\n # successes.append(bool(data[2][:-1])) # bool('False') is True!\n success.append(data[2][:-1] == str('True'))\n success_rate = sum(success)\n successes.append(success_rate)\n success = []\n\n f.close()\n\n f2 = open(record_path + 'E2E_PPO_nav1.txt', 'r')\n lines2 = f2.readlines()\n success2 = []\n successes2 = []\n\n for i in range(100, len(lines2)):\n if i < 100:\n success2.append(0)\n else:\n for j in range(100):\n line2 = lines2[i-j] # eg: '[432.1290540951935, 248, True]'\n data2 = line2.split()\n # successes.append(bool(data[2][:-1])) # bool('False') is True!\n success2.append(data2[2][:-1] == str('True'))\n success_rate2 = sum(success2)\n successes2.append(success_rate2)\n success2 = []\n\n f2.close()\n\n plt.plot(range(len(successes)), successes, color=\"blue\", label=\"Proposed\", linewidth=1.5)\n plt.plot(range(len(successes2)), successes2, color=\"green\", label=\"Baseline\", linewidth=1.5)\n\n size = 22\n plt.xticks(fontsize=size) # 默认字体大小为10\n plt.yticks(fontsize=size)\n # plt.title(\"example\", fontsize=12, fontweight='bold') # 默认字体大小为12\n plt.xlabel(\"episode\", fontsize=size)\n plt.ylabel(\"success rate(%)\", fontsize=size)\n\n plt.title('maze1', fontsize=size)\n # plt.legend() # 显示各曲线的图例\n plt.legend(loc=4, numpoints=1) # lower right\n leg = plt.gca().get_legend()\n ltext = leg.get_texts()\n plt.setp(ltext, fontsize=size) # 设置图例字体的大小和粗细\n \n axes = plt.gca()\n # axes.set_xlim([None, None]) # 限定X轴的范围\n\n plt.savefig('../result/maze1_dense_success.png')\n plt.show()\n\n\nmain()\n" ]
[ [ "matplotlib.pyplot.setp", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.savefig", "matplotlib.pyplot.yticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "matplotlib.pyplot.gca", "matplotlib.pyplot.xticks" ] ]