python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Global configuration."""
#----------------------------------------------------------------------------
# Paths.
result_dir = 'results'
data_dir = 'datasets'
cache_dir = 'cache'
run_dir_ignore = ['results', 'datasets', 'cache']
#----------------------------------------------------------------------------
| stylegan-master | config.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Minimal script for generating an image using pre-trained StyleGAN generator."""
import os
import pickle
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import config
def main():
# Initialize TensorFlow.
tflib.init_tf()
# Load pre-trained network.
url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ' # karras2019stylegan-ffhq-1024x1024.pkl
with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
_G, _D, Gs = pickle.load(f)
# _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run.
# _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run.
# Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot.
# Print network details.
Gs.print_layers()
# Pick latent vector.
rnd = np.random.RandomState(5)
latents = rnd.randn(1, Gs.input_shape[1])
# Generate image.
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)
# Save image.
os.makedirs(config.result_dir, exist_ok=True)
png_filename = os.path.join(config.result_dir, 'example.png')
PIL.Image.fromarray(images[0], 'RGB').save(png_filename)
if __name__ == "__main__":
main()
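#----------------------------------------------------------------------------
# Follow-up sketch (not part of the original script; names are illustrative):
# the same Gs.run() call can sweep truncation_psi to trade sample diversity for
# fidelity, reusing the latents and output transform set up in main().
def save_truncation_sweep(Gs, latents, psis=(0.5, 0.7, 1.0)):
    fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    os.makedirs(config.result_dir, exist_ok=True)
    for psi in psis:
        images = Gs.run(latents, None, truncation_psi=psi, randomize_noise=True, output_transform=fmt)
        PIL.Image.fromarray(images[0], 'RGB').save(os.path.join(config.result_dir, 'example-psi%.2f.png' % psi))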
| stylegan-master | pretrained_example.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Main entry point for training StyleGAN and ProGAN networks."""
import copy
import dnnlib
from dnnlib import EasyDict
import config
from metrics import metric_base
#----------------------------------------------------------------------------
# Official training configs for StyleGAN, targeted mainly for FFHQ.
if 1:
desc = 'sgan' # Description string included in result subdir name.
train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop.
G = EasyDict(func_name='training.networks_stylegan.G_style') # Options for generator network.
D = EasyDict(func_name='training.networks_stylegan.D_basic') # Options for discriminator network.
G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer.
D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer.
G_loss = EasyDict(func_name='training.loss.G_logistic_nonsaturating') # Options for generator loss.
D_loss = EasyDict(func_name='training.loss.D_logistic_simplegp', r1_gamma=10.0) # Options for discriminator loss.
dataset = EasyDict() # Options for load_dataset().
sched = EasyDict() # Options for TrainingSchedule.
grid = EasyDict(size='4k', layout='random') # Options for setup_snapshot_image_grid().
metrics = [metric_base.fid50k] # Options for MetricGroup.
submit_config = dnnlib.SubmitConfig() # Options for dnnlib.submit_run().
tf_config = {'rnd.np_random_seed': 1000} # Options for tflib.init_tf().
# Dataset.
desc += '-ffhq'; dataset = EasyDict(tfrecord_dir='ffhq'); train.mirror_augment = True
#desc += '-celebahq'; dataset = EasyDict(tfrecord_dir='celebahq'); train.mirror_augment = True
#desc += '-bedroom'; dataset = EasyDict(tfrecord_dir='lsun-bedroom-full'); train.mirror_augment = False
#desc += '-car'; dataset = EasyDict(tfrecord_dir='lsun-car-512x384'); train.mirror_augment = False
#desc += '-cat'; dataset = EasyDict(tfrecord_dir='lsun-cat-full'); train.mirror_augment = False
# Config presets from Progressive GAN (choose one).
#desc += '-1gpu'; submit_config.num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4}; sched.G_lrate_dict = {1024: 0.0015}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
#desc += '-2gpu'; submit_config.num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8}; sched.G_lrate_dict = {512: 0.0015, 1024: 0.002}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
#desc += '-4gpu'; submit_config.num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16}; sched.G_lrate_dict = {256: 0.0015, 512: 0.002, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
desc += '-8gpu'; submit_config.num_gpus = 8; sched.minibatch_base = 32; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32}; sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
# Tuned config for StyleGAN.
train.total_kimg = 25000; sched.lod_initial_resolution = 8
# WGAN-GP loss for CelebA-HQ.
#desc += '-wgangp'; G_loss = EasyDict(func_name='training.loss.G_wgan'); D_loss = EasyDict(func_name='training.loss.D_wgan_gp'); sched.G_lrate_dict = {k: min(v, 0.002) for k, v in sched.G_lrate_dict.items()}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict)
# Table 1.
#desc += '-tuned-baseline'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 0; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-add-mapping-and-styles'; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-remove-traditional-input'; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-add-noise-inputs'; G.style_mixing_prob = 0.0
#desc += '-mixing-regularization' # default
# Table 2.
#desc += '-mix0'; G.style_mixing_prob = 0.0
#desc += '-mix50'; G.style_mixing_prob = 0.5
#desc += '-mix90'; G.style_mixing_prob = 0.9 # default
#desc += '-mix100'; G.style_mixing_prob = 1.0
# Table 4.
#desc += '-traditional-0'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 0; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-traditional-8'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 8; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-stylebased-0'; G.mapping_layers = 0
#desc += '-stylebased-1'; G.mapping_layers = 1
#desc += '-stylebased-2'; G.mapping_layers = 2
#desc += '-stylebased-8'; G.mapping_layers = 8 # default
#----------------------------------------------------------------------------
# Official training configs for Progressive GAN, targeted mainly for CelebA-HQ.
if 0:
desc = 'pgan' # Description string included in result subdir name.
train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop.
G = EasyDict(func_name='training.networks_progan.G_paper') # Options for generator network.
D = EasyDict(func_name='training.networks_progan.D_paper') # Options for discriminator network.
G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer.
D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer.
G_loss = EasyDict(func_name='training.loss.G_wgan') # Options for generator loss.
D_loss = EasyDict(func_name='training.loss.D_wgan_gp') # Options for discriminator loss.
dataset = EasyDict() # Options for load_dataset().
sched = EasyDict() # Options for TrainingSchedule.
grid = EasyDict(size='1080p', layout='random') # Options for setup_snapshot_image_grid().
metrics = [metric_base.fid50k] # Options for MetricGroup.
submit_config = dnnlib.SubmitConfig() # Options for dnnlib.submit_run().
tf_config = {'rnd.np_random_seed': 1000} # Options for tflib.init_tf().
# Dataset (choose one).
desc += '-celebahq'; dataset = EasyDict(tfrecord_dir='celebahq'); train.mirror_augment = True
#desc += '-celeba'; dataset = EasyDict(tfrecord_dir='celeba'); train.mirror_augment = True
#desc += '-cifar10'; dataset = EasyDict(tfrecord_dir='cifar10')
#desc += '-cifar100'; dataset = EasyDict(tfrecord_dir='cifar100')
#desc += '-svhn'; dataset = EasyDict(tfrecord_dir='svhn')
#desc += '-mnist'; dataset = EasyDict(tfrecord_dir='mnist')
#desc += '-mnistrgb'; dataset = EasyDict(tfrecord_dir='mnistrgb')
#desc += '-syn1024rgb'; dataset = EasyDict(class_name='training.dataset.SyntheticDataset', resolution=1024, num_channels=3)
#desc += '-lsun-airplane'; dataset = EasyDict(tfrecord_dir='lsun-airplane-100k'); train.mirror_augment = True
#desc += '-lsun-bedroom'; dataset = EasyDict(tfrecord_dir='lsun-bedroom-100k'); train.mirror_augment = True
#desc += '-lsun-bicycle'; dataset = EasyDict(tfrecord_dir='lsun-bicycle-100k'); train.mirror_augment = True
#desc += '-lsun-bird'; dataset = EasyDict(tfrecord_dir='lsun-bird-100k'); train.mirror_augment = True
#desc += '-lsun-boat'; dataset = EasyDict(tfrecord_dir='lsun-boat-100k'); train.mirror_augment = True
#desc += '-lsun-bottle'; dataset = EasyDict(tfrecord_dir='lsun-bottle-100k'); train.mirror_augment = True
#desc += '-lsun-bridge'; dataset = EasyDict(tfrecord_dir='lsun-bridge-100k'); train.mirror_augment = True
#desc += '-lsun-bus'; dataset = EasyDict(tfrecord_dir='lsun-bus-100k'); train.mirror_augment = True
#desc += '-lsun-car'; dataset = EasyDict(tfrecord_dir='lsun-car-100k'); train.mirror_augment = True
#desc += '-lsun-cat'; dataset = EasyDict(tfrecord_dir='lsun-cat-100k'); train.mirror_augment = True
#desc += '-lsun-chair'; dataset = EasyDict(tfrecord_dir='lsun-chair-100k'); train.mirror_augment = True
#desc += '-lsun-churchoutdoor'; dataset = EasyDict(tfrecord_dir='lsun-churchoutdoor-100k'); train.mirror_augment = True
#desc += '-lsun-classroom'; dataset = EasyDict(tfrecord_dir='lsun-classroom-100k'); train.mirror_augment = True
#desc += '-lsun-conferenceroom'; dataset = EasyDict(tfrecord_dir='lsun-conferenceroom-100k'); train.mirror_augment = True
#desc += '-lsun-cow'; dataset = EasyDict(tfrecord_dir='lsun-cow-100k'); train.mirror_augment = True
#desc += '-lsun-diningroom'; dataset = EasyDict(tfrecord_dir='lsun-diningroom-100k'); train.mirror_augment = True
#desc += '-lsun-diningtable'; dataset = EasyDict(tfrecord_dir='lsun-diningtable-100k'); train.mirror_augment = True
#desc += '-lsun-dog'; dataset = EasyDict(tfrecord_dir='lsun-dog-100k'); train.mirror_augment = True
#desc += '-lsun-horse'; dataset = EasyDict(tfrecord_dir='lsun-horse-100k'); train.mirror_augment = True
#desc += '-lsun-kitchen'; dataset = EasyDict(tfrecord_dir='lsun-kitchen-100k'); train.mirror_augment = True
#desc += '-lsun-livingroom'; dataset = EasyDict(tfrecord_dir='lsun-livingroom-100k'); train.mirror_augment = True
#desc += '-lsun-motorbike'; dataset = EasyDict(tfrecord_dir='lsun-motorbike-100k'); train.mirror_augment = True
#desc += '-lsun-person'; dataset = EasyDict(tfrecord_dir='lsun-person-100k'); train.mirror_augment = True
#desc += '-lsun-pottedplant'; dataset = EasyDict(tfrecord_dir='lsun-pottedplant-100k'); train.mirror_augment = True
#desc += '-lsun-restaurant'; dataset = EasyDict(tfrecord_dir='lsun-restaurant-100k'); train.mirror_augment = True
#desc += '-lsun-sheep'; dataset = EasyDict(tfrecord_dir='lsun-sheep-100k'); train.mirror_augment = True
#desc += '-lsun-sofa'; dataset = EasyDict(tfrecord_dir='lsun-sofa-100k'); train.mirror_augment = True
#desc += '-lsun-tower'; dataset = EasyDict(tfrecord_dir='lsun-tower-100k'); train.mirror_augment = True
#desc += '-lsun-train'; dataset = EasyDict(tfrecord_dir='lsun-train-100k'); train.mirror_augment = True
#desc += '-lsun-tvmonitor'; dataset = EasyDict(tfrecord_dir='lsun-tvmonitor-100k'); train.mirror_augment = True
# Conditioning & snapshot options.
#desc += '-cond'; dataset.max_label_size = 'full' # conditioned on full label
#desc += '-cond1'; dataset.max_label_size = 1 # conditioned on first component of the label
#desc += '-g4k'; grid.size = '4k'
#desc += '-grpc'; grid.layout = 'row_per_class'
# Config presets (choose one).
#desc += '-preset-v1-1gpu'; submit_config.num_gpus = 1; D.mbstd_group_size = 16; sched.minibatch_base = 16; sched.minibatch_dict = {256: 14, 512: 6, 1024: 3}; sched.lod_training_kimg = 800; sched.lod_transition_kimg = 800; train.total_kimg = 19000
desc += '-preset-v2-1gpu'; submit_config.num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4}; sched.G_lrate_dict = {1024: 0.0015}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
#desc += '-preset-v2-2gpus'; submit_config.num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8}; sched.G_lrate_dict = {512: 0.0015, 1024: 0.002}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
#desc += '-preset-v2-4gpus'; submit_config.num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16}; sched.G_lrate_dict = {256: 0.0015, 512: 0.002, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
#desc += '-preset-v2-8gpus'; submit_config.num_gpus = 8; sched.minibatch_base = 32; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32}; sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
# Numerical precision (choose one).
desc += '-fp32'; sched.max_minibatch_per_gpu = {256: 16, 512: 8, 1024: 4}
#desc += '-fp16'; G.dtype = 'float16'; D.dtype = 'float16'; G.pixelnorm_epsilon=1e-4; G_opt.use_loss_scaling = True; D_opt.use_loss_scaling = True; sched.max_minibatch_per_gpu = {512: 16, 1024: 8}
# Disable individual features.
#desc += '-nogrowing'; sched.lod_initial_resolution = 1024; sched.lod_training_kimg = 0; sched.lod_transition_kimg = 0; train.total_kimg = 10000
#desc += '-nopixelnorm'; G.use_pixelnorm = False
#desc += '-nowscale'; G.use_wscale = False; D.use_wscale = False
#desc += '-noleakyrelu'; G.use_leakyrelu = False
#desc += '-nosmoothing'; train.G_smoothing_kimg = 0.0
#desc += '-norepeat'; train.minibatch_repeats = 1
#desc += '-noreset'; train.reset_opt_for_new_lod = False
# Special modes.
#desc += '-BENCHMARK'; sched.lod_initial_resolution = 4; sched.lod_training_kimg = 3; sched.lod_transition_kimg = 3; train.total_kimg = (8*2+1)*3; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000
#desc += '-BENCHMARK0'; sched.lod_initial_resolution = 1024; train.total_kimg = 10; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000
#desc += '-VERBOSE'; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1; train.network_snapshot_ticks = 100
#desc += '-GRAPH'; train.save_tf_graph = True
#desc += '-HIST'; train.save_weight_histograms = True
#----------------------------------------------------------------------------
# Main entry point for training.
# Calls the function indicated by 'train' using the selected options.
def main():
kwargs = EasyDict(train)
kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
kwargs.update(dataset_args=dataset, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
kwargs.submit_config = copy.deepcopy(submit_config)
kwargs.submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(config.result_dir)
kwargs.submit_config.run_dir_ignore += config.run_dir_ignore
kwargs.submit_config.run_desc = desc
dnnlib.submit_run(**kwargs)
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
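#----------------------------------------------------------------------------
# Reading aid (a sketch, not part of train.py): the sched.* dictionaries above
# are keyed by output resolution in pixels. A lookup of the kind TrainingSchedule
# is assumed to perform, falling back to the base value when a resolution is absent:
def _example_lookup(res, minibatch_dict, minibatch_base):
    # e.g. _example_lookup(128, {4: 512, 8: 256, 16: 128, 32: 64, 64: 32}, 32)
    # returns 32, since 128 is not listed and minibatch_base applies.
    return minibatch_dict.get(res, minibatch_base)
#----------------------------------------------------------------------------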
| stylegan-master | train.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Linear Separability (LS)."""
from collections import defaultdict
import numpy as np
import sklearn.svm
import tensorflow as tf
import dnnlib.tflib as tflib
from metrics import metric_base
from training import misc
#----------------------------------------------------------------------------
classifier_urls = [
'https://drive.google.com/uc?id=1Q5-AI6TwWhCVM7Muu4tBM7rp5nG_gmCX', # celebahq-classifier-00-male.pkl
'https://drive.google.com/uc?id=1Q5c6HE__ReW2W8qYAXpao68V1ryuisGo', # celebahq-classifier-01-smiling.pkl
'https://drive.google.com/uc?id=1Q7738mgWTljPOJQrZtSMLxzShEhrvVsU', # celebahq-classifier-02-attractive.pkl
'https://drive.google.com/uc?id=1QBv2Mxe7ZLvOv1YBTLq-T4DS3HjmXV0o', # celebahq-classifier-03-wavy-hair.pkl
'https://drive.google.com/uc?id=1QIvKTrkYpUrdA45nf7pspwAqXDwWOLhV', # celebahq-classifier-04-young.pkl
'https://drive.google.com/uc?id=1QJPH5rW7MbIjFUdZT7vRYfyUjNYDl4_L', # celebahq-classifier-05-5-o-clock-shadow.pkl
'https://drive.google.com/uc?id=1QPZXSYf6cptQnApWS_T83sqFMun3rULY', # celebahq-classifier-06-arched-eyebrows.pkl
'https://drive.google.com/uc?id=1QPgoAZRqINXk_PFoQ6NwMmiJfxc5d2Pg', # celebahq-classifier-07-bags-under-eyes.pkl
'https://drive.google.com/uc?id=1QQPQgxgI6wrMWNyxFyTLSgMVZmRr1oO7', # celebahq-classifier-08-bald.pkl
'https://drive.google.com/uc?id=1QcSphAmV62UrCIqhMGgcIlZfoe8hfWaF', # celebahq-classifier-09-bangs.pkl
'https://drive.google.com/uc?id=1QdWTVwljClTFrrrcZnPuPOR4mEuz7jGh', # celebahq-classifier-10-big-lips.pkl
'https://drive.google.com/uc?id=1QgvEWEtr2mS4yj1b_Y3WKe6cLWL3LYmK', # celebahq-classifier-11-big-nose.pkl
'https://drive.google.com/uc?id=1QidfMk9FOKgmUUIziTCeo8t-kTGwcT18', # celebahq-classifier-12-black-hair.pkl
'https://drive.google.com/uc?id=1QthrJt-wY31GPtV8SbnZQZ0_UEdhasHO', # celebahq-classifier-13-blond-hair.pkl
'https://drive.google.com/uc?id=1QvCAkXxdYT4sIwCzYDnCL9Nb5TDYUxGW', # celebahq-classifier-14-blurry.pkl
'https://drive.google.com/uc?id=1QvLWuwSuWI9Ln8cpxSGHIciUsnmaw8L0', # celebahq-classifier-15-brown-hair.pkl
'https://drive.google.com/uc?id=1QxW6THPI2fqDoiFEMaV6pWWHhKI_OoA7', # celebahq-classifier-16-bushy-eyebrows.pkl
'https://drive.google.com/uc?id=1R71xKw8oTW2IHyqmRDChhTBkW9wq4N9v', # celebahq-classifier-17-chubby.pkl
'https://drive.google.com/uc?id=1RDn_fiLfEGbTc7JjazRXuAxJpr-4Pl67', # celebahq-classifier-18-double-chin.pkl
'https://drive.google.com/uc?id=1RGBuwXbaz5052bM4VFvaSJaqNvVM4_cI', # celebahq-classifier-19-eyeglasses.pkl
'https://drive.google.com/uc?id=1RIxOiWxDpUwhB-9HzDkbkLegkd7euRU9', # celebahq-classifier-20-goatee.pkl
'https://drive.google.com/uc?id=1RPaNiEnJODdr-fwXhUFdoSQLFFZC7rC-', # celebahq-classifier-21-gray-hair.pkl
'https://drive.google.com/uc?id=1RQH8lPSwOI2K_9XQCZ2Ktz7xm46o80ep', # celebahq-classifier-22-heavy-makeup.pkl
'https://drive.google.com/uc?id=1RXZM61xCzlwUZKq-X7QhxOg0D2telPow', # celebahq-classifier-23-high-cheekbones.pkl
'https://drive.google.com/uc?id=1RgASVHW8EWMyOCiRb5fsUijFu-HfxONM', # celebahq-classifier-24-mouth-slightly-open.pkl
'https://drive.google.com/uc?id=1RkC8JLqLosWMaRne3DARRgolhbtg_wnr', # celebahq-classifier-25-mustache.pkl
'https://drive.google.com/uc?id=1RqtbtFT2EuwpGTqsTYJDyXdnDsFCPtLO', # celebahq-classifier-26-narrow-eyes.pkl
'https://drive.google.com/uc?id=1Rs7hU-re8bBMeRHR-fKgMbjPh-RIbrsh', # celebahq-classifier-27-no-beard.pkl
'https://drive.google.com/uc?id=1RynDJQWdGOAGffmkPVCrLJqy_fciPF9E', # celebahq-classifier-28-oval-face.pkl
'https://drive.google.com/uc?id=1S0TZ_Hdv5cb06NDaCD8NqVfKy7MuXZsN', # celebahq-classifier-29-pale-skin.pkl
'https://drive.google.com/uc?id=1S3JPhZH2B4gVZZYCWkxoRP11q09PjCkA', # celebahq-classifier-30-pointy-nose.pkl
'https://drive.google.com/uc?id=1S3pQuUz-Jiywq_euhsfezWfGkfzLZ87W', # celebahq-classifier-31-receding-hairline.pkl
'https://drive.google.com/uc?id=1S6nyIl_SEI3M4l748xEdTV2vymB_-lrY', # celebahq-classifier-32-rosy-cheeks.pkl
'https://drive.google.com/uc?id=1S9P5WCi3GYIBPVYiPTWygrYIUSIKGxbU', # celebahq-classifier-33-sideburns.pkl
'https://drive.google.com/uc?id=1SANviG-pp08n7AFpE9wrARzozPIlbfCH', # celebahq-classifier-34-straight-hair.pkl
'https://drive.google.com/uc?id=1SArgyMl6_z7P7coAuArqUC2zbmckecEY', # celebahq-classifier-35-wearing-earrings.pkl
'https://drive.google.com/uc?id=1SC5JjS5J-J4zXFO9Vk2ZU2DT82TZUza_', # celebahq-classifier-36-wearing-hat.pkl
'https://drive.google.com/uc?id=1SDAQWz03HGiu0MSOKyn7gvrp3wdIGoj-', # celebahq-classifier-37-wearing-lipstick.pkl
'https://drive.google.com/uc?id=1SEtrVK-TQUC0XeGkBE9y7L8VXfbchyKX', # celebahq-classifier-38-wearing-necklace.pkl
'https://drive.google.com/uc?id=1SF_mJIdyGINXoV-I6IAxHB_k5dxiF6M-', # celebahq-classifier-39-wearing-necktie.pkl
]
#----------------------------------------------------------------------------
def prob_normalize(p):
p = np.asarray(p).astype(np.float32)
assert len(p.shape) == 2
return p / np.sum(p)
def mutual_information(p):
p = prob_normalize(p)
px = np.sum(p, axis=1)
py = np.sum(p, axis=0)
result = 0.0
for x in range(p.shape[0]):
p_x = px[x]
for y in range(p.shape[1]):
p_xy = p[x][y]
p_y = py[y]
if p_xy > 0.0:
result += p_xy * np.log2(p_xy / (p_x * p_y)) # get bits as output
return result
def entropy(p):
p = prob_normalize(p)
result = 0.0
for x in range(p.shape[0]):
for y in range(p.shape[1]):
p_xy = p[x][y]
if p_xy > 0.0:
result -= p_xy * np.log2(p_xy)
return result
def conditional_entropy(p):
# H(Y|X) where X corresponds to axis 0, Y to axis 1
# i.e., how many bits of additional information are needed to determine where we are on axis 1 if we know where we are on axis 0?
p = prob_normalize(p)
y = np.sum(p, axis=0, keepdims=True) # marginalize to calculate H(Y)
return max(0.0, entropy(y) - mutual_information(p)) # can slip just below 0 due to FP inaccuracies, clean those up.
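# A small self-check sketch (not part of the original file): a perfectly predictive
# 2x2 joint table carries 0 bits of residual uncertainty, while an independent
# uniform table carries the full 1 bit.
def _conditional_entropy_self_test():
    assert abs(conditional_entropy([[0.5, 0.0], [0.0, 0.5]])) < 1e-6
    assert abs(conditional_entropy([[0.25, 0.25], [0.25, 0.25]]) - 1.0) < 1e-6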
#----------------------------------------------------------------------------
class LS(metric_base.MetricBase):
def __init__(self, num_samples, num_keep, attrib_indices, minibatch_per_gpu, **kwargs):
assert num_keep <= num_samples
super().__init__(**kwargs)
self.num_samples = num_samples
self.num_keep = num_keep
self.attrib_indices = attrib_indices
self.minibatch_per_gpu = minibatch_per_gpu
def _evaluate(self, Gs, num_gpus):
minibatch_size = num_gpus * self.minibatch_per_gpu
# Construct TensorFlow graph for each GPU.
result_expr = []
for gpu_idx in range(num_gpus):
with tf.device('/gpu:%d' % gpu_idx):
Gs_clone = Gs.clone()
# Generate images.
latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
dlatents = Gs_clone.components.mapping.get_output_for(latents, None, is_validation=True)
images = Gs_clone.components.synthesis.get_output_for(dlatents, is_validation=True, randomize_noise=True)
# Downsample to 256x256. The attribute classifiers were built for 256x256.
if images.shape[2] > 256:
factor = images.shape[2] // 256
images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])
images = tf.reduce_mean(images, axis=[3, 5])
# Run classifier for each attribute.
result_dict = dict(latents=latents, dlatents=dlatents[:,-1])
for attrib_idx in self.attrib_indices:
classifier = misc.load_pkl(classifier_urls[attrib_idx])
logits = classifier.get_output_for(images, None)
predictions = tf.nn.softmax(tf.concat([logits, -logits], axis=1))
result_dict[attrib_idx] = predictions
result_expr.append(result_dict)
# Sampling loop.
results = []
for _ in range(0, self.num_samples, minibatch_size):
results += tflib.run(result_expr)
results = {key: np.concatenate([value[key] for value in results], axis=0) for key in results[0].keys()}
# Calculate conditional entropy for each attribute.
conditional_entropies = defaultdict(list)
for attrib_idx in self.attrib_indices:
# Prune the least confident samples.
pruned_indices = list(range(self.num_samples))
pruned_indices = sorted(pruned_indices, key=lambda i: -np.max(results[attrib_idx][i]))
pruned_indices = pruned_indices[:self.num_keep]
# Fit SVM to the remaining samples.
svm_targets = np.argmax(results[attrib_idx][pruned_indices], axis=1)
for space in ['latents', 'dlatents']:
svm_inputs = results[space][pruned_indices]
try:
svm = sklearn.svm.LinearSVC()
svm.fit(svm_inputs, svm_targets)
svm.score(svm_inputs, svm_targets)
svm_outputs = svm.predict(svm_inputs)
except:
svm_outputs = svm_targets # assume perfect prediction
# Calculate conditional entropy.
p = [[np.mean([case == (row, col) for case in zip(svm_outputs, svm_targets)]) for col in (0, 1)] for row in (0, 1)]
conditional_entropies[space].append(conditional_entropy(p))
# Calculate separability scores.
scores = {key: 2**np.sum(values) for key, values in conditional_entropies.items()}
self._report_result(scores['latents'], suffix='_z')
self._report_result(scores['dlatents'], suffix='_w')
#----------------------------------------------------------------------------
| stylegan-master | metrics/linear_separability.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Frechet Inception Distance (FID)."""
import os
import numpy as np
import scipy
import tensorflow as tf
import dnnlib.tflib as tflib
from metrics import metric_base
from training import misc
#----------------------------------------------------------------------------
class FID(metric_base.MetricBase):
def __init__(self, num_images, minibatch_per_gpu, **kwargs):
super().__init__(**kwargs)
self.num_images = num_images
self.minibatch_per_gpu = minibatch_per_gpu
def _evaluate(self, Gs, num_gpus):
minibatch_size = num_gpus * self.minibatch_per_gpu
inception = misc.load_pkl('https://drive.google.com/uc?id=1MzTY44rLToO5APn8TZmfR7_ENSe5aZUn') # inception_v3_features.pkl
activations = np.empty([self.num_images, inception.output_shape[1]], dtype=np.float32)
# Calculate statistics for reals.
cache_file = self._get_cache_file_for_reals(num_images=self.num_images)
os.makedirs(os.path.dirname(cache_file), exist_ok=True)
if os.path.isfile(cache_file):
mu_real, sigma_real = misc.load_pkl(cache_file)
else:
for idx, images in enumerate(self._iterate_reals(minibatch_size=minibatch_size)):
begin = idx * minibatch_size
end = min(begin + minibatch_size, self.num_images)
activations[begin:end] = inception.run(images[:end-begin], num_gpus=num_gpus, assume_frozen=True)
if end == self.num_images:
break
mu_real = np.mean(activations, axis=0)
sigma_real = np.cov(activations, rowvar=False)
misc.save_pkl((mu_real, sigma_real), cache_file)
# Construct TensorFlow graph.
result_expr = []
for gpu_idx in range(num_gpus):
with tf.device('/gpu:%d' % gpu_idx):
Gs_clone = Gs.clone()
inception_clone = inception.clone()
latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
images = Gs_clone.get_output_for(latents, None, is_validation=True, randomize_noise=True)
images = tflib.convert_images_to_uint8(images)
result_expr.append(inception_clone.get_output_for(images))
# Calculate statistics for fakes.
for begin in range(0, self.num_images, minibatch_size):
end = min(begin + minibatch_size, self.num_images)
activations[begin:end] = np.concatenate(tflib.run(result_expr), axis=0)[:end-begin]
mu_fake = np.mean(activations, axis=0)
sigma_fake = np.cov(activations, rowvar=False)
# Calculate FID.
m = np.square(mu_fake - mu_real).sum()
s, _ = scipy.linalg.sqrtm(np.dot(sigma_fake, sigma_real), disp=False) # pylint: disable=no-member
dist = m + np.trace(sigma_fake + sigma_real - 2*s)
self._report_result(np.real(dist))
#----------------------------------------------------------------------------
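# Reference sketch (not part of the original file): the value reported above is the
# closed-form Frechet distance between two Gaussians,
#   FID = ||mu_r - mu_f||^2 + Tr(sigma_r + sigma_f - 2*(sigma_r sigma_f)^(1/2)).
# 'fid_from_stats' is a hypothetical standalone helper assuming the activation
# statistics have already been estimated as in _evaluate().
def fid_from_stats(mu_real, sigma_real, mu_fake, sigma_fake):
    s, _ = scipy.linalg.sqrtm(np.dot(sigma_fake, sigma_real), disp=False) # matrix square root
    return np.square(mu_fake - mu_real).sum() + np.trace(sigma_fake + sigma_real - 2 * np.real(s))
#----------------------------------------------------------------------------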
| stylegan-master | metrics/frechet_inception_distance.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
# empty
| stylegan-master | metrics/__init__.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Perceptual Path Length (PPL)."""
import numpy as np
import tensorflow as tf
import dnnlib.tflib as tflib
from metrics import metric_base
from training import misc
#----------------------------------------------------------------------------
# Normalize batch of vectors.
def normalize(v):
return v / tf.sqrt(tf.reduce_sum(tf.square(v), axis=-1, keepdims=True))
# Spherical interpolation of a batch of vectors.
def slerp(a, b, t):
a = normalize(a)
b = normalize(b)
d = tf.reduce_sum(a * b, axis=-1, keepdims=True)
p = t * tf.math.acos(d)
c = normalize(b - d * a)
d = a * tf.math.cos(p) + c * tf.math.sin(p)
return normalize(d)
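#----------------------------------------------------------------------------
# For reference (a sketch, not part of the original file): the same spherical
# interpolation in plain NumPy, handy for sanity-checking the TF op above.
def slerp_np(a, b, t):
    a = a / np.linalg.norm(a, axis=-1, keepdims=True)
    b = b / np.linalg.norm(b, axis=-1, keepdims=True)
    d = np.sum(a * b, axis=-1, keepdims=True)
    p = t * np.arccos(d)
    c = b - d * a
    c = c / np.linalg.norm(c, axis=-1, keepdims=True)
    return a * np.cos(p) + c * np.sin(p)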
#----------------------------------------------------------------------------
class PPL(metric_base.MetricBase):
def __init__(self, num_samples, epsilon, space, sampling, minibatch_per_gpu, **kwargs):
assert space in ['z', 'w']
assert sampling in ['full', 'end']
super().__init__(**kwargs)
self.num_samples = num_samples
self.epsilon = epsilon
self.space = space
self.sampling = sampling
self.minibatch_per_gpu = minibatch_per_gpu
def _evaluate(self, Gs, num_gpus):
minibatch_size = num_gpus * self.minibatch_per_gpu
# Construct TensorFlow graph.
distance_expr = []
for gpu_idx in range(num_gpus):
with tf.device('/gpu:%d' % gpu_idx):
Gs_clone = Gs.clone()
noise_vars = [var for name, var in Gs_clone.components.synthesis.vars.items() if name.startswith('noise')]
# Generate random latents and interpolation t-values.
lat_t01 = tf.random_normal([self.minibatch_per_gpu * 2] + Gs_clone.input_shape[1:])
lerp_t = tf.random_uniform([self.minibatch_per_gpu], 0.0, 1.0 if self.sampling == 'full' else 0.0)
# Interpolate in W or Z.
if self.space == 'w':
dlat_t01 = Gs_clone.components.mapping.get_output_for(lat_t01, None, is_validation=True)
dlat_t0, dlat_t1 = dlat_t01[0::2], dlat_t01[1::2]
dlat_e0 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis])
dlat_e1 = tflib.lerp(dlat_t0, dlat_t1, lerp_t[:, np.newaxis, np.newaxis] + self.epsilon)
dlat_e01 = tf.reshape(tf.stack([dlat_e0, dlat_e1], axis=1), dlat_t01.shape)
else: # space == 'z'
lat_t0, lat_t1 = lat_t01[0::2], lat_t01[1::2]
lat_e0 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis])
lat_e1 = slerp(lat_t0, lat_t1, lerp_t[:, np.newaxis] + self.epsilon)
lat_e01 = tf.reshape(tf.stack([lat_e0, lat_e1], axis=1), lat_t01.shape)
dlat_e01 = Gs_clone.components.mapping.get_output_for(lat_e01, None, is_validation=True)
# Synthesize images.
with tf.control_dependencies([var.initializer for var in noise_vars]): # use same noise inputs for the entire minibatch
images = Gs_clone.components.synthesis.get_output_for(dlat_e01, is_validation=True, randomize_noise=False)
# Crop only the face region.
c = int(images.shape[2] // 8)
images = images[:, :, c*3 : c*7, c*2 : c*6]
# Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
if images.shape[2] > 256:
factor = images.shape[2] // 256
images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor])
images = tf.reduce_mean(images, axis=[3,5])
# Scale dynamic range from [-1,1] to [0,255] for VGG.
images = (images + 1) * (255 / 2)
# Evaluate perceptual distance.
img_e0, img_e1 = images[0::2], images[1::2]
distance_measure = misc.load_pkl('https://drive.google.com/uc?id=1N2-m9qszOeVC9Tq77WxsLnuWwOedQiD2') # vgg16_zhang_perceptual.pkl
distance_expr.append(distance_measure.get_output_for(img_e0, img_e1) * (1 / self.epsilon**2))
# Sampling loop.
all_distances = []
for _ in range(0, self.num_samples, minibatch_size):
all_distances += tflib.run(distance_expr)
all_distances = np.concatenate(all_distances, axis=0)
# Reject outliers.
lo = np.percentile(all_distances, 1, interpolation='lower')
hi = np.percentile(all_distances, 99, interpolation='higher')
filtered_distances = np.extract(np.logical_and(lo <= all_distances, all_distances <= hi), all_distances)
self._report_result(np.mean(filtered_distances))
#----------------------------------------------------------------------------
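# For reference (a restatement of the estimator above, not new behavior): PPL is the
# expected perceptual distance between images generated from two latents that sit
# epsilon apart along an interpolation path, scaled by 1/epsilon^2:
#   PPL = E[ d(G(interp(z1, z2, t)), G(interp(z1, z2, t + epsilon))) / epsilon**2 ],
# where d is the VGG-based perceptual distance; the 1st-99th percentile filtering
# above discards outlier segments before averaging.
#----------------------------------------------------------------------------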
| stylegan-master | metrics/perceptual_path_length.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Common definitions for GAN metrics."""
import os
import time
import hashlib
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
import config
from training import misc
from training import dataset
#----------------------------------------------------------------------------
# Standard metrics.
fid50k = dnnlib.EasyDict(func_name='metrics.frechet_inception_distance.FID', name='fid50k', num_images=50000, minibatch_per_gpu=8)
ppl_zfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zfull', num_samples=100000, epsilon=1e-4, space='z', sampling='full', minibatch_per_gpu=16)
ppl_wfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wfull', num_samples=100000, epsilon=1e-4, space='w', sampling='full', minibatch_per_gpu=16)
ppl_zend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zend', num_samples=100000, epsilon=1e-4, space='z', sampling='end', minibatch_per_gpu=16)
ppl_wend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wend', num_samples=100000, epsilon=1e-4, space='w', sampling='end', minibatch_per_gpu=16)
ls = dnnlib.EasyDict(func_name='metrics.linear_separability.LS', name='ls', num_samples=200000, num_keep=100000, attrib_indices=range(40), minibatch_per_gpu=4)
dummy = dnnlib.EasyDict(func_name='metrics.metric_base.DummyMetric', name='dummy') # for debugging
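#----------------------------------------------------------------------------
# Usage sketch (an assumption, not part of the original file): each preset above is
# a plain kwargs dict. MetricGroup (defined below) instantiates and runs them against
# a network pickle; the run_dir of a previous training run supplies the dataset
# options needed for the real-image statistics.
def _run_fid50k_example(network_pkl, run_dir):
    group = MetricGroup([fid50k])
    group.run(network_pkl, run_dir=run_dir, num_gpus=1)
    print(group.get_result_str())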
#----------------------------------------------------------------------------
# Base class for metrics.
class MetricBase:
def __init__(self, name):
self.name = name
self._network_pkl = None
self._dataset_args = None
self._mirror_augment = None
self._results = []
self._eval_time = None
def run(self, network_pkl, run_dir=None, dataset_args=None, mirror_augment=None, num_gpus=1, tf_config=None, log_results=True):
self._network_pkl = network_pkl
self._dataset_args = dataset_args
self._mirror_augment = mirror_augment
self._results = []
if (dataset_args is None or mirror_augment is None) and run_dir is not None:
run_config = misc.parse_config_for_previous_run(run_dir)
self._dataset_args = dict(run_config['dataset'])
self._dataset_args['shuffle_mb'] = 0
self._mirror_augment = run_config['train'].get('mirror_augment', False)
time_begin = time.time()
with tf.Graph().as_default(), tflib.create_session(tf_config).as_default(): # pylint: disable=not-context-manager
_G, _D, Gs = misc.load_pkl(self._network_pkl)
self._evaluate(Gs, num_gpus=num_gpus)
self._eval_time = time.time() - time_begin
if log_results:
result_str = self.get_result_str()
if run_dir is not None:
log = os.path.join(run_dir, 'metric-%s.txt' % self.name)
with dnnlib.util.Logger(log, 'a'):
print(result_str)
else:
print(result_str)
def get_result_str(self):
network_name = os.path.splitext(os.path.basename(self._network_pkl))[0]
if len(network_name) > 29:
network_name = '...' + network_name[-26:]
result_str = '%-30s' % network_name
result_str += ' time %-12s' % dnnlib.util.format_time(self._eval_time)
for res in self._results:
result_str += ' ' + self.name + res.suffix + ' '
result_str += res.fmt % res.value
return result_str
def update_autosummaries(self):
for res in self._results:
tflib.autosummary.autosummary('Metrics/' + self.name + res.suffix, res.value)
def _evaluate(self, Gs, num_gpus):
raise NotImplementedError # to be overridden by subclasses
def _report_result(self, value, suffix='', fmt='%-10.4f'):
self._results += [dnnlib.EasyDict(value=value, suffix=suffix, fmt=fmt)]
def _get_cache_file_for_reals(self, extension='pkl', **kwargs):
all_args = dnnlib.EasyDict(metric_name=self.name, mirror_augment=self._mirror_augment)
all_args.update(self._dataset_args)
all_args.update(kwargs)
md5 = hashlib.md5(repr(sorted(all_args.items())).encode('utf-8'))
dataset_name = self._dataset_args['tfrecord_dir'].replace('\\', '/').split('/')[-1]
return os.path.join(config.cache_dir, '%s-%s-%s.%s' % (md5.hexdigest(), self.name, dataset_name, extension))
def _iterate_reals(self, minibatch_size):
dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **self._dataset_args)
while True:
images, _labels = dataset_obj.get_minibatch_np(minibatch_size)
if self._mirror_augment:
images = misc.apply_mirror_augment(images)
yield images
def _iterate_fakes(self, Gs, minibatch_size, num_gpus):
while True:
latents = np.random.randn(minibatch_size, *Gs.input_shape[1:])
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
images = Gs.run(latents, None, output_transform=fmt, is_validation=True, num_gpus=num_gpus, assume_frozen=True)
yield images
#----------------------------------------------------------------------------
# Group of multiple metrics.
class MetricGroup:
def __init__(self, metric_kwarg_list):
self.metrics = [dnnlib.util.call_func_by_name(**kwargs) for kwargs in metric_kwarg_list]
def run(self, *args, **kwargs):
for metric in self.metrics:
metric.run(*args, **kwargs)
def get_result_str(self):
return ' '.join(metric.get_result_str() for metric in self.metrics)
def update_autosummaries(self):
for metric in self.metrics:
metric.update_autosummaries()
#----------------------------------------------------------------------------
# Dummy metric for debugging purposes.
class DummyMetric(MetricBase):
def _evaluate(self, Gs, num_gpus):
_ = Gs, num_gpus
self._report_result(0.0)
#----------------------------------------------------------------------------
| stylegan-master | metrics/metric_base.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Miscellaneous utility functions."""
import os
import glob
import pickle
import re
import numpy as np
from collections import defaultdict
import PIL.Image
import dnnlib
import config
from training import dataset
#----------------------------------------------------------------------------
# Convenience wrappers for pickle that are able to load data produced by
# older versions of the code, and from external URLs.
def open_file_or_url(file_or_url):
if dnnlib.util.is_url(file_or_url):
return dnnlib.util.open_url(file_or_url, cache_dir=config.cache_dir)
return open(file_or_url, 'rb')
def load_pkl(file_or_url):
with open_file_or_url(file_or_url) as file:
return pickle.load(file, encoding='latin1')
def save_pkl(obj, filename):
with open(filename, 'wb') as file:
pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)
#----------------------------------------------------------------------------
# Image utils.
def adjust_dynamic_range(data, drange_in, drange_out):
if drange_in != drange_out:
scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0]))
bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)
data = data * scale + bias
return data
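# Example: adjust_dynamic_range(x, [-1, 1], [0, 255]) maps generator output to the
# 8-bit range with scale = 127.5 and bias = 127.5.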
def create_image_grid(images, grid_size=None):
assert images.ndim == 3 or images.ndim == 4
num, img_w, img_h = images.shape[0], images.shape[-1], images.shape[-2]
if grid_size is not None:
grid_w, grid_h = tuple(grid_size)
else:
grid_w = max(int(np.ceil(np.sqrt(num))), 1)
grid_h = max((num - 1) // grid_w + 1, 1)
grid = np.zeros(list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w], dtype=images.dtype)
for idx in range(num):
x = (idx % grid_w) * img_w
y = (idx // grid_w) * img_h
grid[..., y : y + img_h, x : x + img_w] = images[idx]
return grid
def convert_to_pil_image(image, drange=[0,1]):
assert image.ndim == 2 or image.ndim == 3
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0] # grayscale CHW => HW
else:
image = image.transpose(1, 2, 0) # CHW -> HWC
image = adjust_dynamic_range(image, drange, [0,255])
image = np.rint(image).clip(0, 255).astype(np.uint8)
fmt = 'RGB' if image.ndim == 3 else 'L'
return PIL.Image.fromarray(image, fmt)
def save_image(image, filename, drange=[0,1], quality=95):
img = convert_to_pil_image(image, drange)
if '.jpg' in filename:
img.save(filename,"JPEG", quality=quality, optimize=True)
else:
img.save(filename)
def save_image_grid(images, filename, drange=[0,1], grid_size=None):
convert_to_pil_image(create_image_grid(images, grid_size), drange).save(filename)
#----------------------------------------------------------------------------
# Locating results.
def locate_run_dir(run_id_or_run_dir):
if isinstance(run_id_or_run_dir, str):
if os.path.isdir(run_id_or_run_dir):
return run_id_or_run_dir
converted = dnnlib.submission.submit.convert_path(run_id_or_run_dir)
if os.path.isdir(converted):
return converted
run_dir_pattern = re.compile('^0*%s-' % str(run_id_or_run_dir))
for search_dir in ['']:
full_search_dir = config.result_dir if search_dir == '' else os.path.normpath(os.path.join(config.result_dir, search_dir))
run_dir = os.path.join(full_search_dir, str(run_id_or_run_dir))
if os.path.isdir(run_dir):
return run_dir
run_dirs = sorted(glob.glob(os.path.join(full_search_dir, '*')))
run_dirs = [run_dir for run_dir in run_dirs if run_dir_pattern.match(os.path.basename(run_dir))]
run_dirs = [run_dir for run_dir in run_dirs if os.path.isdir(run_dir)]
if len(run_dirs) == 1:
return run_dirs[0]
raise IOError('Cannot locate result subdir for run', run_id_or_run_dir)
def list_network_pkls(run_id_or_run_dir, include_final=True):
run_dir = locate_run_dir(run_id_or_run_dir)
pkls = sorted(glob.glob(os.path.join(run_dir, 'network-*.pkl')))
if len(pkls) >= 1 and os.path.basename(pkls[0]) == 'network-final.pkl':
if include_final:
pkls.append(pkls[0])
del pkls[0]
return pkls
def locate_network_pkl(run_id_or_run_dir_or_network_pkl, snapshot_or_network_pkl=None):
for candidate in [snapshot_or_network_pkl, run_id_or_run_dir_or_network_pkl]:
if isinstance(candidate, str):
if os.path.isfile(candidate):
return candidate
converted = dnnlib.submission.submit.convert_path(candidate)
if os.path.isfile(converted):
return converted
pkls = list_network_pkls(run_id_or_run_dir_or_network_pkl)
if len(pkls) >= 1 and snapshot_or_network_pkl is None:
return pkls[-1]
for pkl in pkls:
try:
name = os.path.splitext(os.path.basename(pkl))[0]
number = int(name.split('-')[-1])
if number == snapshot_or_network_pkl:
return pkl
except ValueError: pass
except IndexError: pass
raise IOError('Cannot locate network pkl for snapshot', snapshot_or_network_pkl)
def get_id_string_for_network_pkl(network_pkl):
p = network_pkl.replace('.pkl', '').replace('\\', '/').split('/')
return '-'.join(p[max(len(p) - 2, 0):])
#----------------------------------------------------------------------------
# Loading data from previous training runs.
def load_network_pkl(run_id_or_run_dir_or_network_pkl, snapshot_or_network_pkl=None):
return load_pkl(locate_network_pkl(run_id_or_run_dir_or_network_pkl, snapshot_or_network_pkl))
def parse_config_for_previous_run(run_id):
run_dir = locate_run_dir(run_id)
# Parse config.txt.
cfg = defaultdict(dict)
with open(os.path.join(run_dir, 'config.txt'), 'rt') as f:
for line in f:
line = re.sub(r"^{?\s*'(\w+)':\s*{(.*)(},|}})$", r"\1 = {\2}", line.strip())
if line.startswith('dataset =') or line.startswith('train ='):
exec(line, cfg, cfg) # pylint: disable=exec-used
# Handle legacy options.
if 'file_pattern' in cfg['dataset']:
cfg['dataset']['tfrecord_dir'] = cfg['dataset'].pop('file_pattern').replace('-r??.tfrecords', '')
if 'mirror_augment' in cfg['dataset']:
cfg['train']['mirror_augment'] = cfg['dataset'].pop('mirror_augment')
if 'max_labels' in cfg['dataset']:
v = cfg['dataset'].pop('max_labels')
if v is None: v = 0
if v == 'all': v = 'full'
cfg['dataset']['max_label_size'] = v
if 'max_images' in cfg['dataset']:
cfg['dataset'].pop('max_images')
return cfg
def load_dataset_for_previous_run(run_id, **kwargs): # => dataset_obj, mirror_augment
cfg = parse_config_for_previous_run(run_id)
cfg['dataset'].update(kwargs)
dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **cfg['dataset'])
mirror_augment = cfg['train'].get('mirror_augment', False)
return dataset_obj, mirror_augment
def apply_mirror_augment(minibatch):
mask = np.random.rand(minibatch.shape[0]) < 0.5
minibatch = np.array(minibatch)
minibatch[mask] = minibatch[mask, :, :, ::-1]
return minibatch
#----------------------------------------------------------------------------
# Size and contents of the image snapshot grids that are exported
# periodically during training.
def setup_snapshot_image_grid(G, training_set,
size = '1080p', # '1080p' = to be viewed on 1080p display, '4k' = to be viewed on 4k display.
layout = 'random'): # 'random' = grid contents are selected randomly, 'row_per_class' = each row corresponds to one class label.
# Select size.
gw = 1; gh = 1
if size == '1080p':
gw = np.clip(1920 // G.output_shape[3], 3, 32)
gh = np.clip(1080 // G.output_shape[2], 2, 32)
if size == '4k':
gw = np.clip(3840 // G.output_shape[3], 7, 32)
gh = np.clip(2160 // G.output_shape[2], 4, 32)
# Initialize data arrays.
reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype)
labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype)
latents = np.random.randn(gw * gh, *G.input_shape[1:])
# Random layout.
if layout == 'random':
reals[:], labels[:] = training_set.get_minibatch_np(gw * gh)
# Class-conditional layouts.
class_layouts = dict(row_per_class=[gw,1], col_per_class=[1,gh], class4x4=[4,4])
if layout in class_layouts:
bw, bh = class_layouts[layout]
nw = (gw - 1) // bw + 1
nh = (gh - 1) // bh + 1
blocks = [[] for _i in range(nw * nh)]
for _iter in range(1000000):
real, label = training_set.get_minibatch_np(1)
idx = np.argmax(label[0])
while idx < len(blocks) and len(blocks[idx]) >= bw * bh:
idx += training_set.label_size
if idx < len(blocks):
blocks[idx].append((real, label))
if all(len(block) >= bw * bh for block in blocks):
break
for i, block in enumerate(blocks):
for j, (real, label) in enumerate(block):
x = (i % nw) * bw + j % bw
y = (i // nw) * bh + j // bw
if x < gw and y < gh:
reals[x + y * gw] = real[0]
labels[x + y * gw] = label[0]
return (gw, gh), reals, labels, latents
#----------------------------------------------------------------------------
| stylegan-master | training/misc.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
# empty
| stylegan-master | training/__init__.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Network architectures used in the StyleGAN paper."""
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
# NOTE: Do not import any application-specific modules here!
# Specify all network parameters as kwargs.
#----------------------------------------------------------------------------
# Primitive ops for manipulating 4D activation tensors.
# The gradients of these are not necessarily efficient or even meaningful.
def _blur2d(x, f=[1,2,1], normalize=True, flip=False, stride=1):
assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:])
assert isinstance(stride, int) and stride >= 1
# Finalize filter kernel.
f = np.array(f, dtype=np.float32)
if f.ndim == 1:
f = f[:, np.newaxis] * f[np.newaxis, :]
assert f.ndim == 2
if normalize:
f /= np.sum(f)
if flip:
f = f[::-1, ::-1]
f = f[:, :, np.newaxis, np.newaxis]
f = np.tile(f, [1, 1, int(x.shape[1]), 1])
# No-op => early exit.
if f.shape == (1, 1) and f[0,0] == 1:
return x
# Convolve using depthwise_conv2d.
orig_dtype = x.dtype
x = tf.cast(x, tf.float32) # tf.nn.depthwise_conv2d() doesn't support fp16
f = tf.constant(f, dtype=x.dtype, name='filter')
strides = [1, 1, stride, stride]
x = tf.nn.depthwise_conv2d(x, f, strides=strides, padding='SAME', data_format='NCHW')
x = tf.cast(x, orig_dtype)
return x
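# For the default f=[1,2,1], the outer product above gives the 3x3 binomial kernel
#   [[1,2,1],[2,4,2],[1,2,1]] / 16 (when normalize=True),
# applied independently to every channel via depthwise_conv2d.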
def _upscale2d(x, factor=2, gain=1):
assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:])
assert isinstance(factor, int) and factor >= 1
# Apply gain.
if gain != 1:
x *= gain
# No-op => early exit.
if factor == 1:
return x
# Upscale using tf.tile().
s = x.shape
x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
x = tf.tile(x, [1, 1, 1, factor, 1, factor])
x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
return x
def _downscale2d(x, factor=2, gain=1):
assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:])
assert isinstance(factor, int) and factor >= 1
# 2x2, float32 => downscale using _blur2d().
if factor == 2 and x.dtype == tf.float32:
f = [np.sqrt(gain) / factor] * factor
return _blur2d(x, f=f, normalize=False, stride=factor)
# Apply gain.
if gain != 1:
x *= gain
# No-op => early exit.
if factor == 1:
return x
# Large factor => downscale using tf.nn.avg_pool().
# NOTE: Requires tf_config['graph_options.place_pruned_graph']=True to work.
ksize = [1, 1, factor, factor]
return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW')
#----------------------------------------------------------------------------
# High-level ops for manipulating 4D activation tensors.
# The gradients of these are meant to be as efficient as possible.
def blur2d(x, f=[1,2,1], normalize=True):
with tf.variable_scope('Blur2D'):
@tf.custom_gradient
def func(x):
y = _blur2d(x, f, normalize)
@tf.custom_gradient
def grad(dy):
dx = _blur2d(dy, f, normalize, flip=True)
return dx, lambda ddx: _blur2d(ddx, f, normalize)
return y, grad
return func(x)
def upscale2d(x, factor=2):
with tf.variable_scope('Upscale2D'):
@tf.custom_gradient
def func(x):
y = _upscale2d(x, factor)
@tf.custom_gradient
def grad(dy):
dx = _downscale2d(dy, factor, gain=factor**2)
return dx, lambda ddx: _upscale2d(ddx, factor)
return y, grad
return func(x)
def downscale2d(x, factor=2):
with tf.variable_scope('Downscale2D'):
@tf.custom_gradient
def func(x):
y = _downscale2d(x, factor)
@tf.custom_gradient
def grad(dy):
dx = _upscale2d(dy, factor, gain=1/factor**2)
return dx, lambda ddx: _downscale2d(ddx, factor)
return y, grad
return func(x)
#----------------------------------------------------------------------------
# Get/create weight tensor for a convolutional or fully-connected layer.
def get_weight(shape, gain=np.sqrt(2), use_wscale=False, lrmul=1):
fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out]
he_std = gain / np.sqrt(fan_in) # He init
# Equalized learning rate and custom learning rate multiplier.
if use_wscale:
init_std = 1.0 / lrmul
runtime_coef = he_std * lrmul
else:
init_std = he_std / lrmul
runtime_coef = lrmul
# Create variable.
init = tf.initializers.random_normal(0, init_std)
return tf.get_variable('weight', shape=shape, initializer=init) * runtime_coef
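# For example, a 3x3 convolution with 512 input feature maps has
# fan_in = 3*3*512 = 4608 and he_std = sqrt(2/4608) ~= 0.021. With use_wscale=True
# and lrmul=1, the variable is initialized with std 1.0 and scaled by
# runtime_coef ~= 0.021 at runtime, so the layer sees He-style initialization while
# the optimizer updates unit-scale weights (equalized learning rate).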
#----------------------------------------------------------------------------
# Fully-connected layer.
def dense(x, fmaps, **kwargs):
if len(x.shape) > 2:
x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
w = get_weight([x.shape[1].value, fmaps], **kwargs)
w = tf.cast(w, x.dtype)
return tf.matmul(x, w)
#----------------------------------------------------------------------------
# Convolutional layer.
def conv2d(x, fmaps, kernel, **kwargs):
assert kernel >= 1 and kernel % 2 == 1
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs)
w = tf.cast(w, x.dtype)
return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Fused convolution + scaling.
# Faster and uses less memory than performing the operations separately.
def upscale2d_conv2d(x, fmaps, kernel, fused_scale='auto', **kwargs):
assert kernel >= 1 and kernel % 2 == 1
assert fused_scale in [True, False, 'auto']
if fused_scale == 'auto':
fused_scale = min(x.shape[2:]) * 2 >= 128
# Not fused => call the individual ops directly.
if not fused_scale:
return conv2d(upscale2d(x), fmaps, kernel, **kwargs)
# Fused => perform both ops simultaneously using tf.nn.conv2d_transpose().
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs)
w = tf.transpose(w, [0, 1, 3, 2]) # [kernel, kernel, fmaps_out, fmaps_in]
w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]])
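# (The pad + add_n above smears the k x k kernel into a (k+1) x (k+1) kernel, so the
# stride-2 transposed convolution reproduces upscale2d() followed by conv2d() in one op.)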
w = tf.cast(w, x.dtype)
os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2]
return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
def conv2d_downscale2d(x, fmaps, kernel, fused_scale='auto', **kwargs):
assert kernel >= 1 and kernel % 2 == 1
assert fused_scale in [True, False, 'auto']
if fused_scale == 'auto':
fused_scale = min(x.shape[2:]) >= 128
# Not fused => call the individual ops directly.
if not fused_scale:
return downscale2d(conv2d(x, fmaps, kernel, **kwargs))
# Fused => perform both ops simultaneously using tf.nn.conv2d().
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs)
w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25
w = tf.cast(w, x.dtype)
return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Apply bias to the given activation tensor.
def apply_bias(x, lrmul=1):
b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros()) * lrmul
b = tf.cast(b, x.dtype)
if len(x.shape) == 2:
return x + b
return x + tf.reshape(b, [1, -1, 1, 1])
#----------------------------------------------------------------------------
# Leaky ReLU activation. More efficient than tf.nn.leaky_relu() and supports FP16.
def leaky_relu(x, alpha=0.2):
with tf.variable_scope('LeakyReLU'):
alpha = tf.constant(alpha, dtype=x.dtype, name='alpha')
@tf.custom_gradient
def func(x):
y = tf.maximum(x, x * alpha)
@tf.custom_gradient
def grad(dy):
dx = tf.where(y >= 0, dy, dy * alpha)
return dx, lambda ddx: tf.where(y >= 0, ddx, ddx * alpha)
return y, grad
return func(x)
#----------------------------------------------------------------------------
# Pixelwise feature vector normalization.
def pixel_norm(x, epsilon=1e-8):
with tf.variable_scope('PixelNorm'):
epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon')
return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon)
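# Hedged NumPy sketch of the same normalization (hypothetical helper, not part of the
# original code): each pixel's feature vector along the channel axis is rescaled to
# roughly unit RMS length.
def _pixel_norm_np(x, epsilon=1e-8):  # x: [N, C, H, W] ndarray
    return x / np.sqrt(np.mean(np.square(x), axis=1, keepdims=True) + epsilon)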
#----------------------------------------------------------------------------
# Instance normalization.
def instance_norm(x, epsilon=1e-8):
assert len(x.shape) == 4 # NCHW
with tf.variable_scope('InstanceNorm'):
orig_dtype = x.dtype
x = tf.cast(x, tf.float32)
x -= tf.reduce_mean(x, axis=[2,3], keepdims=True)
epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon')
x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=[2,3], keepdims=True) + epsilon)
x = tf.cast(x, orig_dtype)
return x
#----------------------------------------------------------------------------
# Style modulation.
def style_mod(x, dlatent, **kwargs):
with tf.variable_scope('StyleMod'):
style = apply_bias(dense(dlatent, fmaps=x.shape[1]*2, gain=1, **kwargs))
style = tf.reshape(style, [-1, 2, x.shape[1]] + [1] * (len(x.shape) - 2))
return x * (style[:,0] + 1) + style[:,1]
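# Hedged NumPy sketch of the modulation above (hypothetical helper, not part of the
# original code): the dense layer predicts a per-channel (scale, bias) pair from w, and
# channel c of x becomes x[:, c] * (scale[:, c] + 1) + bias[:, c], an AdaIN-style affine.
def _style_mod_np(x, scale, bias):  # x: [N, C, H, W], scale/bias: [N, C]
    return x * (scale[:, :, None, None] + 1) + bias[:, :, None, None]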
#----------------------------------------------------------------------------
# Noise input.
def apply_noise(x, noise_var=None, randomize_noise=True):
assert len(x.shape) == 4 # NCHW
with tf.variable_scope('Noise'):
if noise_var is None or randomize_noise:
noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype)
else:
noise = tf.cast(noise_var, x.dtype)
weight = tf.get_variable('weight', shape=[x.shape[1].value], initializer=tf.initializers.zeros())
return x + noise * tf.reshape(tf.cast(weight, x.dtype), [1, -1, 1, 1])
#----------------------------------------------------------------------------
# Minibatch standard deviation.
def minibatch_stddev_layer(x, group_size=4, num_new_features=1):
with tf.variable_scope('MinibatchStddev'):
group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size.
s = x.shape # [NCHW] Input shape.
y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c.
y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32.
y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group.
y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group.
y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group.
y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels.
        y = tf.reduce_mean(y, axis=[2])                         # [Mn11] Remove the singleton axis left by the channel-group average.
y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type.
y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels.
return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap.
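# Hedged NumPy reference for the common case (hypothetical helper, not part of the original
# code; one group spanning the whole minibatch, num_new_features=1): append a constant
# feature map holding the average per-pixel standard deviation across the minibatch.
def _minibatch_stddev_np(x):  # x: [N, C, H, W]
    y = np.sqrt(np.var(x.astype(np.float32), axis=0) + 1e-8)    # [CHW] stddev over the minibatch
    y = np.full((x.shape[0], 1, x.shape[2], x.shape[3]), y.mean(), dtype=x.dtype)
    return np.concatenate([x, y], axis=1)                        # [N, C+1, H, W]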
#----------------------------------------------------------------------------
# Style-based generator used in the StyleGAN paper.
# Composed of two sub-networks (G_mapping and G_synthesis) that are defined below.
def G_style(
latents_in, # First input: Latent vectors (Z) [minibatch, latent_size].
labels_in, # Second input: Conditioning labels [minibatch, label_size].
truncation_psi = 0.7, # Style strength multiplier for the truncation trick. None = disable.
truncation_cutoff = 8, # Number of layers for which to apply the truncation trick. None = disable.
truncation_psi_val = None, # Value for truncation_psi to use during validation.
truncation_cutoff_val = None, # Value for truncation_cutoff to use during validation.
dlatent_avg_beta = 0.995, # Decay for tracking the moving average of W during training. None = disable.
style_mixing_prob = 0.9, # Probability of mixing styles during training. None = disable.
is_training = False, # Network is under training? Enables and disables specific features.
is_validation = False, # Network is under validation? Chooses which value to use for truncation_psi.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
components = dnnlib.EasyDict(), # Container for sub-networks. Retained between calls.
**kwargs): # Arguments for sub-networks (G_mapping and G_synthesis).
# Validate arguments.
assert not is_training or not is_validation
assert isinstance(components, dnnlib.EasyDict)
if is_validation:
truncation_psi = truncation_psi_val
truncation_cutoff = truncation_cutoff_val
if is_training or (truncation_psi is not None and not tflib.is_tf_expression(truncation_psi) and truncation_psi == 1):
truncation_psi = None
if is_training or (truncation_cutoff is not None and not tflib.is_tf_expression(truncation_cutoff) and truncation_cutoff <= 0):
truncation_cutoff = None
if not is_training or (dlatent_avg_beta is not None and not tflib.is_tf_expression(dlatent_avg_beta) and dlatent_avg_beta == 1):
dlatent_avg_beta = None
if not is_training or (style_mixing_prob is not None and not tflib.is_tf_expression(style_mixing_prob) and style_mixing_prob <= 0):
style_mixing_prob = None
# Setup components.
if 'synthesis' not in components:
components.synthesis = tflib.Network('G_synthesis', func_name=G_synthesis, **kwargs)
num_layers = components.synthesis.input_shape[1]
dlatent_size = components.synthesis.input_shape[2]
if 'mapping' not in components:
components.mapping = tflib.Network('G_mapping', func_name=G_mapping, dlatent_broadcast=num_layers, **kwargs)
# Setup variables.
lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False)
dlatent_avg = tf.get_variable('dlatent_avg', shape=[dlatent_size], initializer=tf.initializers.zeros(), trainable=False)
# Evaluate mapping network.
dlatents = components.mapping.get_output_for(latents_in, labels_in, **kwargs)
# Update moving average of W.
if dlatent_avg_beta is not None:
with tf.variable_scope('DlatentAvg'):
batch_avg = tf.reduce_mean(dlatents[:, 0], axis=0)
update_op = tf.assign(dlatent_avg, tflib.lerp(batch_avg, dlatent_avg, dlatent_avg_beta))
with tf.control_dependencies([update_op]):
dlatents = tf.identity(dlatents)
# Perform style mixing regularization.
if style_mixing_prob is not None:
with tf.name_scope('StyleMix'):
latents2 = tf.random_normal(tf.shape(latents_in))
dlatents2 = components.mapping.get_output_for(latents2, labels_in, **kwargs)
layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
cur_layers = num_layers - tf.cast(lod_in, tf.int32) * 2
mixing_cutoff = tf.cond(
tf.random_uniform([], 0.0, 1.0) < style_mixing_prob,
lambda: tf.random_uniform([], 1, cur_layers, dtype=tf.int32),
lambda: cur_layers)
dlatents = tf.where(tf.broadcast_to(layer_idx < mixing_cutoff, tf.shape(dlatents)), dlatents, dlatents2)
# Apply truncation trick.
if truncation_psi is not None and truncation_cutoff is not None:
with tf.variable_scope('Truncation'):
layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
ones = np.ones(layer_idx.shape, dtype=np.float32)
coefs = tf.where(layer_idx < truncation_cutoff, truncation_psi * ones, ones)
dlatents = tflib.lerp(dlatent_avg, dlatents, coefs)
# Evaluate synthesis network.
with tf.control_dependencies([tf.assign(components.synthesis.find_var('lod'), lod_in)]):
images_out = components.synthesis.get_output_for(dlatents, force_clean_graph=is_template_graph, **kwargs)
return tf.identity(images_out, name='images_out')
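# Hedged NumPy sketch of the truncation trick applied above (hypothetical helper, not part
# of the original code): styles of the first `cutoff` layers are pulled toward the tracked
# average W with strength psi; the remaining layers are left untouched.
def _truncate_np(dlatents, dlatent_avg, psi=0.7, cutoff=8):  # dlatents: [N, L, D], dlatent_avg: [D]
    out = dlatents.copy()
    out[:, :cutoff] = dlatent_avg + psi * (dlatents[:, :cutoff] - dlatent_avg)
    return out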
#----------------------------------------------------------------------------
# Mapping network used in the StyleGAN paper.
def G_mapping(
latents_in, # First input: Latent vectors (Z) [minibatch, latent_size].
labels_in, # Second input: Conditioning labels [minibatch, label_size].
latent_size = 512, # Latent vector (Z) dimensionality.
label_size = 0, # Label dimensionality, 0 if no labels.
dlatent_size = 512, # Disentangled latent (W) dimensionality.
dlatent_broadcast = None, # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size].
mapping_layers = 8, # Number of mapping layers.
mapping_fmaps = 512, # Number of activations in the mapping layers.
mapping_lrmul = 0.01, # Learning rate multiplier for the mapping layers.
mapping_nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu'.
use_wscale = True, # Enable equalized learning rate?
normalize_latents = True, # Normalize latent vectors (Z) before feeding them to the mapping layers?
dtype = 'float32', # Data type to use for activations and outputs.
**_kwargs): # Ignore unrecognized keyword args.
act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[mapping_nonlinearity]
# Inputs.
latents_in.set_shape([None, latent_size])
labels_in.set_shape([None, label_size])
latents_in = tf.cast(latents_in, dtype)
labels_in = tf.cast(labels_in, dtype)
x = latents_in
# Embed labels and concatenate them with latents.
if label_size:
with tf.variable_scope('LabelConcat'):
w = tf.get_variable('weight', shape=[label_size, latent_size], initializer=tf.initializers.random_normal())
y = tf.matmul(labels_in, tf.cast(w, dtype))
x = tf.concat([x, y], axis=1)
# Normalize latents.
if normalize_latents:
x = pixel_norm(x)
# Mapping layers.
for layer_idx in range(mapping_layers):
with tf.variable_scope('Dense%d' % layer_idx):
fmaps = dlatent_size if layer_idx == mapping_layers - 1 else mapping_fmaps
x = dense(x, fmaps=fmaps, gain=gain, use_wscale=use_wscale, lrmul=mapping_lrmul)
x = apply_bias(x, lrmul=mapping_lrmul)
x = act(x)
# Broadcast.
if dlatent_broadcast is not None:
with tf.variable_scope('Broadcast'):
x = tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1])
# Output.
assert x.dtype == tf.as_dtype(dtype)
return tf.identity(x, name='dlatents_out')
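# Hedged note: mapping_lrmul=0.01 is realized through the lrmul arguments of get_weight()
# and apply_bias() above, which rescale the weights (and their initialization) at runtime so
# that the mapping network effectively trains with a learning rate two orders of magnitude
# lower than the synthesis network, as described in the StyleGAN paper.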
#----------------------------------------------------------------------------
# Synthesis network used in the StyleGAN paper.
def G_synthesis(
dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
dlatent_size = 512, # Disentangled latent (W) dimensionality.
num_channels = 3, # Number of output color channels.
resolution = 1024, # Output resolution.
fmap_base = 8192, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_max = 512, # Maximum number of feature maps in any layer.
use_styles = True, # Enable style inputs?
const_input_layer = True, # First layer is a learned constant?
use_noise = True, # Enable noise inputs?
randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu'
use_wscale = True, # Enable equalized learning rate?
use_pixel_norm = False, # Enable pixelwise feature vector normalization?
use_instance_norm = True, # Enable instance normalization?
dtype = 'float32', # Data type to use for activations and outputs.
fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically.
blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering.
structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
force_clean_graph = False, # True = construct a clean graph that looks nice in TensorBoard, False = default behavior.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
def blur(x): return blur2d(x, blur_filter) if blur_filter else x
if is_template_graph: force_clean_graph = True
if force_clean_graph: randomize_noise = False
if structure == 'auto': structure = 'linear' if force_clean_graph else 'recursive'
act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity]
num_layers = resolution_log2 * 2 - 2
num_styles = num_layers if use_styles else 1
images_out = None
# Primary inputs.
dlatents_in.set_shape([None, num_styles, dlatent_size])
dlatents_in = tf.cast(dlatents_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype)
# Noise inputs.
noise_inputs = []
if use_noise:
for layer_idx in range(num_layers):
res = layer_idx // 2 + 2
shape = [1, use_noise, 2**res, 2**res]
noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False))
# Things to do at the end of each layer.
def layer_epilogue(x, layer_idx):
if use_noise:
x = apply_noise(x, noise_inputs[layer_idx], randomize_noise=randomize_noise)
x = apply_bias(x)
x = act(x)
if use_pixel_norm:
x = pixel_norm(x)
if use_instance_norm:
x = instance_norm(x)
if use_styles:
x = style_mod(x, dlatents_in[:, layer_idx], use_wscale=use_wscale)
return x
# Early layers.
with tf.variable_scope('4x4'):
if const_input_layer:
with tf.variable_scope('Const'):
x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.ones())
x = layer_epilogue(tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1]), 0)
else:
with tf.variable_scope('Dense'):
                x = dense(dlatents_in[:, 0], fmaps=nf(1)*16, gain=gain/4, use_wscale=use_wscale) # tweak gain to match the official implementation of Progressive GAN
x = layer_epilogue(tf.reshape(x, [-1, nf(1), 4, 4]), 0)
with tf.variable_scope('Conv'):
x = layer_epilogue(conv2d(x, fmaps=nf(1), kernel=3, gain=gain, use_wscale=use_wscale), 1)
# Building blocks for remaining layers.
def block(res, x): # res = 3..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
with tf.variable_scope('Conv0_up'):
x = layer_epilogue(blur(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale)), res*2-4)
with tf.variable_scope('Conv1'):
x = layer_epilogue(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale), res*2-3)
return x
def torgb(res, x): # res = 2..resolution_log2
lod = resolution_log2 - res
with tf.variable_scope('ToRGB_lod%d' % lod):
return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale))
# Fixed structure: simple and efficient, but does not support progressive growing.
if structure == 'fixed':
for res in range(3, resolution_log2 + 1):
x = block(res, x)
images_out = torgb(resolution_log2, x)
# Linear structure: simple but inefficient.
if structure == 'linear':
images_out = torgb(2, x)
for res in range(3, resolution_log2 + 1):
lod = resolution_log2 - res
x = block(res, x)
img = torgb(res, x)
images_out = upscale2d(images_out)
with tf.variable_scope('Grow_lod%d' % lod):
images_out = tflib.lerp_clip(img, images_out, lod_in - lod)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def cset(cur_lambda, new_cond, new_lambda):
return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
def grow(x, res, lod):
y = block(res, x)
img = lambda: upscale2d(torgb(res, y), 2**lod)
img = cset(img, (lod_in > lod), lambda: upscale2d(tflib.lerp(torgb(res, y), upscale2d(torgb(res - 1, x)), lod_in - lod), 2**lod))
if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1))
return img()
images_out = grow(x, 3, resolution_log2 - 3)
assert images_out.dtype == tf.as_dtype(dtype)
return tf.identity(images_out, name='images_out')
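# Hedged worked example of the capacity schedule above (hypothetical helper, not part of
# the original code): with the default fmap_base/fmap_decay/fmap_max, nf() yields 512
# feature maps for the 4x4 through 32x32 blocks and then halves per resolution, down to
# 16 feature maps for the 1024x1024 block.
def _print_fmap_schedule(resolution=1024, fmap_base=8192, fmap_decay=1.0, fmap_max=512):
    for res in range(2, int(np.log2(resolution)) + 1):
        fmaps = min(int(fmap_base / (2.0 ** ((res - 1) * fmap_decay))), fmap_max)
        print('%4dx%-4d block uses %3d feature maps' % (2**res, 2**res, fmaps))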
#----------------------------------------------------------------------------
# Discriminator used in the StyleGAN paper.
def D_basic(
images_in, # First input: Images [minibatch, channel, height, width].
labels_in, # Second input: Labels [minibatch, label_size].
num_channels = 1, # Number of input color channels. Overridden based on dataset.
resolution = 32, # Input resolution. Overridden based on dataset.
label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
fmap_base = 8192, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_max = 512, # Maximum number of feature maps in any layer.
    nonlinearity        = 'lrelu',      # Activation function: 'relu', 'lrelu'.
use_wscale = True, # Enable equalized learning rate?
mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable.
mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer.
dtype = 'float32', # Data type to use for activations and outputs.
fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically.
blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering.
structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
def blur(x): return blur2d(x, blur_filter) if blur_filter else x
if structure == 'auto': structure = 'linear' if is_template_graph else 'recursive'
act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity]
images_in.set_shape([None, num_channels, resolution, resolution])
labels_in.set_shape([None, label_size])
images_in = tf.cast(images_in, dtype)
labels_in = tf.cast(labels_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
scores_out = None
# Building blocks.
def fromrgb(x, res): # res = 2..resolution_log2
with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)):
return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, gain=gain, use_wscale=use_wscale)))
def block(x, res): # res = 2..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
if res >= 3: # 8x8 and up
with tf.variable_scope('Conv0'):
x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale)))
with tf.variable_scope('Conv1_down'):
x = act(apply_bias(conv2d_downscale2d(blur(x), fmaps=nf(res-2), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale)))
else: # 4x4
if mbstd_group_size > 1:
x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features)
with tf.variable_scope('Conv'):
x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale)))
with tf.variable_scope('Dense0'):
x = act(apply_bias(dense(x, fmaps=nf(res-2), gain=gain, use_wscale=use_wscale)))
with tf.variable_scope('Dense1'):
x = apply_bias(dense(x, fmaps=max(label_size, 1), gain=1, use_wscale=use_wscale))
return x
# Fixed structure: simple and efficient, but does not support progressive growing.
if structure == 'fixed':
x = fromrgb(images_in, resolution_log2)
for res in range(resolution_log2, 2, -1):
x = block(x, res)
scores_out = block(x, 2)
# Linear structure: simple but inefficient.
if structure == 'linear':
img = images_in
x = fromrgb(img, resolution_log2)
for res in range(resolution_log2, 2, -1):
lod = resolution_log2 - res
x = block(x, res)
img = downscale2d(img)
y = fromrgb(img, res - 1)
with tf.variable_scope('Grow_lod%d' % lod):
x = tflib.lerp_clip(x, y, lod_in - lod)
scores_out = block(x, 2)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def cset(cur_lambda, new_cond, new_lambda):
return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
def grow(res, lod):
x = lambda: fromrgb(downscale2d(images_in, 2**lod), res)
if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1))
x = block(x(), res); y = lambda: x
if res > 2: y = cset(y, (lod_in > lod), lambda: tflib.lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod))
return y()
scores_out = grow(2, resolution_log2 - 2)
# Label conditioning from "Which Training Methods for GANs do actually Converge?"
if label_size:
with tf.variable_scope('LabelSwitch'):
scores_out = tf.reduce_sum(scores_out * labels_in, axis=1, keepdims=True)
assert scores_out.dtype == tf.as_dtype(dtype)
scores_out = tf.identity(scores_out, name='scores_out')
return scores_out
#----------------------------------------------------------------------------
|
stylegan-master
|
training/networks_stylegan.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Main training script."""
import os
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
import config
import train
from training import dataset
from training import misc
from metrics import metric_base
#----------------------------------------------------------------------------
# Just-in-time processing of training images before feeding them to the networks.
def process_reals(x, lod, mirror_augment, drange_data, drange_net):
with tf.name_scope('ProcessReals'):
with tf.name_scope('DynamicRange'):
x = tf.cast(x, tf.float32)
x = misc.adjust_dynamic_range(x, drange_data, drange_net)
if mirror_augment:
with tf.name_scope('MirrorAugment'):
s = tf.shape(x)
mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)
mask = tf.tile(mask, [1, s[1], s[2], s[3]])
x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3]))
with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail.
s = tf.shape(x)
y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])
y = tf.reduce_mean(y, axis=[3, 5], keepdims=True)
y = tf.tile(y, [1, 1, 1, 2, 1, 2])
y = tf.reshape(y, [-1, s[1], s[2], s[3]])
x = tflib.lerp(x, y, lod - tf.floor(lod))
with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks.
s = tf.shape(x)
factor = tf.cast(2 ** tf.floor(lod), tf.int32)
x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
x = tf.tile(x, [1, 1, 1, factor, 1, factor])
x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
return x
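# Hedged NumPy sketch of the FadeLOD step above (hypothetical helper, not part of the
# original code): average each 2x2 block, replicate it back to full size, and blend with
# the original by the fractional part of lod, giving a smooth crossfade between
# consecutive resolutions.
def _fade_lod_np(x, lod):  # x: [N, C, H, W] ndarray
    n, c, h, w = x.shape
    y = x.reshape(n, c, h // 2, 2, w // 2, 2).mean(axis=(3, 5), keepdims=True)
    y = np.tile(y, (1, 1, 1, 2, 1, 2)).reshape(n, c, h, w)
    return x + (y - x) * (lod - np.floor(lod))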
#----------------------------------------------------------------------------
# Evaluate time-varying training parameters.
def training_schedule(
cur_nimg,
training_set,
num_gpus,
lod_initial_resolution = 4, # Image resolution used at the beginning.
lod_training_kimg = 600, # Thousands of real images to show before doubling the resolution.
lod_transition_kimg = 600, # Thousands of real images to show when fading in new layers.
minibatch_base = 16, # Maximum minibatch size, divided evenly among GPUs.
minibatch_dict = {}, # Resolution-specific overrides.
max_minibatch_per_gpu = {}, # Resolution-specific maximum minibatch size per GPU.
G_lrate_base = 0.001, # Learning rate for the generator.
G_lrate_dict = {}, # Resolution-specific overrides.
D_lrate_base = 0.001, # Learning rate for the discriminator.
D_lrate_dict = {}, # Resolution-specific overrides.
lrate_rampup_kimg = 0, # Duration of learning rate ramp-up.
tick_kimg_base = 160, # Default interval of progress snapshots.
    tick_kimg_dict          = {4: 160, 8: 140, 16: 120, 32: 100, 64: 80, 128: 60, 256: 40, 512: 30, 1024: 20}): # Resolution-specific overrides.
# Initialize result dict.
s = dnnlib.EasyDict()
s.kimg = cur_nimg / 1000.0
# Training phase.
phase_dur = lod_training_kimg + lod_transition_kimg
phase_idx = int(np.floor(s.kimg / phase_dur)) if phase_dur > 0 else 0
phase_kimg = s.kimg - phase_idx * phase_dur
# Level-of-detail and resolution.
s.lod = training_set.resolution_log2
s.lod -= np.floor(np.log2(lod_initial_resolution))
s.lod -= phase_idx
if lod_transition_kimg > 0:
s.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg
s.lod = max(s.lod, 0.0)
s.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(s.lod)))
# Minibatch size.
s.minibatch = minibatch_dict.get(s.resolution, minibatch_base)
s.minibatch -= s.minibatch % num_gpus
if s.resolution in max_minibatch_per_gpu:
s.minibatch = min(s.minibatch, max_minibatch_per_gpu[s.resolution] * num_gpus)
# Learning rate.
s.G_lrate = G_lrate_dict.get(s.resolution, G_lrate_base)
s.D_lrate = D_lrate_dict.get(s.resolution, D_lrate_base)
if lrate_rampup_kimg > 0:
rampup = min(s.kimg / lrate_rampup_kimg, 1.0)
s.G_lrate *= rampup
s.D_lrate *= rampup
# Other parameters.
s.tick_kimg = tick_kimg_dict.get(s.resolution, tick_kimg_base)
return s
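# Hedged worked example (hypothetical numbers, 1024x1024 dataset, defaults above):
# at cur_nimg = 3,000,000 (kimg = 3000) with lod_training_kimg = lod_transition_kimg = 600,
#   phase_dur  = 1200, phase_idx = floor(3000 / 1200) = 2, phase_kimg = 600
#   lod        = 10 - log2(4) - 2 - max(600 - 600, 0) / 600 = 6
#   resolution = 2 ** (10 - 6) = 16, i.e. the networks are currently training at 16x16.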
#----------------------------------------------------------------------------
# Main training script.
def training_loop(
submit_config,
G_args = {}, # Options for generator network.
D_args = {}, # Options for discriminator network.
G_opt_args = {}, # Options for generator optimizer.
D_opt_args = {}, # Options for discriminator optimizer.
G_loss_args = {}, # Options for generator loss.
D_loss_args = {}, # Options for discriminator loss.
dataset_args = {}, # Options for dataset.load_dataset().
sched_args = {}, # Options for train.TrainingSchedule.
grid_args = {}, # Options for train.setup_snapshot_image_grid().
metric_arg_list = [], # Options for MetricGroup.
tf_config = {}, # Options for tflib.init_tf().
G_smoothing_kimg = 10.0, # Half-life of the running average of generator weights.
D_repeats = 1, # How many times the discriminator is trained per G iteration.
minibatch_repeats = 4, # Number of minibatches to run before adjusting training parameters.
reset_opt_for_new_lod = True, # Reset optimizer internal state (e.g. Adam moments) when new layers are introduced?
total_kimg = 15000, # Total length of the training, measured in thousands of real images.
mirror_augment = False, # Enable mirror augment?
drange_net = [-1,1], # Dynamic range used when feeding image data to the networks.
image_snapshot_ticks = 1, # How often to export image snapshots?
network_snapshot_ticks = 10, # How often to export network snapshots?
save_tf_graph = False, # Include full TensorFlow computation graph in the tfevents file?
save_weight_histograms = False, # Include weight histograms in the tfevents file?
resume_run_id = None, # Run ID or network pkl to resume training from, None = start from scratch.
resume_snapshot = None, # Snapshot index to resume training from, None = autodetect.
resume_kimg = 0.0, # Assumed training progress at the beginning. Affects reporting and training schedule.
resume_time = 0.0): # Assumed wallclock time at the beginning. Affects reporting.
# Initialize dnnlib and TensorFlow.
ctx = dnnlib.RunContext(submit_config, train)
tflib.init_tf(tf_config)
# Load training set.
training_set = dataset.load_dataset(data_dir=config.data_dir, verbose=True, **dataset_args)
# Construct networks.
with tf.device('/gpu:0'):
if resume_run_id is not None:
network_pkl = misc.locate_network_pkl(resume_run_id, resume_snapshot)
print('Loading networks from "%s"...' % network_pkl)
G, D, Gs = misc.load_pkl(network_pkl)
else:
print('Constructing networks...')
G = tflib.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **G_args)
D = tflib.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **D_args)
Gs = G.clone('Gs')
G.print_layers(); D.print_layers()
print('Building TensorFlow graph...')
with tf.name_scope('Inputs'), tf.device('/cpu:0'):
lod_in = tf.placeholder(tf.float32, name='lod_in', shape=[])
lrate_in = tf.placeholder(tf.float32, name='lrate_in', shape=[])
minibatch_in = tf.placeholder(tf.int32, name='minibatch_in', shape=[])
minibatch_split = minibatch_in // submit_config.num_gpus
Gs_beta = 0.5 ** tf.div(tf.cast(minibatch_in, tf.float32), G_smoothing_kimg * 1000.0) if G_smoothing_kimg > 0.0 else 0.0
G_opt = tflib.Optimizer(name='TrainG', learning_rate=lrate_in, **G_opt_args)
D_opt = tflib.Optimizer(name='TrainD', learning_rate=lrate_in, **D_opt_args)
for gpu in range(submit_config.num_gpus):
with tf.name_scope('GPU%d' % gpu), tf.device('/gpu:%d' % gpu):
G_gpu = G if gpu == 0 else G.clone(G.name + '_shadow')
D_gpu = D if gpu == 0 else D.clone(D.name + '_shadow')
lod_assign_ops = [tf.assign(G_gpu.find_var('lod'), lod_in), tf.assign(D_gpu.find_var('lod'), lod_in)]
reals, labels = training_set.get_minibatch_tf()
reals = process_reals(reals, lod_in, mirror_augment, training_set.dynamic_range, drange_net)
with tf.name_scope('G_loss'), tf.control_dependencies(lod_assign_ops):
G_loss = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=G_opt, training_set=training_set, minibatch_size=minibatch_split, **G_loss_args)
with tf.name_scope('D_loss'), tf.control_dependencies(lod_assign_ops):
D_loss = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=D_opt, training_set=training_set, minibatch_size=minibatch_split, reals=reals, labels=labels, **D_loss_args)
G_opt.register_gradients(tf.reduce_mean(G_loss), G_gpu.trainables)
D_opt.register_gradients(tf.reduce_mean(D_loss), D_gpu.trainables)
G_train_op = G_opt.apply_updates()
D_train_op = D_opt.apply_updates()
Gs_update_op = Gs.setup_as_moving_average_of(G, beta=Gs_beta)
with tf.device('/gpu:0'):
try:
peak_gpu_mem_op = tf.contrib.memory_stats.MaxBytesInUse()
except tf.errors.NotFoundError:
peak_gpu_mem_op = tf.constant(0)
print('Setting up snapshot image grid...')
grid_size, grid_reals, grid_labels, grid_latents = misc.setup_snapshot_image_grid(G, training_set, **grid_args)
sched = training_schedule(cur_nimg=total_kimg*1000, training_set=training_set, num_gpus=submit_config.num_gpus, **sched_args)
grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=sched.minibatch//submit_config.num_gpus)
print('Setting up run dir...')
misc.save_image_grid(grid_reals, os.path.join(submit_config.run_dir, 'reals.png'), drange=training_set.dynamic_range, grid_size=grid_size)
misc.save_image_grid(grid_fakes, os.path.join(submit_config.run_dir, 'fakes%06d.png' % resume_kimg), drange=drange_net, grid_size=grid_size)
summary_log = tf.summary.FileWriter(submit_config.run_dir)
if save_tf_graph:
summary_log.add_graph(tf.get_default_graph())
if save_weight_histograms:
G.setup_weight_histograms(); D.setup_weight_histograms()
metrics = metric_base.MetricGroup(metric_arg_list)
print('Training...\n')
ctx.update('', cur_epoch=resume_kimg, max_epoch=total_kimg)
maintenance_time = ctx.get_last_update_interval()
cur_nimg = int(resume_kimg * 1000)
cur_tick = 0
tick_start_nimg = cur_nimg
prev_lod = -1.0
while cur_nimg < total_kimg * 1000:
if ctx.should_stop(): break
# Choose training parameters and configure training ops.
sched = training_schedule(cur_nimg=cur_nimg, training_set=training_set, num_gpus=submit_config.num_gpus, **sched_args)
training_set.configure(sched.minibatch // submit_config.num_gpus, sched.lod)
if reset_opt_for_new_lod:
if np.floor(sched.lod) != np.floor(prev_lod) or np.ceil(sched.lod) != np.ceil(prev_lod):
G_opt.reset_optimizer_state(); D_opt.reset_optimizer_state()
prev_lod = sched.lod
# Run training ops.
for _mb_repeat in range(minibatch_repeats):
for _D_repeat in range(D_repeats):
tflib.run([D_train_op, Gs_update_op], {lod_in: sched.lod, lrate_in: sched.D_lrate, minibatch_in: sched.minibatch})
cur_nimg += sched.minibatch
tflib.run([G_train_op], {lod_in: sched.lod, lrate_in: sched.G_lrate, minibatch_in: sched.minibatch})
# Perform maintenance tasks once per tick.
done = (cur_nimg >= total_kimg * 1000)
if cur_nimg >= tick_start_nimg + sched.tick_kimg * 1000 or done:
cur_tick += 1
tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0
tick_start_nimg = cur_nimg
tick_time = ctx.get_time_since_last_update()
total_time = ctx.get_time_since_start() + resume_time
# Report progress.
print('tick %-5d kimg %-8.1f lod %-5.2f minibatch %-4d time %-12s sec/tick %-7.1f sec/kimg %-7.2f maintenance %-6.1f gpumem %-4.1f' % (
autosummary('Progress/tick', cur_tick),
autosummary('Progress/kimg', cur_nimg / 1000.0),
autosummary('Progress/lod', sched.lod),
autosummary('Progress/minibatch', sched.minibatch),
dnnlib.util.format_time(autosummary('Timing/total_sec', total_time)),
autosummary('Timing/sec_per_tick', tick_time),
autosummary('Timing/sec_per_kimg', tick_time / tick_kimg),
autosummary('Timing/maintenance_sec', maintenance_time),
autosummary('Resources/peak_gpu_mem_gb', peak_gpu_mem_op.eval() / 2**30)))
autosummary('Timing/total_hours', total_time / (60.0 * 60.0))
autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0))
# Save snapshots.
if cur_tick % image_snapshot_ticks == 0 or done:
grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=sched.minibatch//submit_config.num_gpus)
misc.save_image_grid(grid_fakes, os.path.join(submit_config.run_dir, 'fakes%06d.png' % (cur_nimg // 1000)), drange=drange_net, grid_size=grid_size)
if cur_tick % network_snapshot_ticks == 0 or done or cur_tick == 1:
pkl = os.path.join(submit_config.run_dir, 'network-snapshot-%06d.pkl' % (cur_nimg // 1000))
misc.save_pkl((G, D, Gs), pkl)
metrics.run(pkl, run_dir=submit_config.run_dir, num_gpus=submit_config.num_gpus, tf_config=tf_config)
# Update summaries and RunContext.
metrics.update_autosummaries()
tflib.autosummary.save_summaries(summary_log, cur_nimg)
ctx.update('%.2f' % sched.lod, cur_epoch=cur_nimg // 1000, max_epoch=total_kimg)
maintenance_time = ctx.get_last_update_interval() - tick_time
# Write final results.
misc.save_pkl((G, D, Gs), os.path.join(submit_config.run_dir, 'network-final.pkl'))
summary_log.close()
ctx.close()
#----------------------------------------------------------------------------
|
stylegan-master
|
training/training_loop.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Multi-resolution input data pipeline."""
import os
import glob
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
#----------------------------------------------------------------------------
# Parse individual image from a tfrecords file.
def parse_tfrecord_tf(record):
features = tf.parse_single_example(record, features={
'shape': tf.FixedLenFeature([3], tf.int64),
'data': tf.FixedLenFeature([], tf.string)})
data = tf.decode_raw(features['data'], tf.uint8)
return tf.reshape(data, features['shape'])
def parse_tfrecord_np(record):
ex = tf.train.Example()
ex.ParseFromString(record)
shape = ex.features.feature['shape'].int64_list.value # temporary pylint workaround # pylint: disable=no-member
data = ex.features.feature['data'].bytes_list.value[0] # temporary pylint workaround # pylint: disable=no-member
    return np.frombuffer(data, np.uint8).reshape(shape)  # frombuffer replaces the deprecated np.fromstring; the array is only read, never written.
#----------------------------------------------------------------------------
# Dataset class that loads data from tfrecords files.
class TFRecordDataset:
def __init__(self,
tfrecord_dir, # Directory containing a collection of tfrecords files.
resolution = None, # Dataset resolution, None = autodetect.
label_file = None, # Relative path of the labels file, None = autodetect.
max_label_size = 0, # 0 = no labels, 'full' = full labels, <int> = N first label components.
repeat = True, # Repeat dataset indefinitely.
shuffle_mb = 4096, # Shuffle data within specified window (megabytes), 0 = disable shuffling.
prefetch_mb = 2048, # Amount of data to prefetch (megabytes), 0 = disable prefetching.
buffer_mb = 256, # Read buffer size (megabytes).
num_threads = 2): # Number of concurrent threads.
self.tfrecord_dir = tfrecord_dir
self.resolution = None
self.resolution_log2 = None
self.shape = [] # [channel, height, width]
self.dtype = 'uint8'
self.dynamic_range = [0, 255]
self.label_file = label_file
self.label_size = None # [component]
self.label_dtype = None
self._np_labels = None
self._tf_minibatch_in = None
self._tf_labels_var = None
self._tf_labels_dataset = None
self._tf_datasets = dict()
self._tf_iterator = None
self._tf_init_ops = dict()
self._tf_minibatch_np = None
self._cur_minibatch = -1
self._cur_lod = -1
# List tfrecords files and inspect their shapes.
assert os.path.isdir(self.tfrecord_dir)
tfr_files = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.tfrecords')))
assert len(tfr_files) >= 1
tfr_shapes = []
for tfr_file in tfr_files:
tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
for record in tf.python_io.tf_record_iterator(tfr_file, tfr_opt):
tfr_shapes.append(parse_tfrecord_np(record).shape)
break
# Autodetect label filename.
if self.label_file is None:
guess = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*.labels')))
if len(guess):
self.label_file = guess[0]
elif not os.path.isfile(self.label_file):
guess = os.path.join(self.tfrecord_dir, self.label_file)
if os.path.isfile(guess):
self.label_file = guess
# Determine shape and resolution.
max_shape = max(tfr_shapes, key=np.prod)
self.resolution = resolution if resolution is not None else max_shape[1]
self.resolution_log2 = int(np.log2(self.resolution))
self.shape = [max_shape[0], self.resolution, self.resolution]
tfr_lods = [self.resolution_log2 - int(np.log2(shape[1])) for shape in tfr_shapes]
assert all(shape[0] == max_shape[0] for shape in tfr_shapes)
assert all(shape[1] == shape[2] for shape in tfr_shapes)
assert all(shape[1] == self.resolution // (2**lod) for shape, lod in zip(tfr_shapes, tfr_lods))
assert all(lod in tfr_lods for lod in range(self.resolution_log2 - 1))
# Load labels.
assert max_label_size == 'full' or max_label_size >= 0
self._np_labels = np.zeros([1<<20, 0], dtype=np.float32)
if self.label_file is not None and max_label_size != 0:
self._np_labels = np.load(self.label_file)
assert self._np_labels.ndim == 2
if max_label_size != 'full' and self._np_labels.shape[1] > max_label_size:
self._np_labels = self._np_labels[:, :max_label_size]
self.label_size = self._np_labels.shape[1]
self.label_dtype = self._np_labels.dtype.name
# Build TF expressions.
with tf.name_scope('Dataset'), tf.device('/cpu:0'):
self._tf_minibatch_in = tf.placeholder(tf.int64, name='minibatch_in', shape=[])
self._tf_labels_var = tflib.create_var_with_large_initial_value(self._np_labels, name='labels_var')
self._tf_labels_dataset = tf.data.Dataset.from_tensor_slices(self._tf_labels_var)
for tfr_file, tfr_shape, tfr_lod in zip(tfr_files, tfr_shapes, tfr_lods):
if tfr_lod < 0:
continue
dset = tf.data.TFRecordDataset(tfr_file, compression_type='', buffer_size=buffer_mb<<20)
dset = dset.map(parse_tfrecord_tf, num_parallel_calls=num_threads)
dset = tf.data.Dataset.zip((dset, self._tf_labels_dataset))
bytes_per_item = np.prod(tfr_shape) * np.dtype(self.dtype).itemsize
if shuffle_mb > 0:
dset = dset.shuffle(((shuffle_mb << 20) - 1) // bytes_per_item + 1)
if repeat:
dset = dset.repeat()
if prefetch_mb > 0:
dset = dset.prefetch(((prefetch_mb << 20) - 1) // bytes_per_item + 1)
dset = dset.batch(self._tf_minibatch_in)
self._tf_datasets[tfr_lod] = dset
self._tf_iterator = tf.data.Iterator.from_structure(self._tf_datasets[0].output_types, self._tf_datasets[0].output_shapes)
self._tf_init_ops = {lod: self._tf_iterator.make_initializer(dset) for lod, dset in self._tf_datasets.items()}
# Use the given minibatch size and level-of-detail for the data returned by get_minibatch_tf().
def configure(self, minibatch_size, lod=0):
lod = int(np.floor(lod))
assert minibatch_size >= 1 and lod in self._tf_datasets
if self._cur_minibatch != minibatch_size or self._cur_lod != lod:
self._tf_init_ops[lod].run({self._tf_minibatch_in: minibatch_size})
self._cur_minibatch = minibatch_size
self._cur_lod = lod
# Get next minibatch as TensorFlow expressions.
def get_minibatch_tf(self): # => images, labels
return self._tf_iterator.get_next()
# Get next minibatch as NumPy arrays.
def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels
self.configure(minibatch_size, lod)
if self._tf_minibatch_np is None:
self._tf_minibatch_np = self.get_minibatch_tf()
return tflib.run(self._tf_minibatch_np)
# Get random labels as TensorFlow expression.
def get_random_labels_tf(self, minibatch_size): # => labels
if self.label_size > 0:
with tf.device('/cpu:0'):
return tf.gather(self._tf_labels_var, tf.random_uniform([minibatch_size], 0, self._np_labels.shape[0], dtype=tf.int32))
return tf.zeros([minibatch_size, 0], self.label_dtype)
# Get random labels as NumPy array.
def get_random_labels_np(self, minibatch_size): # => labels
if self.label_size > 0:
return self._np_labels[np.random.randint(self._np_labels.shape[0], size=[minibatch_size])]
return np.zeros([minibatch_size, 0], self.label_dtype)
#----------------------------------------------------------------------------
# Base class for datasets that are generated on the fly.
class SyntheticDataset:
def __init__(self, resolution=1024, num_channels=3, dtype='uint8', dynamic_range=[0,255], label_size=0, label_dtype='float32'):
self.resolution = resolution
self.resolution_log2 = int(np.log2(resolution))
self.shape = [num_channels, resolution, resolution]
self.dtype = dtype
self.dynamic_range = dynamic_range
self.label_size = label_size
self.label_dtype = label_dtype
self._tf_minibatch_var = None
self._tf_lod_var = None
self._tf_minibatch_np = None
self._tf_labels_np = None
assert self.resolution == 2 ** self.resolution_log2
with tf.name_scope('Dataset'):
self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var')
def configure(self, minibatch_size, lod=0):
lod = int(np.floor(lod))
assert minibatch_size >= 1 and 0 <= lod <= self.resolution_log2
tflib.set_vars({self._tf_minibatch_var: minibatch_size, self._tf_lod_var: lod})
def get_minibatch_tf(self): # => images, labels
with tf.name_scope('SyntheticDataset'):
shrink = tf.cast(2.0 ** tf.cast(self._tf_lod_var, tf.float32), tf.int32)
shape = [self.shape[0], self.shape[1] // shrink, self.shape[2] // shrink]
images = self._generate_images(self._tf_minibatch_var, self._tf_lod_var, shape)
labels = self._generate_labels(self._tf_minibatch_var)
return images, labels
def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels
self.configure(minibatch_size, lod)
if self._tf_minibatch_np is None:
self._tf_minibatch_np = self.get_minibatch_tf()
return tflib.run(self._tf_minibatch_np)
def get_random_labels_tf(self, minibatch_size): # => labels
with tf.name_scope('SyntheticDataset'):
return self._generate_labels(minibatch_size)
def get_random_labels_np(self, minibatch_size): # => labels
self.configure(minibatch_size)
if self._tf_labels_np is None:
self._tf_labels_np = self.get_random_labels_tf(minibatch_size)
return tflib.run(self._tf_labels_np)
def _generate_images(self, minibatch, lod, shape): # to be overridden by subclasses # pylint: disable=unused-argument
return tf.zeros([minibatch] + shape, self.dtype)
def _generate_labels(self, minibatch): # to be overridden by subclasses
return tf.zeros([minibatch, self.label_size], self.label_dtype)
#----------------------------------------------------------------------------
# Helper func for constructing a dataset object using the given options.
def load_dataset(class_name='training.dataset.TFRecordDataset', data_dir=None, verbose=False, **kwargs):
adjusted_kwargs = dict(kwargs)
if 'tfrecord_dir' in adjusted_kwargs and data_dir is not None:
adjusted_kwargs['tfrecord_dir'] = os.path.join(data_dir, adjusted_kwargs['tfrecord_dir'])
if verbose:
print('Streaming data using %s...' % class_name)
dataset = dnnlib.util.get_obj_by_name(class_name)(**adjusted_kwargs)
if verbose:
print('Dataset shape =', np.int32(dataset.shape).tolist())
print('Dynamic range =', dataset.dynamic_range)
print('Label size =', dataset.label_size)
return dataset
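# Hedged usage sketch (hypothetical directory names; assumes tflib.init_tf() has been called):
#   training_set = load_dataset(data_dir='datasets', tfrecord_dir='ffhq', max_label_size=0, verbose=True)
#   training_set.configure(minibatch_size=8, lod=0)
#   images, labels = training_set.get_minibatch_np(8)   # images: [8, 3, 1024, 1024] uint8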
#----------------------------------------------------------------------------
|
stylegan-master
|
training/dataset.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Loss functions."""
import tensorflow as tf
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
#----------------------------------------------------------------------------
# Convenience func that casts all of its arguments to tf.float32.
def fp32(*values):
if len(values) == 1 and isinstance(values[0], tuple):
values = values[0]
values = tuple(tf.cast(v, tf.float32) for v in values)
return values if len(values) >= 2 else values[0]
#----------------------------------------------------------------------------
# WGAN & WGAN-GP loss functions.
def G_wgan(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
labels = training_set.get_random_labels_tf(minibatch_size)
fake_images_out = G.get_output_for(latents, labels, is_training=True)
fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
loss = -fake_scores_out
return loss
def D_wgan(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument
wgan_epsilon = 0.001): # Weight for the epsilon term, \epsilon_{drift}.
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
fake_images_out = G.get_output_for(latents, labels, is_training=True)
real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
real_scores_out = autosummary('Loss/scores/real', real_scores_out)
fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
loss = fake_scores_out - real_scores_out
with tf.name_scope('EpsilonPenalty'):
epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
loss += epsilon_penalty * wgan_epsilon
return loss
def D_wgan_gp(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument
wgan_lambda = 10.0, # Weight for the gradient penalty term.
wgan_epsilon = 0.001, # Weight for the epsilon term, \epsilon_{drift}.
wgan_target = 1.0): # Target value for gradient magnitudes.
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
fake_images_out = G.get_output_for(latents, labels, is_training=True)
real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
real_scores_out = autosummary('Loss/scores/real', real_scores_out)
fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
loss = fake_scores_out - real_scores_out
with tf.name_scope('GradientPenalty'):
mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
mixed_scores_out = fp32(D.get_output_for(mixed_images_out, labels, is_training=True))
mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out)
mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
mixed_norms = autosummary('Loss/mixed_norms', mixed_norms)
gradient_penalty = tf.square(mixed_norms - wgan_target)
loss += gradient_penalty * (wgan_lambda / (wgan_target**2))
with tf.name_scope('EpsilonPenalty'):
epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
loss += epsilon_penalty * wgan_epsilon
return loss
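# Hedged note on the penalty above: for mixed samples x_hat drawn on random lines between
# reals and fakes, the added term is
#   (wgan_lambda / wgan_target**2) * (||grad_{x_hat} D(x_hat)|| - wgan_target)**2,
# which with the defaults (lambda=10, target=1) is the standard WGAN-GP penalty; the
# epsilon term wgan_epsilon * D(reals)**2 additionally keeps the real scores from drifting.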
#----------------------------------------------------------------------------
# Hinge loss functions. (Use G_wgan with these)
def D_hinge(G, D, opt, training_set, minibatch_size, reals, labels): # pylint: disable=unused-argument
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
fake_images_out = G.get_output_for(latents, labels, is_training=True)
real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
real_scores_out = autosummary('Loss/scores/real', real_scores_out)
fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
loss = tf.maximum(0., 1.+fake_scores_out) + tf.maximum(0., 1.-real_scores_out)
return loss
def D_hinge_gp(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument
wgan_lambda = 10.0, # Weight for the gradient penalty term.
wgan_target = 1.0): # Target value for gradient magnitudes.
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
fake_images_out = G.get_output_for(latents, labels, is_training=True)
real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
real_scores_out = autosummary('Loss/scores/real', real_scores_out)
fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
loss = tf.maximum(0., 1.+fake_scores_out) + tf.maximum(0., 1.-real_scores_out)
with tf.name_scope('GradientPenalty'):
mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
mixed_scores_out = fp32(D.get_output_for(mixed_images_out, labels, is_training=True))
mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out)
mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
mixed_norms = autosummary('Loss/mixed_norms', mixed_norms)
gradient_penalty = tf.square(mixed_norms - wgan_target)
loss += gradient_penalty * (wgan_lambda / (wgan_target**2))
return loss
#----------------------------------------------------------------------------
# Loss functions advocated by the paper
# "Which Training Methods for GANs do actually Converge?"
def G_logistic_saturating(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
labels = training_set.get_random_labels_tf(minibatch_size)
fake_images_out = G.get_output_for(latents, labels, is_training=True)
fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
loss = -tf.nn.softplus(fake_scores_out) # log(1 - logistic(fake_scores_out))
return loss
def G_logistic_nonsaturating(G, D, opt, training_set, minibatch_size): # pylint: disable=unused-argument
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
labels = training_set.get_random_labels_tf(minibatch_size)
fake_images_out = G.get_output_for(latents, labels, is_training=True)
fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
loss = tf.nn.softplus(-fake_scores_out) # -log(logistic(fake_scores_out))
return loss
def D_logistic(G, D, opt, training_set, minibatch_size, reals, labels): # pylint: disable=unused-argument
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
fake_images_out = G.get_output_for(latents, labels, is_training=True)
real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
real_scores_out = autosummary('Loss/scores/real', real_scores_out)
fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
loss = tf.nn.softplus(fake_scores_out) # -log(1 - logistic(fake_scores_out))
loss += tf.nn.softplus(-real_scores_out) # -log(logistic(real_scores_out)) # temporary pylint workaround # pylint: disable=invalid-unary-operand-type
return loss
def D_logistic_simplegp(G, D, opt, training_set, minibatch_size, reals, labels, r1_gamma=10.0, r2_gamma=0.0): # pylint: disable=unused-argument
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
fake_images_out = G.get_output_for(latents, labels, is_training=True)
real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
real_scores_out = autosummary('Loss/scores/real', real_scores_out)
fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
loss = tf.nn.softplus(fake_scores_out) # -log(1 - logistic(fake_scores_out))
loss += tf.nn.softplus(-real_scores_out) # -log(logistic(real_scores_out)) # temporary pylint workaround # pylint: disable=invalid-unary-operand-type
if r1_gamma != 0.0:
with tf.name_scope('R1Penalty'):
real_loss = opt.apply_loss_scaling(tf.reduce_sum(real_scores_out))
real_grads = opt.undo_loss_scaling(fp32(tf.gradients(real_loss, [reals])[0]))
r1_penalty = tf.reduce_sum(tf.square(real_grads), axis=[1,2,3])
r1_penalty = autosummary('Loss/r1_penalty', r1_penalty)
loss += r1_penalty * (r1_gamma * 0.5)
if r2_gamma != 0.0:
with tf.name_scope('R2Penalty'):
fake_loss = opt.apply_loss_scaling(tf.reduce_sum(fake_scores_out))
fake_grads = opt.undo_loss_scaling(fp32(tf.gradients(fake_loss, [fake_images_out])[0]))
r2_penalty = tf.reduce_sum(tf.square(fake_grads), axis=[1,2,3])
r2_penalty = autosummary('Loss/r2_penalty', r2_penalty)
loss += r2_penalty * (r2_gamma * 0.5)
return loss
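# Hedged note: with the default r1_gamma=10 and r2_gamma=0 this is the non-saturating
# logistic loss with R1 regularization, adding
#   (r1_gamma / 2) * ||grad_x D(x)||**2   evaluated at real images x only,
# which is the loss configuration used by the official StyleGAN training configs.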
#----------------------------------------------------------------------------
|
stylegan-master
|
training/loss.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Network architectures used in the ProGAN paper."""
import numpy as np
import tensorflow as tf
# NOTE: Do not import any application-specific modules here!
# Specify all network parameters as kwargs.
#----------------------------------------------------------------------------
def lerp(a, b, t): return a + (b - a) * t
def lerp_clip(a, b, t): return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
def cset(cur_lambda, new_cond, new_lambda): return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
#----------------------------------------------------------------------------
# Get/create weight tensor for a convolutional or fully-connected layer.
def get_weight(shape, gain=np.sqrt(2), use_wscale=False):
fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out]
std = gain / np.sqrt(fan_in) # He init
if use_wscale:
wscale = tf.constant(np.float32(std), name='wscale')
w = tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal()) * wscale
else:
w = tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal(0, std))
return w
#----------------------------------------------------------------------------
# Fully-connected layer.
def dense(x, fmaps, gain=np.sqrt(2), use_wscale=False):
if len(x.shape) > 2:
x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
w = tf.cast(w, x.dtype)
return tf.matmul(x, w)
#----------------------------------------------------------------------------
# Convolutional layer.
def conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
assert kernel >= 1 and kernel % 2 == 1
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
w = tf.cast(w, x.dtype)
return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Apply bias to the given activation tensor.
def apply_bias(x):
b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros())
b = tf.cast(b, x.dtype)
if len(x.shape) == 2:
return x + b
return x + tf.reshape(b, [1, -1, 1, 1])
#----------------------------------------------------------------------------
# Leaky ReLU activation. Same as tf.nn.leaky_relu, but supports FP16.
def leaky_relu(x, alpha=0.2):
with tf.name_scope('LeakyRelu'):
alpha = tf.constant(alpha, dtype=x.dtype, name='alpha')
return tf.maximum(x * alpha, x)
#----------------------------------------------------------------------------
# Nearest-neighbor upscaling layer.
def upscale2d(x, factor=2):
assert isinstance(factor, int) and factor >= 1
if factor == 1: return x
with tf.variable_scope('Upscale2D'):
s = x.shape
x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
x = tf.tile(x, [1, 1, 1, factor, 1, factor])
x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
return x
#----------------------------------------------------------------------------
# Fused upscale2d + conv2d.
# Faster and uses less memory than performing the operations separately.
def upscale2d_conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
assert kernel >= 1 and kernel % 2 == 1
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
w = tf.transpose(w, [0, 1, 3, 2]) # [kernel, kernel, fmaps_out, fmaps_in]
w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]])
w = tf.cast(w, x.dtype)
os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2]
return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Box filter downscaling layer.
def downscale2d(x, factor=2):
assert isinstance(factor, int) and factor >= 1
if factor == 1: return x
with tf.variable_scope('Downscale2D'):
ksize = [1, 1, factor, factor]
return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') # NOTE: requires tf_config['graph_options.place_pruned_graph'] = True
#----------------------------------------------------------------------------
# Fused conv2d + downscale2d.
# Faster and uses less memory than performing the operations separately.
def conv2d_downscale2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
assert kernel >= 1 and kernel % 2 == 1
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25
w = tf.cast(w, x.dtype)
return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW')
#----------------------------------------------------------------------------
# Pixelwise feature vector normalization.
def pixel_norm(x, epsilon=1e-8):
with tf.variable_scope('PixelNorm'):
return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon)
#----------------------------------------------------------------------------
# Minibatch standard deviation.
def minibatch_stddev_layer(x, group_size=4, num_new_features=1):
with tf.variable_scope('MinibatchStddev'):
group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size.
s = x.shape # [NCHW] Input shape.
y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c.
y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32.
y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group.
y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group.
y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group.
y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels.
        y = tf.reduce_mean(y, axis=[2])                          # [Mn11]  Collapse the singleton feature-map axis.
y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type.
y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels.
return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap.
#----------------------------------------------------------------------------
# Networks used in the ProgressiveGAN paper.
def G_paper(
latents_in, # First input: Latent vectors [minibatch, latent_size].
labels_in, # Second input: Labels [minibatch, label_size].
num_channels = 1, # Number of output color channels. Overridden based on dataset.
resolution = 32, # Output resolution. Overridden based on dataset.
label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
fmap_base = 8192, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_max = 512, # Maximum number of feature maps in any layer.
latent_size = None, # Dimensionality of the latent vectors. None = min(fmap_base, fmap_max).
normalize_latents = True, # Normalize latent vectors before feeding them to the network?
use_wscale = True, # Enable equalized learning rate?
use_pixelnorm = True, # Enable pixelwise feature vector normalization?
pixelnorm_epsilon = 1e-8, # Constant epsilon for pixelwise feature vector normalization.
use_leakyrelu = True, # True = leaky ReLU, False = ReLU.
dtype = 'float32', # Data type to use for activations and outputs.
fused_scale = True, # True = use fused upscale2d + conv2d, False = separate upscale2d layers.
structure = None, # 'linear' = human-readable, 'recursive' = efficient, None = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
def PN(x): return pixel_norm(x, epsilon=pixelnorm_epsilon) if use_pixelnorm else x
if latent_size is None: latent_size = nf(0)
if structure is None: structure = 'linear' if is_template_graph else 'recursive'
act = leaky_relu if use_leakyrelu else tf.nn.relu
latents_in.set_shape([None, latent_size])
labels_in.set_shape([None, label_size])
combo_in = tf.cast(tf.concat([latents_in, labels_in], axis=1), dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
images_out = None
# Building blocks.
def block(x, res): # res = 2..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
if res == 2: # 4x4
if normalize_latents: x = pixel_norm(x, epsilon=pixelnorm_epsilon)
with tf.variable_scope('Dense'):
x = dense(x, fmaps=nf(res-1)*16, gain=np.sqrt(2)/4, use_wscale=use_wscale) # override gain to match the original Theano implementation
x = tf.reshape(x, [-1, nf(res-1), 4, 4])
x = PN(act(apply_bias(x)))
with tf.variable_scope('Conv'):
x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
else: # 8x8 and up
if fused_scale:
with tf.variable_scope('Conv0_up'):
x = PN(act(apply_bias(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
else:
x = upscale2d(x)
with tf.variable_scope('Conv0'):
x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
with tf.variable_scope('Conv1'):
x = PN(act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale))))
return x
def torgb(x, res): # res = 2..resolution_log2
lod = resolution_log2 - res
with tf.variable_scope('ToRGB_lod%d' % lod):
return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale))
# Linear structure: simple but inefficient.
if structure == 'linear':
x = block(combo_in, 2)
images_out = torgb(x, 2)
for res in range(3, resolution_log2 + 1):
lod = resolution_log2 - res
x = block(x, res)
img = torgb(x, res)
images_out = upscale2d(images_out)
with tf.variable_scope('Grow_lod%d' % lod):
images_out = lerp_clip(img, images_out, lod_in - lod)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def grow(x, res, lod):
y = block(x, res)
img = lambda: upscale2d(torgb(y, res), 2**lod)
if res > 2: img = cset(img, (lod_in > lod), lambda: upscale2d(lerp(torgb(y, res), upscale2d(torgb(x, res - 1)), lod_in - lod), 2**lod))
if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1))
return img()
images_out = grow(combo_in, 2, resolution_log2 - 2)
assert images_out.dtype == tf.as_dtype(dtype)
images_out = tf.identity(images_out, name='images_out')
return images_out
def D_paper(
images_in, # First input: Images [minibatch, channel, height, width].
labels_in, # Second input: Labels [minibatch, label_size].
num_channels = 1, # Number of input color channels. Overridden based on dataset.
resolution = 32, # Input resolution. Overridden based on dataset.
label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
fmap_base = 8192, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_max = 512, # Maximum number of feature maps in any layer.
use_wscale = True, # Enable equalized learning rate?
mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable.
dtype = 'float32', # Data type to use for activations and outputs.
fused_scale = True, # True = use fused conv2d + downscale2d, False = separate downscale2d layers.
structure = None, # 'linear' = human-readable, 'recursive' = efficient, None = select automatically
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
if structure is None: structure = 'linear' if is_template_graph else 'recursive'
act = leaky_relu
images_in.set_shape([None, num_channels, resolution, resolution])
labels_in.set_shape([None, label_size])
images_in = tf.cast(images_in, dtype)
labels_in = tf.cast(labels_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
scores_out = None
# Building blocks.
def fromrgb(x, res): # res = 2..resolution_log2
with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)):
return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, use_wscale=use_wscale)))
def block(x, res): # res = 2..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
if res >= 3: # 8x8 and up
with tf.variable_scope('Conv0'):
x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))
if fused_scale:
with tf.variable_scope('Conv1_down'):
x = act(apply_bias(conv2d_downscale2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale)))
else:
with tf.variable_scope('Conv1'):
x = act(apply_bias(conv2d(x, fmaps=nf(res-2), kernel=3, use_wscale=use_wscale)))
x = downscale2d(x)
else: # 4x4
if mbstd_group_size > 1:
x = minibatch_stddev_layer(x, mbstd_group_size)
with tf.variable_scope('Conv'):
x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, use_wscale=use_wscale)))
with tf.variable_scope('Dense0'):
x = act(apply_bias(dense(x, fmaps=nf(res-2), use_wscale=use_wscale)))
with tf.variable_scope('Dense1'):
x = apply_bias(dense(x, fmaps=1, gain=1, use_wscale=use_wscale))
return x
# Linear structure: simple but inefficient.
if structure == 'linear':
img = images_in
x = fromrgb(img, resolution_log2)
for res in range(resolution_log2, 2, -1):
lod = resolution_log2 - res
x = block(x, res)
img = downscale2d(img)
y = fromrgb(img, res - 1)
with tf.variable_scope('Grow_lod%d' % lod):
x = lerp_clip(x, y, lod_in - lod)
scores_out = block(x, 2)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def grow(res, lod):
x = lambda: fromrgb(downscale2d(images_in, 2**lod), res)
if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1))
x = block(x(), res); y = lambda: x
if res > 2: y = cset(y, (lod_in > lod), lambda: lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod))
return y()
scores_out = grow(2, resolution_log2 - 2)
assert scores_out.dtype == tf.as_dtype(dtype)
scores_out = tf.identity(scores_out, name='scores_out')
return scores_out
#----------------------------------------------------------------------------
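# Usage sketch (illustrative): G_paper and D_paper are normally instantiated
# through dnnlib.tflib.Network, which calls them as template-graph build
# functions. The resolution and channel counts below are assumptions chosen
# only for the example.
#
#     import numpy as np
#     import dnnlib.tflib as tflib
#
#     tflib.init_tf()
#     G = tflib.Network('G', func_name='training.networks_progan.G_paper',
#                       num_channels=3, resolution=128)
#     D = tflib.Network('D', func_name='training.networks_progan.D_paper',
#                       num_channels=3, resolution=128)
#     latents = np.random.randn(4, *G.input_shape[1:]).astype(np.float32)
#     labels = np.zeros([4, 0], dtype=np.float32)
#     fakes = G.run(latents, labels)      # [4, 3, 128, 128], float32, NCHW
#     scores = D.run(fakes, labels)       # [4, 1] discriminator scores
#----------------------------------------------------------------------------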
|
stylegan-master
|
training/networks_progan.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Miscellaneous utility classes and functions."""
import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
# Util classes
# ------------------------------------------------------------------------------------------
class EasyDict(dict):
"""Convenience class that behaves like a dict but allows access with the attribute syntax."""
def __getattr__(self, name: str) -> Any:
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name: str, value: Any) -> None:
self[name] = value
def __delattr__(self, name: str) -> None:
del self[name]
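# Doctest-style sketch of the attribute/key equivalence (illustrative):
#
#     cfg = EasyDict(learning_rate=0.001)
#     cfg.beta1 = 0.0                  # attribute writes go to the dict...
#     assert cfg['beta1'] == 0.0       # ...and stay visible via key access
#     del cfg.learning_rate            # __delattr__ maps to dict deletion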
class Logger(object):
"""Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
self.file = None
if file_name is not None:
self.file = open(file_name, file_mode)
self.should_flush = should_flush
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = self
sys.stderr = self
def __enter__(self) -> "Logger":
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.close()
def write(self, text: str) -> None:
"""Write text to stdout (and a file) and optionally flush."""
if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
return
if self.file is not None:
self.file.write(text)
self.stdout.write(text)
if self.should_flush:
self.flush()
def flush(self) -> None:
"""Flush written text to both stdout and a file, if open."""
if self.file is not None:
self.file.flush()
self.stdout.flush()
def close(self) -> None:
"""Flush, close possible files, and remove stdout/stderr mirroring."""
self.flush()
# if using multiple loggers, prevent closing in wrong order
if sys.stdout is self:
sys.stdout = self.stdout
if sys.stderr is self:
sys.stderr = self.stderr
if self.file is not None:
self.file.close()
# Small util functions
# ------------------------------------------------------------------------------------------
def format_time(seconds: Union[int, float]) -> str:
"""Convert the seconds to human readable string with days, hours, minutes and seconds."""
s = int(np.rint(seconds))
if s < 60:
return "{0}s".format(s)
elif s < 60 * 60:
return "{0}m {1:02}s".format(s // 60, s % 60)
elif s < 24 * 60 * 60:
return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
else:
return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
def ask_yes_no(question: str) -> bool:
"""Ask the user the question until the user inputs a valid answer."""
while True:
try:
print("{0} [y/n]".format(question))
return strtobool(input().lower())
except ValueError:
pass
def tuple_product(t: Tuple) -> Any:
"""Calculate the product of the tuple elements."""
result = 1
for v in t:
result *= v
return result
_str_to_ctype = {
"uint8": ctypes.c_ubyte,
"uint16": ctypes.c_uint16,
"uint32": ctypes.c_uint32,
"uint64": ctypes.c_uint64,
"int8": ctypes.c_byte,
"int16": ctypes.c_int16,
"int32": ctypes.c_int32,
"int64": ctypes.c_int64,
"float32": ctypes.c_float,
"float64": ctypes.c_double
}
def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
"""Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
type_str = None
if isinstance(type_obj, str):
type_str = type_obj
elif hasattr(type_obj, "__name__"):
type_str = type_obj.__name__
elif hasattr(type_obj, "name"):
type_str = type_obj.name
else:
raise RuntimeError("Cannot infer type name from input")
assert type_str in _str_to_ctype.keys()
my_dtype = np.dtype(type_str)
my_ctype = _str_to_ctype[type_str]
assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
return my_dtype, my_ctype
def is_pickleable(obj: Any) -> bool:
try:
with io.BytesIO() as stream:
pickle.dump(obj, stream)
return True
except:
return False
# Functionality to import modules/objects by name, and call functions by name
# ------------------------------------------------------------------------------------------
def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
"""Searches for the underlying module behind the name to some python object.
Returns the module and the object name (original name with module part removed)."""
# allow convenience shorthands, substitute them by full names
obj_name = re.sub("^np.", "numpy.", obj_name)
obj_name = re.sub("^tf.", "tensorflow.", obj_name)
# list alternatives for (module_name, local_obj_name)
parts = obj_name.split(".")
name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)]
# try each alternative in turn
for module_name, local_obj_name in name_pairs:
try:
module = importlib.import_module(module_name) # may raise ImportError
get_obj_from_module(module, local_obj_name) # may raise AttributeError
return module, local_obj_name
except:
pass
# maybe some of the modules themselves contain errors?
for module_name, _local_obj_name in name_pairs:
try:
importlib.import_module(module_name) # may raise ImportError
except ImportError:
if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
raise
# maybe the requested attribute is missing?
for module_name, local_obj_name in name_pairs:
try:
module = importlib.import_module(module_name) # may raise ImportError
get_obj_from_module(module, local_obj_name) # may raise AttributeError
except ImportError:
pass
# we are out of luck, but we have no idea why
raise ImportError(obj_name)
def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
"""Traverses the object name and returns the last (rightmost) python object."""
if obj_name == '':
return module
obj = module
for part in obj_name.split("."):
obj = getattr(obj, part)
return obj
def get_obj_by_name(name: str) -> Any:
"""Finds the python object with the given name."""
module, obj_name = get_module_from_obj_name(name)
return get_obj_from_module(module, obj_name)
def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
"""Finds the python object with the given name and calls it as a function."""
assert func_name is not None
func_obj = get_obj_by_name(func_name)
assert callable(func_obj)
return func_obj(*args, **kwargs)
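# Sketch: thanks to the "np."/"tf." shorthands substituted in
# get_module_from_obj_name(), dotted names resolve through importlib, e.g.
# (illustrative):
#
#     zeros = call_func_by_name([2, 3], func_name='np.zeros')  # numpy.zeros([2, 3])
#     norm = get_obj_by_name('numpy.linalg.norm')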
def get_module_dir_by_obj_name(obj_name: str) -> str:
"""Get the directory path of the module containing the given object name."""
module, _ = get_module_from_obj_name(obj_name)
return os.path.dirname(inspect.getfile(module))
def is_top_level_function(obj: Any) -> bool:
"""Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__
def get_top_level_function_name(obj: Any) -> str:
"""Return the fully-qualified name of a top-level function."""
assert is_top_level_function(obj)
return obj.__module__ + "." + obj.__name__
# File system helpers
# ------------------------------------------------------------------------------------------
def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
"""List all files recursively in a given directory while ignoring given file and directory names.
Returns list of tuples containing both absolute and relative paths."""
assert os.path.isdir(dir_path)
base_name = os.path.basename(os.path.normpath(dir_path))
if ignores is None:
ignores = []
result = []
for root, dirs, files in os.walk(dir_path, topdown=True):
for ignore_ in ignores:
dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
# dirs need to be edited in-place
for d in dirs_to_remove:
dirs.remove(d)
files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
absolute_paths = [os.path.join(root, f) for f in files]
relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
if add_base_to_relative:
relative_paths = [os.path.join(base_name, p) for p in relative_paths]
assert len(absolute_paths) == len(relative_paths)
result += zip(absolute_paths, relative_paths)
return result
def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
"""Takes in a list of tuples of (src, dst) paths and copies files.
Will create all necessary directories."""
for file in files:
target_dir_name = os.path.dirname(file[1])
# will create all intermediate-level directories
if not os.path.exists(target_dir_name):
os.makedirs(target_dir_name)
shutil.copyfile(file[0], file[1])
# URL helpers
# ------------------------------------------------------------------------------------------
def is_url(obj: Any) -> bool:
"""Determine whether the given object is a valid URL string."""
if not isinstance(obj, str) or not "://" in obj:
return False
try:
res = requests.compat.urlparse(obj)
if not res.scheme or not res.netloc or not "." in res.netloc:
return False
res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
if not res.scheme or not res.netloc or not "." in res.netloc:
return False
except:
return False
return True
def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True) -> Any:
"""Download the given URL and return a binary-mode file object to access the data."""
assert is_url(url)
assert num_attempts >= 1
# Lookup from cache.
url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
if cache_dir is not None:
cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
if len(cache_files) == 1:
return open(cache_files[0], "rb")
# Download.
url_name = None
url_data = None
with requests.Session() as session:
if verbose:
print("Downloading %s ..." % url, end="", flush=True)
for attempts_left in reversed(range(num_attempts)):
try:
with session.get(url) as res:
res.raise_for_status()
if len(res.content) == 0:
raise IOError("No data received")
if "download_warning" in res.headers.get("Set-Cookie", "") and len(res.content) < 8192:
links = [html.unescape(link) for link in res.content.decode("utf-8").split('"') if "export=download" in link]
if len(links) == 1:
url = requests.compat.urljoin(url, links[0])
raise IOError("Google Drive virus checker nag")
match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
url_name = match[1] if match else url
url_data = res.content
if verbose:
print(" done")
break
except:
if not attempts_left:
if verbose:
print(" failed")
raise
if verbose:
print(".", end="", flush=True)
# Save to cache.
if cache_dir is not None:
safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
os.makedirs(cache_dir, exist_ok=True)
with open(temp_file, "wb") as f:
f.write(url_data)
os.replace(temp_file, cache_file) # atomic
# Return data as file object.
return io.BytesIO(url_data)
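# Usage sketch (the URL below is a placeholder, not a real download link):
#
#     with open_url('https://example.com/models/network.pkl', cache_dir='cache') as f:
#         payload = f.read()
#
# The first call downloads the file and stores it under an md5-prefixed name in
# cache_dir; later calls with the same cache_dir return the cached copy directly.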
|
stylegan-master
|
dnnlib/util.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
from . import submission
from .submission.run_context import RunContext
from .submission.submit import SubmitTarget
from .submission.submit import PathType
from .submission.submit import SubmitConfig
from .submission.submit import get_path_from_template
from .submission.submit import submit_run
from .util import EasyDict
submit_config: SubmitConfig = None # Package level variable for SubmitConfig which is only valid when inside the run function.
|
stylegan-master
|
dnnlib/__init__.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
from . import autosummary
from . import network
from . import optimizer
from . import tfutil
from .tfutil import *
from .network import Network
from .optimizer import Optimizer
|
stylegan-master
|
dnnlib/tflib/__init__.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper for adding automatically tracked values to Tensorboard.
Autosummary creates an identity op that internally keeps track of the input
values and automatically shows up in TensorBoard. The reported value
represents an average over input components. The average is accumulated
constantly over time and flushed when save_summaries() is called.
Notes:
- The output tensor must be used as an input for something else in the
graph. Otherwise, the autosummary op will not get executed, and the average
value will not get accumulated.
- It is perfectly fine to include autosummaries with the same name in
several places throughout the graph, even if they are executed concurrently.
- It is ok to also pass in a python scalar or numpy array. In this case, it
is added to the average immediately.
"""
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from tensorboard import summary as summary_lib
from tensorboard.plugins.custom_scalar import layout_pb2
from . import tfutil
from .tfutil import TfExpression
from .tfutil import TfExpressionEx
_dtype = tf.float64
_vars = OrderedDict() # name => [var, ...]
_immediate = OrderedDict() # name => update_op, update_value
_finalized = False
_merge_op = None
def _create_var(name: str, value_expr: TfExpression) -> TfExpression:
"""Internal helper for creating autosummary accumulators."""
assert not _finalized
name_id = name.replace("/", "_")
v = tf.cast(value_expr, _dtype)
if v.shape.is_fully_defined():
size = np.prod(tfutil.shape_to_list(v.shape))
size_expr = tf.constant(size, dtype=_dtype)
else:
size = None
size_expr = tf.reduce_prod(tf.cast(tf.shape(v), _dtype))
if size == 1:
if v.shape.ndims != 0:
v = tf.reshape(v, [])
v = [size_expr, v, tf.square(v)]
else:
v = [size_expr, tf.reduce_sum(v), tf.reduce_sum(tf.square(v))]
v = tf.cond(tf.is_finite(v[1]), lambda: tf.stack(v), lambda: tf.zeros(3, dtype=_dtype))
with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.control_dependencies(None):
var = tf.Variable(tf.zeros(3, dtype=_dtype), trainable=False) # [sum(1), sum(x), sum(x**2)]
update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
if name in _vars:
_vars[name].append(var)
else:
_vars[name] = [var]
return update_op
def autosummary(name: str, value: TfExpressionEx, passthru: TfExpressionEx = None) -> TfExpressionEx:
"""Create a new autosummary.
Args:
name: Name to use in TensorBoard
value: TensorFlow expression or python value to track
passthru: Optionally return this TF node without modifications but tack an autosummary update side-effect to this node.
Example use of the passthru mechanism:
n = autosummary('l2loss', loss, passthru=n)
This is a shorthand for the following code:
with tf.control_dependencies([autosummary('l2loss', loss)]):
n = tf.identity(n)
"""
tfutil.assert_tf_initialized()
name_id = name.replace("/", "_")
if tfutil.is_tf_expression(value):
with tf.name_scope("summary_" + name_id), tf.device(value.device):
update_op = _create_var(name, value)
with tf.control_dependencies([update_op]):
return tf.identity(value if passthru is None else passthru)
else: # python scalar or numpy array
if name not in _immediate:
with tfutil.absolute_name_scope("Autosummary/" + name_id), tf.device(None), tf.control_dependencies(None):
update_value = tf.placeholder(_dtype)
update_op = _create_var(name, update_value)
_immediate[name] = update_op, update_value
update_op, update_value = _immediate[name]
tfutil.run(update_op, {update_value: value})
return value if passthru is None else passthru
def finalize_autosummaries() -> None:
"""Create the necessary ops to include autosummaries in TensorBoard report.
Note: This should be done only once per graph.
"""
global _finalized
tfutil.assert_tf_initialized()
if _finalized:
return None
_finalized = True
tfutil.init_uninitialized_vars([var for vars_list in _vars.values() for var in vars_list])
# Create summary ops.
with tf.device(None), tf.control_dependencies(None):
for name, vars_list in _vars.items():
name_id = name.replace("/", "_")
with tfutil.absolute_name_scope("Autosummary/" + name_id):
moments = tf.add_n(vars_list)
moments /= moments[0]
with tf.control_dependencies([moments]): # read before resetting
reset_ops = [tf.assign(var, tf.zeros(3, dtype=_dtype)) for var in vars_list]
with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting
mean = moments[1]
std = tf.sqrt(moments[2] - tf.square(moments[1]))
tf.summary.scalar(name, mean)
tf.summary.scalar("xCustomScalars/" + name + "/margin_lo", mean - std)
tf.summary.scalar("xCustomScalars/" + name + "/margin_hi", mean + std)
# Group by category and chart name.
cat_dict = OrderedDict()
for series_name in sorted(_vars.keys()):
p = series_name.split("/")
cat = p[0] if len(p) >= 2 else ""
chart = "/".join(p[1:-1]) if len(p) >= 3 else p[-1]
if cat not in cat_dict:
cat_dict[cat] = OrderedDict()
if chart not in cat_dict[cat]:
cat_dict[cat][chart] = []
cat_dict[cat][chart].append(series_name)
# Setup custom_scalar layout.
categories = []
for cat_name, chart_dict in cat_dict.items():
charts = []
for chart_name, series_names in chart_dict.items():
series = []
for series_name in series_names:
series.append(layout_pb2.MarginChartContent.Series(
value=series_name,
lower="xCustomScalars/" + series_name + "/margin_lo",
upper="xCustomScalars/" + series_name + "/margin_hi"))
margin = layout_pb2.MarginChartContent(series=series)
charts.append(layout_pb2.Chart(title=chart_name, margin=margin))
categories.append(layout_pb2.Category(title=cat_name, chart=charts))
layout = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories))
return layout
def save_summaries(file_writer, global_step=None):
"""Call FileWriter.add_summary() with all summaries in the default graph,
automatically finalizing and merging them on the first call.
"""
global _merge_op
tfutil.assert_tf_initialized()
if _merge_op is None:
layout = finalize_autosummaries()
if layout is not None:
file_writer.add_summary(layout)
with tf.device(None), tf.control_dependencies(None):
_merge_op = tf.summary.merge_all()
file_writer.add_summary(_merge_op.eval(), global_step)
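# Training-loop sketch (illustrative; the variable names are assumptions):
#
#     loss = autosummary('Loss/G_total', loss)   # identity op that also tracks the value
#     ...                                        # `loss` must feed something that gets executed
#     writer = tf.summary.FileWriter(log_dir)
#     for step in range(num_steps):
#         ...                                    # run training ops
#         if step % 100 == 0:
#             save_summaries(writer, step)       # flush the accumulated averages to TensorBoard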
|
stylegan-master
|
dnnlib/tflib/autosummary.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Miscellaneous helper utils for Tensorflow."""
import os
import numpy as np
import tensorflow as tf
from typing import Any, Iterable, List, Union
TfExpression = Union[tf.Tensor, tf.Variable, tf.Operation]
"""A type that represents a valid Tensorflow expression."""
TfExpressionEx = Union[TfExpression, int, float, np.ndarray]
"""A type that can be converted to a valid Tensorflow expression."""
def run(*args, **kwargs) -> Any:
"""Run the specified ops in the default session."""
assert_tf_initialized()
return tf.get_default_session().run(*args, **kwargs)
def is_tf_expression(x: Any) -> bool:
"""Check whether the input is a valid Tensorflow expression, i.e., Tensorflow Tensor, Variable, or Operation."""
return isinstance(x, (tf.Tensor, tf.Variable, tf.Operation))
def shape_to_list(shape: Iterable[tf.Dimension]) -> List[Union[int, None]]:
"""Convert a Tensorflow shape to a list of ints."""
return [dim.value for dim in shape]
def flatten(x: TfExpressionEx) -> TfExpression:
"""Shortcut function for flattening a tensor."""
with tf.name_scope("Flatten"):
return tf.reshape(x, [-1])
def log2(x: TfExpressionEx) -> TfExpression:
"""Logarithm in base 2."""
with tf.name_scope("Log2"):
return tf.log(x) * np.float32(1.0 / np.log(2.0))
def exp2(x: TfExpressionEx) -> TfExpression:
"""Exponent in base 2."""
with tf.name_scope("Exp2"):
return tf.exp(x * np.float32(np.log(2.0)))
def lerp(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpressionEx:
"""Linear interpolation."""
with tf.name_scope("Lerp"):
return a + (b - a) * t
def lerp_clip(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpression:
"""Linear interpolation with clip."""
with tf.name_scope("LerpClip"):
return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)
def absolute_name_scope(scope: str) -> tf.name_scope:
"""Forcefully enter the specified name scope, ignoring any surrounding scopes."""
return tf.name_scope(scope + "/")
def absolute_variable_scope(scope: str, **kwargs) -> tf.variable_scope:
"""Forcefully enter the specified variable scope, ignoring any surrounding scopes."""
return tf.variable_scope(tf.VariableScope(name=scope, **kwargs), auxiliary_name_scope=False)
def _sanitize_tf_config(config_dict: dict = None) -> dict:
# Defaults.
cfg = dict()
cfg["rnd.np_random_seed"] = None # Random seed for NumPy. None = keep as is.
cfg["rnd.tf_random_seed"] = "auto" # Random seed for TensorFlow. 'auto' = derive from NumPy random state. None = keep as is.
cfg["env.TF_CPP_MIN_LOG_LEVEL"] = "1" # 0 = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info.
cfg["graph_options.place_pruned_graph"] = True # False = Check that all ops are available on the designated device. True = Skip the check for ops that are not used.
cfg["gpu_options.allow_growth"] = True # False = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed.
# User overrides.
if config_dict is not None:
cfg.update(config_dict)
return cfg
def init_tf(config_dict: dict = None) -> None:
"""Initialize TensorFlow session using good default settings."""
# Skip if already initialized.
if tf.get_default_session() is not None:
return
# Setup config dict and random seeds.
cfg = _sanitize_tf_config(config_dict)
np_random_seed = cfg["rnd.np_random_seed"]
if np_random_seed is not None:
np.random.seed(np_random_seed)
tf_random_seed = cfg["rnd.tf_random_seed"]
if tf_random_seed == "auto":
tf_random_seed = np.random.randint(1 << 31)
if tf_random_seed is not None:
tf.set_random_seed(tf_random_seed)
# Setup environment variables.
for key, value in list(cfg.items()):
fields = key.split(".")
if fields[0] == "env":
assert len(fields) == 2
os.environ[fields[1]] = str(value)
# Create default TensorFlow session.
create_session(cfg, force_as_default=True)
def assert_tf_initialized():
"""Check that TensorFlow session has been initialized."""
if tf.get_default_session() is None:
raise RuntimeError("No default TensorFlow session found. Please call dnnlib.tflib.init_tf().")
def create_session(config_dict: dict = None, force_as_default: bool = False) -> tf.Session:
"""Create tf.Session based on config dict."""
# Setup TensorFlow config proto.
cfg = _sanitize_tf_config(config_dict)
config_proto = tf.ConfigProto()
for key, value in cfg.items():
fields = key.split(".")
if fields[0] not in ["rnd", "env"]:
obj = config_proto
for field in fields[:-1]:
obj = getattr(obj, field)
setattr(obj, fields[-1], value)
# Create session.
session = tf.Session(config=config_proto)
if force_as_default:
# pylint: disable=protected-access
session._default_session = session.as_default()
session._default_session.enforce_nesting = False
session._default_session.__enter__() # pylint: disable=no-member
return session
def init_uninitialized_vars(target_vars: List[tf.Variable] = None) -> None:
"""Initialize all tf.Variables that have not already been initialized.
Equivalent to the following, but more efficient and does not bloat the tf graph:
tf.variables_initializer(tf.report_uninitialized_variables()).run()
"""
assert_tf_initialized()
if target_vars is None:
target_vars = tf.global_variables()
test_vars = []
test_ops = []
with tf.control_dependencies(None): # ignore surrounding control_dependencies
for var in target_vars:
assert is_tf_expression(var)
try:
tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/IsVariableInitialized:0"))
except KeyError:
# Op does not exist => variable may be uninitialized.
test_vars.append(var)
with absolute_name_scope(var.name.split(":")[0]):
test_ops.append(tf.is_variable_initialized(var))
init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
run([var.initializer for var in init_vars])
def set_vars(var_to_value_dict: dict) -> None:
"""Set the values of given tf.Variables.
Equivalent to the following, but more efficient and does not bloat the tf graph:
    tflib.run([tf.assign(var, value) for var, value in var_to_value_dict.items()])
"""
assert_tf_initialized()
ops = []
feed_dict = {}
for var, value in var_to_value_dict.items():
assert is_tf_expression(var)
try:
setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/setter:0")) # look for existing op
except KeyError:
with absolute_name_scope(var.name.split(":")[0]):
with tf.control_dependencies(None): # ignore surrounding control_dependencies
setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, "new_value"), name="setter") # create new setter
ops.append(setter)
feed_dict[setter.op.inputs[1]] = value
run(ops, feed_dict)
def create_var_with_large_initial_value(initial_value: np.ndarray, *args, **kwargs):
"""Create tf.Variable with large initial value without bloating the tf graph."""
assert_tf_initialized()
assert isinstance(initial_value, np.ndarray)
zeros = tf.zeros(initial_value.shape, initial_value.dtype)
var = tf.Variable(zeros, *args, **kwargs)
set_vars({var: initial_value})
return var
def convert_images_from_uint8(images, drange=[-1,1], nhwc_to_nchw=False):
"""Convert a minibatch of images from uint8 to float32 with configurable dynamic range.
Can be used as an input transformation for Network.run().
"""
images = tf.cast(images, tf.float32)
if nhwc_to_nchw:
images = tf.transpose(images, [0, 3, 1, 2])
    return images * ((drange[1] - drange[0]) / 255) + drange[0]
def convert_images_to_uint8(images, drange=[-1,1], nchw_to_nhwc=False, shrink=1):
"""Convert a minibatch of images from float32 to uint8 with configurable dynamic range.
Can be used as an output transformation for Network.run().
"""
images = tf.cast(images, tf.float32)
if shrink > 1:
ksize = [1, 1, shrink, shrink]
images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW")
if nchw_to_nhwc:
images = tf.transpose(images, [0, 2, 3, 1])
scale = 255 / (drange[1] - drange[0])
images = images * scale + (0.5 - drange[0] * scale)
return tf.saturate_cast(images, tf.uint8)
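# Sketch of overriding the session defaults through the config dict consumed by
# _sanitize_tf_config() above (keys shown are the documented ones; values are
# illustrative):
#
#     init_tf({
#         'rnd.np_random_seed': 1000,           # deterministic NumPy seed
#         'gpu_options.allow_growth': False,    # allocate all GPU memory up front
#         'env.TF_CPP_MIN_LOG_LEVEL': '0',      # print all TensorFlow debug info
#     })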
|
stylegan-master
|
dnnlib/tflib/tfutil.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper for managing networks."""
import types
import inspect
import re
import uuid
import sys
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from typing import Any, List, Tuple, Union
from . import tfutil
from .. import util
from .tfutil import TfExpression, TfExpressionEx
_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import.
_import_module_src = dict() # Source code for temporary modules created during pickle import.
def import_handler(handler_func):
"""Function decorator for declaring custom import handlers."""
_import_handlers.append(handler_func)
return handler_func
class Network:
"""Generic network abstraction.
Acts as a convenience wrapper for a parameterized network construction
function, providing several utility methods and convenient access to
the inputs/outputs/weights.
Network objects can be safely pickled and unpickled for long-term
archival purposes. The pickling works reliably as long as the underlying
network construction function is defined in a standalone Python module
that has no side effects or application-specific imports.
Args:
name: Network name. Used to select TensorFlow name and variable scopes.
func_name: Fully qualified name of the underlying network construction function, or a top-level function object.
static_kwargs: Keyword arguments to be passed in to the network construction function.
Attributes:
name: User-specified name, defaults to build func name if None.
scope: Unique TensorFlow scope containing template graph and variables, derived from the user-specified name.
static_kwargs: Arguments passed to the user-supplied build func.
components: Container for sub-networks. Passed to the build func, and retained between calls.
num_inputs: Number of input tensors.
num_outputs: Number of output tensors.
input_shapes: Input tensor shapes (NC or NCHW), including minibatch dimension.
output_shapes: Output tensor shapes (NC or NCHW), including minibatch dimension.
input_shape: Short-hand for input_shapes[0].
output_shape: Short-hand for output_shapes[0].
input_templates: Input placeholders in the template graph.
output_templates: Output tensors in the template graph.
input_names: Name string for each input.
output_names: Name string for each output.
own_vars: Variables defined by this network (local_name => var), excluding sub-networks.
vars: All variables (local_name => var).
trainables: All trainable variables (local_name => var).
var_global_to_local: Mapping from variable global names to local names.
"""
def __init__(self, name: str = None, func_name: Any = None, **static_kwargs):
tfutil.assert_tf_initialized()
assert isinstance(name, str) or name is None
assert func_name is not None
assert isinstance(func_name, str) or util.is_top_level_function(func_name)
assert util.is_pickleable(static_kwargs)
self._init_fields()
self.name = name
self.static_kwargs = util.EasyDict(static_kwargs)
# Locate the user-specified network build function.
if util.is_top_level_function(func_name):
func_name = util.get_top_level_function_name(func_name)
module, self._build_func_name = util.get_module_from_obj_name(func_name)
self._build_func = util.get_obj_from_module(module, self._build_func_name)
assert callable(self._build_func)
# Dig up source code for the module containing the build function.
self._build_module_src = _import_module_src.get(module, None)
if self._build_module_src is None:
self._build_module_src = inspect.getsource(module)
# Init TensorFlow graph.
self._init_graph()
self.reset_own_vars()
def _init_fields(self) -> None:
self.name = None
self.scope = None
self.static_kwargs = util.EasyDict()
self.components = util.EasyDict()
self.num_inputs = 0
self.num_outputs = 0
self.input_shapes = [[]]
self.output_shapes = [[]]
self.input_shape = []
self.output_shape = []
self.input_templates = []
self.output_templates = []
self.input_names = []
self.output_names = []
self.own_vars = OrderedDict()
self.vars = OrderedDict()
self.trainables = OrderedDict()
self.var_global_to_local = OrderedDict()
self._build_func = None # User-supplied build function that constructs the network.
self._build_func_name = None # Name of the build function.
self._build_module_src = None # Full source code of the module containing the build function.
self._run_cache = dict() # Cached graph data for Network.run().
def _init_graph(self) -> None:
# Collect inputs.
self.input_names = []
for param in inspect.signature(self._build_func).parameters.values():
if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
self.input_names.append(param.name)
self.num_inputs = len(self.input_names)
assert self.num_inputs >= 1
# Choose name and scope.
if self.name is None:
self.name = self._build_func_name
assert re.match("^[A-Za-z0-9_.\\-]*$", self.name)
with tf.name_scope(None):
self.scope = tf.get_default_graph().unique_name(self.name, mark_as_used=True)
# Finalize build func kwargs.
build_kwargs = dict(self.static_kwargs)
build_kwargs["is_template_graph"] = True
build_kwargs["components"] = self.components
# Build template graph.
with tfutil.absolute_variable_scope(self.scope, reuse=tf.AUTO_REUSE), tfutil.absolute_name_scope(self.scope): # ignore surrounding scopes
assert tf.get_variable_scope().name == self.scope
assert tf.get_default_graph().get_name_scope() == self.scope
with tf.control_dependencies(None): # ignore surrounding control dependencies
self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
out_expr = self._build_func(*self.input_templates, **build_kwargs)
# Collect outputs.
assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
self.output_templates = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
self.num_outputs = len(self.output_templates)
assert self.num_outputs >= 1
assert all(tfutil.is_tf_expression(t) for t in self.output_templates)
# Perform sanity checks.
if any(t.shape.ndims is None for t in self.input_templates):
raise ValueError("Network input shapes not defined. Please call x.set_shape() for each input.")
if any(t.shape.ndims is None for t in self.output_templates):
raise ValueError("Network output shapes not defined. Please call x.set_shape() where applicable.")
if any(not isinstance(comp, Network) for comp in self.components.values()):
raise ValueError("Components of a Network must be Networks themselves.")
if len(self.components) != len(set(comp.name for comp in self.components.values())):
raise ValueError("Components of a Network must have unique names.")
# List inputs and outputs.
self.input_shapes = [tfutil.shape_to_list(t.shape) for t in self.input_templates]
self.output_shapes = [tfutil.shape_to_list(t.shape) for t in self.output_templates]
self.input_shape = self.input_shapes[0]
self.output_shape = self.output_shapes[0]
self.output_names = [t.name.split("/")[-1].split(":")[0] for t in self.output_templates]
# List variables.
self.own_vars = OrderedDict((var.name[len(self.scope) + 1:].split(":")[0], var) for var in tf.global_variables(self.scope + "/"))
self.vars = OrderedDict(self.own_vars)
self.vars.update((comp.name + "/" + name, var) for comp in self.components.values() for name, var in comp.vars.items())
self.trainables = OrderedDict((name, var) for name, var in self.vars.items() if var.trainable)
self.var_global_to_local = OrderedDict((var.name.split(":")[0], name) for name, var in self.vars.items())
def reset_own_vars(self) -> None:
"""Re-initialize all variables of this network, excluding sub-networks."""
tfutil.run([var.initializer for var in self.own_vars.values()])
def reset_vars(self) -> None:
"""Re-initialize all variables of this network, including sub-networks."""
tfutil.run([var.initializer for var in self.vars.values()])
def reset_trainables(self) -> None:
"""Re-initialize all trainable variables of this network, including sub-networks."""
tfutil.run([var.initializer for var in self.trainables.values()])
def get_output_for(self, *in_expr: TfExpression, return_as_list: bool = False, **dynamic_kwargs) -> Union[TfExpression, List[TfExpression]]:
"""Construct TensorFlow expression(s) for the output(s) of this network, given the input expression(s)."""
assert len(in_expr) == self.num_inputs
assert not all(expr is None for expr in in_expr)
# Finalize build func kwargs.
build_kwargs = dict(self.static_kwargs)
build_kwargs.update(dynamic_kwargs)
build_kwargs["is_template_graph"] = False
build_kwargs["components"] = self.components
# Build TensorFlow graph to evaluate the network.
with tfutil.absolute_variable_scope(self.scope, reuse=True), tf.name_scope(self.name):
assert tf.get_variable_scope().name == self.scope
valid_inputs = [expr for expr in in_expr if expr is not None]
final_inputs = []
for expr, name, shape in zip(in_expr, self.input_names, self.input_shapes):
if expr is not None:
expr = tf.identity(expr, name=name)
else:
expr = tf.zeros([tf.shape(valid_inputs[0])[0]] + shape[1:], name=name)
final_inputs.append(expr)
out_expr = self._build_func(*final_inputs, **build_kwargs)
# Propagate input shapes back to the user-specified expressions.
for expr, final in zip(in_expr, final_inputs):
if isinstance(expr, tf.Tensor):
expr.set_shape(final.shape)
# Express outputs in the desired format.
assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
if return_as_list:
out_expr = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
return out_expr
def get_var_local_name(self, var_or_global_name: Union[TfExpression, str]) -> str:
"""Get the local name of a given variable, without any surrounding name scopes."""
assert tfutil.is_tf_expression(var_or_global_name) or isinstance(var_or_global_name, str)
global_name = var_or_global_name if isinstance(var_or_global_name, str) else var_or_global_name.name
return self.var_global_to_local[global_name]
def find_var(self, var_or_local_name: Union[TfExpression, str]) -> TfExpression:
"""Find variable by local or global name."""
assert tfutil.is_tf_expression(var_or_local_name) or isinstance(var_or_local_name, str)
return self.vars[var_or_local_name] if isinstance(var_or_local_name, str) else var_or_local_name
def get_var(self, var_or_local_name: Union[TfExpression, str]) -> np.ndarray:
"""Get the value of a given variable as NumPy array.
Note: This method is very inefficient -- prefer to use tflib.run(list_of_vars) whenever possible."""
return self.find_var(var_or_local_name).eval()
def set_var(self, var_or_local_name: Union[TfExpression, str], new_value: Union[int, float, np.ndarray]) -> None:
"""Set the value of a given variable based on the given NumPy array.
Note: This method is very inefficient -- prefer to use tflib.set_vars() whenever possible."""
tfutil.set_vars({self.find_var(var_or_local_name): new_value})
def __getstate__(self) -> dict:
"""Pickle export."""
state = dict()
state["version"] = 3
state["name"] = self.name
state["static_kwargs"] = dict(self.static_kwargs)
state["components"] = dict(self.components)
state["build_module_src"] = self._build_module_src
state["build_func_name"] = self._build_func_name
state["variables"] = list(zip(self.own_vars.keys(), tfutil.run(list(self.own_vars.values()))))
return state
def __setstate__(self, state: dict) -> None:
"""Pickle import."""
# pylint: disable=attribute-defined-outside-init
tfutil.assert_tf_initialized()
self._init_fields()
# Execute custom import handlers.
for handler in _import_handlers:
state = handler(state)
# Set basic fields.
assert state["version"] in [2, 3]
self.name = state["name"]
self.static_kwargs = util.EasyDict(state["static_kwargs"])
self.components = util.EasyDict(state.get("components", {}))
self._build_module_src = state["build_module_src"]
self._build_func_name = state["build_func_name"]
# Create temporary module from the imported source code.
module_name = "_tflib_network_import_" + uuid.uuid4().hex
module = types.ModuleType(module_name)
sys.modules[module_name] = module
_import_module_src[module] = self._build_module_src
exec(self._build_module_src, module.__dict__) # pylint: disable=exec-used
# Locate network build function in the temporary module.
self._build_func = util.get_obj_from_module(module, self._build_func_name)
assert callable(self._build_func)
# Init TensorFlow graph.
self._init_graph()
self.reset_own_vars()
tfutil.set_vars({self.find_var(name): value for name, value in state["variables"]})
def clone(self, name: str = None, **new_static_kwargs) -> "Network":
"""Create a clone of this network with its own copy of the variables."""
# pylint: disable=protected-access
net = object.__new__(Network)
net._init_fields()
net.name = name if name is not None else self.name
net.static_kwargs = util.EasyDict(self.static_kwargs)
net.static_kwargs.update(new_static_kwargs)
net._build_module_src = self._build_module_src
net._build_func_name = self._build_func_name
net._build_func = self._build_func
net._init_graph()
net.copy_vars_from(self)
return net
def copy_own_vars_from(self, src_net: "Network") -> None:
"""Copy the values of all variables from the given network, excluding sub-networks."""
names = [name for name in self.own_vars.keys() if name in src_net.own_vars]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def copy_vars_from(self, src_net: "Network") -> None:
"""Copy the values of all variables from the given network, including sub-networks."""
names = [name for name in self.vars.keys() if name in src_net.vars]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def copy_trainables_from(self, src_net: "Network") -> None:
"""Copy the values of all trainable variables from the given network, including sub-networks."""
names = [name for name in self.trainables.keys() if name in src_net.trainables]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def convert(self, new_func_name: str, new_name: str = None, **new_static_kwargs) -> "Network":
"""Create new network with the given parameters, and copy all variables from this network."""
if new_name is None:
new_name = self.name
static_kwargs = dict(self.static_kwargs)
static_kwargs.update(new_static_kwargs)
net = Network(name=new_name, func_name=new_func_name, **static_kwargs)
net.copy_vars_from(self)
return net
def setup_as_moving_average_of(self, src_net: "Network", beta: TfExpressionEx = 0.99, beta_nontrainable: TfExpressionEx = 0.0) -> tf.Operation:
"""Construct a TensorFlow op that updates the variables of this network
to be slightly closer to those of the given network."""
with tfutil.absolute_name_scope(self.scope + "/_MovingAvg"):
ops = []
for name, var in self.vars.items():
if name in src_net.vars:
cur_beta = beta if name in self.trainables else beta_nontrainable
new_value = tfutil.lerp(src_net.vars[name], var, cur_beta)
ops.append(var.assign(new_value))
return tf.group(*ops)
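    # Illustrative use in a training loop: maintain an exponential moving average
    # of the generator weights (the beta value below is an assumption, not the
    # verified default):
    #
    #     Gs_update_op = Gs.setup_as_moving_average_of(G, beta=0.999)
    #     ...
    #     tflib.run(Gs_update_op)   # after each generator update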
def run(self,
*in_arrays: Tuple[Union[np.ndarray, None], ...],
input_transform: dict = None,
output_transform: dict = None,
return_as_list: bool = False,
print_progress: bool = False,
minibatch_size: int = None,
num_gpus: int = 1,
assume_frozen: bool = False,
**dynamic_kwargs) -> Union[np.ndarray, Tuple[np.ndarray, ...], List[np.ndarray]]:
"""Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).
Args:
input_transform: A dict specifying a custom transformation to be applied to the input tensor(s) before evaluating the network.
The dict must contain a 'func' field that points to a top-level function. The function is called with the input
TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
output_transform: A dict specifying a custom transformation to be applied to the output tensor(s) after evaluating the network.
The dict must contain a 'func' field that points to a top-level function. The function is called with the output
TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
return_as_list: True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
print_progress: Print progress to the console? Useful for very large input arrays.
minibatch_size: Maximum minibatch size to use, None = disable batching.
num_gpus: Number of GPUs to use.
        assume_frozen: Improve multi-GPU performance by assuming that the trainable parameters will remain unchanged between calls.
dynamic_kwargs: Additional keyword arguments to be passed into the network build function.
"""
assert len(in_arrays) == self.num_inputs
assert not all(arr is None for arr in in_arrays)
assert input_transform is None or util.is_top_level_function(input_transform["func"])
assert output_transform is None or util.is_top_level_function(output_transform["func"])
output_transform, dynamic_kwargs = _handle_legacy_output_transforms(output_transform, dynamic_kwargs)
num_items = in_arrays[0].shape[0]
if minibatch_size is None:
minibatch_size = num_items
# Construct unique hash key from all arguments that affect the TensorFlow graph.
key = dict(input_transform=input_transform, output_transform=output_transform, num_gpus=num_gpus, assume_frozen=assume_frozen, dynamic_kwargs=dynamic_kwargs)
def unwind_key(obj):
if isinstance(obj, dict):
return [(key, unwind_key(value)) for key, value in sorted(obj.items())]
if callable(obj):
return util.get_top_level_function_name(obj)
return obj
key = repr(unwind_key(key))
# Build graph.
if key not in self._run_cache:
with tfutil.absolute_name_scope(self.scope + "/_Run"), tf.control_dependencies(None):
with tf.device("/cpu:0"):
in_expr = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr]))
out_split = []
for gpu in range(num_gpus):
with tf.device("/gpu:%d" % gpu):
net_gpu = self.clone() if assume_frozen else self
in_gpu = in_split[gpu]
if input_transform is not None:
in_kwargs = dict(input_transform)
in_gpu = in_kwargs.pop("func")(*in_gpu, **in_kwargs)
in_gpu = [in_gpu] if tfutil.is_tf_expression(in_gpu) else list(in_gpu)
assert len(in_gpu) == self.num_inputs
out_gpu = net_gpu.get_output_for(*in_gpu, return_as_list=True, **dynamic_kwargs)
if output_transform is not None:
out_kwargs = dict(output_transform)
out_gpu = out_kwargs.pop("func")(*out_gpu, **out_kwargs)
out_gpu = [out_gpu] if tfutil.is_tf_expression(out_gpu) else list(out_gpu)
assert len(out_gpu) == self.num_outputs
out_split.append(out_gpu)
with tf.device("/cpu:0"):
out_expr = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
self._run_cache[key] = in_expr, out_expr
# Run minibatches.
in_expr, out_expr = self._run_cache[key]
out_arrays = [np.empty([num_items] + tfutil.shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr]
for mb_begin in range(0, num_items, minibatch_size):
if print_progress:
print("\r%d / %d" % (mb_begin, num_items), end="")
mb_end = min(mb_begin + minibatch_size, num_items)
mb_num = mb_end - mb_begin
mb_in = [src[mb_begin : mb_end] if src is not None else np.zeros([mb_num] + shape[1:]) for src, shape in zip(in_arrays, self.input_shapes)]
mb_out = tf.get_default_session().run(out_expr, dict(zip(in_expr, mb_in)))
for dst, src in zip(out_arrays, mb_out):
dst[mb_begin: mb_end] = src
# Done.
if print_progress:
print("\r%d / %d" % (num_items, num_items))
if not return_as_list:
out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
return out_arrays
def list_ops(self) -> List[TfExpression]:
include_prefix = self.scope + "/"
exclude_prefix = include_prefix + "_"
ops = tf.get_default_graph().get_operations()
ops = [op for op in ops if op.name.startswith(include_prefix)]
ops = [op for op in ops if not op.name.startswith(exclude_prefix)]
return ops
def list_layers(self) -> List[Tuple[str, TfExpression, List[TfExpression]]]:
"""Returns a list of (layer_name, output_expr, trainable_vars) tuples corresponding to
individual layers of the network. Mainly intended to be used for reporting."""
layers = []
def recurse(scope, parent_ops, parent_vars, level):
# Ignore specific patterns.
if any(p in scope for p in ["/Shape", "/strided_slice", "/Cast", "/concat", "/Assign"]):
return
# Filter ops and vars by scope.
global_prefix = scope + "/"
local_prefix = global_prefix[len(self.scope) + 1:]
cur_ops = [op for op in parent_ops if op.name.startswith(global_prefix) or op.name == global_prefix[:-1]]
cur_vars = [(name, var) for name, var in parent_vars if name.startswith(local_prefix) or name == local_prefix[:-1]]
if not cur_ops and not cur_vars:
return
# Filter out all ops related to variables.
for var in [op for op in cur_ops if op.type.startswith("Variable")]:
var_prefix = var.name + "/"
cur_ops = [op for op in cur_ops if not op.name.startswith(var_prefix)]
# Scope does not contain ops as immediate children => recurse deeper.
contains_direct_ops = any("/" not in op.name[len(global_prefix):] and op.type != "Identity" for op in cur_ops)
if (level == 0 or not contains_direct_ops) and (len(cur_ops) + len(cur_vars)) > 1:
visited = set()
for rel_name in [op.name[len(global_prefix):] for op in cur_ops] + [name[len(local_prefix):] for name, _var in cur_vars]:
token = rel_name.split("/")[0]
if token not in visited:
recurse(global_prefix + token, cur_ops, cur_vars, level + 1)
visited.add(token)
return
# Report layer.
layer_name = scope[len(self.scope) + 1:]
layer_output = cur_ops[-1].outputs[0] if cur_ops else cur_vars[-1][1]
layer_trainables = [var for _name, var in cur_vars if var.trainable]
layers.append((layer_name, layer_output, layer_trainables))
recurse(self.scope, self.list_ops(), list(self.vars.items()), 0)
return layers
def print_layers(self, title: str = None, hide_layers_with_no_params: bool = False) -> None:
"""Print a summary table of the network structure."""
rows = [[title if title is not None else self.name, "Params", "OutputShape", "WeightShape"]]
rows += [["---"] * 4]
total_params = 0
for layer_name, layer_output, layer_trainables in self.list_layers():
num_params = sum(np.prod(tfutil.shape_to_list(var.shape)) for var in layer_trainables)
weights = [var for var in layer_trainables if var.name.endswith("/weight:0")]
weights.sort(key=lambda x: len(x.name))
if len(weights) == 0 and len(layer_trainables) == 1:
weights = layer_trainables
total_params += num_params
if not hide_layers_with_no_params or num_params != 0:
num_params_str = str(num_params) if num_params > 0 else "-"
output_shape_str = str(layer_output.shape)
weight_shape_str = str(weights[0].shape) if len(weights) >= 1 else "-"
rows += [[layer_name, num_params_str, output_shape_str, weight_shape_str]]
rows += [["---"] * 4]
rows += [["Total", str(total_params), "", ""]]
widths = [max(len(cell) for cell in column) for column in zip(*rows)]
print()
for row in rows:
print(" ".join(cell + " " * (width - len(cell)) for cell, width in zip(row, widths)))
print()
def setup_weight_histograms(self, title: str = None) -> None:
"""Construct summary ops to include histograms of all trainable parameters in TensorBoard."""
if title is None:
title = self.name
with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
for local_name, var in self.trainables.items():
if "/" in local_name:
p = local_name.split("/")
name = title + "_" + p[-1] + "/" + "_".join(p[:-1])
else:
name = title + "_toplevel/" + local_name
tf.summary.histogram(name, var)
#----------------------------------------------------------------------------
# Backwards-compatible emulation of legacy output transformation in Network.run().
_print_legacy_warning = True
def _handle_legacy_output_transforms(output_transform, dynamic_kwargs):
global _print_legacy_warning
legacy_kwargs = ["out_mul", "out_add", "out_shrink", "out_dtype"]
if not any(kwarg in dynamic_kwargs for kwarg in legacy_kwargs):
return output_transform, dynamic_kwargs
if _print_legacy_warning:
_print_legacy_warning = False
print()
print("WARNING: Old-style output transformations in Network.run() are deprecated.")
print("Consider using 'output_transform=dict(func=tflib.convert_images_to_uint8)'")
print("instead of 'out_mul=127.5, out_add=127.5, out_dtype=np.uint8'.")
print()
assert output_transform is None
new_kwargs = dict(dynamic_kwargs)
new_transform = {kwarg: new_kwargs.pop(kwarg) for kwarg in legacy_kwargs if kwarg in dynamic_kwargs}
new_transform["func"] = _legacy_output_transform_func
return new_transform, new_kwargs
def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None):
if out_mul != 1.0:
expr = [x * out_mul for x in expr]
if out_add != 0.0:
expr = [x + out_add for x in expr]
if out_shrink > 1:
ksize = [1, 1, out_shrink, out_shrink]
expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in expr]
if out_dtype is not None:
if tf.as_dtype(out_dtype).is_integer:
expr = [tf.round(x) for x in expr]
expr = [tf.saturate_cast(x, out_dtype) for x in expr]
return expr
|
stylegan-master
|
dnnlib/tflib/network.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper wrapper for a Tensorflow optimizer."""
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from typing import List, Union
from . import autosummary
from . import tfutil
from .. import util
from .tfutil import TfExpression, TfExpressionEx
try:
# TensorFlow 1.13
from tensorflow.python.ops import nccl_ops
except:
# Older TensorFlow versions
import tensorflow.contrib.nccl as nccl_ops
class Optimizer:
"""A Wrapper for tf.train.Optimizer.
Automatically takes care of:
- Gradient averaging for multi-GPU training.
- Dynamic loss scaling and typecasts for FP16 training.
- Ignoring corrupted gradients that contain NaNs/Infs.
- Reporting statistics.
- Well-chosen default settings.
"""
def __init__(self,
name: str = "Train",
tf_optimizer: str = "tf.train.AdamOptimizer",
learning_rate: TfExpressionEx = 0.001,
use_loss_scaling: bool = False,
loss_scaling_init: float = 64.0,
loss_scaling_inc: float = 0.0005,
loss_scaling_dec: float = 1.0,
**kwargs):
# Init fields.
self.name = name
self.learning_rate = tf.convert_to_tensor(learning_rate)
self.id = self.name.replace("/", ".")
self.scope = tf.get_default_graph().unique_name(self.id)
self.optimizer_class = util.get_obj_by_name(tf_optimizer)
self.optimizer_kwargs = dict(kwargs)
self.use_loss_scaling = use_loss_scaling
self.loss_scaling_init = loss_scaling_init
self.loss_scaling_inc = loss_scaling_inc
self.loss_scaling_dec = loss_scaling_dec
self._grad_shapes = None # [shape, ...]
self._dev_opt = OrderedDict() # device => optimizer
self._dev_grads = OrderedDict() # device => [[(grad, var), ...], ...]
self._dev_ls_var = OrderedDict() # device => variable (log2 of loss scaling factor)
self._updates_applied = False
def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None:
"""Register the gradients of the given loss function with respect to the given variables.
Intended to be called once per GPU."""
assert not self._updates_applied
# Validate arguments.
if isinstance(trainable_vars, dict):
trainable_vars = list(trainable_vars.values()) # allow passing in Network.trainables as vars
assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1
assert all(tfutil.is_tf_expression(expr) for expr in trainable_vars + [loss])
if self._grad_shapes is None:
self._grad_shapes = [tfutil.shape_to_list(var.shape) for var in trainable_vars]
assert len(trainable_vars) == len(self._grad_shapes)
assert all(tfutil.shape_to_list(var.shape) == var_shape for var, var_shape in zip(trainable_vars, self._grad_shapes))
dev = loss.device
assert all(var.device == dev for var in trainable_vars)
# Register device and compute gradients.
with tf.name_scope(self.id + "_grad"), tf.device(dev):
if dev not in self._dev_opt:
opt_name = self.scope.replace("/", "_") + "_opt%d" % len(self._dev_opt)
assert callable(self.optimizer_class)
self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs)
self._dev_grads[dev] = []
loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))
grads = self._dev_opt[dev].compute_gradients(loss, trainable_vars, gate_gradients=tf.train.Optimizer.GATE_NONE) # disable gating to reduce memory usage
grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads] # replace disconnected gradients with zeros
self._dev_grads[dev].append(grads)
def apply_updates(self) -> tf.Operation:
"""Construct training op to update the registered variables based on their gradients."""
tfutil.assert_tf_initialized()
assert not self._updates_applied
self._updates_applied = True
devices = list(self._dev_grads.keys())
total_grads = sum(len(grads) for grads in self._dev_grads.values())
assert len(devices) >= 1 and total_grads >= 1
ops = []
with tfutil.absolute_name_scope(self.scope):
# Cast gradients to FP32 and calculate partial sum within each device.
dev_grads = OrderedDict() # device => [(grad, var), ...]
for dev_idx, dev in enumerate(devices):
with tf.name_scope("ProcessGrads%d" % dev_idx), tf.device(dev):
sums = []
for gv in zip(*self._dev_grads[dev]):
assert all(v is gv[0][1] for g, v in gv)
g = [tf.cast(g, tf.float32) for g, v in gv]
g = g[0] if len(g) == 1 else tf.add_n(g)
sums.append((g, gv[0][1]))
dev_grads[dev] = sums
# Sum gradients across devices.
if len(devices) > 1:
with tf.name_scope("SumAcrossGPUs"), tf.device(None):
for var_idx, grad_shape in enumerate(self._grad_shapes):
g = [dev_grads[dev][var_idx][0] for dev in devices]
if np.prod(grad_shape): # nccl does not support zero-sized tensors
g = nccl_ops.all_sum(g)
for dev, gg in zip(devices, g):
dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1])
# Apply updates separately on each device.
for dev_idx, (dev, grads) in enumerate(dev_grads.items()):
with tf.name_scope("ApplyGrads%d" % dev_idx), tf.device(dev):
# Scale gradients as needed.
if self.use_loss_scaling or total_grads > 1:
with tf.name_scope("Scale"):
coef = tf.constant(np.float32(1.0 / total_grads), name="coef")
coef = self.undo_loss_scaling(coef)
grads = [(g * coef, v) for g, v in grads]
# Check for overflows.
with tf.name_scope("CheckOverflow"):
grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads]))
# Update weights and adjust loss scaling.
with tf.name_scope("UpdateWeights"):
# pylint: disable=cell-var-from-loop
opt = self._dev_opt[dev]
ls_var = self.get_loss_scaling_var(dev)
if not self.use_loss_scaling:
ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op))
else:
ops.append(tf.cond(grad_ok,
lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)),
lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec))))
# Report statistics on the last device.
if dev == devices[-1]:
with tf.name_scope("Statistics"):
ops.append(autosummary.autosummary(self.id + "/learning_rate", self.learning_rate))
ops.append(autosummary.autosummary(self.id + "/overflow_frequency", tf.where(grad_ok, 0, 1)))
if self.use_loss_scaling:
ops.append(autosummary.autosummary(self.id + "/loss_scaling_log2", ls_var))
# Initialize variables and group everything into a single op.
self.reset_optimizer_state()
tfutil.init_uninitialized_vars(list(self._dev_ls_var.values()))
return tf.group(*ops, name="TrainingOp")
def reset_optimizer_state(self) -> None:
"""Reset internal state of the underlying optimizer."""
tfutil.assert_tf_initialized()
tfutil.run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()])
def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]:
"""Get or create variable representing log2 of the current dynamic loss scaling factor."""
if not self.use_loss_scaling:
return None
if device not in self._dev_ls_var:
with tfutil.absolute_name_scope(self.scope + "/LossScalingVars"), tf.control_dependencies(None):
self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name="loss_scaling_var")
return self._dev_ls_var[device]
def apply_loss_scaling(self, value: TfExpression) -> TfExpression:
"""Apply dynamic loss scaling for the given expression."""
assert tfutil.is_tf_expression(value)
if not self.use_loss_scaling:
return value
return value * tfutil.exp2(self.get_loss_scaling_var(value.device))
def undo_loss_scaling(self, value: TfExpression) -> TfExpression:
"""Undo the effect of dynamic loss scaling for the given expression."""
assert tfutil.is_tf_expression(value)
if not self.use_loss_scaling:
return value
return value * tfutil.exp2(-self.get_loss_scaling_var(value.device)) # pylint: disable=invalid-unary-operand-type
|
stylegan-master
|
dnnlib/tflib/optimizer.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Submit a function to be run either locally or in a computing cluster."""
import copy
import io
import os
import pathlib
import pickle
import platform
import pprint
import re
import shutil
import time
import traceback
import typeguard
import zipfile
from enum import Enum
from .. import util
from ..util import EasyDict
class SubmitTarget(Enum):
"""The target where the function should be run.
LOCAL: Run it locally.
"""
LOCAL = 1
class PathType(Enum):
"""Determines in which format should a path be formatted.
WINDOWS: Format with Windows style.
LINUX: Format with Linux/Posix style.
AUTO: Use current OS type to select either WINDOWS or LINUX.
"""
WINDOWS = 1
LINUX = 2
AUTO = 3
_user_name_override = None
class SubmitConfig(util.EasyDict):
"""Strongly typed config dict needed to submit runs.
Attributes:
run_dir_root: Path to the run dir root. Can be optionally templated with tags. Needs to always be run through get_path_from_template.
run_desc: Description of the run. Will be used in the run dir and task name.
run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir.
run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will be the src directory inside the run dir.
submit_target: Submit target enum value. Used to select where the run is actually launched.
num_gpus: Number of GPUs used/requested for the run.
print_info: Whether to print debug information when submitting.
ask_confirmation: Whether to ask a confirmation before submitting.
use_typeguard: Whether to use the typeguard module for run-time type checking (slow!).
run_id: Automatically populated value during submit.
run_name: Automatically populated value during submit.
run_dir: Automatically populated value during submit.
run_func_name: Automatically populated value during submit.
run_func_kwargs: Automatically populated value during submit.
user_name: Automatically populated value during submit. Can be set by the user which will then override the automatic value.
task_name: Automatically populated value during submit.
host_name: Automatically populated value during submit.
"""
def __init__(self):
super().__init__()
# run (set these)
self.run_dir_root = "" # should always be passed through get_path_from_template
self.run_desc = ""
self.run_dir_ignore = ["__pycache__", "*.pyproj", "*.sln", "*.suo", ".cache", ".idea", ".vs", ".vscode"]
self.run_dir_extra_files = None
# submit (set these)
self.submit_target = SubmitTarget.LOCAL
self.num_gpus = 1
self.print_info = False
self.ask_confirmation = False
self.use_typeguard = False
# (automatically populated)
self.run_id = None
self.run_name = None
self.run_dir = None
self.run_func_name = None
self.run_func_kwargs = None
self.user_name = None
self.task_name = None
self.host_name = "localhost"
def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str:
"""Replace tags in the given path template and return either Windows or Linux formatted path."""
# automatically select path type depending on running OS
if path_type == PathType.AUTO:
if platform.system() == "Windows":
path_type = PathType.WINDOWS
elif platform.system() == "Linux":
path_type = PathType.LINUX
else:
raise RuntimeError("Unknown platform")
path_template = path_template.replace("<USERNAME>", get_user_name())
# return correctly formatted path
if path_type == PathType.WINDOWS:
return str(pathlib.PureWindowsPath(path_template))
elif path_type == PathType.LINUX:
return str(pathlib.PurePosixPath(path_template))
else:
raise RuntimeError("Unknown platform")
def get_template_from_path(path: str) -> str:
"""Convert a normal path back to its template representation."""
# replace all path parts with the template tags
path = path.replace("\\", "/")
return path
def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str:
"""Convert a normal path to template and the convert it back to a normal path with given path type."""
path_template = get_template_from_path(path)
path = get_path_from_template(path_template, path_type)
return path
def set_user_name_override(name: str) -> None:
"""Set the global username override value."""
global _user_name_override
_user_name_override = name
def get_user_name():
"""Get the current user name."""
if _user_name_override is not None:
return _user_name_override
elif platform.system() == "Windows":
return os.getlogin()
elif platform.system() == "Linux":
try:
import pwd # pylint: disable=import-error
return pwd.getpwuid(os.geteuid()).pw_name # pylint: disable=no-member
except:
return "unknown"
else:
raise RuntimeError("Unknown platform")
def _create_run_dir_local(submit_config: SubmitConfig) -> str:
"""Create a new run dir with increasing ID number at the start."""
run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO)
if not os.path.exists(run_dir_root):
print("Creating the run dir root: {}".format(run_dir_root))
os.makedirs(run_dir_root)
submit_config.run_id = _get_next_run_id_local(run_dir_root)
submit_config.run_name = "{0:05d}-{1}".format(submit_config.run_id, submit_config.run_desc)
run_dir = os.path.join(run_dir_root, submit_config.run_name)
if os.path.exists(run_dir):
raise RuntimeError("The run dir already exists! ({0})".format(run_dir))
print("Creating the run dir: {}".format(run_dir))
os.makedirs(run_dir)
return run_dir
def _get_next_run_id_local(run_dir_root: str) -> int:
"""Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id. Assumes IDs are numbers at the start of the directory names."""
dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))]
r = re.compile("^\\d+") # match one or more digits at the start of the string
run_id = 0
for dir_name in dir_names:
m = r.match(dir_name)
if m is not None:
i = int(m.group())
run_id = max(run_id, i + 1)
return run_id
def _populate_run_dir(run_dir: str, submit_config: SubmitConfig) -> None:
"""Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable."""
print("Copying files to the run dir")
files = []
run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name)
assert '.' in submit_config.run_func_name
for _idx in range(submit_config.run_func_name.count('.') - 1):
run_func_module_dir_path = os.path.dirname(run_func_module_dir_path)
files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=False)
dnnlib_module_dir_path = util.get_module_dir_by_obj_name("dnnlib")
files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=True)
if submit_config.run_dir_extra_files is not None:
files += submit_config.run_dir_extra_files
files = [(f[0], os.path.join(run_dir, "src", f[1])) for f in files]
files += [(os.path.join(dnnlib_module_dir_path, "submission", "_internal", "run.py"), os.path.join(run_dir, "run.py"))]
util.copy_files_and_create_dirs(files)
pickle.dump(submit_config, open(os.path.join(run_dir, "submit_config.pkl"), "wb"))
with open(os.path.join(run_dir, "submit_config.txt"), "w") as f:
pprint.pprint(submit_config, stream=f, indent=4, width=200, compact=False)
def run_wrapper(submit_config: SubmitConfig) -> None:
"""Wrap the actual run function call for handling logging, exceptions, typing, etc."""
is_local = submit_config.submit_target == SubmitTarget.LOCAL
checker = None
if submit_config.use_typeguard:
checker = typeguard.TypeChecker("dnnlib")
checker.start()
# when running locally, redirect stderr to stdout, log stdout to a file, and force flushing
if is_local:
logger = util.Logger(file_name=os.path.join(submit_config.run_dir, "log.txt"), file_mode="w", should_flush=True)
else: # when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh)
logger = util.Logger(file_name=None, should_flush=True)
import dnnlib
dnnlib.submit_config = submit_config
try:
print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name, submit_config.host_name))
start_time = time.time()
util.call_func_by_name(func_name=submit_config.run_func_name, submit_config=submit_config, **submit_config.run_func_kwargs)
print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name, util.format_time(time.time() - start_time)))
except:
if is_local:
raise
else:
traceback.print_exc()
log_src = os.path.join(submit_config.run_dir, "log.txt")
log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), "{0}-error.txt".format(submit_config.run_name))
shutil.copyfile(log_src, log_dst)
finally:
open(os.path.join(submit_config.run_dir, "_finished.txt"), "w").close()
dnnlib.submit_config = None
logger.close()
if checker is not None:
checker.stop()
def submit_run(submit_config: SubmitConfig, run_func_name: str, **run_func_kwargs) -> None:
"""Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place."""
submit_config = copy.copy(submit_config)
if submit_config.user_name is None:
submit_config.user_name = get_user_name()
submit_config.run_func_name = run_func_name
submit_config.run_func_kwargs = run_func_kwargs
assert submit_config.submit_target == SubmitTarget.LOCAL
if submit_config.submit_target in {SubmitTarget.LOCAL}:
run_dir = _create_run_dir_local(submit_config)
submit_config.task_name = "{0}-{1:05d}-{2}".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc)
submit_config.run_dir = run_dir
_populate_run_dir(run_dir, submit_config)
if submit_config.print_info:
print("\nSubmit config:\n")
pprint.pprint(submit_config, indent=4, width=200, compact=False)
print()
if submit_config.ask_confirmation:
if not util.ask_yes_no("Continue submitting the job?"):
return
run_wrapper(submit_config)
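# Usage sketch (illustrative; 'my_project.train.run' and its kwargs are hypothetical placeholders):
#   submit_config = SubmitConfig()
#   submit_config.run_dir_root = 'results'
#   submit_config.run_desc = 'example'
#   submit_run(submit_config, 'my_project.train.run', num_iterations=1000)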
|
stylegan-master
|
dnnlib/submission/submit.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
from . import run_context
from . import submit
|
stylegan-master
|
dnnlib/submission/__init__.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helpers for managing the run/training loop."""
import datetime
import json
import os
import pprint
import time
import types
from typing import Any
from . import submit
class RunContext(object):
"""Helper class for managing the run/training loop.
The context will hide the implementation details of a basic run/training loop.
    It sets things up properly, tells whether the run should be stopped, and then cleans up.
    The user should call update periodically and use should_stop to determine whether the run should be stopped.
Args:
submit_config: The SubmitConfig that is used for the current run.
config_module: The whole config module that is used for the current run.
max_epoch: Optional cached value for the max_epoch variable used in update.
"""
def __init__(self, submit_config: submit.SubmitConfig, config_module: types.ModuleType = None, max_epoch: Any = None):
self.submit_config = submit_config
self.should_stop_flag = False
self.has_closed = False
self.start_time = time.time()
self.last_update_time = time.time()
self.last_update_interval = 0.0
self.max_epoch = max_epoch
        # pretty print all the relevant content of the config module to a text file
if config_module is not None:
with open(os.path.join(submit_config.run_dir, "config.txt"), "w") as f:
filtered_dict = {k: v for k, v in config_module.__dict__.items() if not k.startswith("_") and not isinstance(v, (types.ModuleType, types.FunctionType, types.LambdaType, submit.SubmitConfig, type))}
pprint.pprint(filtered_dict, stream=f, indent=4, width=200, compact=False)
# write out details about the run to a text file
self.run_txt_data = {"task_name": submit_config.task_name, "host_name": submit_config.host_name, "start_time": datetime.datetime.now().isoformat(sep=" ")}
with open(os.path.join(submit_config.run_dir, "run.txt"), "w") as f:
pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False)
def __enter__(self) -> "RunContext":
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.close()
def update(self, loss: Any = 0, cur_epoch: Any = 0, max_epoch: Any = None) -> None:
"""Do general housekeeping and keep the state of the context up-to-date.
Should be called often enough but not in a tight loop."""
assert not self.has_closed
self.last_update_interval = time.time() - self.last_update_time
self.last_update_time = time.time()
if os.path.exists(os.path.join(self.submit_config.run_dir, "abort.txt")):
self.should_stop_flag = True
max_epoch_val = self.max_epoch if max_epoch is None else max_epoch
def should_stop(self) -> bool:
"""Tell whether a stopping condition has been triggered one way or another."""
return self.should_stop_flag
def get_time_since_start(self) -> float:
"""How much time has passed since the creation of the context."""
return time.time() - self.start_time
def get_time_since_last_update(self) -> float:
"""How much time has passed since the last call to update."""
return time.time() - self.last_update_time
def get_last_update_interval(self) -> float:
"""How much time passed between the previous two calls to update."""
return self.last_update_interval
def close(self) -> None:
"""Close the context and clean up.
Should only be called once."""
if not self.has_closed:
# update the run.txt with stopping time
self.run_txt_data["stop_time"] = datetime.datetime.now().isoformat(sep=" ")
with open(os.path.join(self.submit_config.run_dir, "run.txt"), "w") as f:
pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False)
self.has_closed = True
|
stylegan-master
|
dnnlib/submission/run_context.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper for launching run functions in computing clusters.
During the submit process, this file is copied to the appropriate run dir.
When the job is launched in the cluster, this module is the first thing that
is run inside the docker container.
"""
import os
import pickle
import sys
# PYTHONPATH should have been set so that the run_dir/src is in it
import dnnlib
def main():
    if len(sys.argv) < 4:
        raise RuntimeError("This script needs three arguments: run_dir, task_name and host_name!")
run_dir = str(sys.argv[1])
task_name = str(sys.argv[2])
host_name = str(sys.argv[3])
submit_config_path = os.path.join(run_dir, "submit_config.pkl")
# SubmitConfig should have been pickled to the run dir
if not os.path.exists(submit_config_path):
raise RuntimeError("SubmitConfig pickle file does not exist!")
submit_config: dnnlib.SubmitConfig = pickle.load(open(submit_config_path, "rb"))
dnnlib.submission.submit.set_user_name_override(submit_config.user_name)
submit_config.task_name = task_name
submit_config.host_name = host_name
dnnlib.submission.submit.run_wrapper(submit_config)
if __name__ == "__main__":
main()
|
stylegan-master
|
dnnlib/submission/_internal/run.py
|
from setuptools import setup, find_packages
setup(
name = 'mlm-pytorch',
packages = find_packages(),
version = '0.1.0',
license='MIT',
description = 'MLM (Masked Language Modeling) - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/mlm-pytorch',
keywords = [
'transformers',
'artificial intelligence',
'pretraining',
'unsupervised learning'
],
install_requires=[
'torch>=1.1.0'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
mlm-pytorch-master
|
setup.py
|
import math
from functools import reduce
import torch
from torch import nn
import torch.nn.functional as F
# helpers
def prob_mask_like(t, prob):
return torch.zeros_like(t).float().uniform_(0, 1) < prob
def mask_with_tokens(t, token_ids):
init_no_mask = torch.full_like(t, False, dtype=torch.bool)
mask = reduce(lambda acc, el: acc | (t == el), token_ids, init_no_mask)
return mask
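# The helper below samples, per row, a random subset of the allowed positions (mask == True) so that
# roughly `prob` of each row's unmasked tokens are selected. Indices are shifted by one and scattered
# into a (seq_len + 1)-wide buffer so excess candidates can be parked in a dummy column and sliced off.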
def get_mask_subset_with_prob(mask, prob):
batch, seq_len, device = *mask.shape, mask.device
max_masked = math.ceil(prob * seq_len)
num_tokens = mask.sum(dim=-1, keepdim=True)
mask_excess = (mask.cumsum(dim=-1) > (num_tokens * prob).ceil())
mask_excess = mask_excess[:, :max_masked]
rand = torch.rand((batch, seq_len), device=device).masked_fill(~mask, -1e9)
_, sampled_indices = rand.topk(max_masked, dim=-1)
sampled_indices = (sampled_indices + 1).masked_fill_(mask_excess, 0)
new_mask = torch.zeros((batch, seq_len + 1), device=device)
new_mask.scatter_(-1, sampled_indices, 1)
return new_mask[:, 1:].bool()
# main class
class MLM(nn.Module):
def __init__(
self,
transformer,
mask_prob = 0.15,
replace_prob = 0.9,
num_tokens = None,
random_token_prob = 0.,
mask_token_id = 2,
pad_token_id = 0,
mask_ignore_token_ids = []):
super().__init__()
self.transformer = transformer
# mlm related probabilities
self.mask_prob = mask_prob
self.replace_prob = replace_prob
self.num_tokens = num_tokens
self.random_token_prob = random_token_prob
# token ids
self.pad_token_id = pad_token_id
self.mask_token_id = mask_token_id
self.mask_ignore_token_ids = set([*mask_ignore_token_ids, pad_token_id])
def forward(self, seq, **kwargs):
        # do not mask [pad] tokens, or any other tokens designated to be excluded ([cls], [sep])
# also do not include these special tokens in the tokens chosen at random
no_mask = mask_with_tokens(seq, self.mask_ignore_token_ids)
mask = get_mask_subset_with_prob(~no_mask, self.mask_prob)
# mask input with mask tokens with probability of `replace_prob` (keep tokens the same with probability 1 - replace_prob)
masked_seq = seq.clone().detach()
# derive labels to predict
labels = seq.masked_fill(~mask, self.pad_token_id)
# if random token probability > 0 for mlm
if self.random_token_prob > 0:
assert self.num_tokens is not None, 'num_tokens keyword must be supplied when instantiating MLM if using random token replacement'
random_token_prob = prob_mask_like(seq, self.random_token_prob)
random_tokens = torch.randint(0, self.num_tokens, seq.shape, device=seq.device)
random_no_mask = mask_with_tokens(random_tokens, self.mask_ignore_token_ids)
random_token_prob &= ~random_no_mask
masked_seq = torch.where(random_token_prob, random_tokens, masked_seq)
# remove tokens that were substituted randomly from being [mask]ed later
mask = mask & ~random_token_prob
# [mask] input
replace_prob = prob_mask_like(seq, self.replace_prob)
masked_seq = masked_seq.masked_fill(mask * replace_prob, self.mask_token_id)
# get generator output and get mlm loss
logits = self.transformer(masked_seq, **kwargs)
mlm_loss = F.cross_entropy(
logits.transpose(1, 2),
labels,
ignore_index = self.pad_token_id
)
return mlm_loss
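# Usage sketch (illustrative; `transformer` is any module mapping (batch, seq) token ids to (batch, seq, num_tokens) logits):
#   mlm = MLM(transformer, mask_prob = 0.15, replace_prob = 0.9, num_tokens = 20000, random_token_prob = 0.1)
#   seq = torch.randint(0, 20000, (4, 512))
#   loss = mlm(seq)
#   loss.backward()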
|
mlm-pytorch-master
|
mlm_pytorch/mlm_pytorch.py
|
from mlm_pytorch.mlm_pytorch import MLM
|
mlm-pytorch-master
|
mlm_pytorch/__init__.py
|
from setuptools import setup, find_packages
setup(
name = 'adan-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.0',
license='MIT',
description = 'Adan - (ADAptive Nesterov momentum algorithm) Optimizer in Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/Adan-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'optimizer',
],
install_requires=[
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
Adan-pytorch-main
|
setup.py
|
import math
import torch
from torch.optim import Optimizer
def exists(val):
return val is not None
class Adan(Optimizer):
def __init__(
self,
params,
lr = 1e-3,
betas = (0.02, 0.08, 0.01),
eps = 1e-8,
weight_decay = 0,
restart_cond: callable = None
):
assert len(betas) == 3
defaults = dict(
lr = lr,
betas = betas,
eps = eps,
weight_decay = weight_decay,
restart_cond = restart_cond
)
super().__init__(params, defaults)
def step(self, closure = None):
loss = None
if exists(closure):
loss = closure()
for group in self.param_groups:
lr = group['lr']
beta1, beta2, beta3 = group['betas']
weight_decay = group['weight_decay']
eps = group['eps']
restart_cond = group['restart_cond']
for p in group['params']:
if not exists(p.grad):
continue
data, grad = p.data, p.grad.data
assert not grad.is_sparse
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['prev_grad'] = torch.zeros_like(grad)
state['m'] = torch.zeros_like(grad)
state['v'] = torch.zeros_like(grad)
state['n'] = torch.zeros_like(grad)
step, m, v, n, prev_grad = state['step'], state['m'], state['v'], state['n'], state['prev_grad']
if step > 0:
prev_grad = state['prev_grad']
# main algorithm
m.mul_(1 - beta1).add_(grad, alpha = beta1)
grad_diff = grad - prev_grad
v.mul_(1 - beta2).add_(grad_diff, alpha = beta2)
next_n = (grad + (1 - beta2) * grad_diff) ** 2
n.mul_(1 - beta3).add_(next_n, alpha = beta3)
# bias correction terms
step += 1
correct_m, correct_v, correct_n = map(lambda n: 1 / (1 - (1 - n) ** step), (beta1, beta2, beta3))
# gradient step
def grad_step_(data, m, v, n):
weighted_step_size = lr / (n * correct_n).sqrt().add_(eps)
denom = 1 + weight_decay * lr
data.addcmul_(weighted_step_size, (m * correct_m + (1 - beta2) * v * correct_v), value = -1.).div_(denom)
grad_step_(data, m, v, n)
# restart condition
if exists(restart_cond) and restart_cond(state):
m.data.copy_(grad)
v.zero_()
n.data.copy_(grad ** 2)
grad_step_(data, m, v, n)
# set new incremented step
prev_grad.copy_(grad)
state['step'] = step
return loss
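# Usage sketch (illustrative; any iterable of torch parameters works):
#   model = torch.nn.Linear(10, 2)
#   opt = Adan(model.parameters(), lr = 1e-3, betas = (0.02, 0.08, 0.01), weight_decay = 0.02)
#   loss = model(torch.randn(8, 10)).sum()
#   loss.backward()
#   opt.step()
#   opt.zero_grad()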
|
Adan-pytorch-main
|
adan_pytorch/adan.py
|
from adan_pytorch.adan import Adan
|
Adan-pytorch-main
|
adan_pytorch/__init__.py
|
from setuptools import setup, find_packages
setup(
name = 'perceiver-ar-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.10',
license='MIT',
description = 'Perceiver AR',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/perceiver-ar-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'long context',
'attention'
],
install_requires=[
'einops>=0.4',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
perceiver-ar-pytorch-main
|
setup.py
|
import gzip
import random
import numpy as np
import torch
import torch.optim as optim
import tqdm
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from perceiver_ar_pytorch import PerceiverAR
from perceiver_ar_pytorch.autoregressive_wrapper import AutoregressiveWrapper
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 4096
PREFIX_SEQ_LEN = 3584
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
model = PerceiverAR(
num_tokens = 256,
dim = 512,
depth = 8,
heads = 8,
dim_head = 64,
cross_attn_dropout = 0.5,
max_seq_len = SEQ_LEN,
cross_attn_seq_len = PREFIX_SEQ_LEN
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size=BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size=BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10.0, desc="training"):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f"training loss: {loss.item()}")
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f"validation loss: {loss.item()}")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print(f"%s \n\n %s", (prime, "*" * 100))
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str)
|
perceiver-ar-pytorch-main
|
train.py
|
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import nn
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres=0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float("-inf"))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, pad_value=0):
super().__init__()
self.max_seq_len = net.max_seq_len
self.pad_value = pad_value
self.net = net
@torch.no_grad()
@eval_decorator
def generate(
self,
start_tokens,
seq_len,
eos_token=None,
temperature=1.0,
filter_thres=0.9,
**kwargs
):
b, n, device = *start_tokens.shape, start_tokens.device
out = start_tokens
for _ in range(seq_len):
logits = self.net(
out[:, -self.max_seq_len:],
**kwargs
)[:, -1]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_token = out == eos_token
if is_eos_token.any(dim=-1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_token, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim=-1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, n:]
return out
def forward(self, x, **kwargs):
x_inp, x_labels = x[:, :-1], x[:, 1:]
return self.net(x_inp, labels = x_labels, **kwargs)
|
perceiver-ar-pytorch-main
|
perceiver_ar_pytorch/autoregressive_wrapper.py
|
from perceiver_ar_pytorch.perceiver_ar_pytorch import PerceiverAR
|
perceiver-ar-pytorch-main
|
perceiver_ar_pytorch/__init__.py
|
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
# feedforward
def FeedForward(dim, mult = 4, dropout = 0.):
hidden_dim = int(dim * mult)
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, hidden_dim, bias = False),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim, bias = False)
)
# rotary positional embedding
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
def forward(self, max_seq_len, *, device):
seq = torch.arange(max_seq_len, device = device, dtype = self.inv_freq.dtype)
freqs = einsum("i , j -> i j", seq, self.inv_freq)
return torch.cat((freqs, freqs), dim = -1)
def rotate_half(x):
x = rearrange(x, "... (j d) -> ... j d", j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(pos, t):
seq_len, rotate_dim = t.shape[-2], pos.shape[-1]
pos = pos[..., -seq_len:, :]
t, t_pass = t[..., :rotate_dim], t[..., rotate_dim:]
t = (t * pos.cos()) + (rotate_half(t) * pos.sin())
return torch.cat((t, t_pass), dim = -1)
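# Shape note (illustrative): for a tensor t of shape (b, h, n, d) and pos of shape (n, d_rot),
# only the first d_rot channels of t are rotated; any remaining channels pass through unchanged.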
# attention
class CausalAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
dropout = 0.
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = heads * dim_head
self.norm = nn.LayerNorm(dim)
self.dropout = nn.Dropout(dropout)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(self, x, rotary_pos_emb = None):
x = self.norm(x)
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
q = q * self.scale
if exists(rotary_pos_emb):
q = apply_rotary_pos_emb(rotary_pos_emb, q)
k = apply_rotary_pos_emb(rotary_pos_emb, k)
sim = einsum('b h i d, b h j d -> b h i j', q, k)
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), device = x.device, dtype = torch.bool).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class CausalPrefixAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
max_heads_process = 2,
dropout = 0.,
cross_attn_dropout = 0.
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
self.max_heads_process = max_heads_process
inner_dim = heads * dim_head
self.norm = nn.LayerNorm(dim)
self.context_norm = nn.LayerNorm(dim)
self.dropout = nn.Dropout(dropout)
self.cross_attn_dropout = cross_attn_dropout # they drop out a percentage of the prefix during training, shown to help prevent overfitting
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
def forward(self, x, context, context_mask = None, rotary_pos_emb = None):
batch, context_len, device = x.shape[0], context.shape[-2], x.device
q_rotary_pos_emb = rotary_pos_emb
k_rotary_pos_emb = rotary_pos_emb
# take care of cross attention dropout
if self.training and self.cross_attn_dropout > 0.:
rand = torch.zeros((batch, context_len), device = device).uniform_()
keep_context_len = context_len - int(context_len * self.cross_attn_dropout)
keep_indices = rand.topk(keep_context_len, dim = -1).indices
keep_mask = torch.zeros_like(rand).scatter_(1, keep_indices, 1).bool()
context = rearrange(context[keep_mask], '(b n) d -> b n d', b = batch)
if exists(context_mask):
context_mask = rearrange(context_mask[keep_mask], '(b n) -> b n', b = batch)
# operate on rotary position embeddings for keys
k_rotary_pos_emb = repeat(k_rotary_pos_emb, '... -> b ...', b = batch)
k_rotary_pos_emb_context, k_rotary_pos_emb_seq = k_rotary_pos_emb[:, :context_len], k_rotary_pos_emb[:, context_len:]
k_rotary_pos_emb_context = rearrange(k_rotary_pos_emb_context[keep_mask], '(b n) d -> b n d', b = batch)
k_rotary_pos_emb = torch.cat((k_rotary_pos_emb_context, k_rotary_pos_emb_seq), dim = 1)
k_rotary_pos_emb = rearrange(k_rotary_pos_emb, 'b n d -> b 1 n d')
# normalization
x = self.norm(x)
context = self.context_norm(context)
# derive queries, keys, values
q = self.to_q(x)
k_input, v_input = self.to_kv(x).chunk(2, dim = -1)
k_context, v_context = self.to_kv(context).chunk(2, dim = -1)
k = torch.cat((k_context, k_input), dim = 1)
v = torch.cat((v_context, v_input), dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
q = q * self.scale
# rotate queries and keys with rotary embeddings
if exists(rotary_pos_emb):
q = apply_rotary_pos_emb(q_rotary_pos_emb, q)
k = apply_rotary_pos_emb(k_rotary_pos_emb, k)
# take care of masking
i, j = q.shape[-2], k.shape[-2]
mask_value = -torch.finfo(q.dtype).max
if exists(context_mask):
mask_len = context_mask.shape[-1]
context_mask = F.pad(context_mask, (0, max(j - mask_len, 0)), value = True)
context_mask = rearrange(context_mask, 'b j -> b 1 1 j')
causal_mask = torch.ones((i, j), device = x.device, dtype = torch.bool).triu(j - i + 1)
# process in chunks of heads
out = []
max_heads = self.max_heads_process
for q_chunk, k_chunk, v_chunk in zip(q.split(max_heads, dim = 1), k.split(max_heads, dim = 1), v.split(max_heads, dim = 1)):
sim = einsum('b h i d, b h j d -> b h i j', q_chunk, k_chunk)
if exists(context_mask):
sim = sim.masked_fill(~context_mask, mask_value)
sim = sim.masked_fill(causal_mask, mask_value)
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
out_chunk = einsum('b h i j, b h j d -> b h i d', attn, v_chunk)
out.append(out_chunk)
# concat all the heads together
out = torch.cat(out, dim = 1)
# merge heads and then combine with linear
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class PerceiverAR(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
max_seq_len,
cross_attn_seq_len,
dim_head = 64,
heads = 8,
dropout = 0.,
cross_attn_dropout = 0.,
ff_mult = 4,
perceive_depth = 1,
perceive_max_heads_process = 2 # processes the heads in the perceiver layer in chunks to lower peak memory, in the case the prefix is really long
):
super().__init__()
assert max_seq_len > cross_attn_seq_len, 'max_seq_len must be greater than cross_attn_seq_len, the length of the sequence for which to cross attend to "perceiver" style'
self.max_seq_len = max_seq_len
self.cross_attn_seq_len = cross_attn_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.rotary_pos_emb = RotaryEmbedding(dim = max(32, dim_head // 2))
self.perceive_layers = nn.ModuleList([])
for _ in range(perceive_depth):
self.perceive_layers.append(nn.ModuleList([
CausalPrefixAttention(dim = dim, dim_head = dim_head, heads = heads, max_heads_process = perceive_max_heads_process, dropout = dropout, cross_attn_dropout = cross_attn_dropout),
FeedForward(dim, mult = ff_mult, dropout = dropout)
]))
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
CausalAttention(dim = dim, dim_head = dim_head, heads = heads),
FeedForward(dim, mult = ff_mult, dropout = dropout),
]))
self.to_logits = nn.Linear(dim, num_tokens, bias = False)
def forward(
self,
x,
prefix_mask = None,
labels = None
):
seq_len, device = x.shape[1], x.device
assert self.cross_attn_seq_len < seq_len <= self.max_seq_len
x = self.token_emb(x)
x = x + self.pos_emb(torch.arange(seq_len, device = device))
# rotary positional embedding
rotary_pos_emb = self.rotary_pos_emb(seq_len, device = device)
# divide into prefix to cross attend to and sequence to self attend to
prefix, x = x[:, :self.cross_attn_seq_len], x[:, self.cross_attn_seq_len:]
# initial perceiver attention and feedforward (one cross attention)
for cross_attn, ff in self.perceive_layers:
x = cross_attn(x, prefix, context_mask = prefix_mask, rotary_pos_emb = rotary_pos_emb) + x
x = ff(x) + x
# layers
for attn, ff in self.layers:
x = attn(x, rotary_pos_emb = rotary_pos_emb) + x
x = ff(x) + x
# to logits
logits = self.to_logits(x)
# take care of cross entropy loss if labels are provided
if not exists(labels):
return logits
labels = labels[:, self.cross_attn_seq_len:]
return F.cross_entropy(rearrange(logits, 'b n c -> b c n'), labels, ignore_index = 0)
|
perceiver-ar-pytorch-main
|
perceiver_ar_pytorch/perceiver_ar_pytorch.py
|
from base import BaseTestCase
from parameterized import parameterized
#class SearchTest(BaseTestCase):
#@parameterized.expand([['@mobile_test'], ['@mobile_test_2']])
#def test_username_search(self, username):
#self.search_username(username)
#self.assert_text(f'{username}')
|
nitter-master
|
tests/test_search.py
|
from base import BaseTestCase, Timeline
from parameterized import parameterized
normal = [['jack'], ['elonmusk']]
after = [['jack', '1681686036294803456'],
['elonmusk', '1681686036294803456']]
no_more = [['mobile_test_8?cursor=1000']]
empty = [['emptyuser'], ['mobile_test_10']]
protected = [['mobile_test_7'], ['Empty_user']]
photo_rail = [['mobile_test', ['Bo0nDsYIYAIjqVn', 'BoQbwJAIUAA0QCY', 'BoQbRQxIIAA3FWD', 'Bn8Qh8iIIAABXrG']]]
class TweetTest(BaseTestCase):
@parameterized.expand(normal)
def test_timeline(self, username):
self.open_nitter(username)
self.assert_element_present(Timeline.older)
self.assert_element_absent(Timeline.newest)
self.assert_element_absent(Timeline.end)
self.assert_element_absent(Timeline.none)
@parameterized.expand(after)
def test_after(self, username, cursor):
self.open_nitter(f'{username}?cursor={cursor}')
self.assert_element_present(Timeline.newest)
self.assert_element_present(Timeline.older)
self.assert_element_absent(Timeline.end)
self.assert_element_absent(Timeline.none)
@parameterized.expand(no_more)
def test_no_more(self, username):
self.open_nitter(username)
self.assert_text('No more items', Timeline.end)
self.assert_element_present(Timeline.newest)
self.assert_element_absent(Timeline.older)
@parameterized.expand(empty)
def test_empty(self, username):
self.open_nitter(username)
self.assert_text('No items found', Timeline.none)
self.assert_element_absent(Timeline.newest)
self.assert_element_absent(Timeline.older)
self.assert_element_absent(Timeline.end)
@parameterized.expand(protected)
def test_protected(self, username):
self.open_nitter(username)
self.assert_text('This account\'s tweets are protected.', Timeline.protected)
self.assert_element_absent(Timeline.newest)
self.assert_element_absent(Timeline.older)
self.assert_element_absent(Timeline.end)
#@parameterized.expand(photo_rail)
#def test_photo_rail(self, username, images):
#self.open_nitter(username)
#self.assert_element_visible(Timeline.photo_rail)
#for i, url in enumerate(images):
#img = self.get_attribute(Timeline.photo_rail + f' a:nth-child({i + 1}) img', 'src')
#self.assertIn(url, img)
|
nitter-master
|
tests/test_timeline.py
|
from base import BaseTestCase, Profile
from parameterized import parameterized
profiles = [
['mobile_test', 'Test account',
'Test Account. test test Testing username with @mobile_test_2 and a #hashtag',
'San Francisco, CA', 'example.com/foobar', 'Joined October 2009', '98'],
['mobile_test_2', 'mobile test 2', '', '', '', 'Joined January 2011', '13']
]
verified = [['jack'], ['elonmusk']]
protected = [
['mobile_test_7', 'mobile test 7', ''],
['Poop', 'Randy', 'Social media fanatic.']
]
invalid = [['thisprofiledoesntexist'], ['%']]
banner_image = [
['mobile_test', 'profile_banners%2F82135242%2F1384108037%2F1500x500']
]
class ProfileTest(BaseTestCase):
    @parameterized.expand(profiles)
    def test_data(self, username, fullname, bio, location, website, joinDate, mediaCount):
        self.open_nitter(username)
        self.assert_exact_text(fullname, Profile.fullname)
        self.assert_exact_text(f'@{username}', Profile.username)
        tests = [
            (bio, Profile.bio),
            (location, Profile.location),
            (website, Profile.website),
            (joinDate, Profile.joinDate),
            (mediaCount + " Photos and videos", Profile.mediaCount)
        ]
        for text, selector in tests:
            if len(text) > 0:
                self.assert_exact_text(text, selector)
            else:
                self.assert_element_absent(selector)
    @parameterized.expand(verified)
    def test_verified(self, username):
        self.open_nitter(username)
        self.assert_element_visible(Profile.verified)
    @parameterized.expand(protected)
    def test_protected(self, username, fullname, bio):
        self.open_nitter(username)
        self.assert_element_visible(Profile.protected)
        self.assert_exact_text(fullname, Profile.fullname)
        self.assert_exact_text(f'@{username}', Profile.username)
        if len(bio) > 0:
            self.assert_text(bio, Profile.bio)
        else:
            self.assert_element_absent(Profile.bio)
    @parameterized.expand(invalid)
    def test_invalid_username(self, username):
        self.open_nitter(username)
        self.assert_text(f'User "{username}" not found')
    def test_suspended(self):
        self.open_nitter('suspendme')
        self.assert_text('User "suspendme" has been suspended')
    @parameterized.expand(banner_image)
    def test_banner_image(self, username, url):
        self.open_nitter(username)
        banner = self.find_element(Profile.banner + ' img')
        self.assertIn(url, banner.get_attribute('src'))
|
nitter-master
|
tests/test_profile.py
|
from base import BaseTestCase, Conversation
from parameterized import parameterized
thread = [
['octonion/status/975253897697611777', [], 'Based', ['Crystal', 'Julia'], [
['For', 'Then', 'Okay,', 'Python', 'Speed', 'Java', 'Coding', 'I', 'You'],
['yeah,']
]],
['octonion/status/975254452625002496', ['Based'], 'Crystal', ['Julia'], []],
['octonion/status/975256058384887808', ['Based', 'Crystal'], 'Julia', [], []],
['gauravssnl/status/975364889039417344',
['Based', 'For', 'Then', 'Okay,', 'Python'], 'Speed', [], [
['Java', 'Coding', 'I', 'You'], ['JAVA!']
]],
['d0m96/status/1141811379407425537', [], 'I\'m',
['The', 'The', 'Today', 'Some', 'If', 'There', 'Above'],
[['Thank', 'Also,']]],
['gmpreussner/status/999766552546299904', [], 'A', [],
[['I', 'Especially'], ['I']]]
]
class ThreadTest(BaseTestCase):
    def find_tweets(self, selector):
        return self.find_elements(f"{selector} {Conversation.tweet_text}")
    def compare_first_word(self, tweets, selector):
        if len(tweets) > 0:
            self.assert_element_visible(selector)
            for i, tweet in enumerate(self.find_tweets(selector)):
                text = tweet.text.split(" ")[0]
                self.assert_equal(tweets[i], text)
    @parameterized.expand(thread)
    def test_thread(self, tweet, before, main, after, replies):
        self.open_nitter(tweet)
        self.assert_element_visible(Conversation.main)
        self.assert_text(main, Conversation.main)
        self.compare_first_word(before, Conversation.before)
        self.compare_first_word(after, Conversation.after)
        for i, reply in enumerate(self.find_elements(Conversation.thread)):
            selector = Conversation.replies + f" > div:nth-child({i + 1})"
            self.compare_first_word(replies[i], selector)
|
nitter-master
|
tests/test_thread.py
|
from base import BaseTestCase, Card, Conversation
from parameterized import parameterized
card = [
['nim_lang/status/1136652293510717440',
'Version 0.20.0 released',
'We are very proud to announce Nim version 0.20. This is a massive release, both literally and figuratively. It contains more than 1,000 commits and it marks our release candidate for version 1.0!',
'nim-lang.org', True],
['voidtarget/status/1094632512926605312',
'Basic OBS Studio plugin, written in nim, supporting C++ (C fine too)',
'Basic OBS Studio plugin, written in nim, supporting C++ (C fine too) - obsplugin.nim',
'gist.github.com', True],
['FluentAI/status/1116417904831029248',
'Amazon’s Alexa isn’t just AI — thousands of humans are listening',
'One of the only ways to improve Alexa is to have human beings check it for errors',
'theverge.com', True],
['nim_lang/status/1082989146040340480',
'Nim in 2018: A short recap',
'There were several big news in the Nim world in 2018 – two new major releases, partnership with Status, and much more. But let us go chronologically.',
'nim-lang.org', True]
]
no_thumb = [
['Thom_Wolf/status/1122466524860702729',
'facebookresearch/fairseq',
'Facebook AI Research Sequence-to-Sequence Toolkit written in Python. - GitHub - facebookresearch/fairseq: Facebook AI Research Sequence-to-Sequence Toolkit written in Python.',
'github.com'],
['brent_p/status/1088857328680488961',
'Hts Nim Sugar',
'hts-nim is a library that allows one to use htslib via the nim programming language. Nim is a garbage-collected language that compiles to C and often has similar performance. I have become very...',
'brentp.github.io'],
['voidtarget/status/1133028231672582145',
'sinkingsugar/nimqt-example',
'A sample of a Qt app written using mostly nim. Contribute to sinkingsugar/nimqt-example development by creating an account on GitHub.',
'github.com']
]
playable = [
['nim_lang/status/1118234460904919042',
'Nim development blog 2019-03',
'Arne (aka Krux02)* debugging: * improved nim-gdb, $ works, framefilter * alias for --debugger:native: -g* bugs: * forwarding of .pure. * sizeof union* fe...',
'youtube.com'],
['nim_lang/status/1121090879823986688',
'Nim - First natively compiled language w/ hot code-reloading at...',
'#nim #c++ #ACCUConfNim is a statically typed systems and applications programming language which offers perhaps some of the most powerful metaprogramming cap...',
'youtube.com']
]
class CardTest(BaseTestCase):
    @parameterized.expand(card)
    def test_card(self, tweet, title, description, destination, large):
        self.open_nitter(tweet)
        c = Card(Conversation.main + " ")
        self.assert_text(title, c.title)
        self.assert_text(destination, c.destination)
        self.assertIn('/pic/', self.get_image_url(c.image + ' img'))
        if len(description) > 0:
            self.assert_text(description, c.description)
        if large:
            self.assert_element_visible('.card.large')
        else:
            self.assert_element_not_visible('.card.large')
    @parameterized.expand(no_thumb)
    def test_card_no_thumb(self, tweet, title, description, destination):
        self.open_nitter(tweet)
        c = Card(Conversation.main + " ")
        self.assert_text(title, c.title)
        self.assert_text(destination, c.destination)
        if len(description) > 0:
            self.assert_text(description, c.description)
    @parameterized.expand(playable)
    def test_card_playable(self, tweet, title, description, destination):
        self.open_nitter(tweet)
        c = Card(Conversation.main + " ")
        self.assert_text(title, c.title)
        self.assert_text(destination, c.destination)
        self.assertIn('/pic/', self.get_image_url(c.image + ' img'))
        self.assert_element_visible('.card-overlay')
        if len(description) > 0:
            self.assert_text(description, c.description)
|
nitter-master
|
tests/test_card.py
|
from base import BaseTestCase, Quote, Conversation
from parameterized import parameterized
text = [
['elonmusk/status/1138136540096319488',
'TREV PAGE', '@Model3Owners',
"""As of March 58.4% of new car sales in Norway are electric.
What are we doing wrong? reuters.com/article/us-norwa…"""],
['nim_lang/status/1491461266849808397#m',
'Nim language', '@nim_lang',
"""What's better than Nim 1.6.0?
Nim 1.6.2 :)
nim-lang.org/blog/2021/12/17…"""]
]
image = [
['elonmusk/status/1138827760107790336', 'D83h6Y8UIAE2Wlz'],
['SpaceX/status/1067155053461426176', 'Ds9EYfxXoAAPNmx']
]
gif = [
['SpaceX/status/747497521593737216', 'Cl-R5yFWkAA_-3X'],
['nim_lang/status/1068099315074248704', 'DtJSqP9WoAAKdRC']
]
video = [
['bkuensting/status/1067316003200217088', 'IyCaQlzF0q8u9vBd']
]
class QuoteTest(BaseTestCase):
    @parameterized.expand(text)
    def test_text(self, tweet, fullname, username, text):
        self.open_nitter(tweet)
        quote = Quote(Conversation.main + " ")
        self.assert_text(fullname, quote.fullname)
        self.assert_text(username, quote.username)
        self.assert_text(text, quote.text)
    @parameterized.expand(image)
    def test_image(self, tweet, url):
        self.open_nitter(tweet)
        quote = Quote(Conversation.main + " ")
        self.assert_element_visible(quote.media)
        self.assertIn(url, self.get_image_url(quote.media + ' img'))
    @parameterized.expand(gif)
    def test_gif(self, tweet, url):
        self.open_nitter(tweet)
        quote = Quote(Conversation.main + " ")
        self.assert_element_visible(quote.media)
        self.assertIn(url, self.get_attribute(quote.media + ' source', 'src'))
    @parameterized.expand(video)
    def test_video(self, tweet, url):
        self.open_nitter(tweet)
        quote = Quote(Conversation.main + " ")
        self.assert_element_visible(quote.media)
        self.assertIn(url, self.get_image_url(quote.media + ' img'))
|
nitter-master
|
tests/test_quote.py
|
from base import BaseTestCase, Tweet, get_timeline_tweet
from parameterized import parameterized
# image = tweet + 'div.attachments.media-body > div > div > a > div > img'
# self.assert_true(self.get_image_url(image).split('/')[0] == 'http')
timeline = [
[1, 'Test account', 'mobile_test', '10 Aug 2016', '763483571793174528',
'.'],
[3, 'Test account', 'mobile_test', '3 Mar 2016', '705522133443571712',
'LIVE on #Periscope pscp.tv/w/aadiTzF6dkVOTXZSbX…'],
[6, 'mobile test 2', 'mobile_test_2', '1 Oct 2014', '517449200045277184',
'Testing. One two three four. Test.']
]
status = [
[20, 'jack', 'jack', '21 Mar 2006', 'just setting up my twttr'],
[134849778302464000, 'The Twoffice', 'TheTwoffice', '11 Nov 2011', 'test'],
[105685475985080322, 'The Twoffice', 'TheTwoffice', '22 Aug 2011', 'regular tweet'],
[572593440719912960, 'Test account', 'mobile_test', '3 Mar 2015', 'testing test']
]
invalid = [
['mobile_test/status/120938109238'],
['TheTwoffice/status/8931928312']
]
multiline = [
[400897186990284800, 'mobile_test_3',
"""
♔
KEEP
CALM
AND
CLICHÉ
ON"""]
]
link = [
['nim_lang/status/1110499584852353024', [
'nim-lang.org/araq/ownedrefs.…',
'news.ycombinator.com/item?id…',
'teddit.net/r/programming…'
]],
['nim_lang/status/1125887775151140864', [
'en.wikipedia.org/wiki/Nim_(p…'
]],
['hiankun_taioan/status/1086916335215341570', [
'(hackernoon.com/interview-wit…)'
]],
['archillinks/status/1146302618223951873', [
'flickr.com/photos/87101284@N…',
'hisafoto.tumblr.com/post/176…'
]],
['archillinks/status/1146292551936335873', [
'flickr.com/photos/michaelrye…',
'furtho.tumblr.com/post/16618…'
]]
]
username = [
['Bountysource/status/1094803522053320705', ['nim_lang']],
['leereilly/status/1058464250098704385', ['godotengine', 'unity3d', 'nim_lang']]
]
emoji = [
['Tesla/status/1134850442511257600', '🌈❤️🧡💛💚💙💜']
]
retweet = [
[7, 'mobile_test_2', 'mobile test 2', 'Test account', '@mobile_test', '1234'],
[3, 'mobile_test_8', 'mobile test 8', 'jack', '@jack', 'twttr']
]
# reply = [
# ['mobile_test/with_replies', 15]
# ]
class TweetTest(BaseTestCase):
    # @parameterized.expand(timeline)
    # def test_timeline(self, index, fullname, username, date, tid, text):
    # self.open_nitter(username)
    # tweet = get_timeline_tweet(index)
    # self.assert_exact_text(fullname, tweet.fullname)
    # self.assert_exact_text('@' + username, tweet.username)
    # self.assert_exact_text(date, tweet.date)
    # self.assert_text(text, tweet.text)
    # permalink = self.find_element(tweet.date + ' a')
    # self.assertIn(tid, permalink.get_attribute('href'))
    @parameterized.expand(status)
    def test_status(self, tid, fullname, username, date, text):
        tweet = Tweet()
        self.open_nitter(f'{username}/status/{tid}')
        self.assert_exact_text(fullname, tweet.fullname)
        self.assert_exact_text('@' + username, tweet.username)
        self.assert_exact_text(date, tweet.date)
        self.assert_text(text, tweet.text)
    @parameterized.expand(multiline)
    def test_multiline_formatting(self, tid, username, text):
        self.open_nitter(f'{username}/status/{tid}')
        self.assert_text(text.strip('\n'), '.main-tweet')
    @parameterized.expand(emoji)
    def test_emoji(self, tweet, text):
        self.open_nitter(tweet)
        self.assert_text(text, '.main-tweet')
    @parameterized.expand(link)
    def test_link(self, tweet, links):
        self.open_nitter(tweet)
        for link in links:
            self.assert_text(link, '.main-tweet')
    @parameterized.expand(username)
    def test_username(self, tweet, usernames):
        self.open_nitter(tweet)
        for un in usernames:
            link = self.find_link_text(f'@{un}')
            self.assertIn(f'/{un}', link.get_property('href'))
    # @parameterized.expand(retweet)
    # def test_retweet(self, index, url, retweet_by, fullname, username, text):
    # self.open_nitter(url)
    # tweet = get_timeline_tweet(index)
    # self.assert_text(f'{retweet_by} retweeted', tweet.retweet)
    # self.assert_text(text, tweet.text)
    # self.assert_exact_text(fullname, tweet.fullname)
    # self.assert_exact_text(username, tweet.username)
    @parameterized.expand(invalid)
    def test_invalid_id(self, tweet):
        self.open_nitter(tweet)
        self.assert_text('Tweet not found', '.error-panel')
    # @parameterized.expand(reply)
    # def test_thread(self, tweet, num):
    # self.open_nitter(tweet)
    # thread = self.find_element(f'.timeline > div:nth-child({num})')
    # self.assertIn(thread.get_attribute('class'), 'thread-line')
|
nitter-master
|
tests/test_tweet.py
|
from seleniumbase import BaseCase
class Card(object):
    def __init__(self, tweet=''):
        card = tweet + '.card '
        self.link = card + 'a'
        self.title = card + '.card-title'
        self.description = card + '.card-description'
        self.destination = card + '.card-destination'
        self.image = card + '.card-image'
class Quote(object):
    def __init__(self, tweet=''):
        quote = tweet + '.quote '
        namerow = quote + '.fullname-and-username '
        self.link = quote + '.quote-link'
        self.fullname = namerow + '.fullname'
        self.username = namerow + '.username'
        self.text = quote + '.quote-text'
        self.media = quote + '.quote-media-container'
        self.unavailable = quote + '.quote.unavailable'
class Tweet(object):
    def __init__(self, tweet=''):
        namerow = tweet + '.tweet-header '
        self.fullname = namerow + '.fullname'
        self.username = namerow + '.username'
        self.date = namerow + '.tweet-date'
        self.text = tweet + '.tweet-content.media-body'
        self.retweet = tweet + '.retweet-header'
        self.reply = tweet + '.replying-to'
class Profile(object):
    fullname = '.profile-card-fullname'
    username = '.profile-card-username'
    protected = '.icon-lock'
    verified = '.verified-icon'
    banner = '.profile-banner'
    bio = '.profile-bio'
    location = '.profile-location'
    website = '.profile-website'
    joinDate = '.profile-joindate'
    mediaCount = '.photo-rail-header'
class Timeline(object):
    newest = 'div[class="timeline-item show-more"]'
    older = 'div[class="show-more"]'
    end = '.timeline-end'
    none = '.timeline-none'
    protected = '.timeline-protected'
    photo_rail = '.photo-rail-grid'
class Conversation(object):
    main = '.main-tweet'
    before = '.before-tweet'
    after = '.after-tweet'
    replies = '.replies'
    thread = '.reply'
    tweet = '.timeline-item'
    tweet_text = '.tweet-content'
class Poll(object):
    votes = '.poll-info'
    choice = '.poll-meter'
    value = 'poll-choice-value'
    option = 'poll-choice-option'
    leader = 'leader'
class Media(object):
    container = '.attachments'
    row = '.gallery-row'
    image = '.still-image'
    video = '.gallery-video'
    gif = '.gallery-gif'
class BaseTestCase(BaseCase):
    def setUp(self):
        super(BaseTestCase, self).setUp()
    def tearDown(self):
        super(BaseTestCase, self).tearDown()
    def open_nitter(self, page=''):
        self.open(f'http://localhost:8080/{page}')
    def search_username(self, username):
        self.open_nitter()
        self.update_text('.search-bar input[type=text]', username)
        self.submit('.search-bar form')
def get_timeline_tweet(num=1):
    return Tweet(f'.timeline > div:nth-child({num}) ')
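# Illustrative usage sketch (not part of the original file): test classes inherit from
# BaseTestCase and assert against the CSS selectors defined above, e.g.
#   self.open_nitter('mobile_test')
#   self.assert_exact_text('Test account', Profile.fullname)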
|
nitter-master
|
tests/base.py
|
from base import BaseTestCase, Poll, Media
from parameterized import parameterized
from selenium.webdriver.common.by import By
poll = [
['nim_lang/status/1064219801499955200', 'Style insensitivity', '91', 1, [
('47%', 'Yay'), ('53%', 'Nay')
]],
['polls/status/1031986180622049281', 'What Tree Is Coolest?', '3,322', 1, [
('30%', 'Oak'), ('42%', 'Bonsai'), ('5%', 'Hemlock'), ('23%', 'Apple')
]]
]
image = [
['mobile_test/status/519364660823207936', 'BzUnaDFCUAAmrjs'],
['mobile_test_2/status/324619691039543297', 'BIFH45vCUAAQecj']
]
gif = [
['elonmusk/status/1141367104702038016', 'D9bzUqoUcAAfUgf'],
['Proj_Borealis/status/1136595194621677568', 'D8X_PJAXUAAavPT']
]
video_m3u8 = [
['d0m96/status/1078373829917974528', '9q1-v9w8-ft3awgD.jpg'],
['SpaceX/status/1138474014152712192', 'ocJJj2uu4n1kyD2Y.jpg']
]
gallery = [
# ['mobile_test/status/451108446603980803', [
# ['BkKovdrCUAAEz79', 'BkKovdcCEAAfoBO']
# ]],
# ['mobile_test/status/471539824713691137', [
# ['Bos--KNIQAAA7Li', 'Bos--FAIAAAWpah'],
# ['Bos--IqIQAAav23']
# ]],
['mobile_test/status/469530783384743936', [
['BoQbwJAIUAA0QCY', 'BoQbwN1IMAAuTiP'],
['BoQbwarIAAAlaE-', 'BoQbwh_IEAA27ef']
]]
]
class MediaTest(BaseTestCase):
    @parameterized.expand(poll)
    def test_poll(self, tweet, text, votes, leader, choices):
        self.open_nitter(tweet)
        self.assert_text(text, '.main-tweet')
        self.assert_text(votes, Poll.votes)
        poll_choices = self.find_elements(Poll.choice)
        for i, (v, o) in enumerate(choices):
            choice = poll_choices[i]
            value = choice.find_element(By.CLASS_NAME, Poll.value)
            option = choice.find_element(By.CLASS_NAME, Poll.option)
            choice_class = choice.get_attribute('class')
            self.assert_equal(v, value.text)
            self.assert_equal(o, option.text)
            if i == leader:
                self.assertIn(Poll.leader, choice_class)
            else:
                self.assertNotIn(Poll.leader, choice_class)
    @parameterized.expand(image)
    def test_image(self, tweet, url):
        self.open_nitter(tweet)
        self.assert_element_visible(Media.container)
        self.assert_element_visible(Media.image)
        image_url = self.get_image_url(Media.image + ' img')
        self.assertIn(url, image_url)
    @parameterized.expand(gif)
    def test_gif(self, tweet, gif_id):
        self.open_nitter(tweet)
        self.assert_element_visible(Media.container)
        self.assert_element_visible(Media.gif)
        url = self.get_attribute('source', 'src')
        thumb = self.get_attribute('video', 'poster')
        self.assertIn(gif_id + '.mp4', url)
        self.assertIn(gif_id + '.jpg', thumb)
    @parameterized.expand(video_m3u8)
    def test_video_m3u8(self, tweet, thumb):
        # no url because video playback isn't supported yet
        self.open_nitter(tweet)
        self.assert_element_visible(Media.container)
        self.assert_element_visible(Media.video)
        video_thumb = self.get_attribute(Media.video + ' img', 'src')
        self.assertIn(thumb, video_thumb)
    @parameterized.expand(gallery)
    def test_gallery(self, tweet, rows):
        self.open_nitter(tweet)
        self.assert_element_visible(Media.container)
        self.assert_element_visible(Media.row)
        self.assert_element_visible(Media.image)
        gallery_rows = self.find_elements(Media.row)
        self.assert_equal(len(rows), len(gallery_rows))
        for i, row in enumerate(gallery_rows):
            images = row.find_elements(By.CSS_SELECTOR, 'img')
            self.assert_equal(len(rows[i]), len(images))
            for j, image in enumerate(images):
                self.assertIn(rows[i][j], image.get_attribute('src'))
|
nitter-master
|
tests/test_tweet_media.py
|
import sys
from setuptools import setup, find_packages
sys.path[0:0] = ['stylegan2_pytorch']
from version import __version__
setup(
name = 'stylegan2_pytorch',
packages = find_packages(),
entry_points={
'console_scripts': [
'stylegan2_pytorch = stylegan2_pytorch.cli:main',
],
},
version = __version__,
license='GPLv3+',
description = 'StyleGan2 in Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/stylegan2-pytorch',
download_url = 'https://github.com/lucidrains/stylegan2-pytorch/archive/v_036.tar.gz',
keywords = ['generative adversarial networks', 'artificial intelligence'],
install_requires=[
'aim',
'einops',
'contrastive_learner>=0.1.0',
'fire',
'kornia>=0.5.4',
'numpy',
'retry',
'tqdm',
'torch',
'torchvision',
'pillow',
'vector-quantize-pytorch==0.1.0'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
stylegan2-pytorch-master
|
setup.py
|
from functools import partial
import random
import torch
import torch.nn.functional as F
def DiffAugment(x, types=[]):
    for p in types:
        for f in AUGMENT_FNS[p]:
            x = f(x)
    return x.contiguous()
# """
# Augmentation functions receive images as `x`,
# a tensor with these dimensions:
# 0 - number of images in the batch
# 1 - channels
# 2 - width
# 3 - height of image
# """
def rand_brightness(x, scale):
    x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5) * scale
    return x
def rand_saturation(x, scale):
    x_mean = x.mean(dim=1, keepdim=True)
    x = (x - x_mean) * (((torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5) * 2.0 * scale) + 1.0) + x_mean
    return x
def rand_contrast(x, scale):
    x_mean = x.mean(dim=[1, 2, 3], keepdim=True)
    x = (x - x_mean) * (((torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5) * 2.0 * scale) + 1.0) + x_mean
    return x
def rand_translation(x, ratio=0.125):
    shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
    translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(x.size(2), dtype=torch.long, device=x.device),
        torch.arange(x.size(3), dtype=torch.long, device=x.device),
    )
    grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
    grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
    x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
    x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2)
    return x
def rand_offset(x, ratio=1, ratio_h=1, ratio_v=1):
    w, h = x.size(2), x.size(3)
    imgs = []
    for img in x.unbind(dim = 0):
        max_h = int(w * ratio * ratio_h)
        max_v = int(h * ratio * ratio_v)
        value_h = random.randint(0, max_h) * 2 - max_h
        value_v = random.randint(0, max_v) * 2 - max_v
        if abs(value_h) > 0:
            img = torch.roll(img, value_h, 2)
        if abs(value_v) > 0:
            img = torch.roll(img, value_v, 1)
        imgs.append(img)
    return torch.stack(imgs)
def rand_offset_h(x, ratio=1):
    return rand_offset(x, ratio=1, ratio_h=ratio, ratio_v=0)
def rand_offset_v(x, ratio=1):
    return rand_offset(x, ratio=1, ratio_h=0, ratio_v=ratio)
def rand_cutout(x, ratio=0.5):
    cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
    offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
        torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
    )
    grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
    grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
    mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
    mask[grid_batch, grid_x, grid_y] = 0
    x = x * mask.unsqueeze(1)
    return x
AUGMENT_FNS = {
    'brightness': [partial(rand_brightness, scale=1.)],
    'lightbrightness': [partial(rand_brightness, scale=.65)],
    'contrast': [partial(rand_contrast, scale=.5)],
    'lightcontrast': [partial(rand_contrast, scale=.25)],
    'saturation': [partial(rand_saturation, scale=1.)],
    'lightsaturation': [partial(rand_saturation, scale=.5)],
    'color': [partial(rand_brightness, scale=1.), partial(rand_saturation, scale=1.), partial(rand_contrast, scale=0.5)],
    'lightcolor': [partial(rand_brightness, scale=0.65), partial(rand_saturation, scale=.5), partial(rand_contrast, scale=0.5)],
    'offset': [rand_offset],
    'offset_h': [rand_offset_h],
    'offset_v': [rand_offset_v],
    'translation': [rand_translation],
    'cutout': [rand_cutout],
}
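# Illustrative usage sketch (not part of the original file): DiffAugment expects a float
# image batch shaped (N, C, H, W) and applies every function registered for each requested
# key, in order, e.g.
#   x = torch.rand(8, 3, 64, 64)
#   x = DiffAugment(x, types=['color', 'translation', 'cutout'])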
|
stylegan2-pytorch-master
|
stylegan2_pytorch/diff_augment.py
|
__version__ = '1.8.9'
|
stylegan2-pytorch-master
|
stylegan2_pytorch/version.py
|
from stylegan2_pytorch.stylegan2_pytorch import Trainer, StyleGAN2, NanException, ModelLoader
|
stylegan2-pytorch-master
|
stylegan2_pytorch/__init__.py
|
import os
import sys
import math
import fire
import json
from tqdm import tqdm
from math import floor, log2
from random import random
from shutil import rmtree
from functools import partial
import multiprocessing
from contextlib import contextmanager, ExitStack
import numpy as np
import torch
from torch import nn, einsum
from torch.utils import data
from torch.optim import Adam
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from einops import rearrange, repeat
from kornia.filters import filter2d
import torchvision
from torchvision import transforms
from stylegan2_pytorch.version import __version__
from stylegan2_pytorch.diff_augment import DiffAugment
from vector_quantize_pytorch import VectorQuantize
from PIL import Image
from pathlib import Path
try:
from apex import amp
APEX_AVAILABLE = True
except:
APEX_AVAILABLE = False
import aim
assert torch.cuda.is_available(), 'You need to have an Nvidia GPU with CUDA installed.'
# constants
NUM_CORES = multiprocessing.cpu_count()
EXTS = ['jpg', 'jpeg', 'png']
# helper classes
class NanException(Exception):
pass
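# EMA keeps an exponential moving average of weights: update_average returns
# old * beta + (1 - beta) * new, so with the beta of 0.995 used later each update moves
# the average only 0.5% of the way toward the current weights.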
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if not exists(old):
return new
return old * self.beta + (1 - self.beta) * new
class Flatten(nn.Module):
def forward(self, x):
return x.reshape(x.shape[0], -1)
class RandomApply(nn.Module):
def __init__(self, prob, fn, fn_else = lambda x: x):
super().__init__()
self.fn = fn
self.fn_else = fn_else
self.prob = prob
def forward(self, x):
fn = self.fn if random() < self.prob else self.fn_else
return fn(x)
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class ChanNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))
def forward(self, x):
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) / (var + self.eps).sqrt() * self.g + self.b
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = ChanNorm(dim)
def forward(self, x):
return self.fn(self.norm(x))
class PermuteToFrom(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
x = x.permute(0, 2, 3, 1)
out, *_, loss = self.fn(x)
out = out.permute(0, 3, 1, 2)
return out, loss
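# Blur builds a small binomial filter from [1, 2, 1]: the outer product taken in forward()
# yields the 3x3 kernel [[1, 2, 1], [2, 4, 2], [1, 2, 1]], which is normalized and applied
# with kornia's filter2d for antialiased resampling.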
class Blur(nn.Module):
def __init__(self):
super().__init__()
f = torch.Tensor([1, 2, 1])
self.register_buffer('f', f)
def forward(self, x):
f = self.f
f = f[None, None, :] * f [None, :, None]
return filter2d(x, f, normalized=True)
# attention
class DepthWiseConv2d(nn.Module):
def __init__(self, dim_in, dim_out, kernel_size, padding = 0, stride = 1, bias = True):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(dim_in, dim_in, kernel_size = kernel_size, padding = padding, groups = dim_in, stride = stride, bias = bias),
nn.Conv2d(dim_in, dim_out, kernel_size = 1, bias = bias)
)
def forward(self, x):
return self.net(x)
class LinearAttention(nn.Module):
def __init__(self, dim, dim_head = 64, heads = 8):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.nonlin = nn.GELU()
self.to_q = nn.Conv2d(dim, inner_dim, 1, bias = False)
self.to_kv = DepthWiseConv2d(dim, inner_dim * 2, 3, padding = 1, bias = False)
self.to_out = nn.Conv2d(inner_dim, dim, 1)
def forward(self, fmap):
h, x, y = self.heads, *fmap.shape[-2:]
q, k, v = (self.to_q(fmap), *self.to_kv(fmap).chunk(2, dim = 1))
q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) (x y) c', h = h), (q, k, v))
q = q.softmax(dim = -1)
k = k.softmax(dim = -2)
q = q * self.scale
context = einsum('b n d, b n e -> b d e', k, v)
out = einsum('b n d, b d e -> b n e', q, context)
out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)
out = self.nonlin(out)
return self.to_out(out)
# one layer of self-attention and feedforward, for images
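# Each entry wraps channel-normalized linear attention and a 1x1-conv feedforward in
# residual connections, so it can be inserted into the generator or discriminator at the
# resolutions listed in attn_layers.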
attn_and_ff = lambda chan: nn.Sequential(*[
Residual(PreNorm(chan, LinearAttention(chan))),
Residual(PreNorm(chan, nn.Sequential(nn.Conv2d(chan, chan * 2, 1), leaky_relu(), nn.Conv2d(chan * 2, chan, 1))))
])
# helpers
def exists(val):
return val is not None
@contextmanager
def null_context():
yield
def combine_contexts(contexts):
@contextmanager
def multi_contexts():
with ExitStack() as stack:
yield [stack.enter_context(ctx()) for ctx in contexts]
return multi_contexts
def default(value, d):
return value if exists(value) else d
def cycle(iterable):
while True:
for i in iterable:
yield i
def cast_list(el):
return el if isinstance(el, list) else [el]
def is_empty(t):
if isinstance(t, torch.Tensor):
return t.nelement() == 0
return not exists(t)
def raise_if_nan(t):
if torch.isnan(t):
raise NanException
def gradient_accumulate_contexts(gradient_accumulate_every, is_ddp, ddps):
if is_ddp:
num_no_syncs = gradient_accumulate_every - 1
head = [combine_contexts(map(lambda ddp: ddp.no_sync, ddps))] * num_no_syncs
tail = [null_context]
contexts = head + tail
else:
contexts = [null_context] * gradient_accumulate_every
for context in contexts:
with context():
yield
def loss_backwards(fp16, loss, optimizer, loss_id, **kwargs):
if fp16:
with amp.scale_loss(loss, optimizer, loss_id) as scaled_loss:
scaled_loss.backward(**kwargs)
else:
loss.backward(**kwargs)
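# gradient_penalty below is a WGAN-GP style penalty: it differentiates the discriminator
# output with respect to the input images and penalizes deviations of the gradient norm
# from 1, scaled by `weight`; during training it is applied to the real image batch.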
def gradient_penalty(images, output, weight = 10):
batch_size = images.shape[0]
gradients = torch_grad(outputs=output, inputs=images,
grad_outputs=torch.ones(output.size(), device=images.device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.reshape(batch_size, -1)
return weight * ((gradients.norm(2, dim=1) - 1) ** 2).mean()
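# calc_pl_lengths implements StyleGAN2's path length measurement: the images are projected
# onto noise scaled by 1/sqrt(num_pixels), and the norm of the gradient of that projection
# with respect to the style vectors gives one path length per sample.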
def calc_pl_lengths(styles, images):
device = images.device
num_pixels = images.shape[2] * images.shape[3]
pl_noise = torch.randn(images.shape, device=device) / math.sqrt(num_pixels)
outputs = (images * pl_noise).sum()
pl_grads = torch_grad(outputs=outputs, inputs=styles,
grad_outputs=torch.ones(outputs.shape, device=device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
return (pl_grads ** 2).sum(dim=2).mean(dim=1).sqrt()
def noise(n, latent_dim, device):
return torch.randn(n, latent_dim).cuda(device)
def noise_list(n, layers, latent_dim, device):
return [(noise(n, latent_dim, device), layers)]
def mixed_list(n, layers, latent_dim, device):
tt = int(torch.rand(()).numpy() * layers)
return noise_list(n, tt, latent_dim, device) + noise_list(n, layers - tt, latent_dim, device)
def latent_to_w(style_vectorizer, latent_descr):
return [(style_vectorizer(z), num_layers) for z, num_layers in latent_descr]
def image_noise(n, im_size, device):
return torch.FloatTensor(n, im_size, im_size, 1).uniform_(0., 1.).cuda(device)
def leaky_relu(p=0.2):
return nn.LeakyReLU(p, inplace=True)
def evaluate_in_chunks(max_batch_size, model, *args):
split_args = list(zip(*list(map(lambda x: x.split(max_batch_size, dim=0), args))))
chunked_outputs = [model(*i) for i in split_args]
if len(chunked_outputs) == 1:
return chunked_outputs[0]
return torch.cat(chunked_outputs, dim=0)
def styles_def_to_tensor(styles_def):
return torch.cat([t[:, None, :].expand(-1, n, -1) for t, n in styles_def], dim=1)
def set_requires_grad(model, bool):
for p in model.parameters():
p.requires_grad = bool
def slerp(val, low, high):
low_norm = low / torch.norm(low, dim=1, keepdim=True)
high_norm = high / torch.norm(high, dim=1, keepdim=True)
omega = torch.acos((low_norm * high_norm).sum(1))
so = torch.sin(omega)
res = (torch.sin((1.0 - val) * omega) / so).unsqueeze(1) * low + (torch.sin(val * omega) / so).unsqueeze(1) * high
return res
# losses
def gen_hinge_loss(fake, real):
return fake.mean()
def hinge_loss(real, fake):
return (F.relu(1 + real) + F.relu(1 - fake)).mean()
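# dual_contrastive_loss is a contrastive formulation of the GAN objective: each real logit
# is concatenated with every fake logit (and vice versa with signs flipped), and
# cross-entropy with target index 0 pushes it to be the largest of its group.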
def dual_contrastive_loss(real_logits, fake_logits):
device = real_logits.device
real_logits, fake_logits = map(lambda t: rearrange(t, '... -> (...)'), (real_logits, fake_logits))
def loss_half(t1, t2):
t1 = rearrange(t1, 'i -> i ()')
t2 = repeat(t2, 'j -> i j', i = t1.shape[0])
t = torch.cat((t1, t2), dim = -1)
return F.cross_entropy(t, torch.zeros(t1.shape[0], device = device, dtype = torch.long))
return loss_half(real_logits, fake_logits) + loss_half(-fake_logits, -real_logits)
# dataset
def convert_rgb_to_transparent(image):
if image.mode != 'RGBA':
return image.convert('RGBA')
return image
def convert_transparent_to_rgb(image):
if image.mode != 'RGB':
return image.convert('RGB')
return image
class expand_greyscale(object):
def __init__(self, transparent):
self.transparent = transparent
def __call__(self, tensor):
channels = tensor.shape[0]
num_target_channels = 4 if self.transparent else 3
if channels == num_target_channels:
return tensor
alpha = None
if channels == 1:
color = tensor.expand(3, -1, -1)
elif channels == 2:
color = tensor[:1].expand(3, -1, -1)
alpha = tensor[1:]
else:
raise Exception(f'image with invalid number of channels given {channels}')
if not exists(alpha) and self.transparent:
alpha = torch.ones(1, *tensor.shape[1:], device=tensor.device)
return color if not self.transparent else torch.cat((color, alpha))
def resize_to_minimum_size(min_size, image):
if max(*image.size) < min_size:
return torchvision.transforms.functional.resize(image, min_size)
return image
class Dataset(data.Dataset):
def __init__(self, folder, image_size, transparent = False, aug_prob = 0.):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in EXTS for p in Path(f'{folder}').glob(f'**/*.{ext}')]
assert len(self.paths) > 0, f'No images were found in {folder} for training'
convert_image_fn = convert_transparent_to_rgb if not transparent else convert_rgb_to_transparent
num_channels = 3 if not transparent else 4
self.transform = transforms.Compose([
transforms.Lambda(convert_image_fn),
transforms.Lambda(partial(resize_to_minimum_size, image_size)),
transforms.Resize(image_size),
RandomApply(aug_prob, transforms.RandomResizedCrop(image_size, scale=(0.5, 1.0), ratio=(0.98, 1.02)), transforms.CenterCrop(image_size)),
transforms.ToTensor(),
transforms.Lambda(expand_greyscale(transparent))
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# augmentations
def random_hflip(tensor, prob):
if prob < random():
return tensor
return torch.flip(tensor, dims=(3,))
class AugWrapper(nn.Module):
def __init__(self, D, image_size):
super().__init__()
self.D = D
def forward(self, images, prob = 0., types = [], detach = False):
if random() < prob:
images = random_hflip(images, prob=0.5)
images = DiffAugment(images, types=types)
if detach:
images = images.detach()
return self.D(images)
# stylegan2 classes
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, lr_mul = 1, bias = True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim))
self.lr_mul = lr_mul
def forward(self, input):
return F.linear(input, self.weight * self.lr_mul, bias=self.bias * self.lr_mul)
class StyleVectorizer(nn.Module):
def __init__(self, emb, depth, lr_mul = 0.1):
super().__init__()
layers = []
for i in range(depth):
layers.extend([EqualLinear(emb, emb, lr_mul), leaky_relu()])
self.net = nn.Sequential(*layers)
def forward(self, x):
x = F.normalize(x, dim=1)
return self.net(x)
class RGBBlock(nn.Module):
def __init__(self, latent_dim, input_channel, upsample, rgba = False):
super().__init__()
self.input_channel = input_channel
self.to_style = nn.Linear(latent_dim, input_channel)
out_filters = 3 if not rgba else 4
self.conv = Conv2DMod(input_channel, out_filters, 1, demod=False)
self.upsample = nn.Sequential(
nn.Upsample(scale_factor = 2, mode='bilinear', align_corners=False),
Blur()
) if upsample else None
def forward(self, x, prev_rgb, istyle):
b, c, h, w = x.shape
style = self.to_style(istyle)
x = self.conv(x, style)
if exists(prev_rgb):
x = x + prev_rgb
if exists(self.upsample):
x = self.upsample(x)
return x
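# Conv2DMod is StyleGAN2's modulated convolution: the per-sample style `y` scales the
# convolution weights by (w1 + 1), optional demodulation rescales each output filter by the
# inverse RMS of its weights, and the batch is folded into the channel dimension so a
# single grouped convolution applies a different kernel to every sample.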
class Conv2DMod(nn.Module):
def __init__(self, in_chan, out_chan, kernel, demod=True, stride=1, dilation=1, eps = 1e-8, **kwargs):
super().__init__()
self.filters = out_chan
self.demod = demod
self.kernel = kernel
self.stride = stride
self.dilation = dilation
self.weight = nn.Parameter(torch.randn((out_chan, in_chan, kernel, kernel)))
self.eps = eps
nn.init.kaiming_normal_(self.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')
def _get_same_padding(self, size, kernel, dilation, stride):
return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2
def forward(self, x, y):
b, c, h, w = x.shape
w1 = y[:, None, :, None, None]
w2 = self.weight[None, :, :, :, :]
weights = w2 * (w1 + 1)
if self.demod:
d = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps)
weights = weights * d
x = x.reshape(1, -1, h, w)
_, _, *ws = weights.shape
weights = weights.reshape(b * self.filters, *ws)
padding = self._get_same_padding(h, self.kernel, self.dilation, self.stride)
x = F.conv2d(x, weights, padding=padding, groups=b)
x = x.reshape(-1, self.filters, h, w)
return x
class GeneratorBlock(nn.Module):
def __init__(self, latent_dim, input_channels, filters, upsample = True, upsample_rgb = True, rgba = False):
super().__init__()
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) if upsample else None
self.to_style1 = nn.Linear(latent_dim, input_channels)
self.to_noise1 = nn.Linear(1, filters)
self.conv1 = Conv2DMod(input_channels, filters, 3)
self.to_style2 = nn.Linear(latent_dim, filters)
self.to_noise2 = nn.Linear(1, filters)
self.conv2 = Conv2DMod(filters, filters, 3)
self.activation = leaky_relu()
self.to_rgb = RGBBlock(latent_dim, filters, upsample_rgb, rgba)
def forward(self, x, prev_rgb, istyle, inoise):
if exists(self.upsample):
x = self.upsample(x)
inoise = inoise[:, :x.shape[2], :x.shape[3], :]
noise1 = self.to_noise1(inoise).permute((0, 3, 2, 1))
noise2 = self.to_noise2(inoise).permute((0, 3, 2, 1))
style1 = self.to_style1(istyle)
x = self.conv1(x, style1)
x = self.activation(x + noise1)
style2 = self.to_style2(istyle)
x = self.conv2(x, style2)
x = self.activation(x + noise2)
rgb = self.to_rgb(x, prev_rgb, istyle)
return x, rgb
class DiscriminatorBlock(nn.Module):
def __init__(self, input_channels, filters, downsample=True):
super().__init__()
self.conv_res = nn.Conv2d(input_channels, filters, 1, stride = (2 if downsample else 1))
self.net = nn.Sequential(
nn.Conv2d(input_channels, filters, 3, padding=1),
leaky_relu(),
nn.Conv2d(filters, filters, 3, padding=1),
leaky_relu()
)
self.downsample = nn.Sequential(
Blur(),
nn.Conv2d(filters, filters, 3, padding = 1, stride = 2)
) if downsample else None
def forward(self, x):
res = self.conv_res(x)
x = self.net(x)
if exists(self.downsample):
x = self.downsample(x)
x = (x + res) * (1 / math.sqrt(2))
return x
class Generator(nn.Module):
def __init__(self, image_size, latent_dim, network_capacity = 16, transparent = False, attn_layers = [], no_const = False, fmap_max = 512):
super().__init__()
self.image_size = image_size
self.latent_dim = latent_dim
self.num_layers = int(log2(image_size) - 1)
filters = [network_capacity * (2 ** (i + 1)) for i in range(self.num_layers)][::-1]
set_fmap_max = partial(min, fmap_max)
filters = list(map(set_fmap_max, filters))
init_channels = filters[0]
filters = [init_channels, *filters]
in_out_pairs = zip(filters[:-1], filters[1:])
self.no_const = no_const
if no_const:
self.to_initial_block = nn.ConvTranspose2d(latent_dim, init_channels, 4, 1, 0, bias=False)
else:
self.initial_block = nn.Parameter(torch.randn((1, init_channels, 4, 4)))
self.initial_conv = nn.Conv2d(filters[0], filters[0], 3, padding=1)
self.blocks = nn.ModuleList([])
self.attns = nn.ModuleList([])
for ind, (in_chan, out_chan) in enumerate(in_out_pairs):
not_first = ind != 0
not_last = ind != (self.num_layers - 1)
num_layer = self.num_layers - ind
attn_fn = attn_and_ff(in_chan) if num_layer in attn_layers else None
self.attns.append(attn_fn)
block = GeneratorBlock(
latent_dim,
in_chan,
out_chan,
upsample = not_first,
upsample_rgb = not_last,
rgba = transparent
)
self.blocks.append(block)
def forward(self, styles, input_noise):
batch_size = styles.shape[0]
image_size = self.image_size
if self.no_const:
avg_style = styles.mean(dim=1)[:, :, None, None]
x = self.to_initial_block(avg_style)
else:
x = self.initial_block.expand(batch_size, -1, -1, -1)
rgb = None
styles = styles.transpose(0, 1)
x = self.initial_conv(x)
for style, block, attn in zip(styles, self.blocks, self.attns):
if exists(attn):
x = attn(x)
x, rgb = block(x, rgb, style, input_noise)
return rgb
class Discriminator(nn.Module):
def __init__(self, image_size, network_capacity = 16, fq_layers = [], fq_dict_size = 256, attn_layers = [], transparent = False, fmap_max = 512):
super().__init__()
num_layers = int(log2(image_size) - 1)
num_init_filters = 3 if not transparent else 4
blocks = []
filters = [num_init_filters] + [(network_capacity * 4) * (2 ** i) for i in range(num_layers + 1)]
set_fmap_max = partial(min, fmap_max)
filters = list(map(set_fmap_max, filters))
chan_in_out = list(zip(filters[:-1], filters[1:]))
blocks = []
attn_blocks = []
quantize_blocks = []
for ind, (in_chan, out_chan) in enumerate(chan_in_out):
num_layer = ind + 1
is_not_last = ind != (len(chan_in_out) - 1)
block = DiscriminatorBlock(in_chan, out_chan, downsample = is_not_last)
blocks.append(block)
attn_fn = attn_and_ff(out_chan) if num_layer in attn_layers else None
attn_blocks.append(attn_fn)
quantize_fn = PermuteToFrom(VectorQuantize(out_chan, fq_dict_size)) if num_layer in fq_layers else None
quantize_blocks.append(quantize_fn)
self.blocks = nn.ModuleList(blocks)
self.attn_blocks = nn.ModuleList(attn_blocks)
self.quantize_blocks = nn.ModuleList(quantize_blocks)
chan_last = filters[-1]
latent_dim = 2 * 2 * chan_last
self.final_conv = nn.Conv2d(chan_last, chan_last, 3, padding=1)
self.flatten = Flatten()
self.to_logit = nn.Linear(latent_dim, 1)
def forward(self, x):
b, *_ = x.shape
quantize_loss = torch.zeros(1).to(x)
for (block, attn_block, q_block) in zip(self.blocks, self.attn_blocks, self.quantize_blocks):
x = block(x)
if exists(attn_block):
x = attn_block(x)
if exists(q_block):
x, loss = q_block(x)
quantize_loss += loss
x = self.final_conv(x)
x = self.flatten(x)
x = self.to_logit(x)
return x.squeeze(), quantize_loss
class StyleGAN2(nn.Module):
def __init__(self, image_size, latent_dim = 512, fmap_max = 512, style_depth = 8, network_capacity = 16, transparent = False, fp16 = False, cl_reg = False, steps = 1, lr = 1e-4, ttur_mult = 2, fq_layers = [], fq_dict_size = 256, attn_layers = [], no_const = False, lr_mlp = 0.1, rank = 0):
super().__init__()
self.lr = lr
self.steps = steps
self.ema_updater = EMA(0.995)
self.S = StyleVectorizer(latent_dim, style_depth, lr_mul = lr_mlp)
self.G = Generator(image_size, latent_dim, network_capacity, transparent = transparent, attn_layers = attn_layers, no_const = no_const, fmap_max = fmap_max)
self.D = Discriminator(image_size, network_capacity, fq_layers = fq_layers, fq_dict_size = fq_dict_size, attn_layers = attn_layers, transparent = transparent, fmap_max = fmap_max)
self.SE = StyleVectorizer(latent_dim, style_depth, lr_mul = lr_mlp)
self.GE = Generator(image_size, latent_dim, network_capacity, transparent = transparent, attn_layers = attn_layers, no_const = no_const)
self.D_cl = None
if cl_reg:
from contrastive_learner import ContrastiveLearner
# experimental contrastive loss discriminator regularization
assert not transparent, 'contrastive loss regularization does not work with transparent images yet'
self.D_cl = ContrastiveLearner(self.D, image_size, hidden_layer='flatten')
# wrapper for augmenting all images going into the discriminator
self.D_aug = AugWrapper(self.D, image_size)
# turn off grad for exponential moving averages
set_requires_grad(self.SE, False)
set_requires_grad(self.GE, False)
# init optimizers
generator_params = list(self.G.parameters()) + list(self.S.parameters())
self.G_opt = Adam(generator_params, lr = self.lr, betas=(0.5, 0.9))
self.D_opt = Adam(self.D.parameters(), lr = self.lr * ttur_mult, betas=(0.5, 0.9))
# init weights
self._init_weights()
self.reset_parameter_averaging()
self.cuda(rank)
# startup apex mixed precision
self.fp16 = fp16
if fp16:
(self.S, self.G, self.D, self.SE, self.GE), (self.G_opt, self.D_opt) = amp.initialize([self.S, self.G, self.D, self.SE, self.GE], [self.G_opt, self.D_opt], opt_level='O1', num_losses=3)
def _init_weights(self):
for m in self.modules():
if type(m) in {nn.Conv2d, nn.Linear}:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')
for block in self.G.blocks:
nn.init.zeros_(block.to_noise1.weight)
nn.init.zeros_(block.to_noise2.weight)
nn.init.zeros_(block.to_noise1.bias)
nn.init.zeros_(block.to_noise2.bias)
def EMA(self):
def update_moving_average(ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = self.ema_updater.update_average(old_weight, up_weight)
update_moving_average(self.SE, self.S)
update_moving_average(self.GE, self.G)
def reset_parameter_averaging(self):
self.SE.load_state_dict(self.S.state_dict())
self.GE.load_state_dict(self.G.state_dict())
def forward(self, x):
return x
class Trainer():
def __init__(
self,
name = 'default',
results_dir = 'results',
models_dir = 'models',
base_dir = './',
image_size = 128,
network_capacity = 16,
fmap_max = 512,
transparent = False,
batch_size = 4,
mixed_prob = 0.9,
gradient_accumulate_every=1,
lr = 2e-4,
lr_mlp = 0.1,
ttur_mult = 2,
rel_disc_loss = False,
num_workers = None,
save_every = 1000,
evaluate_every = 1000,
num_image_tiles = 8,
trunc_psi = 0.6,
fp16 = False,
cl_reg = False,
no_pl_reg = False,
fq_layers = [],
fq_dict_size = 256,
attn_layers = [],
no_const = False,
aug_prob = 0.,
aug_types = ['translation', 'cutout'],
top_k_training = False,
generator_top_k_gamma = 0.99,
generator_top_k_frac = 0.5,
dual_contrast_loss = False,
dataset_aug_prob = 0.,
calculate_fid_every = None,
calculate_fid_num_images = 12800,
clear_fid_cache = False,
is_ddp = False,
rank = 0,
world_size = 1,
log = False,
*args,
**kwargs
):
self.GAN_params = [args, kwargs]
self.GAN = None
self.name = name
base_dir = Path(base_dir)
self.base_dir = base_dir
self.results_dir = base_dir / results_dir
self.models_dir = base_dir / models_dir
self.fid_dir = base_dir / 'fid' / name
self.config_path = self.models_dir / name / '.config.json'
assert log2(image_size).is_integer(), 'image size must be a power of 2 (64, 128, 256, 512, 1024)'
self.image_size = image_size
self.network_capacity = network_capacity
self.fmap_max = fmap_max
self.transparent = transparent
self.fq_layers = cast_list(fq_layers)
self.fq_dict_size = fq_dict_size
self.has_fq = len(self.fq_layers) > 0
self.attn_layers = cast_list(attn_layers)
self.no_const = no_const
self.aug_prob = aug_prob
self.aug_types = aug_types
self.lr = lr
self.lr_mlp = lr_mlp
self.ttur_mult = ttur_mult
self.rel_disc_loss = rel_disc_loss
self.batch_size = batch_size
self.num_workers = num_workers
self.mixed_prob = mixed_prob
self.num_image_tiles = num_image_tiles
self.evaluate_every = evaluate_every
self.save_every = save_every
self.steps = 0
self.av = None
self.trunc_psi = trunc_psi
self.no_pl_reg = no_pl_reg
self.pl_mean = None
self.gradient_accumulate_every = gradient_accumulate_every
assert not fp16 or fp16 and APEX_AVAILABLE, 'Apex is not available for you to use mixed precision training'
self.fp16 = fp16
self.cl_reg = cl_reg
self.d_loss = 0
self.g_loss = 0
self.q_loss = None
self.last_gp_loss = None
self.last_cr_loss = None
self.last_fid = None
self.pl_length_ma = EMA(0.99)
self.init_folders()
self.loader = None
self.dataset_aug_prob = dataset_aug_prob
self.calculate_fid_every = calculate_fid_every
self.calculate_fid_num_images = calculate_fid_num_images
self.clear_fid_cache = clear_fid_cache
self.top_k_training = top_k_training
self.generator_top_k_gamma = generator_top_k_gamma
self.generator_top_k_frac = generator_top_k_frac
self.dual_contrast_loss = dual_contrast_loss
assert not (is_ddp and cl_reg), 'Contrastive loss regularization does not work well with multi GPUs yet'
self.is_ddp = is_ddp
self.is_main = rank == 0
self.rank = rank
self.world_size = world_size
self.logger = aim.Session(experiment=name) if log else None
@property
def image_extension(self):
return 'jpg' if not self.transparent else 'png'
@property
def checkpoint_num(self):
return floor(self.steps // self.save_every)
@property
def hparams(self):
return {'image_size': self.image_size, 'network_capacity': self.network_capacity}
def init_GAN(self):
args, kwargs = self.GAN_params
self.GAN = StyleGAN2(lr = self.lr, lr_mlp = self.lr_mlp, ttur_mult = self.ttur_mult, image_size = self.image_size, network_capacity = self.network_capacity, fmap_max = self.fmap_max, transparent = self.transparent, fq_layers = self.fq_layers, fq_dict_size = self.fq_dict_size, attn_layers = self.attn_layers, fp16 = self.fp16, cl_reg = self.cl_reg, no_const = self.no_const, rank = self.rank, *args, **kwargs)
if self.is_ddp:
ddp_kwargs = {'device_ids': [self.rank]}
self.S_ddp = DDP(self.GAN.S, **ddp_kwargs)
self.G_ddp = DDP(self.GAN.G, **ddp_kwargs)
self.D_ddp = DDP(self.GAN.D, **ddp_kwargs)
self.D_aug_ddp = DDP(self.GAN.D_aug, **ddp_kwargs)
if exists(self.logger):
self.logger.set_params(self.hparams)
def write_config(self):
self.config_path.write_text(json.dumps(self.config()))
def load_config(self):
config = self.config() if not self.config_path.exists() else json.loads(self.config_path.read_text())
self.image_size = config['image_size']
self.network_capacity = config['network_capacity']
self.transparent = config['transparent']
self.fq_layers = config['fq_layers']
self.fq_dict_size = config['fq_dict_size']
self.fmap_max = config.pop('fmap_max', 512)
self.attn_layers = config.pop('attn_layers', [])
self.no_const = config.pop('no_const', False)
self.lr_mlp = config.pop('lr_mlp', 0.1)
del self.GAN
self.init_GAN()
def config(self):
return {'image_size': self.image_size, 'network_capacity': self.network_capacity, 'lr_mlp': self.lr_mlp, 'transparent': self.transparent, 'fq_layers': self.fq_layers, 'fq_dict_size': self.fq_dict_size, 'attn_layers': self.attn_layers, 'no_const': self.no_const}
def set_data_src(self, folder):
self.dataset = Dataset(folder, self.image_size, transparent = self.transparent, aug_prob = self.dataset_aug_prob)
num_workers = default(self.num_workers, NUM_CORES if not self.is_ddp else 0)
sampler = DistributedSampler(self.dataset, rank=self.rank, num_replicas=self.world_size, shuffle=True) if self.is_ddp else None
dataloader = data.DataLoader(self.dataset, num_workers = num_workers, batch_size = math.ceil(self.batch_size / self.world_size), sampler = sampler, shuffle = not self.is_ddp, drop_last = True, pin_memory = True)
self.loader = cycle(dataloader)
# auto set augmentation prob for user if dataset is detected to be low
num_samples = len(self.dataset)
if not exists(self.aug_prob) and num_samples < 1e5:
self.aug_prob = min(0.5, (1e5 - num_samples) * 3e-6)
print(f'autosetting augmentation probability to {round(self.aug_prob * 100)}%')
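# e.g. a dataset of 30,000 images gives min(0.5, (100,000 - 30,000) * 3e-6) = 0.21,
# so the message above would report an augmentation probability of 21%.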
def train(self):
assert exists(self.loader), 'You must first initialize the data source with `.set_data_src(<folder of images>)`'
if not exists(self.GAN):
self.init_GAN()
self.GAN.train()
total_disc_loss = torch.tensor(0.).cuda(self.rank)
total_gen_loss = torch.tensor(0.).cuda(self.rank)
batch_size = math.ceil(self.batch_size / self.world_size)
image_size = self.GAN.G.image_size
latent_dim = self.GAN.G.latent_dim
num_layers = self.GAN.G.num_layers
aug_prob = self.aug_prob
aug_types = self.aug_types
aug_kwargs = {'prob': aug_prob, 'types': aug_types}
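# Lazy regularization schedule for the flags set below: the gradient penalty is only
# computed every 4th step, the path length penalty every 32nd step once past 5,000 steps,
# and the contrastive regularization of generated images kicks in after 20,000 steps.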
apply_gradient_penalty = self.steps % 4 == 0
apply_path_penalty = not self.no_pl_reg and self.steps > 5000 and self.steps % 32 == 0
apply_cl_reg_to_generated = self.steps > 20000
S = self.GAN.S if not self.is_ddp else self.S_ddp
G = self.GAN.G if not self.is_ddp else self.G_ddp
D = self.GAN.D if not self.is_ddp else self.D_ddp
D_aug = self.GAN.D_aug if not self.is_ddp else self.D_aug_ddp
backwards = partial(loss_backwards, self.fp16)
if exists(self.GAN.D_cl):
self.GAN.D_opt.zero_grad()
if apply_cl_reg_to_generated:
for i in range(self.gradient_accumulate_every):
get_latents_fn = mixed_list if random() < self.mixed_prob else noise_list
style = get_latents_fn(batch_size, num_layers, latent_dim, device=self.rank)
noise = image_noise(batch_size, image_size, device=self.rank)
w_space = latent_to_w(self.GAN.S, style)
w_styles = styles_def_to_tensor(w_space)
generated_images = self.GAN.G(w_styles, noise)
self.GAN.D_cl(generated_images.clone().detach(), accumulate=True)
for i in range(self.gradient_accumulate_every):
image_batch = next(self.loader).cuda(self.rank)
self.GAN.D_cl(image_batch, accumulate=True)
loss = self.GAN.D_cl.calculate_loss()
self.last_cr_loss = loss.clone().detach().item()
backwards(loss, self.GAN.D_opt, loss_id = 0)
self.GAN.D_opt.step()
# setup losses
if not self.dual_contrast_loss:
D_loss_fn = hinge_loss
G_loss_fn = gen_hinge_loss
G_requires_reals = False
else:
D_loss_fn = dual_contrastive_loss
G_loss_fn = dual_contrastive_loss
G_requires_reals = True
# train discriminator
avg_pl_length = self.pl_mean
self.GAN.D_opt.zero_grad()
for i in gradient_accumulate_contexts(self.gradient_accumulate_every, self.is_ddp, ddps=[D_aug, S, G]):
get_latents_fn = mixed_list if random() < self.mixed_prob else noise_list
style = get_latents_fn(batch_size, num_layers, latent_dim, device=self.rank)
noise = image_noise(batch_size, image_size, device=self.rank)
w_space = latent_to_w(S, style)
w_styles = styles_def_to_tensor(w_space)
generated_images = G(w_styles, noise)
fake_output, fake_q_loss = D_aug(generated_images.clone().detach(), detach = True, **aug_kwargs)
image_batch = next(self.loader).cuda(self.rank)
image_batch.requires_grad_()
real_output, real_q_loss = D_aug(image_batch, **aug_kwargs)
real_output_loss = real_output
fake_output_loss = fake_output
if self.rel_disc_loss:
real_output_loss = real_output_loss - fake_output.mean()
fake_output_loss = fake_output_loss - real_output.mean()
divergence = D_loss_fn(real_output_loss, fake_output_loss)
disc_loss = divergence
if self.has_fq:
quantize_loss = (fake_q_loss + real_q_loss).mean()
self.q_loss = float(quantize_loss.detach().item())
disc_loss = disc_loss + quantize_loss
if apply_gradient_penalty:
gp = gradient_penalty(image_batch, real_output)
self.last_gp_loss = gp.clone().detach().item()
self.track(self.last_gp_loss, 'GP')
disc_loss = disc_loss + gp
disc_loss = disc_loss / self.gradient_accumulate_every
disc_loss.register_hook(raise_if_nan)
backwards(disc_loss, self.GAN.D_opt, loss_id = 1)
total_disc_loss += divergence.detach().item() / self.gradient_accumulate_every
self.d_loss = float(total_disc_loss)
self.track(self.d_loss, 'D')
self.GAN.D_opt.step()
# train generator
self.GAN.G_opt.zero_grad()
for i in gradient_accumulate_contexts(self.gradient_accumulate_every, self.is_ddp, ddps=[S, G, D_aug]):
style = get_latents_fn(batch_size, num_layers, latent_dim, device=self.rank)
noise = image_noise(batch_size, image_size, device=self.rank)
w_space = latent_to_w(S, style)
w_styles = styles_def_to_tensor(w_space)
generated_images = G(w_styles, noise)
fake_output, _ = D_aug(generated_images, **aug_kwargs)
fake_output_loss = fake_output
real_output = None
if G_requires_reals:
image_batch = next(self.loader).cuda(self.rank)
real_output, _ = D_aug(image_batch, detach = True, **aug_kwargs)
real_output = real_output.detach()
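# Top-k training: when enabled, only the k fake outputs with the lowest scores contribute
# to the generator loss; k starts at the full batch and is annealed by
# generator_top_k_gamma per epoch down to generator_top_k_frac of the batch.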
if self.top_k_training:
epochs = (self.steps * batch_size * self.gradient_accumulate_every) / len(self.dataset)
k_frac = max(self.generator_top_k_gamma ** epochs, self.generator_top_k_frac)
k = math.ceil(batch_size * k_frac)
if k != batch_size:
fake_output_loss, _ = fake_output_loss.topk(k=k, largest=False)
loss = G_loss_fn(fake_output_loss, real_output)
gen_loss = loss
if apply_path_penalty:
pl_lengths = calc_pl_lengths(w_styles, generated_images)
avg_pl_length = np.mean(pl_lengths.detach().cpu().numpy())
if not is_empty(self.pl_mean):
pl_loss = ((pl_lengths - self.pl_mean) ** 2).mean()
if not torch.isnan(pl_loss):
gen_loss = gen_loss + pl_loss
gen_loss = gen_loss / self.gradient_accumulate_every
gen_loss.register_hook(raise_if_nan)
backwards(gen_loss, self.GAN.G_opt, loss_id = 2)
total_gen_loss += loss.detach().item() / self.gradient_accumulate_every
self.g_loss = float(total_gen_loss)
self.track(self.g_loss, 'G')
self.GAN.G_opt.step()
# calculate moving averages
if apply_path_penalty and not np.isnan(avg_pl_length):
self.pl_mean = self.pl_length_ma.update_average(self.pl_mean, avg_pl_length)
self.track(self.pl_mean, 'PL')
if self.is_main and self.steps % 10 == 0 and self.steps > 20000:
self.GAN.EMA()
if self.is_main and self.steps <= 25000 and self.steps % 1000 == 2:
self.GAN.reset_parameter_averaging()
# save from NaN errors
if any(torch.isnan(l) for l in (total_gen_loss, total_disc_loss)):
print(f'NaN detected for generator or discriminator. Loading from checkpoint #{self.checkpoint_num}')
self.load(self.checkpoint_num)
raise NanException
# periodically save results
if self.is_main:
if self.steps % self.save_every == 0:
self.save(self.checkpoint_num)
if self.steps % self.evaluate_every == 0 or (self.steps % 100 == 0 and self.steps < 2500):
self.evaluate(floor(self.steps / self.evaluate_every))
if exists(self.calculate_fid_every) and self.steps % self.calculate_fid_every == 0 and self.steps != 0:
num_batches = math.ceil(self.calculate_fid_num_images / self.batch_size)
fid = self.calculate_fid(num_batches)
self.last_fid = fid
                with open(str(self.results_dir / self.name / 'fid_scores.txt'), 'a') as f:
f.write(f'{self.steps},{fid}\n')
self.steps += 1
self.av = None
@torch.no_grad()
def evaluate(self, num = 0, trunc = 1.0):
self.GAN.eval()
ext = self.image_extension
num_rows = self.num_image_tiles
latent_dim = self.GAN.G.latent_dim
image_size = self.GAN.G.image_size
num_layers = self.GAN.G.num_layers
# latents and noise
latents = noise_list(num_rows ** 2, num_layers, latent_dim, device=self.rank)
n = image_noise(num_rows ** 2, image_size, device=self.rank)
# regular
generated_images = self.generate_truncated(self.GAN.S, self.GAN.G, latents, n, trunc_psi = self.trunc_psi)
torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}.{ext}'), nrow=num_rows)
# moving averages
generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, latents, n, trunc_psi = self.trunc_psi)
torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}-ema.{ext}'), nrow=num_rows)
        # style mixing visualization
def tile(a, dim, n_tile):
init_dim = a.size(dim)
repeat_idx = [1] * a.dim()
repeat_idx[dim] = n_tile
a = a.repeat(*(repeat_idx))
order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])).cuda(self.rank)
return torch.index_select(a, dim, order_index)
nn = noise(num_rows, latent_dim, device=self.rank)
tmp1 = tile(nn, 0, num_rows)
tmp2 = nn.repeat(num_rows, 1)
tt = int(num_layers / 2)
mixed_latents = [(tmp1, tt), (tmp2, num_layers - tt)]
generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, mixed_latents, n, trunc_psi = self.trunc_psi)
torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}-mr.{ext}'), nrow=num_rows)
@torch.no_grad()
def calculate_fid(self, num_batches):
from pytorch_fid import fid_score
torch.cuda.empty_cache()
real_path = self.fid_dir / 'real'
fake_path = self.fid_dir / 'fake'
# remove any existing files used for fid calculation and recreate directories
if not real_path.exists() or self.clear_fid_cache:
rmtree(real_path, ignore_errors=True)
os.makedirs(real_path)
for batch_num in tqdm(range(num_batches), desc='calculating FID - saving reals'):
real_batch = next(self.loader)
for k, image in enumerate(real_batch.unbind(0)):
filename = str(k + batch_num * self.batch_size)
torchvision.utils.save_image(image, str(real_path / f'{filename}.png'))
# generate a bunch of fake images in results / name / fid_fake
rmtree(fake_path, ignore_errors=True)
os.makedirs(fake_path)
self.GAN.eval()
ext = self.image_extension
latent_dim = self.GAN.G.latent_dim
image_size = self.GAN.G.image_size
num_layers = self.GAN.G.num_layers
for batch_num in tqdm(range(num_batches), desc='calculating FID - saving generated'):
# latents and noise
latents = noise_list(self.batch_size, num_layers, latent_dim, device=self.rank)
noise = image_noise(self.batch_size, image_size, device=self.rank)
# moving averages
generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, latents, noise, trunc_psi = self.trunc_psi)
for j, image in enumerate(generated_images.unbind(0)):
torchvision.utils.save_image(image, str(fake_path / f'{str(j + batch_num * self.batch_size)}-ema.{ext}'))
return fid_score.calculate_fid_given_paths([str(real_path), str(fake_path)], 256, noise.device, 2048)
@torch.no_grad()
def truncate_style(self, tensor, trunc_psi = 0.75):
S = self.GAN.S
batch_size = self.batch_size
latent_dim = self.GAN.G.latent_dim
if not exists(self.av):
z = noise(2000, latent_dim, device=self.rank)
samples = evaluate_in_chunks(batch_size, S, z).cpu().numpy()
self.av = np.mean(samples, axis = 0)
self.av = np.expand_dims(self.av, axis = 0)
av_torch = torch.from_numpy(self.av).cuda(self.rank)
tensor = trunc_psi * (tensor - av_torch) + av_torch
return tensor
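    # Note (added for clarity): this is the usual truncation trick - the average style vector
    # is estimated from 2000 random latents pushed through the style network S, and each style
    # is pulled toward that average by a factor of trunc_psi, trading diversity for fidelity.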
@torch.no_grad()
def truncate_style_defs(self, w, trunc_psi = 0.75):
w_space = []
for tensor, num_layers in w:
tensor = self.truncate_style(tensor, trunc_psi = trunc_psi)
w_space.append((tensor, num_layers))
return w_space
@torch.no_grad()
def generate_truncated(self, S, G, style, noi, trunc_psi = 0.75, num_image_tiles = 8):
w = map(lambda t: (S(t[0]), t[1]), style)
w_truncated = self.truncate_style_defs(w, trunc_psi = trunc_psi)
w_styles = styles_def_to_tensor(w_truncated)
generated_images = evaluate_in_chunks(self.batch_size, G, w_styles, noi)
return generated_images.clamp_(0., 1.)
@torch.no_grad()
def generate_interpolation(self, num = 0, num_image_tiles = 8, trunc = 1.0, num_steps = 100, save_frames = False):
self.GAN.eval()
ext = self.image_extension
num_rows = num_image_tiles
latent_dim = self.GAN.G.latent_dim
image_size = self.GAN.G.image_size
num_layers = self.GAN.G.num_layers
# latents and noise
latents_low = noise(num_rows ** 2, latent_dim, device=self.rank)
latents_high = noise(num_rows ** 2, latent_dim, device=self.rank)
n = image_noise(num_rows ** 2, image_size, device=self.rank)
ratios = torch.linspace(0., 8., num_steps)
frames = []
for ratio in tqdm(ratios):
interp_latents = slerp(ratio, latents_low, latents_high)
latents = [(interp_latents, num_layers)]
generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, latents, n, trunc_psi = self.trunc_psi)
images_grid = torchvision.utils.make_grid(generated_images, nrow = num_rows)
pil_image = transforms.ToPILImage()(images_grid.cpu())
if self.transparent:
background = Image.new("RGBA", pil_image.size, (255, 255, 255))
pil_image = Image.alpha_composite(background, pil_image)
frames.append(pil_image)
frames[0].save(str(self.results_dir / self.name / f'{str(num)}.gif'), save_all=True, append_images=frames[1:], duration=80, loop=0, optimize=True)
if save_frames:
folder_path = (self.results_dir / self.name / f'{str(num)}')
folder_path.mkdir(parents=True, exist_ok=True)
for ind, frame in enumerate(frames):
frame.save(str(folder_path / f'{str(ind)}.{ext}'))
def print_log(self):
data = [
('G', self.g_loss),
('D', self.d_loss),
('GP', self.last_gp_loss),
('PL', self.pl_mean),
('CR', self.last_cr_loss),
('Q', self.q_loss),
('FID', self.last_fid)
]
data = [d for d in data if exists(d[1])]
log = ' | '.join(map(lambda n: f'{n[0]}: {n[1]:.2f}', data))
print(log)
def track(self, value, name):
if not exists(self.logger):
return
self.logger.track(value, name = name)
def model_name(self, num):
return str(self.models_dir / self.name / f'model_{num}.pt')
def init_folders(self):
(self.results_dir / self.name).mkdir(parents=True, exist_ok=True)
(self.models_dir / self.name).mkdir(parents=True, exist_ok=True)
def clear(self):
rmtree(str(self.models_dir / self.name), True)
rmtree(str(self.results_dir / self.name), True)
rmtree(str(self.fid_dir), True)
rmtree(str(self.config_path), True)
self.init_folders()
def save(self, num):
save_data = {
'GAN': self.GAN.state_dict(),
'version': __version__
}
if self.GAN.fp16:
save_data['amp'] = amp.state_dict()
torch.save(save_data, self.model_name(num))
self.write_config()
def load(self, num = -1):
self.load_config()
name = num
if num == -1:
file_paths = [p for p in Path(self.models_dir / self.name).glob('model_*.pt')]
saved_nums = sorted(map(lambda x: int(x.stem.split('_')[1]), file_paths))
if len(saved_nums) == 0:
return
name = saved_nums[-1]
print(f'continuing from previous epoch - {name}')
self.steps = name * self.save_every
load_data = torch.load(self.model_name(name))
if 'version' in load_data:
print(f"loading from version {load_data['version']}")
try:
self.GAN.load_state_dict(load_data['GAN'])
except Exception as e:
            print('unable to load saved model. please try downgrading the package to the version specified by the saved model')
raise e
if self.GAN.fp16 and 'amp' in load_data:
amp.load_state_dict(load_data['amp'])
class ModelLoader:
def __init__(self, *, base_dir, name = 'default', load_from = -1):
self.model = Trainer(name = name, base_dir = base_dir)
self.model.load(load_from)
def noise_to_styles(self, noise, trunc_psi = None):
noise = noise.cuda()
w = self.model.GAN.SE(noise)
if exists(trunc_psi):
            w = self.model.truncate_style(w, trunc_psi = trunc_psi)
return w
def styles_to_images(self, w):
batch_size, *_ = w.shape
num_layers = self.model.GAN.GE.num_layers
image_size = self.model.image_size
w_def = [(w, num_layers)]
w_tensors = styles_def_to_tensor(w_def)
noise = image_noise(batch_size, image_size, device = 0)
images = self.model.GAN.GE(w_tensors, noise)
images.clamp_(0., 1.)
return images
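# Minimal usage sketch (illustrative, not part of the original file): assumes a checkpoint saved
# by Trainer exists under ./models/default and that a CUDA device is available, since
# noise_to_styles moves the latent onto the GPU; 512 is the default latent_dim.
if __name__ == '__main__':
    loader = ModelLoader(base_dir = '.', name = 'default')
    latent = torch.randn(1, 512)
    styles = loader.noise_to_styles(latent, trunc_psi = 0.75)  # map z -> w through the style network
    images = loader.styles_to_images(styles)                   # (1, 3, image_size, image_size), values in [0, 1]
    torchvision.utils.save_image(images, './sample.png')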
|
stylegan2-pytorch-master
|
stylegan2_pytorch/stylegan2_pytorch.py
|
import os
import fire
import random
from retry.api import retry_call
from tqdm import tqdm
from datetime import datetime
from functools import wraps
from stylegan2_pytorch import Trainer, NanException
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
import numpy as np
def cast_list(el):
return el if isinstance(el, list) else [el]
def timestamped_filename(prefix = 'generated-'):
now = datetime.now()
timestamp = now.strftime("%m-%d-%Y_%H-%M-%S")
return f'{prefix}{timestamp}'
def set_seed(seed):
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
def run_training(rank, world_size, model_args, data, load_from, new, num_train_steps, name, seed):
is_main = rank == 0
is_ddp = world_size > 1
if is_ddp:
set_seed(seed)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
dist.init_process_group('nccl', rank=rank, world_size=world_size)
print(f"{rank + 1}/{world_size} process initialized.")
model_args.update(
is_ddp = is_ddp,
rank = rank,
world_size = world_size
)
model = Trainer(**model_args)
if not new:
model.load(load_from)
else:
model.clear()
model.set_data_src(data)
progress_bar = tqdm(initial = model.steps, total = num_train_steps, mininterval=10., desc=f'{name}<{data}>')
while model.steps < num_train_steps:
retry_call(model.train, tries=3, exceptions=NanException)
progress_bar.n = model.steps
progress_bar.refresh()
if is_main and model.steps % 50 == 0:
model.print_log()
model.save(model.checkpoint_num)
if is_ddp:
dist.destroy_process_group()
def train_from_folder(
data = './data',
results_dir = './results',
models_dir = './models',
name = 'default',
new = False,
load_from = -1,
image_size = 128,
network_capacity = 16,
fmap_max = 512,
transparent = False,
batch_size = 5,
gradient_accumulate_every = 6,
num_train_steps = 150000,
learning_rate = 2e-4,
lr_mlp = 0.1,
ttur_mult = 1.5,
rel_disc_loss = False,
num_workers = None,
save_every = 1000,
evaluate_every = 1000,
generate = False,
num_generate = 1,
generate_interpolation = False,
interpolation_num_steps = 100,
save_frames = False,
num_image_tiles = 8,
trunc_psi = 0.75,
mixed_prob = 0.9,
fp16 = False,
no_pl_reg = False,
cl_reg = False,
fq_layers = [],
fq_dict_size = 256,
attn_layers = [],
no_const = False,
aug_prob = 0.,
aug_types = ['translation', 'cutout'],
top_k_training = False,
generator_top_k_gamma = 0.99,
generator_top_k_frac = 0.5,
dual_contrast_loss = False,
dataset_aug_prob = 0.,
multi_gpus = False,
calculate_fid_every = None,
calculate_fid_num_images = 12800,
clear_fid_cache = False,
seed = 42,
log = False
):
model_args = dict(
name = name,
results_dir = results_dir,
models_dir = models_dir,
batch_size = batch_size,
gradient_accumulate_every = gradient_accumulate_every,
image_size = image_size,
network_capacity = network_capacity,
fmap_max = fmap_max,
transparent = transparent,
lr = learning_rate,
lr_mlp = lr_mlp,
ttur_mult = ttur_mult,
rel_disc_loss = rel_disc_loss,
num_workers = num_workers,
save_every = save_every,
evaluate_every = evaluate_every,
num_image_tiles = num_image_tiles,
trunc_psi = trunc_psi,
fp16 = fp16,
no_pl_reg = no_pl_reg,
cl_reg = cl_reg,
fq_layers = fq_layers,
fq_dict_size = fq_dict_size,
attn_layers = attn_layers,
no_const = no_const,
aug_prob = aug_prob,
aug_types = cast_list(aug_types),
top_k_training = top_k_training,
generator_top_k_gamma = generator_top_k_gamma,
generator_top_k_frac = generator_top_k_frac,
dual_contrast_loss = dual_contrast_loss,
dataset_aug_prob = dataset_aug_prob,
calculate_fid_every = calculate_fid_every,
calculate_fid_num_images = calculate_fid_num_images,
clear_fid_cache = clear_fid_cache,
mixed_prob = mixed_prob,
log = log
)
if generate:
model = Trainer(**model_args)
model.load(load_from)
samples_name = timestamped_filename()
for num in tqdm(range(num_generate)):
model.evaluate(f'{samples_name}-{num}', num_image_tiles)
print(f'sample images generated at {results_dir}/{name}/{samples_name}')
return
if generate_interpolation:
model = Trainer(**model_args)
model.load(load_from)
samples_name = timestamped_filename()
model.generate_interpolation(samples_name, num_image_tiles, num_steps = interpolation_num_steps, save_frames = save_frames)
print(f'interpolation generated at {results_dir}/{name}/{samples_name}')
return
world_size = torch.cuda.device_count()
if world_size == 1 or not multi_gpus:
run_training(0, 1, model_args, data, load_from, new, num_train_steps, name, seed)
return
mp.spawn(run_training,
args=(world_size, model_args, data, load_from, new, num_train_steps, name, seed),
nprocs=world_size,
join=True)
def main():
fire.Fire(train_from_folder)
|
stylegan2-pytorch-master
|
stylegan2_pytorch/cli.py
|
from setuptools import setup, find_packages
setup(
name = 'bidirectional-cross-attention',
packages = find_packages(exclude=[]),
version = '0.0.4',
license='MIT',
description = 'Bidirectional Cross Attention',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/bidirectional-cross-attention',
keywords = [
'artificial intelligence',
'deep learning',
'attention mechanism'
],
install_requires=[
'einops>=0.4',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
bidirectional-cross-attention-main
|
setup.py
|
import torch
from torch import nn
from einops import rearrange
from torch import einsum
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def stable_softmax(t, dim = -1):
t = t - t.amax(dim = dim, keepdim = True)
return t.softmax(dim = dim)
# bidirectional cross attention - have two sequences attend to each other with 1 attention step
class BidirectionalCrossAttention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
context_dim = None,
dropout = 0.,
talking_heads = False,
prenorm = False,
):
super().__init__()
context_dim = default(context_dim, dim)
self.norm = nn.LayerNorm(dim) if prenorm else nn.Identity()
self.context_norm = nn.LayerNorm(context_dim) if prenorm else nn.Identity()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.dropout = nn.Dropout(dropout)
self.context_dropout = nn.Dropout(dropout)
self.to_qk = nn.Linear(dim, inner_dim, bias = False)
self.context_to_qk = nn.Linear(context_dim, inner_dim, bias = False)
self.to_v = nn.Linear(dim, inner_dim, bias = False)
self.context_to_v = nn.Linear(context_dim, inner_dim, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.context_to_out = nn.Linear(inner_dim, context_dim)
self.talking_heads = nn.Conv2d(heads, heads, 1, bias = False) if talking_heads else nn.Identity()
self.context_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) if talking_heads else nn.Identity()
def forward(
self,
x,
context,
mask = None,
context_mask = None,
return_attn = False,
rel_pos_bias = None
):
b, i, j, h, device = x.shape[0], x.shape[-2], context.shape[-2], self.heads, x.device
x = self.norm(x)
context = self.context_norm(context)
# get shared query/keys and values for sequence and context
qk, v = self.to_qk(x), self.to_v(x)
context_qk, context_v = self.context_to_qk(context), self.context_to_v(context)
# split out head
qk, context_qk, v, context_v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (qk, context_qk, v, context_v))
# get similarities
sim = einsum('b h i d, b h j d -> b h i j', qk, context_qk) * self.scale
# relative positional bias, if supplied
if exists(rel_pos_bias):
sim = sim + rel_pos_bias
# mask
if exists(mask) or exists(context_mask):
mask = default(mask, torch.ones((b, i), device = device, dtype = torch.bool))
context_mask = default(context_mask, torch.ones((b, j), device = device, dtype = torch.bool))
attn_mask = rearrange(mask, 'b i -> b 1 i 1') * rearrange(context_mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max)
# get attention along both sequence length and context length dimensions
# shared similarity matrix
attn = stable_softmax(sim, dim = -1)
context_attn = stable_softmax(sim, dim = -2)
# dropouts
attn = self.dropout(attn)
context_attn = self.context_dropout(context_attn)
# talking heads
attn = self.talking_heads(attn)
context_attn = self.context_talking_heads(context_attn)
# src sequence aggregates values from context, context aggregates values from src sequence
out = einsum('b h i j, b h j d -> b h i d', attn, context_v)
context_out = einsum('b h j i, b h j d -> b h i d', context_attn, v)
# merge heads and combine out
out, context_out = map(lambda t: rearrange(t, 'b h n d -> b n (h d)'), (out, context_out))
out = self.to_out(out)
context_out = self.context_to_out(context_out)
if return_attn:
return out, context_out, attn, context_attn
return out, context_out
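# Minimal usage sketch (illustrative, not part of the original module): two sequences of
# different lengths and feature dimensions attend to each other in a single attention step.
if __name__ == '__main__':
    video = torch.randn(1, 4096, 512)
    audio = torch.randn(1, 8192, 386)
    joint_cross_attn = BidirectionalCrossAttention(dim = 512, heads = 8, dim_head = 64, context_dim = 386)
    video_out, audio_out = joint_cross_attn(video, audio)
    # video_out: (1, 4096, 512), audio_out: (1, 8192, 386)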
|
bidirectional-cross-attention-main
|
bidirectional_cross_attention/bidirectional_cross_attention.py
|
from bidirectional_cross_attention.bidirectional_cross_attention import BidirectionalCrossAttention
|
bidirectional-cross-attention-main
|
bidirectional_cross_attention/__init__.py
|
from setuptools import setup, find_packages
setup(
name = 'rela-transformer',
packages = find_packages(exclude=[]),
version = '0.0.7',
license='MIT',
description = 'ReLA Transformer',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/rela-transformer',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention-mechanism',
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
rela-transformer-main
|
setup.py
|
from rela_transformer import ReLATransformer
from rela_transformer.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 3e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 512
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = ReLATransformer(
num_tokens = 256,
dim = 512,
depth = 8,
max_seq_len = SEQ_LEN,
heads = 8,
causal = True
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()  # frombuffer returns a read-only view, so copy before torch.from_numpy
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
inp = inp[:SEQ_LEN]
prime = decode_tokens(inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp[None, :], GENERATE_LENGTH)
output_str = decode_tokens(sample.squeeze(0))
print(output_str)
|
rela-transformer-main
|
train.py
|
from functools import partial
import torch
import random
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
def exists(val):
return val is not None
def default(value, default):
return value if exists(value) else default
def log(t, eps=1e-9):
return torch.log(t + eps)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = None, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = default(ignore_index, pad_value)
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
was_training = self.net.training
num_dims = len(start_tokens.shape)
b, t = start_tokens.shape
self.net.eval()
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)
logits = logits[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
gumbel_noise = -log(-log(torch.zeros_like(filtered_logits).uniform_(0, 1)))
sample = ((filtered_logits / temperature) + gumbel_noise).argmax(dim=-1)
out = torch.cat((out, sample[:, None]), dim=-1)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
self.net.train(was_training)
return out
def forward(self, x, *args, **kwargs):
inp, labels = x[:, :-1], x[:, 1:]
out = self.net(inp, *args, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), labels, ignore_index = self.ignore_index)
return loss
|
rela-transformer-main
|
rela_transformer/autoregressive_wrapper.py
|
from rela_transformer.rela_transformer import ReLATransformer
|
rela-transformer-main
|
rela_transformer/__init__.py
|
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
# classes
class GatedRMSNorm(nn.Module):
def __init__(
self,
dim,
eps = 1e-8
):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.w = nn.Parameter(torch.ones(dim))
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True) * self.scale
normed_x = x / norm.clamp(min = self.eps) * self.g
return normed_x * (x * self.w).sigmoid()
def FeedForward(dim, mult = 4):
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Linear(dim * mult, dim)
)
class ReLA(nn.Module):
def __init__(
self,
*,
dim,
causal = True,
dim_head = 64,
heads = 8,
num_memory_kv = 0,
relu_squared = False
):
super().__init__()
self.heads = heads
inner_dim = dim_head * heads
self.scale = dim_head ** -0.5
self.causal = causal
self.relu_squared = relu_squared
self.norm = GatedRMSNorm(dim)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.mem_k = nn.Parameter(torch.randn(num_memory_kv, inner_dim))
self.mem_v = nn.Parameter(torch.randn(num_memory_kv, inner_dim))
self.norm_values = GatedRMSNorm(dim_head)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
)
def forward(self, x, mask = None):
b, device = x.shape[0], x.device
x = self.norm(x)
h = self.heads
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
mem_k, mem_v = map(lambda t: repeat(t, 'n d -> b n d', b = b), (self.mem_k, self.mem_v))
k = torch.cat((mem_k, k), dim = 1)
v = torch.cat((mem_v, v), dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
attn = F.relu(sim)
if self.relu_squared:
attn = attn ** 2
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
attn = attn.masked_fill(~mask, 0.)
if self.causal:
i, j = attn.shape[-2:]
causal_mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
attn = attn.masked_fill(causal_mask, 0.)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = self.norm_values(out)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class ReLATransformer(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
max_seq_len,
causal = True,
heads = 8,
dim_head = 64,
num_memory_kv = 0,
no_ff = False,
ff_mult = 4,
relu_squared = False
):
super().__init__()
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
ReLA(dim = dim, relu_squared = relu_squared, heads = heads, dim_head = dim_head, num_memory_kv = num_memory_kv, causal = causal),
FeedForward(dim = dim, mult = ff_mult) if not no_ff else None
]))
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(self, x, mask = None):
n, device = x.shape[1], x.device
x = self.token_emb(x)
pos_emb = self.pos_emb(torch.arange(n, device = device))
x = x + rearrange(pos_emb, 'n d -> 1 n d')
for attn, ff in self.layers:
x = attn(x, mask = mask) + x
if exists(ff):
x = ff(x) + x
return self.to_logits(x)
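# Minimal usage sketch (illustrative, not part of the original module): a single forward pass
# over random token ids, checking the logits shape.
if __name__ == '__main__':
    model = ReLATransformer(num_tokens = 256, dim = 512, depth = 2, max_seq_len = 1024, causal = True)
    tokens = torch.randint(0, 256, (1, 1024))
    logits = model(tokens)  # (1, 1024, 256)
    print(logits.shape)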
|
rela-transformer-main
|
rela_transformer/rela_transformer.py
|
from setuptools import setup, find_packages
setup(
name = 'ema-pytorch',
packages = find_packages(exclude=[]),
version = '0.2.3',
license='MIT',
description = 'Easy way to keep track of exponential moving average version of your pytorch module',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/ema-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'exponential moving average'
],
install_requires=[
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
ema-pytorch-main
|
setup.py
|
from ema_pytorch.ema_pytorch import EMA
|
ema-pytorch-main
|
ema_pytorch/__init__.py
|
import copy
import torch
from torch import nn
def exists(val):
return val is not None
def clamp(value, min_value = None, max_value = None):
assert exists(min_value) or exists(max_value)
if exists(min_value):
value = max(value, min_value)
if exists(max_value):
value = min(value, max_value)
return value
class EMA(nn.Module):
"""
Implements exponential moving average shadowing for your model.
Utilizes an inverse decay schedule to manage longer term training runs.
By adjusting the power, you can control how fast EMA will ramp up to your specified beta.
@crowsonkb's notes on EMA Warmup:
If gamma=1 and power=1, implements a simple average. gamma=1, power=2/3 are
good values for models you plan to train for a million or more steps (reaches decay
factor 0.999 at 31.6K steps, 0.9999 at 1M steps), gamma=1, power=3/4 for models
you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at
215.4k steps).
Args:
inv_gamma (float): Inverse multiplicative factor of EMA warmup. Default: 1.
power (float): Exponential factor of EMA warmup. Default: 1.
min_value (float): The minimum EMA decay rate. Default: 0.
"""
def __init__(
self,
model,
ema_model = None, # if your model has lazylinears or other types of non-deepcopyable modules, you can pass in your own ema model
beta = 0.9999,
update_after_step = 100,
update_every = 10,
inv_gamma = 1.0,
power = 2 / 3,
min_value = 0.0,
param_or_buffer_names_no_ema = set(),
ignore_names = set(),
ignore_startswith_names = set(),
include_online_model = True # set this to False if you do not wish for the online model to be saved along with the ema model (managed externally)
):
super().__init__()
self.beta = beta
# whether to include the online model within the module tree, so that state_dict also saves it
self.include_online_model = include_online_model
if include_online_model:
self.online_model = model
else:
self.online_model = [model] # hack
# ema model
self.ema_model = ema_model
if not exists(self.ema_model):
try:
self.ema_model = copy.deepcopy(model)
except:
print('Your model was not copyable. Please make sure you are not using any LazyLinear')
exit()
self.ema_model.requires_grad_(False)
self.parameter_names = {name for name, param in self.ema_model.named_parameters() if param.dtype in [torch.float, torch.float16]}
self.buffer_names = {name for name, buffer in self.ema_model.named_buffers() if buffer.dtype in [torch.float, torch.float16]}
self.update_every = update_every
self.update_after_step = update_after_step
self.inv_gamma = inv_gamma
self.power = power
self.min_value = min_value
assert isinstance(param_or_buffer_names_no_ema, (set, list))
self.param_or_buffer_names_no_ema = param_or_buffer_names_no_ema # parameter or buffer
self.ignore_names = ignore_names
self.ignore_startswith_names = ignore_startswith_names
self.register_buffer('initted', torch.Tensor([False]))
self.register_buffer('step', torch.tensor([0]))
@property
def model(self):
return self.online_model if self.include_online_model else self.online_model[0]
def restore_ema_model_device(self):
device = self.initted.device
self.ema_model.to(device)
def get_params_iter(self, model):
for name, param in model.named_parameters():
if name not in self.parameter_names:
continue
yield name, param
def get_buffers_iter(self, model):
for name, buffer in model.named_buffers():
if name not in self.buffer_names:
continue
yield name, buffer
def copy_params_from_model_to_ema(self):
for (_, ma_params), (_, current_params) in zip(self.get_params_iter(self.ema_model), self.get_params_iter(self.model)):
ma_params.data.copy_(current_params.data)
for (_, ma_buffers), (_, current_buffers) in zip(self.get_buffers_iter(self.ema_model), self.get_buffers_iter(self.model)):
ma_buffers.data.copy_(current_buffers.data)
def get_current_decay(self):
epoch = clamp(self.step.item() - self.update_after_step - 1, min_value = 0.)
value = 1 - (1 + epoch / self.inv_gamma) ** - self.power
if epoch <= 0:
return 0.
return clamp(value, min_value = self.min_value, max_value = self.beta)
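    # e.g. (illustrative comment): with the defaults inv_gamma = 1 and power = 2 / 3, the decay
    # reaches ~0.999 after roughly 31.6k steps and ~0.9999 after roughly 1M steps, matching
    # @crowsonkb's notes in the class docstring above.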
def update(self):
step = self.step.item()
self.step += 1
if (step % self.update_every) != 0:
return
if step <= self.update_after_step:
self.copy_params_from_model_to_ema()
return
if not self.initted.item():
self.copy_params_from_model_to_ema()
self.initted.data.copy_(torch.Tensor([True]))
self.update_moving_average(self.ema_model, self.model)
@torch.no_grad()
def update_moving_average(self, ma_model, current_model):
current_decay = self.get_current_decay()
for (name, current_params), (_, ma_params) in zip(self.get_params_iter(current_model), self.get_params_iter(ma_model)):
if name in self.ignore_names:
continue
if any([name.startswith(prefix) for prefix in self.ignore_startswith_names]):
continue
if name in self.param_or_buffer_names_no_ema:
ma_params.data.copy_(current_params.data)
continue
ma_params.data.lerp_(current_params.data, 1. - current_decay)
for (name, current_buffer), (_, ma_buffer) in zip(self.get_buffers_iter(current_model), self.get_buffers_iter(ma_model)):
if name in self.ignore_names:
continue
if any([name.startswith(prefix) for prefix in self.ignore_startswith_names]):
continue
if name in self.param_or_buffer_names_no_ema:
ma_buffer.data.copy_(current_buffer.data)
continue
ma_buffer.data.lerp_(current_buffer.data, 1. - current_decay)
def __call__(self, *args, **kwargs):
return self.ema_model(*args, **kwargs)
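# Minimal usage sketch (illustrative, not part of the original module): keep an EMA copy of a
# small network and update it alongside (hypothetical) optimizer steps.
if __name__ == '__main__':
    net = nn.Linear(512, 512)
    ema = EMA(net, beta = 0.9999, update_after_step = 100, update_every = 10)
    for _ in range(1000):
        # ... an optimizer step on net would go here ...
        ema.update()
    data = torch.randn(1, 512)
    ema_output = ema(data)  # forwards through the EMA copy of the weights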
|
ema-pytorch-main
|
ema_pytorch/ema_pytorch.py
|
from setuptools import setup, find_packages
setup(
name = 'PaLM-jax',
packages = find_packages(exclude=[]),
version = '0.1.2',
license='MIT',
description = 'PaLM: Scaling Language Modeling with Pathways - Jax',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/PaLM-jax',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism'
],
install_requires=[
'einops==0.4',
'equinox>=0.5',
'jax>=0.3.4',
'jaxlib>=0.1',
'optax',
'numpy'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
PaLM-jax-main
|
setup.py
|
import os
from random import randrange
from functools import partial
import tqdm
import gzip
import numpy as np
import jax
import jax.numpy as jnp
from jax import nn
import equinox as eqx
from optax import adam, clip_by_global_norm, chain, apply_every
from palm_jax.palm_lite import PaLM
from palm_jax.utils import sample
# env
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE'] = 'false'
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
MAX_GRAD_NORM = 0.5
VALIDATE_EVERY = 100
SAMPLE_EVERY = 500
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# enwik8 data and data functions
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8)
data_train, data_val = np.split(X, [int(90e6)])
def sample_seq_from_data(data, *, seq_len, batch_size):
total_seq_len = data.shape[0]
base_arange = np.arange(seq_len)
start_indices = np.random.randint(0, total_seq_len - seq_len, (batch_size,))
token_indices = start_indices[:, None] + base_arange
return data[token_indices]
sample_seq_fn = partial(sample_seq_from_data, seq_len = SEQ_LEN, batch_size = BATCH_SIZE)
# setup model and params
key = jax.random.PRNGKey(0)
model = PaLM(
num_tokens = 256,
dim = 512,
depth = 8,
heads = 8,
dim_head = 64,
key = key
)
# cross entropy loss
def cross_entropy(logits, targets, axis = -1):
logprobs = nn.log_softmax(logits, axis = axis)
nll = jnp.take_along_axis(logprobs, jnp.expand_dims(targets, axis = axis), axis = axis)
cross_entropy = -jnp.mean(nll)
return cross_entropy
@eqx.filter_value_and_grad
def loss_fn(model, data):
inp, labels = data[:, :-1], data[:, 1:]
logits = model(inp)
return cross_entropy(logits, labels, axis = -1)
# optimizer
optim = chain(
clip_by_global_norm(MAX_GRAD_NORM),
adam(LEARNING_RATE),
apply_every(GRADIENT_ACCUMULATE_EVERY)
)
optim_state = optim.init(model)
# train step
@eqx.filter_jit(kwargs=dict(data=True))
def train_step(model, data, optim_state):
loss, grads = loss_fn(model, data)
updates, optim_state = optim.update(grads, optim_state)
model = eqx.apply_updates(model, updates)
return model, optim_state, loss
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
for _ in range(GRADIENT_ACCUMULATE_EVERY):
data = sample_seq_fn(data_train)
model, optim_state, loss = train_step(model, data, optim_state)
print(f'loss: {loss.item()}')
if i % SAMPLE_EVERY == 0:
valid_data = sample_seq_fn(data_val)
prime = valid_data[0][:100]
prime_str = decode_tokens(prime)
print(prime_str, "\n", "*" * 40)
sampled = sample(key, model, prime, SEQ_LEN, top_k = 25)
sampled_str = decode_tokens(sampled[100:])
print(sampled_str)
|
PaLM-jax-main
|
train.py
|
from typing import List, Tuple
import numpy as onp
from jax import random, nn, lax, jit, numpy as np
from jax.numpy import einsum
from equinox import Module, static_field
from einops import rearrange, repeat
# bias-less layernorm
class LayerNorm(Module):
gamma: np.ndarray
eps: float = static_field()
def __init__(self, dim, eps = 1e-5):
self.gamma = np.ones((dim,))
self.eps = eps
def __call__(self, x):
mean = np.mean(x, axis = -1, keepdims = True)
mean_of_squares = np.mean(np.square(x), axis = -1, keepdims = True)
variance = mean_of_squares - np.square(mean)
inv = lax.rsqrt(variance + self.eps)
return inv * (x - mean) * self.gamma
# Rotary embedding
def fixed_pos_embedding(inv_freq, seq):
sinusoid_inp = einsum('i , j -> i j', np.arange(seq), inv_freq)
sinusoid_inp = repeat(sinusoid_inp, '... d -> ... (d r)', r = 2)
return np.sin(sinusoid_inp), np.cos(sinusoid_inp)
def rotate_every_two(x):
x = rearrange(x, '... (d r) -> ... d r', r = 2)
x1, x2 = x[..., 0], x[..., 1]
x = np.stack((-x2, x1), axis = -1)
return rearrange(x, '... d r -> ... (d r)')
def apply_rotary_pos_emb(x, sincos):
sin, cos = sincos
return (x * cos) + (rotate_every_two(x) * sin)
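# e.g. (illustrative comment): with dim_head = 64 and a sequence length of 128,
#   inv_freq = 1.0 / (10000 ** (np.arange(0, 64, 2) / 64))   # (32,)
#   sin, cos = fixed_pos_embedding(inv_freq, 128)            # each (128, 64) after the repeat
#   q = apply_rotary_pos_emb(q, (sin, cos))                  # rotates every adjacent feature pair of q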
# attention - multi-query, one-headed key / values variant
# feedforward - Shazeer's SwiGLU variant
class ParallelTransformerBlock(Module):
norm: Module
wi: np.ndarray
attn_wo: np.ndarray
ff_wo: np.ndarray
heads: int = static_field()
fused_dims: Tuple[int] = static_field()
scale: float = static_field()
mask_value: float = static_field()
def __init__(
self,
dim,
dim_head,
heads,
key,
ff_mult = 4,
mask_value = -1e10
):
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.norm = LayerNorm(dim)
self.fused_dims = (attn_inner_dim, dim_head, dim_head, ff_inner_dim, ff_inner_dim)
self.wi = random.normal(key, (dim, sum(self.fused_dims)))
self.attn_wo = random.normal(key, (attn_inner_dim, dim))
self.ff_wo = random.normal(key, (ff_inner_dim, dim))
self.heads = heads
self.scale = dim_head ** -0.5
self.mask_value = mask_value
def __call__(self, x, *, pos_emb, causal_mask):
n, split_indices = x.shape[-2], onp.cumsum(self.fused_dims[:-1])
x = self.norm(x)
# fused attention and feedforward projections
q, k, v, ff, ff_gate = np.split(x @ self.wi, split_indices, axis = -1)
# split out heads
q = rearrange(q, '... n (h d) -> ... h n d', h = self.heads)
# scale
q *= self.scale
# apply rotary embeddings
q, k = map(lambda t: apply_rotary_pos_emb(t, pos_emb), (q, k))
# sim
sim = einsum('... h i d, ... j d -> ... h i j', q, k)
# causal mask
sim = np.where(causal_mask, sim, self.mask_value)
# attention
attn = nn.softmax(sim, axis = -1)
# aggregate values
out = einsum('... h i j, ... j d -> ... h i d', attn, v)
# merge heads
out = rearrange(out, '... h n d -> ... n (h d)')
# feedforward out
attn_out = out @ self.attn_wo
ff_out = (ff * nn.swish(ff_gate)) @ self.ff_wo
# combine heads out
return attn_out + ff_out
# main class
class PaLM(Module):
embedding: np.ndarray
norm: Module
layers: List[List[Module]]
inv_freq: onp.ndarray = static_field()
def __init__(
self,
*,
num_tokens,
dim,
dim_head,
depth,
heads,
key,
ff_mult = 4
):
self.embedding = random.normal(key, (num_tokens, dim)) * 0.02
self.inv_freq = 1.0 / (10000 ** (np.arange(0, dim_head, 2) / dim_head))
self.layers = [ParallelTransformerBlock(dim = dim, dim_head = dim_head, heads = heads, ff_mult = ff_mult, key = key) for _ in range(depth)]
self.norm = LayerNorm(dim)
@jit
def __call__(self, x):
n = x.shape[-1]
x = self.embedding[x]
rotary_emb = fixed_pos_embedding(self.inv_freq, n)
causal_mask = np.tril(np.ones((n, n)))
for block in self.layers:
x = block(x, pos_emb = rotary_emb, causal_mask = causal_mask) + x
x = self.norm(x)
return x @ self.embedding.transpose()
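# Minimal usage sketch (illustrative, not part of the original module), following the repository
# README and assuming the jax / equinox versions pinned in setup.py:
if __name__ == '__main__':
    key = random.PRNGKey(0)
    model = PaLM(num_tokens = 256, dim = 128, dim_head = 32, depth = 2, heads = 4, key = key)
    seq = random.randint(key, (1, 512), 0, 256)
    logits = model(seq)  # (1, 512, 256)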
|
PaLM-jax-main
|
palm_jax/palm.py
|
from palm_jax.palm import PaLM
|
PaLM-jax-main
|
palm_jax/__init__.py
|
from math import log2, floor
from typing import List, Tuple
import numpy as onp
from jax import random, jit, nn, lax, numpy as np
from jax.numpy import einsum
from equinox import Module, static_field
from einops import rearrange, repeat
# rmsnorm
class RMSNorm(Module):
gamma: np.ndarray
scale: float = static_field()
eps: float = static_field()
def __init__(self, dim, eps = 1e-5):
self.gamma = np.ones((dim,))
self.eps = eps
self.scale = dim ** 0.5
def __call__(self, x):
sum_of_squares = np.sum(np.square(x), axis = -1, keepdims = True)
inv_norm = lax.rsqrt(sum_of_squares + self.eps)
return inv_norm * x * self.gamma * self.scale
# AliBi
def get_alibi_slopes(heads):
def get_slopes_power_of_2(n):
start = (2 ** (-2 ** -(log2(n) - 3)))
ratio = start
return [start*ratio**i for i in range(n)]
if log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** floor(log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
def calc_alibi_bias(seq_len, heads):
slopes = get_alibi_slopes(heads)
slopes = rearrange(onp.array(slopes), 'h -> h 1 1')
bias = rearrange(onp.arange(seq_len), 'j -> 1 1 j')
return slopes * bias
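# e.g. (illustrative comment): get_alibi_slopes(8) -> [2**-1, 2**-2, ..., 2**-8], and
# calc_alibi_bias(seq_len = 4, heads = 8) has shape (8, 1, 4) - a per-head linear penalty that
# grows with key position and is later combined with the causal mask in PaLM.__init__ below.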
# attention - multi-query, one-headed key / values variant with shared key / values
# feedforward - Shazeer's SwiGLU variant
class ParallelTransformerBlock(Module):
norm: Module
wi: np.ndarray
attn_wo: np.ndarray
ff_wo: np.ndarray
heads: int = static_field()
fused_dims: Tuple[int] = static_field()
scale: float = static_field()
mask_value: float = static_field()
def __init__(
self,
dim,
dim_head,
heads,
key,
ff_mult = 4,
mask_value = -1e10
):
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.norm = RMSNorm(dim)
self.fused_dims = (attn_inner_dim, dim_head, ff_inner_dim, ff_inner_dim)
self.wi = random.normal(key, (dim, sum(self.fused_dims)))
self.attn_wo = random.normal(key, (attn_inner_dim, dim))
self.ff_wo = random.normal(key, (ff_inner_dim, dim))
self.heads = heads
self.scale = dim_head ** -0.5
self.mask_value = mask_value
def __call__(self, x, *, attn_bias):
n, split_indices = x.shape[-2], onp.cumsum(self.fused_dims[:-1])
x = self.norm(x)
# fused attention and feedforward projections
q, kv, ff, ff_gate = np.split(x @ self.wi, split_indices, axis = -1)
# split out heads
q = rearrange(q, '... n (h d) -> ... h n d', h = self.heads)
# scale
q *= self.scale
# sim
sim = einsum('... h i d, ... j d -> ... h i j', q, kv)
# causal mask
sim = sim + attn_bias
# attention
attn = nn.softmax(sim, axis = -1)
# aggregate values
out = einsum('... h i j, ... j d -> ... h i d', attn, kv)
# merge heads
out = rearrange(out, '... h n d -> ... n (h d)')
# feedforward out
attn_out = out @ self.attn_wo
ff_out = (ff * nn.swish(ff_gate)) @ self.ff_wo
# combine heads out
return attn_out + ff_out
# main class
class PaLM(Module):
embedding: np.ndarray
norm: Module
layers: List[List[Module]]
attn_bias: onp.ndarray = static_field()
def __init__(
self,
*,
num_tokens,
dim,
dim_head,
depth,
heads,
key,
ff_mult = 4,
max_seq_len = 2048,
mask_value = -1e10
):
self.embedding = random.normal(key, (num_tokens, dim)) * 0.02
causal_mask = onp.tril(onp.ones((max_seq_len, max_seq_len)))
alibi_bias = calc_alibi_bias(max_seq_len, heads = heads)
self.attn_bias = np.where(causal_mask, repeat(alibi_bias, 'h 1 j -> h i j', i = max_seq_len), mask_value)
self.layers = [ParallelTransformerBlock(dim = dim, dim_head = dim_head, heads = heads, key = key, ff_mult = ff_mult) for _ in range(depth)]
self.norm = RMSNorm(dim)
@jit
def __call__(self, x):
n = x.shape[-1]
x = self.embedding[x]
attn_bias = self.attn_bias[..., :n, :n]
for block in self.layers:
x = block(x, attn_bias = attn_bias) + x
x = self.norm(x)
return x @ self.embedding.transpose()
|
PaLM-jax-main
|
palm_jax/palm_lite.py
|
from jax import random
from jax.lax import top_k
import jax.numpy as np
# helper functions
def exists(val):
return val is not None
def log(t, eps = 1e-20):
return np.log(t + eps)
# sampling functions
def select_top_k(tensor, k):
values, _ = top_k(tensor, k)
mask = tensor > values.min()
return mask, np.where(mask, tensor, 0.)
def gumbel_noise(key, shape):
noise = random.uniform(key, shape = shape, minval = 0., maxval = 1.)
return -log(-log(noise))
def sample(key, model, prime, length, top_k = None):
start_pos = prime.shape[-1]
seq = np.pad(prime, (0, length - prime.shape[-1]))
one_hots = np.eye(length, dtype = int)
for curr_pos in range(start_pos, length):
logits = model(seq)
logits = logits[curr_pos - 1]
_, key = random.split(key)
noise = gumbel_noise(key, logits.shape)
if exists(top_k):
mask, logits = select_top_k(logits, top_k)
noise *= mask
logits += noise
sampled_ind = np.argmax(logits, axis = -1)
one_hot = one_hots[curr_pos]
seq += one_hot * sampled_ind
return seq
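# Usage note (illustrative, not part of the original module): given a trained `model` and a 1-d
# array of prime token ids, sample(key, model, prime, length, top_k = 25) fills positions one at
# a time by adding gumbel noise to the (optionally top-k filtered) logits and taking the argmax,
# which is the gumbel-max trick for drawing from the softmax distribution.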
|
PaLM-jax-main
|
palm_jax/utils.py
|
from setuptools import setup, find_packages
setup(
name = 'memory-efficient-attention-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.6',
license='MIT',
description = 'Memory Efficient Attention - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/memory-efficient-attention-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'attention-mechanism'
],
install_requires=[
'einops>=0.4.1',
'torch>=1.6'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.8',
],
)
|
memory-efficient-attention-pytorch-main
|
setup.py
|
from memory_efficient_attention_pytorch.transformer import Transformer
from memory_efficient_attention_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 1024
SEQ_LEN = 4096
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = Transformer(
num_tokens = 256,
dim = 512,
max_seq_len = SEQ_LEN,
depth = 6,
heads = 8,
causal = True,
q_bucket_size = 256,
k_bucket_size = 256,
ff_chunks = 5,
use_flash_attn = True
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()  # frombuffer returns a read-only view, so copy before torch.from_numpy
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i != 0 and i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str)
|
memory-efficient-attention-pytorch-main
|
train.py
|
import torch
from memory_efficient_attention_pytorch import Attention
from memory_efficient_attention_pytorch.memory_efficient_attention import attention
from memory_efficient_attention_pytorch.flash_attention import FlashAttention, FlashAttentionFunction
# constants
def isclose(a, b, atol = 1e-6):
diff = (a - b).abs().amax()
return diff < atol
# test outputs are equal
def test_output_equal():
attn = Attention(
dim = 512,
dim_head = 64,
heads = 8,
q_bucket_size = 64,
k_bucket_size = 64,
causal = True
)
x = torch.randn(2, 2048, 512)
mask = torch.ones(2, 2048).bool()
out = attn(x, mask = mask)
mem_efficient_out = attn(x, mask = mask, memory_efficient = True)
assert isclose(mem_efficient_out, out, atol = 1e-6)
# test gradients equal
def test_gradients_equal():
attn = Attention(
dim = 512,
dim_head = 64,
heads = 8,
q_bucket_size = 64,
k_bucket_size = 64,
causal = True
)
def loss_fn(inp, **kwargs):
return attn(inp, **kwargs).sum()
x = torch.randn(2, 2048, 512).requires_grad_()
mask = torch.ones(2, 2048).bool()
loss_fn(x, mask = mask).backward()
out_grad = x.grad.clone()
x.grad.zero_()
loss_fn(x, mask = mask, memory_efficient = True).backward()
mem_efficient_out_grad = x.grad.clone()
assert isclose(out_grad, mem_efficient_out_grad, atol = 1e-5)
# test flash attention
def test_flash_attn_output_equal():
attn_kwargs = dict(
dim = 512,
dim_head = 64,
heads = 8,
q_bucket_size = 64,
k_bucket_size = 64,
causal = True
)
attn = Attention(**attn_kwargs)
flash_attn = FlashAttention(**attn_kwargs)
flash_attn.to_q = attn.to_q
flash_attn.to_kv = attn.to_kv
flash_attn.to_out = attn.to_out
x = torch.randn(2, 2048, 512)
mask = torch.ones(2, 2048).bool()
out = attn(x, mask = mask)
mem_efficient_out = flash_attn(x, mask = mask)
assert isclose(mem_efficient_out, out, atol = 1e-6)
# test gradients equal
def test_flash_attn_gradients_equal():
q = torch.randn(1, 8, 1024, 512).requires_grad_()
k = torch.randn(1, 8, 1024, 512).requires_grad_()
v = torch.randn(1, 8, 1024, 512).requires_grad_()
mask = torch.ones(1, 1024).bool()
o = attention(q, k, v, mask = mask, causal = True)
o.sum().backward()
dq_grad = q.grad.clone()
dk_grad = k.grad.clone()
dv_grad = v.grad.clone()
q.grad.zero_()
k.grad.zero_()
v.grad.zero_()
flash_o = FlashAttentionFunction.apply(q, k, v, mask, True, 64, 64)
flash_o.sum().backward()
flash_dq_grad = q.grad.clone()
flash_dk_grad = k.grad.clone()
flash_dv_grad = v.grad.clone()
assert isclose(flash_dq_grad, dq_grad, atol = 1e-5)
assert isclose(flash_dk_grad, dk_grad, atol = 1e-5)
assert isclose(flash_dv_grad, dv_grad, atol = 1e-5)
# test flash attention - full attention mask
def test_flash_attn_full_attn_mask_output_equal():
attn_kwargs = dict(
dim = 512,
dim_head = 64,
heads = 8,
q_bucket_size = 64,
k_bucket_size = 64,
causal = True
)
attn = Attention(**attn_kwargs)
flash_attn = FlashAttention(**attn_kwargs)
flash_attn.to_q = attn.to_q
flash_attn.to_kv = attn.to_kv
flash_attn.to_out = attn.to_out
x = torch.randn(2, 2048, 512)
mask = torch.ones(2, 1, 2048, 2048).bool()
out = attn(x, mask = mask)
mem_efficient_out = flash_attn(x, mask = mask)
assert isclose(mem_efficient_out, out, atol = 1e-6)
# test gradients equal - full attention mask
def test_flash_attn_full_attn_mask_gradients_equal():
q = torch.randn(1, 8, 1024, 512).requires_grad_()
k = torch.randn(1, 8, 1024, 512).requires_grad_()
v = torch.randn(1, 8, 1024, 512).requires_grad_()
mask = torch.ones(1, 1, 1024, 1024).bool()
o = attention(q, k, v, mask = mask, causal = True)
o.sum().backward()
dq_grad = q.grad.clone()
dk_grad = k.grad.clone()
dv_grad = v.grad.clone()
q.grad.zero_()
k.grad.zero_()
v.grad.zero_()
flash_o = FlashAttentionFunction.apply(q, k, v, mask, True, 64, 64)
flash_o.sum().backward()
flash_dq_grad = q.grad.clone()
flash_dk_grad = k.grad.clone()
flash_dv_grad = v.grad.clone()
assert isclose(flash_dq_grad, dq_grad, atol = 1e-5)
assert isclose(flash_dk_grad, dk_grad, atol = 1e-5)
assert isclose(flash_dv_grad, dv_grad, atol = 1e-5)
|
memory-efficient-attention-pytorch-main
|
tests/test.py
|
import torch
from torch import nn
import torch.nn.functional as F
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_thres = 0.9, **kwargs):
b, t, device = *start_tokens.shape, start_tokens.device
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if exists(eos_token):
is_eos_token = (out == eos_token)
if is_eos_token.any(dim = -1).all():
# mask out everything after the eos tokens
                    shifted_is_eos_tokens = F.pad(is_eos_token, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, self.pad_value)
break
out = out[:, t:]
return out
def forward(self, x, **kwargs):
x_inp, x_labels = x[:, :-1], x[:, 1:]
return self.net(x_inp, labels = x_labels, **kwargs)
|
memory-efficient-attention-pytorch-main
|
memory_efficient_attention_pytorch/autoregressive_wrapper.py
|
import torch
from functools import partial
from torch import nn, einsum
from torch.utils.checkpoint import checkpoint
import torch.nn.functional as F
from einops import rearrange
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# regular attention
def attention(
q, k, v,
mask = None,
causal = False,
attn_bias = None,
**kwargs
):
scale = q.shape[-1] ** -0.5
q = q * scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
if exists(attn_bias):
sim = sim + attn_bias
mask_value = -torch.finfo(sim.dtype).max
if exists(mask):
if mask.ndim == 2:
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, mask_value)
if causal:
i, j = sim.shape[-2:]
mask = torch.ones(i, j, device = q.device, dtype = torch.bool).triu(j - i + 1)
sim = sim.masked_fill(mask, mask_value)
sim = sim - sim.amax(dim = -1, keepdim = True).detach()
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
return out
# memory efficient attention
def summarize_qkv_chunk(q, k, v, mask, attn_bias_chunk, causal, qk_start_indices, dropout):
q_start_index, k_start_index, q_chunk_size, k_chunk_size, device = *qk_start_indices, q.shape[-2], k.shape[-2], q.device
weight = einsum('b h i d, b h j d -> b h i j', q, k)
if exists(attn_bias_chunk):
weight = weight + attn_bias_chunk
mask_value = -torch.finfo(weight.dtype).max
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
weight = weight.masked_fill(~mask, mask_value)
if causal and q_start_index < (k_start_index + k_chunk_size - 1):
causal_mask = torch.ones((q_chunk_size, k_chunk_size), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
weight = weight.masked_fill(causal_mask, mask_value)
weight_max = weight.amax(dim = -1, keepdim = True).detach()
weight = weight - weight_max
exp_weight = weight.exp()
exp_weight = F.dropout(exp_weight, p = dropout)
weighted_value = einsum('b h i j, b h j d -> b h i d', exp_weight, v)
return exp_weight.sum(dim = -1), weighted_value, rearrange(weight_max, '... 1 -> ...')
checkpointed_summarize_qkv_chunk = partial(checkpoint, summarize_qkv_chunk)
def memory_efficient_attention(
q, k, v,
mask = None,
causal = False,
attn_bias = None,
q_bucket_size = 512,
k_bucket_size = 1024,
eps = 1e-8,
dropout = 0.,
training = False
):
scale = q.shape[-1] ** -0.5
q = q * scale
# function
needs_backwards = q.requires_grad or k.requires_grad or v.requires_grad
summarize_qkv_fn = checkpointed_summarize_qkv_chunk if needs_backwards else summarize_qkv_chunk
# chunk all the inputs
q_chunks = q.split(q_bucket_size, dim = -2)
k_chunks = k.split(k_bucket_size, dim = -2)
v_chunks = v.split(k_bucket_size, dim = -2)
mask_chunks = mask.split(k_bucket_size, dim = -1) if exists(mask) else ((None,) * len(k_chunks))
if exists(attn_bias):
i, j = attn_bias.shape[-2:]
attn_bias_chunks = attn_bias.split(q_bucket_size, dim = -2)
attn_bias_chunks = list(map(lambda t: t.split(k_bucket_size, dim = -1), attn_bias_chunks))
# loop through all chunks and accumulate
out = []
for q_index, q_chunk in enumerate(q_chunks):
exp_weights = []
weighted_values = []
weight_maxes = []
for k_index, (k_chunk, v_chunk, mask_chunk) in enumerate(zip(k_chunks, v_chunks, mask_chunks)):
q_start_index = q_index * q_bucket_size
k_start_index = k_index * k_bucket_size
if causal and k_start_index > (q_start_index + q_chunk.shape[-2] - 1):
# if chunk is to be all masked out causally, skip
continue
attn_bias_chunk = attn_bias_chunks[q_index][k_index] if exists(attn_bias) else None
exp_weight_chunk, weighted_value_chunk, weight_max_chunk = summarize_qkv_fn(
q_chunk,
k_chunk,
v_chunk,
mask_chunk,
attn_bias_chunk,
causal,
(q_start_index, k_start_index),
dropout if training else 0.
)
exp_weights.append(exp_weight_chunk)
weighted_values.append(weighted_value_chunk)
weight_maxes.append(weight_max_chunk)
weight_maxes = torch.stack(weight_maxes, dim = -1)
weighted_values = torch.stack(weighted_values, dim = -1)
exp_weights = torch.stack(exp_weights, dim = -1)
global_max = weight_maxes.amax(dim = -1, keepdim = True)
renorm_factor = (weight_maxes - global_max).exp().detach()
exp_weights = exp_weights * renorm_factor
weighted_values = weighted_values * rearrange(renorm_factor, '... c -> ... 1 c')
all_values = weighted_values.sum(dim = -1)
all_weights = exp_weights.sum(dim = -1)
normalized_values = all_values / (rearrange(all_weights, '... -> ... 1') + eps)
out.append(normalized_values)
return torch.cat(out, dim = -2)
# main class
class Attention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
dropout = 0.,
causal = False,
memory_efficient = False,
q_bucket_size = 512,
k_bucket_size = 1024
):
super().__init__()
self.heads = heads
self.causal = causal
self.dropout = dropout
inner_dim = heads * dim_head
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
# memory efficient attention related parameters
        # can be overridden on forward
self.memory_efficient = memory_efficient
self.q_bucket_size = q_bucket_size
self.k_bucket_size = k_bucket_size
def forward(
self,
x,
context = None,
mask = None,
attn_bias = None,
memory_efficient = None,
q_bucket_size = None,
k_bucket_size = None,
):
memory_efficient = default(memory_efficient, self.memory_efficient)
q_bucket_size = default(q_bucket_size, self.q_bucket_size)
k_bucket_size = default(k_bucket_size, self.k_bucket_size)
h = self.heads
context = default(context, x)
q = self.to_q(x)
k, v = self.to_kv(context).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
attn_fn = attention if not memory_efficient else memory_efficient_attention
out = attn_fn(q, k, v, mask = mask, attn_bias = attn_bias, causal = self.causal, q_bucket_size = q_bucket_size,
k_bucket_size = k_bucket_size, dropout = self.dropout, training = self.training)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
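#----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). It instantiates the
# Attention module defined above and checks that the chunked (memory efficient)
# path matches the regular einsum path; shapes and bucket sizes below are
# arbitrary assumptions for the demonstration.
if __name__ == '__main__':
    attn = Attention(dim = 512, heads = 8, dim_head = 64, causal = True, memory_efficient = True, q_bucket_size = 256, k_bucket_size = 512)
    x = torch.randn(1, 1024, 512)
    out_chunked = attn(x)                            # memory efficient (chunked) attention
    out_regular = attn(x, memory_efficient = False)  # regular attention, same weights
    print((out_chunked - out_regular).abs().max())   # should be close to zero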
|
memory-efficient-attention-pytorch-main
|
memory_efficient_attention_pytorch/memory_efficient_attention.py
|
import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
routed_args = [(dict(), dict()) for _ in range(depth)]
matched_keys = [key for key in args.keys() if key in router]
for key in matched_keys:
val = args[key]
for depth, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
routed_args[depth] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
return routed_args
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, args):
ctx.args = args
for block, kwarg in zip(blocks, args):
x = block(x, **kwarg)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
args = ctx.args
for block, kwargs in zip(ctx.blocks[::-1], args[::-1]):
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class ReversibleSequence(nn.Module):
def __init__(self, blocks, args_route = {}):
super().__init__()
self.args_route = args_route
self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim=-1)
blocks = self.blocks
args = route_args(self.args_route, kwargs, len(blocks))
args = list(map(lambda x: {'f_args': x[0], 'g_args': x[1]}, args))
layers_and_args = list(zip(blocks, args))
out = _ReversibleFunction.apply(x, blocks, args)
return torch.stack(out.chunk(2, dim=-1)).sum(dim=0)
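#----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). Each (f, g) pair
# must map (batch, seq, dim) tensors back to the same shape; plain Linear layers
# stand in here for the attention / feedforward sublayers used elsewhere in this
# repository.
if __name__ == '__main__':
    dim = 64
    blocks = [(nn.Linear(dim, dim), nn.Linear(dim, dim)) for _ in range(4)]
    net = ReversibleSequence(blocks)
    x = torch.randn(2, 128, dim, requires_grad = True)
    out = net(x)                 # activations are reconstructed, not stored, during the backward pass
    out.sum().backward()
    print(x.grad.shape)          # torch.Size([2, 128, 64])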
|
memory-efficient-attention-pytorch-main
|
memory_efficient_attention_pytorch/reversible.py
|
from memory_efficient_attention_pytorch.memory_efficient_attention import Attention, memory_efficient_attention
from memory_efficient_attention_pytorch.memory_efficient_cosine_sim_attention import CosineSimAttention, numerically_unstable_memory_efficient_attention
from memory_efficient_attention_pytorch.flash_attention import FlashAttention
|
memory-efficient-attention-pytorch-main
|
memory_efficient_attention_pytorch/__init__.py
|
import math
import torch
from functools import partial
from torch import nn, einsum
import torch.nn.functional as F
from torch.autograd.function import Function
from einops import rearrange
# constants
EPSILON = 1e-6
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def l2norm(t):
return F.normalize(t, dim = -1)
# flash attention forwards and backwards
class FlashAttentionFunction(Function):
@staticmethod
@torch.no_grad()
def forward(ctx, q, k, v, mask, scale, causal, q_bucket_size, k_bucket_size):
device = q.device
max_neg_value = -torch.finfo(q.dtype).max
qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
k_len = k.shape[-2] # in cosine sim attention, row sums are bounded by key / values sequence length
o = torch.zeros_like(q)
all_row_sums = torch.zeros((*q.shape[:-1], 1), device = device)
if not exists(mask):
mask = (None,) * math.ceil(q.shape[-2] / q_bucket_size)
else:
mask = rearrange(mask, 'b n -> b 1 1 n')
mask = mask.split(q_bucket_size, dim = -1)
row_splits = zip(
q.split(q_bucket_size, dim = -2),
o.split(q_bucket_size, dim = -2),
mask,
all_row_sums.split(q_bucket_size, dim = -2),
)
for ind, (qc, oc, row_mask, row_sums) in enumerate(row_splits):
q_start_index = ind * q_bucket_size - qk_len_diff
col_splits = zip(
k.split(k_bucket_size, dim = -2),
v.split(k_bucket_size, dim = -2),
)
for k_ind, (kc, vc) in enumerate(col_splits):
k_start_index = k_ind * k_bucket_size
attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale
if exists(row_mask):
attn_weights.masked_fill_(~row_mask, max_neg_value)
if causal and q_start_index < (k_start_index + k_bucket_size - 1):
causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
attn_weights.masked_fill_(causal_mask, max_neg_value)
attn_weights -= scale
exp_weights = torch.exp(attn_weights)
if exists(row_mask):
exp_weights.masked_fill_(~row_mask, 0.)
block_row_sums = exp_weights.sum(dim = -1, keepdims = True).clamp(min = EPSILON)
exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)
oc.add_(exp_values / k_len)
row_sums.add_(block_row_sums)
ctx.args = (scale, causal, mask, q_bucket_size, k_bucket_size)
ctx.save_for_backward(q, k, v, o, all_row_sums)
o.mul_(k_len / all_row_sums)
return o
@staticmethod
@torch.no_grad()
def backward(ctx, do):
scale, causal, mask, q_bucket_size, k_bucket_size = ctx.args
q, k, v, o, l = ctx.saved_tensors
device = q.device
max_neg_value = -torch.finfo(q.dtype).max
qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
dq = torch.zeros_like(q)
dk = torch.zeros_like(k)
dv = torch.zeros_like(v)
row_splits = zip(
q.split(q_bucket_size, dim = -2),
o.split(q_bucket_size, dim = -2),
do.split(q_bucket_size, dim = -2),
mask,
l.split(q_bucket_size, dim = -2),
dq.split(q_bucket_size, dim = -2)
)
for ind, (qc, oc, doc, row_mask, lc, dqc) in enumerate(row_splits):
q_start_index = ind * q_bucket_size - qk_len_diff
col_splits = zip(
k.split(k_bucket_size, dim = -2),
v.split(k_bucket_size, dim = -2),
dk.split(k_bucket_size, dim = -2),
dv.split(k_bucket_size, dim = -2),
)
for k_ind, (kc, vc, dkc, dvc) in enumerate(col_splits):
k_start_index = k_ind * k_bucket_size
attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale
if causal and q_start_index < (k_start_index + k_bucket_size - 1):
causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
attn_weights.masked_fill_(causal_mask, max_neg_value)
exp_attn_weights = torch.exp(attn_weights - scale)
if exists(row_mask):
exp_attn_weights.masked_fill_(~row_mask, 0.)
p = exp_attn_weights / lc
dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)
dp = einsum('... i d, ... j d -> ... i j', doc, vc)
D = (doc * oc).sum(dim = -1, keepdims = True)
ds = p * scale * (dp - D)
dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)
dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)
dqc.add_(dq_chunk)
dkc.add_(dk_chunk)
dvc.add_(dv_chunk)
return dq, dk, dv, None, None, None, None, None
# main class
# flash attention for cosine sim attention
# a bit less complicated, as there is no longer any need to worry about softmax numerical stability, and row sums are bounded
class FlashAttention(nn.Module):
def __init__(
self,
*,
dim,
scale = 16,
heads = 8,
dim_head = 64,
causal = False,
q_bucket_size = 512,
k_bucket_size = 1024
):
super().__init__()
self.heads = heads
self.scale = scale
self.causal = causal
inner_dim = heads * dim_head
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
# memory efficient attention related parameters
        # can be overridden on forward
self.q_bucket_size = q_bucket_size
self.k_bucket_size = k_bucket_size
def forward(
self,
x,
context = None,
mask = None,
q_bucket_size = None,
k_bucket_size = None,
):
q_bucket_size = default(q_bucket_size, self.q_bucket_size)
k_bucket_size = default(k_bucket_size, self.k_bucket_size)
h = self.heads
context = default(context, x)
q = self.to_q(x)
k, v = self.to_kv(context).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
q, k = map(l2norm, (q, k))
out = FlashAttentionFunction.apply(q, k, v, mask, self.scale, self.causal, q_bucket_size, k_bucket_size)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
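#----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). The sequence length
# is an arbitrary assumption and the fixed scale of 16 is the module default;
# queries and keys are l2-normalized in forward, so no running max is needed
# inside the tiles.
if __name__ == '__main__':
    attn = FlashAttention(dim = 512, heads = 8, dim_head = 64, causal = True)
    x = torch.randn(1, 1024, 512)
    out = attn(x)
    print(out.shape)   # torch.Size([1, 1024, 512])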
|
memory-efficient-attention-pytorch-main
|
memory_efficient_attention_pytorch/cosine_sim_flash_attention.py
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
from functools import partial
from einops import rearrange
from memory_efficient_attention_pytorch import FlashAttention, Attention
from memory_efficient_attention_pytorch.reversible import ReversibleSequence
def exists(val):
return val is not None
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, chunks = 1):
super().__init__()
self.chunks = chunks
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
if self.chunks <= 1:
return self.net(x)
chunks = x.chunk(self.chunks, dim = 1)
out = [self.net(chunk) for chunk in chunks]
return torch.cat(out, dim = 1)
class Transformer(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
dim,
depth,
causal = False,
dim_head = 64,
heads = 8,
ff_mult = 4,
ff_chunks = 1,
use_flash_attn = True,
**kwargs
):
super().__init__()
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
attn_klass = FlashAttention if use_flash_attn else partial(Attention, memory_efficient = True)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, attn_klass(dim = dim, dim_head = dim_head, heads = heads, causal = causal, **kwargs)),
PreNorm(dim, FeedForward(dim = dim, mult = ff_mult, chunks = ff_chunks)),
]))
self.net = ReversibleSequence(self.layers)
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(self, x, labels = None):
device = x.device
x = self.token_emb(x)
pos_emb = self.pos_emb(torch.arange(x.shape[-2], device = device))
x = x + pos_emb
x = self.net(x)
logits = self.to_logits(x)
if not exists(labels):
return logits
return F.cross_entropy(rearrange(logits, 'b n d -> b d n'), labels)
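#----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). Hyperparameters are
# kept deliberately small arbitrary assumptions; with labels supplied the forward
# pass returns a cross entropy loss, otherwise it returns per-token logits.
if __name__ == '__main__':
    model = Transformer(
        num_tokens = 256,
        max_seq_len = 512,
        dim = 256,
        depth = 2,
        causal = True,
        use_flash_attn = True
    )
    ids = torch.randint(0, 256, (1, 512))
    logits = model(ids)                               # (1, 512, 256)
    loss = model(ids[:, :-1], labels = ids[:, 1:])    # scalar cross entropy loss
    loss.backward()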
|
memory-efficient-attention-pytorch-main
|
memory_efficient_attention_pytorch/transformer.py
|
import math
import torch
from functools import partial
from torch import nn, einsum
from torch.autograd.function import Function
from einops import rearrange
# constants
EPSILON = 1e-10
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# flash attention forwards and backwards
# flash attention v1 - https://arxiv.org/abs/2205.14135
# flash attention v2 - https://tridao.me/publications/flash2/flash2.pdf
class FlashAttentionFunction(Function):
@staticmethod
@torch.no_grad()
def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):
""" Algorithm 1 in the v2 paper """
device = q.device
max_neg_value = -torch.finfo(q.dtype).max
qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
o = torch.zeros_like(q)
all_row_sums = torch.zeros((*q.shape[:-1], 1), device = device)
all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, device = device)
scale = (q.shape[-1] ** -0.5)
num_row_tiles = math.ceil(q.shape[-2] / q_bucket_size)
num_col_tiles = math.ceil(k.shape[-2] / k_bucket_size)
if exists(mask) and mask.ndim == 2:
mask = rearrange(mask, 'b n -> b 1 1 n')
if not exists(mask):
col_masks = (None,) * num_col_tiles
mask = (col_masks,) * num_row_tiles
else:
mask = ((mask,) * num_row_tiles) if mask.shape[-2] == 1 else mask.split(q_bucket_size, dim = -2)
mask = tuple(((row_mask,) * num_col_tiles) if row_mask.shape[-1] == 1 else row_mask.split(k_bucket_size, dim = -1) for row_mask in mask)
row_splits = zip(
q.split(q_bucket_size, dim = -2),
o.split(q_bucket_size, dim = -2),
mask,
all_row_sums.split(q_bucket_size, dim = -2),
all_row_maxes.split(q_bucket_size, dim = -2),
)
for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):
q_start_index = ind * q_bucket_size - qk_len_diff
col_splits = zip(
k.split(k_bucket_size, dim = -2),
v.split(k_bucket_size, dim = -2),
row_mask
)
for k_ind, (kc, vc, col_mask) in enumerate(col_splits):
k_start_index = k_ind * k_bucket_size
attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale
if exists(col_mask):
attn_weights.masked_fill_(~col_mask, max_neg_value)
if causal and q_start_index < (k_start_index + k_bucket_size - 1):
causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
attn_weights.masked_fill_(causal_mask, max_neg_value)
block_row_maxes = attn_weights.amax(dim = -1, keepdims = True)
new_row_maxes = torch.maximum(block_row_maxes, row_maxes)
exp_weights = torch.exp(attn_weights - new_row_maxes)
if exists(col_mask):
exp_weights.masked_fill_(~col_mask, 0.)
block_row_sums = exp_weights.sum(dim = -1, keepdims = True).clamp(min = EPSILON)
exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)
exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)
new_row_sums = exp_row_max_diff * row_sums + block_row_sums
oc.mul_(exp_row_max_diff).add_(exp_values)
row_maxes.copy_(new_row_maxes)
row_sums.copy_(new_row_sums)
oc.div_(row_sums)
lse = all_row_sums.log() + all_row_maxes
ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)
ctx.save_for_backward(q, k, v, o, lse)
return o
@staticmethod
@torch.no_grad()
def backward(ctx, do):
""" Algorithm 2 in the v2 paper """
causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args
q, k, v, o, lse = ctx.saved_tensors
device = q.device
max_neg_value = -torch.finfo(q.dtype).max
qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
dq = torch.zeros_like(q)
dk = torch.zeros_like(k)
dv = torch.zeros_like(v)
row_splits = zip(
q.split(q_bucket_size, dim = -2),
o.split(q_bucket_size, dim = -2),
do.split(q_bucket_size, dim = -2),
mask,
lse.split(q_bucket_size, dim = -2),
dq.split(q_bucket_size, dim = -2)
)
for ind, (qc, oc, doc, row_mask, lsec, dqc) in enumerate(row_splits):
q_start_index = ind * q_bucket_size - qk_len_diff
col_splits = zip(
k.split(k_bucket_size, dim = -2),
v.split(k_bucket_size, dim = -2),
dk.split(k_bucket_size, dim = -2),
dv.split(k_bucket_size, dim = -2),
row_mask
)
for k_ind, (kc, vc, dkc, dvc, col_mask) in enumerate(col_splits):
k_start_index = k_ind * k_bucket_size
attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale
if causal and q_start_index < (k_start_index + k_bucket_size - 1):
causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
attn_weights.masked_fill_(causal_mask, max_neg_value)
p = torch.exp(attn_weights - lsec)
if exists(col_mask):
p.masked_fill_(~col_mask, 0.)
dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)
dp = einsum('... i d, ... j d -> ... i j', doc, vc)
D = (doc * oc).sum(dim = -1, keepdims = True)
ds = p * scale * (dp - D)
dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)
dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)
dqc.add_(dq_chunk)
dkc.add_(dk_chunk)
dvc.add_(dv_chunk)
return dq, dk, dv, None, None, None, None
# main class
# just flash attention in plain pytorch
# it will be way slower than implementing it in CUDA
# for tinkering and educational purposes
class FlashAttention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
causal = False,
q_bucket_size = 512,
k_bucket_size = 1024
):
super().__init__()
self.heads = heads
self.causal = causal
inner_dim = heads * dim_head
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
# memory efficient attention related parameters
        # can be overridden on forward
self.q_bucket_size = q_bucket_size
self.k_bucket_size = k_bucket_size
def forward(
self,
x,
context = None,
mask = None,
q_bucket_size = None,
k_bucket_size = None,
):
q_bucket_size = default(q_bucket_size, self.q_bucket_size)
k_bucket_size = default(k_bucket_size, self.k_bucket_size)
h = self.heads
context = default(context, x)
q = self.to_q(x)
k, v = self.to_kv(context).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
out = FlashAttentionFunction.apply(q, k, v, mask, self.causal, q_bucket_size, k_bucket_size)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
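#----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). The boolean mask
# marks which key positions are valid; shapes and bucket sizes are arbitrary
# assumptions for the demonstration.
if __name__ == '__main__':
    attn = FlashAttention(dim = 512, heads = 8, dim_head = 64, causal = True)
    x = torch.randn(1, 1024, 512, requires_grad = True)
    mask = torch.ones(1, 1024).bool()
    out = attn(x, mask = mask)   # (1, 1024, 512)
    out.sum().backward()         # gradients are computed tile by tile, as in Algorithm 2 of the v2 paper
    print(x.grad.shape)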
|
memory-efficient-attention-pytorch-main
|
memory_efficient_attention_pytorch/flash_attention.py
|
import math
import torch
import torch.nn.functional as F
from functools import partial
from torch import nn, einsum
from torch.utils.checkpoint import checkpoint
from einops import rearrange
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def l2norm(t):
return F.normalize(t, dim = -1)
# regular attention
def attention(
q, k, v,
mask = None,
causal = False,
attn_bias = None,
**kwargs
):
sim = einsum('b h i d, b h j d -> b h i j', q, k)
if exists(attn_bias):
sim = sim + attn_bias
mask_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, mask_value)
if causal:
i, j = sim.shape[-2:]
mask = torch.ones(i, j, device = q.device, dtype = torch.bool).triu(j - i + 1)
sim = sim.masked_fill(mask, mask_value)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
return out
# memory efficient attention
def summarize_qkv_chunk(q, k, v, mask, attn_bias_chunk, causal, qk_start_indices):
q_start_index, k_start_index, q_chunk_size, k_chunk_size, device = *qk_start_indices, q.shape[-2], k.shape[-2], q.device
weight = einsum('b h i d, b h j d -> b h i j', q, k)
if exists(attn_bias_chunk):
weight = weight + attn_bias_chunk
mask_value = -torch.finfo(weight.dtype).max
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
weight = weight.masked_fill(~mask, mask_value)
if causal and q_start_index < (k_start_index + k_chunk_size - 1):
causal_mask = torch.ones((q_chunk_size, k_chunk_size), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
weight = weight.masked_fill(causal_mask, mask_value)
exp_weight = weight.exp()
weighted_value = einsum('b h i j, b h j d -> b h i d', exp_weight, v)
return exp_weight.sum(dim = -1), weighted_value
checkpointed_summarize_qkv_chunk = partial(checkpoint, summarize_qkv_chunk)
def numerically_unstable_memory_efficient_attention(
q, k, v,
mask = None,
causal = False,
attn_bias = None,
q_bucket_size = 512,
k_bucket_size = 1024,
eps = 1e-8
):
needs_backwards = q.requires_grad or k.requires_grad or v.requires_grad
summarize_qkv_fn = checkpointed_summarize_qkv_chunk if needs_backwards else summarize_qkv_chunk
# chunk all the inputs
q_chunks = q.split(q_bucket_size, dim = -2)
k_chunks = k.split(k_bucket_size, dim = -2)
v_chunks = v.split(k_bucket_size, dim = -2)
mask_chunks = mask.split(k_bucket_size, dim = -1) if exists(mask) else ((None,) * len(k_chunks))
if exists(attn_bias):
i, j = attn_bias.shape[-2:]
attn_bias_chunks = attn_bias.split(q_bucket_size, dim = -2)
attn_bias_chunks = list(map(lambda t: t.split(k_bucket_size, dim = -1), attn_bias_chunks))
# loop through all chunks and accumulate
out = []
for q_index, q_chunk in enumerate(q_chunks):
q_start_index = q_index * q_bucket_size
exp_weights = []
weighted_values = []
for k_index, (k_chunk, v_chunk, mask_chunk) in enumerate(zip(k_chunks, v_chunks, mask_chunks)):
k_start_index = k_index * k_bucket_size
if causal and k_start_index > (q_start_index + q_chunk.shape[-2] - 1):
# if chunk is to be all masked out causally, skip
continue
attn_bias_chunk = attn_bias_chunks[q_index][k_index] if exists(attn_bias) else None
exp_weight_chunk, weighted_value_chunk = summarize_qkv_fn(
q_chunk,
k_chunk,
v_chunk,
mask_chunk,
attn_bias_chunk,
causal,
(q_start_index, k_start_index)
)
exp_weights.append(exp_weight_chunk)
weighted_values.append(weighted_value_chunk)
all_values = sum(weighted_values)
all_weights = sum(exp_weights)
normalized_values = all_values / (rearrange(all_weights, '... -> ... 1') + eps)
out.append(normalized_values)
return torch.cat(out, dim = -2)
# main class
class CosineSimAttention(nn.Module):
def __init__(
self,
*,
dim,
seq_len,
heads = 8,
dim_head = 64,
dropout = 0.,
causal = False,
memory_efficient = False,
q_bucket_size = 512,
k_bucket_size = 1024
):
super().__init__()
self.heads = heads
self.causal = causal
inner_dim = heads * dim_head
scale_init_value = -math.log(math.log2(seq_len ** 2 - seq_len))
self.scale = nn.Parameter(torch.full((1, heads, 1, 1), scale_init_value))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
# memory efficient attention related parameters
        # can be overridden on forward
self.memory_efficient = memory_efficient
self.q_bucket_size = q_bucket_size
self.k_bucket_size = k_bucket_size
def forward(
self,
x,
context = None,
mask = None,
attn_bias = None,
memory_efficient = None,
q_bucket_size = None,
k_bucket_size = None,
):
memory_efficient = default(memory_efficient, self.memory_efficient)
q_bucket_size = default(q_bucket_size, self.q_bucket_size)
k_bucket_size = default(k_bucket_size, self.k_bucket_size)
h = self.heads
context = default(context, x)
q = self.to_q(x)
k, v = self.to_kv(context).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
q, k = map(l2norm, (q, k))
q = q * self.scale.exp()
attn_fn = attention if not memory_efficient else numerically_unstable_memory_efficient_attention
out = attn_fn(q, k, v, mask = mask, attn_bias = attn_bias, causal = self.causal, q_bucket_size = q_bucket_size, k_bucket_size = k_bucket_size)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
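#----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). seq_len is needed up
# front to initialize the learned temperature; all values below are arbitrary
# assumptions for the demonstration.
if __name__ == '__main__':
    attn = CosineSimAttention(dim = 512, seq_len = 1024, heads = 8, dim_head = 64, causal = True, memory_efficient = True)
    x = torch.randn(1, 1024, 512)
    out = attn(x)
    print(out.shape)   # torch.Size([1, 1024, 512])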
|
memory-efficient-attention-pytorch-main
|
memory_efficient_attention_pytorch/memory_efficient_cosine_sim_attention.py
|
from setuptools import setup, find_packages
setup(
name = 'tab-transformer-pytorch',
packages = find_packages(),
version = '0.2.6',
license='MIT',
description = 'Tab Transformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/tab-transformer-pytorch',
keywords = [
'artificial intelligence',
'transformers',
'attention mechanism',
'tabular data'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
tab-transformer-pytorch-main
|
setup.py
|
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
# feedforward and attention
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
def FeedForward(dim, mult = 4, dropout = 0.):
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
class Attention(nn.Module):
def __init__(
self,
dim,
heads = 8,
dim_head = 64,
dropout = 0.
):
super().__init__()
inner_dim = dim_head * heads
self.heads = heads
self.scale = dim_head ** -0.5
self.norm = nn.LayerNorm(dim)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
h = self.heads
x = self.norm(x)
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
attn = sim.softmax(dim = -1)
dropped_attn = self.dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', dropped_attn, v)
out = rearrange(out, 'b h n d -> b n (h d)', h = h)
out = self.to_out(out)
return out, attn
# transformer
class Transformer(nn.Module):
def __init__(
self,
dim,
depth,
heads,
dim_head,
attn_dropout,
ff_dropout
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim, heads = heads, dim_head = dim_head, dropout = attn_dropout),
FeedForward(dim, dropout = ff_dropout),
]))
def forward(self, x, return_attn = False):
post_softmax_attns = []
for attn, ff in self.layers:
attn_out, post_softmax_attn = attn(x)
post_softmax_attns.append(post_softmax_attn)
x = attn_out + x
x = ff(x) + x
if not return_attn:
return x
return x, torch.stack(post_softmax_attns)
# numerical embedder
class NumericalEmbedder(nn.Module):
def __init__(self, dim, num_numerical_types):
super().__init__()
self.weights = nn.Parameter(torch.randn(num_numerical_types, dim))
self.biases = nn.Parameter(torch.randn(num_numerical_types, dim))
def forward(self, x):
x = rearrange(x, 'b n -> b n 1')
return x * self.weights + self.biases
# main class
class FTTransformer(nn.Module):
def __init__(
self,
*,
categories,
num_continuous,
dim,
depth,
heads,
dim_head = 16,
dim_out = 1,
num_special_tokens = 2,
attn_dropout = 0.,
ff_dropout = 0.
):
super().__init__()
assert all(map(lambda n: n > 0, categories)), 'number of each category must be positive'
assert len(categories) + num_continuous > 0, 'input shape must not be null'
# categories related calculations
self.num_categories = len(categories)
self.num_unique_categories = sum(categories)
# create category embeddings table
self.num_special_tokens = num_special_tokens
total_tokens = self.num_unique_categories + num_special_tokens
# for automatically offsetting unique category ids to the correct position in the categories embedding table
if self.num_unique_categories > 0:
categories_offset = F.pad(torch.tensor(list(categories)), (1, 0), value = num_special_tokens)
categories_offset = categories_offset.cumsum(dim = -1)[:-1]
self.register_buffer('categories_offset', categories_offset)
# categorical embedding
self.categorical_embeds = nn.Embedding(total_tokens, dim)
# continuous
self.num_continuous = num_continuous
if self.num_continuous > 0:
self.numerical_embedder = NumericalEmbedder(dim, self.num_continuous)
# cls token
self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
# transformer
self.transformer = Transformer(
dim = dim,
depth = depth,
heads = heads,
dim_head = dim_head,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout
)
# to logits
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.ReLU(),
nn.Linear(dim, dim_out)
)
def forward(self, x_categ, x_numer, return_attn = False):
assert x_categ.shape[-1] == self.num_categories, f'you must pass in {self.num_categories} values for your categories input'
xs = []
if self.num_unique_categories > 0:
x_categ = x_categ + self.categories_offset
x_categ = self.categorical_embeds(x_categ)
xs.append(x_categ)
# add numerically embedded tokens
if self.num_continuous > 0:
x_numer = self.numerical_embedder(x_numer)
xs.append(x_numer)
# concat categorical and numerical
x = torch.cat(xs, dim = 1)
# append cls tokens
b = x.shape[0]
cls_tokens = repeat(self.cls_token, '1 1 d -> b 1 d', b = b)
x = torch.cat((cls_tokens, x), dim = 1)
# attend
x, attns = self.transformer(x, return_attn = True)
# get cls token
x = x[:, 0]
# out in the paper is linear(relu(ln(cls)))
logits = self.to_logits(x)
if not return_attn:
return logits
return logits, attns
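#----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). Three categorical
# columns (with 10, 5 and 6 unique values) and four continuous columns are
# assumed; all dimensions are arbitrary small choices.
if __name__ == '__main__':
    model = FTTransformer(
        categories = (10, 5, 6),
        num_continuous = 4,
        dim = 32,
        depth = 4,
        heads = 8,
        dim_out = 1
    )
    x_categ = torch.randint(0, 5, (1, 3))   # category ids, one column per categorical feature
    x_numer = torch.randn(1, 4)             # continuous features
    pred = model(x_categ, x_numer)
    print(pred.shape)                       # torch.Size([1, 1])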
|
tab-transformer-pytorch-main
|
tab_transformer_pytorch/ft_transformer.py
|
from tab_transformer_pytorch.tab_transformer_pytorch import TabTransformer
from tab_transformer_pytorch.ft_transformer import FTTransformer
|
tab-transformer-pytorch-main
|
tab_transformer_pytorch/__init__.py
|
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
# attention
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x, **kwargs):
return self.net(x)
class Attention(nn.Module):
def __init__(
self,
dim,
heads = 8,
dim_head = 16,
dropout = 0.
):
super().__init__()
inner_dim = dim_head * heads
self.heads = heads
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
h = self.heads
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = sim.softmax(dim = -1)
dropped_attn = self.dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', dropped_attn, v)
out = rearrange(out, 'b h n d -> b n (h d)', h = h)
return self.to_out(out), attn
# transformer
class Transformer(nn.Module):
def __init__(self, num_tokens, dim, depth, heads, dim_head, attn_dropout, ff_dropout):
super().__init__()
self.embeds = nn.Embedding(num_tokens, dim)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = attn_dropout)),
PreNorm(dim, FeedForward(dim, dropout = ff_dropout)),
]))
def forward(self, x, return_attn = False):
x = self.embeds(x)
post_softmax_attns = []
for attn, ff in self.layers:
attn_out, post_softmax_attn = attn(x)
post_softmax_attns.append(post_softmax_attn)
x = x + attn_out
x = ff(x) + x
if not return_attn:
return x
return x, torch.stack(post_softmax_attns)
# mlp
class MLP(nn.Module):
def __init__(self, dims, act = None):
super().__init__()
dims_pairs = list(zip(dims[:-1], dims[1:]))
layers = []
for ind, (dim_in, dim_out) in enumerate(dims_pairs):
is_last = ind >= (len(dims_pairs) - 1)
linear = nn.Linear(dim_in, dim_out)
layers.append(linear)
if is_last:
continue
act = default(act, nn.ReLU())
layers.append(act)
self.mlp = nn.Sequential(*layers)
def forward(self, x):
return self.mlp(x)
# main class
class TabTransformer(nn.Module):
def __init__(
self,
*,
categories,
num_continuous,
dim,
depth,
heads,
dim_head = 16,
dim_out = 1,
mlp_hidden_mults = (4, 2),
mlp_act = None,
num_special_tokens = 2,
continuous_mean_std = None,
attn_dropout = 0.,
ff_dropout = 0.
):
super().__init__()
assert all(map(lambda n: n > 0, categories)), 'number of each category must be positive'
assert len(categories) + num_continuous > 0, 'input shape must not be null'
# categories related calculations
self.num_categories = len(categories)
self.num_unique_categories = sum(categories)
# create category embeddings table
self.num_special_tokens = num_special_tokens
total_tokens = self.num_unique_categories + num_special_tokens
# for automatically offsetting unique category ids to the correct position in the categories embedding table
if self.num_unique_categories > 0:
categories_offset = F.pad(torch.tensor(list(categories)), (1, 0), value = num_special_tokens)
categories_offset = categories_offset.cumsum(dim = -1)[:-1]
self.register_buffer('categories_offset', categories_offset)
# continuous
self.num_continuous = num_continuous
if self.num_continuous > 0:
            if exists(continuous_mean_std):
                assert continuous_mean_std.shape == (num_continuous, 2), f'continuous_mean_std must have a shape of ({num_continuous}, 2) where the last dimension contains the mean and standard deviation respectively'
            # register even when None so the attribute lookup in forward never fails
            self.register_buffer('continuous_mean_std', continuous_mean_std)
self.norm = nn.LayerNorm(num_continuous)
# transformer
self.transformer = Transformer(
num_tokens = total_tokens,
dim = dim,
depth = depth,
heads = heads,
dim_head = dim_head,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout
)
# mlp to logits
input_size = (dim * self.num_categories) + num_continuous
l = input_size // 8
hidden_dimensions = list(map(lambda t: l * t, mlp_hidden_mults))
all_dimensions = [input_size, *hidden_dimensions, dim_out]
self.mlp = MLP(all_dimensions, act = mlp_act)
def forward(self, x_categ, x_cont, return_attn = False):
xs = []
assert x_categ.shape[-1] == self.num_categories, f'you must pass in {self.num_categories} values for your categories input'
if self.num_unique_categories > 0:
x_categ += self.categories_offset
x, attns = self.transformer(x_categ, return_attn = True)
flat_categ = x.flatten(1)
xs.append(flat_categ)
assert x_cont.shape[1] == self.num_continuous, f'you must pass in {self.num_continuous} values for your continuous input'
if self.num_continuous > 0:
if exists(self.continuous_mean_std):
mean, std = self.continuous_mean_std.unbind(dim = -1)
x_cont = (x_cont - mean) / std
normed_cont = self.norm(x_cont)
xs.append(normed_cont)
x = torch.cat(xs, dim = -1)
        logits = self.mlp(x)
if not return_attn:
return logits
return logits, attns
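#----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). The per-column mean
# and standard deviation used to normalize the continuous features are simple
# placeholder values here; all dimensions are arbitrary small assumptions.
if __name__ == '__main__':
    cont_mean_std = torch.stack((torch.zeros(4), torch.ones(4)), dim = -1)   # (num_continuous, 2)
    model = TabTransformer(
        categories = (10, 5, 6),
        num_continuous = 4,
        dim = 32,
        depth = 4,
        heads = 8,
        dim_out = 1,
        mlp_hidden_mults = (4, 2),
        continuous_mean_std = cont_mean_std
    )
    x_categ = torch.randint(0, 5, (1, 3))
    x_cont = torch.randn(1, 4)
    pred = model(x_categ, x_cont)
    print(pred.shape)    # torch.Size([1, 1])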
|
tab-transformer-pytorch-main
|
tab_transformer_pytorch/tab_transformer_pytorch.py
|
from setuptools import setup, find_packages
setup(
name = 'rvq-vae-gpt',
packages = find_packages(exclude=[]),
version = '0.0.4',
license='MIT',
description = 'Yet another attempt at GPT in quantized latent space',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/rvq-vae-gpt',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism'
],
install_requires=[
'beartype',
'einops>=0.4',
'local-attention>=1.0.0',
'torch>=1.6',
'vector-quantize-pytorch>=1.1.2'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
rvq-vae-gpt-main
|
setup.py
|
import gzip
import random
import tqdm
import numpy as np
import torch
from torch.optim import Adam
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from rvq_vae_gpt import TextVQVAE
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
SAVE_EVERY = 1000
SEQ_LEN = 2048
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def first(it):
return it[0]
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# instantiate text vae
model = TextVQVAE(
num_tokens = 256,
dim = (128, 256, 512),
depth = (2, 2, 4),
local_attn_window_size = 64,
num_codebooks = 8,
strides = (2, 2, 2)
).cuda()
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
np_train, np_valid = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(np_train), torch.from_numpy(np_valid)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size=BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size=BATCH_SIZE))
# optimizer
optim = Adam(model.parameters(), lr = LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval = 10.0, desc = "training"):
model.train()
for _ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f"training loss: {loss.item():.3f}")
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i == 0:
continue
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
valid_text = next(val_loader)
loss, recon = model(valid_text, return_reconstruction = True)
print(f"validation loss: {loss.item():.3f}")
print(f"\n\n\n[input text]\n\n {decode_tokens(first(valid_text))}")
print(f"\n\n[reconstructed text]\n\n {decode_tokens(first(recon))}\n\n")
if i % SAVE_EVERY == 0:
model.save('./text-vae.pt')
|
rvq-vae-gpt-main
|
train.py
|
from rvq_vae_gpt.rvq_vae_gpt import TextVQVAE, Transformer
|
rvq-vae-gpt-main
|
rvq_vae_gpt/__init__.py
|
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat, pack, unpack
from einops.layers.torch import Rearrange
from local_attention import LocalMHA
from vector_quantize_pytorch import VectorQuantize, ResidualVQ
from beartype import beartype
from beartype.typing import Tuple, Optional, Union
from pathlib import Path
import pickle
# helpers
def exists(val):
return val is not None
def first(it):
return it[0]
def default(*vals):
for val in vals:
if exists(val):
return val
return None
def divisible_by(numer, denom):
return (numer % denom) == 0
def cast_tuple(t, len = 1):
return ((t,) * len) if not isinstance(t, tuple) else t
# token shift - used by RWKV, Peng et al
def shift_tokens(t):
t, t_shift = t.chunk(2, dim = -1)
t_shift = F.pad(t_shift, (0, 0, 1, -1), value = 0.)
return torch.cat((t, t_shift), dim = -1)
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return x * F.gelu(gate)
def FeedForward(dim, mult = 4):
dim_inner = int(dim * mult * 2 / 3)
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, dim_inner * 2),
GEGLU(),
nn.Linear(dim_inner, dim)
)
# the best kind of down and upsampling
class Upsample(nn.Module):
def __init__(
self,
dim,
dim_out = None,
factor = 2
):
super().__init__()
dim_out = default(dim_out, dim)
linear = nn.Linear(dim, dim_out * factor)
self.net = nn.Sequential(
linear,
nn.SiLU(),
Rearrange('b n (p d) -> b (n p) d', p = factor)
)
self.factor = factor
self.init_(linear)
def init_(self, linear):
o, i = linear.weight.shape
linear_weight = torch.empty(o // self.factor, i)
nn.init.kaiming_uniform_(linear_weight)
linear_weight = repeat(linear_weight, 'o ... -> (o r) ...', r = self.factor)
        linear.weight.data.copy_(linear_weight)
nn.init.zeros_(linear.bias.data)
def forward(self, x):
return self.net(x)
def Downsample(
dim,
dim_out = None,
factor = 2
):
dim_out = default(dim_out, dim)
return nn.Sequential(
Rearrange('b (n p) d -> b n (p d)', p = factor),
nn.Linear(dim * factor, dim_out)
)
# local attention
class LocalTransformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
heads,
dim_head,
window_size
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
LocalMHA(
dim = dim,
heads = heads,
dim_head = dim_head,
qk_rmsnorm = True,
window_size = window_size,
use_rotary_pos_emb = True,
use_xpos = True,
causal = True
),
FeedForward(dim = dim)
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(shift_tokens(x)) + x
x = ff(shift_tokens(x)) + x
return x
# modules
@beartype
class TextVQVAE(nn.Module): # or genomics, eventually, with num_tokens set to 4
def __init__(
self,
*,
num_tokens,
dim: Union[int, Tuple[int, ...]],
depth: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
codebook_size = 1024,
local_attn_window_size = 32,
local_attn_heads = 8,
local_attn_dim_head = 64,
num_codebooks = 4,
vq_decay = 0.9,
rvq_quantize_dropout = True
):
super().__init__()
config = locals()
config.pop('self')
config.pop('__class__')
self._config = config
assert 0 < vq_decay <= 1.
strides = cast_tuple(strides)
num_layers = len(strides)
dim = cast_tuple(dim, num_layers)
depth = cast_tuple(depth, num_layers)
local_attn_window_size = cast_tuple(local_attn_window_size, num_layers)
assert num_layers == len(depth) == len(local_attn_window_size) == len(dim)
init_dim, vq_dim = dim[0], dim[-1]
dims = [first(dim), *dim]
dim_pairs = tuple(zip(dims[:-1], dims[1:]))
self.token_emb = nn.Embedding(num_tokens, init_dim)
self.total_strides = torch.tensor(list(strides)).cumprod(dim = -1)[-1].item()
self.encoder = nn.ModuleList([])
layer_params = tuple(zip(
strides,
depth,
local_attn_window_size,
dim_pairs
))
self.init_transformer = LocalTransformer(
dim = init_dim,
depth = first(depth),
heads = local_attn_heads,
dim_head = local_attn_dim_head,
window_size = first(local_attn_window_size)
)
self.final_transformer = LocalTransformer(
dim = init_dim,
depth = first(depth),
heads = local_attn_heads,
dim_head = local_attn_dim_head,
window_size = first(local_attn_window_size)
)
for layer_stride, layer_depth, layer_local_attn_window_size, (dim_in, dim_out) in layer_params:
self.encoder.append(nn.ModuleList([
Downsample(dim = dim_in, dim_out = dim_out, factor = layer_stride),
LocalTransformer(
dim = dim_out,
depth = layer_depth,
heads = local_attn_heads,
dim_head = local_attn_dim_head,
window_size = layer_local_attn_window_size
)
]))
self.encoder_norm = nn.LayerNorm(vq_dim)
self.vq = ResidualVQ(
dim = vq_dim,
num_quantizers = num_codebooks,
codebook_size = codebook_size,
decay = vq_decay,
quantize_dropout = num_codebooks > 1 and rvq_quantize_dropout,
commitment_weight = 0., # the weight on the commitment loss
kmeans_init = True,
kmeans_iters = 10
)
self.decoder = nn.ModuleList([])
for layer_stride, layer_depth, layer_local_attn_window_size, (dim_in, dim_out) in reversed(layer_params):
self.decoder.append(nn.ModuleList([
Upsample(dim = dim_out, dim_out = dim_in, factor = layer_stride),
LocalTransformer(
dim = dim_out,
depth = layer_depth,
heads = local_attn_heads,
dim_head = local_attn_dim_head,
window_size = layer_local_attn_window_size
)
]))
self.to_logits = nn.Sequential(
nn.LayerNorm(init_dim),
nn.Linear(init_dim, num_tokens)
)
def save(self, path):
path = Path(path)
pkg = dict(
model = self.state_dict(),
config = pickle.dumps(self._config)
)
torch.save(pkg, str(path))
def load(self, path):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path))
self.load_state_dict(pkg['model'])
@classmethod
def init_and_load(cls, path):
path = Path(path)
assert path.exists()
pkg = torch.load(str(path))
model = cls(**pickle.loads(pkg['config']))
model.load(path)
return model
@property
def device(self):
return next(self.parameters()).device
def encode(self, ids):
tokens = self.token_emb(ids)
tokens = self.init_transformer(tokens)
for downsample, local_attn in self.encoder:
tokens = downsample(tokens)
tokens = local_attn(tokens)
return self.encoder_norm(tokens)
def decode(self, codes):
tokens = codes
for upsample, local_attn in self.decoder:
tokens = local_attn(tokens)
tokens = upsample(tokens)
tokens = self.final_transformer(tokens)
logits = self.to_logits(tokens)
return logits
@torch.no_grad()
def decode_from_codebook_ids(self, codebook_ids):
codes = self.vq.get_codes_from_indices(codebook_ids)
return self.decode(codes)
def forward(
self,
ids,
return_codebook_indices = False,
return_reconstruction = False,
return_loss_breakdown = False
):
batch, seq = ids.shape
assert divisible_by(seq, self.total_strides)
ids = ids.to(self.device)
tokens = self.encode(ids)
tokens, indices, _ = self.vq(tokens)
if return_codebook_indices:
return indices
logits = self.decode(tokens)
logits = rearrange(logits, 'b n c -> b c n')
loss = F.cross_entropy(
logits,
ids
)
if return_reconstruction:
return loss, logits.argmax(dim = 1)
return loss
# hierarchical transformer
class Transformer(nn.Module):
pass
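#----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). The hierarchy below
# downsamples text by a total stride of 8, so the input length must be divisible
# by 8; all hyperparameters are small arbitrary assumptions (see train.py above
# for a full-size configuration).
if __name__ == '__main__':
    model = TextVQVAE(
        num_tokens = 256,
        dim = (64, 128, 256),
        depth = (1, 1, 2),
        strides = (2, 2, 2),
        local_attn_window_size = 32,
        num_codebooks = 4,
        codebook_size = 256
    )
    ids = torch.randint(0, 256, (1, 1024))
    loss = model(ids)                                        # reconstruction cross entropy
    loss.backward()
    indices = model(ids, return_codebook_indices = True)     # discrete codes for a latent-space GPT
    print(indices.shape)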
|
rvq-vae-gpt-main
|
rvq_vae_gpt/rvq_vae_gpt.py
|
import os
import sys
from distutils.core import setup
from setuptools.command.install import install
from setuptools import find_packages
# circleci.py version
VERSION = '1.2.0'
with open('README.rst', 'r') as fh:
long_description = fh.read().split('Results\n-------')[0]
with open('requirements.txt', 'r') as fh:
requirements = [e.strip() for e in fh.readlines() if e.strip() != '']
class VerifyVersionCommand(install):
"""Custom command to verify that the git tag matches our version"""
description = 'verify that the git tag matches our version'
def run(self):
tag = os.getenv('CIRCLE_TAG')
if tag != VERSION:
info = "Git tag: {0} does not match the version of this app: {1}".format(
tag, VERSION
)
sys.exit(info)
setup(
name='memcnn',
version=VERSION,
author='S.C. van de Leemput',
author_email='silvandeleemput@gmail.com',
packages=find_packages(),
include_package_data=True,
scripts=[],
url='http://pypi.python.org/pypi/memcnn/',
license='LICENSE.txt',
description='A PyTorch framework for developing memory efficient deep invertible networks.',
long_description=long_description,
long_description_content_type='text/x-rst',
install_requires=requirements,
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: OS Independent"
],
keywords='memcnn invertible PyTorch',
cmdclass={
'verify': VerifyVersionCommand,
}
)
|
memcnn-master
|
setup.py
|
# -*- coding: utf-8 -*-
"""Top-level package for MemCNN."""
__author__ = """Sil van de Leemput"""
__email__ = 'silvandeleemput@gmail.com'
__version__ = '1.2.0'
from memcnn.models.revop import ReversibleBlock, InvertibleModuleWrapper, create_coupling, is_invertible_module
from memcnn.models.additive import AdditiveCoupling
from memcnn.models.affine import AffineCoupling, AffineAdapterNaive, AffineAdapterSigmoid
__all__ = [
'AdditiveCoupling',
'AffineCoupling',
'AffineAdapterNaive',
'AffineAdapterSigmoid',
'InvertibleModuleWrapper',
'ReversibleBlock',
'create_coupling',
'is_invertible_module'
]
|
memcnn-master
|
memcnn/__init__.py
|
import argparse
import os
import logging
import torch
from memcnn.config import Config
from memcnn.experiment.manager import ExperimentManager
from memcnn.experiment.factory import load_experiment_config, experiment_config_parser
import memcnn.utils.log
logger = logging.getLogger('train')
def run_experiment(experiment_tags, data_dir, results_dir, start_fresh=False, use_cuda=False, workers=None,
experiments_file=None, *args, **kwargs):
if not os.path.exists(data_dir):
raise RuntimeError('Cannot find data_dir directory: {}'.format(data_dir))
if not os.path.exists(results_dir):
raise RuntimeError('Cannot find results_dir directory: {}'.format(results_dir))
cfg = load_experiment_config(experiments_file, experiment_tags)
logger.info(cfg)
model, optimizer, trainer, trainer_params = experiment_config_parser(cfg, workers=workers, data_dir=data_dir)
experiment_dir = os.path.join(results_dir, '_'.join(experiment_tags))
manager = ExperimentManager(experiment_dir, model, optimizer)
if start_fresh:
logger.info('Starting fresh option enabled. Clearing all previous results...')
manager.delete_dirs()
manager.make_dirs()
if use_cuda:
manager.model = manager.model.cuda()
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
last_iter = manager.get_last_model_iteration()
if last_iter > 0:
logger.info('Continue experiment from iteration: {}'.format(last_iter))
manager.load_train_state(last_iter)
trainer_params.update(kwargs)
trainer(manager, start_iter=last_iter, use_cuda=use_cuda, *args, **trainer_params)
def main(data_dir, results_dir):
# setup logging
memcnn.utils.log.setup(True)
# specify defaults for arguments
use_cuda = torch.cuda.is_available()
workers = 16
experiments_file = os.path.join(os.path.dirname(__file__), 'config', 'experiments.json')
start_fresh = False
# parse arguments
parser = argparse.ArgumentParser(description='Run memcnn experiments.')
parser.add_argument('experiment_tags', type=str, nargs='+',
help='Experiment tags to run and combine from the experiment config file')
parser.add_argument('--workers', dest='workers', type=int, default=workers,
help='Number of workers for data loading (Default: {})'.format(workers))
parser.add_argument('--results-dir', dest='results_dir', type=str, default=results_dir,
help='Directory for storing results (Default: {})'.format(results_dir))
parser.add_argument('--data-dir', dest='data_dir', type=str, default=data_dir,
help='Directory for input data (Default: {})'.format(data_dir))
parser.add_argument('--experiments-file', dest='experiments_file', type=str, default=experiments_file,
help='Experiments file (Default: {})'.format(experiments_file))
parser.add_argument('--fresh', dest='start_fresh', action='store_true', default=start_fresh,
help='Start with fresh experiment, clears all previous results (Default: {})'
.format(start_fresh))
parser.add_argument('--no-cuda', dest='use_cuda', action='store_false', default=use_cuda,
help='Always disables GPU use (Default: use when available)')
args = parser.parse_args()
if not use_cuda:
logger.warning('CUDA is not available in the current configuration!!!')
if not args.use_cuda:
logger.warning('CUDA is disabled!!!')
# run experiment given arguments
run_experiment(
args.experiment_tags,
args.data_dir,
args.results_dir,
start_fresh=args.start_fresh,
experiments_file=args.experiments_file,
use_cuda=args.use_cuda, workers=args.workers)
if __name__ == '__main__': # pragma: no cover
config_fname = Config.get_filename()
    if not os.path.exists(config_fname) or 'data_dir' not in Config() or 'results_dir' not in Config():
print('The configuration file was not set correctly.\n')
print('Please create a configuration file (json) at:\n {}\n'.format(config_fname))
print('The configuration file should be formatted as follows:\n\n'
'{\n'
' "data_dir": "/home/user/data",\n'
' "results_dir": "/home/user/experiments"\n'
'}\n')
print('data_dir : location for storing the input training datasets')
print('results_dir : location for storing the experiment files during training')
else:
main(data_dir=Config()['data_dir'],
results_dir=Config()['results_dir'])
|
memcnn-master
|
memcnn/train.py
|
import json
import os
class Config(dict):
def __init__(self, dic=None, verbose=False):
super(Config, self).__init__()
if dic is None:
fname = self.get_filename()
if verbose:
print("loading default {0}".format(fname))
with open(fname, "r") as f:
dic = json.load(f)
self.update(dic)
@staticmethod
def get_filename():
return os.path.join(Config.get_dir(), "config.json")
@staticmethod
def get_dir():
return os.path.dirname(__file__)
|
memcnn-master
|
memcnn/config/__init__.py
|
memcnn-master
|
memcnn/config/tests/__init__.py
|
|
import unittest
import json
import os
from memcnn.experiment.factory import load_experiment_config, experiment_config_parser
from memcnn.config import Config
import memcnn.config
class ConfigTestCase(unittest.TestCase):
class ConfigTest(Config):
@staticmethod
def get_filename():
return os.path.join(Config.get_dir(), "config.json.example")
def setUp(self):
self.config = ConfigTestCase.ConfigTest()
self.config_fname = os.path.join(os.path.dirname(__file__), "..", "config.json.example")
self.experiments_fname = os.path.join(os.path.dirname(__file__), "..", "experiments.json")
def load_json_file(fname):
with open(fname, 'r') as f:
data = json.load(f)
return data
self.load_json_file = load_json_file
def test_loading_main_config(self):
self.assertTrue(os.path.exists(self.config.get_filename()))
data = self.config
self.assertTrue(isinstance(data, dict))
self.assertTrue("data_dir" in data)
self.assertTrue("results_dir" in data)
def test_loading_experiments_config(self):
self.assertTrue(os.path.exists(self.experiments_fname))
data = self.load_json_file(self.experiments_fname)
self.assertTrue(isinstance(data, dict))
def test_experiment_configs(self):
data = self.load_json_file(self.experiments_fname)
config = self.config
keys = data.keys()
for key in keys:
result = load_experiment_config(self.experiments_fname, [key])
self.assertTrue(isinstance(result, dict))
if "dataset" in result:
experiment_config_parser(result, config['data_dir'])
def test_config_get_filename(self):
self.assertEqual(Config.get_filename(), os.path.join(os.path.dirname(memcnn.config.__file__), "config.json"))
def test_config_get_dir(self):
self.assertEqual(Config.get_dir(), os.path.dirname(memcnn.config.__file__))
def test_verbose(self):
ConfigTestCase.ConfigTest(verbose=True)
if __name__ == '__main__':
unittest.main()
|
memcnn-master
|
memcnn/config/tests/test_config.py
|
import os
import json
import logging
import sys
import time
def setup(use_stdout=True, filename=None, log_level=logging.DEBUG):
    """Set up basic logging to stdout and/or a log file."""
log = logging.getLogger('')
log.setLevel(log_level)
fmt = logging.Formatter("%(asctime)s [%(name)-15s] %(message)s", datefmt="%y-%m-%d %H:%M:%S")
if use_stdout:
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(log_level)
ch.setFormatter(fmt)
log.addHandler(ch)
if filename is not None:
fh = logging.FileHandler(filename)
fh.setLevel(log_level)
fh.setFormatter(fmt)
log.addHandler(fh)
class SummaryWriter(object):
    """Minimal scalar logger that appends (wall time, iteration, value) records to scalars.json in log_dir."""
def __init__(self, log_dir):
self._log_dir = log_dir
self._log_file = os.path.join(log_dir, "scalars.json")
self._summary = {}
self._load_if_exists()
def _load_if_exists(self):
if os.path.exists(self._log_file):
with open(self._log_file, "r") as f:
self._summary = json.load(f)
def add_scalar(self, name, value, iteration):
if name not in self._summary:
self._summary[name] = []
self._summary[name].append([time.time(), int(iteration), float(value)])
def flush(self):
with open(self._log_file, "w") as f:
json.dump(self._summary, f)
def close(self):
self.flush()
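

# Hedged usage sketch (not part of the original file): set up console logging
# and record a few scalar values with the JSON-backed SummaryWriter. The log
# directory is a placeholder.
if __name__ == "__main__":
    setup(use_stdout=True)
    demo_dir = "/tmp/memcnn_log_demo"
    if not os.path.exists(demo_dir):
        os.makedirs(demo_dir)
    writer = SummaryWriter(log_dir=demo_dir)
    for iteration in range(3):
        writer.add_scalar("train/loss", 1.0 / (iteration + 1), iteration)
    writer.close()  # flush() writes scalars.json into demo_dir
    logging.getLogger("demo").info("wrote scalar log to %s", demo_dir)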
|
memcnn-master
|
memcnn/utils/log.py
|
memcnn-master
|
memcnn/utils/__init__.py
|
|
import torch
import torch.nn as nn
from torch.nn.modules.module import Module
def _assert_no_grad(variable):
msg = "nn criterions don't compute the gradient w.r.t. targets - please " \
"mark these variables as not requiring gradients"
assert not variable.requires_grad, msg # nosec
class CrossEntropyLossTF(Module):
    """Cross-entropy on softmax outputs with one-hot encoded targets and optional weighting (TensorFlow-style)."""
def __init__(self):
super(CrossEntropyLossTF, self).__init__()
def forward(self, Ypred, Y, W=None):
_assert_no_grad(Y)
        # softmax probabilities and a one-hot encoding of the integer targets
        lsm = nn.Softmax(dim=1)
        y_onehot = torch.zeros(Ypred.shape[0], Ypred.shape[1], dtype=torch.float32, device=Ypred.device)
        y_onehot.scatter_(1, Y.data.view(-1, 1), 1)
if W is not None:
y_onehot = y_onehot * W
return torch.mean(-y_onehot * torch.log(lsm(Ypred))) * Ypred.shape[1]
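

# Hedged sanity check (not part of the original file): with integer targets and
# no weighting, this loss should agree with torch.nn.functional.cross_entropy
# up to floating-point error, since the per-element mean times the number of
# classes reduces to the mean negative log-likelihood over the batch.
if __name__ == "__main__":
    import torch.nn.functional as F
    torch.manual_seed(0)
    ypred = torch.randn(4, 10)
    y = torch.randint(0, 10, (4,))
    print(CrossEntropyLossTF()(ypred, y).item())
    print(F.cross_entropy(ypred, y).item())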
|
memcnn-master
|
memcnn/utils/loss.py
|
""" Module containing utilities to compute statistics
Some bits from: https://gist.github.com/xmfbit/67c407e34cbaf56e7820f09e774e56d8
"""
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
# top-k accuracy
def accuracy(output, target, topk=(1,)):
    """Computes the top-k accuracy (in percent) for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
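

# Hedged usage sketch (not part of the original file): combine AverageMeter and
# accuracy() to track a running top-1 score over a couple of random batches.
if __name__ == "__main__":
    import torch
    top1 = AverageMeter()
    for _ in range(2):
        logits = torch.randn(8, 5)
        labels = torch.randint(0, 5, (8,))
        prec1 = accuracy(logits, labels, topk=(1,))[0]
        top1.update(prec1.item(), n=labels.size(0))
    print("running top-1 accuracy: {0:.2f}%".format(top1.avg))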
|
memcnn-master
|
memcnn/utils/stats.py
|
import pytest
import torch
from memcnn.utils.stats import AverageMeter, accuracy
@pytest.mark.parametrize('val,n', [(1, 1), (14, 10), (10, 14), (5, 1), (1, 5), (0, 10)])
def test_average_meter(val, n):
meter = AverageMeter()
assert meter.val == 0
assert meter.avg == 0
assert meter.sum == 0
assert meter.count == 0
meter.update(val, n=n)
assert meter.val == val
assert meter.avg == val
assert meter.sum == val * n
assert meter.count == n
@pytest.mark.parametrize('topk,klass', [((1,), 4), ((1, 3,), 2), ((5,), 1)])
def test_accuracy(topk, klass, num_klasses=5):
batch_size = 5
target = torch.ones(batch_size, dtype=torch.long) * klass
output = torch.zeros(batch_size, num_klasses)
output[:, klass] = 1
res = accuracy(output, target, topk)
assert len(res) == len(topk)
assert all([e == 100.0 for e in res])
|
memcnn-master
|
memcnn/utils/tests/test_stats.py
|
memcnn-master
|
memcnn/utils/tests/__init__.py
|