#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from aepsych.config import Config
from .min_asks import MinAsks
from .min_total_outcome_occurrences import MinTotalOutcomeOccurrences
from .min_total_tells import MinTotalTells
from .run_indefinitely import RunIndefinitely
completion_criteria = [
MinTotalTells,
MinAsks,
MinTotalOutcomeOccurrences,
RunIndefinitely,
]
__all__ = [
"completion_criteria",
"MinTotalTells",
"MinAsks",
"MinTotalOutcomeOccurrences",
"RunIndefinitely",
]
Config.register_module(sys.modules[__name__])
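# With the module registered, these criteria can be driven from config
# strings. A hedged sketch (option names follow each criterion's
# get_config_options; the section name and values are illustrative):
#
#   [opt_strat]
#   min_asks = 20          # read by MinAsks
#   min_total_tells = 50   # read by MinTotalTells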
# ---- source: aepsych-main / aepsych/generators/completion_criterion/__init__.py ----
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from aepsych.config import Config, ConfigurableMixin
from ax.modelbridge.completion_criterion import MinimumPreferenceOccurances
class MinTotalOutcomeOccurrences(MinimumPreferenceOccurances, ConfigurableMixin):
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict[str, Any]:
outcome_types = config.getlist(name, "outcome_types", element_type=str)
outcome_names = config.getlist(
name, "outcome_names", element_type=str, fallback=None
)
# The completion criterion needs to get the name of the first outcome.
# TODO: Make it so that the criterion can be configured to which outcome
# it cares about instead of defaulting to the first one.
if outcome_names is None:
outcome_name = "outcome_1"
else:
outcome_name = str(outcome_names[0])
min_total_outcome_occurrences = config.getint(
name,
"min_total_outcome_occurrences",
fallback=1 if "binary" in outcome_types else 0,
)
options = {
"metric_name": outcome_name,
"threshold": min_total_outcome_occurrences,
}
return options
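# A hedged usage sketch (editor's illustration; the section and outcome names
# below are hypothetical, not part of the original module):
#
#   from aepsych.config import Config
#   demo = Config(config_str="""
#   [opt_strat]
#   outcome_types = [binary]
#   outcome_names = [detected]
#   """)
#   MinTotalOutcomeOccurrences.get_config_options(demo, "opt_strat")
#   # -> {"metric_name": "detected", "threshold": 1}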
# ---- source: aepsych-main / aepsych/generators/completion_criterion/min_total_outcome_occurrences.py ----
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from aepsych.config import Config, ConfigurableMixin
from ax.core.base_trial import TrialStatus
from ax.modelbridge.completion_criterion import MinimumTrialsInStatus
class MinTotalTells(MinimumTrialsInStatus, ConfigurableMixin):
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict[str, Any]:
min_total_tells = config.getint(name, "min_total_tells", fallback=1)
options = {"status": TrialStatus.COMPLETED, "threshold": min_total_tells}
return options
# ---- source: aepsych-main / aepsych/generators/completion_criterion/min_total_tells.py ----
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from ..config import Config
from .factory import (
default_mean_covar_factory,
monotonic_mean_covar_factory,
ordinal_mean_covar_factory,
song_mean_covar_factory,
)
__all__ = [
"default_mean_covar_factory",
"ordinal_mean_covar_factory",
"monotonic_mean_covar_factory",
"song_mean_covar_factory",
]
Config.register_module(sys.modules[__name__])
# ---- source: aepsych-main / aepsych/factory/__init__.py ----
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from configparser import NoOptionError
from typing import Optional, Tuple
import gpytorch
import torch
from aepsych.config import Config
from aepsych.kernels.rbf_partial_grad import RBFKernelPartialObsGrad
from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad
from aepsych.utils import get_dim
from scipy.stats import norm
"""AEPsych factory functions.
These functions generate gpytorch Mean and Kernel objects from
aepsych.config.Config configurations, including setting lengthscale
priors and so on. They are primarily used for programmatically
constructing modular AEPsych models from configs.
TODO write a modular AEPsych tutorial.
"""
# AEPsych assumes input dimensions are transformed to [0,1] and we want
# a lengthscale prior that excludes lengthscales that are larger than the
# range of inputs (i.e. >1) or much smaller (i.e. <0.1). This inverse
# gamma prior puts about 99% of the prior probability mass on such values,
# with a preference for small values to prevent oversmoothing. The idea
# is taken from https://betanalpha.github.io/assets/case_studies/gaussian_processes.html#323_Informative_Prior_Model
__default_invgamma_concentration = 4.6
__default_invgamma_rate = 1.0
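# A quick numeric check of the claim above (an editor's sketch assuming scipy;
# not part of the original module):
#
#   from scipy.stats import invgamma
#   invgamma(__default_invgamma_concentration, scale=__default_invgamma_rate).cdf(1.0) \
#       - invgamma(__default_invgamma_concentration, scale=__default_invgamma_rate).cdf(0.1)
#   # ~0.98: nearly all prior mass falls on lengthscales in [0.1, 1]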
def default_mean_covar_factory(
config: Optional[Config] = None, dim: Optional[int] = None
) -> Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.ScaleKernel]:
"""Default factory for generic GP models
Args:
config (Config, optional): Object containing bounds (and potentially other
config details).
dim (int, optional): Dimensionality of the parameter space. Must be provided
if config is None.
Returns:
Tuple[gpytorch.means.Mean, gpytorch.kernels.Kernel]: Instantiated
ConstantMean and ScaleKernel with priors based on bounds.
"""
assert (config is not None) or (
dim is not None
), "Either config or dim must be provided!"
fixed_mean = False
lengthscale_prior = "gamma"
outputscale_prior = "box"
kernel = gpytorch.kernels.RBFKernel
mean = gpytorch.means.ConstantMean()
if config is not None:
fixed_mean = config.getboolean(
"default_mean_covar_factory", "fixed_mean", fallback=fixed_mean
)
lengthscale_prior = config.get(
"default_mean_covar_factory",
"lengthscale_prior",
fallback=lengthscale_prior,
)
outputscale_prior = config.get(
"default_mean_covar_factory",
"outputscale_prior",
fallback=outputscale_prior,
)
kernel = config.getobj("default_mean_covar_factory", "kernel", fallback=kernel)
if fixed_mean:
try:
target = config.getfloat("default_mean_covar_factory", "target")
mean.constant.requires_grad_(False)
mean.constant.copy_(torch.tensor(norm.ppf(target)))
except NoOptionError:
raise RuntimeError("Config got fixed_mean=True but no target included!")
if config.getboolean("common", "use_ax", fallback=False):
config_dim = get_dim(config)
else:
lb = config.gettensor("default_mean_covar_factory", "lb")
ub = config.gettensor("default_mean_covar_factory", "ub")
assert lb.shape[0] == ub.shape[0], "bounds shape mismatch!"
config_dim = lb.shape[0]
if dim is not None:
assert dim == config_dim, "Provided config does not match provided dim!"
else:
dim = config_dim
if lengthscale_prior == "invgamma":
ls_prior = gpytorch.priors.GammaPrior(
concentration=__default_invgamma_concentration,
rate=__default_invgamma_rate,
transform=lambda x: 1 / x,
)
ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)
elif lengthscale_prior == "gamma":
ls_prior = gpytorch.priors.GammaPrior(concentration=3.0, rate=6.0)
ls_prior_mode = (ls_prior.concentration - 1) / ls_prior.rate
else:
raise RuntimeError(
f"Lengthscale_prior should be invgamma or gamma, got {lengthscale_prior}"
)
if outputscale_prior == "gamma":
os_prior = gpytorch.priors.GammaPrior(concentration=2.0, rate=0.15)
elif outputscale_prior == "box":
os_prior = gpytorch.priors.SmoothedBoxPrior(a=1, b=4)
else:
raise RuntimeError(
f"Outputscale_prior should be gamma or box, got {outputscale_prior}"
)
ls_constraint = gpytorch.constraints.GreaterThan(
lower_bound=1e-4, transform=None, initial_value=ls_prior_mode
)
covar = gpytorch.kernels.ScaleKernel(
kernel(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim,
),
outputscale_prior=os_prior,
)
return mean, covar
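# A hedged usage sketch (editor's illustration): the factory can be driven by
# an explicit dimensionality instead of a config.
#
#   mean, covar = default_mean_covar_factory(dim=2)
#   # covar is a ScaleKernel wrapping an ARD RBF kernel with the priors above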
def monotonic_mean_covar_factory(
config: Config,
) -> Tuple[ConstantMeanPartialObsGrad, gpytorch.kernels.ScaleKernel]:
"""Default factory for monotonic GP models based on derivative observations.
Args:
config (Config): Config containing (at least) bounds, and optionally LSE target.
Returns:
Tuple[ConstantMeanPartialObsGrad, gpytorch.kernels.ScaleKernel]: Instantiated mean and
scaled RBF kernels with partial derivative observations.
"""
lb = config.gettensor("monotonic_mean_covar_factory", "lb")
ub = config.gettensor("monotonic_mean_covar_factory", "ub")
assert lb.shape[0] == ub.shape[0], "bounds shape mismatch!"
dim = lb.shape[0]
fixed_mean = config.getboolean(
"monotonic_mean_covar_factory", "fixed_mean", fallback=False
)
mean = ConstantMeanPartialObsGrad()
if fixed_mean:
try:
target = config.getfloat("monotonic_mean_covar_factory", "target")
mean.constant.requires_grad_(False)
mean.constant.copy_(torch.tensor(norm.ppf(target)))
except NoOptionError:
raise RuntimeError("Config got fixed_mean=True but no target included!")
ls_prior = gpytorch.priors.GammaPrior(
concentration=__default_invgamma_concentration,
rate=__default_invgamma_rate,
transform=lambda x: 1 / x,
)
ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)
ls_constraint = gpytorch.constraints.GreaterThan(
lower_bound=1e-4, transform=None, initial_value=ls_prior_mode
)
covar = gpytorch.kernels.ScaleKernel(
RBFKernelPartialObsGrad(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim,
),
outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
)
return mean, covar
def song_mean_covar_factory(
config: Config,
) -> Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.AdditiveKernel]:
"""
Factory that makes kernels like Song et al. 2018:
Linear in intensity dimension (assumed to be the last
dimension), RBF in context dimensions, summed.
Args:
config (Config): Config object containing (at least) bounds and optionally
LSE target.
Returns:
Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.AdditiveKernel]: Instantiated
constant mean object and additive kernel object.
"""
if config.getboolean("common", "use_ax", fallback=False):
dim = get_dim(config)
else:
lb = config.gettensor("song_mean_covar_factory", "lb")
ub = config.gettensor("song_mean_covar_factory", "ub")
assert lb.shape[0] == ub.shape[0], "bounds shape mismatch!"
dim = lb.shape[0]
mean = gpytorch.means.ConstantMean()
try:
target = config.getfloat("song_mean_covar_factory", "target")
except NoOptionError:
target = 0.75
mean.constant.requires_grad_(False)
mean.constant.copy_(torch.tensor(norm.ppf(target)))
ls_prior = gpytorch.priors.GammaPrior(
concentration=__default_invgamma_concentration,
rate=__default_invgamma_rate,
transform=lambda x: 1 / x,
)
ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)
ls_constraint = gpytorch.constraints.GreaterThan(
lower_bound=1e-4, transform=None, initial_value=ls_prior_mode
)
stim_dim = config.getint("song_mean_covar_factory", "stim_dim", fallback=-1)
context_dims = list(range(dim))
# if intensity RBF is true, the intensity dimension
# will have both the RBF and linear kernels
intensity_RBF = config.getboolean(
"song_mean_covar_factory", "intensity_RBF", fallback=False
)
if not intensity_RBF:
intensity_dim = 1
stim_dim = context_dims.pop(stim_dim) # support relative stim dims
else:
intensity_dim = 0
stim_dim = context_dims[stim_dim]
# create the LinearKernel
intensity_covar = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.LinearKernel(active_dims=stim_dim, ard_num_dims=1),
outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
)
if dim == 1:
# this can just be LinearKernel but for consistency of interface
# we make it additive with one module
if not intensity_RBF:
return (
mean,
gpytorch.kernels.AdditiveKernel(intensity_covar),
)
else:
context_covar = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.RBFKernel(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim,
active_dims=context_dims,
),
outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
)
return mean, context_covar + intensity_covar
else:
context_covar = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.RBFKernel(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim - intensity_dim,
active_dims=context_dims,
),
outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
)
return mean, context_covar + intensity_covar
def ordinal_mean_covar_factory(
    config: Config,
) -> Tuple[gpytorch.means.ZeroMean, gpytorch.kernels.Kernel]:
try:
base_factory = config.getobj("ordinal_mean_covar_factory", "base_factory")
except NoOptionError:
base_factory = default_mean_covar_factory
_, base_covar = base_factory(config)
mean = gpytorch.means.ZeroMean() # wlog since ordinal is shift-invariant
if isinstance(base_covar, gpytorch.kernels.ScaleKernel):
covar = base_covar.base_kernel
else:
covar = base_covar
return mean, covar
# ---- source: aepsych-main / aepsych/factory/factory.py ----
#!/usr/bin/env python3
# coding: utf-8
# ### Semi-parametric psychophysical model tutorial
#
# This tutorial will demonstrate how to fit the semiparametric psychophysical models described in [Keeley et al., 2023](https://arxiv.org/abs/2302.01187).
#
# The semi-parametric model uses a conventional parametric form for the monotonic intensity or stimulus dimension (e.g. contrast, amplitude, sound pressure level, etc.), of the form $f(x) = k(x + c)$ for a stimulus intensity level $x$, intercept or threshold $c$, and slope $k$. It then puts flexible Gaussian Process (GP) priors on both the slope and intercept parameters as functions of the other stimulus dimensions. Informally, it can be thought of as a flexible nonlinear generalization of the model in QUEST+ ([Watson, 2017](https://jov.arvojournals.org/article.aspx?articleid=2611972)): it uses the same model for the intensity dimension but replaces QUEST+'s fixed parametric model for the context dimensions with a GP. AEPsych's `BernoulliMCMutualInformation` acquisition function is equivalent to the infomax sampling policy in QUEST+ (also known as BALD in the ML literature).
#
# The SemiP paper provides both the "full" semi-parametric model (with independent GP posteriors estimated for the slope and intercept) and a multivariate-normal-approximate semiparametric model, which derives an approximate single-MVN posterior for the SemiP model. The full SemiP model tends to be more accurate, but it is slower and less compatible with state-of-the-art lookahead sampling policies that rely on an MVN posterior.
#
# This notebook will demonstrate fitting of both models on the novel discrimination and detection test functions developed in [Owen et al., 2021](https://arxiv.org/abs/2104.09549). These test functions include a nontrivial interaction between context and intensity dimensions devised to be challenging for traditional methods that only assume additive threshold shifts by context.
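# To make that functional form concrete, here is a minimal sketch (an editor's
# illustration, not code from the paper or this tutorial) of the response
# probability at one context value under a floored Probit link:
# p(x) = floor + (1 - floor) * Phi(k * (x + c)).
from scipy.stats import norm as _norm

def semip_response_prob(x, k=2.0, c=0.5, floor=0.0):
    # k (slope) and c (intercept/threshold) are the quantities SemiP models with GPs
    f = k * (x + c)
    return floor + (1.0 - floor) * _norm.cdf(f)

# e.g. semip_response_prob(0.0) is about 0.84 with the defaults above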
# In[1]:
# imports and seeds
import matplotlib.pyplot as plt
import numpy as np
import torch
from aepsych.benchmark.test_functions import novel_detection_testfun, novel_discrimination_testfun
from aepsych.utils import dim_grid, make_scaled_sobol
from scipy.stats import norm
from torch.distributions import Bernoulli, Normal
np.random.seed(1)
torch.manual_seed(1)
# Below we import the SemiP-specific packages. Note that we are importing three link functions here, ```FloorGumbelObjective, FloorLogitObjective, FloorProbitObjective```. In this tutorial we will only use ```FloorProbitObjective```, to match how we generate synthetic data. From a theoretical perspective, the link function is the CDF of the noise in the perceptual system (i.e. a Probit link implies Gaussian noise, a Logit link implies logistic noise, and a Gumbel link implies Weibull noise). Empirically, different links may perform better on different datasets, so we recommend selecting the link for your application based on cross-validated performance or a priori theoretical motivation.
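# To illustrate that correspondence concretely (an editor's sketch using scipy
# CDFs rather than the AEPsych objective classes themselves):
import numpy as np
from scipy.stats import gumbel_l, logistic, norm

f = np.linspace(-3, 3, 7)
probit_link = norm.cdf(f)      # Gaussian perceptual noise
logit_link = logistic.cdf(f)   # logistic perceptual noise
gumbel_link = gumbel_l.cdf(f)  # min-Gumbel noise (Weibull-type behavior)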
# In[2]:
### SemiP imports
from aepsych.likelihoods import BernoulliObjectiveLikelihood
from aepsych.acquisition.objective import (
FloorGumbelObjective,
FloorLogitObjective,
FloorProbitObjective,
)
from aepsych.likelihoods.semi_p import LinearBernoulliLikelihood
from aepsych.models import HadamardSemiPModel, SemiParametricGPModel
# #### Test function
# Here we will import the novel 2D detection test function, which provides detection probabilities ranging from 0 to 1 over a two-dimensional space. The slope and intercept of the parameterized sigmoidal function vary as a fourth-order polynomial of the context dimension, which makes this a challenging problem for fully parametric models, which tend not to include such higher-order terms.
#
# The lower and upper bounds ```lb``` and ```ub``` specify the boundaries of the function and should be vectors with length equal to the dimensionality of the dataset you are fitting. The function is plotted below.
# In[3]:
# Detection test function
lb = [-1, -1]
ub = [1, 1]
xgrid = dim_grid(lower=lb, upper=ub, dim=2)
fgrid = novel_detection_testfun(xgrid)
plt.imshow(norm.cdf(fgrid).reshape(30, 30).T, aspect="auto", origin="lower")
cbar = plt.colorbar()
plt.xlabel("context dimension")
plt.ylabel("intensity dimension")
cbar.set_label("detection probability", rotation=270, labelpad=15)
plt.title("2D detection test function")
# In[4]:
# now generate some data from the Bernoulli distribution defined over a set of training locations
xtrain = make_scaled_sobol(lb=lb, ub=ub, size=100)  # quasi-random Sobol sampling for data generation
ftrain = novel_detection_testfun(xtrain)
ytrain = Bernoulli(torch.Tensor(norm.cdf(ftrain))).sample()
# #### Fitting the model
# Here we fit both the semiparametric ```SemiParametricGPModel``` and the MVN-approximate SemiP model ```HadamardSemiPModel``` using the synthetic data generated in the cell above, and show each model's estimates below. Note the argument ```stim_dim``` in each call: it specifies which dimension of your dataset is the monotonically increasing (sigmoidal) one. This could be, for example, volume in an auditory task or contrast in a visual task. Identifying the monotonic dimension correctly is crucial for good SemiP performance; here, the second dimension is the monotonic dimension of our 2D test function.
# In[5]:
### fit SemiP models
semip_model = SemiParametricGPModel(lb=lb, ub=ub, dim=2, stim_dim=1,
likelihood=LinearBernoulliLikelihood(objective=FloorProbitObjective(floor=0)))
approx_model = HadamardSemiPModel(lb=lb, ub=ub, dim=2, stim_dim=1,
likelihood=BernoulliObjectiveLikelihood(objective=FloorProbitObjective(floor=0)))
semip_model.fit(xtrain, ytrain)
approx_model.fit(xtrain, ytrain)
# In[6]:
# Make predictions at locations on a 2D grid and plot
semiP_pred_mu, _ = semip_model.predict(torch.Tensor(xgrid), probability_space=True)
MVN_pred_mu, _ = approx_model.predict(torch.Tensor(xgrid), probability_space=True)
fig, axs = plt.subplots(1, 2, figsize=(7, 3))
axs[0].set_ylabel("intensity dimension")
im1 = axs[0].imshow(semiP_pred_mu.reshape(30, 30).T, aspect="auto", origin="lower", vmin=0, vmax=1,
extent=[lb[0], ub[0], lb[1], ub[1]])
axs[0].set_title('SemiP inference')
axs[0].set_xlabel("context dimension")
axs[0].plot(xtrain[ytrain==0,0], xtrain[ytrain==0,1], 'rx')
axs[0].plot(xtrain[ytrain==1,0], xtrain[ytrain==1,1], 'g+')
axs[1].imshow(MVN_pred_mu.reshape(30, 30).T, aspect="auto", origin="lower", vmin=0, vmax=1,
extent=[lb[0], ub[0], lb[1], ub[1]])
axs[1].set_title('MVN-approx SemiP inference')
axs[1].set_xlabel("context dimension")
axs[1].plot(xtrain[ytrain==0,0], xtrain[ytrain==0,1], 'rx')
axs[1].plot(xtrain[ytrain==1,0], xtrain[ytrain==1,1], 'g+')
cb_ax = fig.add_axes([.92, 0.1, 0.02, 0.8])
cbar = fig.colorbar(im1, cax=cb_ax)
cbar.set_label("detection probability", rotation=270, labelpad=15)
# It is apparent that in this case quasi-random Sobol sampling over-explores parts of the psychometric field where the detection probability approaches 1.0. In a real experiment we would also use active sampling to improve sample efficiency.
# ### Discrimination function evaluation
#
# The semiparametric model can be adapted to different psychophysical tasks by adjusting the ```floor``` value when you instantiate the model. Below, we show an example using the 2D discrimination function, which has a minimum value of p = 0.5, corresponding to an inability to discriminate two stimuli. The logic follows the example above; we simply adjust the ```floor``` value and the test function being evaluated.
#
# As above, the inefficiency of quasi-random sampling means we require a fairly large number of samples to achieve good-looking plots.
# In[7]:
lb = [-1, -1]
ub = [1, 1]
xgrid = dim_grid(lower=lb, upper=ub, dim=2, gridsize=30)
fgrid = novel_discrimination_testfun(xgrid)
# now generate some data from the Bernoulli distribution defined over a set of training locations
xtrain = make_scaled_sobol(lb=lb, ub=ub, size=300)  # quasi-random Sobol sampling for data generation
ftrain = novel_discrimination_testfun(xtrain)
ytrain = Bernoulli(torch.Tensor(norm.cdf(ftrain))).sample()
### fit SemiP models
# Note: you can adjust the slope_mean value to bias toward a steeper rise with
# intensity. The default value is 2; here we set it to 6.
semip_model = SemiParametricGPModel(lb=lb, ub=ub, dim=2, stim_dim=1, slope_mean=6,
likelihood=LinearBernoulliLikelihood(objective=FloorProbitObjective(floor=0.5)))
approx_model = HadamardSemiPModel(lb=lb, ub=ub, dim=2, stim_dim=1, slope_mean=6,
    likelihood=BernoulliObjectiveLikelihood(objective=FloorProbitObjective(floor=0.5)))
semip_model.fit(xtrain, ytrain)
approx_model.fit(xtrain, ytrain)
# Make predictions at locations on a 2D grid and plot
semiP_pred_mu, _ = semip_model.predict(torch.Tensor(xgrid), probability_space=True)
approx_pred_mu, _ = approx_model.predict(torch.Tensor(xgrid), probability_space=True)
fig, axs = plt.subplots(1, 3, figsize=(9, 3))
axs[0].imshow(norm.cdf(fgrid).reshape(30, 30).T, aspect="auto", origin="lower", vmin=0.5, vmax=1,
extent=[lb[0], ub[0], lb[1], ub[1]])
axs[0].set_title('True Discrimination')
axs[0].set_xlabel("context dimension")
axs[0].set_xlabel("intensity dimension")
im1 = axs[1].imshow(semiP_pred_mu.reshape(30, 30).T, aspect="auto", origin="lower", vmin=0.5, vmax=1,
extent=[lb[0], ub[0], lb[1], ub[1]])
axs[1].set_title('SemiP inference')
axs[1].set_xlabel("context dimension")
axs[1].plot(xtrain[ytrain==0,0], xtrain[ytrain==0,1], 'rx')
axs[1].plot(xtrain[ytrain==1,0], xtrain[ytrain==1,1], 'g+')
axs[2].imshow(approx_pred_mu.reshape(30, 30).T, aspect="auto", origin="lower", vmin=0.5, vmax=1,
extent=[lb[0], ub[0], lb[1], ub[1]])
axs[2].set_title('MVN-SemiP inference')
axs[2].set_xlabel("context dimension")
axs[2].plot(xtrain[ytrain==0,0], xtrain[ytrain==0,1], 'rx')
axs[2].plot(xtrain[ytrain==1,0], xtrain[ytrain==1,1], 'g+')
cb_ax = fig.add_axes([.92, 0.1, 0.02, 0.8])
cbar = fig.colorbar(im1, cax=cb_ax)
cbar.set_label("detection probability", rotation=270, labelpad=15)
# # Active learning
#
# Finally, we provide an example active learning experiment using the semi-parametric models; see the sketch below. For more on how active learning in AEPsych works, see the [introductory documentation](https://aepsych.org/docs/gp_intro).
#
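# A minimal sketch of such a loop (an editor's illustration assuming the
# Strategy/generator APIs used elsewhere in AEPsych; the acquisition settings
# and trial counts here are hypothetical, not from the original tutorial):
from aepsych.acquisition import MCLevelSetEstimation
from aepsych.generators import OptimizeAcqfGenerator, SobolGenerator
from aepsych.strategy import SequentialStrategy, Strategy

lb, ub = [-1, -1], [1, 1]
active_model = SemiParametricGPModel(
    lb=lb, ub=ub, dim=2, stim_dim=1,
    likelihood=LinearBernoulliLikelihood(objective=FloorProbitObjective(floor=0)),
)
strat = SequentialStrategy([
    # quasi-random initialization trials
    Strategy(lb=lb, ub=ub, min_asks=10, generator=SobolGenerator(lb=lb, ub=ub),
             stimuli_per_trial=1, outcome_types=["binary"],
             min_total_outcome_occurrences=0),
    # model-based threshold-finding trials
    Strategy(lb=lb, ub=ub, min_asks=40, model=active_model,
             generator=OptimizeAcqfGenerator(
                 acqf=MCLevelSetEstimation,
                 acqf_kwargs={"target": 0.75, "beta": 1.96}),
             stimuli_per_trial=1, outcome_types=["binary"],
             min_total_outcome_occurrences=0),
])
while not strat.finished:
    next_x = strat.gen()
    # simulate a response from the detection test function
    response = Bernoulli(torch.Tensor(norm.cdf(novel_detection_testfun(next_x)))).sample()
    strat.add_data(next_x, response)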
# ---- source: aepsych-main / website/static/files/Semi_P_tutorial.py ----
#!/usr/bin/env python3
# coding: utf-8
# # Data Collection and Analysis Using AEPsych
#
# This tutorial serves as a complete example on how to collect and analyze data from perceptual experiments using AEPsych. For more information on AEPsych, refer to the documentation in the [GitHub repository](https://github.com/facebookresearch/aepsych).
#
# This tutorial demonstrates how to create an experiment to measure one's detection threshold for orientation. On each trial of the experiment, the participant is shown two gabor-patch stimuli, one oriented vertically (the foil) and one oriented at an angle (the target). The goal of the experiment is to find the smallest angle at which the participant can reliably identify the target. You can run the code blocks below interactively to participate in the experiment yourself, or you can simply view data collected from an example participant.
# ## Experiment Overview
# Below we define the functions we will need to conduct our experiment. Note that the code here is mainly for demonstration purposes and should not be used for serious experiments. If you would like to run psychophysics experiments in Python, consider using [Psychopy](https://www.psychopy.org/).
# In[1]:
import math
from IPython import get_ipython
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import time
from IPython.display import clear_output
import random
fig_size = 15
# Show a fixation cross with a blank background
def show_fixation_cross():
_, ax = plt.subplots(1, figsize=(fig_size, fig_size/2))
for spine in ax.spines.values():
spine.set_visible(False)
ax.tick_params(bottom=False, labelbottom=False,
left=False, labelleft=False)
ax.text(0.5, 0.5, '+', size=100,
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes)
ax.axis('off')
ax.grid(False)
plt.show()
# Evaluate the gabor filter function at an x,y position
def gabor(x, y, theta):
f = 0.05
sigma = 25
x_ = x * math.cos(theta) + y * math.sin(theta)
y_ = y * math.cos(theta) - x * math.sin(theta)
num = x_ ** 2 + y_ ** 2
return math.exp(num / (-2 * sigma**2)) * math.cos(2 * math.pi * f * x_)
# Show a gabor patch
def show_gabor(theta, ax):
# The size of the gabor patch
radius = 60
# Convert from degrees to radians
theta = math.radians(theta)
M = np.array([[gabor(i, j, theta) for j in range(-radius, radius)] for i in range(-radius, radius)])
M = ((M - M.min())) / (M.max() - M.min())
ax.axis('off')
ax.grid(False)
ax.imshow(M.T, cmap=cm.Greys_r)
def run_trial(angle, trial_number):
fixation_duration = 1
trial_duration = 0.5
# Show the fixation cross
show_fixation_cross()
time.sleep(fixation_duration)
clear_output(wait=True)
# randomly select if target should go on left or right side
right_side = random.choice([0, 1])
# randomly select if target angle should be clockwise or counterclockwise
direction = random.choice([-1, 1])
angle *= direction
# Show the foil and target
_, axs = plt.subplots(1, 2, figsize=(fig_size, fig_size))
show_gabor(angle, axs[right_side])
show_gabor(0, axs[int(not right_side)])
# Ask for the participant's response
ans = None
ans_key = {'j': 1, 'J': 1, 'f': 0, 'F': 0}
while ans not in ans_key:
plt.show()
time.sleep(trial_duration)
clear_output()
ans = input(f"Trial #{trial_number}: Which side was angled? (Enter 'F' for left or 'J' for right)")
# Check if the response was correct
is_correct = int(ans_key[ans] == right_side)
target_side = "right" if right_side else "left"
return is_correct, target_side
# On each trial of the experiment, the participant will first see a white box with a fixation cross for 1 second. The box looks like this:
# In[2]:
show_fixation_cross()
# After 1 second, the fixation cross will disappear, and two gabor patches will appear side-by-side. One patch will be the foil, with a vertical orientation, and one will be the target, with an angled orientation. The position of the target and whether its angle is measured clockwise or counterclockwise are randomized on each trial. An example foil and target are shown below:
# In[3]:
_, axs = plt.subplots(1, 2, figsize=(fig_size, fig_size))
show_gabor(0, axs[0])
show_gabor(5, axs[1])
# After 0.5 seconds, the patches will disappear, and the participant will be prompted to report which one was the target by typing "F" for left or "J" for right, and then hitting enter. Try running the code block below to experience a trial for yourself. The `run_trial` function takes an angle and a trial number as input and returns whether or not you were correct (1 for correct, 0 for incorrect), as well as the side the target was actually on.
# In[4]:
run_trial(5, 0)
# ## Starting the AEPsych Server
# The code block below starts an AEPsych server that will run in the background (you can also start the server by running the second line in a command prompt). We can contact the server at IP address 0.0.0.0, port 5555, and the data will be saved in a database named "data_collection_analysis_tutorial.db". In this tutorial, we will run the server on the same computer as the experiment, but it is also possible to run the server remotely.
# In[5]:
get_ipython().run_cell_magic('bash', '--bg', '\naepsych_server --ip 0.0.0.0 --port 5555 database --db data_collection_analysis_tutorial.db\n')
# In[6]:
from aepsych_client import AEPsychClient
client = AEPsychClient(ip="0.0.0.0", port=5555)
# We tell the server what kind of experiment we are running by sending it a configure message (see the [configs folder](https://github.com/facebookresearch/aepsych/tree/main/configs) for some examples). The gist of the config here is that it tells the server that our experiment has one parameter, called "theta" (the angle of the target), which will range from 0.1 to 5 degrees. (If you run this experiment on yourself and find that this range of angles makes the experiment too easy or too hard, you can adjust the `lb` and `ub` values in the string below.) This experiment will last for 50 trials. The parameter values for the first 10 trials will be drawn from the [Sobol sequence](https://en.wikipedia.org/wiki/Sobol_sequence) to provide some initial data to initialize AEPsych's model; the following 40 trials will be drawn from that model. In this case, the model will be a classification [Gaussian Process](https://en.wikipedia.org/wiki/Gaussian_process) (GP).
#
# GPs can be thought of as generalizations of traditional psychophysics models that can handle multiple dimensions and allow the response function to be nonlinear (for further discussion see the [AEPsych preprint](https://arxiv.org/abs/2104.09549)). Furthermore, GPs can be used in conjunction with acquisition functions to perform [active learning](https://en.wikipedia.org/wiki/Active_learning_(machine_learning))--that is, the model can determine which points in the parameter space should be sampled next to achieve some goal. In this case we use the [level set estimation](https://www.ijcai.org/Proceedings/13/Papers/202.pdf) acquisition function to find the angle at which the participant will correctly identify the target 75% of the time.
#
# GPs are defined by a mean function and a covariance function. Because we don't specify these functions in the config, they revert to their defaults: a constant mean function and a [radial basis covariance function](https://en.wikipedia.org/wiki/Radial_basis_function). These are fine for the parameter space we want to explore here, but if we wanted to search across a larger range of angles, we would probably want a periodic covariance function to account for the fact that angles wrap around every 360 degrees.
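# If we did want different mean or covariance functions, the config can point
# the model at a factory (a hedged sketch; consult the AEPsych docs for the
# options your model actually supports):
#
#   [GPClassificationModel]
#   mean_covar_factory = default_mean_covar_factory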
# In[7]:
config_str = """
[common]
parnames = [theta] # names of the parameters
lb = [0.1] # lower bound of the parameter
ub = [5] # upper bound of parameter
stimuli_per_trial = 1 # the number of stimuli shown in each trial; 1 for single, or 2 for pairwise experiments
outcome_types = [binary] # the type of response given by the participant; can be [binary] or [continuous]
target = 0.75 # desired threshold, for threshold estimation.
strategy_names = [init_strat, opt_strat] # The strategies that will be used, corresponding to the named sections below
# Configuration for the initialization strategy, which we use to gather initial points
# before we start doing model-based acquisition
[init_strat]
min_total_tells = 10 # number of Sobol trials to run
generator = SobolGenerator # The generator class used to generate new parameter values
# Configuration for the optimization strategy, our model-based acquisition
[opt_strat]
min_total_tells = 50 # total number of trials to run
refit_every = 5 # how often to refit the model from scratch
generator = OptimizeAcqfGenerator # The generator class used to generate new parameter values
acqf = MCLevelSetEstimation # The acquisition function; MCLevelSetEstimation is used for threshold finding
model = GPClassificationModel # The model class
"""
client.configure(config_str=config_str, config_name="1d_gabor_config")
# Now that we have set up our client and configured our server, we can start collecting data. The basic loop of the experiment is as follows:
#
# 1. Ask AEPsych what value of our parameter, angle, to try next.
# 2. Run a trial using this suggested value.
# 3. Tell AEPsych the participant's response so that it can update its model.
# 4. Repeat for the specified number of trials.
#
# We ask AEPsych for parameters by calling `client.ask()`. This returns a dictionary with two entries. The first, `'config'`, contains another dictionary whose keys are the names of your parameters and whose values are lists of parameter values to try. The second, `'is_finished'`, is a bool indicating whether the number of trials specified in the config has been completed.
# In[8]:
client.ask()
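# A representative response (illustrative; the suggested value will differ from
# run to run):
#
#   {'config': {'theta': [2.3]}, 'is_finished': False}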
# We tell AEPsych about the parameter values we have tried by calling `client.tell()`. This method has two required arguments. The first, `config`, is a dictionary representing the set of parameter values you would like to tell AEPsych about, in the same format as the `'config'` entry from `client.ask()`. The second is the binary outcome of a trial, indicated by 0 (the participant did not identify the target) or 1 (the participant did). The method also optionally takes other keyword arguments that will be stored as metadata in AEPsych's database; for our experiment, we will record which side the target was on.
# In[9]:
client.tell(config={'theta':[.1]}, outcome=0, target_side='right')
# The code below asks AEPsych for parameter values and runs trials until the experiment is completed:
# In[10]:
finished = False
trial_number = 1
while not finished:
response = client.ask()
theta = response["config"]["theta"][0]
outcome, target_side = run_trial(theta, trial_number)
client.tell(config={"theta": [theta]}, outcome=outcome, target_side=target_side)
finished = response["is_finished"]
trial_number += 1
# Note that even after the number of trials specified in the config has been completed, you can still ask for more parameter values and conduct more trials:
# In[11]:
client.ask()
# You are also not restricted to only using the parameter values that AEPsych suggests. You can tell it the outcome of any parameter values that you would like:
# In[12]:
client.tell(config={'theta':[5]}, outcome=1, target_side='left')
# Once you are done collecting data, you can close the server by calling `client.finalize()`.
# In[13]:
client.finalize()
# ## Replaying the Experiment and Analyzing Data
# To analyze the data, we open the database with an `AEPsychServer` object. This server runs here in the notebook rather than in the background like the server we used to collect data.
# In[14]:
from aepsych.server import AEPsychServer
serv = AEPsychServer(database_path="data_collection_analysis_tutorial.db")
# The database is made up of a set of experiments, each with a unique experiment ID. Every time the server is started (e.g. from the command line), a new experiment ID is generated. To list all experiment IDs:
# In[15]:
exp_ids = [rec.experiment_id for rec in serv.db.get_master_records()]
print(exp_ids)
# The above indicates that there is only one experiment ID in this database.
#
# Note that the above commands do not actually load any experiment data from the database. The data is only loaded when you run `serv.replay` to replay all of the setup, ask, and tell messages recorded in the database. We pass `skip_computations=True` to this method to skip the model-fitting computations and make the replay finish faster.
# In[16]:
serv.replay(exp_ids[-1], skip_computations=True)
# The data has been loaded into the server's list of strategies, which we can access through `serv._strats`. Per our config string, we have two strategies: the model-less initialization strategy and the model-based threshold-finding strategy. We can see the model-based strategy's data using its `x` and `y` properties:
# In[17]:
strat = serv._strats[-1]
print(strat.x)
print(strat.y)
# Since we passed `skip_computations = True` into the replay method before, we will have to manually refit the strategy's model:
# In[18]:
strat.model.fit(strat.x, strat.y)
# We can now plot the posterior of the fitted model:
# In[20]:
from aepsych.plotting import plot_strat
plt.rcParams["figure.figsize"] = (15,15)
plt.rcParams['figure.facecolor'] = 'white'
plot_strat(strat, xlabel='angle (degrees)', ylabel='Probability of Selecting Target', target_level=.75)
# In this plot, the blue and red ticks at the bottom represent angles at which the participant did and did not successfully identify the target, respectively. The dark blue curve represents the model's posterior probability that the participant would select the target, with 95% of the posterior mass lying in the shaded region. The orange horizontal line represents the participant's detection threshold, which once again is defined as the smallest angle at which the participant would select the target 75% of the time. If you are viewing the data from the example participant, you will see that their threshold is somewhere between about 0.5 and 1.5 degrees (note, however, that threshold estimation for non-monotonic models may not always be accurate; we are working on better algorithms for this). More data could be collected to reduce this uncertainty. If you collected your own data, your plot may look different; there are often large individual differences in psychophysics tasks. In any case, you should see that most of the sampled points are near the estimated threshold; the level set estimation algorithm intelligently selects points so that time is not wasted collecting data far from the threshold, allowing a more accurate threshold estimate in fewer trials than traditional methods.
#
# ## Conclusion
#
# This tutorial has shown a complete example of how to conduct an AEPsych experiment and analyze the data. You can easily adapt this code to your own needs by changing the config string and the code that runs trials. If you need any help debugging or setting up your experiment, you can [open a GitHub issue](https://github.com/facebookresearch/aepsych/issues). You can also try conducting AEPsych experiments without writing any code by running [this notebook](https://github.com/facebookresearch/aepsych/blob/main/examples/Interactive_AEPsych.ipynb).
# ---- source: aepsych-main / website/static/files/data_collection_analysis_tutorial.py ----
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from copy import deepcopy
import numpy as np
import torch
from aepsych.config import Config
from aepsych.models import GPClassificationModel
from aepsych.utils import _process_bounds, get_dim, get_parameters, make_scaled_sobol
class UtilsTestCase(unittest.TestCase):
def test_scaled_sobol_asserts(self):
lb = np.r_[0, 0, 1]
ub = np.r_[1]
with self.assertRaises(AssertionError):
make_scaled_sobol(lb, ub, 10)
def test_scaled_sobol_sizes(self):
lb = np.r_[0, 1]
ub = np.r_[1, 30]
grid = make_scaled_sobol(lb, ub, 100)
self.assertEqual(grid.shape, (100, 2))
def test_dim_grid_model_size(self):
lb = -4.0
ub = 4.0
dim = 1
gridsize = 10
mb = GPClassificationModel(lb=lb, ub=ub, dim=dim)
grid = GPClassificationModel.dim_grid(mb, gridsize=gridsize)
self.assertEqual(grid.shape, torch.Size([10, 1]))
def test_process_bounds(self):
lb, ub, dim = _process_bounds(np.r_[0, 1], np.r_[2, 3], None)
self.assertTrue(torch.all(lb == torch.tensor([0.0, 1.0])))
self.assertTrue(torch.all(ub == torch.tensor([2.0, 3.0])))
self.assertEqual(dim, 2)
# Wrong dim
with self.assertRaises(AssertionError):
_process_bounds(np.r_[0, 0], np.r_[1, 1], 3)
# ub < lb
with self.assertRaises(AssertionError):
_process_bounds(np.r_[1], np.r_[0], None)
class ParameterUtilsTestCase(unittest.TestCase):
def setUp(self) -> None:
config_str = """
[common]
parnames = [par1, par2, par3]
lb = [0, 0, 0]
ub = [1, 1000, 10]
choice_parnames = [par4, par5, par6, par9]
fixed_parnames = [par7, par8]
[par2]
log_scale = True
[par3]
value_type = int
[par4]
choices = [a, b]
[par5]
choices = [x]
[par6]
choices = [x, y, z]
is_ordered = True
[par7]
value = 123
[par8]
value = foo
[par9]
choices = [x, y, z]
"""
self.config = Config(config_str=config_str)
def test_get_ax_parameters(self):
params = get_parameters(self.config)
correct_range_params = [
{
"name": "par1",
"type": "range",
"value_type": "float",
"log_scale": False,
"bounds": [0.0, 1.0],
},
{
"name": "par2",
"type": "range",
"value_type": "float",
"log_scale": True,
"bounds": [0.0, 1000.0],
},
{
"name": "par3",
"type": "range",
"value_type": "int",
"log_scale": False,
"bounds": [0.0, 10.0],
},
]
correct_choice_params = [
{
"name": "par4",
"type": "choice",
"value_type": "str",
"is_ordered": False,
"values": ["a", "b"],
},
{
"name": "par5",
"type": "choice",
"value_type": "str",
"is_ordered": False,
"values": ["x"],
},
{
"name": "par6",
"type": "choice",
"value_type": "str",
"is_ordered": True,
"values": ["x", "y", "z"],
},
{
"name": "par9",
"type": "choice",
"value_type": "str",
"is_ordered": False,
"values": ["x", "y", "z"],
},
]
correct_fixed_params = [
{
"name": "par7",
"type": "fixed",
"value": 123.0,
},
{
"name": "par8",
"type": "fixed",
"value": "foo",
},
]
self.assertEqual(
params, correct_range_params + correct_choice_params + correct_fixed_params
)
def test_get_dim(self):
dim = get_dim(self.config)
# 3 dims from par1, par2, par3
# 1 binary dim from par4
# 0 dim from par5 (effectively a fixed dim)
# 1 dim from par6 (is_ordered makes it one continuous dim)
# 0 dim from par7 & par8 (fixed dims aren't modeled)
# 3 dim from par9 (one-hot vector with 3 elements)
# 8 total dims
self.assertEqual(8, dim)
# Count only choice dims
copied_config = deepcopy(self.config)
del copied_config["common"]["parnames"]
del copied_config["common"]["lb"]
del copied_config["common"]["ub"]
dim = get_dim(copied_config)
self.assertEqual(5, dim)
# Removing par5 does nothing
copied_config["common"]["choice_parnames"] = "[par4, par6, par9]"
dim = get_dim(copied_config)
self.assertEqual(5, dim)
# Removing par6 leaves us with 3 binary dimension and 1 continuous dimension
copied_config["common"]["choice_parnames"] = "[par4, par9]"
dim = get_dim(copied_config)
self.assertEqual(4, dim)
# Removing par9 leaves us with 1 binary dimension
copied_config["common"]["choice_parnames"] = "[par4]"
dim = get_dim(copied_config)
self.assertEqual(1, dim)
# Removing par7 & par8 does nothing
del copied_config["common"]["fixed_parnames"]
dim = get_dim(copied_config)
self.assertEqual(1, dim)
if __name__ == "__main__":
unittest.main()
# ---- source: aepsych-main / tests/test_utils.py ----
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import MagicMock
import numpy as np
import torch
from aepsych.acquisition.monotonic_rejection import MonotonicMCLSE
from aepsych.config import Config
from aepsych.generators import MonotonicRejectionGenerator, SobolGenerator
from aepsych.models.gp_classification import GPClassificationModel
from aepsych.models.monotonic_rejection_gp import MonotonicRejectionGP
from aepsych.strategy import AEPsychStrategy, SequentialStrategy, Strategy
class TestSequenceGenerators(unittest.TestCase):
def setUp(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
lb = [-1, -1]
ub = [1, 1]
extra_acqf_args = {"target": 0.75, "beta": 1.96}
self.strat = Strategy(
model=MonotonicRejectionGP(
lb=lb,
ub=ub,
dim=2,
monotonic_idxs=[1],
),
generator=MonotonicRejectionGenerator(
acqf=MonotonicMCLSE, acqf_kwargs=extra_acqf_args
),
min_asks=50,
lb=lb,
ub=ub,
min_post_range=0.3,
stimuli_per_trial=1,
outcome_types=["binary"],
)
self.strat.model.fit = MagicMock()
self.strat.model.update = MagicMock()
self.strat.generator.gen = MagicMock()
def test_opt_strategy_single(self):
lbs = [[-1], [-10]]
ubs = [[1], [-8]]
ns = [3, 5]
strat_list = []
for lb, ub, n in zip(lbs, ubs, ns):
gen = SobolGenerator(lb, ub)
strat = Strategy(
min_asks=n,
generator=gen,
lb=lb,
ub=ub,
min_total_outcome_occurrences=0,
stimuli_per_trial=1,
outcome_types=["binary"],
)
strat_list.append(strat)
strat = SequentialStrategy(strat_list)
out = np.zeros(8)
for i in range(8):
next_x = strat.gen()
strat.add_data(next_x, [1])
out[i] = next_x
gen1 = out[:3]
gen2 = out[3:]
self.assertTrue(np.min(gen1) >= -1)
self.assertTrue(np.min(gen2) >= -10)
self.assertTrue(np.max(gen1) <= 1)
self.assertTrue(np.max(gen2) <= -8)
def test_warmstart(self):
self.strat.refit_every = 10
for _ in range(50):
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [1])
self.assertEqual(
self.strat.model.fit.call_count, 4
) # first fit gets skipped because there is no data
self.assertEqual(self.strat.model.update.call_count, 45)
def test_no_warmstart(self):
for _ in range(50):
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [1])
self.assertEqual(
self.strat.model.fit.call_count, 49
) # first fit gets skipped because there is no data
self.assertEqual(self.strat.model.update.call_count, 0)
def test_finish_criteria(self):
for _ in range(49):
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [1])
self.assertFalse(self.strat.finished)
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [1])
self.assertFalse(self.strat.finished) # not enough "no" trials
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [0])
self.assertFalse(
self.strat.finished
) # not enough difference between posterior min/max
for _ in range(50):
self.strat.gen()
self.strat.add_data(np.r_[0.0, 0.0], [0])
self.assertTrue(self.strat.finished)
def test_max_asks(self):
self.strat.max_asks = 50
for _ in range(49):
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [1])
self.assertFalse(self.strat.finished)
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [1])
self.assertTrue(self.strat.finished)
def test_keep_most_recent(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
lb = [-1, -1]
ub = [1, 1]
self.strat = Strategy(
model=GPClassificationModel(
lb=lb,
ub=ub,
),
generator=SobolGenerator(lb=lb, ub=ub),
min_asks=50,
lb=lb,
ub=ub,
stimuli_per_trial=1,
outcome_types=["binary"],
)
self.strat.keep_most_recent = 2
data = torch.rand(4, 2)
for i, d in enumerate(data):
self.strat.add_data(d, [0])
self.strat.update()
lb = max(0, i - self.strat.keep_most_recent + 1)
self.assertTrue(
torch.equal(self.strat.model.train_inputs[0], data[lb : i + 1])
)
def test_run_indefinitely(self):
lb = [-1, -1]
ub = [1, 1]
with self.assertWarns(UserWarning):
self.strat = Strategy(
model=GPClassificationModel(
lb=lb,
ub=ub,
),
generator=SobolGenerator(lb=lb, ub=ub),
lb=lb,
ub=ub,
stimuli_per_trial=1,
outcome_types=["binary"],
min_asks=1, # should be ignored
run_indefinitely=True,
)
self.strat.gen()
self.assertFalse(self.strat.finished)
self.strat.finish()
self.assertTrue(self.strat.finished)
def test_n_trials_deprecation(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
lb = [-1, -1]
ub = [1, 1]
self.strat = Strategy(
generator=SobolGenerator(lb=lb, ub=ub),
min_asks=50,
lb=lb,
ub=ub,
stimuli_per_trial=1,
outcome_types=["binary"],
)
with self.assertWarns(DeprecationWarning):
self.assertEqual(self.strat.n_trials, 50)
def test_batchsobol_pairwise(self):
lb = [1, 2, 3]
ub = [2, 3, 4]
min_asks = 10
mod = Strategy(
lb=lb,
ub=ub,
generator=SobolGenerator(lb=lb, ub=ub, seed=12345, stimuli_per_trial=2),
min_asks=min_asks,
stimuli_per_trial=2,
outcome_types=["binary"],
)
acq1 = mod.gen(num_points=2)
self.assertEqual(acq1.shape, (2, 3, 2))
acq2 = mod.gen(num_points=3)
self.assertEqual(acq2.shape, (3, 3, 2))
acq3 = mod.gen()
self.assertEqual(acq3.shape, (1, 3, 2))
def test_opt_strategy_pairwise(self):
strat_list = [
Strategy(
lb=[-1],
ub=[1],
min_asks=3,
generator=SobolGenerator(lb=[-1], ub=[1], stimuli_per_trial=2),
stimuli_per_trial=2,
outcome_types=["binary"],
min_total_outcome_occurrences=0,
),
Strategy(
lb=[-10],
ub=[-8],
min_asks=5,
generator=SobolGenerator(lb=[-10], ub=[-8], stimuli_per_trial=2),
stimuli_per_trial=2,
outcome_types=["binary"],
min_total_outcome_occurrences=0,
),
]
strat = SequentialStrategy(strat_list)
out = np.zeros((8, 2))
for i in range(8):
next_x = strat.gen()
strat.add_data(next_x, [1])
out[i] = next_x
gen1 = out[:3]
gen2 = out[3:]
self.assertTrue(np.min(gen2) >= -10)
self.assertTrue(np.min(gen1) >= -1)
self.assertTrue(np.max(gen1) <= 1)
self.assertTrue(np.max(gen2) <= -8)
def test_strategy_asserts(self):
class MockModel(object):
_num_outputs = 1
_batch_shape = 2
stimuli_per_trial = 1
outcome_type = "binary"
# assert if model and strategy disagree on stimuli_per_trial
with self.assertRaises(AssertionError):
_ = Strategy(
lb=[-1],
ub=[1],
min_asks=5,
stimuli_per_trial=2,
model=MockModel(),
generator=SobolGenerator(lb=[-1], ub=[1], stimuli_per_trial=2),
outcome_types=["binary"],
)
# assert if model and strategy disagree on outcome_type
with self.assertRaises(AssertionError):
_ = Strategy(
lb=[-1],
ub=[1],
min_asks=5,
stimuli_per_trial=1,
model=MockModel(),
generator=SobolGenerator(lb=[-1], ub=[1], stimuli_per_trial=1),
outcome_types=["notbinary"],
)
# assert if model and strategy disagree on num outcomes
with self.assertRaises(AssertionError):
_ = Strategy(
lb=[-1],
ub=[1],
min_asks=5,
stimuli_per_trial=1,
model=MockModel(),
generator=SobolGenerator(lb=[-1], ub=[1], stimuli_per_trial=1),
outcome_types=["binary", "extra"],
)
try:
# no assert on 1 stim per trial
_ = Strategy(
lb=[-1],
ub=[1],
min_asks=5,
stimuli_per_trial=1,
model=MockModel(),
generator=SobolGenerator(lb=[-1], ub=[1], stimuli_per_trial=1),
outcome_types=["binary"],
)
# no assert on 2 stim per trial
model = MockModel()
model._num_outputs = 2
model.outcome_type = ["binary", "extra"]
_ = Strategy(
lb=[-1],
ub=[1],
min_asks=5,
stimuli_per_trial=1,
model=model,
generator=SobolGenerator(lb=[-1], ub=[1], stimuli_per_trial=2),
outcome_types=["binary", "extra"],
)
except AssertionError:
self.fail("Strategy raised unexpected AssertionError on __init__!")
class GenerationStrategyTestCase(unittest.TestCase):
def test_finish(self):
config_str = """
[common]
use_ax = True
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [x]
lb = [0]
ub = [1]
strategy_names = [test_strat]
[test_strat]
generator = SobolGenerator
run_indefinitely = True
"""
config = Config(config_str=config_str)
strat = AEPsychStrategy.from_config(config)
self.assertFalse(strat.finished)
strat.finish()
self.assertTrue(strat.finished)
if __name__ == "__main__":
unittest.main()
# ---- source: aepsych-main / tests/test_strategy.py ----
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from aepsych.benchmark.test_functions import make_songetal_testfun
from aepsych.utils import dim_grid
class BenchmarkTestCase(unittest.TestCase):
def test_songetal_funs_smoke(self):
valid_phenotypes = ["Metabolic", "Sensory", "Metabolic+Sensory", "Older-normal"]
grid = dim_grid(lower=[-3, -20], upper=[4, 120], dim=2, gridsize=30)
try:
for phenotype in valid_phenotypes:
testfun = make_songetal_testfun(phenotype=phenotype)
f = testfun(grid)
self.assertTrue(f.shape == torch.Size([900]))
except Exception:
self.fail()
with self.assertRaises(AssertionError):
_ = make_songetal_testfun(phenotype="not_a_real_phenotype")
# ---- source: aepsych-main / tests/test_bench_testfuns.py ----
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import shutil
import unittest
import uuid
from configparser import DuplicateOptionError
from pathlib import Path
import aepsych.config as configuration
import aepsych.database.db as db
import aepsych.database.tables as tables
import sqlalchemy
class DBTestCase(unittest.TestCase):
def setUp(self):
# random database path name without dashes
self._dbname = "./{}.db".format(str(uuid.uuid4().hex))
self._database = db.Database(db_path=self._dbname)
def tearDown(self):
self._database.delete_db()
def test_db_create(self):
engine = self._database.get_engine()
self.assertIsNotNone(engine)
self.assertIsNotNone(self._database._engine)
def test_record_setup_basic(self):
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
result = self._database.get_replay_for(master_table.experiment_id)
self.assertNotEqual(None, result)
self.assertEqual(len(result), 1)
self._database.record_message(
master_table=master_table,
type="test_type",
request={"test": "this is a follow on request"},
)
result = self._database.get_replay_for(master_table.experiment_id)
self.assertNotEqual(None, result)
self.assertEqual(len(result), 2)
def test_record_setup_doublesetup_goodid(self):
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
self.assertIsNotNone(master_table)
self.assertEqual(len(master_table.children_replay), 1)
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
id=master_table.experiment_id,
)
self.assertIsNotNone(master_table)
self.assertEqual(len(master_table.children_replay), 2)
def test_record_setup_doublesetup_badid(self):
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
self.assertIsNotNone(master_table)
self.assertEqual(len(master_table.children_replay), 1)
self.assertRaises(
RuntimeError,
self._database.record_setup,
description="test description",
name="test name",
request={"test": "this is a test request"},
id=1,
)
def test_record_setup_master_children(self):
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
self.assertIsNotNone(master_table)
self.assertEqual(len(master_table.children_replay), 1)
self._database.record_message(
master_table, "test", request={"test": "this is a test request"}
)
self.assertEqual(len(master_table.children_replay), 2)
def test_extra_info(self):
extra_info_setup = {"test": "this is extra_info"}
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request", "extra_info": extra_info_setup},
)
extra_info_record = {"test": "This is another extra_info"}
self._database.record_message(
master_table,
"test",
request={"test": "this is a test request", "extra_info": extra_info_record},
)
new_master = self._database.get_master_record(master_table.experiment_id)
self.assertEqual(new_master.children_replay[0].extra_info, extra_info_setup)
self.assertEqual(new_master.children_replay[1].extra_info, extra_info_record)
def test_update_db(self):
current_path = Path(os.path.abspath(__file__)).parent
db_path = current_path
db_path = db_path.joinpath("test_databases/test_original_schema.db")
# copy the db to a new file
dst_db_path = Path(self._dbname)
shutil.copy(str(db_path), str(dst_db_path))
self.assertTrue(dst_db_path.is_file())
# open the new db
test_database = db.Database(db_path=dst_db_path.as_posix())
self.assertFalse(tables.DbReplayTable._has_extra_info(test_database._engine))
self.assertTrue(test_database.is_update_required())
# make sure we raise the exception on newer columns
self.assertRaises(
sqlalchemy.exc.OperationalError,
test_database.record_setup,
description="test description",
name="test name",
request={"test": "this is a test request"},
)
test_database._session.rollback()
test_database.perform_updates()
# retry adding rows
master_table = test_database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
test_database.record_message(
master_table, "test", request={"test": "this is a test request"}
)
# make sure the new column exists
self.assertTrue(tables.DbReplayTable._has_extra_info(test_database._engine))
test_database.delete_db()
def test_update_db_with_raw_data_tables(self):
current_path = Path(os.path.abspath(__file__)).parent
db_path = current_path
db_path = db_path.joinpath("test_databases/multi_stimuli.db")
# copy the db to a new file
dst_db_path = Path(self._dbname)
shutil.copy(str(db_path), str(dst_db_path))
self.assertTrue(dst_db_path.is_file())
# open the new db
test_database = db.Database(db_path=dst_db_path.as_posix())
# Make sure that update is required
self.assertTrue(test_database.is_update_required())
# Update the database
test_database.perform_updates()
# Check that the update was successful
# Known expected data
par1 = [[0.1, 0.2], [0.3, 1], [2, 3], [4, 0.1], [0.2, 2], [1, 0.3], [0.3, 0.1]]
par2 = [[4, 0.1], [3, 0.2], [2, 1], [0.3, 0.2], [2, 0.3], [1, 0.1], [0.3, 4]]
outcomes = [[1, 0], [-1, 0], [0.1, 0], [0, 0], [-0.1, 0], [0, 0], [0, 0]]
param_dict_expected = {x: {} for x in range(1, 8)}
for i in range(1, 8):
param_dict_expected[i]["par1_stimuli0"] = par1[i - 1][0]
param_dict_expected[i]["par1_stimuli1"] = par1[i - 1][1]
param_dict_expected[i]["par2_stimuli0"] = par2[i - 1][0]
param_dict_expected[i]["par2_stimuli1"] = par2[i - 1][1]
outcome_dict_expected = {x: {} for x in range(1, 8)}
for i in range(1, 8):
outcome_dict_expected[i]["outcome_0"] = outcomes[i - 1][0]
outcome_dict_expected[i]["outcome_1"] = outcomes[i - 1][1]
# Check that the number of entries in each table is correct
n_iterations = (
test_database.get_engine()
.execute("SELECT COUNT(*) FROM raw_data")
.fetchone()[0]
)
self.assertEqual(n_iterations, 7)
n_params = (
test_database.get_engine()
.execute("SELECT COUNT(*) FROM param_data")
.fetchone()[0]
)
self.assertEqual(n_params, 28)
n_outcomes = (
test_database.get_engine()
.execute("SELECT COUNT(*) FROM outcome_data")
.fetchone()[0]
)
self.assertEqual(n_outcomes, 14)
# Check that the data is correct
param_data = (
test_database.get_engine().execute("SELECT * FROM param_data").fetchall()
)
param_dict = {x: {} for x in range(1, 8)}
for param in param_data:
param_dict[param.iteration_id][param.param_name] = float(param.param_value)
self.assertEqual(param_dict, param_dict_expected)
outcome_data = (
test_database.get_engine().execute("SELECT * FROM outcome_data").fetchall()
)
outcome_dict = {x: {} for x in range(1, 8)}
for outcome in outcome_data:
outcome_dict[outcome.iteration_id][
outcome.outcome_name
] = outcome.outcome_value
self.assertEqual(outcome_dict, outcome_dict_expected)
# Make sure that update is no longer required
self.assertFalse(test_database.is_update_required())
test_database.delete_db()
def test_update_configs(self):
config_str = """
[common]
parnames = [par1, par2]
lb = [0, 0]
ub = [1, 1]
outcome_type = single_probit
target = 0.75
[SobolStrategy]
n_trials = 10
[ModelWrapperStrategy]
n_trials = 20
refit_every = 5
[experiment]
acqf = MonotonicMCLSE
init_strat_cls = SobolStrategy
opt_strat_cls = ModelWrapperStrategy
modelbridge_cls = MonotonicSingleProbitModelbridge
model = MonotonicRejectionGP
[MonotonicMCLSE]
beta = 3.84
[MonotonicRejectionGP]
inducing_size = 100
mean_covar_factory = monotonic_mean_covar_factory
[MonotonicSingleProbitModelbridge]
restarts = 10
samps = 1000
"""
request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": config_str},
}
dbname = "./{}.db".format(str(uuid.uuid4().hex))
database = db.Database(dbname)
database.record_setup(
description="default description",
name="default name",
request=request,
)
self.assertTrue(database.is_update_required())
database.perform_updates()
self.assertFalse(database.is_update_required())
database.delete_db()
def test_strat_table(self):
test_strat = {"strat": "this is nothing like a strat"}
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
# record a strat
self._database.record_strat(master_table, strat=test_strat)
experiment_id = master_table.experiment_id
strat = self._database.get_strat_for(experiment_id)
self.assertEqual(test_strat, strat)
def test_config_table(self):
test_config = {"config": "this is nothing like a config but it works."}
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
        # record a config
self._database.record_config(master_table, config=test_config)
experiment_id = master_table.experiment_id
config = self._database.get_config_for(experiment_id)
self.assertEqual(test_config, config)
def test_raw_table(self):
model_data = True
master_table = self._database.record_setup(
description="test raw table",
name="test",
request={"test": "this a test request"},
)
# Record a raw data entry
self._database.record_raw(master_table, model_data=model_data)
experiment_id = master_table.experiment_id
raw_data = self._database.get_raw_for(experiment_id)
self.assertEqual(len(raw_data), 1)
self.assertEqual(raw_data[0].model_data, model_data)
def test_param_table(self):
param_name = "test_param"
param_value = 1.123
master_table = self._database.record_setup(
description="test param table",
name="test",
request={"test": "this a test request"},
)
raw_table = self._database.record_raw(master_table, model_data=True)
# Record a param data entry
self._database.record_param(raw_table, param_name, param_value)
experiment_id = master_table.experiment_id
iteration_id = raw_table.unique_id
param_data = self._database.get_param_for(experiment_id, iteration_id)
self.assertEqual(len(param_data), 1)
self.assertEqual(param_data[0].param_name, param_name)
self.assertEqual(float(param_data[0].param_value), param_value)
def test_outcome_table(self):
outcome_value = 1.123
outcome_name = "test_outcome"
master_table = self._database.record_setup(
description="test outcome table",
name="test",
request={"test": "this a test request"},
)
raw_table = self._database.record_raw(master_table, model_data=True)
# Record an outcome data entry
self._database.record_outcome(raw_table, outcome_name, outcome_value)
experiment_id = master_table.experiment_id
iteration_id = raw_table.unique_id
outcome_data = self._database.get_outcome_for(experiment_id, iteration_id)
self.assertEqual(len(outcome_data), 1)
self.assertEqual(outcome_data[0].outcome_name, outcome_name)
self.assertEqual(outcome_data[0].outcome_value, outcome_value)
# Test some metadata flow stuff and see if it is working.
def test_metadata(self):
# Run tests using the native config_str functionality.
config_str = """
[common]
parnames = [par1, par2]
lb = [0, 0]
ub = [1, 1]
outcome_type = single_probit
target = 0.75
[SobolStrategy]
n_trials = 10
[ModelWrapperStrategy]
n_trials = 20
refit_every = 5
[experiment]
acqf = MonotonicMCLSE
init_strat_cls = SobolStrategy
opt_strat_cls = ModelWrapperStrategy
modelbridge_cls = MonotonicSingleProbitModelbridge
model = MonotonicRejectionGP
[MonotonicMCLSE]
beta = 3.98
[MonotonicRejectionGP]
inducing_size = 100
mean_covar_factory = monotonic_mean_covar_factory
[MonotonicSingleProbitModelbridge]
restarts = 10
samps = 1000
[metadata]
experiment_name = Lucas
experiment_description = Test
metadata1 = one
metadata2 = two
"""
request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": config_str},
}
# Generate a config for later to run .jsonifyMetadata() on.
generated_config = configuration.Config(**request["message"])
master_table = self._database.record_setup(
description=generated_config["metadata"]["experiment_description"],
name=generated_config["metadata"]["experiment_name"],
request=request,
extra_metadata=generated_config.jsonifyMetadata(),
)
self.assertEqual(
generated_config.jsonifyMetadata(),
master_table.extra_metadata, # Test in JSON form
)
        # Next, deserialize into a dictionary and make sure each element matches 1-to-1.
        ## An important thing to note is that generated_config will have extra fields because of configparser's defaults.
        ## Run the comparison as json.loads -> generated_config, NOT the other way around.
        deserializedjson = json.loads(
            master_table.extra_metadata
        )  # Directly from the master table entry.
        ## Check each value in the deserialized json from the DB against the expected values, along with the config prior to insertion.
        ## This verifies that the individual values are retained.
self.assertEqual(deserializedjson["metadata1"], "one")
self.assertEqual(deserializedjson["metadata2"], "two")
self.assertEqual(deserializedjson["experiment_name"], "Lucas")
self.assertEqual(deserializedjson["experiment_description"], "Test")
self.assertEqual(
deserializedjson["experiment_name"], master_table.experiment_name
)
self.assertEqual(
deserializedjson["experiment_description"],
master_table.experiment_description,
)
def test_broken_metadata(self):
# We are going to be testing some broken metadata here. We need to make sure it does not misbehave.
config_strdupe = """
[common]
parnames = [par1, par2]
lb = [0, 0]
ub = [1, 1]
outcome_type = single_probit
target = 0.75
[SobolStrategy]
n_trials = 10
[ModelWrapperStrategy]
n_trials = 20
refit_every = 5
[experiment]
acqf = MonotonicMCLSE
init_strat_cls = SobolStrategy
opt_strat_cls = ModelWrapperStrategy
modelbridge_cls = MonotonicSingleProbitModelbridge
model = MonotonicRejectionGP
[MonotonicMCLSE]
beta = 3.98
[MonotonicRejectionGP]
inducing_size = 100
mean_covar_factory = monotonic_mean_covar_factory
[MonotonicSingleProbitModelbridge]
restarts = 10
samps = 1000
[metadata]
experiment_name = Lucas
experiment_description = Test
metadata1 =
metadata2 = two
metadata2 = three
"""
config_str = """
[common]
parnames = [par1, par2]
lb = [0, 0]
ub = [1, 1]
outcome_type = single_probit
target = 0.75
[SobolStrategy]
n_trials = 10
[ModelWrapperStrategy]
n_trials = 20
refit_every = 5
[experiment]
acqf = MonotonicMCLSE
init_strat_cls = SobolStrategy
opt_strat_cls = ModelWrapperStrategy
modelbridge_cls = MonotonicSingleProbitModelbridge
model = MonotonicRejectionGP
[MonotonicMCLSE]
beta = 3.98
[MonotonicRejectionGP]
inducing_size = 100
mean_covar_factory = monotonic_mean_covar_factory
[MonotonicSingleProbitModelbridge]
restarts = 10
samps = 1000
[metadata]
metadata1 =
metadata2 = three
"""
request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": config_strdupe},
}
request2 = {
"type": "setup",
"version": "0.01",
"message": {"config_str": config_str},
}
# Generate a config for later to run .jsonifyMetadata() on.
with self.assertRaises(DuplicateOptionError):
configuration.Config(**request["message"])
generated_config = configuration.Config(**request2["message"])
master_table = self._database.record_setup(
description=(
generated_config["metadata"]["experiment_description"]
if ("experiment_description" in generated_config["metadata"].keys())
else "default description"
),
name=(
generated_config["metadata"]["experiment_name"]
if ("experiment_name" in generated_config["metadata"].keys())
else "default name"
),
request=request,
extra_metadata=generated_config.jsonifyMetadata(),
)
        deserializedjson = json.loads(
            master_table.extra_metadata
        )  # This initial process is exactly the same, but now we switch things up...
self.assertEqual(deserializedjson["metadata2"], "three") # test normal value
self.assertEqual(deserializedjson["metadata1"], "") # test an empty value
self.assertEqual(
master_table.experiment_name, "default name"
) # test default name value
self.assertEqual(
master_table.experiment_description, "default description"
) # test default description value
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/test_db.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from itertools import product
import numpy as np
import torch
from aepsych.acquisition import (
ApproxGlobalSUR,
EAVC,
GlobalMI,
GlobalSUR,
LocalMI,
LocalSUR,
)
from aepsych.acquisition.bvn import bvn_cdf
from aepsych.acquisition.lookahead_utils import posterior_at_xstar_xq
from botorch.utils.testing import MockModel, MockPosterior
from gpytorch.distributions import MultivariateNormal
from scipy.stats import multivariate_normal
class BvNCDFTestCase(unittest.TestCase):
def test_bvncdf(self):
rhos = np.linspace(0.3, 0.9, 7)
xus = [0.3, 0.5, 0.7]
yus = [0.3, 0.5, 0.7]
params = product(rhos, xus, yus)
for par in params:
            with self.subTest(par=par):
rho, xu, yu = par
var = np.r_[1, rho, rho, 1].reshape(2, 2)
x = np.r_[xu, yu]
scipy_answer = multivariate_normal(cov=var).cdf(x)
torch_answer = bvn_cdf(
torch.tensor(xu), torch.tensor(yu), torch.tensor(rho)
)
self.assertTrue(np.isclose(scipy_answer, torch_answer))
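# A minimal sanity check (illustrative sketch, not part of the original suite):
# for independent standard normals (rho = 0) the bivariate CDF factorizes, so
# bvn_cdf(0, 0, 0) should be ~0.25 == norm.cdf(0) * norm.cdf(0), e.g.
#   bvn_cdf(torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0))  # ~0.25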
class LookaheadPosteriorTestCase(unittest.TestCase):
def setUp(self):
torch.manual_seed(1)
np.random.seed(1)
self.xstar = torch.zeros(1, 1, 1)
self.xq = torch.randn(1, 2, 1)
f = torch.rand(3)
a = torch.rand(3, 3)
covar = a @ a.T
flat_diag = torch.rand(3)
covar = covar + torch.diag_embed(flat_diag)
mvn = MultivariateNormal(mean=f, covariance_matrix=covar)
model = MockModel(
MockPosterior(mean=f[:, None], variance=torch.diag(covar)[None, :, None])
)
model._posterior.distribution = mvn
self.model, self.f, self.covar = model, f, covar
def test_posterior_extraction(self):
mu_s, s2_s, mu_q, s2_q, cov_q = posterior_at_xstar_xq(
self.model, self.xstar, self.xq
)
# mean extraction correct
self.assertTrue(mu_s == self.f[0])
self.assertTrue((mu_q == self.f[1:]).all())
# var extraction correct
self.assertTrue(s2_s == self.covar[0, 0])
self.assertTrue((s2_q == torch.diag(self.covar)[1:]).all())
# covar extraction correct
self.assertTrue((cov_q == self.covar[0, 1:]).all())
self.assertTrue((cov_q == self.covar[1:, 0]).all())
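    # Note (added for clarity): posterior_at_xstar_xq is assumed to split the joint
    # posterior over [xstar; xq] into the marginal at xstar (mu_s, s2_s), the
    # marginals at xq (mu_q, s2_q), and the cross-covariance cov_q; the assertions
    # above check each piece against the corresponding block of the mock covariance.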
    def test_mi_smoketest(self):
# with the mock posterior, local and global MI should be identical
local_mi = LocalMI(model=self.model, target=0.75)
global_mi = GlobalMI(model=self.model, target=0.75, Xq=self.xq[0])
self.assertTrue(global_mi(self.xstar) == local_mi(self.xstar))
    def test_sur_smoketest(self):
# with the mock posterior, local and global SUR should be identical
local_sur = LocalSUR(model=self.model, target=0.75)
global_sur = GlobalSUR(model=self.model, target=0.75, Xq=self.xq[0])
self.assertTrue(global_sur(self.xstar) == local_sur(self.xstar))
    def test_global_lookahead_smoketest(self):
for global_lookahead_acq in [
GlobalMI,
GlobalSUR,
ApproxGlobalSUR,
EAVC,
]:
acq = global_lookahead_acq(model=self.model, target=0.75, Xq=self.xq[0])
acqval = acq(self.xstar)
self.assertTrue(acqval.shape == torch.Size([]))
self.assertTrue(np.isfinite(acqval.numpy()))
    def test_local_lookahead_smoketest(self):
for local_lookahead_acq in [
LocalMI,
LocalSUR,
]:
acq = local_lookahead_acq(model=self.model, target=0.75)
acqval = acq(self.xstar)
self.assertTrue(acqval.shape == torch.Size([]))
self.assertTrue(np.isfinite(acqval.numpy()))
|
aepsych-main
|
tests/test_lookahead.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
aepsych-main
|
tests/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import gpytorch
import numpy as np
from aepsych.config import Config
from aepsych.factory import (
default_mean_covar_factory,
monotonic_mean_covar_factory,
song_mean_covar_factory,
)
from aepsych.kernels.rbf_partial_grad import RBFKernelPartialObsGrad
from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad
from scipy.stats import norm
class TestFactories(unittest.TestCase):
def _test_mean_covar(self, meanfun, covarfun):
self.assertTrue(covarfun.base_kernel.ard_num_dims == 1)
self.assertTrue(meanfun.constant.requires_grad)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
self.assertTrue(
isinstance(
covarfun.base_kernel._priors["lengthscale_prior"][0],
gpytorch.priors.GammaPrior,
)
)
self.assertTrue(
isinstance(
covarfun._priors["outputscale_prior"][0],
gpytorch.priors.SmoothedBoxPrior,
)
)
self.assertTrue(isinstance(covarfun.base_kernel, gpytorch.kernels.RBFKernel))
def test_default_factory_1d_config(self):
config = Config(
config_dict={"default_mean_covar_factory": {"lb": [0], "ub": [1]}}
)
meanfun, covarfun = default_mean_covar_factory(config=config)
self._test_mean_covar(meanfun, covarfun)
def test_default_factory_1d_dim(self):
meanfun, covarfun = default_mean_covar_factory(dim=1)
self._test_mean_covar(meanfun, covarfun)
def test_default_factory_args_1d(self):
conf = {
"default_mean_covar_factory": {
"lb": [0],
"ub": [1],
"fixed_mean": True,
"lengthscale_prior": "gamma",
"outputscale_prior": "gamma",
"target": 0.5,
"kernel": "MaternKernel",
}
}
config = Config(config_dict=conf)
meanfun, covarfun = default_mean_covar_factory(config)
self.assertFalse(meanfun.constant.requires_grad)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
self.assertTrue(
isinstance(
covarfun.base_kernel._priors["lengthscale_prior"][0],
gpytorch.priors.GammaPrior,
)
)
self.assertTrue(
isinstance(
covarfun._priors["outputscale_prior"][0], gpytorch.priors.GammaPrior
)
)
self.assertTrue(
covarfun.base_kernel._priors["lengthscale_prior"][0].concentration == 3.0
)
self.assertTrue(
covarfun.base_kernel._priors["lengthscale_prior"][0].rate == 6.0
)
self.assertTrue(covarfun._priors["outputscale_prior"][0].concentration == 2.0)
self.assertTrue(covarfun._priors["outputscale_prior"][0].rate == 0.15)
self.assertTrue(
covarfun.base_kernel._priors["lengthscale_prior"][0]._transform is None
)
self.assertTrue(isinstance(covarfun.base_kernel, gpytorch.kernels.MaternKernel))
def test_default_factory_raises(self):
bad_confs = [
{
"default_mean_covar_factory": {
"lb": [0],
"ub": [1],
"lengthscale_prior": "box",
}
},
{
"default_mean_covar_factory": {
"lb": [0],
"ub": [1],
"outputscale_prior": "normal",
}
},
{"default_mean_covar_factory": {"lb": [0], "ub": [1], "fixed_mean": True}},
]
for conf in bad_confs:
with self.assertRaises(RuntimeError):
config = Config(conf)
_, __ = default_mean_covar_factory(config)
with self.assertRaises(AssertionError):
default_mean_covar_factory()
config = Config(
config_dict={"default_mean_covar_factory": {"lb": [0], "ub": [1]}}
)
with self.assertRaises(AssertionError):
default_mean_covar_factory(config=config, dim=2)
def test_default_factory_2d(self):
conf = {"default_mean_covar_factory": {"lb": [-2, 3], "ub": [1, 10]}}
config = Config(config_dict=conf)
meanfun, covarfun = default_mean_covar_factory(config)
self.assertTrue(covarfun.base_kernel.ard_num_dims == 2)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.base_kernel, gpytorch.kernels.RBFKernel))
def test_monotonic_factory_1d(self):
conf = {"monotonic_mean_covar_factory": {"lb": [0], "ub": [1]}}
config = Config(config_dict=conf)
meanfun, covarfun = monotonic_mean_covar_factory(config)
self.assertTrue(covarfun.base_kernel.ard_num_dims == 1)
self.assertTrue(isinstance(meanfun, ConstantMeanPartialObsGrad))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.base_kernel, RBFKernelPartialObsGrad))
self.assertTrue(meanfun.constant.requires_grad)
def test_monotonic_factory_args_1d(self):
conf = {
"monotonic_mean_covar_factory": {
"lb": [0],
"ub": [1],
"fixed_mean": True,
"target": 0.88,
}
}
config = Config(config_dict=conf)
meanfun, covarfun = monotonic_mean_covar_factory(config)
self.assertTrue(covarfun.base_kernel.ard_num_dims == 1)
self.assertTrue(isinstance(meanfun, ConstantMeanPartialObsGrad))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.base_kernel, RBFKernelPartialObsGrad))
self.assertFalse(meanfun.constant.requires_grad)
self.assertTrue(np.allclose(meanfun.constant, norm.ppf(0.88)))
def test_monotonic_factory_2d(self):
conf = {
"monotonic_mean_covar_factory": {
"lb": [0, 1],
"ub": [1, 70],
"fixed_mean": True,
"target": 0.89,
}
}
config = Config(config_dict=conf)
meanfun, covarfun = monotonic_mean_covar_factory(config)
self.assertTrue(covarfun.base_kernel.ard_num_dims == 2)
self.assertTrue(isinstance(meanfun, ConstantMeanPartialObsGrad))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.base_kernel, RBFKernelPartialObsGrad))
self.assertFalse(meanfun.constant.requires_grad)
self.assertTrue(np.allclose(meanfun.constant, norm.ppf(0.89)))
def test_song_factory_1d(self):
conf = {"song_mean_covar_factory": {"lb": [0], "ub": [1]}}
config = Config(config_dict=conf)
meanfun, covarfun = song_mean_covar_factory(config)
self.assertTrue(covarfun.kernels[0].base_kernel.ard_num_dims == 1)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.AdditiveKernel))
self.assertTrue(isinstance(covarfun.kernels[0], gpytorch.kernels.ScaleKernel))
self.assertTrue(
isinstance(covarfun.kernels[0].base_kernel, gpytorch.kernels.LinearKernel)
)
def test_song_factory_1d_intensity_RBF(self):
conf = {
"song_mean_covar_factory": {"lb": [0], "ub": [1], "intensity_RBF": True}
}
config = Config(config_dict=conf)
meanfun, covarfun = song_mean_covar_factory(config)
self.assertTrue(covarfun.kernels[0].base_kernel.ard_num_dims == 1)
self.assertTrue(covarfun.kernels[1].base_kernel.ard_num_dims == 1)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.AdditiveKernel))
self.assertTrue(isinstance(covarfun.kernels[0], gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.kernels[1], gpytorch.kernels.ScaleKernel))
self.assertTrue(
isinstance(covarfun.kernels[0].base_kernel, gpytorch.kernels.RBFKernel)
)
self.assertTrue(
isinstance(covarfun.kernels[1].base_kernel, gpytorch.kernels.LinearKernel)
)
def test_song_factory_2d(self):
conf = {
"song_mean_covar_factory": {"lb": [0, 1], "ub": [1, 70], "target": 0.75}
}
config = Config(config_dict=conf)
meanfun, covarfun = song_mean_covar_factory(config)
self.assertTrue(covarfun.kernels[0].base_kernel.ard_num_dims == 1)
self.assertTrue(covarfun.kernels[1].base_kernel.ard_num_dims == 1)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.AdditiveKernel))
self.assertTrue(isinstance(covarfun.kernels[0], gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.kernels[1], gpytorch.kernels.ScaleKernel))
self.assertTrue(
isinstance(covarfun.kernels[0].base_kernel, gpytorch.kernels.RBFKernel)
)
self.assertTrue(covarfun.kernels[0].base_kernel.active_dims == 0)
self.assertTrue(
isinstance(covarfun.kernels[1].base_kernel, gpytorch.kernels.LinearKernel)
)
self.assertTrue(covarfun.kernels[1].base_kernel.active_dims == 1)
# flip the stim dim
conf = {
"song_mean_covar_factory": {
"lb": [0, 1],
"ub": [1, 70],
"target": 0.75,
"stim_dim": 0,
}
}
config = Config(config_dict=conf)
meanfun, covarfun = song_mean_covar_factory(config)
self.assertTrue(covarfun.kernels[1].base_kernel.active_dims == 0)
self.assertTrue(covarfun.kernels[0].base_kernel.active_dims == 1)
def test_song_factory_2d_intensity_RBF(self):
conf = {
"song_mean_covar_factory": {
"lb": [0, 1],
"ub": [1, 70],
"target": 0.75,
"intensity_RBF": True,
}
}
config = Config(config_dict=conf)
meanfun, covarfun = song_mean_covar_factory(config)
self.assertTrue(covarfun.kernels[0].base_kernel.ard_num_dims == 2)
self.assertTrue(covarfun.kernels[1].base_kernel.ard_num_dims == 1)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.AdditiveKernel))
self.assertTrue(isinstance(covarfun.kernels[0], gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.kernels[1], gpytorch.kernels.ScaleKernel))
self.assertTrue(
isinstance(covarfun.kernels[0].base_kernel, gpytorch.kernels.RBFKernel)
)
self.assertTrue(
np.allclose(covarfun.kernels[0].base_kernel.active_dims, [0, 1])
)
self.assertTrue(
isinstance(covarfun.kernels[1].base_kernel, gpytorch.kernels.LinearKernel)
)
self.assertTrue(covarfun.kernels[1].base_kernel.active_dims == 1)
# flip the stim dim
conf = {
"song_mean_covar_factory": {
"lb": [0, 1],
"ub": [1, 70],
"target": 0.75,
"stim_dim": 0,
"intensity_RBF": True,
}
}
config = Config(config_dict=conf)
meanfun, covarfun = song_mean_covar_factory(config)
self.assertTrue(covarfun.kernels[1].base_kernel.active_dims == 0)
self.assertTrue(
np.allclose(covarfun.kernels[0].base_kernel.active_dims, [0, 1])
)
|
aepsych-main
|
tests/test_mean_covar_factories.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from scipy.stats import norm
def f_1d(x, mu=0):
"""
latent is just a gaussian bump at mu
"""
return np.exp(-((x - mu) ** 2))
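# Illustrative note (not in the original file): f_1d peaks at 1.0 when x == mu,
# e.g. f_1d(0.0) == 1.0 and f_1d(1.0) ~= exp(-1) ~= 0.368.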
def f_2d(x):
"""
    a gaussian bump at (0, 0)
"""
return np.exp(-np.linalg.norm(x, axis=-1))
def new_novel_det_params(freq, scale_factor=1.0):
"""Get the loc and scale params for 2D synthetic novel_det(frequency) function
Keyword arguments:
freq -- 1D array of frequencies whose thresholds to return
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
"""
locs = 0.66 * np.power(0.8 * freq * (0.2 * freq - 1), 2) + 0.05
scale = 2 * locs / (3 * scale_factor)
loc = -1 + 2 * locs
return loc, scale
def target_new_novel_det(freq, scale_factor=1.0, target=0.75):
"""Get the target (i.e. threshold) for 2D synthetic novel_det(frequency) function
Keyword arguments:
freq -- 1D array of frequencies whose thresholds to return
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
target -- target threshold
"""
locs, scale = new_novel_det_params(freq, scale_factor)
return norm.ppf(target, loc=locs, scale=scale)
def new_novel_det(x, scale_factor=1.0):
"""Get the cdf for 2D synthetic novel_det(frequency) function
Keyword arguments:
x -- array of shape (n,2) of locations to sample;
x[...,0] is frequency from -1 to 1; x[...,1] is intensity from -1 to 1
scale factor -- scale for the novel_det function, where higher is steeper/lower SD
"""
freq = x[..., 0]
locs, scale = new_novel_det_params(freq, scale_factor)
return (x[..., 1] - locs) / scale
def cdf_new_novel_det(x, scale_factor=1.0):
"""Get the cdf for 2D synthetic novel_det(frequency) function
Keyword arguments:
x -- array of shape (n,2) of locations to sample;
x[...,0] is frequency from -1 to 1; x[...,1] is intensity from -1 to 1
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
"""
return norm.cdf(new_novel_det(x, scale_factor))
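# Usage sketch (assumed, not from the original file): the frequency-based helpers
# compose as params -> latent z-score -> detection probability, e.g.
#   x = np.array([[0.0, 0.5]])   # frequency 0.0, intensity 0.5
#   p = cdf_new_novel_det(x)     # == norm.cdf(new_novel_det(x))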
def new_novel_det_channels_params(channel, scale_factor=1.0, wave_freq=1, target=0.75):
"""Get the target parameters for 2D synthetic novel_det(channel) function
Keyword arguments:
channel -- 1D array of channel locations whose thresholds to return
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
wave_freq -- frequency of location waveform on [-1,1]
target -- target threshold
"""
locs = -0.3 * np.sin(5 * wave_freq * (channel - 1 / 6) / np.pi) ** 2 - 0.5
scale = (
1 / (10 * scale_factor) * (0.75 + 0.25 * np.cos(10 * (0.3 + channel) / np.pi))
)
return locs, scale
def target_new_novel_det_channels(channel, scale_factor=1.0, wave_freq=1, target=0.75):
"""Get the target (i.e. threshold) for 2D synthetic novel_det(channel) function
Keyword arguments:
channel -- 1D array of channel locations whose thresholds to return
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
wave_freq -- frequency of location waveform on [-1,1]
target -- target threshold
"""
locs, scale = new_novel_det_channels_params(
channel, scale_factor, wave_freq, target
)
return norm.ppf(target, loc=locs, scale=scale)
def new_novel_det_channels(x, channel, scale_factor=1.0, wave_freq=1, target=0.75):
"""Get the 2D synthetic novel_det(channel) function
Keyword arguments:
x -- array of shape (n,2) of locations to sample;
x[...,0] is channel from -1 to 1; x[...,1] is intensity from -1 to 1
scale factor -- scale for the novel_det function, where higher is steeper/lower SD
wave_freq -- frequency of location waveform on [-1,1]
"""
locs, scale = new_novel_det_channels_params(
channel, scale_factor, wave_freq, target
)
return (x - locs) / scale
def cdf_new_novel_det_channels(x, channel, scale_factor=1.0, wave_freq=1, target=0.75):
    """Get the cdf for 2D synthetic novel_det(channel) function
    Keyword arguments:
    x -- intensity values from -1 to 1 at which to evaluate the cdf
    channel -- 1D array of channel locations from -1 to 1
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
    wave_freq -- frequency of location waveform on [-1,1]
    """
    return norm.cdf(new_novel_det_channels(x, channel, scale_factor, wave_freq, target))
def new_novel_det_3D_params(x, scale_factor=1.0):
freq = x[..., 0]
chan = x[..., 1]
locs_freq = -0.32 + 2 * (0.66 * np.power(0.8 * freq * (0.2 * freq - 1), 2) + 0.05)
locs = (
0.7 * ((-0.35 * np.sin(5 * (chan - 1 / 6) / np.pi) ** 2) - 0.5)
+ 0.9 * locs_freq
)
scale = 0.3 * locs / (3 * scale_factor) * 1 / (10 * scale_factor) + 0.15 * (
0.75 + 0.25 * np.cos(10 * (0.6 + chan) / np.pi)
)
return locs, scale
def new_novel_det_3D(x, scale_factor=1.0):
"""
Get the synthetic 3D novel_det
    function over freqs, channels, and amplitudes
"""
locs, scale = new_novel_det_3D_params(x, scale_factor)
return (x[..., 2] - locs) / scale
def cdf_new_novel_det_3D(x, scale_factor=1.0):
"""
Get the cdf for 3D synthetic novel_det function
x -- array of shape (n,3) of locations to sample
x[...,0] is frequency, x[...,1] is channel, x[...,2] is intensity
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
"""
return norm.cdf(new_novel_det_3D(x, scale_factor))
def target_new_novel_det_3D(x, scale_factor=1.0, target=0.75):
"""
Get target for 3D synthetic novel_det function at location x
x -- array of shape (n,2) of locations to sample
x[...,0] is frequency, x[...,1] is channel,
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
target -- target threshold
"""
locs, scale = new_novel_det_3D_params(x, scale_factor)
return norm.ppf(target, loc=locs, scale=scale)
def f_pairwise(f, x, noise_scale=1):
return norm.cdf((f(x[..., 1]) - f(x[..., 0])) / (noise_scale * np.sqrt(2)))
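# Usage sketch (assumed): f_pairwise turns a latent function into the probability
# that stimulus x[..., 1] beats stimulus x[..., 0] under Gaussian response noise, e.g.
#   x = np.array([[-1.0, 0.0]])   # pair (reference, comparison)
#   p = f_pairwise(f_1d, x)       # > 0.5, since f_1d(0.0) > f_1d(-1.0)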
|
aepsych-main
|
tests/common.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import time
import unittest
import numpy as np
import torch
from aepsych.benchmark import (
Benchmark,
DerivedValue,
LSEProblem,
PathosBenchmark,
Problem,
)
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
def f(x, delay=False):
if delay:
time.sleep(0.1 * random.random())
if len(x.shape) == 1:
return x
else:
return x.sum(axis=-1)
class TestProblem(Problem):
name = "test problem"
bounds = np.c_[0, 1].T
threshold = 0.75
def f(self, x):
return f(x)
class TestSlowProblem(TestProblem):
name = "test slow problem"
def f(self, x):
return f(x, delay=True)
class LSETestProblem(LSEProblem):
name = "test lse problem"
bounds = np.c_[[-1, -1], [1, 1]].T
threshold = 0.75
def f(self, x):
return f(x)
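# Illustrative sketch (an assumption mirroring the classes above, not part of the
# original file): a new benchmark problem only needs a name, a bounds array whose
# rows are the lower and upper bounds, a threshold (for LSE problems), and a latent
# function f, e.g.:
#   class QuadraticTestProblem(Problem):  # hypothetical example class
#       name = "quadratic test problem"
#       bounds = np.c_[0, 1].T
#       def f(self, x):
#           return (x - 0.5) ** 2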
class BenchmarkTestCase(unittest.TestCase):
def setUp(self):
# run this single-threaded since we parallelize using pathos
self.oldenv = os.environ.copy()
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["NUMEXPR_MAX_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["KMP_AFFINITY"] = "granularity=fine,compact,1,0"
os.environ["KMP_BLOCKTIME"] = "1"
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
self.bench_config = {
"common": {
"invalid_config": DerivedValue(
[("init_strat", "min_asks")],
                    lambda min_asks: min_asks > 2,
),
"stimuli_per_trial": 1,
"outcome_types": ["binary"],
"strategy_names": "[init_strat, opt_strat]",
},
"experiment": {
"acqf": "MCLevelSetEstimation",
"model": "GPClassificationModel",
},
"init_strat": {
"min_asks": [2, 4],
"generator": "SobolGenerator",
"min_total_outcome_occurrences": 0,
},
"opt_strat": {
"min_asks": [
DerivedValue(
[("problem", "name")], lambda x: 1 + int(x == "test problem")
),
DerivedValue(
[("problem", "name")], lambda x: 2 + int(x == "test problem")
),
],
"generator": "OptimizeAcqfGenerator",
"min_total_outcome_occurrences": 0,
},
"MCLevelSetEstimation": {
"target": 0.75,
"beta": 3.84,
},
"GPClassificationModel": {
"inducing_size": 10,
"mean_covar_factory": "default_mean_covar_factory",
"refit_every": 100,
"max_fit_time": 0.1,
},
"OptimizeAcqfGenerator": {
"restarts": 1,
"samps": 20,
"max_gen_time": 0.1,
},
}
def tearDown(self):
os.environ.clear()
os.environ.update(self.oldenv)
def test_bench_smoke(self):
problem1 = TestProblem()
problem2 = LSETestProblem()
bench = Benchmark(
problems=[problem1, problem2],
configs=self.bench_config,
n_reps=2,
log_every=2,
)
bench.run_benchmarks()
out = bench.pandas()
# assert problem metadata was correctly saved
self.assertEqual(
sorted(out["problem_name"].unique()), ["test lse problem", "test problem"]
)
self.assertEqual(
sorted(
out[out["problem_name"] == "test lse problem"][
"problem_threshold"
].unique()
),
["0.75"],
)
# assert derived values work correctly
self.assertEqual(
sorted(
out[out["problem_name"] == "test problem"][
"opt_strat_min_asks"
].unique()
),
["2", "3"],
)
self.assertEqual(
sorted(
out[out["problem_name"] == "test lse problem"][
"opt_strat_min_asks"
].unique()
),
["1", "2"],
)
# have as many final results as we expect. Because of invalid trials,
# only half of benchmarks are valid
self.assertTrue(len(out[out.final]) == bench.num_benchmarks // 2)
# have as many repetitions as we expect
self.assertTrue(len(out.rep.unique()) == bench.n_reps)
# reporting intervals are correct
self.assertTrue((out[~out.final].trial_id % 2 == 0).all())
# we don't run extra trials
total_trials = out.init_strat_min_asks.astype(
int
) + out.opt_strat_min_asks.astype(int)
self.assertTrue((out.trial_id <= total_trials).all())
# ensure each simulation has a unique random seed
self.assertTrue(out[out["final"]]["seed"].is_unique)
def test_bench_pathossmoke(self):
problem1 = TestProblem()
problem2 = LSETestProblem()
bench = PathosBenchmark(
problems=[problem1, problem2], configs=self.bench_config, n_reps=2, nproc=2
)
bench.run_benchmarks()
out = bench.pandas()
# assert problem metadata was correctly saved
self.assertEqual(
sorted(out["problem_name"].unique()), ["test lse problem", "test problem"]
)
self.assertEqual(
sorted(
out[out["problem_name"] == "test lse problem"][
"problem_threshold"
].unique()
),
["0.75"],
)
# assert derived values work correctly
self.assertEqual(
sorted(
out[out["problem_name"] == "test problem"][
"opt_strat_min_asks"
].unique()
),
["2", "3"],
)
self.assertEqual(
sorted(
out[out["problem_name"] == "test lse problem"][
"opt_strat_min_asks"
].unique()
),
["1", "2"],
)
# have as many final results as we expect (half of configs are invalid)
self.assertTrue(len(out[out.final]) == bench.num_benchmarks // 2)
# have as many repetitions as we expect
self.assertTrue(len(out.rep.unique()) == bench.n_reps)
# reporting intervals are correct
self.assertTrue((out[~out.final].trial_id % 2 == 0).all())
# we don't run extra trials
total_trials = out.init_strat_min_asks.astype(
int
) + out.opt_strat_min_asks.astype(int)
self.assertTrue((out.trial_id <= total_trials).all())
# ensure each simulation has a unique random seed
self.assertTrue(out[out["final"]]["seed"].is_unique)
def test_bench_pathos_partial(self):
"""
test that we can launch async and get partial results
"""
problem = TestSlowProblem()
bench = PathosBenchmark(
problems=[problem], configs=self.bench_config, n_reps=1, log_every=2
)
bench.start_benchmarks()
# wait for something to finish
while len(bench._log) == 0:
time.sleep(0.1)
bench.collate_benchmarks(wait=False)
out = bench.pandas() # this should only be a partial result
# have fewer than all the results (which is half of all benchmarks
# since half are invalid)
self.assertTrue(len(out[out.final]) < (bench.num_benchmarks // 2))
bench.collate_benchmarks(wait=True) # wait for everything to finish
out = bench.pandas() # complete results
# now we should have everything (valid = half of all benchmarks)
self.assertTrue(len(out[out.final]) == (bench.num_benchmarks // 2))
class BenchProblemTestCase(unittest.TestCase):
def setUp(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
def test_nonmonotonic_single_lse_eval(self):
config = {
"common": {
"stimuli_per_trial": 1,
"outcome_types": ["binary"],
"strategy_names": "[init_strat, opt_strat]",
"acqf": "MCLevelSetEstimation",
"model": "GPClassificationModel",
},
"init_strat": {"generator": "SobolGenerator", "min_asks": 50},
"opt_strat": {"generator": "OptimizeAcqfGenerator", "min_asks": 1},
"MCLevelSetEstimation": {
"target": 0.75,
"beta": 3.84,
},
"GPClassificationModel": {
"inducing_size": 10,
"mean_covar_factory": "default_mean_covar_factory",
},
"OptimizeAcqfGenerator": {
"restarts": 10,
"samps": 1000,
},
}
problem = LSETestProblem()
bench = Benchmark(problems=[problem], configs=config, log_every=100)
_, strat = bench.run_experiment(problem, bench.combinations[0], 0, 0)
e = problem.evaluate(strat)
self.assertTrue(e["mean_square_err_p"] < 0.05)
def test_monotonic_single_lse_eval(self):
config = {
"common": {
"stimuli_per_trial": 1,
"outcome_types": ["binary"],
"strategy_names": "[init_strat, opt_strat]",
"acqf": "MonotonicMCLSE",
"model": "MonotonicRejectionGP",
},
"init_strat": {"generator": "SobolGenerator", "min_asks": 50},
"opt_strat": {"generator": "MonotonicRejectionGenerator", "min_asks": 1},
"SobolGenerator": {"seed": 1},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicRejectionGP": {
"inducing_size": 10,
"mean_covar_factory": "monotonic_mean_covar_factory",
"monotonic_idxs": "[1]",
},
"MonotonicRejectionGenerator": {
"model_gen_options": {
"num_restarts": 10,
"raw_samples": 1000,
}
},
}
problem = LSETestProblem()
bench = Benchmark(problems=[problem], configs=config, log_every=100)
_, strat = bench.run_experiment(problem, bench.combinations[0], 0, 0)
e = problem.evaluate(strat)
self.assertTrue(e["mean_square_err_p"] < 0.05)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/test_benchmark.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import unittest
import uuid
import torch
from aepsych.acquisition import EAVC, MCLevelSetEstimation
from aepsych.acquisition.monotonic_rejection import MonotonicMCLSE
from aepsych.acquisition.objective import FloorGumbelObjective, ProbitObjective
from aepsych.config import Config
from aepsych.generators import (
MonotonicRejectionGenerator,
OptimizeAcqfGenerator,
SobolGenerator,
)
from aepsych.likelihoods import BernoulliObjectiveLikelihood
from aepsych.models import (
GPClassificationModel,
HadamardSemiPModel,
MonotonicRejectionGP,
PairwiseProbitModel,
)
from aepsych.server import AEPsychServer
from aepsych.strategy import SequentialStrategy, Strategy
from aepsych.version import __version__
from botorch.acquisition import qNoisyExpectedImprovement
from botorch.acquisition.active_learning import PairwiseMCPosteriorVariance
from aepsych.server.message_handlers.handle_setup import configure
class ConfigTestCase(unittest.TestCase):
def test_single_probit_config(self):
config_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat]
model = GPClassificationModel
acqf = MCLevelSetEstimation
[init_strat]
generator = SobolGenerator
min_asks = 10
min_total_outcome_occurrences = 5
[opt_strat]
generator = OptimizeAcqfGenerator
min_asks = 20
min_post_range = 0.01
keep_most_recent = 10
[MCLevelSetEstimation]
target = 0.75
beta = 3.84
objective = ProbitObjective
[GPClassificationModel]
inducing_size = 10
mean_covar_factory = default_mean_covar_factory
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
config = Config()
config.update(config_str=config_str)
self.assertTrue(
config.get_section("MCLevelSetEstimation")
== {"beta": "3.84", "objective": "ProbitObjective", "target": "0.75"}
)
self.assertTrue(
config.get_section("OptimizeAcqfGenerator")
== {"restarts": "10", "samps": "1000"}
)
strat = SequentialStrategy.from_config(config)
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(
isinstance(strat.strat_list[1].generator, OptimizeAcqfGenerator)
)
self.assertTrue(isinstance(strat.strat_list[1].model, GPClassificationModel))
self.assertTrue(strat.strat_list[1].generator.acqf is MCLevelSetEstimation)
# since ProbitObjective() is turned into an obj, we check for keys and then vals
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys())
== {"beta", "target", "objective"}
)
self.assertTrue(strat.strat_list[1].generator.acqf_kwargs["target"] == 0.75)
self.assertTrue(strat.strat_list[1].generator.acqf_kwargs["beta"] == 3.84)
self.assertTrue(
isinstance(
strat.strat_list[1].generator.acqf_kwargs["objective"],
ProbitObjective,
)
)
self.assertTrue(strat.strat_list[1].generator.restarts == 10)
self.assertTrue(strat.strat_list[1].generator.samps == 1000)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 1)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
self.assertEqual(strat.strat_list[0].min_total_outcome_occurrences, 5)
self.assertEqual(strat.strat_list[0].min_post_range, None)
self.assertEqual(strat.strat_list[0].keep_most_recent, None)
self.assertEqual(strat.strat_list[1].min_total_outcome_occurrences, 1)
self.assertEqual(strat.strat_list[1].min_post_range, 0.01)
self.assertEqual(strat.strat_list[1].keep_most_recent, 10)
def test_missing_config_file(self):
config_file = "../configs/does_not_exist.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
with self.assertRaises(FileNotFoundError):
Config(config_fnames=[config_file])
with self.assertRaises(FileNotFoundError):
Config(config_fnames=[])
def test_single_probit_config_file(self):
config_file = "../configs/single_lse_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
config = Config()
config.update(config_fnames=[config_file])
strat = SequentialStrategy.from_config(config)
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(strat.strat_list[0].model is None)
self.assertTrue(
isinstance(strat.strat_list[1].generator, OptimizeAcqfGenerator)
)
self.assertTrue(strat.strat_list[1].generator.acqf is EAVC)
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys()) == {"target"}
)
self.assertTrue(strat.strat_list[1].generator.acqf_kwargs["target"] == 0.75)
self.assertTrue(strat.strat_list[1].generator.samps == 1000)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 1)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
def test_nonmonotonic_optimization_config_file(self):
config_file = "../configs/nonmonotonic_optimization_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
config = Config()
config.update(config_fnames=[config_file])
strat = SequentialStrategy.from_config(config)
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(strat.strat_list[0].model is None)
self.assertTrue(
isinstance(strat.strat_list[1].generator, OptimizeAcqfGenerator)
)
self.assertTrue(strat.strat_list[1].generator.acqf is qNoisyExpectedImprovement)
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys()) == {"objective"}
)
self.assertTrue(
isinstance(
strat.strat_list[1].generator.acqf_kwargs["objective"],
ProbitObjective,
)
)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 1)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
def test_name_conflict_warns(self):
class DummyMod:
pass
Config.register_object(DummyMod)
with self.assertWarns(Warning):
Config.register_object(DummyMod)
def test_multiple_models_and_strats(self):
config_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat1, opt_strat2]
[init_strat]
generator = SobolGenerator
min_asks = 1
[opt_strat1]
generator = OptimizeAcqfGenerator
min_asks = 1
model = GPClassificationModel
acqf = MCLevelSetEstimation
[opt_strat2]
generator = MonotonicRejectionGenerator
min_asks = 1
model = MonotonicRejectionGP
acqf = MonotonicMCLSE
"""
config = Config()
config.update(config_str=config_str)
strat = SequentialStrategy.from_config(config)
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(strat.strat_list[0].model is None)
self.assertTrue(
isinstance(strat.strat_list[1].generator, OptimizeAcqfGenerator)
)
self.assertTrue(isinstance(strat.strat_list[1].model, GPClassificationModel))
self.assertTrue(strat.strat_list[1].generator.acqf is MCLevelSetEstimation)
self.assertTrue(
isinstance(strat.strat_list[2].generator, MonotonicRejectionGenerator)
)
self.assertTrue(isinstance(strat.strat_list[2].model, MonotonicRejectionGP))
self.assertTrue(strat.strat_list[2].generator.acqf is MonotonicMCLSE)
def test_experiment_deprecation(self):
config_str = """
[experiment]
acqf = PairwiseMCPosteriorVariance
model = PairwiseProbitModel
"""
config = Config()
config.update(config_str=config_str)
self.assertTrue("acqf" in config["common"])
self.assertTrue("model" in config["common"])
def test_to_string(self):
in_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat]
model = GPClassificationModel
acqf = LevelSetEstimation
[init_strat]
generator = SobolGenerator
min_asks = 10
[opt_strat]
generator = OptimizeAcqfGenerator
min_asks = 20
[LevelSetEstimation]
beta = 3.84
objective = ProbitObjective
[GPClassificationModel]
inducing_size = 10
mean_covar_factory = default_mean_covar_factory
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000""".strip().replace(
" ", ""
)
config = Config(config_str=in_str)
out_str = str(config).strip().replace(" ", "")
self.assertEqual(in_str, out_str)
def test_conversion(self):
config_str = """
[common]
parnames = [par1, par2]
lb = [0, 0]
ub = [1, 1]
outcome_type = single_probit
target = 0.75
[SobolStrategy]
n_trials = 10
[ModelWrapperStrategy]
n_trials = 20
refit_every = 5
[experiment]
acqf = MonotonicMCLSE
init_strat_cls = SobolStrategy
opt_strat_cls = ModelWrapperStrategy
modelbridge_cls = MonotonicSingleProbitModelbridge
model = MonotonicRejectionGP
[MonotonicMCLSE]
beta = 3.84
[MonotonicRejectionGP]
inducing_size = 100
mean_covar_factory = monotonic_mean_covar_factory
[MonotonicSingleProbitModelbridge]
restarts = 10
samps = 1000
"""
config = Config(config_str=config_str)
self.assertEqual(config.version, "0.0")
config.convert_to_latest()
self.assertEqual(config.version, __version__)
self.assertEqual(config["common"]["strategy_names"], "[init_strat, opt_strat]")
self.assertEqual(config["common"]["acqf"], "MonotonicMCLSE")
self.assertEqual(config["init_strat"]["min_asks"], "10")
self.assertEqual(config["init_strat"]["generator"], "SobolGenerator")
self.assertEqual(config["opt_strat"]["min_asks"], "20")
self.assertEqual(config["opt_strat"]["refit_every"], "5")
self.assertEqual(
config["opt_strat"]["generator"], "MonotonicRejectionGenerator"
)
self.assertEqual(config["opt_strat"]["model"], "MonotonicRejectionGP")
self.assertEqual(config["MonotonicRejectionGenerator"]["restarts"], "10")
self.assertEqual(config["MonotonicRejectionGenerator"]["samps"], "1000")
self.assertEqual(config["common"]["stimuli_per_trial"], "1")
self.assertEqual(config["common"]["outcome_types"], "[binary]")
def test_warn_about_refit(self):
config_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
strategy_names = [init_strat]
model = GPClassificationModel
[init_strat]
generator = SobolGenerator
min_asks = 10
refit_every = 5
"""
config = Config(config_str=config_str)
with self.assertWarns(UserWarning):
Strategy.from_config(config, "init_strat")
def test_pairwise_probit_config(self):
config_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 2
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
model = PairwiseProbitModel
[init_strat]
min_asks = 10
generator = SobolGenerator
[opt_strat]
min_asks = 20
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
[SobolGenerator]
n_points = 20
"""
config = Config()
config.update(config_str=config_str)
strat = SequentialStrategy.from_config(config)
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(isinstance(strat.strat_list[1].model, PairwiseProbitModel))
self.assertTrue(
strat.strat_list[1].generator.acqf is PairwiseMCPosteriorVariance
)
# because ProbitObjective() is an object, test keys then vals
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys()) == {"objective"}
)
self.assertTrue(
isinstance(
strat.strat_list[1].generator.acqf_kwargs["objective"],
ProbitObjective,
)
)
self.assertTrue(strat.strat_list[1].generator.restarts == 10)
self.assertTrue(strat.strat_list[1].generator.samps == 1000)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 2)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
def test_pairwise_probit_config_file(self):
config_file = "../configs/pairwise_al_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
config = Config()
config.update(config_fnames=[config_file])
strat = SequentialStrategy.from_config(config)
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(strat.strat_list[0].model is None)
self.assertTrue(isinstance(strat.strat_list[1].model, PairwiseProbitModel))
self.assertTrue(
strat.strat_list[1].generator.acqf is PairwiseMCPosteriorVariance
)
# because ProbitObjective() is an object, we have to be a bit careful with
# this test
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys()) == {"objective"}
)
self.assertTrue(
isinstance(
strat.strat_list[1].generator.acqf_kwargs["objective"],
ProbitObjective,
)
)
self.assertTrue(strat.strat_list[1].generator.restarts == 10)
self.assertTrue(strat.strat_list[1].generator.samps == 1000)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 2)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
def test_pairwise_al_config_file(self):
        # random database path name without dashes
database_path = "./{}.db".format(str(uuid.uuid4().hex))
server = AEPsychServer(database_path=database_path)
config_file = "../configs/pairwise_al_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
configure(server, config_fnames=[config_file])
strat = server.strat
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(strat.strat_list[0].model is None)
self.assertTrue(
isinstance(strat.strat_list[1].generator, OptimizeAcqfGenerator)
)
self.assertTrue(isinstance(strat.strat_list[1].model, PairwiseProbitModel))
self.assertTrue(
strat.strat_list[1].generator.acqf is PairwiseMCPosteriorVariance
)
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys()) == {"objective"}
)
self.assertTrue(
isinstance(
strat.strat_list[1].generator.acqf_kwargs["objective"],
ProbitObjective,
)
)
self.assertTrue(strat.strat_list[1].generator.restarts == 10)
self.assertTrue(strat.strat_list[1].generator.samps == 1000)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 2)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
# cleanup the db
if server.db is not None:
server.db.delete_db()
def test_pairwise_opt_config(self):
        # random database path name without dashes
database_path = "./{}.db".format(str(uuid.uuid4().hex))
server = AEPsychServer(database_path=database_path)
config_file = "../configs/pairwise_opt_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
configure(server, config_fnames=[config_file])
strat = server.strat
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(strat.strat_list[0].model is None)
self.assertTrue(isinstance(strat.strat_list[1].model, PairwiseProbitModel))
self.assertTrue(strat.strat_list[1].generator.acqf is qNoisyExpectedImprovement)
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys()) == {"objective"}
)
self.assertTrue(
isinstance(
strat.strat_list[1].generator.acqf_kwargs["objective"],
ProbitObjective,
)
)
self.assertTrue(strat.strat_list[1].generator.restarts == 10)
self.assertTrue(strat.strat_list[1].generator.samps == 1000)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 2)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
# cleanup the db
if server.db is not None:
server.db.delete_db()
def test_jsonify(self):
sample_configstr = """
[common]
lb = [0, 0]
ub = [1, 1]
outcome_type = pairwise_probit
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
model = PairwiseProbitModel
[init_strat]
min_asks = 10
generator = PairwiseSobolGenerator
[opt_strat]
min_asks = 20
generator = PairwiseOptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[PairwiseOptimizeAcqfGenerator]
restarts = 10
samps = 1000
[PairwiseSobolGenerator]
n_points = 20
"""
request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": sample_configstr},
}
# Generate a configuration object.
temporaryconfig = Config(**request["message"])
configedjson = temporaryconfig.jsonifyAll()
referencejsonstr = """{
"common": {
"lb": "[0, 0]",
"ub": "[1, 1]",
"outcome_type": "pairwise_probit",
"parnames": "[par1, par2]",
"strategy_names": "[init_strat, opt_strat]",
"acqf": "PairwiseMCPosteriorVariance",
"model": "PairwiseProbitModel"
},
"init_strat": {
"min_asks": "10",
"generator": "PairwiseSobolGenerator"
},
"opt_strat": {
"min_asks": "20",
"generator": "PairwiseOptimizeAcqfGenerator"
},
"PairwiseProbitModel": {
"mean_covar_factory": "default_mean_covar_factory"
},
"PairwiseMCPosteriorVariance": {
"objective": "ProbitObjective"
},
"PairwiseOptimizeAcqfGenerator": {
"restarts": "10",
"samps": "1000"
},
"PairwiseSobolGenerator": {
"n_points": "20"
}
} """
        # Rather than comparing strings, convert both JSON strings to dicts and compare the dicts
testconfig = json.loads(configedjson)
testsample = json.loads(referencejsonstr)
        # the deepest nesting is an option within a section, so dict equality suffices
self.assertEqual(testconfig, testsample)
def test_stimuli_compatibility(self):
config_str1 = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat]
[init_strat]
generator = SobolGenerator
model = GPClassificationModel
"""
config1 = Config()
config1.update(config_str=config_str1)
config_str2 = """
[common]
lb = [0, 0]
ub = [1, 1]
        stimuli_per_trial = 2
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat]
[init_strat]
generator = SobolGenerator
model = GPClassificationModel
"""
config2 = Config()
config2.update(config_str=config_str2)
config_str3 = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat]
[init_strat]
generator = SobolGenerator
model = PairwiseProbitModel
"""
config3 = Config()
config3.update(config_str=config_str3)
# this should work
SequentialStrategy.from_config(config1)
# this should fail
with self.assertRaises(AssertionError):
            SequentialStrategy.from_config(config2)
# this should fail too
with self.assertRaises(AssertionError):
SequentialStrategy.from_config(config3)
def test_outcome_compatibility(self):
config_str1 = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat]
[init_strat]
generator = SobolGenerator
model = GPClassificationModel
"""
config1 = Config()
config1.update(config_str=config_str1)
config_str2 = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [continuous]
parnames = [par1, par2]
strategy_names = [init_strat]
[init_strat]
generator = SobolGenerator
model = GPClassificationModel
"""
config2 = Config()
config2.update(config_str=config_str2)
config_str3 = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat]
[init_strat]
generator = SobolGenerator
model = GPRegressionModel
"""
config3 = Config()
config3.update(config_str=config_str3)
# this should work
SequentialStrategy.from_config(config1)
# this should fail
with self.assertRaises(AssertionError):
            SequentialStrategy.from_config(config2)
# this should fail too
with self.assertRaises(AssertionError):
SequentialStrategy.from_config(config3)
def test_strat_names(self):
good_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat]
[init_strat]
generator = SobolGenerator
model = GPClassificationModel
[opt_strat]
generator = OptimizeAcqfGenerator
model = GPClassificationModel
"""
bad_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, init_strat]
[init_strat]
generator = SobolGenerator
model = GPClassificationModel
"""
good_config = Config(config_str=good_str)
bad_config = Config(config_str=bad_str)
# this should work
SequentialStrategy.from_config(good_config)
# this should fail
with self.assertRaises(AssertionError):
SequentialStrategy.from_config(bad_config)
def test_semip_config(self):
config_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat]
acqf = MCLevelSetEstimation
model = HadamardSemiPModel
[init_strat]
min_asks = 10
generator = SobolGenerator
refit_every = 10
[opt_strat]
min_asks = 20
generator = OptimizeAcqfGenerator
[HadamardSemiPModel]
stim_dim = 1
inducing_size = 10
inducing_point_method = sobol
likelihood = BernoulliObjectiveLikelihood
[BernoulliObjectiveLikelihood]
objective = FloorGumbelObjective
[FloorGumbelObjective]
floor = 0.123
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
config = Config()
config.update(config_str=config_str)
strat = SequentialStrategy.from_config(config)
opt_strat = strat.strat_list[1]
model = opt_strat.model
self.assertTrue(isinstance(model, HadamardSemiPModel))
self.assertTrue(torch.all(model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(model.ub == torch.Tensor([1, 1])))
self.assertTrue(model.dim == 2)
self.assertTrue(model.inducing_size == 10)
self.assertTrue(model.stim_dim == 1)
self.assertTrue(model.inducing_point_method == "sobol")
self.assertTrue(isinstance(model.likelihood, BernoulliObjectiveLikelihood))
self.assertTrue(isinstance(model.likelihood.objective, FloorGumbelObjective))
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/test_config.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from aepsych.likelihoods import OrdinalLikelihood
from gpytorch.test.base_likelihood_test_case import BaseLikelihoodTestCase
empty_batch_shape = torch.Size([])
class TestOrdinalLikelihood(BaseLikelihoodTestCase, unittest.TestCase):
seed = 1
n_levels = 3
    def _create_targets(self, batch_shape=empty_batch_shape):
res = torch.randint(low=0, high=self.n_levels, size=(*batch_shape, 5)).float()
return res
def create_likelihood(self):
return OrdinalLikelihood(n_levels=self.n_levels)
    def _test_marginal(self, batch_shape=empty_batch_shape):
# disable this test, since Categorical.mean returns nan anyway
# and we're not overriding this method on base Likelihood
pass
|
aepsych-main
|
tests/test_likelihoods.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import uuid
import torch
from aepsych.server import AEPsychServer
from aepsych_client import AEPsychClient
from ax.core.optimization_config import MultiObjectiveOptimizationConfig
from ax.modelbridge import Models
from botorch.test_functions.multi_objective import BraninCurrin
branin_currin = BraninCurrin(negate=True).to(
dtype=torch.double,
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
)
def evaluate(parameters):
evaluation = branin_currin(
torch.tensor([parameters.get("x1"), parameters.get("x2")])
)
# In our case, standard error is 0, since we are computing a synthetic function.
# Set standard error to None if the noise level is unknown.
return {"out1": evaluation[0].item(), "out2": evaluation[1].item()}
class MultiOutcomeTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
        # Create a server object configured to run the multi-outcome example experiment
database_path = "./{}.db".format(str(uuid.uuid4().hex))
cls.client = AEPsychClient(server=AEPsychServer(database_path=database_path))
config_file = "../configs/multi_outcome_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
cls.client.configure(config_file)
cls.gs = cls.client.server.strat.ax_client.generation_strategy
cls.experiment = cls.client.server.strat.ax_client.experiment
def test_generation_strategy(self):
self.assertEqual(len(self.gs._steps), 2 + 1)
self.assertEqual(self.gs._steps[0].model, Models.SOBOL)
self.assertEqual(self.gs._steps[1].model, Models.MOO)
self.assertEqual(self.gs._steps[2].model, Models.MOO) # Extra final step
def test_experiment(self):
self.assertEqual(len(self.experiment.metrics), 2)
self.assertIn("out1", self.experiment.metrics)
self.assertIn("out2", self.experiment.metrics)
self.assertIsInstance(
self.experiment.optimization_config, MultiObjectiveOptimizationConfig
)
(
threshold1,
threshold2,
) = self.experiment.optimization_config.objective_thresholds
self.assertEqual(threshold1.bound, -18)
self.assertEqual(threshold2.bound, -6)
(
objective1,
objective2,
) = self.experiment.optimization_config.objective.objectives
self.assertFalse(objective1.minimize)
self.assertFalse(objective2.minimize)
# Smoke test just to make sure server can handle multioutcome messages
def test_ask_tell(self):
while not self.client.server.strat.finished:
trial_params = self.client.ask()
for trial in trial_params["config"]:
outcome = evaluate(trial_params["config"][trial])
self.client.tell_trial_by_index(trial, outcome)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/test_multioutcome.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
import unittest
import uuid
import numpy as np
import torch
from aepsych.config import Config
from aepsych.server import AEPsychServer
from aepsych_client import AEPsychClient
from ax.service.utils.report_utils import exp_to_df
from parameterized import parameterized_class
@parameterized_class(
("config_file", "should_ignore"),
[
("../configs/ax_example.ini", False),
("../configs/ax_ordinal_exploration_example.ini", True),
],
)
class AxIntegrationTestCase(unittest.TestCase):
n_extra_asks = 3
@classmethod
def setUpClass(cls):
if cls.should_ignore:
raise unittest.SkipTest("Skipping because should_ignore is True.")
def sigmoid(x):
return 1 / (1 + math.exp(-x / 100))
        # Simulate participant responses: an ordinal response in [0, 4] from
        # a sigmoid of the mean of the float-valued parameters
def simulate_response(trial_params):
pars = [
trial_params[par]
for par in trial_params
if type(trial_params[par]) == float
]
response = round(sigmoid(np.array(pars).mean()) * 4)
return response
# Fix random seeds
np.random.seed(123)
torch.manual_seed(123)
        # Create a server object configured to run the example Ax experiment
database_path = "./{}.db".format(str(uuid.uuid4().hex))
cls.client = AEPsychClient(server=AEPsychServer(database_path=database_path))
cls.config_file = os.path.join(os.path.dirname(__file__), cls.config_file)
cls.client.configure(cls.config_file)
cls.can_fit_at_start = cls.client.server.strat.can_fit
while not cls.client.server.strat.finished:
# Ask the server what the next parameter values to test should be.
trial_params = cls.client.ask()
# Simulate a participant response.
for trial in trial_params["config"]:
outcome = simulate_response(trial_params["config"][trial])
# Tell the server what happened so that it can update its model.
cls.client.tell_trial_by_index(trial, outcome)
# Make sure we can manually tell without asking first
cls.client.tell(trial_params["config"][trial], outcome)
# Add an extra ask to make sure we can generate trials endlessly
trial_params = cls.client.ask(cls.n_extra_asks)
cls.can_fit_at_end = cls.client.server.strat.can_fit
cls.df = exp_to_df(cls.client.server.strat.experiment)
cls.config = Config(config_fnames=[cls.config_file])
def tearDown(self):
if self.client.server.db is not None:
self.client.server.db.delete_db()
def test_random_seed(self):
self.assertEqual(self.client.server.strat.ax_client._random_seed, 123)
def test_bounds(self):
lb = self.config.getlist("common", "lb", element_type=float)
ub = self.config.getlist("common", "ub", element_type=float)
par4choices = self.config.getlist("par4", "choices", element_type=str)
par5choices = self.config.getlist("par5", "choices", element_type=str)
par6value = self.config.getfloat("par6", "value")
par7value = self.config.get("par7", "value")
self.assertTrue((self.df["par1"] >= lb[0]).all())
self.assertTrue((self.df["par1"] <= ub[0]).all())
self.assertTrue((self.df["par2"] >= lb[1]).all())
self.assertTrue((self.df["par2"] <= ub[1]).all())
self.assertTrue((self.df["par3"] >= lb[2]).all())
self.assertTrue((self.df["par3"] <= ub[2]).all())
self.assertTrue(self.df["par4"].isin(par4choices).all())
self.assertTrue(self.df["par5"].isin(par5choices).all())
self.assertTrue((self.df["par6"] == par6value).all())
self.assertTrue((self.df["par7"] == par7value).all())
@unittest.skip(
"This test is flaky due to non-determinism in asks after the experiment is finished. Skipping until this gets fixed."
)
def test_constraints(self):
constraints = self.config.getlist("common", "par_constraints", element_type=str)
for constraint in constraints:
self.assertEqual(len(self.df.query(constraint)), len(self.df))
self.assertEqual(self.df["par3"].dtype, "int64")
def test_n_trials(self):
n_tells = (self.df["trial_status"] == "COMPLETED").sum()
correct_n_tells = self.config.getint("opt_strat", "min_total_tells") + 1
self.assertEqual(n_tells, correct_n_tells)
n_asks = self.client.server.strat.experiment.num_asks
correct_n_asks = (
self.config.getint("opt_strat", "min_total_tells") + self.n_extra_asks
)
self.assertEqual(n_asks, correct_n_asks)
def test_generation_method(self):
n_sobol = (self.df["generation_method"] == "Sobol").sum()
n_opt = (self.df["generation_method"] == "BoTorch").sum()
n_manual = (self.df["generation_method"] == "Manual").sum()
correct_n_sobol = self.config.getint("init_strat", "min_total_tells")
correct_n_opt = (
self.config.getint("opt_strat", "min_total_tells")
- correct_n_sobol
+ self.n_extra_asks
)
self.assertEqual(n_sobol, correct_n_sobol)
self.assertEqual(n_opt, correct_n_opt)
self.assertEqual(n_manual, 1)
def test_can_fit(self):
self.assertFalse(self.can_fit_at_start)
self.assertTrue(self.can_fit_at_end)
@unittest.skip("Base integration tests already cover most of these")
class AxBetaRegressionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
        # Simulate participant responses; returns the average normalized value of par1-3
def simulate_response(trial_params):
pars = [
(trial_params["par1"][0] - cls.lb[0]) / (cls.ub[0] - cls.lb[0]),
(trial_params["par2"][0] - cls.lb[1]) / (cls.ub[1] - cls.lb[1]),
(trial_params["par3"][0] - cls.lb[2]) / (cls.ub[2] - cls.lb[2]),
]
response = np.array(pars).mean()
return response
# Fix random seeds
np.random.seed(0)
torch.manual_seed(0)
        # Create a server object configured to run the beta regression example experiment
database_path = "./{}.db".format(str(uuid.uuid4().hex))
cls.client = AEPsychClient(server=AEPsychServer(database_path=database_path))
config_file = "../configs/ax_beta_regression_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
cls.client.configure(config_file)
cls.config = Config(config_fnames=[config_file])
cls.lb = cls.config.getlist("common", "lb", element_type=float)
cls.ub = cls.config.getlist("common", "ub", element_type=float)
while True:
# Ask the server what the next parameter values to test should be.
response = cls.client.ask()
if response["is_finished"]:
break
# Simulate a participant response.
outcome = simulate_response(response["config"])
# Tell the server what happened so that it can update its model.
cls.client.tell(response["config"], outcome)
cls.df = exp_to_df(cls.client.server.strat.experiment)
def tearDown(self):
if self.client.server.db is not None:
self.client.server.db.delete_db()
def test_bounds(self):
par4choices = self.config.getlist("par4", "choices", element_type=str)
par5choices = self.config.getlist("par5", "choices", element_type=str)
par6value = self.config.getfloat("par6", "value")
par7value = self.config.get("par7", "value")
self.assertTrue((self.df["par1"] >= self.lb[0]).all())
self.assertTrue((self.df["par1"] <= self.ub[0]).all())
self.assertTrue((self.df["par2"] >= self.lb[1]).all())
self.assertTrue((self.df["par2"] <= self.ub[1]).all())
self.assertTrue((self.df["par3"] >= self.lb[2]).all())
self.assertTrue((self.df["par3"] <= self.ub[2]).all())
self.assertTrue(self.df["par4"].isin(par4choices).all())
self.assertTrue(self.df["par5"].isin(par5choices).all())
self.assertTrue((self.df["par6"] == par6value).all())
self.assertTrue((self.df["par7"] == par7value).all())
def test_constraints(self):
constraints = self.config.getlist("common", "par_constraints", element_type=str)
for constraint in constraints:
self.assertEqual(len(self.df.query(constraint)), len(self.df))
self.assertEqual(self.df["par3"].dtype, "int64")
def test_n_trials(self):
n_tells = (self.df["trial_status"] == "COMPLETED").sum()
correct_n_tells = self.config.getint("opt_strat", "min_total_tells")
self.assertEqual(n_tells, correct_n_tells)
def test_generation_method(self):
n_sobol = (self.df["generation_method"] == "Sobol").sum()
n_opt = (self.df["generation_method"] == "BoTorch").sum()
correct_n_sobol = self.config.getint("init_strat", "min_total_tells")
correct_n_opt = (
self.config.getint("opt_strat", "min_total_tells") - correct_n_sobol
)
self.assertEqual(n_sobol, correct_n_sobol)
self.assertEqual(n_opt, correct_n_opt)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/test_ax_integration.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
These tests check that the server can handle different experiments
(multi/single stimuli, multi/single outcome). They ensure that the
data is correctly stored in the database tables (raw, param, and outcome).
They also check that the experiment table is correctly populated
(generate_experiment_table method).
"""
import logging
import unittest
import uuid
from itertools import product
import aepsych.server as server
import aepsych.utils_logging as utils_logging
from parameterized import parameterized
params = {
"singleStimuli": {
"x1": [0.1, 0.2, 0.3, 1, 2, 3, 4],
"x2": [4, 0.1, 3, 0.2, 2, 1, 0.3],
},
"multiStimuli": {
"x1": [[0.1, 0.2], [0.3, 1], [2, 3], [4, 0.1], [0.2, 2], [1, 0.3], [0.3, 0.1]],
"x2": [[4, 0.1], [3, 0.2], [2, 1], [0.3, 0.2], [2, 0.3], [1, 0.1], [0.3, 4]],
},
}
outcomes = {
"singleOutcome": [1, -1, 0.1, 0, -0.1, 0, 0],
"multiOutcome": [
[[1], [0]],
[[-1], [0]],
[[0.1], [0]],
[[0], [0]],
[[-0.1], [0]],
[[0], [0]],
[[0], [0]],
],
}
multistim_config = """
# Configuration for multi-stimulus experiment integration test
[common]
lb = [0, 0]
ub = [1, 1]
parnames = [x1, x2]
stimuli_per_trial = 2
outcome_types = [binary]
strategy_names = [init_strat, opt_strat]
[init_strat]
min_asks = 3
generator = SobolGenerator
min_total_outcome_occurrences = 0
[opt_strat]
min_asks = 4
generator = OptimizeAcqfGenerator
acqf = qNoisyExpectedImprovement
model = PairwiseProbitModel
min_total_outcome_occurrences = 0
[SobolGenerator]
n_points = 2
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[PairwiseProbitModel]
inducing_size = 100
mean_covar_factory = default_mean_covar_factory
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
[qNoisyExpectedImprovement]
objective = ProbitObjective
"""
singlestim_config = """
[common]
lb = [0, 0]
ub = [1, 1]
parnames = [x1, x2]
stimuli_per_trial = 1
outcome_types = [binary]
strategy_names = [init_strat, opt_strat]
[init_strat]
min_asks = 3
generator = SobolGenerator
min_total_outcome_occurrences = 0
[opt_strat]
min_asks = 4
generator = OptimizeAcqfGenerator
acqf = MCPosteriorVariance
model = GPClassificationModel
min_total_outcome_occurrences = 0
[GPClassificationModel]
inducing_size = 10
mean_covar_factory = default_mean_covar_factory
[SobolGenerator]
n_points = 2
"""
test_configs = {"singleStimuli": singlestim_config, "multiStimuli": multistim_config}
all_tests = list(product(params, outcomes))
class IntegrationTestCase(unittest.TestCase):
def setUp(self):
# setup logger
server.logger = utils_logging.getLogger(logging.DEBUG, "logs")
# random port
socket = server.sockets.PySocket(port=0)
        # random database path name without dashes
database_path = "./{}.db".format(str(uuid.uuid4().hex))
self.s = server.AEPsychServer(socket=socket, database_path=database_path)
# Server messages
self.setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": None},
}
self.ask_request = {"type": "ask", "message": ""}
self.tell_request = {
"type": "tell",
"message": {"config": {}, "outcome": 0},
"extra_info": {},
}
def tearDown(self):
self.s.cleanup()
# cleanup the db
if self.s.db is not None:
self.s.db.delete_db()
def get_tell(self, x1, x2, outcome):
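        # Populate the shared tell request in place with this trial's
        # parameters, outcome, and extra info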
self.tell_request["message"]["config"]["x1"] = x1
self.tell_request["message"]["config"]["x2"] = x2
self.tell_request["message"]["outcome"] = outcome
self.tell_request["extra_info"]["e1"] = 1
self.tell_request["extra_info"]["e2"] = 2
def check_params(self, param_type, x1, x2):
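        # Verify that parameter values stored in experiment_table match what was
        # told; multi-stimulus params are flattened into *_stimuli0/*_stimuli1
        # columns, so we re-pair them before comparing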
if param_type == "multiStimuli":
x1_stimuli0_saved = (
self.s.db.get_engine()
.execute("SELECT x1_stimuli0 FROM experiment_table")
.fetchall()
)
x1_stimuli1_saved = (
self.s.db.get_engine()
.execute("SELECT x1_stimuli1 FROM experiment_table")
.fetchall()
)
x1_stimuli0_saved = [
float(item) for sublist in x1_stimuli0_saved for item in sublist
]
x1_stimuli1_saved = [
float(item) for sublist in x1_stimuli1_saved for item in sublist
]
# Reshape
x1_saved = []
for i in range(len(x1_stimuli0_saved)):
x1_saved.append([x1_stimuli0_saved[i], x1_stimuli1_saved[i]])
self.assertEqual(x1_saved, x1)
x2_stimuli0_saved = (
self.s.db.get_engine()
.execute("SELECT x2_stimuli0 FROM experiment_table")
.fetchall()
)
x2_stimuli1_saved = (
self.s.db.get_engine()
.execute("SELECT x2_stimuli1 FROM experiment_table")
.fetchall()
)
x2_stimuli0_saved = [
float(item) for sublist in x2_stimuli0_saved for item in sublist
]
x2_stimuli1_saved = [
float(item) for sublist in x2_stimuli1_saved for item in sublist
]
# Reshape
x2_saved = []
for i in range(len(x2_stimuli0_saved)):
x2_saved.append([x2_stimuli0_saved[i], x2_stimuli1_saved[i]])
self.assertEqual(x2_saved, x2)
elif param_type == "singleStimuli":
x1_saved = (
self.s.db.get_engine()
.execute("SELECT x1 FROM experiment_table")
.fetchall()
)
x1_saved = [float(item) for sublist in x1_saved for item in sublist]
self.assertTrue(x1_saved == x1)
x2_saved = (
self.s.db.get_engine()
.execute("SELECT x2 FROM experiment_table")
.fetchall()
)
x2_saved = [float(item) for sublist in x2_saved for item in sublist]
self.assertTrue(x2_saved == x2)
def check_outcome(self, outcome_type, outcome):
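        # Verify that outcomes stored in experiment_table match what was told;
        # multi-outcome data is split into outcome_0/outcome_1 columns and
        # re-nested before comparing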
if outcome_type == "multiOutcome":
outcome0_saved = (
self.s.db.get_engine()
.execute("SELECT outcome_0 FROM experiment_table")
.fetchall()
)
outcome1_saved = (
self.s.db.get_engine()
.execute("SELECT outcome_1 FROM experiment_table")
.fetchall()
)
outcome0_saved = [item for sublist in outcome0_saved for item in sublist]
outcome1_saved = [item for sublist in outcome1_saved for item in sublist]
outcome_saved = []
for i in range(len(outcome0_saved)):
outcome_saved.append([[outcome0_saved[i]], [outcome1_saved[i]]])
self.assertEqual(outcome_saved, outcome)
elif outcome_type == "singleOutcome":
outcome_saved = (
self.s.db.get_engine()
.execute("SELECT outcome FROM experiment_table")
.fetchall()
)
outcome_saved = [item for sublist in outcome_saved for item in sublist]
self.assertTrue(outcome_saved == outcome)
@parameterized.expand(all_tests)
def test_experiment(self, param_type, outcome_type):
x1 = params[param_type]["x1"]
x2 = params[param_type]["x2"]
outcome = outcomes[outcome_type]
dummy_config = test_configs[param_type]
self.setup_request["message"]["config_str"] = dummy_config
self.s.handle_request(self.setup_request)
i = 0
while not self.s.strat.finished:
self.s.handle_request(self.ask_request)
self.get_tell(x1[i], x2[i], outcome[i])
i = i + 1
self.s.handle_request(self.tell_request)
# Experiment id
exp_id = self.s.db.get_master_records()[0].experiment_id
# Create table with experiment data
self.s.generate_experiment_table(exp_id, return_df=True)
# Check that table exists
self.assertTrue("experiment_table" in self.s.db.get_engine().table_names())
# Check that parameter and outcomes values are correct
self.check_outcome(outcome_type, outcome)
self.check_params(param_type, x1, x2)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/test_integration.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
aepsych-main
|
tests/acquisition/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from aepsych.acquisition.lse import MCLevelSetEstimation
from aepsych.acquisition.objective import ProbitObjective
from botorch.utils.testing import MockModel, MockPosterior
from scipy.stats import norm
class TestLSE(unittest.TestCase):
def setUp(self):
f = torch.ones(1) * 1.7
var = torch.ones(1) * 2.3
samps = torch.ones(1, 1, 1) * 1.7
self.model = MockModel(MockPosterior(mean=f, variance=var, samples=samps))
def test_mclse(self):
mclse = MCLevelSetEstimation(
model=self.model, target=5.0, beta=3.84, objective=ProbitObjective()
)
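        # Straddle-style LSE score: sqrt(beta) * posterior sd of the objective
        # minus |objective mean - target|; the mock samples are constant, so the
        # sd term presumably reduces to the acqf's 1e-5 variance jitter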
expected = np.sqrt(3.84) * np.sqrt(1e-5) - np.abs(norm.cdf(1.7) - 5)
self.assertAlmostEqual(mclse(torch.zeros(1, 1)), expected)
|
aepsych-main
|
tests/acquisition/test_lse.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from aepsych.acquisition.mutual_information import (
BernoulliMCMutualInformation,
MonotonicBernoulliMCMutualInformation,
)
from aepsych.acquisition.objective import ProbitObjective
from aepsych.generators import (
MonotonicRejectionGenerator,
OptimizeAcqfGenerator,
SobolGenerator,
)
from aepsych.models import GPClassificationModel, MonotonicRejectionGP
from aepsych.strategy import SequentialStrategy, Strategy
from gpytorch.kernels import LinearKernel
from gpytorch.means import ConstantMean
from scipy.stats import bernoulli, multivariate_normal, norm, pearsonr
from ..common import f_1d
class SingleProbitMI(unittest.TestCase):
def test_1d_monotonic_single_probit(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 15
n_opt = 1
lb = -4.0
ub = 4.0
acqf = MonotonicBernoulliMCMutualInformation
acqf_kwargs = {"objective": ProbitObjective()}
model_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
min_asks=n_opt,
model=MonotonicRejectionGP(lb=lb, ub=ub, dim=1, monotonic_idxs=[0]),
generator=MonotonicRejectionGenerator(acqf, acqf_kwargs),
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(f_1d(next_x))])
x = torch.linspace(-4, 4, 100).reshape(-1, 1)
zhat, _ = strat.predict(x)
true = f_1d(x.detach().numpy())
est = zhat.detach().numpy()
# close enough!
self.assertTrue((((norm.cdf(est) - true) ** 2).mean()) < 0.25)
def test_1d_single_probit(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 15
n_opt = 20
lb = -4.0
ub = 4.0
acqf = BernoulliMCMutualInformation
extra_acqf_args = {"objective": ProbitObjective()}
model_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, dim=1, inducing_size=10),
generator=OptimizeAcqfGenerator(acqf, extra_acqf_args),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(f_1d(next_x))])
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
true = f_1d(x.detach().numpy())
est = zhat.detach().numpy()
# close enough!
self.assertTrue((((norm.cdf(est) - true) ** 2).mean()) < 0.25)
def test_mi_acqf(self):
mean = ConstantMean().initialize(constant=1.2)
covar = LinearKernel().initialize(variance=1.0)
model = GPClassificationModel(
lb=torch.Tensor([0]),
ub=torch.Tensor([1]),
inducing_size=10,
mean_module=mean,
covar_module=covar,
)
x = torch.rand(size=(10, 1))
acqf = BernoulliMCMutualInformation(model=model, objective=ProbitObjective())
acq_pytorch = acqf(x)
samps_numpy = norm.cdf(
multivariate_normal.rvs(mean=np.ones(10) * 1.2, cov=x @ x.T, size=10000)
)
samp_entropies = bernoulli(samps_numpy).entropy()
mean_entropy = bernoulli(samps_numpy.mean(axis=0)).entropy()
acq_numpy = mean_entropy - samp_entropies.mean(axis=0)
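        # BALD-style mutual information: entropy of the mean prediction minus
        # the mean per-sample entropy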
        # this assertion fails for reasons unknown, even though the values
        # should agree to numerical precision
# self.assertTrue(np.allclose(acq_numpy, acq_pytorch.detach().numpy().flatten()))
# this one succeeds
self.assertTrue(
pearsonr(acq_numpy, acq_pytorch.detach().numpy().flatten())[0] > (1 - 1e-5)
)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/acquisition/test_mi.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from itertools import product
import numpy as np
import torch
from aepsych.acquisition.objective import (
FloorGumbelObjective,
FloorLogitObjective,
FloorProbitObjective,
)
from parameterized import parameterized
from scipy.stats import gumbel_l, logistic, norm
objective_pairs = [
(FloorLogitObjective, logistic),
(FloorProbitObjective, norm),
(FloorGumbelObjective, gumbel_l),
]
floors = [0, 0.5, 0.33]
all_tests = list(product(objective_pairs, floors))
class FloorLinkTests(unittest.TestCase):
@parameterized.expand(all_tests)
def test_floor_links(self, objectives, floor):
our_objective, scipy_dist = objectives
x = np.linspace(-3, 3, 50)
scipy_answer = scipy_dist.cdf(x)
scipy_answer = scipy_answer * (1 - floor) + floor
our_link = our_objective(floor=floor)
our_answer = our_link(torch.Tensor(x).unsqueeze(-1))
self.assertTrue(np.allclose(scipy_answer, our_answer.numpy()))
our_inverse = our_link.inverse(our_answer)
self.assertTrue(np.allclose(x, our_inverse.numpy()))
|
aepsych-main
|
tests/acquisition/test_objective.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from aepsych.acquisition.monotonic_rejection import MonotonicMCLSE
from aepsych.acquisition.objective import ProbitObjective
from aepsych.models.derivative_gp import MixedDerivativeVariationalGP
from botorch.acquisition.objective import IdentityMCObjective
from botorch.utils.testing import BotorchTestCase
class TestMonotonicAcq(BotorchTestCase):
def test_monotonic_acq(self):
# Init
train_X_aug = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [2.0, 2.0, 0.0]])
deriv_constraint_points = torch.tensor(
[[0.0, 0.0, 1.0], [1.0, 1.0, 1.0], [2.0, 2.0, 1.0]]
)
train_Y = torch.tensor([[1.0], [2.0], [3.0]])
m = MixedDerivativeVariationalGP(
train_x=train_X_aug, train_y=train_Y, inducing_points=train_X_aug
)
acq = MonotonicMCLSE(
model=m,
deriv_constraint_points=deriv_constraint_points,
num_samples=5,
num_rejection_samples=8,
target=1.9,
)
self.assertTrue(isinstance(acq.objective, IdentityMCObjective))
acq = MonotonicMCLSE(
model=m,
deriv_constraint_points=deriv_constraint_points,
num_samples=5,
num_rejection_samples=8,
target=1.9,
objective=ProbitObjective(),
)
# forward
acq(train_X_aug)
Xfull = torch.cat((train_X_aug, acq.deriv_constraint_points), dim=0)
posterior = m.posterior(Xfull)
samples = acq.sampler(posterior)
self.assertEqual(samples.shape, torch.Size([5, 6, 1]))
|
aepsych-main
|
tests/acquisition/test_monotonic.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from aepsych.acquisition.rejection_sampler import RejectionSampler
from aepsych.models.derivative_gp import MixedDerivativeVariationalGP
from botorch.utils.testing import BotorchTestCase
class TestRejectionSampling(BotorchTestCase):
def test_rejection_sampling(self):
train_X_aug = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [2.0, 2.0, 0.0]])
deriv_constraint_points = torch.tensor(
[[0.0, 0.0, 1.0], [1.0, 1.0, 1.0], [2.0, 2.0, 1.0]]
)
train_Y = torch.tensor([[1.0], [2.0], [3.0]])
m = MixedDerivativeVariationalGP(
train_x=train_X_aug, train_y=train_Y, inducing_points=train_X_aug
)
Xfull = torch.cat((train_X_aug, deriv_constraint_points), dim=0)
posterior = m.posterior(Xfull)
sampler = RejectionSampler(
num_samples=3,
num_rejection_samples=5000,
constrained_idx=torch.tensor([3, 4, 5]),
)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([3, 6, 1]))
self.assertTrue(torch.all(samples.squeeze(-1)[:, 3:] > 0).item())
|
aepsych-main
|
tests/acquisition/test_rejection_sampler.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
aepsych-main
|
tests/server/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import select
import unittest
import uuid
from unittest.mock import MagicMock, patch, PropertyMock
import aepsych.server as server
import aepsych.utils_logging as utils_logging
import torch
from aepsych.config import Config
from aepsych.server.message_handlers.handle_setup import configure
from aepsych.server.sockets import BAD_REQUEST
from aepsych.strategy import AEPsychStrategy
dummy_config = """
[common]
lb = [0]
ub = [1]
parnames = [x]
stimuli_per_trial = 1
outcome_types = [binary]
strategy_names = [init_strat, opt_strat]
[init_strat]
min_asks = 2
generator = SobolGenerator
min_total_outcome_occurrences = 0
[opt_strat]
min_asks = 2
generator = OptimizeAcqfGenerator
acqf = MCPosteriorVariance
model = GPClassificationModel
min_total_outcome_occurrences = 0
[GPClassificationModel]
inducing_size = 10
mean_covar_factory = default_mean_covar_factory
[SobolGenerator]
n_points = 2
"""
class BaseServerTestCase(unittest.TestCase):
def setUp(self):
# setup logger
server.logger = utils_logging.getLogger(logging.DEBUG, "logs")
# random port
socket = server.sockets.PySocket(port=0)
        # random database path name without dashes
database_path = "./{}_test_server.db".format(str(uuid.uuid4().hex))
self.s = server.AEPsychServer(socket=socket, database_path=database_path)
self.db_name = database_path.split("/")[1]
self.db_path = database_path
def tearDown(self):
self.s.cleanup()
# cleanup the db
if self.s.db is not None:
self.s.db.delete_db()
def dummy_create_setup(self, server, request=None):
request = request or {"test": "test request"}
server._db_master_record = server.db.record_setup(
description="default description", name="default name", request=request
)
class ServerTestCase(BaseServerTestCase):
def test_final_strat_serialization(self):
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
ask_request = {"type": "ask", "message": ""}
tell_request = {
"type": "tell",
"message": {"config": {"x": [0.5]}, "outcome": 1},
}
self.s.handle_request(setup_request)
while not self.s.strat.finished:
self.s.handle_request(ask_request)
self.s.handle_request(tell_request)
exp_id = self.s.db.get_master_records()[-1].experiment_id
stored_strat = self.s.get_strat_from_replay(exp_id)
# just some spot checks that the strat's the same
# same data. We do this twice to make sure buffers are
# in a good state and we can load twice without crashing
for _ in range(2):
stored_strat = self.s.get_strat_from_replay(exp_id)
self.assertTrue((stored_strat.x == self.s.strat.x).all())
self.assertTrue((stored_strat.y == self.s.strat.y).all())
# same lengthscale and outputscale
self.assertEqual(
stored_strat.model.covar_module.lengthscale,
self.s.strat.model.covar_module.lengthscale,
)
self.assertEqual(
stored_strat.model.covar_module.outputscale,
self.s.strat.model.covar_module.outputscale,
)
def test_pandadf_dump_single(self):
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
ask_request = {"type": "ask", "message": ""}
tell_request = {
"type": "tell",
"message": {"config": {"x": [0.5]}, "outcome": 1},
"extra_info": {},
}
self.s.handle_request(setup_request)
expected_x = [0, 1, 2, 3]
expected_z = list(reversed(expected_x))
expected_y = [x % 2 for x in expected_x]
i = 0
while not self.s.strat.finished:
self.s.handle_request(ask_request)
tell_request["message"]["config"]["x"] = [expected_x[i]]
tell_request["message"]["config"]["z"] = [expected_z[i]]
tell_request["message"]["outcome"] = expected_y[i]
tell_request["extra_info"]["e1"] = 1
tell_request["extra_info"]["e2"] = 2
i = i + 1
self.s.handle_request(tell_request)
exp_id = self.s.db.get_master_records()[-1].experiment_id
out_df = self.s.get_dataframe_from_replay(exp_id)
self.assertTrue((out_df.x == expected_x).all())
self.assertTrue((out_df.z == expected_z).all())
self.assertTrue((out_df.response == expected_y).all())
self.assertTrue((out_df.e1 == [1] * 4).all())
self.assertTrue((out_df.e2 == [2] * 4).all())
self.assertTrue("post_mean" in out_df.columns)
self.assertTrue("post_var" in out_df.columns)
def test_pandadf_dump_multistrat(self):
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
ask_request = {"type": "ask", "message": ""}
tell_request = {
"type": "tell",
"message": {"config": {"x": [0.5]}, "outcome": 1},
"extra_info": {},
}
expected_x = [0, 1, 2, 3]
expected_z = list(reversed(expected_x))
expected_y = [x % 2 for x in expected_x]
i = 0
self.s.handle_request(setup_request)
while not self.s.strat.finished:
self.s.handle_request(ask_request)
tell_request["message"]["config"]["x"] = [expected_x[i]]
tell_request["message"]["config"]["z"] = [expected_z[i]]
tell_request["message"]["outcome"] = expected_y[i]
tell_request["extra_info"]["e1"] = 1
tell_request["extra_info"]["e2"] = 2
i = i + 1
self.s.handle_request(tell_request)
exp_id = self.s.db.get_master_records()[-1].experiment_id
out_df = self.s.get_dataframe_from_replay(exp_id)
self.assertTrue((out_df.x == expected_x).all())
self.assertTrue((out_df.z == expected_z).all())
self.assertTrue((out_df.response == expected_y).all())
self.assertTrue((out_df.e1 == [1] * len(expected_x)).all())
self.assertTrue((out_df.e2 == [2] * len(expected_x)).all())
self.assertTrue("post_mean" in out_df.columns)
self.assertTrue("post_var" in out_df.columns)
def test_pandadf_dump_flat(self):
"""
This test handles the case where the config values are flat
scalars and not lists
"""
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
ask_request = {"type": "ask", "message": ""}
tell_request = {
"type": "tell",
"message": {"config": {"x": [0.5]}, "outcome": 1},
"extra_info": {},
}
self.s.handle_request(setup_request)
expected_x = [0, 1, 2, 3]
expected_z = list(reversed(expected_x))
expected_y = [x % 2 for x in expected_x]
i = 0
while not self.s.strat.finished:
self.s.handle_request(ask_request)
tell_request["message"]["config"]["x"] = expected_x[i]
tell_request["message"]["config"]["z"] = expected_z[i]
tell_request["message"]["outcome"] = expected_y[i]
tell_request["extra_info"]["e1"] = 1
tell_request["extra_info"]["e2"] = 2
i = i + 1
self.s.handle_request(tell_request)
exp_id = self.s.db.get_master_records()[-1].experiment_id
out_df = self.s.get_dataframe_from_replay(exp_id)
self.assertTrue((out_df.x == expected_x).all())
self.assertTrue((out_df.z == expected_z).all())
self.assertTrue((out_df.response == expected_y).all())
self.assertTrue((out_df.e1 == [1] * 4).all())
self.assertTrue((out_df.e2 == [2] * 4).all())
self.assertTrue("post_mean" in out_df.columns)
self.assertTrue("post_var" in out_df.columns)
def test_receive(self):
"""test_receive - verifies the receive is working when server receives unexpected messages"""
message1 = b"\x16\x03\x01\x00\xaf\x01\x00\x00\xab\x03\x03\xa9\x80\xcc" # invalid message
message2 = b"\xec\xec\x14M\xfb\xbd\xac\xe7jF\xbe\xf9\x9bM\x92\x15b\xb5" # invalid message
message3 = {"message": {"target": "test request"}} # valid message
message_list = [message1, message2, json.dumps(message3)]
self.s.socket.conn = MagicMock()
for i, message in enumerate(message_list):
select.select = MagicMock(return_value=[[self.s.socket.conn], [], []])
self.s.socket.conn.recv = MagicMock(return_value=message)
if i != 2:
self.assertEqual(self.s.socket.receive(False), BAD_REQUEST)
else:
self.assertEqual(self.s.socket.receive(False), message3)
def test_error_handling(self):
        # in the f-string below, doubled braces are literal braces and the single
        # brace substitutes, so three braces yield BAD_REQUEST wrapped in braces
request = f"{{{BAD_REQUEST}}}"
expected_error = f"server_error, Request '{request}' raised error ''str' object has no attribute 'keys''!"
self.s.socket.accept_client = MagicMock()
self.s.socket.receive = MagicMock(return_value=request)
self.s.socket.send = MagicMock()
self.s.exit_server_loop = True
with self.assertRaises(SystemExit):
self.s.serve()
self.s.socket.send.assert_called_once_with(expected_error)
def test_queue(self):
"""Test to see that the queue is being handled correctly"""
self.s.socket.accept_client = MagicMock()
ask_request = {"type": "ask", "message": ""}
self.s.socket.receive = MagicMock(return_value=ask_request)
self.s.socket.send = MagicMock()
self.s.exit_server_loop = True
with self.assertRaises(SystemExit):
self.s.serve()
        self.assertEqual(len(self.s.queue), 0)
def test_ax_functionality(self):
config_str = """
[common]
use_ax = True
lb = [0]
ub = [1]
parnames = [x]
stimuli_per_trial = 1
outcome_types = [binary]
strategy_names = [init_strat, opt_strat]
[init_strat]
generator = SobolGenerator
[opt_strat]
generator = OptimizeAcqfGenerator
model = ContinuousRegressionGP
acqf = qNoisyExpectedImprovement
"""
config = Config(config_str=config_str)
configure(self.s, config=config)
self.assertTrue(self.s.use_ax)
self.assertIsInstance(self.s.strat, AEPsychStrategy)
def test_config_to_tensor(self):
with patch(
"aepsych.server.AEPsychServer.parnames", new_callable=PropertyMock
) as mock_parnames:
mock_parnames.return_value = ["par1", "par2", "par3"]
# test single
config = {"par1": 0.0, "par2": 1.0, "par3": 2.0}
tensor = self.s._config_to_tensor(config)
self.assertTrue(torch.equal(tensor, torch.tensor([0.0, 1.0, 2.0])))
config = {"par1": [0.0], "par2": [1.0], "par3": [2.0]}
tensor = self.s._config_to_tensor(config)
self.assertTrue(torch.equal(tensor, torch.tensor([0.0, 1.0, 2.0])))
# test pairwise
config = {"par1": [0.0, 2.0], "par2": [1.0, 1.0], "par3": [2.0, 0.0]}
tensor = self.s._config_to_tensor(config)
self.assertTrue(
torch.equal(tensor, torch.tensor([[0.0, 2.0], [1.0, 1.0], [2.0, 0.0]]))
)
def test_handle_request_untyped(self):
"""test_handle_request_untyped"""
request = {}
# check untyped request
with self.assertRaises(RuntimeError):
self.s.handle_request(request)
def test_handle_request_type_invalid(self):
"""test_handle_request_type_invalid"""
request = {"type": "invalid"}
# make sure invalid types handle properly
with self.assertRaises(RuntimeError):
self.s.handle_request(request)
def test_serve_handle_request(self):
"""Tests that the full pipeline is working. Message should go from _receive_send to _handle_queue
to the version handler"""
request = {"version": 0}
self.s.socket.receive = MagicMock(return_value=request)
self.s.socket.accept_client = MagicMock()
        self.s.handle_request = MagicMock()
self.s.exit_server_loop = True
with self.assertRaises(SystemExit):
self.s.serve()
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/server/test_server.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from aepsych.config import Config
from ..test_server import BaseServerTestCase, dummy_config
class GetConfigTestCase(BaseServerTestCase):
def test_get_config(self):
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
get_config_request = {"type": "get_config", "message": {}}
self.s.handle_request(setup_request)
config_dict = self.s.handle_request(get_config_request)
true_config_dict = Config(config_str=dummy_config).to_dict(deduplicate=False)
self.assertEqual(config_dict, true_config_dict)
get_config_request["message"] = {
"section": "init_strat",
"property": "min_asks",
}
response = self.s.handle_request(get_config_request)
self.assertEqual(response, true_config_dict["init_strat"]["min_asks"])
get_config_request["message"] = {"section": "init_strat", "property": "lb"}
response = self.s.handle_request(get_config_request)
self.assertEqual(response, true_config_dict["init_strat"]["lb"])
get_config_request["message"] = {"property": "min_asks"}
with self.assertRaises(RuntimeError):
response = self.s.handle_request(get_config_request)
get_config_request["message"] = {"section": "init_strat"}
with self.assertRaises(RuntimeError):
response = self.s.handle_request(get_config_request)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/server/message_handlers/test_handle_get_config.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
aepsych-main
|
tests/server/message_handlers/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import MagicMock
from ..test_server import BaseServerTestCase
class HandleExitTestCase(BaseServerTestCase):
def test_handle_exit(self):
request = {}
request["type"] = "exit"
self.s.socket.accept_client = MagicMock()
self.s.socket.receive = MagicMock(return_value=request)
self.s.dump = MagicMock()
with self.assertRaises(SystemExit) as cm:
self.s.serve()
self.assertEqual(cm.exception.code, 0)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/server/message_handlers/test_handle_exit.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from ..test_server import BaseServerTestCase, dummy_config
class QueryHandlerTestCase(BaseServerTestCase):
def test_strat_query(self):
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
ask_request = {"type": "ask", "message": ""}
tell_request = {
"type": "tell",
"message": [
{"config": {"x": [0.5]}, "outcome": 1},
{"config": {"x": [0.0]}, "outcome": 0},
{"config": {"x": [1]}, "outcome": 0},
],
}
self.s.handle_request(setup_request)
while not self.s.strat.finished:
self.s.handle_request(ask_request)
self.s.handle_request(tell_request)
query_max_req = {
"type": "query",
"message": {
"query_type": "max",
},
}
query_min_req = {
"type": "query",
"message": {
"query_type": "min",
},
}
query_pred_req = {
"type": "query",
"message": {
"query_type": "prediction",
"x": {"x": [0.0]},
},
}
query_inv_req = {
"type": "query",
"message": {
"query_type": "inverse",
"y": 5.0,
},
}
response_max = self.s.handle_request(query_max_req)
response_min = self.s.handle_request(query_min_req)
response_pred = self.s.handle_request(query_pred_req)
response_inv = self.s.handle_request(query_inv_req)
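        # every query type returns the queried/optimal point as a param dict
        # and the model value there as a float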
for response in [response_max, response_min, response_pred, response_inv]:
self.assertTrue(type(response["x"]) is dict)
self.assertTrue(len(response["x"]["x"]) == 1)
self.assertTrue(type(response["y"]) is float)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/server/message_handlers/test_query_handlers.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from ..test_server import BaseServerTestCase, dummy_config
class StratCanModelTestCase(BaseServerTestCase):
def test_strat_can_model(self):
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
ask_request = {"type": "ask", "message": ""}
tell_request = {
"type": "tell",
"message": [
{"config": {"x": [0.5]}, "outcome": 1},
],
}
can_model_request = {
"type": "can_model",
"message": {},
}
self.s.handle_request(setup_request)
# At the start there is no model, so can_model returns false
response = self.s.handle_request(can_model_request)
self.assertTrue(response["can_model"] == 0)
self.s.handle_request(ask_request)
self.s.handle_request(tell_request)
self.s.handle_request(ask_request)
self.s.handle_request(tell_request)
self.s.handle_request(ask_request)
# Dummy config has 2 init trials; so after third ask, can_model returns true
response = self.s.handle_request(can_model_request)
self.assertTrue(response["can_model"] == 1)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/server/message_handlers/test_can_model.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from ..test_server import BaseServerTestCase, dummy_config
class FinishStrategyTestCase(BaseServerTestCase):
def test_handle_finish_strategy(self):
setup_request = {
"type": "setup",
"message": {"config_str": dummy_config},
}
tell_request = {
"type": "tell",
"message": {"config": {"x": [0.5]}, "outcome": 1},
}
ask_request = {"type": "ask", "message": ""}
strat_name_request = {"type": "info"}
finish_strat_request = {"type": "finish_strategy"}
self.s.handle_request(setup_request)
strat_name = self.s.handle_request(strat_name_request)["current_strat_name"]
self.assertEqual(strat_name, "init_strat")
# model-based strategies require data
self.s.handle_request(tell_request)
msg = self.s.handle_request(finish_strat_request)
self.assertEqual(msg, "finished strategy init_strat")
# need to gen another trial to move to next strategy
self.s.handle_request(ask_request)
strat_name = self.s.handle_request(strat_name_request)["current_strat_name"]
self.assertEqual(strat_name, "opt_strat")
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/server/message_handlers/test_handle_finish_strategy.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import MagicMock
from ..test_server import BaseServerTestCase, dummy_config
class MessageHandlerTellTests(BaseServerTestCase):
def test_tell(self):
setup_request = {
"type": "setup",
"message": {"config_str": dummy_config},
}
tell_request = {
"type": "tell",
"message": {"config": {"x": [0.5]}, "outcome": 1},
}
self.s.db.record_message = MagicMock()
self.s.handle_request(setup_request)
self.s.handle_request(tell_request)
self.assertEqual(self.s.db.record_message.call_count, 1)
self.assertEqual(len(self.s.strat.x), 1)
tell_request["message"]["model_data"] = False
self.s.handle_request(tell_request)
self.assertEqual(self.s.db.record_message.call_count, 2)
self.assertEqual(len(self.s.strat.x), 1)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/server/message_handlers/test_tell_handlers.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from sklearn.datasets import make_classification
from aepsych.models import GPClassificationModel
from aepsych.models.utils import select_inducing_points
class UtilsTestCase(unittest.TestCase):
def test_select_inducing_points(self):
"""Verify that when we have n_induc > data size, we use data as inducing,
and otherwise we correctly select inducing points."""
X, y = make_classification(
n_samples=100,
n_features=1,
n_redundant=0,
n_informative=1,
random_state=1,
n_clusters_per_class=1,
)
X, y = torch.Tensor(X), torch.Tensor(y)
inducing_size = 20
model = GPClassificationModel(
torch.Tensor([-3]), torch.Tensor([3]), inducing_size=inducing_size
)
model.set_train_data(X[:10, ...], y[:10])
# (inducing point selection sorts the inputs so we sort X to verify)
self.assertTrue(
np.allclose(
select_inducing_points(
inducing_size=inducing_size,
covar_module=model.covar_module,
X=model.train_inputs[0],
bounds=model.bounds,
method="auto",
),
X[:10].sort(0).values,
)
)
model.set_train_data(X, y)
self.assertTrue(
len(
select_inducing_points(
inducing_size=inducing_size,
covar_module=model.covar_module,
X=model.train_inputs[0],
bounds=model.bounds,
method="auto",
)
)
<= 20
)
self.assertTrue(
len(
select_inducing_points(
inducing_size=inducing_size,
covar_module=model.covar_module,
X=model.train_inputs[0],
bounds=model.bounds,
method="pivoted_chol",
)
)
<= 20
)
self.assertEqual(
len(
select_inducing_points(
inducing_size=inducing_size,
covar_module=model.covar_module,
X=model.train_inputs[0],
bounds=model.bounds,
method="kmeans++",
)
),
20,
)
with self.assertRaises(AssertionError):
select_inducing_points(
inducing_size=inducing_size,
covar_module=model.covar_module,
X=model.train_inputs[0],
bounds=model.bounds,
method="12345",
)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/models/test_utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import torch
# run on single threads to keep us from deadlocking weirdly in CI
if "CI" in os.environ or "SANDCASTLE" in os.environ:
torch.set_num_threads(1)
import numpy as np
from aepsych.config import Config
from aepsych.models.monotonic_projection_gp import MonotonicProjectionGP
from sklearn.datasets import make_classification
class MonotonicProjectionGPtest(unittest.TestCase):
def setUp(self):
np.random.seed(1)
torch.manual_seed(1)
X, y = make_classification(
n_samples=25,
n_features=3,
n_redundant=0,
n_informative=3,
random_state=1,
n_clusters_per_class=1,
)
self.X, self.y = torch.Tensor(X), torch.Tensor(y)
def test_posterior(self):
X, y = self.X, self.y
model = MonotonicProjectionGP(
lb=torch.Tensor([-4, -4, -4]),
ub=torch.Tensor([4, 4, 4]),
inducing_size=10,
monotonic_dims=[0, 1],
)
model.fit(X, y)
# Check that it is monotonic in both dims
for i in range(2):
Xtest = torch.zeros(3, 3)
Xtest[:, i] = torch.tensor([-1, 0, 1])
post = model.posterior(Xtest)
mu = post.mean.squeeze()
self.assertTrue(
torch.equal(
torch.tensor([0, 1, 2], dtype=torch.long),
torch.argsort(mu),
)
)
# Check that min_f_val is respected
model = MonotonicProjectionGP(
lb=torch.Tensor([-4]),
ub=torch.Tensor([4]),
inducing_size=10,
monotonic_dims=[0],
min_f_val=5.0,
)
model.fit(X, y)
post = model.posterior(Xtest)
mu = post.mean.squeeze()
self.assertTrue(mu.min().item() >= 4.9)
# And in samples
samps = model.sample(Xtest, num_samples=10)
self.assertTrue(samps.min().item() >= 4.9)
def test_from_config(self):
config_str = """
[common]
parnames = [x, y]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
strategy_names = [init_strat]
[init_strat]
generator = OptimizeAcqfGenerator
model = MonotonicProjectionGP
[MonotonicProjectionGP]
monotonic_dims = [0]
monotonic_grid_size = 10
min_f_val = 0.1
"""
config = Config(config_str=config_str)
model = MonotonicProjectionGP.from_config(config)
self.assertEqual(model.monotonic_dims, [0])
self.assertEqual(model.mon_grid_size, 10)
self.assertEqual(model.min_f_val, 0.1)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/models/test_monotonic_projection_gp.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import numpy as np
import torch
from aepsych.generators import SobolGenerator
from aepsych.models import IndependentMultitaskGPRModel, MultitaskGPRModel
from parameterized import parameterized
# run on single threads to keep us from deadlocking weirdly in CI
if "CI" in os.environ or "SANDCASTLE" in os.environ:
torch.set_num_threads(1)
models = [
(MultitaskGPRModel(num_outputs=2, rank=2, lb=[-1], ub=[3]),),
(IndependentMultitaskGPRModel(num_outputs=2, lb=[-1], ub=[3]),),
]
class MultitaskGPRegressionTest(unittest.TestCase):
def setUp(self):
np.random.seed(0)
torch.manual_seed(0)
generator = SobolGenerator(lb=[-1], ub=[3], dim=1)
self.x = generator.gen(50)
f1 = self.x**3 - 4 * self.x**2 + np.random.normal() * 0.01
f2 = self.x**2 - 7 * self.x + np.random.normal() * 0.01
self.f = torch.cat((f1, f2), dim=-1)
self.xtest = generator.gen(10)
ytrue1 = self.xtest**3 - 4 * self.xtest**2
ytrue2 = self.xtest**2 - 7 * self.xtest
self.ytrue = torch.cat((ytrue1, ytrue2), dim=-1)
@parameterized.expand(models)
def test_mtgpr_smoke(self, model):
model.fit(self.x, self.f)
ypred, _ = model.predict(self.xtest)
self.assertTrue(np.allclose(self.ytrue, ypred, atol=0.1)) # loose smoke test
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/models/test_multitask_regression.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import uuid
import numpy as np
import numpy.testing as npt
import torch
from aepsych.server import AEPsychServer
from gpytorch.likelihoods import GaussianLikelihood
from aepsych.server.message_handlers.handle_ask import ask
from aepsych.server.message_handlers.handle_tell import tell
from aepsych.server.message_handlers.handle_setup import configure
# run on single threads to keep us from deadlocking weirdly in CI
if "CI" in os.environ or "SANDCASTLE" in os.environ:
torch.set_num_threads(1)
class GPRegressionTest(unittest.TestCase):
def f(self, x):
return x**3 - 4 * x**2 + np.random.normal() * 0.1
def simulate_response(self, trial_params):
x = trial_params["par1"][0]
response = self.f(x)
return response
def setUp(self):
np.random.seed(0)
torch.manual_seed(0)
dbname = "./{}.db".format(str(uuid.uuid4().hex))
config = """
[common]
parnames = [par1]
lb = [-1]
ub = [3]
stimuli_per_trial=1
outcome_types=[continuous]
strategy_names = [init_strat, opt_strat]
[init_strat]
min_asks = 10
generator = SobolGenerator
[opt_strat]
min_asks = 5
generator = OptimizeAcqfGenerator
model = GPRegressionModel
acqf = qNoisyExpectedImprovement
[GPRegressionModel]
likelihood = GaussianLikelihood
max_fit_time = 1
"""
self.server = AEPsychServer(database_path=dbname)
configure(self.server, config_str=config)
while not self.server.strat.finished:
trial_params = ask(self.server)
outcome = self.simulate_response(trial_params)
tell(self.server, outcome, trial_params)
def tearDown(self):
self.server.db.delete_db()
def test_extremum(self):
tol = 0.2 # don't need to be super precise because it's small data
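# f(x) = x^3 - 4x^2 on [-1, 3]: max f(0) = 0 at x = 0, min f(8/3) = -256/27 at x = 8/3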
fmax, argmax = self.server.strat.get_max()
npt.assert_allclose(fmax, 0, atol=tol)
npt.assert_allclose(argmax, 0, atol=tol)
fmin, argmin = self.server.strat.get_min()
npt.assert_allclose(fmin, -256 / 27, atol=tol)
npt.assert_allclose(argmin, 8 / 3, atol=tol)
val, arg = self.server.strat.inv_query(0)
npt.assert_allclose(val, 0, atol=tol)
npt.assert_allclose(arg, 0, atol=tol)
def test_from_config(self):
model = self.server.strat.model
npt.assert_allclose(model.lb, [-1.0])
npt.assert_allclose(model.ub, [3.0])
self.assertEqual(model.dim, 1)
self.assertIsInstance(model.likelihood, GaussianLikelihood)
self.assertEqual(model.max_fit_time, 1)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/models/test_gp_regression.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
aepsych-main
|
tests/models/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import numpy.testing as npt
import torch
from aepsych.acquisition import MCPosteriorVariance
from aepsych.acquisition.lookahead import GlobalMI
from aepsych.acquisition.objective import (
FloorGumbelObjective,
FloorLogitObjective,
FloorProbitObjective,
ProbitObjective,
)
from aepsych.generators import OptimizeAcqfGenerator, SobolGenerator
from aepsych.likelihoods import BernoulliObjectiveLikelihood
from aepsych.strategy import SequentialStrategy, Strategy
from aepsych.acquisition.objective.semi_p import (
SemiPProbabilityObjective,
SemiPThresholdObjective,
)
from aepsych.likelihoods.semi_p import LinearBernoulliLikelihood
from aepsych.models import HadamardSemiPModel, SemiParametricGPModel
from aepsych.models.semi_p import (
_hadamard_mvn_approx,
semi_p_posterior_transform,
)
from gpytorch.distributions import MultivariateNormal
from parameterized import parameterized
def _hadamard_model_constructor(lb, ub, stim_dim, floor, objective=FloorLogitObjective):
return HadamardSemiPModel(
lb=lb,
ub=ub,
stim_dim=stim_dim,
likelihood=BernoulliObjectiveLikelihood(objective=objective(floor=floor)),
inducing_size=10,
inducing_point_method="auto",
max_fit_time=0.5,
)
def _semip_model_constructor(lb, ub, stim_dim, floor, objective=FloorLogitObjective):
return SemiParametricGPModel(
lb=lb,
ub=ub,
stim_dim=stim_dim,
likelihood=LinearBernoulliLikelihood(objective=objective(floor=floor)),
inducing_size=10,
inducing_point_method="auto",
)
links = [FloorLogitObjective, FloorProbitObjective, FloorGumbelObjective]
floors = [0, 0.3, 0.5]
constructors = [_semip_model_constructor, _hadamard_model_constructor]
test_configs = [[FloorLogitObjective, 0.3, _hadamard_model_constructor]]
# test_configs = list(itertools.product(links, floors, constructors))  # TODO too slow
class SemiPSmokeTests(unittest.TestCase):
def setUp(self):
self.seed = 1
self.stim_dim = 0
self.context_dim = 1
np.random.seed(1)
torch.manual_seed(1)
X = np.random.randn(100, 2) / 3
xcontext = X[..., self.context_dim]
xintensity = X[..., self.stim_dim]
# polynomial context
slope = (
xcontext - 0.7 * xcontext**2 + 0.3 * xcontext**3 - 0.1 * xcontext**4
)
intercept = (
xcontext + 0.03 * xcontext**5 - 0.2 * xcontext**3 - 0.7 * xcontext**4
)
# multiply by intensity
self.f = torch.Tensor(slope * (intercept + xintensity)).unsqueeze(-1)
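# rescale the two dims to very different magnitudes so the model must handle
# mismatched input scales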
X[:, 0] = X[:, 0] * 100
X[:, 1] = X[:, 1] / 100
self.lb = [-100, -0.01]
self.ub = [100, 0.01]
self.X = torch.Tensor(X)
@parameterized.expand(
[(SemiPThresholdObjective(target=0.75),), (SemiPProbabilityObjective(),)]
)
def test_mc_generation(self, objective):
# no objective here, the objective for `gen` is not the same as the objective
# for the likelihood in this case
model = SemiParametricGPModel(
lb=self.lb,
ub=self.ub,
stim_dim=self.stim_dim,
likelihood=LinearBernoulliLikelihood(),
inducing_size=10,
inducing_point_method="auto",
)
generator = OptimizeAcqfGenerator(
acqf=MCPosteriorVariance,
acqf_kwargs={"objective": objective},
max_gen_time=0.1,
)
y = torch.bernoulli(model.likelihood.objective(self.f))
model.set_train_data(
self.X[:10], y[:10]
) # no need to fit for checking gen shapes
next_x = generator.gen(num_points=1, model=model)
self.assertEqual(next_x.shape, (1, 2))
def test_analytic_lookahead_generation(self):
floor = 0
objective = FloorProbitObjective
model = _semip_model_constructor(
lb=self.lb,
ub=self.ub,
stim_dim=self.stim_dim,
floor=floor,
objective=objective,
)
generator = OptimizeAcqfGenerator(
acqf=GlobalMI,
acqf_kwargs={
"posterior_transform": semi_p_posterior_transform,
"target": 0.75,
"query_set_size": 100,
},
max_gen_time=0.2,
)
link = objective(floor=floor)
y = torch.bernoulli(link(self.f))
model.set_train_data(
self.X[:10], y[:10]
) # no need to fit for checking gen shapes
next_x = generator.gen(num_points=1, model=model)
self.assertEqual(next_x.shape, (1, 2))
@parameterized.expand(test_configs)
@unittest.skip("Slow smoke test, TODO speed me up")
def test_memorize_data(self, objective, floor, model_constructor):
"""
See whether we get approximate accuracy on an easy logistic psychometric
function that varies only in 1d (no context-dependent slope or intercept).
Accuracy is judged by average performance on the training data.
"""
with self.subTest(
objective=objective.__name__,
floor=floor,
model_constructor=model_constructor,
):
link = objective(floor=floor)
y = torch.bernoulli(link(self.f))
model = model_constructor(
lb=self.lb,
ub=self.ub,
stim_dim=self.stim_dim,
floor=floor,
objective=objective,
)
model.fit(train_x=self.X[:50], train_y=y[:50])
pm, _ = model.predict(self.X[:50])
pred = (link(pm) > 0.5).numpy()
npt.assert_allclose(pred, y[:50].numpy(), atol=1)  # atol=1 tolerates binary mismatches; loose smoke check
model.update(self.X, y)
pm, _ = model.predict(self.X[50:])
pred = (link(pm) > 0.5).numpy()
npt.assert_allclose(pred, y[50:].numpy(), atol=1)
@parameterized.expand([(_semip_model_constructor,), (_hadamard_model_constructor,)])
def test_prediction_shapes(self, model_constructor):
n_opt = 1
lb = [-1, -1]
ub = [1, 1]
with self.subTest(model_constructor=model_constructor):
model = model_constructor(lb=lb, ub=ub, stim_dim=self.stim_dim, floor=0)
strat_list = [
Strategy(
lb=lb,
ub=ub,
model=model,
generator=SobolGenerator(lb=lb, ub=ub, seed=self.seed),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
train_x = torch.tensor([[0.0, 0.0], [2.0, 1.0], [2.0, 2.0]])
train_y = torch.tensor([1.0, 1.0, 0.0])
model.fit(train_x=train_x, train_y=train_y)
f, var = model.predict(train_x)
self.assertEqual(f.shape, torch.Size([3]))
self.assertEqual(var.shape, torch.Size([3]))
p, pvar = model.predict(train_x, probability_space=True)
self.assertEqual(p.shape, torch.Size([3]))
self.assertEqual(pvar.shape, torch.Size([3]))
if isinstance(model, SemiParametricGPModel):
samps = model.sample(train_x, 11, probability_space=True)
self.assertEqual(samps.shape, torch.Size([11, 3]))
post = model.posterior(train_x)
self.assertEqual(post.mvn.mean.shape, torch.Size([2, 3]))
self.assertTrue(torch.equal(post.Xi, torch.tensor([0.0, 2.0, 2.0])))
samps = post.rsample(sample_shape=torch.Size([6]))
# samps should be n_samp x 2 (slope, intercept) * 3 (xshape)
self.assertEqual(samps.shape, torch.Size([6, 2, 3]))
# now check posterior samp sizes. They have
# an extra dim (since it's 1d outcome), which
# model.sample squeezes, except for thresh sampling
# which is already squeezed by the threshold objective
# TODO be more consistent with how we use dims
post = model.posterior(train_x)
p_samps = post.sample_p(torch.Size([6]))
self.assertEqual(p_samps.shape, torch.Size([6, 1, 3]))
f_samps = post.sample_f(torch.Size([6]))
self.assertEqual(f_samps.shape, torch.Size([6, 1, 3]))
thresh_samps = post.sample_thresholds(
threshold_level=0.75, sample_shape=torch.Size([6])
)
self.assertEqual(thresh_samps.shape, torch.Size([6, 3]))
strat.add_data(train_x, train_y)
Xopt = strat.gen()
self.assertEqual(Xopt.shape, torch.Size([1, 2]))
@parameterized.expand([(_semip_model_constructor,), (_hadamard_model_constructor,)])
def test_reset_variational_strategy(self, model_constructor):
lb = [-3, -3]
ub = [3, 3]
stim_dim = 0
with self.subTest(model_constructor=model_constructor):
model = model_constructor(lb=lb, ub=ub, stim_dim=stim_dim, floor=0)
link = FloorLogitObjective(floor=0)
y = torch.bernoulli(link(self.f))
variational_params_before = [
v.clone().detach().numpy() for v in model.variational_parameters()
]
induc_before = model.variational_strategy.inducing_points
model.fit(torch.Tensor(self.X[:15]), torch.Tensor(y[:15]))
variational_params_after = [
v.clone().detach().numpy() for v in model.variational_parameters()
]
induc_after = model.variational_strategy.inducing_points
model._reset_variational_strategy()
variational_params_reset = [
v.clone().detach().numpy() for v in model.variational_parameters()
]
induc_reset = model.variational_strategy.inducing_points
# before should be different from after and after should be different
# from reset
self.assertFalse(np.allclose(induc_before, induc_after))
self.assertFalse(np.allclose(induc_after, induc_reset))
for before, after in zip(
variational_params_before, variational_params_after
):
self.assertFalse(np.allclose(before, after))
for after, reset in zip(variational_params_after, variational_params_reset):
self.assertFalse(np.allclose(after, reset))
def test_slope_mean_setting(self):
for slope_mean in (2, 4):
model = SemiParametricGPModel(
lb=self.lb,
ub=self.ub,
stim_dim=self.stim_dim,
likelihood=LinearBernoulliLikelihood(),
inducing_size=10,
slope_mean=slope_mean,
inducing_point_method="auto",
)
with self.subTest(model=model, slope_mean=slope_mean):
self.assertEqual(model.mean_module.constant[-1], slope_mean)
model = HadamardSemiPModel(
lb=self.lb,
ub=self.ub,
stim_dim=self.stim_dim,
likelihood=BernoulliObjectiveLikelihood(objective=ProbitObjective()),
inducing_size=10,
slope_mean=slope_mean,
inducing_point_method="auto",
)
with self.subTest(model=model, slope_mean=slope_mean):
self.assertEqual(model.slope_mean_module.constant.item(), slope_mean)
class HadamardSemiPtest(unittest.TestCase):
def setUp(self):
np.random.seed(1)
torch.manual_seed(1)
stim_dim = 0
X = torch.randn(100, 2)
self.X = X
link = ProbitObjective()
self.y = torch.bernoulli(link(X[:, stim_dim]))
def test_reset_hyperparams(self):
model = HadamardSemiPModel(lb=[-3, -3], ub=[3, 3], inducing_size=20)
slope_os_before = model.slope_covar_module.outputscale.clone().detach().numpy()
offset_os_before = (
model.offset_covar_module.outputscale.clone().detach().numpy()
)
slope_ls_before = (
model.slope_covar_module.base_kernel.lengthscale.clone().detach().numpy()
)
offset_ls_before = (
model.offset_covar_module.base_kernel.lengthscale.clone().detach().numpy()
)
model.fit(self.X[:15], self.y[:15])
slope_os_after = model.slope_covar_module.outputscale.clone().detach().numpy()
offset_os_after = model.offset_covar_module.outputscale.clone().detach().numpy()
slope_ls_after = (
model.slope_covar_module.base_kernel.lengthscale.clone().detach().numpy()
)
offset_ls_after = (
model.offset_covar_module.base_kernel.lengthscale.clone().detach().numpy()
)
model._reset_hyperparameters()
slope_os_reset = model.slope_covar_module.outputscale.clone().detach().numpy()
offset_os_reset = model.offset_covar_module.outputscale.clone().detach().numpy()
slope_ls_reset = (
model.slope_covar_module.base_kernel.lengthscale.clone().detach().numpy()
)
offset_ls_reset = (
model.offset_covar_module.base_kernel.lengthscale.clone().detach().numpy()
)
# before should be different from after and after should be different
# from reset but before and reset should be same
self.assertFalse(np.allclose(slope_os_before, slope_os_after))
self.assertFalse(np.allclose(slope_os_after, slope_os_reset))
self.assertTrue(np.allclose(slope_os_before, slope_os_reset))
self.assertFalse(np.allclose(slope_ls_before, slope_ls_after))
self.assertFalse(np.allclose(slope_ls_after, slope_ls_reset))
self.assertTrue(np.allclose(slope_ls_before, slope_ls_reset))
self.assertFalse(np.allclose(offset_os_before, offset_os_after))
self.assertFalse(np.allclose(offset_os_after, offset_os_reset))
self.assertTrue(np.allclose(offset_os_before, offset_os_reset))
self.assertFalse(np.allclose(offset_ls_before, offset_ls_after))
self.assertFalse(np.allclose(offset_ls_after, offset_ls_reset))
self.assertTrue(np.allclose(offset_ls_before, offset_ls_reset))
def _make_psd_matrix(self, size):
L = torch.randn((size, size))
return L @ L.T
def test_normal_approx(self):
np.random.seed(123)
torch.manual_seed(123)
npoints = 10
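# _hadamard_mvn_approx moment-matches the (non-Gaussian) distribution of
# k * (x + c) for k ~ N(kmean, kcov), c ~ N(cmean, ccov); here we compare it
# against Monte Carlo estimates of the same mean and covariance.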
def make_samp_and_approx_mvns(kcov_scale=1.0, ccov_scale=1.0):
X = torch.randn(npoints)
kmean = torch.randn(npoints)
cmean = torch.randn(npoints)
kcov = self._make_psd_matrix(npoints) * kcov_scale
ccov = self._make_psd_matrix(npoints) * ccov_scale
k_mvn = MultivariateNormal(kmean, kcov)
c_mvn = MultivariateNormal(cmean, ccov)
ksamps = k_mvn.sample(torch.Size([1000]))
csamps = c_mvn.sample(torch.Size([1000]))
samp_mean = (ksamps * (X + csamps)).mean(0)
samp_cov = (ksamps * (X + csamps)).T.cov()
approx_mean, approx_cov = _hadamard_mvn_approx(
X, slope_mean=kmean, slope_cov=kcov, offset_mean=cmean, offset_cov=ccov
)
return samp_mean, samp_cov, approx_mean, approx_cov
# verify that as kvar approaches 0, approx improves on average
mean_errs = []
cov_errs = []
for kcov_scale in [1e-5, 1e-2, 1]:
mean_err = 0
cov_err = 0
for _rep in range(100):
(
samp_mean,
samp_cov,
approx_mean,
approx_cov,
) = make_samp_and_approx_mvns(kcov_scale=kcov_scale)
mean_err += (samp_mean - approx_mean).abs().mean().item()
cov_err += (samp_cov - approx_cov).abs().mean().item()
mean_errs.append(mean_err / 100)
cov_errs.append(cov_err / 100)
npt.assert_equal(mean_errs, sorted(mean_errs))
npt.assert_equal(cov_errs, sorted(cov_errs))
# verify that as cvar approaches 0, approx improves on average
mean_errs = []
cov_errs = []
for ccov_scale in [1e-5, 1e-2, 1]:
mean_err = 0
cov_err = 0
for _rep in range(100):
(
samp_mean,
samp_cov,
approx_mean,
approx_cov,
) = make_samp_and_approx_mvns(ccov_scale=ccov_scale)
mean_err += (samp_mean - approx_mean).abs().mean().item()
cov_err += (samp_cov - approx_cov).abs().mean().item()
mean_errs.append(mean_err / 100)
cov_errs.append(cov_err / 100)
npt.assert_equal(mean_errs, sorted(mean_errs))
npt.assert_equal(cov_errs, sorted(cov_errs))
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/models/test_semi_p.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from aepsych.kernels.rbf_partial_grad import RBFKernelPartialObsGrad
from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad
from aepsych.models.derivative_gp import MixedDerivativeVariationalGP
from botorch.fit import fit_gpytorch_mll
from botorch.utils.testing import BotorchTestCase
from gpytorch.likelihoods import BernoulliLikelihood
from gpytorch.mlls.variational_elbo import VariationalELBO
class TestDerivativeGP(BotorchTestCase):
def testKernel(self):
K = RBFKernelPartialObsGrad(ard_num_dims=2)
x1 = torch.cat((torch.rand(5, 2), torch.zeros(5, 1)), dim=1)
x2 = torch.cat((torch.rand(3, 2), torch.ones(3, 1)), dim=1)
self.assertEqual(K.forward(x1, x2).shape, torch.Size([5, 3]))
def testMean(self):
mu = ConstantMeanPartialObsGrad()
mu.constant.requires_grad_(False)
mu.constant.copy_(torch.tensor(5.0))
mu.constant.requires_grad_(True)
x1 = torch.cat((torch.rand(5, 2), torch.zeros(5, 1)), dim=1)
x2 = torch.cat((torch.rand(3, 2), torch.ones(3, 1)), dim=1)
input = torch.cat((x1, x2))
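# the last input column flags derivative observations: function rows (0) get
# the constant mean, derivative rows (1) get mean 0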
z = mu(input)
self.assertTrue(
torch.equal(z, torch.tensor([5.0, 5.0, 5.0, 5.0, 5.0, 0.0, 0.0, 0.0]))
)
def testMixedDerivativeVariationalGP(self):
train_x = torch.cat(
(torch.tensor([1.0, 2.0, 3.0, 4.0]).unsqueeze(1), torch.zeros(4, 1)), dim=1
)
train_y = torch.tensor([1.0, 2.0, 3.0, 4.0])
m = MixedDerivativeVariationalGP(
train_x=train_x,
train_y=train_y,
inducing_points=train_x,
fixed_prior_mean=0.5,
)
self.assertEqual(m.mean_module.constant.item(), 0.5)
self.assertEqual(
m.covar_module.base_kernel.raw_lengthscale.shape, torch.Size([1, 1])
)
mll = VariationalELBO(
likelihood=BernoulliLikelihood(), model=m, num_data=train_y.numel()
)
mll = fit_gpytorch_mll(mll)
test_x = torch.tensor([[1.0, 0], [3.0, 1.0]])
m(test_x)
|
aepsych-main
|
tests/models/test_derivative_gp.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import numpy.testing as npt
import torch
from aepsych.likelihoods.ordinal import OrdinalLikelihood
from aepsych.models import BinaryClassificationGP
from aepsych.models.variational_gp import BetaRegressionGP, OrdinalGP
from botorch.fit import fit_gpytorch_mll
from gpytorch.likelihoods import BernoulliLikelihood
from gpytorch.mlls import VariationalELBO
from sklearn.datasets import make_classification, make_regression
class BinaryClassificationGPTestCase(unittest.TestCase):
"""
Super basic smoke test to make sure we know if we broke the underlying model
for the single-probit ("1AFC") case
"""
def setUp(self):
np.random.seed(1)
torch.manual_seed(1)
X, y = make_classification(
n_samples=10,
n_features=1,
n_redundant=0,
n_informative=1,
random_state=1,
n_clusters_per_class=1,
)
self.X, self.y = torch.Tensor(X), torch.Tensor(y).reshape(-1, 1)
def test_1d_classification(self):
"""
Just see if we memorize the training set
"""
X, y = self.X, self.y
model = BinaryClassificationGP(
train_X=X, train_Y=y, likelihood=BernoulliLikelihood(), inducing_points=10
)
mll = VariationalELBO(model.likelihood, model.model, len(y))
fit_gpytorch_mll(mll)
# pspace
pm, pv = model.predict_probability(X)
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred.reshape(-1, 1), y)
npt.assert_array_less(pv, 1)
# fspace
pm, pv = model.predict(X)
pred = (pm > 0).numpy()
npt.assert_allclose(pred.reshape(-1, 1), y)
npt.assert_array_less(1, pv)
class AxBetaRegressionGPTestCase(unittest.TestCase):
@classmethod
def setUp(cls):
np.random.seed(1)
torch.manual_seed(1)
X, y = make_regression(
n_samples=7,
n_features=3,
n_informative=1,
random_state=1,
)
# Rescale the target values to the range [0, 1]
y = (y - y.min()) / (y.max() - y.min())
cls.X, cls.y = torch.Tensor(X), torch.Tensor(y).reshape(-1, 1)
def test_1d_regression(self):
X, y = self.X, self.y
model = BetaRegressionGP(train_X=X, train_Y=y, inducing_points=10)
mll = VariationalELBO(model.likelihood, model.model, len(y))
fit_gpytorch_mll(mll)
pm, pv = model.predict(X)
npt.assert_allclose(pm.reshape(-1, 1), y, atol=0.1)
npt.assert_array_less(pv, 1)
class AxOrdinalGPTestCase(unittest.TestCase):
@classmethod
def setUp(cls):
np.random.seed(1)
torch.manual_seed(1)
cls.n_levels = 5
X, y = make_classification(
n_samples=20,
n_features=5,
n_classes=cls.n_levels,
n_informative=3,
n_clusters_per_class=1,
)
cls.X, cls.y = torch.Tensor(X), torch.Tensor(y).reshape(-1, 1)
def test_ordinal_classification(self):
model = OrdinalGP(
train_X=self.X,
train_Y=self.y,
likelihood=OrdinalLikelihood(n_levels=self.n_levels),
inducing_points=2000,
)
mll = VariationalELBO(model.likelihood, model.model, len(self.y))
fit_gpytorch_mll(mll)
# pspace
probs = model.predict_probability(self.X)
pred = np.argmax(probs.detach().numpy(), axis=1).reshape(-1, 1)
clipped_pred = np.clip(pred, 0, self.n_levels)
npt.assert_allclose(clipped_pred, pred, atol=1, rtol=1)
npt.assert_allclose(pred, self.y, atol=1, rtol=1)
# fspace
pm, pv = model.predict(self.X)
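# crude mapping from the latent mean to a level index; the checks below are
# intentionally very loose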
pred = np.floor(self.n_levels * pm).reshape(-1, 1)
pred_var = (self.n_levels * pv).reshape(-1, 1)
clipped_pred = np.clip(pred, 0, self.n_levels)
npt.assert_allclose(clipped_pred, pred, atol=3, rtol=self.n_levels)
npt.assert_allclose(pred, self.y, atol=3, rtol=self.n_levels)
npt.assert_allclose(
pred_var, np.ones_like(pred_var), atol=self.n_levels, rtol=self.n_levels
)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/models/test_variational_gp.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import unittest
import uuid
import numpy as np
import numpy.testing as npt
import torch
from aepsych import server, utils_logging
from aepsych.acquisition.objective import ProbitObjective
from aepsych.config import Config
from aepsych.generators import OptimizeAcqfGenerator, SobolGenerator
from aepsych.models import PairwiseProbitModel
from aepsych.server.message_handlers.handle_ask import ask
from aepsych.server.message_handlers.handle_setup import configure
from aepsych.server.message_handlers.handle_tell import tell
from aepsych.strategy import SequentialStrategy, Strategy
from botorch.acquisition import qUpperConfidenceBound
from botorch.acquisition.active_learning import PairwiseMCPosteriorVariance
from scipy.stats import bernoulli, norm, pearsonr
from ..common import f_1d, f_2d, f_pairwise, new_novel_det
class PairwiseProbitModelStrategyTest(unittest.TestCase):
def test_pairs_to_comparisons(self):
def ptc_numpy(x, y, dim):
"""
old numpy impl of pairs to comparisons
"""
# This needs to take a unique over the feature dim by flattening
# over pairs but not instances/batches. This is actually tensor
# matricization over the feature dimension but awkward in numpy
unique_coords = np.unique(np.moveaxis(x, 1, 0).reshape(dim, -1), axis=1)
def _get_index_of_equal_row(arr, x, axis=0):
return np.argwhere(np.all(np.equal(arr, x[:, None]), axis=axis)).item()
comparisons = []
for pair, judgement in zip(x, y):
comparison = (
_get_index_of_equal_row(unique_coords, pair[..., 0]),
_get_index_of_equal_row(unique_coords, pair[..., 1]),
)
if judgement == 0:
comparisons.append(comparison)
else:
comparisons.append(comparison[::-1])
return torch.Tensor(unique_coords.T), torch.LongTensor(comparisons)
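# check the tensor implementation against the reference numpy implementation
# for both 1d and 2d inputs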
x = np.random.normal(size=(10, 1, 2))
y = np.random.choice((0, 1), size=10)
datapoints1, comparisons1 = ptc_numpy(x, y, 1)
pbo = PairwiseProbitModel(lb=[-10], ub=[10])
datapoints2, comparisons2 = pbo._pairs_to_comparisons(
torch.Tensor(x), torch.Tensor(y)
)
npt.assert_equal(datapoints1.numpy(), datapoints2.numpy())
npt.assert_equal(comparisons1.numpy(), comparisons2.numpy())
x = np.random.normal(size=(10, 2, 2))
y = np.random.choice((0, 1), size=10)
datapoints1, comparisons1 = ptc_numpy(x, y, 2)
pbo = PairwiseProbitModel(lb=[-10], ub=[10], dim=2)
datapoints2, comparisons2 = pbo._pairs_to_comparisons(
torch.Tensor(x), torch.Tensor(y)
)
npt.assert_equal(datapoints1.numpy(), datapoints2.numpy())
npt.assert_equal(comparisons1.numpy(), comparisons2.numpy())
def test_pairwise_probit_batched(self):
"""
test batched pairwise generation on our 1d gaussian bump (applied to the sum of a 2d input)
"""
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 20
n_opt = 1
lb = [-4.0, 1e-5]
ub = [-1e-5, 4.0]
extra_acqf_args = {"beta": 3.84}
model_list = [
Strategy(
lb=lb,
ub=ub,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2),
min_asks=n_init,
stimuli_per_trial=2,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=PairwiseProbitModel(lb=lb, ub=ub),
generator=OptimizeAcqfGenerator(
acqf=qUpperConfidenceBound,
acqf_kwargs=extra_acqf_args,
stimuli_per_trial=2,
),
min_asks=n_opt,
stimuli_per_trial=2,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
while not strat.finished:
next_pair = strat.gen(num_points=3)
# next_pair is batch x dim x pair,
# this checks that we have the reshapes
# right
self.assertTrue((next_pair[:, 0, :] < 0).all())
self.assertTrue((next_pair[:, 1, :] > 0).all())
strat.add_data(
next_pair,
bernoulli.rvs(
f_pairwise(f_1d, next_pair.sum(1), noise_scale=0.1).squeeze()
),
)
xgrid = strat.model.dim_grid(gridsize=10)
zhat, _ = strat.predict(xgrid)
# true max is 0, very loose test
self.assertTrue(xgrid[torch.argmax(zhat, 0)].sum().detach().numpy() < 0.5)
def test_pairwise_memorize(self):
"""
can we memorize a simple function
"""
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
lb = [-1, -1]
ub = [1, 1]
gen = SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2)
x = torch.Tensor(gen.gen(num_points=20))
# "noiseless" new_novel_det (just take the mean instead of sampling)
y = torch.Tensor(f_pairwise(new_novel_det, x) > 0.5).int()
model = PairwiseProbitModel(lb=lb, ub=ub)
model.fit(x[:18], y[:18])
with torch.no_grad():
f0, _ = model.predict(x[18:, ..., 0])
f1, _ = model.predict(x[18:, ..., 1])
pred_diff = norm.cdf(f1 - f0)
pred = pred_diff > 0.5
npt.assert_allclose(pred, y[18:])
def test_pairwise_memorize_rescaled(self):
"""
can we memorize a simple function (with rescaled inputs)
"""
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
lb = [-1000, 0]
ub = [0, 1e-5]
gen = SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2)
x = torch.Tensor(gen.gen(num_points=20))
# "noiseless" new_novel_det (just take the mean instead of sampling)
xrescaled = x.clone()
xrescaled[:, 0, :] = xrescaled[:, 0, :] / 500 + 1
xrescaled[:, 1, :] = xrescaled[:, 1, :] / 5e-6 - 1
y = torch.Tensor(f_pairwise(new_novel_det, xrescaled) > 0.5).int()
model = PairwiseProbitModel(lb=lb, ub=ub)
model.fit(x[:18], y[:18])
with torch.no_grad():
f0, _ = model.predict(x[18:, ..., 0])
f1, _ = model.predict(x[18:, ..., 1])
pred_diff = norm.cdf(f1 - f0)
pred = pred_diff > 0.5
npt.assert_allclose(pred, y[18:])
def test_1d_pairwise_probit(self):
"""
test our 1d gaussian bump example
"""
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -4.0
ub = 4.0
extra_acqf_args = {"beta": 3.84}
model_list = [
Strategy(
lb=lb,
ub=ub,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2),
min_asks=n_init,
stimuli_per_trial=2,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=PairwiseProbitModel(lb=lb, ub=ub),
generator=OptimizeAcqfGenerator(
acqf=qUpperConfidenceBound,
acqf_kwargs=extra_acqf_args,
stimuli_per_trial=2,
),
min_asks=n_opt,
stimuli_per_trial=2,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_pair = strat.gen()
strat.add_data(
next_pair, [bernoulli.rvs(f_pairwise(f_1d, next_pair, noise_scale=0.1))]
)
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
# true max is 0, very loose test
self.assertTrue(np.abs(x[np.argmax(zhat.detach().numpy())]) < 0.5)
def test_1d_pairwise_probit_pure_exploration(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -2.0
ub = 2.0
acqf = PairwiseMCPosteriorVariance
extra_acqf_args = {"objective": ProbitObjective()}
model_list = [
Strategy(
lb=lb,
ub=ub,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2),
min_asks=n_init,
stimuli_per_trial=2,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=PairwiseProbitModel(lb=lb, ub=ub),
generator=OptimizeAcqfGenerator(
acqf=acqf, acqf_kwargs=extra_acqf_args, stimuli_per_trial=2
),
min_asks=n_opt,
stimuli_per_trial=2,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_pair = strat.gen()
strat.add_data(
next_pair,
[bernoulli.rvs(f_pairwise(lambda x: x, next_pair, noise_scale=0.1))],
)
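# with the identity f, the true latent difference for a pair is x0 - x1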
test_gen = SobolGenerator(lb=lb, ub=ub, seed=seed + 1, stimuli_per_trial=2)
test_x = torch.Tensor(test_gen.gen(100))
ftrue_test = (test_x[..., 0] - test_x[..., 1]).squeeze()
with torch.no_grad():
fdiff_test = (
strat.model.predict(test_x[..., 0], rereference=None)[0]
- strat.model.predict(test_x[..., 1], rereference=None)[0]
)
self.assertTrue(pearsonr(fdiff_test, ftrue_test)[0] >= 0.9)
with torch.no_grad():
fdiff_test_reref = (
strat.model.predict(test_x[..., 0])[0]
- strat.model.predict(test_x[..., 1])[0]
)
self.assertTrue(pearsonr(fdiff_test_reref, ftrue_test)[0] >= 0.9)
def test_2d_pairwise_probit(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 20
n_opt = 1
lb = np.r_[-1, -1]
ub = np.r_[1, 1]
extra_acqf_args = {"beta": 3.84}
model_list = [
Strategy(
lb=lb,
ub=ub,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2),
min_asks=n_init,
stimuli_per_trial=2,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=PairwiseProbitModel(lb=lb, ub=ub),
generator=OptimizeAcqfGenerator(
acqf=qUpperConfidenceBound,
acqf_kwargs=extra_acqf_args,
stimuli_per_trial=2,
),
min_asks=n_opt,
stimuli_per_trial=2,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_pair = strat.gen()
strat.add_data(
next_pair, [bernoulli.rvs(f_pairwise(f_2d, next_pair, noise_scale=0.1))]
)
xy = np.mgrid[-1:1:30j, -1:1:30j].reshape(2, -1).T
zhat, _ = strat.predict(torch.Tensor(xy))
# true max is at 0,0
self.assertTrue(np.all(np.abs(xy[np.argmax(zhat.detach().numpy())]) < 0.2))
def test_2d_pairwise_probit_pure_exploration(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 20
n_opt = 1
lb = np.r_[-1, -1]
ub = np.r_[1, 1]
acqf = PairwiseMCPosteriorVariance
extra_acqf_args = {"objective": ProbitObjective()}
model_list = [
Strategy(
lb=lb,
ub=ub,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2),
min_asks=n_init,
stimuli_per_trial=2,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=PairwiseProbitModel(lb=lb, ub=ub),
generator=OptimizeAcqfGenerator(
acqf=acqf, acqf_kwargs=extra_acqf_args, stimuli_per_trial=2
),
min_asks=n_opt,
stimuli_per_trial=2,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_pair = strat.gen()
strat.add_data(
next_pair, [bernoulli.rvs(f_pairwise(new_novel_det, next_pair))]
)
xy = np.mgrid[-1:1:30j, -1:1:30j].reshape(2, -1).T
zhat, _ = strat.predict(torch.Tensor(xy))
ztrue = new_novel_det(xy)
corr = pearsonr(zhat.detach().flatten(), ztrue.flatten())[0]
self.assertTrue(corr > 0.80)
def test_sobolmodel_pairwise(self):
# test that the Sobol generator correctly respects the bounds
sobol_x = np.zeros((10, 3, 2))
mod = Strategy(
lb=[1, 2, 3],
ub=[2, 3, 4],
min_asks=10,
stimuli_per_trial=2,
outcome_types=["binary"],
generator=SobolGenerator(
lb=[1, 2, 3], ub=[2, 3, 4], seed=12345, stimuli_per_trial=2
),
)
for i in range(10):
sobol_x[i, ...] = mod.gen()
self.assertTrue(np.all(sobol_x[:, 0, :] > 1))
self.assertTrue(np.all(sobol_x[:, 1, :] > 2))
self.assertTrue(np.all(sobol_x[:, 2, :] > 3))
self.assertTrue(np.all(sobol_x[:, 0, :] < 2))
self.assertTrue(np.all(sobol_x[:, 1, :] < 3))
self.assertTrue(np.all(sobol_x[:, 2, :] < 4))
def test_hyperparam_consistency(self):
# verify that creating the model `from_config` or with `__init__` has the same hyperparams
m1 = PairwiseProbitModel(lb=[1, 2], ub=[3, 4])
m2 = PairwiseProbitModel.from_config(
config=Config(config_dict={"common": {"lb": "[1,2]", "ub": "[3,4]"}})
)
self.assertTrue(isinstance(m1.covar_module, type(m2.covar_module)))
self.assertTrue(
isinstance(m1.covar_module.base_kernel, type(m2.covar_module.base_kernel))
)
self.assertTrue(isinstance(m1.mean_module, type(m2.mean_module)))
m1priors = list(m1.covar_module.named_priors())
m2priors = list(m2.covar_module.named_priors())
for p1, p2 in zip(m1priors, m2priors):
name1, parent1, prior1, paramtransforms1, priortransforms1 = p1
name2, parent2, prior2, paramtransforms2, priortransforms2 = p2
self.assertTrue(name1 == name2)
self.assertTrue(isinstance(parent1, type(parent2)))
self.assertTrue(isinstance(prior1, type(prior2)))
# no obvious way to test paramtransform equivalence
class PairwiseProbitModelServerTest(unittest.TestCase):
def setUp(self):
# setup logger
server.logger = utils_logging.getLogger(logging.DEBUG, "logs")
# random database path name without dashes
database_path = "./{}.db".format(str(uuid.uuid4().hex))
self.s = server.AEPsychServer(database_path=database_path)
def tearDown(self):
self.s.cleanup()
# cleanup the db
if self.s.db is not None:
self.s.db.delete_db()
def test_1d_pairwise_server(self):
seed = 123
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 2
config_str = f"""
[common]
lb = [-4]
ub = [4]
stimuli_per_trial = 2
outcome_types =[binary]
parnames = [x]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = {n_init}
generator = PairwiseSobolGenerator
[opt_strat]
model = PairwiseProbitModel
min_asks = {n_opt}
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
server = self.s
configure(
server,
config_str=config_str,
)
for _i in range(n_init + n_opt):
next_config = ask(server)
next_y = bernoulli.rvs(f_pairwise(f_1d, next_config["x"], noise_scale=0.1))
tell(server, config=next_config, outcome=next_y)
x = torch.linspace(-4, 4, 100)
zhat, _ = server.strat.predict(x)
self.assertTrue(np.abs(x[np.argmax(zhat.detach().numpy())]) < 0.5)
def test_2d_pairwise_server(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
config_str = f"""
[common]
lb = [-1, -1]
ub = [1, 1]
stimuli_per_trial=2
outcome_types=[binary]
parnames = [x, y]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = {n_init}
generator = PairwiseSobolGenerator
[opt_strat]
min_asks = {n_opt}
model = PairwiseProbitModel
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
server = self.s
configure(
server,
config_str=config_str,
)
for _i in range(n_init + n_opt):
next_config = ask(server)
next_pair = np.c_[next_config["x"], next_config["y"]].T
next_y = bernoulli.rvs(f_pairwise(f_2d, next_pair, noise_scale=0.1))
tell(server, config=next_config, outcome=next_y)
xy = np.mgrid[-1:1:30j, -1:1:30j].reshape(2, -1).T
zhat, _ = server.strat.predict(torch.Tensor(xy))
# true max is at 0,0
self.assertTrue(np.all(np.abs(xy[np.argmax(zhat.detach().numpy())]) < 0.2))
def test_serialization_1d(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 3
n_opt = 1
config_str = f"""
[common]
lb = [-4]
ub = [4]
stimuli_per_trial=2
outcome_types=[binary]
parnames = [x]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = {n_init}
generator = PairwiseSobolGenerator
[opt_strat]
model = PairwiseProbitModel
min_asks = {n_opt}
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
server = self.s
configure(server, config_str=config_str)
for _i in range(n_init + n_opt):
next_config = ask(server)
next_y = bernoulli.rvs(f_pairwise(f_1d, next_config["x"]))
tell(server, config=next_config, outcome=next_y)
import dill
# just make sure it works
try:
s = dill.dumps(server)
server2 = dill.loads(s)
self.assertEqual(len(server2._strats), len(server._strats))
for strat1, strat2 in zip(server._strats, server2._strats):
self.assertEqual(type(strat1), type(strat2))
self.assertEqual(type(strat1.model), type(strat2.model))
self.assertTrue(torch.equal(strat1.x, strat2.x))
self.assertTrue(torch.equal(strat1.y, strat2.y))
except Exception:
self.fail()
def test_serialization_2d(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 3
n_opt = 1
config_str = f"""
[common]
lb = [-1, -1]
ub = [1, 1]
stimuli_per_trial=2
outcome_types=[binary]
parnames = [x, y]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = {n_init}
generator = PairwiseSobolGenerator
[opt_strat]
model = PairwiseProbitModel
min_asks = {n_opt}
generator = PairwiseOptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[PairwiseOptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
server = self.s
configure(server, config_str=config_str)
for _i in range(n_init + n_opt):
next_config = ask(server)
next_pair = np.c_[next_config["x"], next_config["y"]].T
next_y = bernoulli.rvs(f_pairwise(f_2d, next_pair))
tell(server, config=next_config, outcome=next_y)
import dill
# just make sure it works
try:
s = dill.dumps(server)
server2 = dill.loads(s)
self.assertEqual(len(server2._strats), len(server._strats))
for strat1, strat2 in zip(server._strats, server2._strats):
self.assertEqual(type(strat1), type(strat2))
self.assertEqual(type(strat1.model), type(strat2.model))
self.assertTrue(torch.equal(strat1.x, strat2.x))
self.assertTrue(torch.equal(strat1.y, strat2.y))
except Exception:
self.fail()
def test_config_to_tensor(self):
config_str = """
[common]
lb = [-1]
ub = [1]
stimuli_per_trial=2
outcome_types=[binary]
parnames = [x]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = 1
generator = PairwiseSobolGenerator
[opt_strat]
model = PairwiseProbitModel
min_asks = 1
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
server = self.s
configure(server, config_str=config_str)
conf = ask(server)
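# a pairwise trial config should convert to a (dim, stimuli_per_trial) tensor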
self.assertTrue(server._config_to_tensor(conf).shape == (1, 2))
config_str = """
[common]
lb = [-1, -1]
ub = [1, 1]
stimuli_per_trial=2
outcome_types=[binary]
parnames = [x, y]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = 1
generator = PairwiseSobolGenerator
[opt_strat]
model = PairwiseProbitModel
min_asks = 1
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
configure(server, config_str=config_str)
conf = ask(server)
self.assertTrue(server._config_to_tensor(conf).shape == (2, 2))
config_str = """
[common]
lb = [-1, -1, -1]
ub = [1, 1, 1]
stimuli_per_trial=2
outcome_types=[binary]
parnames = [x, y, z]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = 1
generator = PairwiseSobolGenerator
[opt_strat]
model = PairwiseProbitModel
min_asks = 1
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
configure(server, config_str=config_str)
conf = ask(server)
self.assertTrue(server._config_to_tensor(conf).shape == (3, 2))
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/models/test_pairwise_probit.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
# run on single threads to keep us from deadlocking weirdly in CI
if "CI" in os.environ or "SANDCASTLE" in os.environ:
torch.set_num_threads(1)
from aepsych.acquisition.monotonic_rejection import MonotonicMCLSE
from aepsych.acquisition.objective import ProbitObjective
from aepsych.generators import MonotonicRejectionGenerator
from aepsych.models import MonotonicRejectionGP
from aepsych.strategy import Strategy
from botorch.acquisition.objective import IdentityMCObjective
from botorch.utils.testing import BotorchTestCase
from gpytorch.likelihoods import BernoulliLikelihood, GaussianLikelihood
from scipy.stats import norm
class MonotonicRejectionGPLSETest(BotorchTestCase):
def testRegression(self):
# Init
target = 1.5
model_gen_options = {"num_restarts": 1, "raw_samples": 3, "epochs": 5}
lb = torch.tensor([0, 0])
ub = torch.tensor([4, 4])
m = MonotonicRejectionGP(
lb=lb,
ub=ub,
likelihood=GaussianLikelihood(),
fixed_prior_mean=target,
monotonic_idxs=[1],
num_induc=2,
num_samples=3,
num_rejection_samples=4,
)
strat = Strategy(
lb=lb,
ub=ub,
model=m,
generator=MonotonicRejectionGenerator(
MonotonicMCLSE,
acqf_kwargs={"target": target},
model_gen_options=model_gen_options,
),
min_asks=1,
stimuli_per_trial=1,
outcome_types=["binary"],
)
# Fit
train_x = torch.tensor([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
train_y = torch.tensor([[1.0], [2.0], [3.0]])
m.fit(train_x=train_x, train_y=train_y)
self.assertEqual(m.inducing_points.shape, torch.Size([2, 2]))
self.assertEqual(m.mean_module.constant.item(), 1.5)
# Predict
f, var = m.predict(train_x)
self.assertEqual(f.shape, torch.Size([3]))
self.assertEqual(var.shape, torch.Size([3]))
# Gen
strat.add_data(train_x, train_y)
Xopt = strat.gen()
self.assertEqual(Xopt.shape, torch.Size([1, 2]))
# Acquisition function
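# deriv_constraint_points carry an extra last column that appears to index the
# derivative dimension (monotonic dim 1 -> indicator 2)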
acq = strat.generator._instantiate_acquisition_fn(m)
self.assertEqual(acq.deriv_constraint_points.shape, torch.Size([2, 3]))
self.assertTrue(
torch.equal(acq.deriv_constraint_points[:, -1], 2 * torch.ones(2))
)
self.assertEqual(acq.target, 1.5)
self.assertTrue(isinstance(acq.objective, IdentityMCObjective))
def testClassification(self):
# Init
target = 0.75
model_gen_options = {"num_restarts": 1, "raw_samples": 3, "epochs": 5}
lb = torch.tensor([0, 0])
ub = torch.tensor([4, 4])
m = MonotonicRejectionGP(
lb=lb,
ub=ub,
likelihood=BernoulliLikelihood(),
fixed_prior_mean=target,
monotonic_idxs=[1],
num_induc=2,
num_samples=3,
num_rejection_samples=4,
)
strat = Strategy(
lb=lb,
ub=ub,
model=m,
generator=MonotonicRejectionGenerator(
MonotonicMCLSE,
acqf_kwargs={"target": target, "objective": ProbitObjective()},
model_gen_options=model_gen_options,
),
min_asks=1,
stimuli_per_trial=1,
outcome_types=["binary"],
)
# Fit
train_x = torch.tensor([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
train_y = torch.tensor([1.0, 1.0, 0.0])
m.fit(train_x=train_x, train_y=train_y)
self.assertEqual(m.inducing_points.shape, torch.Size([2, 2]))
self.assertAlmostEqual(m.mean_module.constant.item(), norm.ppf(0.75))
# Predict
f, var = m.predict(train_x)
self.assertEqual(f.shape, torch.Size([3]))
self.assertEqual(var.shape, torch.Size([3]))
# Gen
strat.add_data(train_x, train_y)
Xopt = strat.gen()
self.assertEqual(Xopt.shape, torch.Size([1, 2]))
# Acquisition function
acq = strat.generator._instantiate_acquisition_fn(m)
self.assertEqual(acq.deriv_constraint_points.shape, torch.Size([2, 3]))
self.assertTrue(
torch.equal(acq.deriv_constraint_points[:, -1], 2 * torch.ones(2))
)
self.assertEqual(acq.target, 0.75)
self.assertTrue(isinstance(acq.objective, ProbitObjective))
# Update
m.update(train_x=train_x[:2, :2], train_y=train_y[:2], warmstart=True)
self.assertEqual(m.train_inputs[0].shape, torch.Size([2, 3]))
|
aepsych-main
|
tests/models/test_monotonic_rejection_gp.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from botorch.fit import fit_gpytorch_mll
from gpytorch.mlls import ExactMarginalLogLikelihood
from aepsych.models.exact_gp import ExactGP
# Fix random seeds
np.random.seed(0)
torch.manual_seed(0)
class TestModelQuery(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.bounds = torch.tensor([[0.0], [1.0]])
x = torch.linspace(0.0, 1.0, 10).reshape(-1, 1)
y = torch.sin(6.28 * x).reshape(-1, 1)
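# sin(2*pi*x) on [0, 1]: max near x = 0.25, min near x = 0.75, zero crossing near x = 0.5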
cls.model = ExactGP(x, y)
mll = ExactMarginalLogLikelihood(cls.model.likelihood, cls.model)
fit_gpytorch_mll(mll)
def test_min(self):
mymin, my_argmin = self.model.get_min(self.bounds)
# Don't need to be precise since we're working with small data.
self.assertLess(mymin, -0.9)
self.assertTrue(0.7 < my_argmin < 0.8)
def test_max(self):
mymax, my_argmax = self.model.get_max(self.bounds)
# Don't need to be precise since we're working with small data.
self.assertGreater(mymax, 0.9)
self.assertTrue(0.2 < my_argmax < 0.3)
def test_inverse_query(self):
bounds = torch.tensor([[0.1], [0.9]])
val, arg = self.model.inv_query(0.0, bounds)
# Don't need to be precise since we're working with small data.
self.assertTrue(-0.01 < val < 0.01)
self.assertTrue(0.45 < arg < 0.55)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/models/test_model_query.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import torch
# run on single threads to keep us from deadlocking weirdly in CI
if "CI" in os.environ or "SANDCASTLE" in os.environ:
torch.set_num_threads(1)
from functools import partial
from unittest.mock import MagicMock
import numpy as np
import numpy.testing as npt
from aepsych.acquisition import MCLevelSetEstimation
from aepsych.config import Config
from aepsych.generators import OptimizeAcqfGenerator, SobolGenerator
from aepsych.models import GPClassificationModel
from aepsych.strategy import SequentialStrategy, Strategy
from botorch.acquisition import qUpperConfidenceBound
from botorch.optim.fit import fit_gpytorch_mll_torch
from botorch.optim.stopping import ExpMAStoppingCriterion
from botorch.posteriors import GPyTorchPosterior
from gpytorch.distributions import MultivariateNormal
from scipy.stats import bernoulli, norm, pearsonr
from sklearn.datasets import make_classification
from torch.distributions import Normal
from torch.optim import Adam
from ..common import cdf_new_novel_det, f_1d, f_2d
class GPClassificationSmoketest(unittest.TestCase):
"""
Super basic smoke test to make sure we know if we broke the underlying model
for the single-probit ("1AFC") case
"""
def setUp(self):
np.random.seed(1)
torch.manual_seed(1)
X, y = make_classification(
n_samples=100,
n_features=1,
n_redundant=0,
n_informative=1,
random_state=1,
n_clusters_per_class=1,
)
self.X, self.y = torch.Tensor(X), torch.Tensor(y)
def test_1d_classification(self):
"""
Just see if we memorize the training set
"""
X, y = self.X, self.y
model = GPClassificationModel(
torch.Tensor([-3]), torch.Tensor([3]), inducing_size=10
)
model.fit(X[:50], y[:50])
# pspace
pm, _ = model.predict_probability(X[:50])
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred, y[:50])
# fspace
pm, _ = model.predict(X[:50], probability_space=False)
pred = (pm > 0).numpy()
npt.assert_allclose(pred, y[:50])
# smoke test update
model.update(X, y)
# pspace
pm, _ = model.predict_probability(X)
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred, y)
# fspace
pm, _ = model.predict(X, probability_space=False)
pred = (pm > 0).numpy()
npt.assert_allclose(pred, y)
def test_1d_classification_pytorchopt(self):
"""
Just see if we memorize the training set
"""
X, y = self.X, self.y
model = GPClassificationModel(
torch.Tensor([-3]), torch.Tensor([3]), inducing_size=10
)
model.fit(
X[:50],
y[:50],
optimizer=fit_gpytorch_mll_torch,
optimizer_kwargs={
"stopping_criterion": ExpMAStoppingCriterion(maxiter=30),
"optimizer": partial(Adam, lr=0.05),
},
)
# pspace
pm, _ = model.predict_probability(X[:50])
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred, y[:50])
# fspace
pm, _ = model.predict(X[:50], probability_space=False)
pred = (pm > 0).numpy()
npt.assert_allclose(pred, y[:50])
# smoke test update
model.update(
X,
y,
optimizer=fit_gpytorch_mll_torch,
optimizer_kwargs={"stopping_criterion": ExpMAStoppingCriterion(maxiter=30)},
)
# pspace
pm, _ = model.predict_probability(X)
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred, y)
# fspace
pm, _ = model.predict(X, probability_space=False)
pred = (pm > 0).numpy()
npt.assert_allclose(pred, y)
def test_1d_classification_different_scales(self):
"""
Just see if we memorize the training set
"""
np.random.seed(1)
torch.manual_seed(1)
X, y = make_classification(
n_features=2,
n_redundant=0,
n_informative=1,
random_state=1,
n_clusters_per_class=1,
)
X, y = torch.Tensor(X), torch.Tensor(y)
X[:, 0] = X[:, 0] * 1000
X[:, 1] = X[:, 1] / 1000
lb = [-3000, -0.003]
ub = [3000, 0.003]
model = GPClassificationModel(lb=lb, ub=ub, inducing_size=20)
model.fit(X[:50], y[:50])
# pspace
pm, _ = model.predict_probability(X[:50])
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred, y[:50])
# fspace
pm, _ = model.predict(X[:50], probability_space=False)
pred = (pm > 0).numpy()
npt.assert_allclose(pred, y[:50])
# smoke test update
model.update(X, y)
# pspace
pm, _ = model.predict_probability(X)
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred, y)
# fspace
pm, _ = model.predict(X, probability_space=False)
pred = (pm > 0).numpy()
npt.assert_allclose(pred, y)
def test_reset_hyperparams(self):
model = GPClassificationModel(lb=[-3], ub=[3], inducing_size=20)
os_before = model.covar_module.outputscale.clone().detach().numpy()
ls_before = model.covar_module.base_kernel.lengthscale.clone().detach().numpy()
model.fit(torch.Tensor(self.X), torch.Tensor(self.y))
os_after = model.covar_module.outputscale.clone().detach().numpy()
ls_after = model.covar_module.base_kernel.lengthscale.clone().detach().numpy()
model._reset_hyperparameters()
os_reset = model.covar_module.outputscale.clone().detach().numpy()
ls_reset = model.covar_module.base_kernel.lengthscale.clone().detach().numpy()
# before should be different from after and after should be different
# from reset but before and reset should be same
self.assertFalse(np.allclose(os_before, os_after))
self.assertFalse(np.allclose(os_after, os_reset))
self.assertTrue(np.allclose(os_before, os_reset))
self.assertFalse(np.allclose(ls_before, ls_after))
self.assertFalse(np.allclose(ls_after, ls_reset))
self.assertTrue(np.allclose(ls_before, ls_reset))
def test_reset_variational_strategy(self):
model = GPClassificationModel(lb=[-3], ub=[3], inducing_size=20)
variational_params_before = [
v.clone().detach().numpy() for v in model.variational_parameters()
]
induc_before = model.variational_strategy.inducing_points
model.fit(torch.Tensor(self.X), torch.Tensor(self.y))
variational_params_after = [
v.clone().detach().numpy() for v in model.variational_parameters()
]
induc_after = model.variational_strategy.inducing_points
model._reset_variational_strategy()
variational_params_reset = [
v.clone().detach().numpy() for v in model.variational_parameters()
]
induc_reset = model.variational_strategy.inducing_points
# before should be different from after and after should be different
# from reset
self.assertFalse(np.allclose(induc_before, induc_after))
self.assertFalse(np.allclose(induc_after, induc_reset))
for before, after in zip(variational_params_before, variational_params_after):
self.assertFalse(np.allclose(before, after))
for after, reset in zip(variational_params_after, variational_params_reset):
self.assertFalse(np.allclose(after, reset))
def test_predict_p(self):
"""
Verify analytic p-space mean and var is correct.
"""
X, y = self.X, self.y
model = GPClassificationModel(
torch.Tensor([-3]), torch.Tensor([3]), inducing_size=10
)
model.fit(X, y)
pmean_analytic, pvar_analytic = model.predict_probability(X)
fsamps = model.sample(X, 150000)
psamps = norm.cdf(fsamps)
pmean_samp = psamps.mean(0)
pvar_samp = psamps.var(0)
# TODO these tolerances are a bit loose, verify this is right.
self.assertTrue(np.allclose(pmean_analytic, pmean_samp, atol=0.001))
self.assertTrue(np.allclose(pvar_analytic, pvar_samp, atol=0.001))
class GPClassificationTest(unittest.TestCase):
def test_1d_single_probit_new_interface(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -4.0
ub = 4.0
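# two-phase strategy: Sobol initialization followed by model-based acquisition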
model_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
while not strat.finished:
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(f_1d(next_x))])
self.assertTrue(strat.y.shape[0] == n_init + n_opt)
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
# true max is 0, very loose test
self.assertTrue(np.abs(x[np.argmax(zhat.detach().numpy())]) < 0.5)
def test_1d_single_probit_batched(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 2
lb = -4.0
ub = 4.0
model_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
while not strat.finished:
next_x = strat.gen(num_points=2)
strat.add_data(next_x, bernoulli.rvs(f_1d(next_x)).squeeze())
self.assertEqual(strat.y.shape[0], n_init + n_opt)
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
# true max is 0, very loose test
self.assertTrue(np.abs(x[np.argmax(zhat.detach().numpy())]) < 0.5)
def test_1d_single_probit(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -4.0
ub = 4.0
model_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(f_1d(next_x))])
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
# true max is 0, very loose test
self.assertTrue(np.abs(x[np.argmax(zhat.detach().numpy())]) < 0.5)
def test_1d_single_probit_pure_exploration(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -4.0
ub = 4.0
strat_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(norm.cdf(next_x))])
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
# f(x) = x so we're just looking at corr between cdf(zhat) and cdf(x)
self.assertTrue(
pearsonr(norm.cdf(zhat.detach().numpy()).flatten(), norm.cdf(x).flatten())[
0
]
> 0.95
)
def test_2d_single_probit_pure_exploration(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = [-1, -1]
ub = [1, 1]
strat_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(cdf_new_novel_det(next_x))])
xy = np.mgrid[-1:1:30j, -1:1:30j].reshape(2, -1).T
post_mean, _ = strat.predict(torch.Tensor(xy))
phi_post_mean = norm.cdf(post_mean.reshape(30, 30).detach().numpy())
phi_post_true = cdf_new_novel_det(xy)
self.assertTrue(
pearsonr(phi_post_mean.flatten(), phi_post_true.flatten())[0] > 0.9
)
def test_1d_single_targeting(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -4.0
ub = 4.0
target = 0.75
def obj(x):
return -((Normal(0, 1).cdf(x[..., 0]) - target) ** 2)
strat_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(norm.cdf(next_x))])
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
        # since target is 0.75, find the point at which phi(f_est) is 0.75
        est_max = x[np.argmin((norm.cdf(zhat.detach().numpy()) - 0.75) ** 2)]
        # since true z is just x, the true max is where phi(x) = 0.75,
        # i.e., at norm.ppf(0.75) ~ 0.674
        self.assertTrue(np.abs(est_max - norm.ppf(0.75)) < 0.5)
def test_1d_jnd(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 150
n_opt = 1
lb = -4.0
ub = 4.0
target = 0.5
def obj(x):
return -((Normal(0, 1).cdf(x[..., 0]) - target) ** 2)
strat_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(norm.cdf(next_x / 1.5))])
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
        # we expect the jnd close to the target to be close to the correct
        # jnd (1.5), and since this is a linear model, this should be true
        # for both definitions of JND
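        # worked check: responses come from Phi(x / 1.5), so df/dx = 1 / 1.5 and
        # the stimulus change per unit change in f is 1.5 everywhere on the line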
jnd_step = strat.get_jnd(grid=x[:, None], method="step")
est_jnd_step = jnd_step[50]
# looser test because step-jnd is hurt more by reverting to the mean
self.assertTrue(np.abs(est_jnd_step - 1.5) < 0.5)
jnd_taylor = strat.get_jnd(grid=x[:, None], method="taylor")
est_jnd_taylor = jnd_taylor[50]
self.assertTrue(np.abs(est_jnd_taylor - 1.5) < 0.25)
def test_1d_single_lse(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -4.0
ub = 4.0
# target is in z space not phi(z) space, maybe that's
# weird
extra_acqf_args = {"target": 0.75, "beta": 1.96}
strat_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
min_asks=n_opt,
generator=OptimizeAcqfGenerator(
MCLevelSetEstimation, acqf_kwargs=extra_acqf_args
),
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(norm.cdf(next_x))])
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
        # since target is 0.75, find the point at which phi(f_est) is 0.75
        est_max = x[np.argmin((norm.cdf(zhat.detach().numpy()) - 0.75) ** 2)]
        # since true z is just x, the true max is where phi(x) = 0.75,
        # i.e., at norm.ppf(0.75) ~ 0.674
        self.assertTrue(np.abs(est_max - norm.ppf(0.75)) < 0.5)
def test_2d_single_probit(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 150
n_opt = 1
lb = [-1, -1]
ub = [1, 1]
strat_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=20),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(f_2d(next_x[None, :]))])
xy = np.mgrid[-1:1:30j, -1:1:30j].reshape(2, -1).T
zhat, _ = strat.predict(torch.Tensor(xy))
self.assertTrue(np.all(np.abs(xy[np.argmax(zhat.detach().numpy())]) < 0.5))
def test_extra_ask_warns(self):
# test that when we ask more times than we have models, we warn but keep going
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 3
n_opt = 1
lb = -4.0
ub = 4.0
model_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(norm.cdf(f_1d(next_x)))])
with self.assertWarns(RuntimeWarning):
strat.gen()
def test_1d_query(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
lb = -4.0
ub = 4.0
strat = Strategy(
lb=lb,
ub=ub,
min_asks=1,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=50),
stimuli_per_trial=1,
outcome_types=["binary"],
)
# mock the posterior call and remove calls that don't need
# to happen
def get_fake_posterior(X, posterior_transform=None):
fmean = torch.sin(torch.pi * X / 4).squeeze(-1)
fcov = torch.eye(fmean.shape[0])
fake_posterior = GPyTorchPosterior(
mvn=MultivariateNormal(mean=fmean, covariance_matrix=fcov)
)
return fake_posterior
strat.model.posterior = get_fake_posterior
strat.model.__call__ = MagicMock()
strat.model.fit = MagicMock()
x = strat.gen(1)
y = torch.Tensor([1])
strat.add_data(x, y)
strat.model.set_train_data(x, y)
        # With f = sin(pi * x / 4), we expect the global max of 1 at x = 2 and
        # the global min of -1 at x = -2
fmax, argmax = strat.get_max()
self.assertTrue(np.allclose(fmax, 1))
self.assertTrue(np.allclose(argmax, 2))
fmin, argmin = strat.get_min()
self.assertTrue(np.allclose(fmin, -1))
self.assertTrue(np.allclose(argmin, -2, atol=0.2))
# Inverse query at val .85 should return (.85,[2.7])
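        # sin(pi * x / 4) = 0.85 at x ~ 1.29 and x ~ 2.71 on [-4, 4]; the
        # unconstrained query is expected to land on the larger root here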
val, loc = strat.inv_query(0.85, constraints={})
self.assertTrue(np.allclose(val, 0.85))
self.assertTrue(np.allclose(loc.item(), 2.7, atol=1e-2))
def test_hyperparam_consistency(self):
# verify that creating the model `from_config` or with `__init__` has the same hyperparams
m1 = GPClassificationModel(lb=[1, 2], ub=[3, 4])
m2 = GPClassificationModel.from_config(
config=Config(config_dict={"common": {"lb": "[1,2]", "ub": "[3,4]"}})
)
self.assertTrue(isinstance(m1.covar_module, type(m2.covar_module)))
self.assertTrue(
isinstance(m1.covar_module.base_kernel, type(m2.covar_module.base_kernel))
)
self.assertTrue(isinstance(m1.mean_module, type(m2.mean_module)))
m1priors = list(m1.covar_module.named_priors())
m2priors = list(m2.covar_module.named_priors())
for p1, p2 in zip(m1priors, m2priors):
name1, parent1, prior1, paramtransforms1, priortransforms1 = p1
name2, parent2, prior2, paramtransforms2, priortransforms2 = p2
self.assertTrue(name1 == name2)
self.assertTrue(isinstance(parent1, type(parent2)))
self.assertTrue(isinstance(prior1, type(prior2)))
# no obvious way to test paramtransform equivalence
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/models/test_gp_classification.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import time
import unittest
import numpy as np
import torch
from aepsych.acquisition import MCLevelSetEstimation
from aepsych.config import Config
from aepsych.generators import AxOptimizeAcqfGenerator, OptimizeAcqfGenerator
from aepsych.models import (
ContinuousRegressionGP,
GPClassificationModel,
PairwiseProbitModel,
)
from ax.modelbridge import Models
from botorch.acquisition.preference import AnalyticExpectedUtilityOfBestOption
from sklearn.datasets import make_classification
class TestOptimizeAcqfGenerator(unittest.TestCase):
def test_time_limits(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
X, y = make_classification(
n_samples=100,
n_features=8,
n_redundant=3,
n_informative=5,
random_state=1,
n_clusters_per_class=4,
)
X, y = torch.Tensor(X), torch.Tensor(y)
model = GPClassificationModel(
lb=-3 * torch.ones(8),
ub=3 * torch.ones(8),
max_fit_time=0.5,
inducing_size=10,
)
model.fit(X, y)
generator = OptimizeAcqfGenerator(
acqf=MCLevelSetEstimation, acqf_kwargs={"beta": 1.96, "target": 0.5}
)
start = time.time()
generator.gen(1, model)
end = time.time()
long = end - start
generator = OptimizeAcqfGenerator(
acqf=MCLevelSetEstimation,
acqf_kwargs={"beta": 1.96, "target": 0.5},
max_gen_time=0.1,
)
start = time.time()
generator.gen(1, model)
end = time.time()
short = end - start
# very loose test because fit time is only approximately computed
self.assertTrue(long > short)
def test_instantiate_eubo(self):
config = """
[OptimizeAcqfGenerator]
acqf = AnalyticExpectedUtilityOfBestOption
stimuli_per_trial = 2
"""
generator = OptimizeAcqfGenerator.from_config(Config(config_str=config))
self.assertTrue(generator.acqf == AnalyticExpectedUtilityOfBestOption)
# need a fitted model in order to instantiate the acqf successfully
model = PairwiseProbitModel(lb=[-1], ub=[1])
train_x = torch.Tensor([-0.5, 1, 0.5, -1]).reshape((2, 1, 2))
train_y = torch.Tensor([0, 1])
model.fit(train_x, train_y)
acqf = generator._instantiate_acquisition_fn(model=model)
self.assertTrue(isinstance(acqf, AnalyticExpectedUtilityOfBestOption))
def test_axoptimizeacqf_config(self):
config_str = """
[common]
use_ax = True
parnames = [foo]
lb = [0]
ub = [1]
stimuli_per_trial = 1
outcome_types = [continuous]
strat_names = [opt]
[opt]
generator = OptimizeAcqfGenerator
model = ContinuousRegressionGP
[ContinuousRegressionGP]
max_fit_time = 1
[OptimizeAcqfGenerator]
acqf = MCLevelSetEstimation
max_gen_time = 1
restarts = 1
samps = 100
[MCLevelSetEstimation]
beta = 1
target = 0.5
"""
config = Config(config_str=config_str)
gen = AxOptimizeAcqfGenerator.from_config(config, "opt")
self.assertEqual(gen.model, Models.BOTORCH_MODULAR)
self.assertEqual(
gen.model_kwargs["surrogate"].botorch_model_class, ContinuousRegressionGP
)
self.assertEqual(gen.model_gen_kwargs["restarts"], 1)
self.assertEqual(gen.model_gen_kwargs["samps"], 100)
self.assertEqual(gen.model_kwargs["acquisition_options"]["target"], 0.5)
self.assertEqual(gen.model_kwargs["acquisition_options"]["beta"], 1.0)
# TODO: Implement max_gen_time
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/generators/test_optimize_acqf_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import numpy.testing as npt
from aepsych.config import Config
from aepsych.generators import AxRandomGenerator, RandomGenerator
from ax.modelbridge import Models
class TestRandomGenerator(unittest.TestCase):
def test_randomgen_single(self):
# test that RandomGenerator doesn't mess with shapes
n = 100
rand = np.zeros((n, 3))
mod = RandomGenerator(lb=[1, 2, 3], ub=[2, 3, 4], dim=3)
for i in range(n):
rand[i, :] = mod.gen()
# check that bounds are right
self.assertTrue(np.all(rand[:, 0] > 1))
self.assertTrue(np.all(rand[:, 1] > 2))
self.assertTrue(np.all(rand[:, 2] > 3))
self.assertTrue(np.all(rand[:, 0] < 2))
self.assertTrue(np.all(rand[:, 1] < 3))
self.assertTrue(np.all(rand[:, 2] < 4))
def test_randomgen_batch(self):
# test that RandomGenerator doesn't mess with shapes
n = 100
mod = RandomGenerator(lb=[1, 2, 3], ub=[2, 3, 4], dim=3)
rand = mod.gen(n)
# check that bounds are right
self.assertTrue((rand[:, 0] > 1).all())
self.assertTrue((rand[:, 1] > 2).all())
self.assertTrue((rand[:, 2] > 3).all())
self.assertTrue((rand[:, 0] < 2).all())
self.assertTrue((rand[:, 1] < 3).all())
self.assertTrue((rand[:, 2] < 4).all())
def test_randomgen_config(self):
lb = [-1, 0]
ub = [1, 2]
config_str = f"""
[common]
lb = {lb}
ub = {ub}
"""
config = Config(config_str=config_str)
gen = RandomGenerator.from_config(config)
npt.assert_equal(gen.lb.numpy(), np.array(lb))
npt.assert_equal(gen.ub.numpy(), np.array(ub))
self.assertEqual(gen.dim, len(lb))
def test_axrandom_config(self):
config_str = """
[common]
parnames = [par1, par2]
lb = [-1, 0]
ub = [1, 2]
outcome_types = [continuous]
strategy_names = [init]
[init]
generator = RandomGenerator
[RandomGenerator]
seed=231
deduplicate=True
"""
config = Config(config_str=config_str)
gen = AxRandomGenerator.from_config(config, name="init")
self.assertEqual(gen.model, Models.UNIFORM)
self.assertEqual(gen.model_kwargs["seed"], 231)
self.assertTrue(gen.model_kwargs["deduplicate"])
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/generators/test_random_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from aepsych.config import Config
from aepsych.generators.completion_criterion import (
MinAsks,
MinTotalOutcomeOccurrences,
MinTotalTells,
RunIndefinitely,
)
from aepsych.strategy import AEPsychStrategy
class CompletionCriteriaTestCase(unittest.TestCase):
def setUp(self):
config_str = """
[common]
use_ax = True
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [x]
lb = [0]
ub = [1]
strategy_names = [test_strat]
[test_strat]
generator = SobolGenerator
"""
config = Config(config_str=config_str)
self.strat = AEPsychStrategy.from_config(config)
def test_min_asks(self):
config_str = """
[test_strat]
min_asks = 2
"""
config = Config(config_str=config_str)
criterion = MinAsks.from_config(config, "test_strat")
self.assertEqual(criterion.threshold, 2)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 0.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 1.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.gen()
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.gen()
self.assertTrue(criterion.is_met(self.strat.experiment))
def test_min_total_tells(self):
config_str = """
[test_strat]
min_total_tells = 2
"""
config = Config(config_str=config_str)
criterion = MinTotalTells.from_config(config, "test_strat")
self.assertEqual(criterion.threshold, 2)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.gen()
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.gen()
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 0.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 1.0}, 0.0)
self.assertTrue(criterion.is_met(self.strat.experiment))
    def test_min_total_outcome_occurrences(self):
config_str = """
[common]
outcome_types = [binary]
min_total_outcome_occurrences = 2
"""
config = Config(config_str=config_str)
criterion = MinTotalOutcomeOccurrences.from_config(config, "test_strat")
self.assertEqual(criterion.threshold, 2)
self.strat.complete_new_trial({"x": 0.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 1.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 0.0}, 1.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 1.0}, 1.0)
self.assertTrue(criterion.is_met(self.strat.experiment))
    def test_run_indefinitely(self):
config_str = """
[common]
outcome_types = [binary]
run_indefinitely = False
"""
config = Config(config_str=config_str)
        # from_config already returns a constructed criterion
        criterion = RunIndefinitely.from_config(config, "test_strat")
self.assertTrue(criterion.is_met(self.strat.experiment))
config_str = """
[common]
outcome_types = [binary]
run_indefinitely = True
"""
config = Config(config_str=config_str)
        criterion = RunIndefinitely.from_config(config, "test_strat")
self.assertFalse(criterion.is_met(self.strat.experiment))
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/generators/test_completion_criteria.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import MagicMock
import numpy as np
import torch
from aepsych.acquisition import MonotonicMCLSE
from aepsych.config import Config
from aepsych.generators import EpsilonGreedyGenerator, MonotonicRejectionGenerator
class TestEpsilonGreedyGenerator(unittest.TestCase):
def test_epsilon_greedy(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
total_trials = 2000
extra_acqf_args = {"target": 0.75, "beta": 1.96}
for epsilon in (0.1, 0.5):
gen = EpsilonGreedyGenerator(
subgenerator=MonotonicRejectionGenerator(
acqf=MonotonicMCLSE, acqf_kwargs=extra_acqf_args
),
epsilon=epsilon,
)
model = MagicMock()
gen.subgenerator.gen = MagicMock()
for _ in range(total_trials):
gen.gen(1, model)
self.assertTrue(
np.abs(gen.subgenerator.gen.call_count / total_trials - (1 - epsilon))
< 0.01
)
def test_greedyepsilon_config(self):
config_str = """
[common]
acqf = MonotonicMCLSE
[EpsilonGreedyGenerator]
subgenerator = MonotonicRejectionGenerator
epsilon = .5
"""
config = Config()
config.update(config_str=config_str)
gen = EpsilonGreedyGenerator.from_config(config)
self.assertIsInstance(gen.subgenerator, MonotonicRejectionGenerator)
self.assertEqual(gen.subgenerator.acqf, MonotonicMCLSE)
self.assertEqual(gen.epsilon, 0.5)
|
aepsych-main
|
tests/generators/test_epsilon_greedy_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
aepsych-main
|
tests/generators/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import numpy.testing as npt
import torch
from aepsych.config import Config
from aepsych.generators import AxSobolGenerator, SobolGenerator
from aepsych.utils import make_scaled_sobol
from ax.modelbridge import Models
class TestSobolGenerator(unittest.TestCase):
def test_batchsobol(self):
mod = SobolGenerator(lb=[1, 2, 3], ub=[2, 3, 4], dim=3, seed=12345)
acq1 = mod.gen(num_points=2)
self.assertEqual(acq1.shape, (2, 3))
acq2 = mod.gen(num_points=3)
self.assertEqual(acq2.shape, (3, 3))
acq3 = mod.gen()
self.assertEqual(acq3.shape, (1, 3))
def test_sobolgen_single(self):
# test that SobolGenerator doesn't mess with shapes
sobol1 = make_scaled_sobol(lb=[1, 2, 3], ub=[2, 3, 4], size=10, seed=12345)
sobol2 = torch.zeros((10, 3))
mod = SobolGenerator(lb=[1, 2, 3], ub=[2, 3, 4], dim=3, seed=12345)
for i in range(10):
sobol2[i, :] = mod.gen()
npt.assert_almost_equal(sobol1.numpy(), sobol2.numpy())
# check that bounds are also right
self.assertTrue(torch.all(sobol1[:, 0] > 1))
self.assertTrue(torch.all(sobol1[:, 1] > 2))
self.assertTrue(torch.all(sobol1[:, 2] > 3))
self.assertTrue(torch.all(sobol1[:, 0] < 2))
self.assertTrue(torch.all(sobol1[:, 1] < 3))
self.assertTrue(torch.all(sobol1[:, 2] < 4))
def test_sobol_config(self):
config_str = """
[common]
lb = [0]
ub = [1]
parnames = [par1]
stimuli_per_trial = 1
[SobolGenerator]
seed=12345
"""
config = Config()
config.update(config_str=config_str)
gen = SobolGenerator.from_config(config)
npt.assert_equal(gen.lb.numpy(), np.array([0]))
npt.assert_equal(gen.ub.numpy(), np.array([1]))
self.assertEqual(gen.seed, 12345)
self.assertEqual(gen.stimuli_per_trial, 1)
def test_pairwise_sobol_sizes(self):
for dim in np.arange(1, 4):
for nsamp in (3, 5, 7):
generator = SobolGenerator(
lb=np.arange(dim).tolist(),
ub=(1 + np.arange(dim)).tolist(),
stimuli_per_trial=2,
)
shape_out = (nsamp, dim, 2)
self.assertEqual(generator.gen(nsamp).shape, shape_out)
def test_axsobol_config(self):
config_str = """
[common]
parnames = [par1]
lb = [0]
ub = [1]
stimuli_per_trial = 1
outcome_types = [continuous]
strategy_names = [init]
[init]
generator = SobolGenerator
[SobolGenerator]
seed=12345
scramble=False
"""
config = Config(config_str=config_str)
gen = AxSobolGenerator.from_config(config, name="init")
self.assertEqual(gen.model, Models.SOBOL)
self.assertEqual(gen.model_kwargs["seed"], 12345)
self.assertFalse(gen.model_kwargs["scramble"])
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/generators/test_sobol_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import numpy.testing as npt
from aepsych.config import Config
from aepsych.generators import ManualGenerator
class TestManualGenerator(unittest.TestCase):
def test_batchmanual(self):
points = np.random.rand(10, 3)
mod = ManualGenerator(
lb=[0, 0, 0], ub=[1, 1, 1], dim=3, points=points, shuffle=False
)
npt.assert_allclose(points, mod.points) # make sure they weren't shuffled
acq1 = mod.gen(num_points=2)
self.assertEqual(acq1.shape, (2, 3))
acq2 = mod.gen(num_points=3)
self.assertEqual(acq2.shape, (3, 3))
acq3 = mod.gen()
self.assertEqual(acq3.shape, (1, 3))
with self.assertWarns(RuntimeWarning):
acq4 = mod.gen(num_points=10)
self.assertEqual(acq4.shape, (4, 3))
def test_manual_generator(self):
points = [[0, 0], [0, 1], [1, 0], [1, 1]]
config_str = f"""
[common]
lb = [0, 0]
ub = [1, 1]
parnames = [par1, par2]
[ManualGenerator]
points = {points}
"""
config = Config()
config.update(config_str=config_str)
gen = ManualGenerator.from_config(config)
npt.assert_equal(gen.lb.numpy(), np.array([0, 0]))
npt.assert_equal(gen.ub.numpy(), np.array([1, 1]))
self.assertFalse(gen.finished)
p1 = list(gen.gen()[0])
p2 = list(gen.gen()[0])
p3 = list(gen.gen()[0])
p4 = list(gen.gen()[0])
self.assertEqual(sorted([p1, p2, p3, p4]), points)
self.assertTrue(gen.finished)
if __name__ == "__main__":
unittest.main()
|
aepsych-main
|
tests/generators/test_manual_generator.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# This source code is licensed under the MIT license found in the
# LICENSE file in the scripts directory.
# -- Path setup --------------------------------------------------------------
import os
import sys
# from pkg_resources import get_distribution
# sys.path.insert(0, os.path.abspath("../../"))
current_dir = os.path.dirname(__file__)
target_dir = os.path.abspath(os.path.join(current_dir, "../../"))
sys.path.insert(0, target_dir)
# base_path = os.path.abspath(os.path.join(__file__, "..", "..", "..", "aepsych"))
# print(sys.path, base_path, "======")
# sys.path.append(base_path)
# -- Project information -----------------------------------------------------
project = "AEPsych"
# copyright = "Meta, Inc."
author = "Meta, Inc."
# get version string
# version = get_distribution("aepsych").version
version = ""
release = ""
# -- General configuration ---------------------------------------------------
# Sphinx extension modules
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"
# The main toctree document.
index_doc = "index"
# The language for content autogenerated by Sphinx.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# Default options for autodoc directives. Applied to all autodoc directives
autodoc_default_options = {
"undoc-members": True,
"show-inheritance": True,
"member-order": "bysource",
}
# show type hints in the method description
autodoc_typehints = "description"
# Include init docstrings in the body of autoclass directives
autoclass_content = "both"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
html_static_path = [] # for now we have no static files to track
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "aepsychdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(index_doc, "aepsych.tex", "AEPsych Documentation", "Meta, Inc.", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(index_doc, "aepsych", "aepsych Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
index_doc,
"aepsych",
"AEPsych Documentation",
author,
"AEPsych",
"AEPsych",
"Miscellaneous",
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
autodoc_mock_imports = ["botorch"]
|
aepsych-main
|
sphinx/source/conf.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
import numpy as np
constants = {
"savefolder": "./databases/",
"timestamp": datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
"config_path": "./aepsych_config.ini",
"seed": 1,
}
# base parameters in case we don't want AEPsych to manage all 8.
base_params = {
"spatial_frequency": 2,
"orientation": 0,
"pedestal": 0.5,
"contrast": 0.75,
"temporal_frequency": 0,
"size": 10,
"angle_dist": 0,
"eccentricity": 0,
}
psychopy_vars = {
"setSizePix": [1680, 1050],
"setWidth": 47.475,
"setDistance": 57,
"pre_duration_s": 0.0,
"stim_duration_s": 5.0,
"post_duration_s": 1,
"response_wait": 2,
"iti": 0,
}
|
aepsych-main
|
examples/contrast_discrimination_psychopy/experiment_config.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import experiment_config
import numpy as np
import torch
from aepsych_client import AEPsychClient
from helpers import HalfGrating
from psychopy import core, data, event, gui, monitors, visual
from psychopy.tools.filetools import toFile
def run_experiment():
seed = experiment_config.constants["seed"]
config_path = experiment_config.constants["config_path"]
torch.manual_seed(seed)
np.random.seed(seed)
expInfo = {"observer": "default_observer"}
expInfo["dateStr"] = data.getDateStr() # add the current time
# present a dialogue to change params
dlg = gui.DlgFromDict(expInfo, title="multi-D JND Exp", fixed=["dateStr"])
if dlg.OK:
toFile("lastParams.pickle", expInfo) # save params to file for next time
else:
core.quit() # the user hit cancel so exit
screen = monitors.Monitor("testMonitor", gamma=1)
screen.setSizePix(experiment_config.psychopy_vars["setSizePix"])
screen.setWidth(experiment_config.psychopy_vars["setWidth"])
screen.setDistance(experiment_config.psychopy_vars["setDistance"])
win = visual.Window(
allowGUI=True,
units="deg",
monitor=screen,
bpc=(8, 8, 8),
size=experiment_config.psychopy_vars["setSizePix"],
fullscr=False,
)
screen_text_g = visual.TextStim(win, text=None, alignHoriz="center", color="green")
screen_text_r = visual.TextStim(win, text=None, alignHoriz="center", color="red")
screen_text = visual.TextStim(win, text=None, alignHoriz="center", color="gray")
# display instructions and wait
message2 = visual.TextStim(
win,
pos=[0, +3],
text="Hit the space bar key when ready and "
"to advance to the next trial after you see a red cross.",
)
message1 = visual.TextStim(
win,
pos=[0, -3],
text="You'll see a stimulus. One side will have a grating and the other will be noise."
" "
"Press left or right corresponding to the side with noise. If you don't know, please guess.",
)
message1.draw()
message2.draw()
win.flip() # to show our newly drawn 'stimuli'
# pause until there's a keypress
event.waitKeys()
# start the trial: draw grating
clock = core.Clock()
screen_text_r.setText("+")
screen_text_r.draw(win=win)
win.flip()
aepsych_client = AEPsychClient()
aepsych_client.configure(config_path=config_path)
# create stimulus
stim = HalfGrating(**experiment_config.base_params, win=win)
i = 0
is_finished = False
while not is_finished:
ask_response = aepsych_client.ask()
trial_params = ask_response["config"]
is_finished = ask_response["is_finished"]
stim.update(trial_params)
print(trial_params)
bg_color = np.array([stim.pedestal_psychopy_scale] * 3)
win.setColor(bg_color)
win.color = bg_color
win.flip()
screen_text_r.setText("+")
screen_text_r.draw(win=win)
win.flip()
core.wait(experiment_config.psychopy_vars["iti"])
fixation_keys = []
while not fixation_keys:
fixation_keys = event.getKeys(keyList=["space"])
            # fixation_keys = ["space"]  # debugging shortcut: uncomment to skip the keypress wait
if "space" in fixation_keys:
screen_text.setText("+")
screen_text.draw(win=win)
win.flip()
noisy_half = "left" if np.random.randint(2) == 0 else "right"
clock.reset()
keys = stim.draw(
noisy_half=noisy_half,
win=win,
pre_duration_s=experiment_config.psychopy_vars["pre_duration_s"],
stim_duration_s=experiment_config.psychopy_vars["stim_duration_s"],
)
# keys = event.waitKeys(keyList=["left", "right"]) # phil took out max wait
rt = clock.getTime()
response = noisy_half in keys
print(f"keys:{keys}, ca:{noisy_half}, acc:{response}, rt:{rt}")
win.flip()
if response:
screen_text_g.setText("Correct")
screen_text_g.draw()
win.flip()
else:
screen_text_r.setText("Incorrect")
screen_text_r.draw()
win.flip()
# inform bayesopt of the response, needed to calculate next contrast
aepsych_client.tell(config=trial_params, outcome=response, rt=rt)
# core.wait(experiment_config.psychopy_vars["post_duration_s"])
event.clearEvents()
print(f"trial {i}")
i = i + 1
win.close()
aepsych_client.finalize()
core.quit()
if __name__ == "__main__":
run_experiment()
|
aepsych-main
|
examples/contrast_discrimination_psychopy/experiment.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pyglet
from psychopy import core, event
from psychopy.visual import Window
from psychopy.visual.image import ImageStim
pyglet.options["debug_gl"] = False
GL = pyglet.gl
def polar_to_cartesian(r, theta):
z = r * np.exp(1j * np.radians(theta))
return z.real, z.imag
def cartesian_to_polar(x, y):
z = x + 1j * y
return (np.abs(z), np.angle(z, deg=True))
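# Quick sanity check of the conversions (illustrative values only):
# polar_to_cartesian(1, 90) ~ (0.0, 1.0) and cartesian_to_polar(0, 1) = (1.0, 90.0)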
class AnimatedGrating:
param_transforms = {"contrast": lambda x: 10 ** x, "pedestal": lambda x: 10 ** x}
def __init__(
self,
spatial_frequency: float,
orientation: float,
pedestal: float,
contrast: float,
temporal_frequency: float,
eccentricity: float,
size: float,
angle_dist: float,
win: Window,
cpd=60, # display cycles per degree
Lmin=0, # min luminance in nits
Lmax=255, # max luminance in nits
res=256, # texture resolution
noisy=False,
*args,
**kw,
):
"""Generate animated Gabor grating
Args:
spatial_frequency (float): Spatial frequency.
orientation (float): Orientation (degrees)
pedestal (float): Background luminance.
contrast (float): Stimulus contrast.
temporal_frequency (float): Temporal frequency (seconds).
eccentricity (float): Stimulus eccentricity relative to center (degrees).
size (float): Stimulus size.
angle_dist (float): Stimulus angle relative to center.
win (Window): Window to render to.
cpd (int, optional): Display cycles per degree. Defaults to 60.
"""
self.spatial_frequency = spatial_frequency
self.temporal_frequency = temporal_frequency
self.orientation = orientation
self.pedestal = pedestal
self.contrast = contrast
self.settable_params = (
"spatial_frequency",
"temporal_frequency",
"orientation",
"pedestal",
"contrast",
"size",
"eccentricity",
"angle_dist",
)
self.cpd = cpd
self.Lmin = Lmin
self.Lmax = Lmax
self.res = res
self.noisy = noisy
self.initial_phase = np.random.uniform(low=0, high=0.2, size=(1))
img = np.zeros((self.res, self.res))
self.win = win
self._stim = ImageStim(image=img, mask="gauss", win=win, *args, **kw)
# these get set on _stim
self.size = size
self.eccentricity = eccentricity
self.angle_dist = angle_dist
def update(self, trial_config):
for k, v in trial_config.items():
if k in self.settable_params:
if k in self.param_transforms:
setattr(self, k, self.param_transforms[k](v[0]))
else:
setattr(self, k, v[0])
@property
def size(self):
return self._stim.size
@size.setter
def size(self, x):
self._stim.size = x
@property
def eccentricity(self):
return cartesian_to_polar(*self._stim.pos)[0]
@eccentricity.setter
def eccentricity(self, x):
current_coords = cartesian_to_polar(*self._stim.pos)
self._stim.pos = polar_to_cartesian(x, current_coords[1])
@property
def angle_dist(self):
return cartesian_to_polar(*self._stim.pos)[1]
@angle_dist.setter
def angle_dist(self, deg):
current_coords = cartesian_to_polar(*self._stim.pos)
self._stim.pos = polar_to_cartesian(current_coords[0], deg + 90)
@property
def pedestal_psychopy_scale(self):
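        # pedestal is in [0, 1] but psychopy color values live in [-1, 1]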
return self.pedestal * 2 - 1
def draw(
self,
noisy=False,
win=None,
pre_duration_s=0.1,
stim_duration_s=5.0,
*args,
**kwargs,
):
win = win or self.win
clock = core.Clock()
clock.reset()
self._stim.image = self.get_texture(self.initial_phase, noisy=noisy)
while clock.getTime() < pre_duration_s:
win.flip()
start_time = clock.getTime()
while clock.getTime() < pre_duration_s + stim_duration_s:
if self.temporal_frequency > 0:
newphase = (clock.getTime() - start_time) * self.temporal_frequency
self._stim.image = self.get_texture(
newphase + self.initial_phase, noisy=noisy
)
self._stim.draw()
def get_texture(self, phase=0, noisy=False):
pedestal_lum = self.pedestal * (self.Lmax - self.Lmin) + self.Lmin
grating_max = (self.contrast * (2 * pedestal_lum + self.Lmin) + self.Lmin) / 2
x = np.arange(0, self.res) / self.cpd + phase
y = np.arange(0, self.res) / self.cpd + phase
x_grid, y_grid = np.meshgrid(x, y)
wave = x_grid * np.cos(np.radians(self.orientation)) + y_grid * np.sin(
np.radians(self.orientation)
)
scaled_imag_wave = 1j * 2 * np.pi * self.spatial_frequency * wave
img = grating_max * np.real(np.exp(scaled_imag_wave)) + pedestal_lum
# convert from luminance to values in [-1, 1] as psychopy wants
img = img / ((self.Lmax - self.Lmin) / 2) - 1
if noisy:
flatimg = img.flatten()
np.random.shuffle(flatimg)
img = flatimg.reshape(self.res, self.res)
return img
class HalfGrating(AnimatedGrating):
"""Gabor animated grating, half of which is scrambled into white noise."""
def noisify_half_texture(self, img, noisy_half):
img = img.T # transpose so our indexing tricks work
flatimg = img.flatten()
if noisy_half == "left":
noisy = flatimg[: (self.res ** 2) // 2]
np.random.shuffle(noisy)
img = np.r_[noisy, flatimg[(self.res ** 2) // 2 :]].reshape(
self.res, self.res
)
else:
noisy = flatimg[(self.res ** 2) // 2 :]
np.random.shuffle(noisy)
img = np.r_[flatimg[: (self.res ** 2) // 2], noisy].reshape(
self.res, self.res
)
return img.T # untranspose
def get_texture(self, phase, noisy_half):
img = super().get_texture(phase, noisy=False)
img = self.noisify_half_texture(img, noisy_half)
return img
def draw(
self,
noisy_half="left",
win=None,
pre_duration_s=0.1,
stim_duration_s=5.0,
*args,
**kwargs,
):
win = win or self.win
clock = core.Clock()
clock.reset()
event.clearEvents()
self._stim.image = self.get_texture(self.initial_phase, noisy_half=noisy_half)
while clock.getTime() < pre_duration_s:
win.flip()
start_time = clock.getTime()
while True:
if self.temporal_frequency > 0:
newphase = (clock.getTime() - start_time) * self.temporal_frequency
self._stim.image = self.get_texture(
newphase + self.initial_phase, noisy_half=noisy_half
)
self._stim.draw()
keys = event.getKeys(keyList=["left", "right"])
win.flip()
if len(keys) > 0:
return keys
class ExperimentAborted(Exception):
pass
class QuitHelper:
"""Helper to quit the experiment by pressing a key twice within 500ms.
    It quits by raising 'ExperimentAborted'. This indirection is necessary
    because psychopy checks its global key events on a separate thread, from
    which an exception cannot be raised in the main thread.
"""
def __init__(self):
self.quit_requested = False
self.debounce_timestamp = None
def request_quit(self):
"""Must be called twice in 500ms to set a flag that causes ExperimentAborted
to be raised when quit_if_requested is called. This indirection is needed if request_quit
is called from a separate thread (as with psychopy global event keys)
"""
tprev = self.debounce_timestamp
tnow = core.getTime()
if tprev is not None and tnow - tprev < 0.5:
self.quit_requested = True
self.debounce_timestamp = tnow
def quit_if_requested(self):
"""Raises ExperimentAborted if request_quit has been called twice in 500ms"""
if self.quit_requested:
raise ExperimentAborted
return True
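# Example wiring (a sketch; the window setup and key choice are assumptions,
# not part of this module). psychopy runs global key handlers on a separate
# thread, so the handler only sets a flag and the main loop raises:
#
#   quit_helper = QuitHelper()
#   event.globalKeys.add(key="escape", func=quit_helper.request_quit)
#   while True:  # main trial loop
#       quit_helper.quit_if_requested()  # raises ExperimentAborted on double-press
#       ...  # draw stimuli, collect responses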
|
aepsych-main
|
examples/contrast_discrimination_psychopy/helpers.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# we have pretty verbose messaging by default, suppress that here
import logging
import warnings
warnings.filterwarnings("ignore")
logging.disable(logging.WARNING) # disable anything below warning
import os
import time
from copy import copy
from itertools import product
from aepsych.benchmark import (
Problem,
LSEProblem,
BenchmarkLogger,
PathosBenchmark,
combine_benchmarks,
)
from aepsych.benchmark.test_functions import (
make_songetal_testfun,
novel_detection_testfun,
novel_discrimination_testfun,
)
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["MKL_THREADING_LAYER"] = "GNU"
nproc = 94
n_reps = 100
sobol_trials = 5
total_trials = 150
global_seed = 3
log_every = 5
# test functions and boundaries
novel_names = ["novel_detection", "novel_discrimination"]
novel_testfuns = [novel_detection_testfun, novel_discrimination_testfun]
novel_bounds = [{"lb": [-1, -1], "ub": [1, 1]}, {"lb": [-1, -1], "ub": [1, 1]}]
song_phenotypes = ["Metabolic", "Sensory", "Metabolic+Sensory", "Older-normal"]
song_betavals = [0.2, 0.5, 1, 2, 5, 10]
song_testfuns = [
make_songetal_testfun(p, b) for p, b in product(song_phenotypes, song_betavals)
]
song_bounds = [{"lb": [-3, -20], "ub": [4, 120]}] * len(song_testfuns)
song_names = [f"song_p{p}_b{b}" for p, b in product(song_phenotypes, song_betavals)]
all_testfuns = song_testfuns + novel_testfuns
all_bounds = song_bounds + novel_bounds
all_names = song_names + novel_names
combo_logger = BenchmarkLogger(log_every=log_every)
# benchmark configs: we have to subdivide into 5 configs because the Sobol,
# MCLSETS, and Song-vs-ours setups all get configured differently
# Song benches
bench_config_nonsobol_song = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": [
"MCLevelSetEstimation",
"BernoulliMCMutualInformation",
"MCPosteriorVariance",
],
"modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "GPClassificationModel",
"parnames": "[context,intensity]",
},
"MCLevelSetEstimation": {
"target": 0.75,
"beta": 3.84,
"objective": "ProbitObjective",
},
"GPClassificationModel": {
"inducing_size": 100,
"dim": 2,
"mean_covar_factory": [
"song_mean_covar_factory",
],
},
"SingleProbitModelbridgeWithSongHeuristic": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [total_trials - sobol_trials],
"refit_every": 1,
},
}
bench_config_sobol_song = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": "MCLevelSetEstimation",
"modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "GPClassificationModel",
"parnames": "[context,intensity]",
},
"MCLevelSetEstimation": {
"target": 0.75,
"beta": 3.84,
"objective": "ProbitObjective",
},
"GPClassificationModel": {
"inducing_size": 100,
"dim": 2,
"mean_covar_factory": [
"song_mean_covar_factory",
],
},
"SingleProbitModelbridgeWithSongHeuristic": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": list(range(sobol_trials, total_trials - 1, log_every)),
},
"ModelWrapperStrategy": {
"n_trials": [1],
"refit_every": 1,
},
}
# non-Song benches
bench_config_sobol_rbf = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": "MonotonicMCLSE",
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicGPLSETS",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicGPLSETS": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": list(range(sobol_trials, total_trials - 1, log_every)),
},
"ModelWrapperStrategy": {
"n_trials": [1],
"refit_every": 1,
},
}
bench_config_all_but_gplsets_rbf = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": [
"MonotonicMCLSE",
"MonotonicBernoulliMCMutualInformation",
"MonotonicMCPosteriorVariance",
],
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicRejectionGP",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicBernoulliMCMutualInformation": {},
"MonotonicMCPosteriorVariance": {},
"MonotonicRejectionGP": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [total_trials - sobol_trials],
"refit_every": 1,
},
}
bench_config_gplsets_rbf = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": "MonotonicMCLSE",
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicGPLSETS",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicGPLSETS": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [total_trials - sobol_trials],
"refit_every": 1,
},
}
all_bench_configs = [
bench_config_sobol_song,
bench_config_nonsobol_song,
bench_config_sobol_rbf,
bench_config_all_but_gplsets_rbf,
bench_config_gplsets_rbf,
]
def make_problemobj(testfun, lb, ub):
# This constructs a Problem from a
# test function and bounds
class Inner(LSEProblem, Problem):
def f(self, x):
return testfun(x)
obj = Inner(lb=lb, ub=ub)
return obj
def make_bench(testfun, logger, name, configs, lb, ub):
# make a bench object from test function config
# and bench config
benches = []
problem = make_problemobj(testfun, lb, ub)
for config in configs:
full_config = copy(config)
full_config["common"]["lb"] = str(lb)
full_config["common"]["ub"] = str(ub)
full_config["common"]["name"] = name
benches.append(
PathosBenchmark(
nproc=nproc,
problem=problem,
logger=logger,
configs=full_config,
global_seed=global_seed,
n_reps=n_reps,
)
)
return combine_benchmarks(*benches)
def aggregate_bench_results(all_benchmarks):
combo_logger = BenchmarkLogger(log_every=log_every)
for bench in all_benchmarks:
combo_logger._log.extend(bench.logger._log)
out_pd = combo_logger.pandas()
return out_pd
if __name__ == "__main__":
# one benchmark per test function
print("Creating benchmark objects...")
all_benchmarks = [
make_bench(testfun, combo_logger, name, all_bench_configs, **bounds)
for (testfun, bounds, name) in zip(all_testfuns, all_bounds, all_names)
]
# start all the benchmarks
print("Starting benchmarks...")
for bench in all_benchmarks:
bench_name = bench.combinations[0]["common"]["name"]
print(f"starting {bench_name}...")
bench.start_benchmarks()
done = False
# checkpoint every minute in case something breaks
while not done:
time.sleep(60)
print("Checkpointing benches...")
done = True
for bench in all_benchmarks:
bench_name = bench.combinations[0]["common"]["name"]
bench.collate_benchmarks(wait=False)
if bench.is_done:
print(f"bench {bench_name} is done!")
else:
done = False
temp_results = aggregate_bench_results(all_benchmarks)
temp_results.to_csv(f"bench_checkpoint_seed{global_seed}.csv")
print("Done with all benchmarks, saving!")
final_results = aggregate_bench_results(all_benchmarks)
final_results.to_csv(f"bench_final_seed{global_seed}.csv")
|
aepsych-main
|
pubs/owenetal/code/benchmark_threshold.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from copy import copy
import matplotlib.pyplot as plt
import numpy as np
import torch
from aepsych.benchmark import (
Problem,
LSEProblem,
BenchmarkLogger,
Benchmark,
combine_benchmarks,
)
from aepsych.benchmark.test_functions import (
make_songetal_testfun,
novel_detection_testfun,
novel_discrimination_testfun,
)
from aepsych.config import Config
from aepsych.plotting import plot_strat
from aepsych.strategy import SequentialStrategy
from scipy.stats import norm
global_seed = 3
refit_every = 1
figdir = "./figs/"
def plot_audiometric_lse_grids(
sobol_trials, opt_trials, phenotype="Metabolic+Sensory", beta=2
):
"""
Generates Fig. 8
"""
logger = BenchmarkLogger(log_every=5)
bench_rbf = {
"common": {"pairwise": False, "target": 0.75},
"experiment": {
"acqf": "MonotonicMCLSE",
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicRejectionGP",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicRejectionGP": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [opt_trials],
"refit_every": [refit_every],
},
}
bench_song = {
"common": {"pairwise": False, "target": 0.75},
"experiment": {
"acqf": "BernoulliMCMutualInformation",
"modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "GPClassificationModel",
"parnames": "[context,intensity]",
},
"GPClassificationModel": {
"inducing_size": 100,
"dim": 2,
"mean_covar_factory": [
"song_mean_covar_factory",
],
},
"SingleProbitModelbridgeWithSongHeuristic": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [opt_trials],
"refit_every": [refit_every],
},
}
all_bench_configs = [bench_rbf, bench_song]
testfun = make_songetal_testfun(phenotype=phenotype, beta=beta)
class AudiometricProblem(LSEProblem, Problem):
def f(self, x):
return testfun(x)
lb = [-3, -20]
ub = [4, 120]
benches = []
problem = AudiometricProblem(lb, ub)
for config in all_bench_configs:
full_config = copy(config)
full_config["common"]["lb"] = str(lb)
full_config["common"]["ub"] = str(ub)
benches.append(
Benchmark(
problem=problem,
logger=logger,
configs=full_config,
global_seed=global_seed,
n_reps=1,
)
)
combo_bench = combine_benchmarks(*benches)
strats = []
for config in combo_bench.combinations:
strat = combo_bench.run_experiment(config, logger, seed=global_seed, rep=0)
strats.append(strat)
titles = [
"Monotonic RBF Model, LSE (ours)",
"Nonmonotonic RBF Model, LSE (ours)",
"Linear-Additive Model, BALD",
]
fig, axes = plt.subplots(2, 2, figsize=(7.5, 6.5))
plotting_axes = [axes[1, 0], axes[0, 1], axes[0, 0]]
fig.delaxes(axes[1, 1])
_ = [
plot_strat(
strat=strat_,
title=title_,
ax=ax_,
true_testfun=testfun,
xlabel="Frequency (kHz)",
ylabel="Intensity (dB HL)",
flipx=True,
logx=True,
show=False,
include_legend=False,
include_colorbar=False
)
for ax_, strat_, title_ in zip(plotting_axes, strats, titles)
]
fig.tight_layout()
handles, labels = axes[1, 0].get_legend_handles_labels()
fig.legend(handles, labels, loc="lower right", bbox_to_anchor=(0.8, 0.2))
cbr = fig.colorbar(axes[1, 0].images[0], ax=plotting_axes)
cbr.set_label("Probability of Detection")
return fig
def plot_novel_lse_grids(sobol_trials, opt_trials, funtype="detection"):
"""
Generates Fig. TBA
"""
logger = BenchmarkLogger(log_every=opt_trials) # we only care about final perf
bench_rbf = {
"common": {"pairwise": False, "target": 0.75},
"experiment": {
"acqf": "MonotonicMCLSE",
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicRejectionGP",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicRejectionGP": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [opt_trials],
"refit_every": [refit_every],
},
}
bench_song = {
"common": {"pairwise": False, "target": 0.75},
"experiment": {
"acqf": "BernoulliMCMutualInformation",
"modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "GPClassificationModel",
"parnames": "[context,intensity]",
},
"GPClassificationModel": {
"inducing_size": 100,
"dim": 2,
"mean_covar_factory": [
"song_mean_covar_factory",
],
},
"SingleProbitModelbridgeWithSongHeuristic": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [opt_trials],
"refit_every": [refit_every],
},
}
all_bench_configs = [bench_rbf, bench_song]
if funtype == "detection":
testfun = novel_detection_testfun
yes_label = "Detected trial"
no_label = "Nondetected trial"
elif funtype == "discrimination":
testfun = novel_discrimination_testfun
yes_label = "Correct trial"
no_label = "Incorrect trial"
else:
raise RuntimeError("unknown testfun")
class NovelProblem(LSEProblem, Problem):
def f(self, x):
return testfun(x)
lb = [-1, -1]
ub = [1, 1]
benches = []
problem = NovelProblem(lb, ub, gridsize=50)
for config in all_bench_configs:
full_config = copy(config)
full_config["common"]["lb"] = str(lb)
full_config["common"]["ub"] = str(ub)
benches.append(
Benchmark(
problem=problem,
logger=logger,
configs=full_config,
global_seed=global_seed,
n_reps=1,
)
)
combo_bench = combine_benchmarks(*benches)
strats = []
for config in combo_bench.combinations:
strat = combo_bench.run_experiment(config, logger, seed=global_seed, rep=0)
strats.append(strat)
titles = [
"Monotonic RBF Model, LSE (ours)",
"Nonmonotonic RBF Model, LSE (ours)",
"Linear-Additive Model, BALD",
]
fig, axes = plt.subplots(2, 2, figsize=(7.5, 6.5))
plotting_axes = [axes[1, 0], axes[0, 1], axes[0, 0]]
fig.delaxes(axes[1, 1])
_ = [
plot_strat(
strat=strat_,
title=title_,
ax=ax_,
true_testfun=testfun,
yes_label=yes_label,
no_label=no_label,
show=False,
include_legend=False,
include_colorbar=False
)
for ax_, strat_, title_ in zip(plotting_axes, strats, titles)
]
fig.tight_layout()
handles, labels = axes[1, 0].get_legend_handles_labels()
fig.legend(handles, labels, loc="lower right", bbox_to_anchor=(0.8, 0.2))
cbr = fig.colorbar(axes[1, 0].images[0], ax=plotting_axes)
cbr.set_label("Probability of Detection")
return fig
def plot_acquisition_examples(sobol_trials, opt_trials, target_level=0.75):
### Same model, different acqf figure ####
configs = {
"common": {
"pairwise": False,
"target": target_level,
"lb": "[-3]",
"ub": "[3]",
},
"experiment": {
"acqf": [
"MonotonicMCPosteriorVariance",
"MonotonicBernoulliMCMutualInformation",
"MonotonicMCLSE",
],
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicRejectionGP",
"parnames": "[intensity]",
},
"MonotonicMCLSE": {
"target": target_level,
"beta": 3.84,
},
"MonotonicRejectionGP": {
"inducing_size": 100,
"mean_covar_factory": "monotonic_mean_covar_factory",
"monotonic_idxs": "[0]",
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {"n_trials": sobol_trials},
"ModelWrapperStrategy": {
"n_trials": opt_trials,
"refit_every": refit_every,
},
}
def true_testfun(x):
return norm.cdf(3 * x)
class SimpleLinearProblem(Problem):
def f(self, x):
return norm.ppf(true_testfun(x))
lb = [-3]
ub = [3]
logger = BenchmarkLogger()
problem = SimpleLinearProblem(lb, ub)
bench = Benchmark(
problem=problem,
logger=logger,
configs=configs,
global_seed=global_seed,
n_reps=1,
)
    # now run each for just the sobol_trials init trials, taking care to reseed each time
strats = []
for c in bench.combinations:
np.random.seed(global_seed)
torch.manual_seed(global_seed)
s = SequentialStrategy.from_config(Config(config_dict=c))
for _ in range(sobol_trials):
next_x = s.gen()
s.add_data(next_x, [problem.sample_y(next_x)])
strats.append(s)
# get first gen from all 3
first_gens = [s.gen() for s in strats]
fig, ax = plt.subplots(2, 2)
plot_strat(
strat=strats[0],
title=f"First active trial\n (after {sobol_trials} Sobol trials)",
ax=ax[0, 0],
true_testfun=true_testfun,
target_level=target_level,
show=False,
include_legend=False
)
samps = [
norm.cdf(s.sample(torch.Tensor(g), num_samples=10000))
for s, g in zip(strats, first_gens)
]
predictions = [np.mean(s) for s in samps]
names = ["First BALV sample", "First BALD sample", "First LSE sample"]
markers = ["s", "*", "^"]
for i in range(3):
ax[0, 0].scatter(
first_gens[i][0][0],
predictions[i],
label=names[i],
marker=markers[i],
color="black",
)
# now run them all for the full duration
for s in strats:
for _tr in range(opt_trials):
next_x = s.gen()
s.add_data(next_x, [problem.sample_y(next_x)])
plotting_axes = [ax[0, 1], ax[1, 0], ax[1, 1]]
titles = [
f"Monotonic RBF Model,\n BALV, after {sobol_trials+opt_trials} total trials",
f"Monotonic RBF Model,\n BALD, after {sobol_trials+opt_trials} total trials",
f"Monotonic RBF Model,\n LSE (ours) after {sobol_trials+opt_trials} total trials",
]
_ = [
plot_strat(
strat=s, title=t, ax=a, true_testfun=true_testfun, target_level=target_level, show=False, include_legend=False
)
for a, s, t in zip(plotting_axes, strats, titles)
]
fig.tight_layout()
handles, labels = ax[0, 0].get_legend_handles_labels()
lgd = fig.legend(handles, labels, loc="lower right", bbox_to_anchor=(1.5, 0.25))
# return legend so savefig works correctly
return fig, lgd
if __name__ == "__main__":
audio_lse_grids_fig = plot_audiometric_lse_grids(sobol_trials=5, opt_trials=45)
audio_lse_grids_fig.savefig(fname=figdir + "audio_lse_grids_fig.pdf", dpi=200)
novel_detection_lse_grids_fig = plot_novel_lse_grids(
sobol_trials=5, opt_trials=45, funtype="detection"
)
novel_detection_lse_grids_fig.savefig(
fname=figdir + "detection_lse_grids_fig.pdf", dpi=200
)
# this is extra hard, run more trials
novel_discrimination_lse_grids_fig = plot_novel_lse_grids(
sobol_trials=5, opt_trials=95, funtype="discrimination"
)
novel_discrimination_lse_grids_fig.savefig(
fname=figdir + "discrimination_lse_grids_fig.pdf", dpi=200
)
same_model_different_acq_fig, lgd = plot_acquisition_examples(
sobol_trials=5, opt_trials=15
)
same_model_different_acq_fig.savefig(
fname=figdir + "same_model_different_acq.pdf",
bbox_extra_artists=(lgd,),
bbox_inches="tight",
dpi=200,
)
|
aepsych-main
|
pubs/owenetal/code/stratplots.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import gpytorch
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
from botorch.utils.sampling import draw_sobol_samples
from scipy.stats import norm
sns.set_theme()
from aepsych.config import Config
from aepsych.factory import (
default_mean_covar_factory,
song_mean_covar_factory,
monotonic_mean_covar_factory,
)
from aepsych.models import GPClassificationModel, MonotonicRejectionGP
from aepsych.models.monotonic_rejection_gp import MixedDerivativeVariationalGP
from aepsych.utils import _dim_grid
global_seed = 3
def plot_prior_samps_1d():
config = Config(
config_dict={
"common": {
"outcome_type": "single_probit",
"target": 0.75,
"lb": "[-3]",
"ub": "[3]",
},
"default_mean_covar_factory": {},
"song_mean_covar_factory": {},
"monotonic_mean_covar_factory": {"monotonic_idxs": "[0]"},
}
)
lb = torch.Tensor([-3])
ub = torch.Tensor([3])
nsamps = 10
gridsize = 50
grid = _dim_grid(lower=lb, upper=ub, dim=1, gridsize=gridsize)
np.random.seed(global_seed)
torch.random.manual_seed(global_seed)
with gpytorch.settings.prior_mode(True):
rbf_mean, rbf_covar = default_mean_covar_factory(config)
rbf_model = GPClassificationModel(
inducing_min=lb,
inducing_max=ub,
inducing_size=100,
mean_module=rbf_mean,
covar_module=rbf_covar,
)
# add just two samples at high and low
rbf_model.set_train_data(
torch.Tensor([-3, 3])[:, None], torch.LongTensor([0, 1])
)
rbf_samps = rbf_model(grid).sample(torch.Size([nsamps]))
song_mean, song_covar = song_mean_covar_factory(config)
song_model = GPClassificationModel(
inducing_min=lb,
inducing_max=ub,
inducing_size=100,
mean_module=song_mean,
covar_module=song_covar,
)
song_model.set_train_data(
torch.Tensor([-3, 3])[:, None], torch.LongTensor([0, 1])
)
song_samps = song_model(grid).sample(torch.Size([nsamps]))
mono_mean, mono_covar = monotonic_mean_covar_factory(config)
mono_model = MonotonicRejectionGP(
likelihood="probit-bernoulli",
monotonic_idxs=[0],
mean_module=mono_mean,
covar_module=mono_covar,
)
bounds_ = torch.tensor([-3.0, 3.0])[:, None]
# Select inducing points
mono_model.inducing_points = draw_sobol_samples(
bounds=bounds_, n=mono_model.num_induc, q=1
).squeeze(1)
inducing_points_aug = mono_model._augment_with_deriv_index(
mono_model.inducing_points, 0
)
scales = ub - lb
dummy_train_x = mono_model._augment_with_deriv_index(
torch.Tensor([-3, 3])[:, None], 0
)
mono_model.model = MixedDerivativeVariationalGP(
train_x=dummy_train_x,
train_y=torch.LongTensor([0, 1]),
inducing_points=inducing_points_aug,
scales=scales,
fixed_prior_mean=torch.Tensor([0.75]),
covar_module=mono_covar,
mean_module=mono_mean,
)
mono_samps = mono_model.sample(grid, nsamps)
fig, ax = plt.subplots(1, 3, figsize=(7.5, 3))
fig.tight_layout(rect=[0.01, 0.03, 1, 0.9])
fig.suptitle("GP prior samples (probit-transformed)")
ax[0].plot(grid.squeeze(), norm.cdf(song_samps.T), "b")
ax[0].set_ylabel("Response Probability")
ax[0].set_title("Linear kernel")
ax[1].plot(grid.squeeze(), norm.cdf(rbf_samps.T), "b")
ax[1].set_xlabel("Intensity")
ax[1].set_title("RBF kernel (nonmonotonic)")
ax[2].plot(grid.squeeze(), norm.cdf(mono_samps.T), "b")
ax[2].set_title("RBF kernel (monotonic)")
return fig
def plot_prior_samps_2d():
config = Config(
config_dict={
"common": {
"outcome_type": "single_probit",
"target": 0.75,
"lb": "[-3, -3]",
"ub": "[3, 3]",
},
"default_mean_covar_factory": {},
"song_mean_covar_factory": {},
"monotonic_mean_covar_factory": {"monotonic_idxs": "[1]"},
}
)
lb = torch.Tensor([-3, -3])
ub = torch.Tensor([3, 3])
nsamps = 5
gridsize = 30
grid = _dim_grid(lower=lb, upper=ub, dim=2, gridsize=gridsize)
np.random.seed(global_seed)
torch.random.manual_seed(global_seed)
with gpytorch.settings.prior_mode(True):
rbf_mean, rbf_covar = default_mean_covar_factory(config)
rbf_model = GPClassificationModel(
inducing_min=lb,
inducing_max=ub,
inducing_size=100,
mean_module=rbf_mean,
covar_module=rbf_covar,
)
        # add a single sample at the low corner
        rbf_model.set_train_data(torch.Tensor([-3, -3])[None, :], torch.LongTensor([0]))
rbf_samps = rbf_model(grid).sample(torch.Size([nsamps]))
song_mean, song_covar = song_mean_covar_factory(config)
song_model = GPClassificationModel(
inducing_min=lb,
inducing_max=ub,
inducing_size=100,
mean_module=song_mean,
covar_module=song_covar,
)
        song_model.set_train_data(
            torch.Tensor([-3, -3])[None, :], torch.LongTensor([0])
        )
song_samps = song_model(grid).sample(torch.Size([nsamps]))
mono_mean, mono_covar = monotonic_mean_covar_factory(config)
mono_model = MonotonicRejectionGP(
likelihood="probit-bernoulli",
monotonic_idxs=[1],
mean_module=mono_mean,
covar_module=mono_covar,
num_induc=1000,
)
bounds_ = torch.tensor([-3.0, -3.0, 3.0, 3.0]).reshape(2, -1)
# Select inducing points
mono_model.inducing_points = draw_sobol_samples(
bounds=bounds_, n=mono_model.num_induc, q=1
).squeeze(1)
inducing_points_aug = mono_model._augment_with_deriv_index(
mono_model.inducing_points, 0
)
scales = ub - lb
dummy_train_x = mono_model._augment_with_deriv_index(
torch.Tensor([-3, 3])[None, :], 0
)
mono_model.model = MixedDerivativeVariationalGP(
train_x=dummy_train_x,
train_y=torch.LongTensor([0]),
inducing_points=inducing_points_aug,
scales=scales,
fixed_prior_mean=torch.Tensor([0.75]),
covar_module=mono_covar,
mean_module=mono_mean,
)
mono_samps = mono_model.sample(grid, nsamps)
intensity_grid = np.linspace(-3, 3, gridsize)
fig, ax = plt.subplots(1, 3, figsize=(7.5, 3))
fig.tight_layout(rect=[0, 0.03, 1, 0.9])
fig.suptitle("Prior samples")
square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in song_samps])
plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
ax[0].plot(intensity_grid, plotsamps, "b")
ax[0].set_title("Linear kernel model")
square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in rbf_samps])
plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
ax[1].plot(intensity_grid, plotsamps, "b")
ax[1].set_title("Nonmonotonic RBF kernel model")
square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in mono_samps])
plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
ax[2].plot(intensity_grid, plotsamps, "b")
ax[2].set_title("Monotonic RBF kernel model")
return fig
if __name__ == "__main__":
prior_samps_1d = plot_prior_samps_1d()
prior_samps_1d.savefig("./figs/prior_samps.pdf", dpi=200)
|
aepsych-main
|
pubs/owenetal/code/prior_plots.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the scripts directory.
from __future__ import annotations
import argparse
import json
import os
import nbformat
from bs4 import BeautifulSoup
from nbconvert import HTMLExporter, PythonExporter
TEMPLATE = """const CWD = process.cwd();
const React = require('react');
const Tutorial = require(`${{CWD}}/core/Tutorial.js`);
class TutorialPage extends React.Component {{
render() {{
const {{config: siteConfig}} = this.props;
const {{baseUrl}} = siteConfig;
return <Tutorial baseUrl={{baseUrl}} tutorialID="{}"/>;
}}
}}
module.exports = TutorialPage;
"""
JS_SCRIPTS = """
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js"></script>
""" # noqa: E501
def validate_tutorial_links(repo_dir: str) -> None:
"""Checks that all .ipynb files that present are linked on the website, and vice
versa, that any linked tutorial has an associated .ipynb file present.
"""
with open(os.path.join(repo_dir, "website", "tutorials.json"), "r") as infile:
tutorial_config = json.load(infile)
tutorial_ids = {x["id"] for v in tutorial_config.values() for x in v}
tutorials_nbs = {
fn.replace(".ipynb", "")
for fn in os.listdir(os.path.join(repo_dir, "tutorials"))
if fn[-6:] == ".ipynb"
}
missing_files = tutorial_ids - tutorials_nbs
missing_ids = tutorials_nbs - tutorial_ids
if missing_files:
raise RuntimeError(
"The following tutorials are linked on the website, but missing an "
f"associated .ipynb file: {missing_files}."
)
if missing_ids:
        print(
            "\033[93m" + "Warning: " + "\x1b[0m"
            + "The following tutorial files are present, but are not linked on the "
            "website: {}.".format(", ".join([nbid + ".ipynb" for nbid in missing_ids]))
        )
# raise RuntimeError(
# "The following tutorial files are present, but are not linked on the "
# "website: {}.".format(", ".join([nbid + ".ipynb" for nbid in missing_ids]))
# )
def gen_tutorials(repo_dir: str) -> None:
"""Generate HTML tutorials for AEPsych Docusaurus site from Jupyter notebooks.
Also create ipynb and py versions of tutorial in Docusaurus site for
download.
"""
with open(os.path.join(repo_dir, "website", "tutorials.json"), "r") as infile:
tutorial_config = json.load(infile)
# create output directories if necessary
html_out_dir = os.path.join(repo_dir, "website", "_tutorials")
files_out_dir = os.path.join(repo_dir, "website", "static", "files")
for d in (html_out_dir, files_out_dir):
if not os.path.exists(d):
os.makedirs(d)
tutorial_ids = {x["id"] for v in tutorial_config.values() for x in v}
for tid in tutorial_ids:
print(f"Generating {tid} tutorial")
# convert notebook to HTML
ipynb_in_path = os.path.join(repo_dir, "tutorials", f"{tid}.ipynb")
with open(ipynb_in_path, "r") as infile:
nb_str = infile.read()
nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)
# displayname is absent from notebook metadata
nb["metadata"]["kernelspec"]["display_name"] = "python3"
exporter = HTMLExporter(template_name="classic")
html, meta = exporter.from_notebook_node(nb)
# pull out html div for notebook
soup = BeautifulSoup(html, "html.parser")
nb_meat = soup.find("div", {"id": "notebook-container"})
del nb_meat.attrs["id"]
nb_meat.attrs["class"] = ["notebook"]
html_out = JS_SCRIPTS + str(nb_meat)
# generate html file
html_out_path = os.path.join(
html_out_dir,
f"{tid}.html",
)
with open(html_out_path, "w") as html_outfile:
html_outfile.write(html_out)
# generate JS file
script = TEMPLATE.format(tid)
js_out_path = os.path.join(
repo_dir, "website", "pages", "tutorials", f"{tid}.js"
)
with open(js_out_path, "w") as js_outfile:
js_outfile.write(script)
# output tutorial in both ipynb & py form
ipynb_out_path = os.path.join(files_out_dir, f"{tid}.ipynb")
with open(ipynb_out_path, "w") as ipynb_outfile:
ipynb_outfile.write(nb_str)
exporter = PythonExporter()
script, meta = exporter.from_notebook_node(nb)
# make sure to use python3 shebang
script = script.replace("#!/usr/bin/env python", "#!/usr/bin/env python3")
py_out_path = os.path.join(repo_dir, "website", "static", "files", f"{tid}.py")
with open(py_out_path, "w") as py_outfile:
py_outfile.write(script)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate JS, HTML, ipynb, and py files for tutorials."
)
parser.add_argument(
"-w",
"--repo_dir",
metavar="path",
required=True,
help="aepsych repo directory.",
)
args = parser.parse_args()
validate_tutorial_links(args.repo_dir)
gen_tutorials(args.repo_dir)
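# Example invocation (a usage sketch inferred from the argparse setup above;
# the repo path is a placeholder):
#   python scripts/parse_tutorials.py -w /path/to/aepsych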
|
aepsych-main
|
scripts/parse_tutorials.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the scripts directory.
from __future__ import annotations
import argparse
import os
from bs4 import BeautifulSoup
# The base_url must match the base URL in /website/siteConfig.js.
# Note: if it is not updated, the API doc search bar will not be displayed.
# 1) update base_url below
base_url = "/"
js_scripts = """
<script type="text/javascript" id="documentation_options" data-url_root="./" src="{0}js/documentation_options.js"></script>
<script type="text/javascript" src="{0}js/jquery.js"></script>
<script type="text/javascript" src="{0}js/underscore.js"></script>
<script type="text/javascript" src="{0}js/doctools.js"></script>
<script type="text/javascript" src="{0}js/language_data.js"></script>
<script type="text/javascript" src="{0}js/searchtools.js"></script>
""".format(base_url) # noqa: E501
# 2) update
# Search.loadIndex("/<<update to match baseUrl>>/js/searchindex.js"
search_js_scripts = """
<script type="text/javascript">
jQuery(function() { Search.loadIndex("/js/searchindex.js"); });
</script>
<script type="text/javascript" id="searchindexloader"></script>
"""
def parse_sphinx(input_dir, output_dir):
for cur, _, files in os.walk(input_dir):
for fname in files:
if fname.endswith(".html"):
with open(os.path.join(cur, fname), "r") as f:
soup = BeautifulSoup(f.read(), "html.parser")
doc = soup.find("div", {"class": "document"})
wrapped_doc = doc.wrap(soup.new_tag("div", **{"class": "sphinx"}))
# add js
if fname == "search.html":
out = js_scripts + search_js_scripts + str(wrapped_doc)
else:
out = js_scripts + str(wrapped_doc)
output_path = os.path.join(output_dir, os.path.relpath(cur, input_dir))
os.makedirs(output_path, exist_ok=True)
with open(os.path.join(output_path, fname), "w") as fout:
fout.write(out)
# update reference in JS file
with open(os.path.join(input_dir, "_static/searchtools.js"), "r") as js_file:
js = js_file.read()
js = js.replace(
"DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/'", "'_sphinx-sources/'"
)
with open(os.path.join(input_dir, "_static/searchtools.js"), "w") as js_file:
js_file.write(js)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Strip HTML body from Sphinx docs.")
parser.add_argument(
"-i",
"--input_dir",
metavar="path",
required=True,
help="Input directory for Sphinx HTML.",
)
parser.add_argument(
"-o",
"--output_dir",
metavar="path",
required=True,
help="Output directory in Docusaurus.",
)
args = parser.parse_args()
parse_sphinx(args.input_dir, args.output_dir)
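# Example invocation (a usage sketch inferred from the argparse setup above;
# both directory paths are placeholders):
#   python scripts/parse_sphinx.py -i /path/to/sphinx/html -o /path/to/website/output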
|
aepsych-main
|
scripts/parse_sphinx.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
from __future__ import annotations
import argparse
import json
import os
import shutil
import nbformat
from bs4 import BeautifulSoup
from nbconvert import HTMLExporter
TEMPLATE = """const CWD = process.cwd();
const React = require('react');
const Demo = require(`${{CWD}}/core/Demo.js`);
class DemoPage extends React.Component {{
render() {{
const {{config: siteConfig}} = this.props;
const {{baseUrl}} = siteConfig;
return <Demo baseUrl={{baseUrl}} demoID="{}" hasWinDemo="{}"
hasMacDemo="{}"/>;
}}
}}
module.exports = DemoPage;
"""
JS_SCRIPTS = """
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js"></script>
"""
def validate_demo_links(repo_dir: str) -> None:
"""Checks that all .zip files that present are linked on the website, and vice
versa, that any linked demos has an associated .zip file present.
"""
with open(os.path.join(repo_dir, "website", "demos.json")) as f:
demo_config = json.load(f)
demo_ids = {x["id"] for v in demo_config.values() for x in v}
demo_names = {
fn.replace(".zip", "")
for fn in os.listdir(os.path.join(repo_dir, "demos"))
if fn[-4:] == ".zip"
}
# Check if the ID is present in the set and if both "_Mac" and "_Win" endings exist
    for id in demo_ids:
        if f"{id}_Mac" in demo_names and f"{id}_Win" in demo_names:
            print(f"Both '{id}_Mac' and '{id}_Win' demo .zip files are present.")
        elif f"{id}_Mac" in demo_names:
            print(f"Only '{id}_Mac'.zip demo is present.")
        elif f"{id}_Win" in demo_names:
            print(f"Only '{id}_Win'.zip demo is present.")
        else:
            print(f"No .zip file is present for demo '{id}'.")
def gen_demos(repo_dir: str) -> None:
"""Generate HTML demos for AEPsych Docusaurus site for download."""
with open(os.path.join(repo_dir, "website", "demos.json"), "r") as f:
demo_config = json.load(f)
# create output directories if necessary
html_out_dir = os.path.join(repo_dir, "website", "_demos")
files_out_dir = os.path.join(repo_dir, "website", "static", "files", "demos")
for d in (html_out_dir, files_out_dir):
if not os.path.exists(d):
os.makedirs(d)
demo_ids = {x["id"] for v in demo_config.values() for x in v}
for d_id in demo_ids:
print(f"Generating {d_id} demo")
# convert markdown to HTML
md_in_path = os.path.join(repo_dir, "demos", "markdown", f"{d_id}.md")
with open(md_in_path, "r") as infile:
markdown_content = infile.read()
notebook_node = nbformat.v4.new_notebook()
markdown_cell = nbformat.v4.new_markdown_cell(markdown_content)
notebook_node["cells"] = [markdown_cell]
exporter = HTMLExporter(template_name="classic")
html, meta = exporter.from_notebook_node(notebook_node)
# pull out html div for notebook
soup = BeautifulSoup(html, "html.parser")
nb_meat = soup.find("div", {"id": "notebook-container"})
del nb_meat.attrs["id"]
nb_meat.attrs["class"] = ["notebook"]
html_out = JS_SCRIPTS + str(nb_meat)
# generate html file
html_out_path = os.path.join(
html_out_dir,
f"{d_id}.html",
)
with open(html_out_path, "w") as html_outfile:
html_outfile.write(html_out)
# generate JS file
has_mac_demo = os.path.exists(os.path.join(repo_dir, "demos", f"{d_id}_Mac.zip"))
has_win_demo = os.path.exists(os.path.join(repo_dir, "demos", f"{d_id}_Win.zip"))
script = TEMPLATE.format(d_id,has_win_demo,has_mac_demo)
js_out_path = os.path.join(repo_dir, "website", "pages", "demos", f"{d_id}.js")
with open(js_out_path, "w") as js_outfile:
js_outfile.write(script)
# output demo in zip format
if has_mac_demo:
mac_source_path = os.path.join(repo_dir, "demos", f"{d_id}_Mac.zip")
mac_zip_out_path = os.path.join(files_out_dir, f"{d_id}_Mac.zip")
shutil.copy(mac_source_path, mac_zip_out_path)
if has_win_demo:
win_source_path = os.path.join(repo_dir, "demos", f"{d_id}_Win.zip")
win_zip_out_path = os.path.join(files_out_dir, f"{d_id}_Win.zip")
shutil.copy(win_source_path, win_zip_out_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate JS, HTML, and zip files for demos."
)
parser.add_argument(
"-w",
"--repo_dir",
metavar="path",
required=True,
help="aepsych repo directory.",
)
args = parser.parse_args()
validate_demo_links(args.repo_dir)
gen_demos(args.repo_dir)
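# Example invocation (a usage sketch inferred from the argparse setup above;
# the repo path is a placeholder):
#   python scripts/parse_demos.py -w /path/to/aepsych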
|
aepsych-main
|
scripts/parse_demos.py
|
from setuptools import find_packages, setup
from os.path import basename, splitext
from glob import glob
setup(name='smallfry',
version='0.1',
description='Code for smallfry.',
packages=find_packages("src"),
package_dir={"": "src"},
py_modules=[splitext(basename(path))[0] for path in glob("src/*.py")],
url='https://github.com/HazyResearch/smallfry.git',
author='Avner May / Jian Zhang',
author_email='zjian@stanford.edu',
license='Apache Version 2',
install_requires = ['numpy',
'torch']
)
|
smallfry-master
|
setup.py
|
from quant_embedding import compress_long_mat
from quant_embedding import decompress_long_mat
from quant_embedding import QuantEmbedding
from quant_embedding import quantize_embed
import compress
import unittest
from unittest import TestCase
import torch
import numpy as np
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger("quant embedding test")
EMBEDDING_TEST_FILE = "./test_embed.txt"
class QuantEmbeddingTest(TestCase):
def test_compress_decompress_funcs(self):
        # test that the compress and decompress functions are inverses of each other
n_bit = int(np.random.choice([2, 4, 8, 16, 32]))
n_vals = int(2**n_bit)
n_dim = np.random.randint(low=2, high=100)
batch_size = np.random.randint(low=3, high=16)
length = np.random.randint(low=10, high=50)
input = torch.LongTensor(batch_size, length, n_dim).random_(to=n_vals)
compressed = compress_long_mat(input, n_bit)
decompressed = decompress_long_mat(compressed, n_bit, dim=n_dim)
# print(input)
# print(decompressed)
assert torch.all(torch.eq(input, decompressed))
    def test_embedding_replacement_func(self):
layer1 = torch.nn.Embedding(100, 10)
layer2 = torch.nn.Embedding(200, 20)
layer3 = torch.nn.Embedding(300, 30)
layer4 = torch.nn.Embedding(400, 40)
module_list1 = torch.nn.ModuleList([layer1, layer2])
module_list2 = torch.nn.ModuleList([layer3, layer4])
module_list = torch.nn.ModuleList([module_list1, module_list2])
module_list_comp = quantize_embed(module_list, nbit=4)
assert isinstance(module_list_comp[0][0], QuantEmbedding)
assert isinstance(module_list_comp[0][1], QuantEmbedding)
assert isinstance(module_list_comp[1][0], QuantEmbedding)
assert isinstance(module_list_comp[1][1], QuantEmbedding)
layer1 = torch.nn.Embedding(100, 10)
layer2 = torch.nn.Embedding(200, 20)
layer3 = torch.nn.Embedding(300, 30)
layer4 = torch.nn.Embedding(400, 40)
module_list1 = torch.nn.Sequential(layer1, layer2)
module_list2 = torch.nn.Sequential(layer3, layer4)
module_list = torch.nn.Sequential(module_list1, module_list2)
module_list_comp = quantize_embed(module_list, nbit=4)
assert isinstance(module_list_comp[0][0], QuantEmbedding)
assert isinstance(module_list_comp[0][1], QuantEmbedding)
assert isinstance(module_list_comp[1][0], QuantEmbedding)
assert isinstance(module_list_comp[1][1], QuantEmbedding)
def generate_embedding_file(self,
n_bit,
n_dim,
n_word,
quantized_input,
file_name=EMBEDDING_TEST_FILE):
if quantized_input:
n_val = int(2**n_bit)
value_list = np.random.rand(n_val)
index = np.random.randint(low=0, high=n_val, size=(n_word, n_dim))
embedding = value_list[index]
else:
embedding = np.random.rand(n_word, n_dim)
with open(file_name, "w") as f:
for i in range(n_word):
line = "x" + str(i) + "".join(
[" " + str(val) for val in embedding[i]]) + "\n"
f.write(line)
logger.info("generated embedding, nbit, ndim, n_word " +
str(n_bit) + " / " + str(n_dim) + "/ " + str(n_word))
return torch.FloatTensor(embedding)
def get_embeddings_for_test(self, quantized_input=False, use_file=True):
n_dim = int(np.random.randint(low=1, high=100))
n_bit = int(np.random.choice([2, 4, 8, 16]))
n_word = np.random.choice(np.arange(1, 100))
input_embedding = self.generate_embedding_file(
n_dim=n_dim,
n_bit=n_bit,
n_word=n_word,
quantized_input=quantized_input,
file_name=EMBEDDING_TEST_FILE)
if use_file:
weight = None
embedding_file = EMBEDDING_TEST_FILE
else:
weight = input_embedding.clone()
embedding_file = None
# test if the file is loaded correctly
# test 32 bit representation
embedding = QuantEmbedding(
num_embeddings=n_word,
embedding_dim=n_dim,
padding_idx=0,
nbit=32,
_weight=weight,
embedding_file=embedding_file)
# test non 32 bit representation
quant_embedding = QuantEmbedding(
num_embeddings=n_word,
embedding_dim=n_dim,
padding_idx=0,
nbit=n_bit,
_weight=weight,
embedding_file=embedding_file)
if quantized_input or np.unique(input_embedding).size <= 2**n_bit:
ref_embedding = input_embedding.clone()
else:
            # we only quantize when there are not enough bits
ref_embedding, _, _ = compress.compress_uniform(
input_embedding.cpu().numpy(),
n_bit,
adaptive_range=True,
stochastic_round=False)
ref_embedding = torch.FloatTensor(ref_embedding)
assert embedding.embedding_dim == ref_embedding.size(-1)
assert quant_embedding.embedding_dim == ref_embedding.size(-1)
return input_embedding, ref_embedding, embedding, quant_embedding
def forward(self, cuda=False):
config_list = [("load quantized file as input test ", {
"quantized_input": True,
"use_file": True
}), ("load unquantized file as input test ", {
"quantized_input": False,
"use_file": True
}), ("load unquantized tensor as input test ", {
"quantized_input": False,
"use_file": False
}), ("load quantized tensor as input test ", {
"quantized_input": True,
"use_file": False
})]
for info, config in config_list:
input_embedding, ref_embedding, embedding, quant_embedding = \
self.get_embeddings_for_test(**config)
# ref_embedding, embedding, quant_embedding = self.get_embeddings_for_test(
# embedding_file=EMBEDDING_TEST_FILE)
n_dim = embedding.weight.size(-1)
n_word = int(embedding.weight.size(0))
batch_size = np.random.randint(low=3, high=16)
length = np.random.randint(low=10, high=50)
input = torch.LongTensor(batch_size, length,
n_dim).random_(to=n_word)
if cuda and torch.cuda.is_available():
input_embedding = input_embedding.cuda()
ref_embedding = ref_embedding.cuda()
embedding = embedding.cuda()
quant_embedding = quant_embedding.cuda()
input = input.cuda()
assert quant_embedding.weight.is_cuda == cuda
assert quant_embedding.value_list.is_cuda == cuda
input_out = input_embedding[input]
ref_out = ref_embedding[input]
out = embedding(input)
quant_out = quant_embedding(input)
assert torch.all(torch.eq(input_out, out))
assert torch.all(torch.eq(ref_out, quant_out))
logger.info(info + "passed!")
def test_forward_cpu(self):
self.forward(cuda=False)
def test_forward_gpu(self):
self.forward(cuda=True)
if __name__ == "__main__":
unittest.main()
|
smallfry-master
|
src/smallfry/quant_embedding_test.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from smallfry import compress
import logging
import sys, os
LONG_BITS = 64
def fix_randomness(seed):
np.random.seed(seed)
use_cuda = torch.cuda.is_available()
torch.manual_seed(seed)
if use_cuda:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
##################################################################
# The core helper functions for compressing embeddings
# into torch int64 LongTensor.
##################################################################
def compress_long_mat(long_tensor, nbit):
"""
    We assume a single vector lies along the last dimension; each group of
    LONG_BITS // nbit entries is packed into one int64 value.
"""
assert long_tensor.dtype == torch.int64
assert LONG_BITS % nbit == 0
# n_entry is the # of entries each long value can contain
n_entry = LONG_BITS // nbit
mask = int("".join(['0'] * (LONG_BITS - nbit) + ['1'] * nbit), 2)
out_shape = list(long_tensor.shape)
out_shape[-1] = math.ceil(out_shape[-1] / n_entry)
out = torch.zeros(*out_shape, device=long_tensor.device, dtype=torch.int64)
out_flat = out.view(-1, out_shape[-1])
long_tensor_flat = long_tensor.view(-1, long_tensor.shape[-1])
for i in range(n_entry):
        # number of values to compress
n_val = long_tensor_flat[:, i::n_entry].size(-1)
out_flat[:, :n_val] |= (long_tensor_flat[:, i::n_entry] & mask) << (
(n_entry - i - 1) * nbit)
    return out  # out_flat is a view of out, so out is already populated
def decompress_long_mat(byte_tensor, nbit, dim=None):
"""
we assume a single vector is along the last dimension.
"""
assert byte_tensor.dtype == torch.int64
assert LONG_BITS % nbit == 0
n_entry = LONG_BITS // nbit
mask = int("".join(['0'] * (LONG_BITS - nbit) + ['1'] * nbit), 2)
out_shape = list(byte_tensor.shape)
out_shape[-1] *= n_entry
out = torch.zeros(*out_shape, device=byte_tensor.device, dtype=torch.int64)
# manipulate as 2d tensors
out_flat = out.view(-1, out_shape[-1])
byte_tensor_flat = byte_tensor.view(-1, byte_tensor.shape[-1])
for i in range(n_entry):
out_flat[:, i::n_entry] = (byte_tensor_flat >>
((n_entry - i - 1) * nbit)) & mask
if dim is not None:
        # cut the redundant dimensions
out_flat = out_flat[:, :dim].contiguous()
out_shape = list(byte_tensor.shape)
out_shape[-1] = dim
out = out_flat.view(*out_shape)
return out
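# Illustrative round trip (a sketch added for exposition, not original code):
# with nbit=4, each int64 holds LONG_BITS // 4 = 16 entries, so a row of six
# 4-bit values packs into a single int64 column and decompresses losslessly.
#   >>> vals = torch.LongTensor([[3, 7, 0, 15, 1, 2]])
#   >>> packed = compress_long_mat(vals, nbit=4)            # shape (1, 1)
#   >>> torch.all(decompress_long_mat(packed, nbit=4, dim=6) == vals)  # True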
##################################################################
# Helpers for replacing original pytorch embedding layers to
# the quantized embedding layer (i.e. class QuantEmbedding)
##################################################################
def quantize_embed(module, nbit=32):
"""
    This function recursively replaces all embedding modules
    with QuantEmbedding layers.
    The input module should be a torch.nn.Module object.
    nbit specifies the precision of the desired compressed embedding.
"""
for name, child in module.named_children():
if isinstance(child, torch.nn.Embedding):
quant_embedding = QuantEmbedding(
num_embeddings=child.num_embeddings,
embedding_dim=child.embedding_dim,
padding_idx=child.padding_idx,
nbit=nbit,
_weight=child.weight)
# send the quant embedding layer to gpu
# if the original embedding is on gpu
if next(child.parameters()).is_cuda:
quant_embedding.cuda()
setattr(module, name, quant_embedding)
logging.info("Replaced " + name + " in " +
module.__class__.__name__)
else:
quantize_embed(child, nbit)
return module
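# Illustrative usage (a sketch of the intended call pattern, mirroring the
# unit tests; not part of the original module):
#   >>> model = torch.nn.Sequential(torch.nn.Embedding(1000, 64))
#   >>> model = quantize_embed(model, nbit=4)  # layers become QuantEmbedding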
def find_embedding_module_name(module, module_name=""):
module_name_list = []
for name, child in module.named_children():
if isinstance(child, torch.nn.Embedding):
if module_name == "":
module_name_list.append(name)
else:
module_name_list.append(module_name + "." + name)
else:
if module_name == "":
module_name_list += find_embedding_module_name(child, name)
else:
module_name_list += find_embedding_module_name(
child, module_name + "." + name)
return module_name_list
##################################################################
# Misc helpers
##################################################################
def line2vec(line, value_dict=None):
"""
convert a line in embedding file to a float tensor vector
"""
if value_dict is None:
return torch.FloatTensor(
[float(value) for value in line.strip('\n').split(" ")[1:]])
else:
return torch.LongTensor([
value_dict[float(value)]
for value in line.strip('\n').split(" ")[1:]
])
def load_embed_from_ckpt(model, ckpt_file):
""" load normal full precision embedding modules """
embed_module_names = find_embedding_module_name(model)
assert os.path.isfile(
ckpt_file), "model ckpt file " + ckpt_file + " is missing!"
ckpt_state_dict = torch.load(ckpt_file)["model"]
emb_state_dict = {}
for name in embed_module_names:
try:
emb_state_dict[name + ".weight"] = ckpt_state_dict[name +
".weight"]
except:
raise Exception(name +
".weight not found in the model checkpoint file")
model_dict = model.state_dict()
model_dict.update(emb_state_dict)
model.load_state_dict(model_dict)
def print_model_mem(model):
embed_module_names = find_embedding_module_name(model)
embed_mem = 0.0
non_embed_mem = 0.0
model_dict = model.state_dict()
for k, v in model_dict.items():
is_embed = False
for name in embed_module_names:
if name in k:
is_embed = True
if is_embed:
embed_mem += v.element_size() * v.nelement()
else:
non_embed_mem += v.element_size() * v.nelement()
logging.info("Embed memory (bytes) " + str(embed_mem))
logging.info("Non-embed memory (bytes) " + str(non_embed_mem))
def reshape_ckpt_value_list_shape(model, state, nbit):
embed_module_names = find_embedding_module_name(model)
for name in embed_module_names:
# print("test ", name, state.keys())
if name + ".value_list" in state.keys():
# assert name + ".value_list" in state.keys(), "embedding not found in the ckpt!"
value_list = torch.zeros([2**nbit], dtype=torch.float32)
old_value_list = state[name + ".value_list"]
state[name + ".value_list"] = value_list
state[name + ".value_list"][:old_value_list.nelement()].copy_(
old_value_list)
logging.info("Updated value_list to shape " +
str(state[name + ".value_list"].size()))
return state
def dummy_filter(x):
return True
def fix_embedding_parameters(model, module_name_filter=dummy_filter):
embed_module_names = find_embedding_module_name(model)
embed_module_names = [
x for x in embed_module_names if module_name_filter(x)
]
for param_name, param in model.named_parameters():
if any([module_name in param_name for module_name in embed_module_names]):
param.requires_grad = False
logging.info("Embedding " + param_name + " is set to non-training mode")
def log_param_list(model):
for name, param in model.named_parameters():
logging.info('{} {} {} {}'.format(name, param.dtype, param.requires_grad, param.shape))
##################################################################
# The quantized embedding pytorch layer
##################################################################
class QuantEmbedding(nn.Embedding):
def __init__(self,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
nbit=32,
embedding_file=None):
"""
        Implementation of the quantized embedding layer. This layer provides
        memory-efficient embedding storage during inference. Currently, the
        implementation supports 1, 2, 4, 8, 16, and 32 bit representations.
        The QuantEmbedding layer stores the quantized representation in a
        LongTensor; during the forward pass for inference, the bits are
        extracted from the LongTensor and put into a float32 tensor.
        There are 2 ways to initialize the quantized embedding layer:
        1. a float32 tensor containing quantized or unquantized values
            _weight=<a float32 tensor>, embedding_file=None
        2. a file containing quantized or unquantized float values
            _weight=None, embedding_file=<file name>
        If you use the file-style input, for the expected file format,
        please refer to http://nlp.stanford.edu/data/glove.6B.zip.
"""
assert nbit in (1, 2, 4, 8, 16, 32)
assert max_norm == None
assert norm_type == 2.
assert scale_grad_by_freq == False
assert sparse == False
if (_weight is None and embedding_file is None) or (
_weight is not None and embedding_file is not None):
raise Exception(
"Should provide input either from a tensor or a file!")
self.nbit = nbit
# set the dimensionality of the actual compressed tensor
if self.nbit == 32:
self.tensor_dim = embedding_dim
else:
self.tensor_dim = math.ceil(embedding_dim * nbit / LONG_BITS)
nn.Embedding.__init__(
self,
            num_embeddings,
            self.tensor_dim,  # use the compressed tensor dim here, otherwise it will raise an error
padding_idx=padding_idx,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
sparse=sparse)
        # if we have a CUDA input _weight, we convert it to CPU
        # so that the intermediate memory used in initialization
        # does not exhaust the GPU memory.
if _weight is not None and _weight.is_cuda:
_weight = _weight.detach().cpu()
if self.nbit == 32:
# we only support forward pass
self.weight.requires_grad = False
if embedding_file is not None:
_weight = self._load_from_unquant_file_to_uncompressed_tensor(
embedding_file)
self.weight.copy_(_weight.data)
else:
self.weight = nn.Parameter(
torch.zeros(
num_embeddings, self.tensor_dim, dtype=torch.int64),
requires_grad=False)
        # record the true embedding_dim
self.embedding_dim = embedding_dim
# load the quantized values from file / tensor to the int64 tensor
if self._quantized_input(_weight, embedding_file):
if _weight is not None:
assert isinstance(_weight, torch.FloatTensor)
# the input weight is already quantized and does not need clipping/quantization
self._compress_tensor(_weight, do_quant=False)
elif embedding_file is not None:
# the functionality of compress tensor is included in the loading function here
self._load_from_quant_file_to_compressed_tensor(
embedding_file)
else:
if _weight is not None:
assert isinstance(_weight, torch.FloatTensor)
elif embedding_file is not None:
_weight = self._load_from_unquant_file_to_uncompressed_tensor(
embedding_file)
# compress _weight into self.weight
self._compress_tensor(_weight)
logging.info("Compressed embedding to " + str(self.nbit) + " bits!")
def _get_value_list_from_tensor(self, weight):
# get the unique values into a list
if isinstance(weight, torch.FloatTensor):
weight = weight.detach().cpu().numpy()
sorted_vals = sorted(np.unique(weight).tolist())
return sorted_vals
def _get_value_list_from_file(self, file_name):
value_set = set([])
with open(file_name, "r") as f:
for line_id, line in enumerate(f.readlines()):
for value in line.strip('\n').split(" ")[1:]:
value_set.add(float(value))
sorted_vals = sorted(list(value_set))
return sorted_vals
def _quantized_input(self, weight, embedding_file):
        assert weight is None or embedding_file is None, "Can only use one of Tensor or File as input!"
if weight is not None:
return len(
self._get_value_list_from_tensor(weight)) <= 2**self.nbit
else:
return len(
self._get_value_list_from_file(embedding_file)) <= 2**self.nbit
def _load_from_unquant_file_to_uncompressed_tensor(self, file_name):
weight = torch.zeros(self.num_embeddings, self.embedding_dim)
        # put vectors into a float32 tensor
with open(file_name, "r") as f:
for line_id, line in enumerate(f.readlines()):
vector = line2vec(line)
if self.embedding_dim != vector.numel():
raise Exception(
"Dimensionality in embedding file does not match dimensionality specified for embedding layer"
)
weight[line_id].copy_(vector)
return weight
def _compress_tensor(self, weight, do_quant=True):
'''
        If weight is not quantized yet, set do_quant=True to quantize it here.
'''
if (weight.shape[0] != self.num_embeddings) or (weight.shape[1] !=
self.embedding_dim):
raise Exception(
"The shape of the input embedding does not match the compressed tensor!"
)
assert self.nbit != 32, "_compress_tensor should only be called when nbit < 32"
if do_quant:
weight, _, _ = compress.compress_uniform(
weight.detach().cpu().numpy(),
self.nbit,
adaptive_range=True,
stochastic_round=False)
else:
weight = weight.detach().cpu().numpy()
# construct value dict
sorted_vals = self._get_value_list_from_tensor(weight)
value_list = torch.zeros([2**self.nbit], dtype=torch.float32)
value_list[:len(sorted_vals)].copy_(torch.FloatTensor(sorted_vals))
self.register_buffer("value_list", value_list)
self.value_dict = {
float(value): i
for i, value in enumerate(sorted_vals)
}
assert len(sorted_vals) <= 2**self.nbit
if len(sorted_vals) < 2**self.nbit:
logging.warning(
"Set of actual values is smaller than set of possible values.")
weight = np.vectorize(self.value_dict.get)(weight)
# compress vectors into quantized embeddings
self.weight.copy_(
compress_long_mat(torch.LongTensor(weight), nbit=self.nbit))
def _load_from_quant_file_to_compressed_tensor(self, file_name):
if self.nbit != 32:
# construct the mapping between quantized index and quantized value
sorted_vals = self._get_value_list_from_file(file_name)
self.register_buffer("value_list", torch.FloatTensor(sorted_vals))
self.value_dict = {
float(value): i
for i, value in enumerate(sorted_vals)
}
assert len(sorted_vals) <= 2**self.nbit
if len(sorted_vals) < 2**self.nbit:
logging.warning(
"Set of actual values is smaller than set of possible values."
)
else:
with open(file_name, "r") as f:
line_cnt = len(f.readlines())
# put vectors into int64 tensor
with open(file_name, "r") as f:
for line_id, line in enumerate(f.readlines()):
if self.nbit != 32:
vector = line2vec(line, self.value_dict)
self.weight[line_id].copy_(
compress_long_mat(vector, self.nbit))
else:
self.weight[line_id].copy_(
torch.tensor(
[float(value) for value in line.split(" ")[1:]],
dtype=self.weight.dtype))
if self.num_embeddings > line_id + 1:
logging.warning(
"The input vocab is smaller then the specified vocab size")
def forward(self, input):
embedding = F.embedding(input, self.weight, self.padding_idx,
self.max_norm, self.norm_type,
self.scale_grad_by_freq, self.sparse)
if self.nbit != 32:
embedding = decompress_long_mat(embedding, self.nbit,
self.embedding_dim)
embedding = self.value_list[embedding]
        assert self.weight.requires_grad == False, "QuantEmbedding only supports fixed embeddings"
return embedding
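# Illustrative usage (a sketch added for exposition, not original code):
# build a 4-bit embedding from a float32 weight matrix and run a lookup.
#   >>> w = torch.randn(100, 32)
#   >>> emb = QuantEmbedding(num_embeddings=100, embedding_dim=32, nbit=4, _weight=w)
#   >>> emb(torch.LongTensor([[1, 5, 7]])).shape  # float32 output
#   torch.Size([1, 3, 32])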
|
smallfry-master
|
src/smallfry/quant_embedding.py
|
import os
import sys
import socket
import json
import datetime
import logging
import pathlib
import time
import random
import subprocess
import argparse
import numpy as np
import getpass
def load_embeddings(path):
"""
    Loads a GloVe or FastText format embedding at the specified path. Returns a
    2-D numpy array of embeddings and a list of strings representing the
    vocabulary, in that order.
"""
logging.info('Beginning to load embeddings')
with open(path, 'r', encoding='utf8') as f:
lines = f.readlines()
wordlist = []
embeddings = []
if is_fasttext_format(lines): lines = lines[1:]
for line in lines:
row = line.strip('\n').split(' ')
wordlist.append(row.pop(0))
embeddings.append([float(i) for i in row])
embeddings = np.array(embeddings)
    assert len(wordlist) == embeddings.shape[0], 'Number of embeddings must match wordlist length.'
logging.info('Finished loading embeddings')
return embeddings, wordlist
def get_embedding_dimension(embed_path):
with open(embed_path) as f_embed:
for line in f_embed:
if not is_fasttext_format([line]):
pieces = line.rstrip().split(' ')
embed_dim = len(pieces) - 1
logging.info('Loading ' + str(embed_dim) +
' dimensional embedding')
break
assert embed_dim > 0
return embed_dim
def is_fasttext_format(lines):
first_line = lines[0].strip('\n').split(' ')
return len(first_line) == 2 and first_line[0].isdigit() and first_line[1].isdigit()
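# Format note (an assumption about the two supported layouts, for exposition):
# GloVe files start directly with "<word> <val> <val> ...", while FastText
# .vec files begin with a "<vocab_size> <dim>" header line, which is what
# is_fasttext_format detects:
#   >>> is_fasttext_format(["2000000 300\n", "the 0.1 0.2 ...\n"])   # True
#   >>> is_fasttext_format(["the 0.1 0.2 0.3\n"])                    # False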
|
smallfry-master
|
src/smallfry/utils.py
|
import math
import time
import numpy as np
def compress_uniform(X, bit_rate, adaptive_range=False, stochastic_round=False,
skip_quantize=False):
'''
This function compresses an embedding matrix using uniform quantization.
Parameters:
X (numpy array): Embedding matrix (rows of X are word embeddings).
bit_rate (int): Number of bits to use per entry of the compressed embedding matrix.
adaptive_range (bool): If True, golden section search is used to find the optimal
value at which to clip the extreme values of the embedding matrix X before
performing quantization.
stochastic_round (bool): If True, stochastic rounding is used for the quantization.
skip_quantize (bool): If True, the embedding matrix will not be quantized.
If adaptive_range is True, the extreme values of X will still be clipped.
Returns:
Xq (numpy array): The compressed embedding matrix.
frob_squared_error (float): The Frobenius norm of the difference between
the compressed and uncompressed embedding matrices.
elapsed (float): The duration (in seconds) of this function call.
'''
start = time.time()
if adaptive_range:
        # Note that deterministic quantization is always used for find_optimal_range.
range_limit = find_optimal_range(X, bit_rate, stochastic_round=False)
else:
range_limit = get_max_abs(X)
Xq = _compress_uniform(X, bit_rate, range_limit,
stochastic_round=stochastic_round, skip_quantize=skip_quantize)
elapsed = time.time() - start
frob_squared_error = np.linalg.norm(X-Xq)**2
return Xq, frob_squared_error, elapsed
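# Illustrative usage (a sketch added for exposition, not original code):
# quantize a matrix to 4 bits with adaptive clipping; the result has at most
# 2**4 distinct values and the same shape as the input.
#   >>> X = np.random.randn(1000, 50).astype(np.float32)
#   >>> Xq, err, secs = compress_uniform(X, 4, adaptive_range=True)
#   >>> Xq.shape == X.shape and np.unique(Xq).size <= 2**4   # True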
# Internal function. This one expects an explicit range_limit.
def _compress_uniform(X, bit_rate, range_limit, stochastic_round=False,
skip_quantize=False):
'''
Internal uniform quantization function.
Parameters:
X (numpy array): Embedding matrix (rows of X are word embeddings).
bit_rate (int): Number of bits to use per entry of the compressed embedding matrix.
range_limit (float): All values in X with absolute value greater than
this range_limit will be clipped.
stochastic_round (bool): If True, stochastic rounding is used for the quantization.
skip_quantize (bool): If True, the embedding matrix will not be quantized.
If adaptive_range is True, the extreme values of X will still be clipped.
Returns:
Xq (numpy array): The compressed embedding matrix.
'''
assert range_limit >= 0, 'range_limit must be non-negative.'
    assert np.issubdtype(X.dtype, np.floating), \
        'Only floating point inputs allowed.'
Xq = np.copy(X)
if get_max_abs(Xq) > range_limit:
np.clip(Xq, -range_limit, range_limit, out=Xq)
if not skip_quantize and range_limit != 0:
# We only need to quantize if skip_quantize is not set to true,
# and range_limit != 0 (range_limit == 0 means the whole matrix is
# already set to 0)
if bit_rate == 0:
Xq[:] = 0
elif bit_rate < 32:
# affine transform to put Xq in [0,2**bit_rate - 1]
Xq = (2**bit_rate - 1) * (Xq + range_limit) / (2 * range_limit)
if stochastic_round:
# each entry will round down if noise > fraction part
np.ceil(Xq - np.random.rand(*Xq.shape), out=Xq)
else:
np.round(Xq, out=Xq)
# undo affine transformation
Xq = (Xq * 2 * range_limit) / (2**bit_rate - 1) - range_limit
elif bit_rate >= 32:
pass # don't quantize if bitrate >= 32
return Xq
def find_optimal_range(X, bit_rate, stochastic_round=False, tol=1e-2):
'''
Find the best value to use to clip the embeddings before using uniform quantization.
Parameters:
X (numpy array): Embedding matrix (rows of X are word embeddings).
bit_rate (int): Number of bits to use per entry of the compressed embedding matrix.
stochastic_round (bool): If True, stochastic rounding is used for the quantization.
tol (float): The tolerance (maximum possible error) for the golden section search
algorithm.
Returns:
float: The optimal clipping value.
'''
f = lambda range_limit : compress_and_compute_frob_squared_error(
X, bit_rate, range_limit, stochastic_round=stochastic_round)
return golden_section_search(f, 0, get_max_abs(X), tol=tol)
def compress_and_compute_frob_squared_error(X, bit_rate, range_limit, stochastic_round=False):
'''
Function which computes frob squared error after compression. This function
is used in the find_optimal_range function to find best clip value for
adaptive range uniform compression.
Parameters:
X (numpy array): Embedding matrix (rows of X are word embeddings).
bit_rate (int): Number of bits to use per entry of the compressed embedding matrix.
range_limit (float): All values in X with absolute value greater than
this range_limit will be clipped.
stochastic_round (bool): If True, stochastic rounding is used for the quantization.
Returns:
float: The squared Frobenius error which results from compressing the X matrix
by first clipping its values to [-range_limit,range_limit], and then
uniformly quantizing the clipped values within this range.
'''
Xq = _compress_uniform(X, bit_rate, range_limit, stochastic_round=stochastic_round)
return np.linalg.norm(X - Xq)**2
def golden_section_search(f, x_min, x_max, tol=1e-2):
'''
Find argmin of f between x_min and x_max (for f uni-modal), to within a
specified tolerance (tol), using the golden section search algorithm.
Parameters:
f (function): f is the unimodal function we would like to find the argmin
for. f is assumed to take a scalar as input, and output a scalar.
x_min (float): The minimum input to consider when minimizing f.
x_max (float): The maximum input to consider when minimizing f.
tol (float): The tolerance (maximum possible error) for the
golden section search algorithm.
Returns:
float: The argmin of f, to within the specified tolerance.
This function uses the golden-section search algorithm.
It always maintains a list of four points [x1,x2,x3,x4],
which are always spaced as: [a,a+(c^2)h,a+ch,a+h].
for c = (math.sqrt(5) - 1) / 2 = 0.618...
(c is equal to 1/phi, where phi = (1+sqrt(5))/2 is the golden ratio).
The algorithm progressively reduces the size of the interval being
considered by checking whether f(x2) < f(x3), and eliminating one of the
endpoints accordingly; x4 is eliminated if f(x2) < f(x3), and x1
is eliminated otherwise.
If f(a+(c^2)h) < f(a+ch), the new interval becomes
>>> [a,a+(c^3)h,a+(c^2)h,a+ch] = [a,a+(c^2)(ch),a+c(ch),a+ch]
(So h' = ch, a' = a)
Otherwise, the interval becomes
>>> [a',a'+(c^2)h',a'+ch', a'+h'], for a' = a+(c^2)h and h'=(h-(c^2)h)
It is easy to check that a'+(c^2)h' = a + ch, and that a'+h' = a+h,
So this interval is equal to [a+(c^2)h, a+ch, X, a+h], for X=a'+ch'
The algorithm terminates when it has been narrowed
down that the argmin must be in an interval of size < tol.
'''
# Initialize points
# c is equal to 1/phi, for phi = (1+sqrt(5))/2
c = (math.sqrt(5) - 1) / 2
x1 = x_min
x4 = x_max
f_x1 = f(x1)
f_x4 = f(x4)
x2 = x1 + (x4-x1) * c**2
x3 = x1 + (x4-x1) * c
f_x2 = f(x2)
f_x3 = f(x3)
while (x4-x1 > tol):
assert (math.isclose(x2, x1 + (x4 - x1) * c**2) and
math.isclose(x3, x1 + (x4 - x1) * c))
if f_x2 < f_x3:
# The new points become [x1, NEW, x2, x3]
x4,f_x4 = x3,f_x3
x3,f_x3 = x2,f_x2
x2 = x1 + (x4-x1) * c**2
f_x2 = f(x2)
else:
# The new points become [x2, x3, NEW, x4]
x1,f_x1 = x2,f_x2
x2,f_x2 = x3,f_x3
x3 = x1 + (x4-x1) * c
f_x3 = f(x3)
# Return x-value with minimum f(x) which was found.
i = np.argmin([f_x1,f_x2,f_x3,f_x4])
x = [x1,x2,x3,x4]
return x[i]
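# Illustrative check (a sketch added for exposition, not original code): the
# search recovers the minimizer of a unimodal function to within `tol`.
#   >>> x_star = golden_section_search(lambda x: (x - 1.3)**2, 0.0, 5.0, tol=1e-3)
#   >>> abs(x_star - 1.3) < 1e-3   # True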
def get_max_abs(X):
return np.max(np.abs(X))
|
smallfry-master
|
src/smallfry/compress.py
|
from utils import *
import pickle as pkl
import datetime, os
class ModelParams:
def __init__(self, dataset_name, transform, test, log_path, input_size,
layer_size, out_size, num_layers, loss, r, steps, batch_size,
lr, mom, init_type, class_type, learn_corner, n_diag_learned,
init_stddev, fix_G, check_disp, check_disp_freq, checkpoint_freq, checkpoint_path,
test_freq, verbose, decay_rate, decay_freq, learn_diagonal,
fix_A_identity, stochastic_train, flip_K_B, num_conv_layers,
torch, model, viz_freq, num_pred_plot, viz_powers,early_stop_steps,replacement,
test_best_val_checkpoint, restore_from_checkpoint, num_structured_layers,
tie_operators_same_layer, tie_layers_A_A, tie_layers_A_B, train_fraction):
if class_type not in ['symmetric', 'polynomial_transform', 'low_rank', 'toeplitz_like', 'toep_corner', 'subdiagonal', 'toep_nocorn', 'hankel_like', 'vandermonde_like', 'unconstrained', 'circulant_sparsity', 'tridiagonal_corner', 'tridiagonal_corners']:
print('Class type ' + class_type + ' not supported')
assert 0
self.dataset_name = dataset_name
# grayscale
self.transform = transform
self.train_fraction = train_fraction
self.replacement = replacement
self.test = test
self.early_stop_steps = early_stop_steps
self.log_path = log_path
self.input_size = input_size
self.layer_size = layer_size
self.out_size = out_size
self.num_layers = num_layers
self.loss = loss
self.r = r
self.fix_G = fix_G
self.steps = steps
self.batch_size = batch_size
self.lr = lr
self.mom = mom
self.init_type = init_type
        self.disp_type = 'stein'
        if class_type == 'toeplitz_like':
            self.disp_type = 'sylvester'
self.class_type = class_type
self.learn_corner = learn_corner
self.n_diag_learned = n_diag_learned
self.init_stddev = init_stddev
self.check_disp = check_disp
self.check_disp_freq = check_disp_freq
self.checkpoint_freq = checkpoint_freq
self.checkpoint_path = checkpoint_path
self.test_freq = test_freq
self.verbose = verbose
self.decay_rate = decay_rate
self.decay_freq = decay_freq
self.learn_diagonal = learn_diagonal
self.fix_A_identity = fix_A_identity
self.stochastic_train = stochastic_train
self.flip_K_B = flip_K_B
self.num_conv_layers = num_conv_layers
self.torch = torch
self.model = model
self.viz_freq = viz_freq
self.num_pred_plot = num_pred_plot
self.viz_powers = viz_powers
self.test_best_val_checkpoint = test_best_val_checkpoint
self.restore_from_checkpoint = restore_from_checkpoint
self.num_structured_layers = num_structured_layers
self.tie_operators_same_layer = tie_operators_same_layer
self.tie_layers_A_A = tie_layers_A_A
self.tie_layers_A_B = tie_layers_A_B
# c1_filters, c1_ksize, p1_size, p1_strides, c2_filters, c2_ksize, p2_size, p2_strides
if self.model == 'CNN' or 'cnn' in self.transform:
self.set_cnn_params()
def set_cnn_params(self):
cnn_params = {}
if self.dataset_name.startswith('mnist_noise') or self.dataset_name == 'norb':
cnn_params['c1_ksize'] = 5
cnn_params['p1_size'] = 2
cnn_params['p1_strides'] = 2
cnn_params['c2_ksize'] = 5
cnn_params['p2_size'] = 2
cnn_params['p2_strides'] = 2
cnn_params['c1_filters'] = 6
cnn_params['c2_filters'] = 16
cnn_params['p2_flat_size'] = 7 * 7 * cnn_params['c2_filters']
self.cnn_params = cnn_params
elif self.dataset_name == 'cifar10':
cnn_params['c1_ksize'] = 5
cnn_params['p1_size'] = 2
cnn_params['p1_strides'] = 2
cnn_params['c2_ksize'] = 5
cnn_params['p2_size'] = 2
cnn_params['p2_strides'] = 2
cnn_params['c1_filters'] = 6
cnn_params['c2_filters'] = 16
cnn_params['p2_flat_size'] = 8 * 8 * cnn_params['c2_filters']
self.cnn_params = cnn_params
elif self.dataset_name.startswith('true'):
self.cnn_params = cnn_params
elif self.dataset_name in ['copy', 'iwslt', 'mnist_bg_rot', 'mnist', 'convex']:
return
#elif self.dataset_name.startswith('norb'):
# cnn_params['c1_filters'] = 9
# cnn_params['c2_filters'] = 9
# cnn_params['p1_size'] = 3
# cnn_params['p1_strides'] = 3
# cnn_params['p2_size'] = 1
# cnn_params['p2_strides'] = 1
# cnn_params['p2_flat_size'] = 9 * 9 * cnn_params['c2_filters']
else:
print('dataset_name not supported: ', self.dataset_name)
assert 0
def save(self, results_dir, name, commit_id, command):
# Append git commit ID and command
param_str = str(commit_id) + '\n' + command + '\n' + str(self)
print(param_str)
# Make new dir with timestamp
this_results_dir = os.path.join(results_dir, name + '_' + str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")))
if not os.path.exists(this_results_dir):
os.makedirs(this_results_dir)
        with open(os.path.join(this_results_dir, 'params.txt'), "w") as text_file:
            text_file.write(param_str)
# Save the dict
pkl.dump(self.__dict__, open(os.path.join(this_results_dir, 'params.p'), "wb"))
return this_results_dir
def __str__(self):
attr_dict = self.__dict__
param_str = ''
for attr in attr_dict:
param_str += attr + ': ' + str(attr_dict[attr]) + '\n'
return param_str
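    # Minimal usage sketch (illustrative only; bypasses the long constructor
    # purely for demonstration -- real code should pass every constructor argument):
    #   p = ModelParams.__new__(ModelParams)
    #   p.__dict__.update({'dataset_name': 'mnist', 'lr': 1e-3})
    #   print(p)  # __str__ prints one 'attr: value' line per attribute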
|
structured-nets-master
|
tensorflow/model_params.py
|
import tensorflow as tf
from utils import *
import numpy as np
from scipy.linalg import solve_sylvester
import time
from krylov import *
def eigendecomp(A):
d, P = tf.self_adjoint_eig(A)
return P, tf.diag(d), tf.matrix_inverse(P)
def general_recon(G, H, A, B):
P,D_A, Pinv = eigendecomp(A)
Q, D_B, Qinv = eigendecomp(B)
#sess = tf.InteractiveSession()
#tf.initialize_all_variables().run()
eig_A = tf.diag_part(D_A)
eig_B = tf.diag_part(D_B)
eig_A_reshaped = tf.reshape(eig_A, [-1, 1])
diff = eig_A_reshaped - eig_B
C = 1.0/diff
E = tf.matmul(G, tf.transpose(H))
term = tf.matmul(Pinv, tf.matmul(E, Q))
term = tf.multiply(term, C) # Elementwise
W = tf.matmul(P, tf.matmul(term, Qinv))
#print 'W: ', sess.run(W)
#print 'Q: ', sess.run(Q)
#quit()
return W
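# Derivation sketch for general_recon: with A = P D_A P^{-1} and B = Q D_B Q^{-1},
# the Sylvester equation A W - W B = G H^T becomes, in the eigenbases,
#   (P^{-1} W Q)_{ij} * (lambda_i - mu_j) = (P^{-1} G H^T Q)_{ij},
# which is exactly the elementwise multiplication by C = 1/diff above.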
def krylov_recon_params(layer_size, r, flip_K_B, G,H,fn_A,fn_B):
W1 = tf.zeros([layer_size, layer_size], dtype=tf.float64)
for i in range(r):
K_A = krylov(fn_A, G[:, i], layer_size)
K_B = tf.transpose(krylov(fn_B, H[:, i], layer_size))
if flip_K_B:
K_B = tf.reverse(K_B, [0])
prod = tf.matmul(K_A, K_B)
W1 = tf.add(W1, prod)
return W1
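# Illustrative numpy analogue of the Krylov reconstruction above (a sketch; the
# real `krylov` helper comes from the krylov module and operates on tf tensors):
def _krylov_np_sketch(mult_fn, v, n):
    # Columns are v, A v, A^2 v, ..., A^{n-1} v, where mult_fn applies A.
    cols = [v]
    for _ in range(n - 1):
        cols.append(mult_fn(cols[-1]))
    return np.stack(cols, axis=1)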
def krylov_recon(params, G, H, fn_A, fn_B):
return krylov_recon_params(params.layer_size, params.r, params.flip_K_B, G,H,fn_A,fn_B)
def circ_sparsity_recon_hadamard(G, H, n, r, learn_corner, n_diag_learned, init_type, stddev):
if learn_corner:
if init_type == 'toeplitz':
f_A = tf.Variable([1], dtype=tf.float64)
f_B = tf.Variable([-1], dtype=tf.float64)
elif init_type == 'random':
f_A = tf.Variable(tf.truncated_normal([1], stddev=stddev, dtype=tf.float64), dtype=tf.float64)
f_B = tf.Variable(tf.truncated_normal([1], stddev=stddev, dtype=tf.float64), dtype=tf.float64)
else:
print('init_type not supported: ', init_type)
assert 0
else:
f_A = tf.constant([1], dtype=tf.float64)
f_B = tf.constant([-1], dtype=tf.float64)
# diag: first n_learned entries
v_A = None
v_B = None
if n_diag_learned > 0:
if init_type == 'toeplitz':
v_A = tf.Variable(tf.ones(n_diag_learned, dtype=tf.float64))
v_B = tf.Variable(tf.ones(n_diag_learned, dtype=tf.float64))
elif init_type == 'random':
v_A = tf.Variable(tf.truncated_normal([n_diag_learned], stddev=stddev, dtype=tf.float64))
v_B = tf.Variable(tf.truncated_normal([n_diag_learned], stddev=stddev, dtype=tf.float64))
else:
print('init_type not supported: ', init_type)
assert 0
t0 = time.time()
scaling_mask = tf.constant(gen_circ_scaling_mask(n))
t1 = time.time()
f_mask_pattern = tf.constant([[True if j > k else False for j in range(n)] for k in range(n)])
all_ones = tf.ones(f_mask_pattern.get_shape(), dtype=tf.float64)
f_A_mask = tf.where(f_mask_pattern, f_A*all_ones, all_ones)
f_B_mask = tf.where(f_mask_pattern, f_B*all_ones, all_ones)
# Reconstruct W1 from G and H
index_arr = gen_index_arr(n)
W1 = tf.zeros([n, n], dtype=tf.float64)
for i in range(r):
t = time.time()
prod = circ_sparsity_recon_rank1(n, v_A, v_B, G[:, i], H[:, i], f_A_mask, f_B_mask, scaling_mask, index_arr, n_diag_learned)
W1 = tf.add(W1, prod)
# Compute a and b
a = f_A
b = f_B
if v_A is not None:
a *= tf.reduce_prod(v_A)
if v_B is not None:
b *= tf.reduce_prod(v_B)
coeff = 1.0/(1 - a*b)
#coeff = tf.Print(coeff,[coeff], message="my W1-values:") # <-------- TF PRINT STATMENT
W1_scaled = tf.scalar_mul(coeff[0], W1)
return W1_scaled, f_A, f_B, v_A, v_B
#assumes g and h are vectors.
#K(Z_f^T, g)*K(Z_f^T, h)^T
def circ_sparsity_recon_rank1(n, v_A, v_B, g, h, f_A_mask, f_B_mask, scaling_mask, index_arr, num_learned):
t1 = time.time()
K1 = krylov_circ_transpose(n, v_A, g, num_learned, f_A_mask, scaling_mask, index_arr)
t2 = time.time()
K2 = krylov_circ_transpose(n, v_B, h, num_learned, f_B_mask, scaling_mask, index_arr)
prod = tf.matmul(K1, tf.transpose(K2))
return prod
# Implements inversion in Theorem 2.2 in NIPS '15 paper.
def general_tf(A, B, G, H, r, m, n):
M = tf.zeros([m,n], dtype=tf.float64)
for i in range(r):
K_A_g = krylov_tf(A, G[:, i], m)
K_B_h = tf.transpose(krylov_tf(tf.transpose(B), H[:, i], n))
this_prod = tf.matmul(K_A_g, K_B_h)
M = tf.add(M, this_prod)
return 0.5*M
def compute_J_term(m, n, B, e):
term = np.eye(n) - e*np.linalg.matrix_power(B, m)
term_inv = np.linalg.inv(term)
J = np.flipud(np.eye(n))#np.flip(np.eye(n), axis=0)
# Multiply by J
return np.dot(J, term_inv)
def rect_recon_tf(G, H, B, m, n, e, f, r):
e_mask = tf.constant([[e if j > k else 1 for j in range(m)] for k in range(m)], dtype=tf.float64)
f_mask = gen_f_mask(f,n,m)
num_reps = int(np.ceil(float(m)/n))
# Compute J-term: once
J_term = compute_J_term(m, n, B, e)
index_arr_m = gen_index_arr(m)
index_arr_n = gen_index_arr(n)
recon_mat_partial = tf.zeros([m, n], dtype=tf.float64)
Jh = tf.reverse(H, [0])
for i in range(r):
Zg_i = circulant_tf(G[:, i], index_arr_m, e_mask)
Zh_i = circulant_mn_tf(Jh[:, i], index_arr_n, m, num_reps, f_mask)
this_prod = tf.matmul(Zg_i, tf.transpose(Zh_i))
recon_mat_partial = tf.add(recon_mat_partial, this_prod)
recon_mat_partial = tf.matmul(recon_mat_partial, J_term)
return recon_mat_partial
def toeplitz_recon(r, c):
    # Unused stub in the original source.
    return 0
def toeplitz_like_recon(G, H, n, r):
W1 = tf.zeros([n, n], dtype=tf.float64)
f = 1
g = -1
f_mask = tf.constant([[f if j > k else 1 for j in range(n)] for k in range(n)], dtype=tf.float64)
g_mask = tf.constant([[g if j > k else 1 for j in range(n)] for k in range(n)], dtype=tf.float64)
index_arr = gen_index_arr(n)
for i in range(r):
Z_g_i = circulant_tf(G[:, i], index_arr, f_mask)
Z_h_i = circulant_tf(tf.reverse(H[:, i], tf.constant([0])), index_arr, g_mask)
prod = tf.matmul(Z_g_i, Z_h_i)
W1 = tf.add(W1, prod)
W1 = tf.scalar_mul(0.5, W1)
return W1
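# Numerical sanity sketch (illustrative, not in the original): a Toeplitz matrix
# has Sylvester displacement rank <= 2 under the (Z_1, Z_{-1}) operators that
# toeplitz_like_recon assumes.
def _toeplitz_disp_rank_demo(n=8):
    from scipy.linalg import toeplitz
    c, r = np.random.randn(n), np.random.randn(n)
    r[0] = c[0]
    T = toeplitz(c, r)
    Z1 = np.diag(np.ones(n - 1), -1); Z1[0, -1] = 1.0
    Zm1 = np.diag(np.ones(n - 1), -1); Zm1[0, -1] = -1.0
    return np.linalg.matrix_rank(Z1.dot(T) - T.dot(Zm1))  # <= 2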
# Pan's Vandermonde specific reconstruction.
def vand_recon(G, H, v, m, n, f, r):
# Create vector of fv_i^n
raised = tf.pow(v, n)
scaled = tf.cast(tf.scalar_mul(f, raised), dtype=tf.float64)
denom = tf.subtract(tf.constant(1, dtype=tf.float64), scaled)
divided = tf.divide(tf.constant(1, dtype=tf.float64), denom)
D = tf.diag(divided)
index_arr = gen_index_arr(n)
f_mask = gen_f_mask(f, n,n)
recon = tf.zeros([m,n], dtype=tf.float64)
for i in range(r):
D_g_i = tf.diag(G[:, i])
V_v = V_mn(v, m, n)
Z_h_i = circulant_tf(H[:, i], index_arr, f_mask)
Z_h_i = tf.transpose(Z_h_i)
this_prod = tf.matmul(D_g_i, V_v)
this_prod = tf.matmul(this_prod, Z_h_i)
recon = tf.add(recon, this_prod)
recon = tf.matmul(D, recon)
return recon
def sylvester(M, N, n, r):
# Generate random rank r error matrix
G = np.random.random((n, r))
H = np.random.random((n, r))
GH = np.dot(G,H.T)
    # Solve the Sylvester equation MA - AN = GH^T to recover A
    A = solve_sylvester(M, -N, GH)
    E = np.dot(M, A) - np.dot(A, N)  # equals GH^T up to numerical error (unused)
return A,G,H
if __name__ == '__main__':
n = 10
r = 1
A = np.random.random((n, n))
A = (A+A.T)/2.0
B = np.random.random((n, n))
B = (B+B.T)/2.0
M,G,H = sylvester(A,B,n,r)
W = general_recon(G, H, A, B)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
W_real = sess.run(W)
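    # Sanity check (illustrative addition): the reconstruction should satisfy
    # the Sylvester displacement equation A W - W B = G H^T.
    residual = np.dot(A, W_real) - np.dot(W_real, B) - np.dot(G, H.T)
    print('Sylvester residual norm: ', np.linalg.norm(residual))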
quit()
|
structured-nets-master
|
tensorflow/reconstruction.py
|
import numpy as np
import os
import tensorflow as tf
from utils import *
from reconstruction import *
from model import *
def vandermonde_like(dataset, params, test_freq=100, verbose=False):
# A is learned, B is fixed
B_vand = gen_Z_f(params.layer_size, 0).T
f_V = 0
# Create the model
x = tf.placeholder(tf.float64, [None, params.input_size])
v = tf.Variable(tf.truncated_normal([params.layer_size], stddev=0.01, dtype=tf.float64))
G = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64))
H = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64))
W1 = vand_recon(G, H, v, params.layer_size, params.layer_size, f_V, params.r)
y = compute_y(x, W1, params)
y_ = tf.placeholder(tf.float64, [None, params.out_size])
loss, accuracy = compute_loss_and_accuracy(y, y_, params)
train_loss_summary = tf.summary.scalar('train_loss', loss)
train_acc_summary = tf.summary.scalar('train_accuracy', accuracy)
val_loss_summary = tf.summary.scalar('val_loss', loss)
val_acc_summary = tf.summary.scalar('val_accuracy', accuracy)
test_loss_summary = tf.summary.scalar('test_loss', loss)
test_acc_summary = tf.summary.scalar('test_accuracy', accuracy)
summary_writer = tf.summary.FileWriter(params.log_path, graph=tf.get_default_graph())
train_step = tf.train.MomentumOptimizer(params.lr, params.mom).minimize(loss)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
saver = tf.train.Saver()
step = 0
losses = {}
accuracies = {}
train_losses = []
train_accuracies = []
val_losses = []
val_accuracies = []
while step < params.steps:
batch_xs, batch_ys = dataset.batch(params.batch_size, step)
_ = sess.run([train_step], feed_dict={x: batch_xs, y_: batch_ys})
if step % test_freq == 0:
print(('Training step: ', step))
# Verify displacement rank
if params.check_disp:
v_real, W1_real = sess.run([v, W1], feed_dict={x: batch_xs, y_: batch_ys})
A = np.diag(v_real)
E = W1_real - np.dot(A, np.dot(W1_real, B_vand))
print(('Disp rank: ', np.linalg.matrix_rank(E)))
train_loss, train_accuracy, train_loss_summ, train_acc_summ = sess.run([loss, accuracy, train_loss_summary,
train_acc_summary], feed_dict={x: batch_xs, y_: batch_ys})
val_loss, val_accuracy, val_loss_summ, val_acc_summ = sess.run([loss, accuracy, val_loss_summary,
val_acc_summary], feed_dict={x: dataset.val_X, y_: dataset.val_Y})
summary_writer.add_summary(train_loss_summ, step)
summary_writer.add_summary(train_acc_summ, step)
summary_writer.add_summary(val_loss_summ, step)
summary_writer.add_summary(val_acc_summ, step)
train_losses.append(train_loss)
train_accuracies.append(train_accuracy)
val_losses.append(val_loss)
val_accuracies.append(val_accuracy)
print(('Train loss, accuracy: ', train_loss, train_accuracy))
print(('Validation loss, accuracy: ', val_loss, val_accuracy))
            if verbose:
                W1_real = sess.run(W1, feed_dict={x: batch_xs, y_: batch_ys})
                print(('Current W1: ', W1_real))
if step % params.checkpoint_freq == 0:
save_path = saver.save(sess, os.path.join(params.checkpoint_path, str(step)))
print(("Model saved in file: %s" % save_path))
step += 1
losses['train'] = train_losses
losses['val'] = val_losses
accuracies['train'] = train_accuracies
accuracies['val'] = val_accuracies
# Test trained model
if params.test:
# Load test
dataset.load_test_data()
test_loss, test_accuracy, test_loss_summ, test_acc_summ = sess.run([loss, accuracy, test_loss_summary, test_acc_summary], feed_dict={x: dataset.test_X, y_: dataset.test_Y})
summary_writer.add_summary(test_loss_summ, step)
summary_writer.add_summary(test_acc_summ, step)
print(('SGD test loss, Vandermonde-like: ', test_loss))
print(('SGD test accuracy, Vandermonde-like: ', test_accuracy))
losses['test'] = test_loss
accuracies['test'] = test_accuracy
return losses, accuracies
def hankel_like(dataset, params, test_freq=100, verbose=False):
f = 0
g = 1
A = gen_Z_f(params.layer_size, f)
B = gen_Z_f(params.layer_size, g)
# Create the model
x = tf.placeholder(tf.float64, [None, params.input_size])
v = tf.Variable(tf.truncated_normal([params.layer_size], stddev=0.01, dtype=tf.float64))
G = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64))
H = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64))
W1 = rect_recon_tf(G, H, B, params.layer_size, params.layer_size, f, g, params.r)
y = compute_y(x, W1, params)
y_ = tf.placeholder(tf.float64, [None, params.out_size])
loss, accuracy = compute_loss_and_accuracy(y, y_, params)
train_loss_summary = tf.summary.scalar('train_loss', loss)
train_acc_summary = tf.summary.scalar('train_accuracy', accuracy)
val_loss_summary = tf.summary.scalar('val_loss', loss)
val_acc_summary = tf.summary.scalar('val_accuracy', accuracy)
test_loss_summary = tf.summary.scalar('test_loss', loss)
test_acc_summary = tf.summary.scalar('test_accuracy', accuracy)
summary_writer = tf.summary.FileWriter(params.log_path, graph=tf.get_default_graph())
train_step = tf.train.MomentumOptimizer(params.lr, params.mom).minimize(loss)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
saver = tf.train.Saver()
step = 0
losses = {}
accuracies = {}
train_losses = []
train_accuracies = []
val_losses = []
val_accuracies = []
while step < params.steps:
batch_xs, batch_ys = dataset.batch(params.batch_size, step)
_ = sess.run([train_step], feed_dict={x: batch_xs, y_: batch_ys})
if step % test_freq == 0:
print(('Training step: ', step))
if params.check_disp:
# Verify displacement rank
W1_real = sess.run(W1, feed_dict={x: batch_xs, y_: batch_ys})
E = W1_real - np.dot(A, np.dot(W1_real, B))
print(('Disp rank: ', np.linalg.matrix_rank(E)))
train_loss, train_accuracy, train_loss_summ, train_acc_summ = sess.run([loss, accuracy, train_loss_summary,
train_acc_summary], feed_dict={x: batch_xs, y_: batch_ys})
val_loss, val_accuracy, val_loss_summ, val_acc_summ = sess.run([loss, accuracy, val_loss_summary,
val_acc_summary], feed_dict={x: dataset.val_X, y_: dataset.val_Y})
summary_writer.add_summary(train_loss_summ, step)
summary_writer.add_summary(train_acc_summ, step)
summary_writer.add_summary(val_loss_summ, step)
summary_writer.add_summary(val_acc_summ, step)
train_losses.append(train_loss)
train_accuracies.append(train_accuracy)
val_losses.append(val_loss)
val_accuracies.append(val_accuracy)
print(('Train loss, accuracy: ', train_loss, train_accuracy))
print(('Validation loss, accuracy: ', val_loss, val_accuracy))
            if verbose:
                W1_real = sess.run(W1, feed_dict={x: batch_xs, y_: batch_ys})
                print(('Current W1: ', W1_real))
if step % params.checkpoint_freq == 0:
save_path = saver.save(sess, os.path.join(params.checkpoint_path, str(step)))
print(("Model saved in file: %s" % save_path))
step += 1
losses['train'] = train_losses
losses['val'] = val_losses
accuracies['train'] = train_accuracies
accuracies['val'] = val_accuracies
# Test trained model
if params.test:
# Load test
dataset.load_test_data()
test_loss, test_accuracy, test_loss_summ, test_acc_summ = sess.run([loss, accuracy, test_loss_summary, test_acc_summary], feed_dict={x: dataset.test_X, y_: dataset.test_Y})
summary_writer.add_summary(test_loss_summ, step)
summary_writer.add_summary(test_acc_summ, step)
print(('SGD test loss, Hankel-like: ', test_loss))
print(('SGD test accuracy, Hankel-like: ', test_accuracy))
losses['test'] = test_loss
accuracies['test'] = test_accuracy
return losses, accuracies
def toeplitz_like(dataset, params, test_freq=100, verbose=False):
A = gen_Z_f(params.layer_size, 1)
B = gen_Z_f(params.layer_size, -1)
# Create the model
x = tf.placeholder(tf.float64, [None, params.input_size])
G = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64))
H = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64))
W1 = toeplitz_like_recon(G, H, params.layer_size, params.r)
y = compute_y(x, W1, params)
y_ = tf.placeholder(tf.float64, [None, params.out_size])
loss, accuracy = compute_loss_and_accuracy(y, y_, params)
train_loss_summary = tf.summary.scalar('train_loss', loss)
train_acc_summary = tf.summary.scalar('train_accuracy', accuracy)
val_loss_summary = tf.summary.scalar('val_loss', loss)
val_acc_summary = tf.summary.scalar('val_accuracy', accuracy)
test_loss_summary = tf.summary.scalar('test_loss', loss)
test_acc_summary = tf.summary.scalar('test_accuracy', accuracy)
summary_writer = tf.summary.FileWriter(params.log_path, graph=tf.get_default_graph())
train_step = tf.train.MomentumOptimizer(params.lr, params.mom).minimize(loss)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
saver = tf.train.Saver()
step = 0
losses = {}
accuracies = {}
train_losses = []
train_accuracies = []
val_losses = []
val_accuracies = []
while step < params.steps:
batch_xs, batch_ys = dataset.batch(params.batch_size, step)
_, = sess.run([train_step], feed_dict={x: batch_xs, y_: batch_ys})
if step % test_freq == 0:
print(('Training step: ', step))
if params.check_disp:
# Verify displacement rank
W1_real = sess.run(W1, feed_dict={x: batch_xs, y_: batch_ys})
E_sylv = np.dot(A, W1_real) - np.dot(W1_real, B)
print(('Disp rank, Sylv: ', np.linalg.matrix_rank(E_sylv)))
train_loss, train_accuracy, train_loss_summ, train_acc_summ = sess.run([loss, accuracy, train_loss_summary,
train_acc_summary], feed_dict={x: batch_xs, y_: batch_ys})
val_loss, val_accuracy, val_loss_summ, val_acc_summ = sess.run([loss, accuracy, val_loss_summary,
val_acc_summary], feed_dict={x: dataset.val_X, y_: dataset.val_Y})
summary_writer.add_summary(train_loss_summ, step)
summary_writer.add_summary(train_acc_summ, step)
summary_writer.add_summary(val_loss_summ, step)
summary_writer.add_summary(val_acc_summ, step)
train_losses.append(train_loss)
train_accuracies.append(train_accuracy)
val_losses.append(val_loss)
val_accuracies.append(val_accuracy)
print(('Train loss, accuracy: ', train_loss, train_accuracy))
print(('Validation loss, accuracy: ', val_loss, val_accuracy))
            if verbose:
                W1_real = sess.run(W1, feed_dict={x: batch_xs, y_: batch_ys})
                print(('Current W1: ', W1_real))
if step % params.checkpoint_freq == 0:
save_path = saver.save(sess, os.path.join(params.checkpoint_path, str(step)))
print(("Model saved in file: %s" % save_path))
step += 1
losses['train'] = train_losses
losses['val'] = val_losses
accuracies['train'] = train_accuracies
accuracies['val'] = val_accuracies
# Test trained model
if params.test:
# Load test
dataset.load_test_data()
test_loss, test_accuracy, test_loss_summ, test_acc_summ = sess.run([loss, accuracy, test_loss_summary,
test_acc_summary], feed_dict={x: dataset.test_X, y_: dataset.test_Y})
summary_writer.add_summary(test_loss_summ, step)
summary_writer.add_summary(test_acc_summ, step)
print(('SGD test loss, Toeplitz-like: ', test_loss))
print(('SGD test accuracy, Toeplitz-like: ', test_accuracy))
losses['test'] = test_loss
accuracies['test'] = test_accuracy
return losses, accuracies
def low_rank(dataset, params, test_freq=100, verbose=False):
# Create the model
x = tf.placeholder(tf.float64, [None, params.input_size])
G = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64))
H = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64))
    W1 = tf.matmul(G, tf.transpose(H))
y = compute_y(x, W1, params)
y_ = tf.placeholder(tf.float64, [None, params.out_size])
loss, accuracy = compute_loss_and_accuracy(y, y_, params)
train_loss_summary = tf.summary.scalar('train_loss', loss)
train_acc_summary = tf.summary.scalar('train_accuracy', accuracy)
val_loss_summary = tf.summary.scalar('val_loss', loss)
val_acc_summary = tf.summary.scalar('val_accuracy', accuracy)
test_loss_summary = tf.summary.scalar('test_loss', loss)
test_acc_summary = tf.summary.scalar('test_accuracy', accuracy)
summary_writer = tf.summary.FileWriter(params.log_path, graph=tf.get_default_graph())
train_step = tf.train.MomentumOptimizer(params.lr, params.mom).minimize(loss)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
saver = tf.train.Saver()
step = 0
losses = {}
accuracies = {}
train_losses = []
train_accuracies = []
val_losses = []
val_accuracies = []
while step < params.steps:
batch_xs, batch_ys = dataset.batch(params.batch_size, step)
_, = sess.run([train_step], feed_dict={x: batch_xs, y_: batch_ys})
if step % test_freq == 0:
print(('Training step: ', step))
train_loss, train_accuracy, train_loss_summ, train_acc_summ = sess.run([loss, accuracy, train_loss_summary,
train_acc_summary], feed_dict={x: batch_xs, y_: batch_ys})
val_loss, val_accuracy, val_loss_summ, val_acc_summ = sess.run([loss, accuracy, val_loss_summary,
val_acc_summary], feed_dict={x: dataset.val_X, y_: dataset.val_Y})
summary_writer.add_summary(train_loss_summ, step)
summary_writer.add_summary(train_acc_summ, step)
summary_writer.add_summary(val_loss_summ, step)
summary_writer.add_summary(val_acc_summ, step)
train_losses.append(train_loss)
train_accuracies.append(train_accuracy)
val_losses.append(val_loss)
val_accuracies.append(val_accuracy)
print(('Train loss, accuracy: ', train_loss, train_accuracy))
print(('Validation loss, accuracy: ', val_loss, val_accuracy))
            if verbose:
                W1_real = sess.run(W1, feed_dict={x: batch_xs, y_: batch_ys})
                print(('Current W1: ', W1_real))
if step % params.checkpoint_freq == 0:
save_path = saver.save(sess, os.path.join(params.checkpoint_path, str(step)))
print(("Model saved in file: %s" % save_path))
step += 1
losses['train'] = train_losses
losses['val'] = val_losses
accuracies['train'] = train_accuracies
accuracies['val'] = val_accuracies
# Test trained model
if params.test:
# Load test
dataset.load_test_data()
test_loss, test_accuracy, test_loss_summ, test_acc_summ = sess.run([loss, accuracy, test_loss_summary, test_acc_summary], feed_dict={x: dataset.test_X, y_: dataset.test_Y})
summary_writer.add_summary(test_loss_summ, step)
summary_writer.add_summary(test_acc_summ, step)
print(('SGD test loss, low rank: ', test_loss))
print(('SGD test accuracy, low rank: ', test_accuracy))
losses['test'] = test_loss
accuracies['test'] = test_accuracy
return losses, accuracies
def unconstrained(dataset, params, test_freq=100, verbose=False):
# Create the model
x = tf.placeholder(tf.float64, [None, params.input_size])
W1 = tf.Variable(tf.truncated_normal([params.layer_size, params.layer_size], stddev=0.01, dtype=tf.float64))
y = compute_y(x, W1, params)
y_ = tf.placeholder(tf.float64, [None, params.out_size])
loss, accuracy = compute_loss_and_accuracy(y, y_, params)
train_loss_summary = tf.summary.scalar('train_loss', loss)
train_acc_summary = tf.summary.scalar('train_accuracy', accuracy)
val_loss_summary = tf.summary.scalar('val_loss', loss)
val_acc_summary = tf.summary.scalar('val_accuracy', accuracy)
test_loss_summary = tf.summary.scalar('test_loss', loss)
test_acc_summary = tf.summary.scalar('test_accuracy', accuracy)
summary_writer = tf.summary.FileWriter(params.log_path, graph=tf.get_default_graph())
train_step = tf.train.MomentumOptimizer(params.lr, params.mom).minimize(loss)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
saver = tf.train.Saver()
step = 0
losses = {}
accuracies = {}
train_losses = []
train_accuracies = []
val_losses = []
val_accuracies = []
while step < params.steps:
batch_xs, batch_ys = dataset.batch(params.batch_size, step)
_ = sess.run([train_step], feed_dict={x: batch_xs, y_: batch_ys})
if step % test_freq == 0:
print(('Training step: ', step))
train_loss, train_accuracy, train_loss_summ, train_acc_summ = sess.run([loss, accuracy, train_loss_summary,
train_acc_summary], feed_dict={x: batch_xs, y_: batch_ys})
val_loss, val_accuracy, val_loss_summ, val_acc_summ = sess.run([loss, accuracy, val_loss_summary,
val_acc_summary], feed_dict={x: dataset.val_X, y_: dataset.val_Y})
summary_writer.add_summary(train_loss_summ, step)
summary_writer.add_summary(train_acc_summ, step)
summary_writer.add_summary(val_loss_summ, step)
summary_writer.add_summary(val_acc_summ, step)
train_losses.append(train_loss)
train_accuracies.append(train_accuracy)
val_losses.append(val_loss)
val_accuracies.append(val_accuracy)
print(('Train loss, accuracy: ', train_loss, train_accuracy))
print(('Validation loss, accuracy: ', val_loss, val_accuracy))
            if verbose:
                W1_real = sess.run(W1, feed_dict={x: batch_xs, y_: batch_ys})
                print(('Current W1: ', W1_real))
if step % params.checkpoint_freq == 0:
save_path = saver.save(sess, os.path.join(params.checkpoint_path, str(step)))
print(("Model saved in file: %s" % save_path))
step += 1
losses['train'] = train_losses
losses['val'] = val_losses
accuracies['train'] = train_accuracies
accuracies['val'] = val_accuracies
# Test trained model
if params.test:
# Load test
dataset.load_test_data()
test_loss, test_accuracy, test_loss_summ, test_acc_summ = sess.run([loss, accuracy, test_loss_summary,
test_acc_summary], feed_dict={x: dataset.test_X, y_: dataset.test_Y})
summary_writer.add_summary(test_loss_summ, step)
summary_writer.add_summary(test_acc_summ, step)
print(('SGD test loss, unconstrained: ', test_loss))
print(('SGD test accuracy, unconstrained: ', test_accuracy))
losses['test'] = test_loss
accuracies['test'] = test_accuracy
return losses, accuracies
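# For reference (a sketch of the assumed convention; the real gen_Z_f lives in
# utils): Z_f is the f-unit-circulant -- ones on the subdiagonal and f in the
# top-right corner.
def _gen_Z_f_sketch(n, f):
    Z = np.diag(np.ones(n - 1), -1)
    Z[0, -1] = f
    return Z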
|
structured-nets-master
|
tensorflow/fixed_operators.py
|
import tensorflow as tf
import io,os
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, '../tensorflow/')
from utils import *
from PIL import Image
import numpy as np
import time
# Produces one figure per selected image (params.num_pred_plot of them).
# Here assuming classification.
# x: (n, input dimension)
# y: (n,)
# pred: (n,)
# Each set of plots is for one image only.
# Top row: the original image (caption: actual and predicted class).
# Each subsequent row (for a specific power i): B^ix, GH^T(B^ix), A^i(GH^T(B^ix))
# num rows: len(viz_powers) + 1
# num cols: 3
ncols = 3
ram = io.BytesIO()  # binary buffer for PNG bytes (StringIO cannot hold binary image data)
def show_learned_operators(vis_path,A,B,W,step):
"""
print('A: ', A.shape)
print('B: ', B.shape)
print('W: ', W.shape)
"""
plt.clf()
f, plots = plt.subplots(3,figsize=(5,15))
plots[0].imshow(A)
plots[0].set_title('A')
plots[1].imshow(B)
plots[1].set_title('B')
plots[2].imshow(W)
plots[2].set_title('W')
plots[0].axis('off')
plots[1].axis('off')
plots[2].axis('off')
plt.savefig(os.path.join(vis_path, str(step) + '_A_B_W.png'))
plt.close()
# Assume all have been reshaped to be images
def show_prediction(vis_path,idx,viz_powers,image,true,pred,Bis,GHTBis,AiGHTBis,step):
plt.clf()
f, plots = plt.subplots(len(viz_powers)+1,ncols,figsize=(20,20))
for row in range(len(viz_powers)+1):
for col in range(ncols):
plots[row, col].axis('off')
plots[0, 1].imshow(image)
caption = 'Orig. Im., True: ' + str(true) + '; Pred: ' + str(pred)
if true == pred:
plots[0, 1].set_title(caption, color='green')
else:
plots[0, 1].set_title(caption, color='red')
for row in range(len(viz_powers)):
Bi = Bis[row][idx,:].reshape((image.shape[0],image.shape[1]))
GHTBi = GHTBis[row][idx,:].reshape((image.shape[0],image.shape[1]))
AiGHTBi = AiGHTBis[row][idx,:].reshape((image.shape[0],image.shape[1]))
plots[row+1,0].imshow(Bi)
plots[row+1,0].set_title(r'$B^{' + str(viz_powers[row]) + '}x$', color='green')
plots[row+1,1].imshow(GHTBi)
plots[row+1,1].set_title(r'$GH^TB^{' + str(viz_powers[row]) + '}x$', color='green')
plots[row+1,2].imshow(AiGHTBi)
plots[row+1,2].set_title(r'$A^{' + str(viz_powers[row]) + '}GH^TB^{' + str(viz_powers[row]) + '}x$', color='green')
plt.savefig(os.path.join(vis_path, str(step) + '_predictions_' + str(idx) + '.png'))
"""
plt.savefig(ram,format='png')
ram.seek(0)
im = Image.open(ram)
im2 = im.convert('RGB').convert('P', palette=Image.ADAPTIVE)
im2.save('predictions' + str(idx) + '.png', format='PNG')
"""
plt.close()
def show_predictions(vis_path,step,num_pred_plot,layer_size,viz_powers,x,y,pred,Bis,GHTBis,AiGHTBis):
assert num_pred_plot == x.shape[0] == y.size == pred.size
img_size = np.sqrt(layer_size)
    assert img_size.is_integer()
img_size = int(img_size)
nrows = len(viz_powers)
f, plots = plt.subplots(num_pred_plot,ncols,figsize=(20,20))
times = 0
for idx in range(num_pred_plot):
this_image = x[idx].reshape((img_size, img_size))
# Get correct
this_true = y[idx]
# Get predicted
this_pred = pred[idx]
t1 = time.time()
show_prediction(vis_path,idx,viz_powers,this_image,this_true,
this_pred,Bis,GHTBis,AiGHTBis,step)
times += (time.time() - t1)
print('Average time of show_prediction: ', times/num_pred_plot)
def visualize_predictions(params,x,y,pred):
    # Vestigial wrapper: show_predictions needs more arguments (vis_path, step,
    # viz_powers, Bis/GHTBis/AiGHTBis), so this call raises a TypeError; see make_plots.
    return show_predictions(params.num_pred_plot,params.layer_size,x,y,pred)
def compute_powers(powers,A,GHT,B,x):
Bis = []
GHTBis = []
AiGHTBis = []
for power in powers:
A_i = np.linalg.matrix_power(A,power)
B_i = np.linalg.matrix_power(B,power)
#print('B_i: ', B_i)
GHTB_i = np.dot(GHT, B_i)
A_iGHTB_i = np.dot(A_i, GHTB_i)
Bis.append(np.dot(B_i, x.T).T)
#print('x: ', x)
#print('B_ix: ', Bis[-1])
GHTBis.append(np.dot(GHTB_i, x.T).T)
AiGHTBis.append(np.dot(A_iGHTB_i, x.T).T)
return Bis,GHTBis,AiGHTBis
def make_plots_params(params,A,B,G,H,W,x,y,pred,step):
"""
print('A: ', A.shape)
print('B: ', B.shape)
print('W: ', W.shape)
"""
make_plots(params.vis_path,params.num_pred_plot,params.layer_size,params.viz_powers,A,B,G,H,W,x,y,pred,step)
# Just A,B,W
show_learned_operators(params.vis_path,A,B,W,step)
def make_plots(vis_path, num_pred_plot,layer_size,viz_powers,A,B,G,H,W,x,y,pred,step):
"""
print('x.shape: ', x.shape)
print('y.shape: ', y.shape)
print('pred.shape: ', pred.shape)
"""
assert x.shape[0] == y.size == pred.size
idx = np.random.randint(x.shape[0], size=num_pred_plot)
x = x[idx,:]
y = y[idx]
pred = pred[idx]
assert x.shape[0] == y.size == pred.size
# GH^Tx
low_rank = np.dot(G,H.T)
low_rank_pred = np.dot(low_rank,x.T).T
# B^ix, various i
# GH^T(B^ix), various i
# A^i(GH^T(B^ix)), various i
t1 = time.time()
Bis,GHTBis,AiGHTBis = compute_powers(viz_powers,A,low_rank,B,x)
print('Time of compute_powers: ', time.time() - t1)
# Various inputs, predictions, and ground truth
show_predictions(vis_path,step,num_pred_plot,layer_size,viz_powers,x,y,pred,Bis,GHTBis,AiGHTBis)
def get_model_params(params,x,y_,batch_xs,batch_ys,sess,model):
G,H = sess.run([model['G'], model['H']], feed_dict={x: batch_xs, y_: batch_ys})
W = sess.run(model['W'], feed_dict={x: batch_xs, y_: batch_ys})
    if params.class_type == 'circulant_sparsity':
        # Construct A (model keys and index conventions follow check_rank in model.py)
        x_f_A = sess.run(model['x_f_A'], feed_dict={x: batch_xs, y_: batch_ys})
        # Construct B
        x_f_B = sess.run(model['x_f_B'], feed_dict={x: batch_xs, y_: batch_ys})
        if params.fix_A_identity:
            A = np.eye(params.layer_size)
        else:
            A = gen_Z_f(params.layer_size, x_f_A[-1], x_f_A[:-1]).T
        B = gen_Z_f(params.layer_size, x_f_B[-1], x_f_B[:-1])
elif params.class_type == 'tridiagonal_corner':
# Construct A
supdiag_A = sess.run(model['supdiag_A'], feed_dict={x: batch_xs, y_: batch_ys})
diag_A = sess.run(model['diag_A'], feed_dict={x: batch_xs, y_: batch_ys})
subdiag_A = sess.run(model['subdiag_A'], feed_dict={x: batch_xs, y_: batch_ys})
f_A = sess.run(model['f_A'], feed_dict={x: batch_xs, y_: batch_ys})
# Construct B
supdiag_B = sess.run(model['supdiag_B'], feed_dict={x: batch_xs, y_: batch_ys})
diag_B = sess.run(model['diag_B'], feed_dict={x: batch_xs, y_: batch_ys})
subdiag_B = sess.run(model['subdiag_B'], feed_dict={x: batch_xs, y_: batch_ys})
f_B = sess.run(model['f_B'], feed_dict={x: batch_xs, y_: batch_ys})
# Check if this is transpose
A = gen_tridiag_corner(subdiag_A, supdiag_A, diag_A, f_A)
B = gen_tridiag_corner(subdiag_B, supdiag_B, diag_B, f_B)
else:
print('Class type not supported: ', params.class_type)
assert 0
"""
print('A: ', A.shape)
print('B: ', B.shape)
print('W: ', W.shape)
"""
return A,B,G,H,W
def visualize(params,sess,model,x,y_,batch_xs,batch_ys,y_pred,this_step):
A,B,G,H,W = get_model_params(params,x,y_,batch_xs,batch_ys,sess,model)
"""
print('A: ', A.shape)
print('B: ', B.shape)
print('W: ', W.shape)
print('A: ', A)
print('B: ', B)
print('G: ', G)
print('H: ', H)
quit()
"""
y_true = np.argmax(batch_ys,axis=1)
y_pred = np.argmax(y_pred,axis=1)
make_plots_params(params,A,B,G,H,W,batch_xs,y_true,y_pred,this_step)
if __name__ == '__main__':
num_pred_plot = 5
img_size = 2
layer_size = img_size**2
r = 1
A = gen_Z_f(layer_size,1)#np.random.random((layer_size,layer_size))
B = gen_Z_f(layer_size,-1)#np.random.random((layer_size,layer_size))
G = np.random.random((layer_size,r))
H = np.random.random((layer_size,r))
n = 100
x = np.random.random((n,layer_size))
y = np.random.randint(low=0, high=10,size=n)
pred = np.random.randint(low=0,high=10,size=n)
    viz_powers = [1,5,10]
    # Illustrative call: vis_path, W and step below are placeholder values.
    make_plots('.', num_pred_plot, layer_size, viz_powers, A, B, G, H,
               np.dot(G, H.T), x, y, pred, 0)
|
structured-nets-master
|
tensorflow/visualize.py
|
from scipy.sparse import diags
import numpy as np
import tensorflow as tf
import functools
from reconstruction import *
from utils import *
from krylov import *
def check_rank(sess, x, y_, batch_xs, batch_ys, params, model):
if not params.check_disp:
return
if params.class_type in ['unconstrained', 'symmetric']:
if params.class_type == 'symmetric':
A = sess.run(model['A'], feed_dict={x: batch_xs, y_: batch_ys})
B = sess.run(model['B'], feed_dict={x: batch_xs, y_: batch_ys})
print('A: ', A)
print('B: ', B)
return
if params.class_type in ['toeplitz_like', 'hankel_like']:
A,B = gen_operators(params)
W = sess.run(model['W'], feed_dict={x: batch_xs, y_: batch_ys})
print('W: ', W.shape)
E = compute_disp(params.disp_type, W, A, B)
dr = np.linalg.matrix_rank(E)
norm_res = np.linalg.norm(E)
norm_W = np.linalg.norm(W)
elif params.class_type == 'symm_tridiag_pan':
return
elif params.class_type == 'symm_tridiag_krylov':
return
elif params.class_type == 'tridiagonal_corner':
# Construct A
supdiag_A = sess.run(model['supdiag_A'], feed_dict={x: batch_xs, y_: batch_ys})
diag_A = sess.run(model['diag_A'], feed_dict={x: batch_xs, y_: batch_ys})
subdiag_A = sess.run(model['subdiag_A'], feed_dict={x: batch_xs, y_: batch_ys})
f_A = sess.run(model['f_A'], feed_dict={x: batch_xs, y_: batch_ys})
# Construct B
supdiag_B = sess.run(model['supdiag_B'], feed_dict={x: batch_xs, y_: batch_ys})
diag_B = sess.run(model['diag_B'], feed_dict={x: batch_xs, y_: batch_ys})
subdiag_B = sess.run(model['subdiag_B'], feed_dict={x: batch_xs, y_: batch_ys})
f_B = sess.run(model['f_B'], feed_dict={x: batch_xs, y_: batch_ys})
#print 'subdiag_A: ', subdiag_A
#print 'supdiag_A: ', supdiag_A
#print 'diag_A: ', diag_A
A = gen_tridiag_corner(subdiag_A, supdiag_A, diag_A, f_A).T
B = gen_tridiag_corner(subdiag_B, supdiag_B, diag_B, f_B)
W = sess.run(model['W'], feed_dict={x: batch_xs, y_: batch_ys})
E = compute_disp(params.disp_type, W, A, B)
dr = np.linalg.matrix_rank(E)
norm_res = np.linalg.norm(E)
norm_W = np.linalg.norm(W)
elif params.class_type == 'circulant_sparsity':
# Construct A
x_f_A = sess.run(model['x_f_A'], feed_dict={x: batch_xs, y_: batch_ys})
# Construct B
x_f_B = sess.run(model['x_f_B'], feed_dict={x: batch_xs, y_: batch_ys})
if params.fix_A_identity:
A = np.eye(params.layer_size)
else:
A = gen_Z_f(params.layer_size, x_f_A[-1], x_f_A[:-1]).T
B = gen_Z_f(params.layer_size, x_f_B[-1], x_f_B[:-1])
W = sess.run(model['W'], feed_dict={x: batch_xs, y_: batch_ys})
E = compute_disp(params.disp_type, W, A, B)
dr = np.linalg.matrix_rank(E)
norm_res = np.linalg.norm(E)
norm_W = np.linalg.norm(W)
elif params.class_type == 'tridiagonal_corners':
# Construct A
subdiag_A, supdiag_A, diag_A, f_ur_A, f_ll_A = sess.run([model['subdiag_A'], model['supdiag_A'], model['diag_A'], model['f_ur_A'], model['f_ll_A']], feed_dict={x: batch_xs, y_: batch_ys})
A = gen_tridiag_corners(subdiag_A, supdiag_A, diag_A, f_ur_A, f_ll_A).T
# Construct B
subdiag_B, supdiag_B, diag_B, f_ur_B, f_ll_B = sess.run([model['subdiag_B'], model['supdiag_B'], model['diag_B'], model['f_ur_B'], model['f_ll_B']],
feed_dict={x: batch_xs, y_: batch_ys})
B = gen_tridiag_corners(subdiag_B, supdiag_B, diag_B, f_ur_B, f_ll_B)
W = sess.run(model['W'], feed_dict={x: batch_xs, y_: batch_ys})
E = compute_disp(params.disp_type, W, A, B)
dr = np.linalg.matrix_rank(E)
norm_res = np.linalg.norm(E)
norm_W = np.linalg.norm(W)
    elif params.class_type == 'low_rank':
        W = sess.run(model['W'], feed_dict={x: batch_xs, y_: batch_ys})
        E = W
        dr = np.linalg.matrix_rank(E)
        norm_res = 0
        norm_W = np.linalg.norm(W)
        # No displacement operators for the low-rank class; use identities so the
        # eigenvalue diagnostics below remain well-defined.
        A = np.eye(params.layer_size)
        B = np.eye(params.layer_size)
elif params.class_type == 'vandermonde_like':
v, W = sess.run([model['v'], model['W']], feed_dict={x: batch_xs, y_: batch_ys})
A = np.diag(v)
B = gen_Z_f(params.layer_size, 0).T
E = compute_disp(params.disp_type, W, A, B)
dr = np.linalg.matrix_rank(E)
norm_res = np.linalg.norm(E)
norm_W = np.linalg.norm(W)
else:
print('class_type not supported: ', params.class_type)
assert 0
ratio = norm_res/norm_W
print(E.shape)
print(('(Displacement) Rank: ', np.linalg.matrix_rank(E)))
print(('||E||/||W||: ', ratio))
eigvals_E = np.abs(np.linalg.eigvals(E))
eigvals_W = np.abs(np.linalg.eigvals(W))
eigvals_A = np.abs(np.linalg.eigvals(A))
eigvals_B = np.abs(np.linalg.eigvals(B))
#print('eigvals_E: ', eigvals_E)
#print('eigvals_W: ', eigvals_W)
#print('eigvals_A: ', eigvals_A)
#print('eigvals_B: ', eigvals_B)
return dr, norm_res, norm_W, eigvals_E, eigvals_W, eigvals_A, eigvals_B, E, W, A, B
def get_structured_W(params):
model = {}
if params.class_type == 'unconstrained':
W = tf.Variable(tf.truncated_normal([params.layer_size, params.layer_size], stddev=params.init_stddev, dtype=tf.float64))
if params.check_disp or params.viz_freq > 0:
model['W'] = W
return W, model
elif params.class_type in ['low_rank', 'symm_tridiag_corner_pan', 'symm_tridiag_corner_krylov','symmetric', 'toeplitz_like',
'vandermonde_like', 'hankel_like', 'circulant_sparsity', 'tridiagonal_corner', 'tridiagonal_corners']:
G = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=params.init_stddev, dtype=tf.float64))
H = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=params.init_stddev, dtype=tf.float64))
model['G'] = G
model['H'] = H
if params.class_type == 'low_rank':
W = tf.matmul(G, tf.transpose(H))
elif params.class_type == 'symm_tridiag_corner_pan':
            mask = symm_tridiag_corner_mask(params.layer_size)
A = tf.Variable(tf.truncated_normal([params.layer_size, params.layer_size], stddev=params.init_stddev, dtype=tf.float64))
B = tf.Variable(tf.truncated_normal([params.layer_size, params.layer_size], stddev=params.init_stddev, dtype=tf.float64))
A = tf.multiply(A, mask)
B = tf.multiply(B, mask)
W = general_recon(G, H, A, B)
if params.check_disp or params.viz_freq > 0:
model['A'] = A
model['B'] = B
elif params.class_type == 'symm_tridiag_corner_krylov':
diag_A = tf.Variable(tf.truncated_normal([params.layer_size], stddev=params.init_stddev, dtype=tf.float64))
off_diag_A = tf.Variable(tf.truncated_normal([params.layer_size-1], stddev=params.init_stddev, dtype=tf.float64))
diag_B = tf.Variable(tf.truncated_normal([params.layer_size], stddev=params.init_stddev, dtype=tf.float64))
off_diag_B = tf.Variable(tf.truncated_normal([params.layer_size-1], stddev=params.init_stddev, dtype=tf.float64))
f_A = tf.Variable(tf.truncated_normal([1], stddev=params.init_stddev, dtype=tf.float64))
f_B = tf.Variable(tf.truncated_normal([1], stddev=params.init_stddev, dtype=tf.float64))
if params.check_disp or params.viz_freq > 0:
model['diag_A'] = diag_A
model['off_diag_A'] = off_diag_A
model['f_A'] = f_A
model['diag_B'] = diag_B
model['off_diag_B'] = off_diag_B
model['f_B'] = f_B
fn_A = functools.partial(tridiag_corners_transpose_mult_fn, off_diag_A, diag_A, off_diag_A, f_A, f_A)
fn_B = functools.partial(tridiag_corners_transpose_mult_fn, off_diag_B, diag_B, off_diag_B, f_B, f_B)
W = krylov_recon(params, G, H, fn_A, fn_B)
# Compute a and b
            a = tf.multiply(f_A, tf.reduce_prod(off_diag_A))
            b = tf.multiply(f_B, tf.reduce_prod(off_diag_B))
coeff = 1.0/(1 - a*b)
W = tf.multiply(coeff, W)
elif params.class_type == 'symmetric':
# Initialization with T+H operators
Z1 = gen_Z_f(params.layer_size, 1)
Z1m = gen_Z_f(params.layer_size, -1)
op_A = Z1 + Z1.T
op_B = Z1m + Z1m.T
#print 'op_A: ', op_A
#print 'op_B: ', op_B
op_A = np.random.random((params.layer_size, params.layer_size))
op_B = np.random.random((params.layer_size, params.layer_size))
A = tf.Variable(op_A)
A_upper = tf.matrix_band_part(A, 0, -1)
A_symm = 0.5 * (A_upper + tf.transpose(A_upper))
B = tf.Variable(op_B)
B_upper = tf.matrix_band_part(B, 0, -1)
B_symm = 0.5 * (B_upper + tf.transpose(B_upper))
W = general_recon(G, H, A_symm, B_symm)
if params.check_disp or params.viz_freq > 0:
model['A'] = A_symm
model['B'] = B_symm
elif params.class_type == 'toeplitz_like':
W = toeplitz_like_recon(G, H, params.layer_size, params.r)
elif params.class_type == 'hankel_like':
f = 0
g = 1
B = gen_Z_f(params.layer_size, g)
W = rect_recon_tf(G, H, B, params.layer_size, params.layer_size, f, g, params.r)
elif params.class_type == 'vandermonde_like':
f_V = 0
v = tf.Variable(tf.truncated_normal([params.layer_size], stddev=params.init_stddev, dtype=tf.float64))
model['v'] = v
W = vand_recon(G, H, v, params.layer_size, params.layer_size, f_V, params.r)
elif params.class_type == 'circulant_sparsity':
x_f_A, x_f_B = get_x_f(params.layer_size, params.init_type, params.learn_corner, params.n_diag_learned, params.init_stddev)
if params.learn_diagonal:
diag_A = tf.Variable(tf.zeros(params.layer_size, dtype=tf.float64))
diag_B = tf.Variable(tf.zeros(params.layer_size, dtype=tf.float64))
fn_A = functools.partial(circ_diag_transpose_mult_fn, x_f_A, diag_A)
fn_B = functools.partial(circ_diag_transpose_mult_fn, x_f_B, diag_B)
elif params.fix_A_identity:
            # x_f_A is unused when A is fixed to the identity
print('fixing A to be identity')
fn_A = identity_mult_fn
fn_B = functools.partial(circ_transpose_mult_fn, x_f_B)
else:
fn_A = functools.partial(circ_transpose_mult_fn, x_f_A)
fn_B = functools.partial(circ_transpose_mult_fn, x_f_B)
W = krylov_recon(params, G, H, fn_A, fn_B)
# Compute a and b
a = tf.reduce_prod(x_f_A)
b = tf.reduce_prod(x_f_B)
coeff = 1.0/(1 - a*b)
W = tf.scalar_mul(coeff, W)
if params.check_disp or params.viz_freq > 0:
model['x_f_A'] = x_f_A
model['x_f_B'] = x_f_B
elif params.class_type == 'tridiagonal_corners':
subdiag_A, supdiag_A, diag_A, subdiag_B, supdiag_B, diag_B, f_ur_A, f_ur_B, f_ll_A, f_ll_B = get_tridiag_corners_vars(params.layer_size, params.init_type, params.init_stddev, params.learn_corner)
if params.check_disp or params.viz_freq > 0:
model['subdiag_A'] = subdiag_A
model['supdiag_A'] = supdiag_A
model['diag_A'] = diag_A
model['f_ur_A'] = f_ur_A
model['f_ur_B'] = f_ur_B
model['f_ll_A'] = f_ll_A
model['f_ll_B'] = f_ll_B
model['subdiag_B'] = subdiag_B
model['supdiag_B'] = supdiag_B
model['diag_B'] = diag_B
fn_A = functools.partial(tridiag_corners_transpose_mult_fn, subdiag_A, diag_A, supdiag_A, f_ur_A, f_ll_A)
fn_B = functools.partial(tridiag_corners_transpose_mult_fn, subdiag_B, diag_B, supdiag_B, f_ur_B, f_ll_B)
W = krylov_recon(params, G, H, fn_A, fn_B)
# Compute a and b
a = tf.multiply(f_ur_A, tf.reduce_prod(subdiag_A))
b = tf.multiply(f_ur_B, tf.reduce_prod(subdiag_B))
coeff = 1.0/(1 - a*b)
W = tf.multiply(coeff, W)
elif params.class_type == 'tridiagonal_corner':
subdiag_A, supdiag_A, diag_A, subdiag_B, supdiag_B, diag_B, f_A, f_B = get_tridiag_corner_vars(params.layer_size, params.init_type, params.init_stddev, params.learn_corner)
if params.check_disp or params.viz_freq > 0:
model['subdiag_A'] = subdiag_A
model['supdiag_A'] = supdiag_A
model['diag_A'] = diag_A
model['f_A'] = f_A
model['f_B'] = f_B
model['subdiag_B'] = subdiag_B
model['supdiag_B'] = supdiag_B
model['diag_B'] = diag_B
fn_A = functools.partial(tridiag_corner_transpose_mult_fn, subdiag_A, diag_A, supdiag_A, f_A)
fn_B = functools.partial(tridiag_corner_transpose_mult_fn, subdiag_B, diag_B, supdiag_B, f_B)
W = krylov_recon(params, G, H, fn_A, fn_B)
# Compute a and b
a = tf.multiply(f_A, tf.reduce_prod(subdiag_A))
b = tf.multiply(f_B, tf.reduce_prod(subdiag_B))
coeff = 1.0/(1 - a*b)
W = tf.multiply(coeff, W)
if params.check_disp or params.viz_freq > 0:
model['W'] = W
return W, model
else:
print('Not supported: ', params.class_type)
assert 0
def forward(x, params):
W, model = get_structured_W(params)
y = compute_y(x, W, params)
return y, model
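# Minimal usage sketch (illustrative; assumes a `params` object exposing the
# attributes referenced above, e.g. input_size, layer_size, r, class_type):
#   x = tf.placeholder(tf.float64, [None, params.input_size])
#   y, model = forward(x, params)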
def compute_y(x, W1, params):
if 'cnn' in params.transform:
return compute_y_cnn(x, W1, params)
elif params.num_layers==0:
y = tf.matmul(x, W1,name='forward')
return y
elif params.num_layers==1:
b1 = tf.Variable(tf.truncated_normal([params.layer_size], stddev=params.init_stddev, dtype=tf.float64))
W2 = tf.Variable(tf.truncated_normal([params.layer_size, params.out_size], stddev=params.init_stddev, dtype=tf.float64))
b2 = tf.Variable(tf.truncated_normal([params.out_size], stddev=params.init_stddev, dtype=tf.float64))
xW = tf.matmul(x, W1)
h = tf.nn.relu(xW + b1)
prod = tf.matmul(h, W2)
y = tf.add(prod, b2,name='forward')
return y
else:
print('Not supported: ', params.num_layers)
assert 0
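# Shape sketch for compute_y (illustrative; input_size must equal layer_size
# here since W1 is square): with num_layers == 1,
#   h = relu(x W1 + b1): (batch, layer_size)
#   y = h W2 + b2:       (batch, out_size)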
|
structured-nets-master
|
tensorflow/model.py
|
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import os,sys,h5py
import scipy.io as sio
from scipy.linalg import solve_sylvester
import pickle as pkl
from sklearn.preprocessing import OneHotEncoder
# sys.path.insert(0, '../../../../')
from utils import *
class Dataset:
# here n is the input size.
# true_test: if True, we test on test set. Otherwise, split training set into train/validation.
def __init__(self, name, layer_size, num_iters, transform, stochastic_train, replacement, test_size=1000, train_size=10000, true_test=False, train_fraction=1.0, val_fraction=0.15):
self.name = name
self.mnist = None
# train_fraction and val_fraction used only in sample complexity experiments currently
self.train_fraction = train_fraction
self.val_fraction = val_fraction
self.transform = transform
self.replacement = replacement
self.stochastic_train = stochastic_train
self.num_iters = num_iters
self.layer_size = layer_size
self.pert = None
self.current_batch = 0
self.true_transform = None
self.test_size = test_size
self.train_size = train_size
self.true_test = true_test
self.input_size = self.get_input_size()
self.set_data_locs()
if self.name in ['iwslt', 'copy']:
return
elif self.name == 'timit':
train_feat_loc = '../../../timit/timit_train_feat.mat'
train_lab_loc = '../../../timit/timit_train_lab.mat'
train_X = h5py.File(train_feat_loc, 'r')['fea']
print('loaded')
train_X = np.array(train_X).T
print('train_X: ', train_X.shape)
train_Y = sio.loadmat(train_lab_loc)['lab']
# Ensure Y is one-hot
enc = OneHotEncoder()
train_Y = enc.fit_transform(train_Y).todense()
# Split into validation and train
val_size = int(0.1*train_X.shape[0])
train_size = train_X.shape[0] - val_size
# Shuffle X
idx = np.arange(0, train_X.shape[0])
np.random.shuffle(idx)
train_idx = idx[0:train_size]
val_idx = idx[-val_size:]
assert train_idx.size == train_size
assert val_idx.size == val_size
self.val_X = train_X[val_idx, :]
self.val_Y = train_Y[val_idx, :]
self.train_X = train_X[train_idx, :]
self.train_Y = train_Y[train_idx, :]
elif self.name.startswith('mnist_noise') or self.name in ['mnist_bg_rot', 'convex', 'rect', 'norb', 'norb_val','cifar10']:
data = pkl.load(open(self.train_loc, 'rb'))
train_X = data['X']
train_Y = data['Y']
# Shuffle
idx = np.arange(train_X.shape[0])
np.random.shuffle(idx)
train_X = train_X[idx,:]
train_Y = train_Y[idx,:]
# Downsample for sample complexity experiments
if self.train_fraction is not None:
num_samples = int(self.train_fraction*train_X.shape[0])
train_X = train_X[0:num_samples,:]
train_Y = train_Y[0:num_samples,:]
val_size = int(self.val_fraction*train_X.shape[0])
else:
if self.name == 'norb_val':
val_size = 50000
elif self.name == 'rect':
val_size = 100
elif self.name == 'convex':
val_size = 800
else:
val_size = 2000
train_size = train_X.shape[0] - val_size
# Shuffle X
idx = np.arange(0, train_X.shape[0])
np.random.shuffle(idx)
train_idx = idx[0:train_size]
val_idx = idx[-val_size:]
assert train_idx.size == train_size
assert val_idx.size == val_size
self.val_X = train_X[val_idx, :]
self.val_Y = train_Y[val_idx, :]
self.train_X = train_X[train_idx, :]
self.train_Y = train_Y[train_idx, :]
# post-processing transforms
self.train_X, _ = self.postprocess(self.train_X)
self.val_X, _ = self.postprocess(self.val_X)
elif self.name == 'smallnorb':
data_loc = '/dfs/scratch1/thomasat/datasets/smallnorb/processed_py2.pkl'
# Load
data = pkl.load(open(data_loc, 'rb'))
train_X = data['train_X']
train_Y = data['train_Y']
self.test_X = data['test_X']
self.test_Y = data['test_Y']
val_size = 2000
train_size = train_X.shape[0] - val_size
print(('train size, val size: ', train_size, val_size))
# Shuffle X
idx = np.arange(0, train_X.shape[0])
np.random.shuffle(idx)
train_idx = idx[0:train_size]
val_idx = idx[-val_size:]
assert train_idx.size == train_size
assert val_idx.size == val_size
self.val_X = train_X[val_idx, :]
self.val_Y = train_Y[val_idx, :]
self.train_X = train_X[train_idx, :]
self.train_Y = train_Y[train_idx, :]
elif self.name == 'mnist':
data_dir = '/tmp/tensorflow/mnist/input_data'
self.mnist = input_data.read_data_sets(data_dir, one_hot=True)
self.train_X = self.mnist.train.images
self.train_Y = self.mnist.train.labels
self.val_X = self.mnist.validation.images
self.val_Y = self.mnist.validation.labels
self.test_X = self.mnist.test.images
self.test_Y = self.mnist.test.labels
# postprocess
self.train_X, self.train_Y = self.augment(self.train_X, self.train_Y)
# self.test_X, self.test_Y = self.augment(self.test_X, self.test_Y)
elif self.name == 'mnist_rot':
self.load_train_data()
elif self.name in ['swap_mnist_bg_rot']:
if self.name.startswith('swap'):
train_size = 40000
val_size = 10000
else:
train_size = 10000
val_size = 2000
data = np.genfromtxt(self.train_loc)
# Shuffle
X = data[:, :-1]
Y = np.expand_dims(data[:, -1], 1)
# Y must be one-hot
enc = OneHotEncoder()
Y = enc.fit_transform(Y).todense()
idx = np.arange(0, X.shape[0])
np.random.shuffle(idx)
train_idx = idx[0:train_size]
val_idx = idx[-val_size:]
assert train_idx.size == train_size
assert val_idx.size == val_size
self.train_X = X[train_idx, :]
self.train_Y = Y[train_idx, :]
self.val_X = X[val_idx, :]
self.val_Y = Y[val_idx, :]
# post-processing transforms
self.train_X, _ = self.postprocess(self.train_X)
self.val_X, _ = self.postprocess(self.val_X)
elif self.name == 'mnist_rand_bg':
self.load_train_data()
elif self.name == 'rect_images':
train_size = 11000
val_size = 1000
data = np.genfromtxt(self.train_loc)
# Shuffle
X = data[:, :-1]
Y = np.expand_dims(data[:, -1], 1)
# Y must be one-hot
enc = OneHotEncoder()
Y = enc.fit_transform(Y).todense()
idx = np.arange(0, X.shape[0])
np.random.shuffle(idx)
train_idx = idx[0:train_size]
val_idx = idx[-val_size:]
assert train_idx.size == train_size
assert val_idx.size == val_size
self.train_X = X[train_idx, :]
self.train_Y = Y[train_idx, :]
self.val_X = X[val_idx, :]
self.val_Y = Y[val_idx, :]
elif self.name.startswith('true'):
self.true_transform = gen_matrix(self.input_size, self.name.split("true_",1)[1] )
test_X, test_Y = gen_batch(self.true_transform, self.test_size)
val_X, val_Y = gen_batch(self.true_transform, self.test_size)
self.test_X = test_X
self.test_Y = test_Y
self.val_X = val_X
self.val_Y = val_Y
if not self.stochastic_train:
train_X, train_Y = gen_batch(self.true_transform, self.train_size)
self.train_X = train_X
self.train_Y = train_Y
else:
print('Not supported: ', self.name)
assert 0
if not self.replacement:
#For batching
self.current_idx = 0
print('Training set X,Y: ', self.train_X.shape, self.train_Y.shape)
print('Validation set X,Y: ', self.val_X.shape, self.val_Y.shape)
# self.print_dataset_stats()
def print_dataset_stats(self,test=False):
print('Train X mean, std: ', np.mean(self.train_X,axis=0), np.std(self.train_X,axis=0))
print('Train X min, max: ', np.min(self.train_X), np.max(self.train_X))
print('Val X mean, std: ', np.mean(self.val_X,axis=0), np.std(self.val_X,axis=0))
print('Val X min, max: ', np.min(self.val_X), np.max(self.val_X))
if test:
print('Test X mean, std: ', np.mean(self.test_X,axis=0), np.std(self.test_X,axis=0))
print('Test X min, max: ', np.min(self.test_X), np.max(self.test_X))
def set_data_locs(self):
prefix = '/dfs/scratch1/thomasat/datasets/'
if self.name == 'cifar10':
data_dir = prefix + 'cifar10_combined'
train_name = 'train_grayscale' if 'grayscale' in self.transform else 'train'
test_name = 'test_grayscale' if 'grayscale' in self.transform else 'test'
self.train_loc = os.path.join(data_dir, train_name)
self.test_loc = os.path.join(data_dir, test_name)
elif self.name.startswith('mnist_noise'):
idx = self.name[-1]
self.train_loc = os.path.join(prefix,'mnist_noise/train_' + str(idx))
self.test_loc = os.path.join(prefix,'mnist_noise/test_' + str(idx))
elif self.name.startswith('swap_mnist_noise'):
idx = self.name[-1]
self.train_loc = os.path.join(prefix,'mnist_noise/test_' + str(idx))
self.test_loc = os.path.join(prefix,'mnist_noise/train_' + str(idx))
elif self.name == 'norb' or self.name=='norb_val':
self.train_loc = os.path.join(prefix,'norb_full/processed_py2_train_28.pkl')
self.test_loc = os.path.join(prefix,'norb_full/processed_py2_test_28.pkl')
elif self.name == 'rect_images':
self.train_loc = os.path.join(prefix, 'rect_images/rectangles_im_train.amat')
self.test_loc = os.path.join(prefix, 'rect_images/rectangles_im_test.amat')
elif self.name == 'rect':
self.train_loc = os.path.join(prefix,'rect/train_normalized')
self.test_loc = os.path.join(prefix, 'rect/test_normalized')
elif self.name == 'convex':
self.train_loc = os.path.join(prefix, 'convex/train_normalized')
self.test_loc = os.path.join(prefix, 'convex/test_normalized')
elif self.name == 'mnist_rand_bg':
self.train_loc = os.path.join(prefix, 'mnist_rand_bg/mnist_background_random_train.amat')
self.test_loc = os.path.join(prefix, 'mnist_rand_bg/mnist_background_random_test.amat')
elif self.name == 'mnist_bg_rot':
self.train_loc = os.path.join(prefix, 'mnist_bg_rot/train_normalized')
self.test_loc = os.path.join(prefix, 'mnist_bg_rot/test_normalized')
elif self.name == 'swap_mnist_bg_rot':
self.train_loc = os.path.join(prefix, 'mnist_bg_rot/mnist_all_background_images_rotation_normalized_test.amat')
self.test_loc = os.path.join(prefix, 'mnist_bg_rot/mnist_all_background_images_rotation_normalized_train_valid.amat')
def get_input_size(self):
if 'mnist' in self.name or 'convex' in self.name or 'rect' in self.name:
if 'pad' in self.transform:
return 1024
else:
return 784
elif self.name == 'smallnorb':
return 576
elif self.name == 'norb' or self.name=='norb_val':
return 784#729
elif self.name == 'timit':
return 440
elif self.name == 'cifar10':
if 'grayscale' in self.transform:
return 1024
elif 'downsample' in self.transform:
return 768
return 3072
elif self.name.startswith('true_') or self.name in ['iwslt', 'copy']:
return self.layer_size
else:
            print('Name not recognized: ', self.name)
assert 0
def postprocess(self, X, Y=None):
# pad from 784 to 1024
if 'pad' in self.transform:
X = np.pad(X.reshape((-1,28,28)), ((0,0),(2,2),(2,2)), 'constant').reshape(-1,1024)
# self.train_X = np.pad(self.train_X.reshape((-1,28,28)), ((0,0),(2,2),(2,2)), 'constant').reshape(-1,1024)
# self.val_X = np.pad(self.val_X.reshape((-1,28,28)), ((0,0),(2,2),(2,2)), 'constant').reshape(-1,1024)
# self.test_X = np.pad(self.test_X.reshape((-1,28,28)), ((0,0),(2,2),(2,2)), 'constant').reshape(-1,1024)
return X, Y
def augment(self, X, Y=None):
if 'contrast' in self.transform:
def scale_patch(X):
patch = ((9, 19), (9, 19))
X_ = X.copy()
X_[:, patch[0][0]:patch[0][1], patch[1][0]:patch[1][1]] *= 2
return X_
# subsample
idx = np.arange(X.shape[0])
np.random.shuffle(idx)
X = X[idx,...]
Y = Y[idx,...]
X1 = X.reshape((-1,28,28))
X2 = scale_patch(X1)
X3 = scale_patch(X2)
X4 = scale_patch(X3)
# X5 = scale_patch(X4)
X = np.concatenate([X1, X2, X3, X4], axis=0).reshape(-1, 28*28)
Y = np.concatenate([Y, Y, Y, Y], axis=0)
if 'patch' in self.transform:
def add_patch(X):
patch = ((0, 4), (10, 18))
X_ = X.copy()
X_[:, patch[0][0]:patch[0][1], patch[1][0]:patch[1][1]] += 3.0
return X_
X1 = X.reshape((-1,28,28))
X2 = add_patch(X1)
X3 = add_patch(X2)
X4 = add_patch(X3)
X = np.concatenate([X1, X2, X3, X4], axis=0).reshape(-1, 28*28)
Y = np.concatenate([Y, Y, Y, Y], axis=0)
return X, Y
def out_size(self):
if self.name in ['convex', 'rect', 'rect_images']:
return 2
elif self.name == 'smallnorb':
return 5
elif self.name == 'norb':
return 6
elif 'mnist' in self.name or 'cifar10' in self.name:
return 10
elif self.name == 'timit':
return 147
else:
return self.input_size
def load_test_data(self):
if self.name == 'mnist':
pass
elif self.name == 'timit':
test_feat_loc = '../../../timit/timit_heldout_feat.mat'
test_lab_loc = '../../../timit/timit_heldout_lab.mat'
test_X = sio.loadmat(test_feat_loc)['fea']
print('loaded test')
test_X = np.array(test_X)
print('test_X: ', test_X.shape)
test_Y = sio.loadmat(test_lab_loc)['lab']
# Ensure Y is one-hot
enc = OneHotEncoder()
test_Y = enc.fit_transform(test_Y).todense()
self.test_X = test_X
self.test_Y = test_Y
print('test_Y: ', test_Y.shape)
elif self.name == 'smallnorb':
return
elif self.name.startswith('mnist_noise') or self.name in ['norb', 'norb_val','cifar10', 'convex', 'rect', 'mnist_bg_rot']:
data = pkl.load(open(self.test_loc, 'rb'))
self.test_X = data['X']
self.test_Y = data['Y']
elif self.test_loc:
test_data = np.genfromtxt(self.test_loc)
self.test_X = test_data[:, :-1]
self.test_Y = np.expand_dims(test_data[:, -1], 1)
# Y must be one-hot
enc = OneHotEncoder()
self.test_Y = enc.fit_transform(self.test_Y).todense()
self.test_X, _ = self.postprocess(self.test_X)
print('Loaded test data: ')
print('Test X,Y:', self.test_X.shape, self.test_Y.shape)
# self.print_dataset_stats(test=True)
def load_train_data(self):
train_data = np.genfromtxt(self.train_loc)
self.train_X = train_data[:, :-1]
self.train_Y = np.expand_dims(train_data[:, -1], 1)
# Y must be one-hot
enc = OneHotEncoder()
self.train_Y = enc.fit_transform(self.train_Y).todense()
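    # Illustration of the one-hot encoding used above (hedged; hypothetical
    # helper, not part of the original class): integer labels in a column
    # vector become one-hot rows, one column per distinct class.
    def _demo_one_hot_encoding(self):
        enc = OneHotEncoder()
        Y = enc.fit_transform(np.array([[0], [2], [1]])).todense()
        assert Y.shape == (3, 3)  # three samples, three classes
        return Y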
def update_batch_idx(self, batch_size):
self.current_idx += batch_size
if self.current_idx >= self.train_X.shape[0]:
self.current_idx = 0
#print('Current training data index: ', self.current_idx)
def next_batch(self, batch_size):
#Randomly shuffle training set at the start of each epoch if sampling without replacement
if self.current_idx == 0:
idx = np.arange(0, self.train_X.shape[0])
np.random.shuffle(idx)
self.train_X = self.train_X[idx,:]
self.train_Y = self.train_Y[idx,:]
print('Shuffling: new epoch')
idx_end = min(self.train_X.shape[0], self.current_idx+batch_size)
batch_X = self.train_X[self.current_idx:idx_end,:]
batch_Y = self.train_Y[self.current_idx:idx_end,:]
self.update_batch_idx(batch_size)
return batch_X, batch_Y
def batch(self, batch_size, step):
if self.replacement:
return self.sample_with_replacement(batch_size, step)
else:
return self.sample_without_replacement(batch_size, step)
def sample_with_replacement(self, batch_size, step):
# if self.name == 'mnist':
# batch_xs, batch_ys = self.mnist.train.next_batch(batch_size)
# return batch_xs, batch_ys
if self.name.startswith('mnist') \
or self.name.startswith('swap_mnist') \
or self.name in ['convex', 'rect', 'rect_images', 'smallnorb', 'norb', 'norb_val', 'cifar10', 'timit']:
#Randomly sample batch_size from train_X and train_Y
idx = np.random.randint(self.train_X.shape[0], size=batch_size)
return self.train_X[idx, :], self.train_Y[idx, :]
elif self.name.startswith('true'):
if self.stochastic_train:
return gen_batch(self.true_transform, batch_size, self.pert)
else:
return self.next_batch(batch_size)
else:
print('Not supported: ', self.name)
assert 0
def sample_without_replacement(self, batch_size, step):
# if self.name == 'mnist':
# batch_xs, batch_ys = self.mnist.train.next_batch(batch_size)
# return batch_xs, batch_ys
# elif self.name.startswith('mnist') or self.name in ['convex', 'rect', 'rect_images', 'smallnorb', 'norb', 'cifar10']:
if self.name.startswith('mnist') \
or self.name.startswith('swap_mnist') \
or self.name in ['convex', 'rect', 'rect_images', 'smallnorb', 'norb', 'norb_val', 'cifar10', 'timit']:
return self.next_batch(batch_size)
elif self.name.startswith('true'):
if self.stochastic_train:
return gen_batch(self.true_transform, batch_size, self.pert)
else:
return self.next_batch(batch_size)
else:
print('Not supported: ', self.name)
assert 0
|
structured-nets-master
|
tensorflow/dataset.py
|
from scipy.linalg import toeplitz, circulant, solve_sylvester
from scipy.sparse import diags
import numpy as np
import tensorflow as tf
import time, subprocess
import functools
def kth_diag_indices(A, k):
rows, cols = np.diag_indices_from(A)
if k < 0:
return rows[-k:], cols[:k]
elif k > 0:
return rows[:-k], cols[k:]
else:
return rows, cols
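# Quick sanity sketch for kth_diag_indices (hypothetical check, not used by the
# training code): k = -1 should touch exactly the first subdiagonal of a 4x4 matrix.
def _demo_kth_diag_indices():
    A = np.zeros((4, 4))
    rows, cols = kth_diag_indices(A, -1)
    A[rows, cols] = 1
    assert np.allclose(A, np.diag(np.ones(3), k=-1))
    return A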
def symm_tridiag_corner_mask(n):
mask = np.zeros((n,n))
mask[0, -1] = 1
mask[-1, 0] = 1
subdiag = kth_diag_indices(mask, -1)
supdiag = kth_diag_indices(mask, 1)
diag = kth_diag_indices(mask, 0)
mask[subdiag] = 1
mask[supdiag] = 1
mask[diag] = 1
return mask
def sylvester_disp(M, A, B):
return np.dot(A,M) - np.dot(M,B)
def stein_disp(M, A, B):
return M - np.dot(A,np.dot(M,B))
def compute_disp(disp_type, M, A, B):
if disp_type == 'sylvester':
return sylvester_disp(M,A,B)
elif disp_type == 'stein':
return stein_disp(M,A,B)
else:
print('disp_type not supported: ', disp_type)
assert 0
def gen_tridiag_corner_transpose(subdiag,supdiag,diag,f):
T = diags([subdiag, diag, supdiag], [-1, 0, 1]).toarray()
T[-1, 0] = f
return T
def gen_tridiag_corner(subdiag, supdiag, diag, f):
T = diags([subdiag, diag, supdiag], [-1, 0, 1]).toarray()
T[0, -1] = f
return T
def gen_tridiag_corners(subdiag, supdiag, diag, f_ur, f_ll):
T = diags([subdiag, diag, supdiag], [-1, 0, 1]).toarray()
T[0, -1] = f_ur
T[-1, 0] = f_ll
return T
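# Minimal usage sketch (hedged; values are arbitrary): for n = 3 the result is
# tridiagonal with f_ur in the upper-right and f_ll in the lower-left corner.
def _demo_gen_tridiag_corners():
    T = gen_tridiag_corners(subdiag=[1, 2], supdiag=[3, 4], diag=[5, 6, 7], f_ur=8, f_ll=9)
    expected = np.array([[5., 3., 8.],
                         [1., 6., 4.],
                         [9., 2., 7.]])
    assert np.allclose(T, expected)
    return T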
# Two fs: upper right, lower left
def get_fs(learn_corner, init_type, stddev):
if learn_corner:
if init_type == 'toeplitz':
f_ur_A = tf.Variable([1], dtype=tf.float64)
f_ur_B = tf.Variable([-1], dtype=tf.float64)
f_ll_A = tf.Variable([0], dtype=tf.float64)
f_ll_B = tf.Variable([0], dtype=tf.float64)
elif init_type == 'random':
f_ur_A = tf.Variable(tf.truncated_normal([1], stddev=stddev, dtype=tf.float64), dtype=tf.float64)
f_ur_B = tf.Variable(tf.truncated_normal([1], stddev=stddev, dtype=tf.float64), dtype=tf.float64)
f_ll_A = tf.Variable(tf.truncated_normal([1], stddev=stddev, dtype=tf.float64), dtype=tf.float64)
f_ll_B = tf.Variable(tf.truncated_normal([1], stddev=stddev, dtype=tf.float64), dtype=tf.float64)
else:
print('init_type not supported: ', init_type)
assert 0
else:
f_ur_A = tf.constant([1], dtype=tf.float64)
f_ur_B = tf.constant([-1], dtype=tf.float64)
f_ll_A = tf.constant([0], dtype=tf.float64)
f_ll_B = tf.constant([0], dtype=tf.float64)
return f_ur_A, f_ur_B, f_ll_A, f_ll_B
def get_f(learn_corner, init_type, stddev):
if learn_corner:
if init_type == 'toeplitz':
f_A = tf.Variable([1], dtype=tf.float64)
f_B = tf.Variable([-1], dtype=tf.float64)
elif init_type == 'random':
f_A = tf.Variable(tf.truncated_normal([1], stddev=stddev, dtype=tf.float64), dtype=tf.float64)
f_B = tf.Variable(tf.truncated_normal([1], stddev=stddev, dtype=tf.float64), dtype=tf.float64)
else:
print('init_type not supported: ', init_type)
assert 0
else:
f_A = tf.constant([1], dtype=tf.float64)
f_B = tf.constant([-1], dtype=tf.float64)
return f_A, f_B
def get_subdiag(n_diag_learned, init_type, stddev):
    if n_diag_learned > 0:
        if init_type == 'toeplitz':
            x = tf.Variable(tf.ones(n_diag_learned, dtype=tf.float64))
        elif init_type == 'random':
            x = tf.Variable(tf.truncated_normal([n_diag_learned], stddev=stddev, dtype=tf.float64))
        else:
            print('init_type not supported: ', init_type)
            assert 0
        return x
    # No learned subdiagonal entries: return an empty tensor so callers can concatenate safely.
    return tf.zeros([0], dtype=tf.float64)
# Produce tf.Variable for subdiagonal + corner depending on params (initialization, num learned entries, etc.)
def get_x_f(n, init_type, learn_corner, n_diag_learned, stddev=0.01):
f_A, f_B = get_f(learn_corner, init_type, stddev)
x_A = get_subdiag(n_diag_learned, init_type, stddev)
x_B = get_subdiag(n_diag_learned, init_type, stddev)
# Pad
if n_diag_learned < (n-1):
ones = tf.ones(n-1-n_diag_learned, dtype=tf.float64)
x_A = tf.concat([x_A, ones], axis=0)
x_B = tf.concat([x_B, ones], axis=0)
# Concatenate
x_f_A = tf.concat([x_A, f_A], axis=0)
x_f_B = tf.concat([x_B, f_B], axis=0)
"""
f_x_A = np.ones(n_diag_learned+1)
f_x_A = tf.convert_to_tensor(f_x_A,dtype=tf.float64)
f_x_B = np.ones(n_diag_learned+1)
f_x_B[0] = -1
f_x_B = tf.convert_to_tensor(f_x_B, dtype=tf.float64)
"""
return x_f_A, x_f_B
def get_symm_tridiag_vars(n):
return 0
def get_symm_pos_tridiag_vars(n, init_type, stddev=0.01):
# Constrained to be positive
if init_type == 'random':
diag_A = tf.Variable(tf.truncated_normal([n], stddev=stddev, dtype=tf.float64))
off_diag_A = tf.get_variable('off_diag_A', initializer=tf.truncated_normal([n-1], stddev=stddev, dtype=tf.float64),
constraint=lambda x: tf.clip_by_value(x, 0, np.infty))
diag_B = tf.Variable(tf.truncated_normal([n], stddev=stddev, dtype=tf.float64))
elif init_type == 'chebyshev':
# A: 0 on diagonal
# A: 1/2 on the sub/super diagonal
# B: uniform(-1, 1)
print('chebyshev initialization')
diag_A = tf.Variable(tf.zeros([n], dtype=tf.float64))
off_diag_A = tf.get_variable('off_diag_A', initializer=0.5*tf.ones([n-1], dtype=tf.float64),
constraint=lambda x: tf.clip_by_value(x, 0, np.infty))
diag_B = tf.Variable(tf.random_uniform([n],minval=-1, maxval=1, dtype=tf.float64))
    else:
        print('init_type not supported: ', init_type)
        assert 0
    return diag_A, off_diag_A, diag_B
def get_tridiag_vars(n, init_type, stddev=0.01):
if init_type == 'toeplitz':
subdiag = tf.Variable(tf.ones([n-1], dtype=tf.float64))
supdiag = tf.Variable(tf.zeros([n-1], dtype=tf.float64))
diag = tf.Variable(tf.zeros([n], dtype=tf.float64))
elif init_type == 'random':
subdiag = tf.Variable(tf.truncated_normal([n-1], stddev=stddev, dtype=tf.float64))
supdiag = tf.Variable(tf.truncated_normal([n-1], stddev=stddev, dtype=tf.float64))
diag = tf.Variable(tf.truncated_normal([n], stddev=stddev, dtype=tf.float64))
else:
print('init_type not supported: ', init_type)
assert 0
return subdiag, supdiag, diag
# Returns subdiag, supdiag, diag for both operators, plus the corner entries f_A, f_B
def get_tridiag_corner_vars(n, init_type, stddev=0.01, learn_corner=True):
subdiag_A, supdiag_A, diag_A = get_tridiag_vars(n, init_type, stddev)
subdiag_B, supdiag_B, diag_B = get_tridiag_vars(n, init_type, stddev)
f_A, f_B = get_f(learn_corner, init_type, stddev)
return subdiag_A, supdiag_A, diag_A, subdiag_B, supdiag_B, diag_B, f_A, f_B
# Returns subdiag, supdiag, diag for both operators, plus corner entries f_ur, f_ll for each
def get_tridiag_corners_vars(n, init_type, stddev=0.01, learn_corner=True):
subdiag_A, supdiag_A, diag_A = get_tridiag_vars(n, init_type, stddev)
subdiag_B, supdiag_B, diag_B = get_tridiag_vars(n, init_type, stddev)
f_ur_A, f_ur_B, f_ll_A, f_ll_B = get_fs(learn_corner, init_type, stddev)
return subdiag_A, supdiag_A, diag_A, subdiag_B, supdiag_B, diag_B, f_ur_A, f_ur_B, f_ll_A, f_ll_B
# Returns loss, accuracy
def compute_loss_and_accuracy(y, y_, params):
if params.loss == 'mse':
mse = tf.reduce_mean(tf.squared_difference(y, y_),name='loss')
accuracy = tf.constant(0,name='accuracy')
return mse, accuracy
elif params.loss == 'cross_entropy':
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y), name='loss')
# Get prediction
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),name='accuracy')
return cross_entropy, accuracy
else:
print('Not supported: ', params.loss)
assert 0
def compute_y_cnn(x, W1, params):
if params.dataset_name == 'cifar10' and 'grayscale' not in params.transform:
input_layer = tf.reshape(x, [-1, 32, 32, 3])
else:
dim = int(np.sqrt(params.input_size))
input_layer = tf.reshape(x, [-1, dim, dim, 1]) # Assuming single channel
input_layer = tf.cast(input_layer, tf.float32)
print('input ', input_layer)
# x has been reshaped above to 32x32x3 (CIFAR-10 color) or dim x dim x 1 otherwise
# Convolutional Layer #1
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=params.cnn_params['c1_filters'],
kernel_size=[params.cnn_params['c1_ksize'], params.cnn_params['c1_ksize']],
padding="same",
activation=tf.nn.relu)
print('conv1', conv1)
# Pooling Layer #1
pool1 = tf.layers.max_pooling2d(inputs=conv1,
pool_size=[params.cnn_params['p1_size'], params.cnn_params['p1_size']],
strides=params.cnn_params['p1_strides'])
print('pool1 ', pool1)
if params.num_conv_layers == 1:
pool2_flat = tf.reshape(pool1, [-1, params.cnn_params['p2_flat_size']])
pool2_flat = tf.cast(pool2_flat, tf.float64)
print('pool2_flat: ', pool2_flat)
dense = tf.nn.relu(tf.matmul(pool2_flat, W1))
else:
# Convolutional Layer #2 and Pooling Layer #2
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=params.cnn_params['c2_filters'],
kernel_size=[params.cnn_params['c2_ksize'], params.cnn_params['c2_ksize']],
padding="same",
activation=tf.nn.relu)
print('conv2 ', conv2)
pool2 = tf.layers.max_pooling2d(inputs=conv2,
pool_size=[params.cnn_params['p2_size'], params.cnn_params['p2_size']],
strides=params.cnn_params['p2_strides'])
print('pool2 ', pool2)
# Dense Layer: replace with structured matrix
pool2_flat = tf.reshape(pool2, [-1, params.cnn_params['p2_flat_size']])
pool2_flat = tf.cast(pool2_flat, tf.float64)
print('pool2_flat: ', pool2_flat)
dense = tf.nn.relu(tf.matmul(pool2_flat, W1))
print('dense ', dense)
#dense = tf.layers.dense(inputs=pool3_flat, units=1024, activation=tf.nn.relu)
# Logits Layer
logits = tf.layers.dense(inputs=dense, units=params.out_size,name='forward')
print('dense ', dense)
print('logits', logits)
return logits
def get_commit_id():
return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])
def gen_operators(params):
if params.disp_type == 'sylvester':
return gen_sylvester_operators(params.class_type, params.layer_size, params.layer_size)
elif params.disp_type == 'stein':
return gen_stein_operators(params.class_type, params.layer_size, params.layer_size)
else:
print('disp_type not supported: ', params.disp_type)
assert 0
# Operators for Sylvester type displacement.
def gen_sylvester_operators(class_type, m, n):
if class_type.startswith('toeplitz'):
A = gen_Z_f(m, 1)
B = gen_Z_f(n, -1)
elif class_type.startswith('hankel'):
A = gen_Z_f(m, 1)
B = gen_Z_f(n, 0).T
elif class_type.startswith('t+h'):
A = gen_Z_f(m, 0) + gen_Z_f(m, 0).T
B = gen_Z_f(n, 0) + gen_Z_f(n, 0).T
elif class_type.startswith('vandermonde'):
v = np.random.random(n)
A = np.diag(v)
B = gen_Z_f(n, 0)
elif class_type == 'random':
A = np.random.random((m, m))
B = np.random.random((n, n))
else:
print('Class ' + class_type + ' not supported')
assert 0
return A,B
# Operators for Stein type displacement.
def gen_stein_operators(class_type, m, n):
if class_type.startswith('toeplitz'):
A = gen_Z_f(m, 1).T
B = gen_Z_f(n, -1)
elif class_type.startswith('hankel'):
A = gen_Z_f(m, 0)
B = gen_Z_f(n, 1)
elif class_type.startswith('vandermonde'):
v = np.random.random(n)
A = np.diag(v)
B = gen_Z_f(n, 0)
elif class_type == 'random':
A = np.random.random((m, m))
B = np.random.random((n, n))
else:
print('Class ' + class_type + ' not supported')
assert 0
return A,B
# Operators for Stein type displacement.
def gen_stein_operators_tf(init, m, n):
A,B = gen_stein_operators(init, m, n)
return tf.Variable(A), tf.Variable(B)
def gen_trid_mask(n):
ones1 = list(np.ones(n))
ones2 = list(np.ones(n-1))
data = [ones1, ones2, ones2]
positions = [0, 1, -1]
mask = diags(data, positions, (n, n)).toarray()
return mask
def gen_trid_corner_mask(n):
mask = gen_trid_mask(n)
mask[0, -1] = 1
return mask
# Circulant sparsity pattern
def gen_Z_f(m, f, v=None):
if v is not None:
assert v.size <= m-1
I_m = np.eye(m-1, m-1)
Z_f = np.hstack((I_m, np.zeros((m-1, 1))))
Z_f = np.vstack((np.zeros((1, m)), Z_f))
Z_f[0, -1] = f
if v is not None:
for i in range(v.size):
Z_f[i+1, i] = v[i]
return Z_f
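# Illustration (hypothetical check, not used by training code): Z_f is the shift
# matrix with ones on the subdiagonal and f in the top-right corner, so
# gen_Z_f(4, 2) is
#   [[0 0 0 2]
#    [1 0 0 0]
#    [0 1 0 0]
#    [0 0 1 0]]
def _demo_gen_Z_f():
    Z = gen_Z_f(4, 2)
    expected = np.diag(np.ones(3), k=-1)
    expected[0, -1] = 2
    assert np.allclose(Z, expected)
    return Z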
"""
def gen_Z_f(m, f):
I_m = np.eye(m-1, m-1)
Z_f = np.hstack((I_m, np.zeros((m-1, 1))))
Z_f = np.vstack((np.zeros((1, m)), Z_f))
Z_f[0, -1] = f
return Z_f
"""
def gen_circ_scaling_mask(n):
M = np.zeros((n,n))
for i in range(n-1):
M[i, -(i+1):] = 1 #Last i+1
return np.roll(M, 2, 0).astype(bool)
# Shift rows circularly by num_shifts shifts.
# Or just multiply by Z_1 - which is faster?
def tf_roll_rows(x, num_shifts):
if num_shifts == 0:
return x
x = tf.transpose(x)
x_len = x.get_shape().as_list()[1]
y = tf.concat([x[:,x_len-num_shifts:], x[:,:x_len-num_shifts]], axis=1)
return tf.transpose(y)
# Set masked entries to scale; all other entries to 1
def update_mask(scale, mask):
all_ones = tf.ones(mask.get_shape(), dtype=tf.float64)
return tf.where(mask, scale*all_ones, all_ones)
def gen_circ_scaling_tf(x, mask, num_learned):
if x is None:
return tf.ones(mask.get_shape(), dtype=tf.float64)
t0 = time.time()
final_mask = update_mask(x[0], mask)
print('time of first update_mask call: ', time.time() - t0)
sum_update = 0
sum_mult = 0
for i in np.arange(1, num_learned):
# Shift mask
t1 = time.time()
shifted_mask = update_mask(x[i], tf_roll_rows(mask, i))
sum_update += (time.time() - t1)
t2 = time.time()
final_mask = tf.multiply(final_mask, shifted_mask)
sum_mult += (time.time() - t2)
print('time of all update_mask calls: ', sum_update)
print('time of all tf.multiply calls: ', sum_mult)
return final_mask
def gen_f_mask(f, m,n):
mask = np.ones((m, n))
f_mask = f*mask
# Set lower triangular indices to 1: 1 if row>=col
il1 = np.tril_indices(n=f_mask.shape[0], m=f_mask.shape[1])
f_mask[il1] = 1
f_mask = tf.constant(f_mask, dtype=tf.float64)
return f_mask
def gen_index_arr(n):
r = np.arange(0, n)
a = np.expand_dims(r, -1)
b = np.expand_dims(-r, 0)
index = a+b
# Create mask
pos = (index < 0)
antipos = 1-pos
updated = np.multiply(antipos, index) + np.multiply(pos, index+n)
return np.expand_dims(updated, -1)
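# Worked example (hedged; mirrors how circulant_tf consumes this): entry (i, j)
# of the index array is (i - j) mod n, so gathering a vector v at these indices
# produces the circulant matrix whose first column is v.
def _demo_gen_index_arr():
    idx = gen_index_arr(3)[:, :, 0]
    expected = np.array([[0, 2, 1],
                         [1, 0, 2],
                         [2, 1, 0]])
    assert np.array_equal(idx, expected)
    return idx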
def sylvester(A, B, n, r):
# Generate random rank r error matrix
G = np.random.random((n, r))
H = np.random.random((n, r))
GH = np.dot(G,H.T)
# Solve Sylvester equation to recover M
# Such that AM - MB = GH^T
M = solve_sylvester(A, -B, GH)
E = np.dot(A,M) - np.dot(M,B)
assert np.linalg.norm(E - GH) <= 1e-10
return M
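# Usage sketch (assumption: A and B have disjoint spectra, which holds for the
# Z_1 / Z_{-1} pair used for Toeplitz-like matrices): the recovered M has
# Sylvester displacement rank at most r.
def _demo_sylvester_rank(n=8, r=2):
    A = gen_Z_f(n, 1)
    B = gen_Z_f(n, -1)
    M = sylvester(A, B, n, r)
    E = np.dot(A, M) - np.dot(M, B)
    assert np.linalg.matrix_rank(E) <= r
    return M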
def gen_matrix(n, prefix, r=2):
if prefix.startswith('rank'):
r = int(prefix[-1])
G = np.random.random((n,r))
H = np.random.random((n,r))
return np.dot(G,H.T)
elif prefix == 'toeplitz':
c = np.random.random(n)
r = np.random.random(n)
return toeplitz(c,r)
elif prefix == 'hankel':
c = np.random.random(n)
r = np.random.random(n)
return np.flipud(toeplitz(c,r))
elif prefix == 'vandermonde':
v = np.random.random(n)
return np.vander(v, n, increasing=True)
elif prefix == 'cauchy':
s = np.random.random(n)
t = np.random.random(n)
return 1.0 / (s.reshape((-1,1)) - t)
elif prefix == 'random':
return np.random.random((n,n))
elif prefix == 'toeplitz-like':
# Generate operators
A = gen_Z_f(n, 1)
B = gen_Z_f(n, -1)
# Generate random rank r error matrix
# Solve sylvester
return sylvester(A, B, n, r)
elif prefix == 'hankel-like':
# Generate operators
A = gen_Z_f(n, 1)
B = gen_Z_f(n, 0).T
return sylvester(A, B, n, r)
elif prefix == 'vandermonde-like':
# Generate operators
v = np.random.random(n)
#v = np.linalg.eigvals(Z1)
V = np.vander(v, increasing=True)
# Generate operators
A = np.diag(v)
B = gen_Z_f(n, 0)
return sylvester(A, B, n, r)
elif prefix == 'cauchy-like':
s = np.random.random(n)
t = np.random.random(n)
C = 1.0 / (s.reshape((-1,1)) - t)
# Generate operators
A = np.diag(s)
B = np.diag(t)
return sylvester(A, B, n, r)
elif prefix == 'tridiag_corner':
# Generate random tridiagonal+corner operators
A = np.random.random((n,n))
B = np.random.random((n,n))
mask = gen_trid_corner_mask(n)
A = np.multiply(A, mask)
B = np.multiply(B, mask)
return sylvester(A, B, n, r)
elif prefix == 'circ_sparsity':
# Generate random circulant sparsity pattern operators
A = np.random.random((n,n))
B = np.random.random((n,n))
mask = gen_Z_f(n, 1)
A = np.multiply(A, mask)
B = np.multiply(B, mask)
return sylvester(A, B, n, r)
else:
print('Type ' + prefix + ' not supported')
assert 0
def gen_batch(A, N, P=None):
"""
Generates N random x's, computes corresponding y's, such that Ax = y.
A: the matrix.
N: number of datapoints.
P: if not None, then with probability 0.5, return (Px, APx). Otherwise return (x, Ax).
"""
X = np.random.random((A.shape[1], N))
if P is not None:
if np.random.random() >= 0.5:
X = np.dot(P,X)
Y = np.dot(A, X)
assert np.isclose(np.linalg.norm(Y[:, 0] - np.dot(A, X[:, 0])), 0)
return X.T,Y.T
def get_GH(E):
disp_rank = np.linalg.matrix_rank(E)
# SVD
U, S, V = np.linalg.svd(E, full_matrices=False)
SV = np.dot(np.diag(S), V)
G = U[:, 0:disp_rank]
H = SV[0:disp_rank, :].T
return G,H, disp_rank
def sylvester_project(M, A, B, r):
"""
Project via SVD on error matrix + solving the Sylvester equation.
"""
t = time.time()
E = np.dot(A, M) - np.dot(M, B)
G,H,dr = get_GH(E)
G_r = G[:, 0:r]
H_r = H[:, 0:r]
lowrank = np.dot(G_r, H_r.T)
print('norm(E-lowrank): ', np.linalg.norm(E-lowrank))
# Sylvester solve
M_class = solve_sylvester(A, -B, lowrank)
print('rank(lowrank): ', np.linalg.matrix_rank(lowrank))
print('rank(A): ', np.linalg.matrix_rank(A))
print('rank(B): ', np.linalg.matrix_rank(B))
print('norm(M-M_class): ', np.linalg.norm(M-M_class))
E_class = np.dot(A, M_class) - np.dot(M_class, B)
print('rank of E_class',np.linalg.matrix_rank(E_class))
#print 'eigvals of E_class',np.linalg.eigvals(E_class)
print('time of sylv project: ', time.time() - t)
return M_class
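# Usage sketch (hedged): projecting a random matrix onto matrices of
# Toeplitz-like displacement rank <= r; afterwards the residual A M - M B has
# rank <= r up to the tolerance of the Sylvester solve.
def _demo_sylvester_project(n=8, r=2):
    M = np.random.random((n, n))
    A = gen_Z_f(n, 1)
    B = gen_Z_f(n, -1)
    M_proj = sylvester_project(M, A, B, r)
    E = np.dot(A, M_proj) - np.dot(M_proj, B)
    assert np.linalg.matrix_rank(E, tol=1e-8) <= r
    return M_proj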
def circulant_tf(vec, index_arr, mask=None):
# Slice 2D
output = tf.gather_nd(vec, index_arr)
# Apply mask
if mask is not None:
output = tf.multiply(output, mask)
return output
# Shape of stack_circ: (v.size, n)
def circulant_mn_tf(v, index_arr, n, num_reps, f_mask):
circ_v = circulant_tf(v, index_arr)
multiples = tf.constant([1, num_reps])
stack_circ = tf.tile(circ_v, multiples)
stack_circ = tf.cast(stack_circ[:, 0:n], tf.float64)
# Element-wise multiplication
masked = tf.multiply(f_mask, stack_circ)
return masked
# K(Z_f^T, g) = scaling_term*J*Z_f(Jg)
# x.size == num_learned
def krylov_circ_transpose(n, x, v, num_learned, f_mask, scaling_mask, index_arr):
# Generate mask
t2 = time.time()
# Get scaling term
scale_term = gen_circ_scaling_tf(x, scaling_mask, num_learned)
print('time of gen_circ_scaling_tf', time.time() - t2)
# Call circulant_tf on flipped v
t3 = time.time()
Z = circulant_tf(tf.reverse(v, [0]), index_arr, f_mask)
print('time of circulant_tf', time.time() - t3)
# Flip row-wise
JZ = tf.reverse(Z, [0])
# Elementwise multiplication by scale_term
return tf.multiply(scale_term, JZ)
def krylov_tf(A, v, n):
v_exp = tf.expand_dims(v,1)
cols = [v_exp]
this_pow = A
for i in range(n-1):
this_col = tf.matmul(this_pow, v_exp)
this_pow = tf.matmul(A, this_pow)
cols.append(this_col)
K = tf.stack(cols)
return tf.transpose(tf.squeeze(K))
def Ax_circ(f_v, x, n):
# Circular shift x to the right
y = tf.concat([x[n-1:], x[:n-1]], axis=0)
# Scale by [f v]
return tf.multiply(y, f_v)
def krylov_tf_circ(f_x, v, n):
v_exp = tf.expand_dims(v,1)
cols = [v_exp]
this_col = v
for i in range(n-1):
this_col = Ax_circ(f_x, this_col, n)
cols.append(tf.expand_dims(this_col,1))
K = tf.stack(cols)
return tf.transpose(tf.squeeze(K))
def V_mn(v, m, n):
# Stack columns
# First col: ones
# Second col: v
# Subsequent cols: v^{c-1}
ones = tf.ones([m], dtype=tf.float64)
cols = [ones, v]
for i in range(n-2):
this_col = tf.pow(v, i+2)
cols.append(this_col)
V = tf.transpose(tf.stack(cols))
return tf.cast(V, tf.float64)
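# Illustration (hypothetical check): V_mn stacks the columns
# [1, v, v**2, ..., v**(n-1)], i.e. a rectangular Vandermonde matrix in
# increasing powers, matching np.vander(v, n, increasing=True).
def _demo_V_mn():
    v = tf.constant([1., 2., 3.], dtype=tf.float64)
    V = V_mn(v, 3, 4)
    with tf.Session() as sess:
        V_val = sess.run(V)
    assert np.allclose(V_val, np.vander([1., 2., 3.], 4, increasing=True))
    return V_val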
|
structured-nets-master
|
tensorflow/utils.py
|
import numpy as np
from scipy.sparse import diags
import tensorflow as tf
import functools
def identity_mult_fn(v, n):
return v
# Multiplication by (Z_{f,v} + diag(d))^T
def circ_diag_transpose_mult_fn(v_f, d, x, n):
#sess = tf.InteractiveSession()
#tf.initialize_all_variables().run()
#print sess.run(x)
# Circular shift x to the left
y = tf.concat([x[1:], [x[0]]], axis=0)
# Scale by [v f]
return tf.multiply(y, v_f) + tf.multiply(d, x)
# Multiplication by Z_{f,v}^T
def circ_transpose_mult_fn(v_f, x, n):
#sess = tf.InteractiveSession()
#tf.initialize_all_variables().run()
#print sess.run(x)
# Circular shift x to the left
y = tf.concat([x[1:], [x[0]]], axis=0)
# Scale by [v f]
return tf.multiply(y, v_f)
def circ_mult_fn(f_v, x, n):
# Circular shift x to the right
y = tf.concat([x[n-1:], x[:n-1]], axis=0)
# Scale by [f v]
return tf.multiply(y, f_v)
def symm_tridiag_mult_fn(diag, off_diag, x, n):
sub_result = tf.multiply(x[1:], off_diag)
sup_result = tf.multiply(x[:n-1], off_diag)
sup_result = tf.concat([[0], sup_result], axis=0)
sub_result = tf.concat([sub_result, [0]], axis=0)
return sup_result + sub_result + tf.multiply(x, diag)
def tridiag_corners_mult_fn(subdiag, diag, supdiag, f_ur, f_ll, x, n):
sup_result = tf.multiply(x[1:], supdiag)
sup_result = tf.concat([sup_result, [0]], axis=0)
sub_result = tf.multiply(x[:n-1], subdiag)
sub_result = tf.concat([[0], sub_result], axis=0)
z1 = tf.zeros(n-1, dtype=tf.float64)
z2 = tf.zeros(n-1, dtype=tf.float64)
scaled_f_ll = tf.multiply(f_ll, x[0])
f_ll_result = tf.concat([z1, scaled_f_ll], axis=0)
scaled_f_ur = tf.multiply(f_ur, x[-1])
f_ur_result = tf.concat([scaled_f_ur, z2], axis=0)
return sup_result + sub_result + tf.multiply(x,diag) + f_ll_result + f_ur_result
def tridiag_corners_transpose_mult_fn(subdiag, diag, supdiag, f_ur, f_ll, x, n):
return tridiag_corners_mult_fn(supdiag, diag, subdiag, f_ll, f_ur, x, n)
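# Hedged numeric check (not part of the original tests below): the matrix-free
# multiply should agree with the dense matrix carrying the same bands plus f_ur
# in the top-right and f_ll in the bottom-left corner.
def _demo_tridiag_corners_mult():
    n = 4
    subdiag, diag, supdiag = np.array([2., 3., 4.]), np.ones(4), np.array([5., 6., 7.])
    A = diags([subdiag, diag, supdiag], [-1, 0, 1]).toarray()
    A[0, -1] = 8.
    A[-1, 0] = 9.
    x_np = np.array([1., 2., 3., 4.])
    fn = functools.partial(tridiag_corners_mult_fn,
                           tf.constant(subdiag), tf.constant(diag), tf.constant(supdiag),
                           tf.constant([8.], dtype=tf.float64), tf.constant([9.], dtype=tf.float64))
    with tf.Session() as sess:
        result = sess.run(fn(tf.constant(x_np), n))
    assert np.allclose(result, A.dot(x_np))
    return result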
"""
# f1: top right. f2: bottom left.
def tridiag_corners_transpose_mult_fn(subdiag, diag, supdiag, f1, f2, x, n):
sub_result = tf.multiply(x[1:], subdiag)
sup_result = tf.multiply(x[:n-1], supdiag)
sup_result = tf.concat([[0], sup_result], axis=0)
sub_result = tf.concat([sub_result, [0]], axis=0)
z = tf.zeros(n-1, dtype=tf.float64)
scaled_f1 = tf.multiply(f1, x[0])
scaled_f2 = tf.multiply(f2, x[n-1])
f1_result = tf.concat([z, scaled_f1], axis=0)
f2_result = tf.concat([scaled_f2, z], axis=0)
return sup_result + sub_result + tf.multiply(x, diag) + f1_result + f2_result
"""
# subdiag, diag, supdiag of Z_f
# multiplies by Z_f^T
# subdiag: last n-1 entries
# supdiag: first n-1 entries
def tridiag_corner_transpose_mult_fn(subdiag,diag,supdiag,f,x,n):
sub_result = tf.multiply(x[1:], subdiag)
sup_result = tf.multiply(x[:n-1], supdiag)
sup_result = tf.concat([[0], sup_result], axis=0)
sub_result = tf.concat([sub_result, [0]], axis=0)
#sess = tf.InteractiveSession()
#tf.initialize_all_variables().run()
z = tf.zeros(n-1, dtype=tf.float64)
scaled_f = tf.multiply(f, x[0])
f_result = tf.concat([z, scaled_f], axis=0)
return sup_result + sub_result + tf.multiply(x, diag) + f_result
# subdiag, diag, supdiag of the transposed operator
# f: bottom left
"""
def tridiag_corner_transpose_mult_fn(subdiag, diag, supdiag, f, x, n):
sub_result = tf.multiply(x[1:], subdiag)
sup_result = tf.multiply(x[:n-1], supdiag)
sup_result = tf.concat([[0], sup_result], axis=0)
sub_result = tf.concat([sub_result, [0]], axis=0)
z = tf.zeros(n-1, dtype=tf.float64)
scaled_f = tf.multiply(f, x[0])
f_result = tf.concat([z, scaled_f], axis=0)
return sup_result + sub_result + tf.multiply(x, diag) + f_result
"""
# Multiplication by Z_{subdiag, diag, supdiag, f}, with f in the top-right corner
def tridiag_corner_mult_fn(subdiag, diag, supdiag, f, x, n):
sup_result = tf.multiply(x[1:], supdiag)
sub_result = tf.multiply(x[:n-1], subdiag)
sup_result = tf.concat([sup_result, [0]], axis=0)
sub_result = tf.concat([[0], sub_result], axis=0)
z = tf.zeros(n-1, dtype=tf.float64)
scaled_f = [tf.multiply(f, x[n-1])]
f_result = tf.concat([scaled_f, z], axis=0)
return sup_result + sub_result + tf.multiply(x, diag) + f_result
# Multiplication by diag(d)
def diag_mult_fn(d, x, n):
return tf.multiply(d, x)
def krylov(fn, v, n):
# fn: takes as input a vector and multiplies by a matrix.
v_exp = tf.expand_dims(v,1)
cols = [v_exp]
this_col = v
for i in range(n-1):
this_col = fn(this_col, n)
cols.append(tf.expand_dims(this_col,1))
K = tf.stack(cols)
return tf.transpose(tf.squeeze(K))
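# Sanity sketch for krylov (hedged; uses a TF1-style session like the tests
# below): with fn = multiplication by diag(d), column i of K equals d**i
# elementwise times v, here with v = ones.
def _demo_krylov_diag():
    n = 4
    d_np = np.array([1., 2., 3., 4.])
    fn = functools.partial(diag_mult_fn, tf.constant(d_np, dtype=tf.float64))
    K = krylov(fn, tf.ones(n, dtype=tf.float64), n)
    with tf.Session() as sess:
        K_val = sess.run(K)
    expected = np.stack([d_np ** i for i in range(n)], axis=1)
    assert np.allclose(K_val, expected)
    return K_val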
def test_circ_sparsity():
n = 4
subdiag = np.array([2,3,4])
supdiag = np.zeros(n-1)
diag = np.zeros(n)
# Subdiag corresponds to Z_f, we multiply by Z_f^T
A = diags([subdiag, diag, supdiag], [-1, 0, 1], (n,n)).toarray().T
f = 5.0
A[-1,0] = f
print('A:', A)
x = np.array([1,2,3,4])
subdiag = tf.constant(subdiag, dtype=tf.float64)
f = tf.constant([f], dtype=tf.float64)
subdiag_f = tf.concat([subdiag, f], axis=0)
fn = functools.partial(circ_transpose_mult_fn, subdiag_f)
print('x: ', x)
print('Ax: ', np.dot(A,x))
x = tf.constant(x, dtype=tf.float64)
result = fn(x,n)
#result = krylov(fn, x, n)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
print(sess.run(result))
def test_tridiag_corner():
n = 4
# Subdiag, supdiag, diag corresponds to Z_e, we multiply by Z_e^T
subdiag = np.array([2,3,4])
supdiag = np.array([4,5,6])
diag = np.array([1,1,1,1])
f = 5.0
A = diags([subdiag, diag, supdiag], [-1, 0, 1], (n,n)).toarray().T
A[-1, 0] = f
print('subdiag: ', subdiag)
print('supdiag: ', supdiag)
print('diag: ', diag)
print(A)
x = np.array([1,2,3,4])
print('A: ', A)
print('Ax: ', np.dot(A,x))
fn = functools.partial(tridiag_corner_transpose_mult_fn, tf.constant(subdiag, dtype=tf.float64),
tf.constant(diag, dtype=tf.float64), tf.constant(supdiag, dtype=tf.float64), tf.constant(f, dtype=tf.float64))
print('x: ', x)
x = tf.constant(x, dtype=tf.float64)
result = fn(x,n)
#result = krylov(fn, x, n)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
print(sess.run(result))
def test_symm_tridiag():
n = 4
off_diag = np.array([2,3,4])
diag = np.array([1,1,1,1])
A = diags([off_diag, diag, off_diag], [-1, 0, 1], (n,n)).toarray()
print(A)
print(np.linalg.norm(A - A.T))
x = np.array([1,2,3,4])
print(np.dot(np.linalg.matrix_power(A, 3), x))
fn = functools.partial(symm_tridiag_mult_fn, tf.constant(diag, dtype=tf.float64),
tf.constant(off_diag, dtype=tf.float64))
x = tf.constant(x, dtype=tf.float64)
result = krylov(fn, x, n)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
print(sess.run(result))
if __name__ == '__main__':
test_circ_sparsity()
#test_tridiag_corner()
#test_symm_tridiag()
|
structured-nets-master
|
tensorflow/krylov.py
|
import numpy as np
import tensorflow as tf
from utils import *
from reconstruction import *
from krylov import *
import functools
import time, os
# Only an approximate reconstruction.
def tridiagonal_corner(dataset, params, test_freq=100, verbose=False):
# Create the model
x = tf.placeholder(tf.float64, [None, params.input_size])
if params.fix_G:
G = tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64)
else:
G = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64))
H = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64))
subdiag_A, supdiag_A, diag_A, subdiag_B, supdiag_B, diag_B, f_A, f_B = get_tridiag_corner_vars(params.layer_size, params.init_type, params.init_stddev, params.learn_corner)
fn_A = functools.partial(tridiag_corner_transpose_mult_fn, subdiag_A, diag_A, supdiag_A, f_A)
fn_B = functools.partial(tridiag_corner_transpose_mult_fn, subdiag_B, diag_B, supdiag_B, f_B)
W1 = tf.zeros([params.layer_size, params.layer_size], dtype=tf.float64)
for i in range(params.r):
K_A = krylov(fn_A, G[:, i], params.layer_size)
K_B = krylov(fn_B, H[:, i], params.layer_size)
prod = tf.matmul(K_A, tf.transpose(K_B))
W1 = tf.add(W1, prod)
# Compute a and b
a = tf.multiply(f_A, tf.reduce_prod(subdiag_A))
b = tf.multiply(f_B, tf.reduce_prod(subdiag_B))
coeff = 1.0/(1 - a*b)
W1 = tf.multiply(coeff, W1)
y = compute_y(x, W1, params)
y_ = tf.placeholder(tf.float64, [None, params.out_size])
loss, accuracy = compute_loss_and_accuracy(y, y_, params)
train_loss_summary = tf.summary.scalar('train_loss', loss)
train_acc_summary = tf.summary.scalar('train_accuracy', accuracy)
val_loss_summary = tf.summary.scalar('val_loss', loss)
val_acc_summary = tf.summary.scalar('val_accuracy', accuracy)
test_loss_summary = tf.summary.scalar('test_loss', loss)
test_acc_summary = tf.summary.scalar('test_accuracy', accuracy)
summary_writer = tf.summary.FileWriter(params.log_path, graph=tf.get_default_graph())
train_step = tf.train.MomentumOptimizer(params.lr, params.mom).minimize(loss)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
saver = tf.train.Saver()
step = 0
losses = {}
accuracies = {}
train_losses = []
train_accuracies = []
val_losses = []
val_accuracies = []
while step < params.steps:
batch_xs, batch_ys = dataset.batch(params.batch_size, step)
_ = sess.run([train_step], feed_dict={x: batch_xs, y_: batch_ys})
if step % test_freq == 0:
print(('Training step: ', step))
train_loss, train_accuracy, train_loss_summ, train_acc_summ = sess.run([loss, accuracy, train_loss_summary,
train_acc_summary], feed_dict={x: batch_xs, y_: batch_ys})
val_loss, val_accuracy, val_loss_summ, val_acc_summ = sess.run([loss, accuracy, val_loss_summary,
val_acc_summary], feed_dict={x: dataset.val_X, y_: dataset.val_Y})
summary_writer.add_summary(train_loss_summ, step)
summary_writer.add_summary(train_acc_summ, step)
summary_writer.add_summary(val_loss_summ, step)
summary_writer.add_summary(val_acc_summ, step)
train_losses.append(train_loss)
train_accuracies.append(train_accuracy)
val_losses.append(val_loss)
val_accuracies.append(val_accuracy)
print(('Train loss, accuracy: ', train_loss, train_accuracy))
print(('Validation loss, accuracy: ', val_loss, val_accuracy))
if verbose:
    print(('Current W1: ', sess.run(W1)))
if step % params.checkpoint_freq == 0:
save_path = saver.save(sess, os.path.join(params.checkpoint_path, str(step)))
print(("Model saved in file: %s" % save_path))
step += 1
losses['train'] = train_losses
losses['val'] = val_losses
accuracies['train'] = train_accuracies
accuracies['val'] = val_accuracies
# Test trained model
if params.test:
# Load test
dataset.load_test_data()
test_loss, test_accuracy, test_loss_summ, test_acc_summ = sess.run([loss, accuracy, test_loss_summary, test_acc_summary], feed_dict={x: dataset.test_X, y_: dataset.test_Y})
summary_writer.add_summary(test_loss_summ, step)
summary_writer.add_summary(test_acc_summ, step)
print(('SGD test loss, tridiagonal+corner: ', test_loss))
print(('SGD test accuracy, tridiagonal+corner: ', test_accuracy))
losses['test'] = test_loss
accuracies['test'] = test_accuracy
return losses, accuracies
def polynomial_transform(dataset, params, test_freq=100, verbose=False):
# Create the model
x = tf.placeholder(tf.float64, [None, params.input_size])
if params.fix_G:
G = tf.truncated_normal([params.n, params.r], stddev=0.01, dtype=tf.float64)
else:
G = tf.Variable(tf.truncated_normal([params.n, params.r], stddev=0.01, dtype=tf.float64))
H = tf.Variable(tf.truncated_normal([params.n, params.r], stddev=0.01, dtype=tf.float64))
diag_A, off_diag_A, diag_B = get_symm_pos_tridiag_vars(params.n, params.init_type, params.init_stddev)
fn_A = functools.partial(symm_tridiag_mult_fn, diag_A, off_diag_A)
fn_B = functools.partial(diag_mult_fn, diag_B)
W1 = tf.zeros([params.n, params.n], dtype=tf.float64)
for i in range(params.r):
K_A = krylov(fn_A, G[:, i], params.n)
K_B = krylov(fn_B, H[:, i], params.n)
prod = tf.matmul(K_A, tf.transpose(K_B))
W1 = tf.add(W1, prod)
# Compute a and b
"""
a = tf.reduce_prod(f_x_A)
b = tf.reduce_prod(f_x_B)
coeff = 1.0/(1 - a*b)
W1 = tf.scalar_mul(coeff, W1)
"""
y = compute_y(x, W1, params)
y_ = tf.placeholder(tf.float64, [None, params.out_size])
loss, accuracy = compute_loss_and_accuracy(y, y_, params)
train_loss_summary = tf.summary.scalar('train_loss', loss)
train_acc_summary = tf.summary.scalar('train_accuracy', accuracy)
val_loss_summary = tf.summary.scalar('val_loss', loss)
val_acc_summary = tf.summary.scalar('val_accuracy', accuracy)
test_loss_summary = tf.summary.scalar('test_loss', loss)
test_acc_summary = tf.summary.scalar('test_accuracy', accuracy)
summary_writer = tf.summary.FileWriter(params.log_path, graph=tf.get_default_graph())
train_step = tf.train.MomentumOptimizer(params.lr, params.mom).minimize(loss)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
saver = tf.train.Saver()
step = 0
losses = {}
accuracies = {}
train_losses = []
train_accuracies = []
val_losses = []
val_accuracies = []
while step < params.steps:
batch_xs, batch_ys = dataset.batch(params.batch_size, step)
_ = sess.run([train_step], feed_dict={x: batch_xs, y_: batch_ys})
if step % test_freq == 0:
print(('Training step: ', step))
train_loss, train_accuracy, train_loss_summ, train_acc_summ = sess.run([loss, accuracy, train_loss_summary,
train_acc_summary], feed_dict={x: batch_xs, y_: batch_ys})
val_loss, val_accuracy, val_loss_summ, val_acc_summ, this_diag_A, this_off_diag_A, this_diag_B = sess.run([loss, accuracy, val_loss_summary,
val_acc_summary, diag_A, off_diag_A, diag_B], feed_dict={x: dataset.val_X, y_: dataset.val_Y})
print('diag_A: ', this_diag_A)
print('off_diag_A: ', this_off_diag_A)
print('diag_B: ', this_diag_B)
summary_writer.add_summary(train_loss_summ, step)
summary_writer.add_summary(train_acc_summ, step)
summary_writer.add_summary(val_loss_summ, step)
summary_writer.add_summary(val_acc_summ, step)
train_losses.append(train_loss)
train_accuracies.append(train_accuracy)
val_losses.append(val_loss)
val_accuracies.append(val_accuracy)
print(('Train loss, accuracy: ', train_loss, train_accuracy))
print(('Validation loss, accuracy: ', val_loss, val_accuracy))
if verbose:
    print(('Current W1: ', sess.run(W1)))
if step % params.checkpoint_freq == 0:
save_path = saver.save(sess, os.path.join(params.checkpoint_path, str(step)))
print(("Model saved in file: %s" % save_path))
step += 1
losses['train'] = train_losses
losses['val'] = val_losses
accuracies['train'] = train_accuracies
accuracies['val'] = val_accuracies
# Test trained model
if params.test:
# Load test
dataset.load_test_data()
test_loss, test_accuracy, test_loss_summ, test_acc_summ = sess.run([loss, accuracy, test_loss_summary, test_acc_summary], feed_dict={x: dataset.test_X, y_: dataset.test_Y})
summary_writer.add_summary(test_loss_summ, step)
summary_writer.add_summary(test_acc_summ, step)
print(('SGD test loss, polynomial transform: ', test_loss))
print(('SGD test accuracy, polynomial transform: ', test_accuracy))
losses['test'] = test_loss
accuracies['test'] = test_accuracy
return losses, accuracies
def circulant_sparsity(dataset, params, test_freq=100, verbose=False):
# Create the model
x = tf.placeholder(tf.float64, [None, params.input_size])
if params.fix_G:
G = tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64)
else:
G = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64))
H = tf.Variable(tf.truncated_normal([params.layer_size, params.r], stddev=0.01, dtype=tf.float64))
f_x_A, f_x_B = get_x_f(params.layer_size, params.init_type, params.learn_corner, params.n_diag_learned, params.init_stddev)
if params.learn_diagonal:
diag_A = tf.Variable(tf.zeros(params.layer_size, dtype=tf.float64))
diag_B = tf.Variable(tf.zeros(params.layer_size, dtype=tf.float64))
fn_A = functools.partial(circ_diag_transpose_mult_fn, tf.reverse(f_x_A, [0]), diag_A)
fn_B = functools.partial(circ_diag_transpose_mult_fn, tf.reverse(f_x_B, [0]), diag_B)
else:
fn_A = functools.partial(circ_transpose_mult_fn, tf.reverse(f_x_A, [0]))
fn_B = functools.partial(circ_transpose_mult_fn, tf.reverse(f_x_B, [0]))
W1 = tf.zeros([params.layer_size, params.layer_size], dtype=tf.float64)
for i in range(params.r):
K_A = krylov(fn_A, G[:, i], params.layer_size)
K_B = krylov(fn_B, H[:, i], params.layer_size)
prod = tf.matmul(K_A, tf.transpose(K_B))
W1 = tf.add(W1, prod)
# Compute a and b
a = tf.reduce_prod(f_x_A)
b = tf.reduce_prod(f_x_B)
coeff = 1.0/(1 - a*b)
W1 = tf.scalar_mul(coeff, W1)
y = compute_y(x, W1, params)
y_ = tf.placeholder(tf.float64, [None, params.out_size])
loss, accuracy = compute_loss_and_accuracy(y, y_, params)
train_loss_summary = tf.summary.scalar('train_loss', loss)
train_acc_summary = tf.summary.scalar('train_accuracy', accuracy)
val_loss_summary = tf.summary.scalar('val_loss', loss)
val_acc_summary = tf.summary.scalar('val_accuracy', accuracy)
test_loss_summary = tf.summary.scalar('test_loss', loss)
test_acc_summary = tf.summary.scalar('test_accuracy', accuracy)
summary_writer = tf.summary.FileWriter(params.log_path, graph=tf.get_default_graph())
train_step = tf.train.MomentumOptimizer(params.lr, params.mom).minimize(loss)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
saver = tf.train.Saver()
step = 0
losses = {}
accuracies = {}
train_losses = []
train_accuracies = []
val_losses = []
val_accuracies = []
while step < params.steps:
batch_xs, batch_ys = dataset.batch(params.batch_size, step)
_ = sess.run([train_step], feed_dict={x: batch_xs, y_: batch_ys})
if step % test_freq == 0:
print(('Training step: ', step))
train_loss, train_accuracy, train_loss_summ, train_acc_summ = sess.run([loss, accuracy, train_loss_summary,
train_acc_summary], feed_dict={x: batch_xs, y_: batch_ys})
val_loss, val_accuracy, val_loss_summ, val_acc_summ = sess.run([loss, accuracy, val_loss_summary,
val_acc_summary], feed_dict={x: dataset.val_X, y_: dataset.val_Y})
summary_writer.add_summary(train_loss_summ, step)
summary_writer.add_summary(train_acc_summ, step)
summary_writer.add_summary(val_loss_summ, step)
summary_writer.add_summary(val_acc_summ, step)
train_losses.append(train_loss)
train_accuracies.append(train_accuracy)
val_losses.append(val_loss)
val_accuracies.append(val_accuracy)
print(('Train loss, accuracy: ', train_loss, train_accuracy))
print(('Validation loss, accuracy: ', val_loss, val_accuracy))
if verbose:
    print(('Current W1: ', sess.run(W1)))
if step % params.checkpoint_freq == 0:
save_path = saver.save(sess, os.path.join(params.checkpoint_path, str(step)))
print(("Model saved in file: %s" % save_path))
step += 1
losses['train'] = train_losses
losses['val'] = val_losses
accuracies['train'] = train_accuracies
accuracies['val'] = val_accuracies
# Test trained model
if params.test:
# Load test
dataset.load_test_data()
test_loss, test_accuracy, test_loss_summ, test_acc_summ = sess.run([loss, accuracy, test_loss_summary, test_acc_summary], feed_dict={x: dataset.test_X, y_: dataset.test_Y})
summary_writer.add_summary(test_loss_summ, step)
summary_writer.add_summary(test_acc_summ, step)
print(('SGD test loss, circulant sparsity operators: ', test_loss))
print(('SGD test accuracy, circulant sparsity operators: ', test_accuracy))
losses['test'] = test_loss
accuracies['test'] = test_accuracy
return losses, accuracies
def circulant_sparsity_hadamard(dataset, params, test_freq=100, verbose=False):
# Create the model
x = tf.placeholder(tf.float64, [None, params.input_size])
if params.fix_G:
G = tf.truncated_normal([params.n, params.r], stddev=0.01, dtype=tf.float64)
else:
G = tf.Variable(tf.truncated_normal([params.n, params.r], stddev=0.01, dtype=tf.float64))
H = tf.Variable(tf.truncated_normal([params.n, params.r], stddev=0.01, dtype=tf.float64))
t1 = time.time()
W1, f_A, f_B, v_A, v_B = circ_sparsity_recon(G, H, params.n, params.r, params.learn_corner,
params.n_diag_learned, params.init_type, params.init_stddev)
print('overall time of circ_sparsity_recon: ', time.time() - t1)
y = compute_y(x, W1, params)
y_ = tf.placeholder(tf.float64, [None, params.out_size])
loss, accuracy = compute_loss_and_accuracy(y, y_, params)
train_loss_summary = tf.summary.scalar('train_loss', loss)
train_acc_summary = tf.summary.scalar('train_accuracy', accuracy)
val_loss_summary = tf.summary.scalar('val_loss', loss)
val_acc_summary = tf.summary.scalar('val_accuracy', accuracy)
test_loss_summary = tf.summary.scalar('test_loss', loss)
test_acc_summary = tf.summary.scalar('test_accuracy', accuracy)
summary_writer = tf.summary.FileWriter(params.log_path, graph=tf.get_default_graph())
# Merge the training summaries once so the training loop can fetch them in a single run call.
merged_summary_op = tf.summary.merge([train_loss_summary, train_acc_summary])
train_step = tf.train.MomentumOptimizer(params.lr, params.mom).minimize(loss)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
saver = tf.train.Saver()
step = 0
losses = []
accuracies = []
train_losses = []
train_accuracies = []
val_losses = []
val_accuracies = []
while step < params.steps:
batch_xs, batch_ys = dataset.batch(params.batch_size, step)
summary, _ = sess.run([merged_summary_op, train_step], feed_dict={x: batch_xs, y_: batch_ys})
summary_writer.add_summary(summary, step)
if step % test_freq == 0:
print(('Training step: ', step))
# Verify displacement rank: Stein
v_A_real = None
v_B_real = None
if params.n_diag_learned > 0:
f_A_real, f_B_real, v_A_real, v_B_real, W1_real = sess.run([f_A, f_B, v_A, v_B, W1], feed_dict={x: batch_xs, y_: batch_ys})
else:
f_A_real, f_B_real, W1_real = sess.run([f_A, f_B, W1], feed_dict={x: batch_xs, y_: batch_ys})
A = gen_Z_f(params.n, f_A_real, v_A_real).T
B = gen_Z_f(params.n, f_B_real, v_B_real)
E = W1_real - np.dot(A, np.dot(W1_real, B))
print(('Disp rank: ', np.linalg.matrix_rank(E)))
this_loss, this_accuracy = sess.run([loss, accuracy], feed_dict={x: dataset.test_X, y_: dataset.test_Y})
losses.append(this_loss)
accuracies.append(this_accuracy)
print(('Test loss: ', this_loss))
print(('Test accuracy: ', this_accuracy))
if verbose:
print(('Current W1: ', W1_real))
step += 1
# Test trained model
print(('SGD final loss, learned operators (fixed circulant sparsity pattern): ', sess.run(loss, feed_dict={x: dataset.test_X, y_: dataset.test_Y})))
print(('SGD final accuracy, learned operators (fixed circulant sparsity pattern): ', sess.run(accuracy, feed_dict={x: dataset.test_X, y_: dataset.test_Y})))
return losses, accuracies
|
structured-nets-master
|
tensorflow/learned_operators.py
|
"""
Compare methods in parallel, spawning separate thread for each.
"""
import sys, os, datetime
import pickle as pkl
sys.path.insert(0, '../../')
# from optimize import optimize
from utils import *
from model_params import ModelParams
from dataset import Dataset
import argparse
import _thread as thread  # 'thread' in Python 2 was renamed to '_thread' in Python 3
def create_command(args,method,rank,lr,decay_rate,mom):
command = 'python compare.py --name=%s --methods=%s --dataset=%s --result_dir=%s --r=%s --lr=%s --decay_rate=%s --decay_freq=%s --mom=%s --steps=%s --batch_size=%s --test=%s --layer_size=%s --transform=%s --torch=%s --model=%s'
return command % (args.name, method, args.dataset, args.result_dir, rank, lr, decay_rate, args.decay_freq, mom, args.steps, args.batch_size, args.test, args.layer_size, args.transform, args.torch, args.model)
# python compare_parallel.py --name=test --methods=tridiagonal_corner,toeplitz-like --dataset=true_toeplitz --result_dir=2_25_18 --r=1 --lr=1e-3 --decay_rate=1.0 --decay_freq=0.1 --mom=0.9 --steps=50000 --batch_size=1024 --test=0 --layer_size=50 --transform=none --torch=1 --model=Attention
# Command line params
parser = argparse.ArgumentParser()
parser.add_argument("--name") # Name of run
parser.add_argument("--methods") # Which methods
parser.add_argument("--dataset") # Which dataset
parser.add_argument("--result_dir") # Where to save results
parser.add_argument("--r") # Rank / displacement ranks
parser.add_argument('--lr') # Learning rates
parser.add_argument('--decay_rate') # Decay rates of learning rate
parser.add_argument('--decay_freq', type=float) # Decay steps
parser.add_argument('--mom') # Momentums
parser.add_argument('--steps', type=int) # Steps
parser.add_argument('--batch_size', type=int) # Batch size
parser.add_argument('--test', type=int) # Test on test set
parser.add_argument('--layer_size', type=int) # Size of hidden layer
parser.add_argument('--transform') # Any transform of dataset, e.g. grayscale
parser.add_argument('--torch', type=int) # Pytorch or TF
parser.add_argument('--model') # Which model, e.g. CNN, MLP, RNN
args = parser.parse_args()
methods = args.methods.split(',')
ranks = [int(r) for r in args.r.split(',')]
lrs = [float(lr) for lr in args.lr.split(',')]
decay_rates = [float(dr) for dr in args.decay_rate.split(',')]
moms = [float(mom) for mom in args.mom.split(',')]
print('Testing methods: ', methods)
print('Testing ranks: ', ranks)
print('Testing lrs: ', lrs)
print('Testing decay rates: ', decay_rates)
print('Testing moms: ', moms)
for method in methods:
for rank in ranks:
for lr in lrs:
for decay_rate in decay_rates:
for mom in moms:
command = create_command(args,method,rank,lr,decay_rate,mom)
print('Starting new thread:\n', command)
#os.system(command)
thread.start_new_thread(os.system, (command,))
|
structured-nets-master
|
tensorflow/compare_parallel.py
|
import numpy as np
import os
#os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import tensorflow as tf
import pickle as pkl
from utils import *
from reconstruction import *
from visualize import visualize
from model import *
import time
import logging
def restore_from_checkpoint(dataset, params, sess, saver, x, y_, loss, accuracy):
# Restore the best validation checkpoint, test on that
saver.restore(sess, tf.train.latest_checkpoint(params.checkpoint_path))
print('Restored from most recent checkpoint: ')
val_loss, val_accuracy = sess.run([loss, accuracy], feed_dict={x: dataset.val_X, y_: dataset.val_Y})
print('After restoring, val loss and accuracy: %f, %f' % (val_loss, val_accuracy))
return val_loss, val_accuracy
def optimize_tf(dataset, params):
# Create model
x = tf.placeholder(tf.float64, [None, params.input_size],name='x')
y, model = forward(x, params)
y_ = tf.placeholder(tf.float64, [None, params.out_size],name='y_')
loss, accuracy = compute_loss_and_accuracy(y, y_, params)
train_loss_summary = tf.summary.scalar('train_loss', loss)
train_acc_summary = tf.summary.scalar('train_accuracy', accuracy)
val_loss_summary = tf.summary.scalar('val_loss', loss)
val_acc_summary = tf.summary.scalar('val_accuracy', accuracy)
test_loss_summary = tf.summary.scalar('test_loss', loss)
test_acc_summary = tf.summary.scalar('test_accuracy', accuracy)
summary_writer = tf.summary.FileWriter(params.log_path, graph=tf.get_default_graph())
# Allow for decay of learning rate
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(params.lr, global_step,
int(params.decay_freq*params.steps), params.decay_rate, staircase=True)
train_step = tf.train.MomentumOptimizer(learning_rate, params.mom).minimize(loss, global_step=global_step)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
saver = tf.train.Saver()
if params.restore_from_checkpoint:
val_loss,val_accuracy = restore_from_checkpoint(dataset,params,sess,saver,x,y_,loss,accuracy)
eigvals = {'E': [], 'W': [], 'A': [], 'B': []}
model_params = {'E': [], 'W': [], 'A': [], 'B': []}
losses = {'train': [], 'val': [], 'DR': [], 'norm_res': [], 'norm_W': [], 'ratio': [], 'eigvals': eigvals, 'params': model_params}
accuracies = {'train': [], 'val': [], 'best_val': 0.0, 'best_val_iter': 0}
t1 = time.time()
for _ in range(params.steps):
this_step, lr = sess.run([global_step, learning_rate])
batch_xs, batch_ys = dataset.batch(params.batch_size, this_step)
_ = sess.run([train_step], feed_dict={x: batch_xs, y_: batch_ys})
if this_step % params.test_freq == 0:
logging.debug(time.time() - t1)
t1 = time.time()
logging.debug('Training step: ' + str(this_step))
# Verify displacement rank
if params.check_disp and this_step % params.check_disp_freq == 0:
dr, norm_res, norm_W, E_ev, W_ev, A_ev, B_ev = check_rank(sess, x, y_, batch_xs, batch_ys, params, model)
losses['DR'].append(dr)
losses['norm_res'].append(norm_res)
losses['norm_W'].append(norm_W)
losses['eigvals']['E'].append(E_ev)
losses['eigvals']['W'].append(W_ev)
losses['eigvals']['A'].append(A_ev)
losses['eigvals']['B'].append(B_ev)
train_loss, train_accuracy, train_loss_summ, train_acc_summ, y_pred = sess.run([loss, accuracy, train_loss_summary,
train_acc_summary, y], feed_dict={x: batch_xs, y_: batch_ys})
val_loss, val_accuracy, val_loss_summ, val_acc_summ = sess.run([loss, accuracy, val_loss_summary,
val_acc_summary], feed_dict={x: dataset.val_X, y_: dataset.val_Y})
summary_writer.add_summary(train_loss_summ, this_step)
summary_writer.add_summary(train_acc_summ, this_step)
summary_writer.add_summary(val_loss_summ, this_step)
summary_writer.add_summary(val_acc_summ, this_step)
losses['train'].append(train_loss)
accuracies['train'].append(train_accuracy)
losses['val'].append(val_loss)
accuracies['val'].append(val_accuracy)
# Save
pkl.dump(losses, open(params.result_path + '_losses.p', 'wb'), protocol=2)
pkl.dump(accuracies, open(params.result_path + '_accuracies.p', 'wb'), protocol=2)
logging.debug('Saved losses, accuracies to: %s' % (params.result_path))
logging.debug('Train loss, accuracy for class %s: %f, %f' % (params.class_type, train_loss, train_accuracy))
logging.debug('Validation loss, accuracy %s: %f, %f' % (params.class_type, val_loss, val_accuracy))
logging.debug("Best validation accuracy so far: %f" % accuracies['best_val'])
# Update checkpoint if better validation accuracy
if val_accuracy > accuracies['best_val']:
accuracies['best_val'] = val_accuracy
accuracies['best_val_iter'] = this_step
#if this_step > 0 and this_step % params.checkpoint_freq == 0:
#save_path = saver.save(sess, os.path.join(params.checkpoint_path, str(this_step)))
save_path = saver.save(sess, os.path.join(params.checkpoint_path, str(this_step) + '_' + str(accuracies['best_val'])))
logging.debug("Updating validation accuracy so far: %f" % accuracies['best_val'])
logging.debug("Model saved in file: %s" % save_path)
elif accuracies['best_val_iter'] <= this_step - params.early_stop_steps:
logging.debug('Early stopping: best val iter at %d, current step %d' %(accuracies['best_val_iter'], this_step))
break
if this_step > 0 and params.viz_freq > 0 and this_step % params.viz_freq == 0:
visualize(params,sess,model,x,y_,batch_xs,batch_ys,y_pred,this_step)
# Get final params
if params.check_disp:
dr, norm_res, norm_W, E_ev, W_ev, A_ev, B_ev, E, W, A, B = check_rank(sess, x, y_, batch_xs, batch_ys, params, model)
losses['DR'].append(dr)
losses['norm_res'].append(norm_res)
losses['norm_W'].append(norm_W)
losses['params']['E'] = E
losses['params']['W'] = W
losses['params']['A'] = A
losses['params']['B'] = B
# Test trained model
if params.test:
# Load test
dataset.load_test_data()
# Test on the current model
if not params.test_best_val_checkpoint:
test_loss, test_accuracy, test_loss_summ, test_acc_summ = sess.run([loss, accuracy, test_loss_summary, test_acc_summary], feed_dict={x: dataset.test_X, y_: dataset.test_Y})
summary_writer.add_summary(test_loss_summ, this_step)
summary_writer.add_summary(test_acc_summ, this_step)
logging.debug('Test loss, %s: %f' % (params.class_type, test_loss))
logging.debug('Test accuracy, %s: %f ' % (params.class_type, test_accuracy))
losses['test'] = test_loss
accuracies['test'] = test_accuracy
else:
restore_from_checkpoint(dataset, params, sess, saver, x, y_, loss, accuracy)
test_loss, test_accuracy = sess.run([loss, accuracy], feed_dict={x: dataset.test_X, y_: dataset.test_Y})
logging.debug('Test loss of best val checkpoint, %s: %f' % (params.class_type, test_loss))
logging.debug('Test accuracy of best val checkpoint, %s: %f ' % (params.class_type, test_accuracy))
losses['test_best_val'] = test_loss
accuracies['test_best_val'] = test_accuracy
return losses, accuracies
|
structured-nets-master
|
tensorflow/optimize_tf.py
|
"""
Compare methods and hyperparameter settings sequentially.
"""
import sys, os, datetime
import pickle as pkl
# sys.path.insert(0, '../')
import argparse
import threading
import logging
import numpy as np
from optimize_tf import optimize_tf
from utils import *
from model_params import ModelParams
from dataset import Dataset
logging.basicConfig(level=logging.DEBUG, format='%(relativeCreated)6d %(threadName)s %(message)s')
# Available datasets: norb, cifar10, smallnorb, mnist, mnist_noise_variation_*, mnist_rand_bg, mnist_bg_rot, convex, rect, rect_images
# Example command:
# python compare.py --name=test --methods=tridiagonal_corner,toeplitz_like --dataset=true_toeplitz --result_dir=2_25_18 --r=1 --lr=1e-3 --decay_rate=1.0 --decay_freq=0.1 --mom=0.9 --steps=50000 --batch_size=1024 --test=0 --layer_size=50 --transform=none --torch=1 --model=Attention
method_map = {'circulant_sparsity': 'cs', 'tridiagonal_corner': 'tc', 'tridiagonal_corners': 'tcs', 'low_rank': 'lr', 'unconstrained': 'u',
'toeplitz_like': 't', 'toep_corner': 't1', 'toep_nocorn': 't0', 'subdiagonal': 'sd', 'hankel_like': 'h', 'vandermonde_like': 'v'}
def compare(args, method, rank, lr, decay_rate, mom, train_frac, steps):
params = ModelParams(args.dataset, args.transform, args.test, log_path,
dataset.input_size, args.layer_size, dataset.out_size(), num_layers,
loss, rank, steps, args.batch_size, lr, mom, init_type,
method, learn_corner, n_diag_learned, init_stddev, fix_G,
check_disp, check_disp_freq, checkpoint_freq, checkpoint_path, test_freq, verbose,
decay_rate, args.decay_freq, learn_diagonal, fix_A_identity,
stochastic_train, flip_K_B, num_conv_layers, args.torch, args.model,
viz_freq, num_pred_plot, viz_powers, early_stop_steps, replacement,
test_best_val_checkpoint, args.restore, num_structured_layers,
tie_operators_same_layer, tie_layers_A_A, tie_layers_A_B, train_frac)
# Save params + git commit ID
this_id = args.name + '_' + method_map[method] + '_r' + str(rank) + '_lr' + str(lr) + '_dr' + str(decay_rate) + '_mom' + str(mom) + '_bs' + str(args.batch_size) + '_tf' + str(train_frac) + '_steps' + str(steps)
this_results_dir = params.save(results_dir, this_id, commit_id, command)
for test_iter in range(args.trials):
this_iter_name = this_id + '_' + str(test_iter)
params.log_path = os.path.join(log_path, this_iter_name)
params.checkpoint_path = os.path.join(checkpoint_path, this_iter_name)
params.vis_path = os.path.join(vis_path, this_iter_name)
params.result_path = os.path.join(this_results_dir,this_iter_name)
logging.debug('Tensorboard log path: ' + params.log_path)
logging.debug('Tensorboard checkpoint path: ' + params.checkpoint_path)
logging.debug('Tensorboard vis path: ' + params.vis_path)
logging.debug('Results dir: ' + params.result_path)
if not os.path.exists(params.checkpoint_path):
os.makedirs(params.checkpoint_path)
if not os.path.exists(params.vis_path):
os.makedirs(params.vis_path)
losses, accuracies = optimize_tf(dataset, params)
tf.reset_default_graph()
pkl.dump(losses, open(params.result_path + '_losses.p', 'wb'), protocol=2)
pkl.dump(accuracies, open(params.result_path + '_accuracies.p', 'wb'), protocol=2)
logging.debug('Saved losses and accuracies for ' + method + ' to: ' + params.result_path)
# Command line params
parser = argparse.ArgumentParser()
parser.add_argument("--name", default='') # Name of run
parser.add_argument("--methods") # Which methods
parser.add_argument("--dataset") # Which dataset
parser.add_argument("--result_dir") # Where to save results
parser.add_argument("--r", default='0') # Rank / displacement ranks
parser.add_argument('--lr') # Learning rates
parser.add_argument('--decay_rate', default='1.0') # Decay rates of learning rate
parser.add_argument('--decay_freq', type=float, default=1.0) # Decay steps
parser.add_argument('--mom', default='0.9') # Momentums
parser.add_argument('--steps', type=int) # Steps
parser.add_argument('--batch_size', type=int) # Batch size
parser.add_argument('--test', type=int, default=1) # Test on test set
parser.add_argument('--layer_size', type=int) # Size of hidden layer
parser.add_argument('--transform', default='none') # Any transform of dataset, e.g. grayscale
parser.add_argument('--torch', type=int, default=0) # Pytorch or TF
parser.add_argument('--model') # Which model, e.g. CNN, MLP, RNN
parser.add_argument('--parallel') # Run each hyperparameter setting in its own thread
parser.add_argument('--train_frac', default='1.0') # Fraction(s) of training data to use
parser.add_argument('--trials', type=int, default=3) # Number of trials per setting
parser.add_argument('--restore', type=int, default=0) # Whether to restore from latest checkpoint
args = parser.parse_args()
methods = args.methods.split(',')
ranks = [int(r) for r in args.r.split(',')]
lrs = [float(lr) for lr in args.lr.split(',')]
decay_rates = [float(dr) for dr in args.decay_rate.split(',')]
moms = [float(mom) for mom in args.mom.split(',')]
train_fracs = [float(train_frac) for train_frac in args.train_frac.split(',')]
logging.debug('Testing methods: ' + str(methods))
logging.debug('Testing ranks: ' + str(ranks))
logging.debug('Testing lrs: ' + str(lrs))
logging.debug('Testing decay rates: ' + str(decay_rates))
logging.debug('Testing moms: ' + str(moms))
logging.debug('Testing train fracs: ' + str(train_fracs))
# Fixed params
num_layers = 1
out_dir = '../..'
loss = 'cross_entropy'
test_size = 1000
train_size = 10000
verbose = False
replacement = False # If true, sample with replacement when batching
check_disp = False # If true, checks rank of error matrix every check_disp_freq iters
check_disp_freq = 5000
fix_G = False
early_stop_steps = 500000
fix_A_identity = False
flip_K_B = False
init_type = 'toeplitz'
init_stddev = 0.01 # Random initializations
test_freq = 100
viz_freq = -1 # 1000
num_pred_plot = 5
viz_powers = [1,5,10]
learn_corner = True
learn_diagonal = False
stochastic_train = False
checkpoint_freq = 1000
num_conv_layers = 2
test_best_val_checkpoint = True # If true, tests best checkpoint (by validation accuracy). Otherwise, tests last one.
# trials = 3
# Only affect VAE
num_structured_layers = 2
tie_operators_same_layer = False
tie_layers_A_A = False
tie_layers_A_B = False
log_path = os.path.join(out_dir, 'tensorboard', args.result_dir)
results_dir = os.path.join(out_dir, 'results', args.result_dir)
checkpoint_path = os.path.join(out_dir, 'checkpoints', args.result_dir)
vis_path = os.path.join(out_dir, 'vis', args.result_dir)
commit_id = get_commit_id()
command = ' '.join(sys.argv)
# setattr(cf, 'use_cupy', True)
# TODO: use itertools.product to do this (see the sketch after the loop below)
for train_frac in train_fracs:
# Scale steps by train_frac
this_steps = int(train_frac*args.steps)
dataset = Dataset(args.dataset, args.layer_size, this_steps, args.transform,
stochastic_train, replacement, test_size, train_size, args.test, train_frac)
n_diag_learned = dataset.input_size - 1
for method in methods:
for rank in ranks:
for lr in lrs:
for decay_rate in decay_rates:
for mom in moms:
if args.parallel:
logging.debug('Starting thread')
threading.Thread(target=compare,args=(args, method, rank, lr, decay_rate, mom, train_frac, this_steps),).start()
else:
compare(args, method, rank, lr, decay_rate, mom, train_frac,this_steps)
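# A minimal sketch of the itertools.product refactor suggested by the TODO
# above (illustrative only, serial case; assumes the same globals as the loop it mirrors):
# import itertools
# for train_frac in train_fracs:
#     this_steps = int(train_frac * args.steps)
#     dataset = Dataset(args.dataset, args.layer_size, this_steps, args.transform,
#                       stochastic_train, replacement, test_size, train_size, args.test, train_frac)
#     n_diag_learned = dataset.input_size - 1
#     for method, rank, lr, decay_rate, mom in itertools.product(methods, ranks, lrs, decay_rates, moms):
#         compare(args, method, rank, lr, decay_rate, mom, train_frac, this_steps)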
|
structured-nets-master
|
tensorflow/compare.py
|
import sys
sys.path.insert(0, '../')
from reconstruction import *
from model_params import ModelParams
from utils import *
from krylov import *
from scipy.linalg import toeplitz
import numpy as np
import functools
import tensorflow as tf
def test_circ_sparsity(n):
# Generate Toeplitz matrix
A = gen_Z_f(n, 1).T
B = gen_Z_f(n, -1)
M = toeplitz(np.random.random(n), np.random.random(n))
# Stein displacement
E = M - np.dot(np.dot(A,M), B)
G,H,r = get_GH(E)
G = tf.constant(G, dtype=tf.float64)
H = tf.constant(H, dtype=tf.float64)
print(r)
f_x_A, f_x_B = get_f_x(n, 'toeplitz', True, n-1)
fn_A = functools.partial(circ_transpose_mult_fn, tf.reverse(f_x_A, [0]))
fn_B = functools.partial(circ_transpose_mult_fn, tf.reverse(f_x_B, [0]))
W1 = tf.zeros([n, n], dtype=tf.float64)
for i in range(r):
K_A = krylov(fn_A, G[:, i], n)
K_B = krylov(fn_B, H[:, i], n)
prod = tf.matmul(K_A, tf.transpose(K_B))
W1 = tf.add(W1, prod)
# Compute a and b
a = tf.reduce_prod(f_x_A)
b = tf.reduce_prod(f_x_B)
coeff = 1.0/(1 - a*b)
W1 = tf.scalar_mul(coeff, W1)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
W1_real = sess.run(W1)
print(np.linalg.norm(W1_real - M))
def test_tridiag_corner(n):
# Generate Toeplitz matrix
A = gen_Z_f(n, 1).T
B = gen_Z_f(n, -1)
M = toeplitz(np.random.random(n), np.random.random(n))
# Stein displacement
E = M - np.dot(np.dot(A,M), B)
G,H,r = get_GH(E)
print(r)
subdiag_A, supdiag_A, diag_A, f_A, subdiag_B, supdiag_B, diag_B, f_B = get_tridiag_corner_vars(n, 'toeplitz')
fn_A = functools.partial(tridiag_corner_transpose_mult_fn, subdiag_A, diag_A, supdiag_A, f_A)
fn_B = functools.partial(tridiag_corner_transpose_mult_fn, subdiag_B, diag_B, supdiag_B, f_B)
W1 = tf.zeros([n, n], dtype=tf.float64)
for i in range(r):
K_A = krylov(fn_A, G[:, i], n)
K_B = krylov(fn_B, H[:, i], n)
prod = tf.matmul(K_A, tf.transpose(K_B))
W1 = tf.add(W1, prod)
# Compute a and b
a = tf.multiply(f_A, tf.reduce_prod(subdiag_A))
b = tf.multiply(f_B, tf.reduce_prod(subdiag_B))
coeff = 1.0/(1 - a*b)
W1 = tf.multiply(coeff, W1)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
W1_real = sess.run(W1)
print(np.linalg.norm(W1_real - M))
def test_toeplitz(n):
# Check that a Toeplitz matrix can be reconstructed
A = gen_Z_f(n, 1).T
B = gen_Z_f(n, -1)
M = toeplitz(np.random.random(n), np.random.random(n))
# Stein displacement
E = M - np.dot(np.dot(A,M), B)
G,H,r = get_GH(E)
print(r)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
M_recon = sess.run(general_tf(A, B, G, H, r, n, n))
print(M_recon)
print(M)
print(np.linalg.norm(M_recon - M))
def test_krylov(n):
A = gen_Z_f(n, 1)
v = tf.constant(np.arange(n), dtype=tf.float64)
sess = tf.InteractiveSession()
tf.initialize_all_variables().run()
K = sess.run(krylov_tf(A, v, n))
print(K)
def run_tests(n):
# TODO: add more
test_toeplitz(n)
test_krylov(n)
test_tridiag_corner(n)
test_circ_sparsity(n)
if __name__ == '__main__':
run_tests(10)
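# A numpy-only sanity check (illustrative, not part of the original tests;
# assumes gen_Z_f from utils): the Stein displacement E = M - A M B of a
# Toeplitz matrix should have rank at most 2, matching the r printed above.
# n = 10
# A = gen_Z_f(n, 1).T
# B = gen_Z_f(n, -1)
# M = toeplitz(np.random.random(n), np.random.random(n))
# E = M - np.dot(np.dot(A, M), B)
# assert np.linalg.matrix_rank(E) <= 2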
|
structured-nets-master
|
tensorflow/tests/test_reconstruction.py
|
"""
Computes projections onto various classes.
"""
import numpy as np
from scipy.linalg import toeplitz
def kth_diag_indices(A, k):
rows, cols = np.diag_indices_from(A)
if k < 0:
return rows[-k:], cols[:k]
elif k > 0:
return rows[:-k], cols[k:]
else:
return rows, cols
# Projects onto Toeplitz matrices, under Frobenius norm.
def toeplitz_project_frob(A):
assert A.shape[0] == A.shape[1]
A_proj = np.zeros(A.shape)
# Get indices of each diagonal
for diag_idx in np.arange(-(A.shape[0]-1),A.shape[0]):
this_idx = kth_diag_indices(A, diag_idx)
# Get average
avg = np.mean(A[this_idx])
A_proj[this_idx] = avg
return A_proj
# Projects onto Hankel matrices, under Frobenius norm.
def hankel_project_frob(A):
A_flip = np.flipud(A)
A_flip_proj = toeplitz_project_frob(A_flip)
return np.flipud(A_flip_proj)
if __name__ == '__main__':
A = np.random.randint(5, size=(3,3))
print(A)
#print kth_diag_indices(A, -4)
print(hankel_project_frob(A))
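# Quick property check (illustrative, not in the original script; A is the
# random matrix defined above): projecting twice equals projecting once,
# since diagonal averaging is an orthogonal projection under the Frobenius norm.
# P = toeplitz_project_frob(A)
# assert np.allclose(P, toeplitz_project_frob(P))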
|
structured-nets-master
|
scripts/misc/projections.py
|
import pickle
import numpy as np
import matplotlib.pyplot as plt
arrays = pickle.load(open('mnist_noise_toep_dr2_0.pkl', 'rb'), encoding='bytes')
G_toeplitz, H_toeplitz, W_toeplitz = [arrays[key] for key in [b'G', b'H', b'W']]
arrays = pickle.load(open('mnist_noise_circ_0.pkl', 'rb'), encoding='bytes')
A_subdiag, B_subdiag, G_subdiag, H_subdiag, W_subdiag = [arrays[key] for key in [b'A', b'B', b'G', b'H', b'W']]
arrays = pickle.load(open('mnist_noise_trid_2.pkl', 'rb'), encoding='bytes')
A_tridiag, B_tridiag, G_tridiag, H_tridiag, W_tridiag = [arrays[key] for key in [b'A', b'B', b'G', b'H', b'W']]
arrays = pickle.load(open('mnist_noise_unconstr_0.pkl', 'rb'), encoding='bytes')
W_unconstrained = arrays[b'W']
plt.figure(figsize=(5, 4.75))
plt.stem(abs(np.fft.fft(np.diag(A_subdiag, -1) - 1))[:100])
plt.xlabel('Frequency', fontsize=16)
plt.ylabel('Magnitude', fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=12)
plt.savefig(f'../paper/figs/frequency_subdiag_subdiag.pdf', bbox_inches='tight')
plt.close()
plt.figure()
plt.stem(abs(np.fft.fft(np.diag(A_tridiag, -1) - 1))[:100])
plt.xlabel('Frequency', fontsize=16)
plt.ylabel('Magnitude', fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=12)
plt.savefig(f'../paper/figs/frequency_subdiag_tridiag.pdf', bbox_inches='tight')
plt.close()
plt.figure()
plt.matshow(np.log(1 + np.abs(W_toeplitz.T)), cmap='hot', interpolation='none')
plt.tick_params(axis='x', bottom=True, top=False, labelbottom=True, labeltop=False)
plt.savefig(f'../paper/figs/heatmap_weight_toeplitz.pdf', dpi=600, bbox_inches='tight')
plt.close()
plt.figure()
plt.matshow(np.log(1 + np.abs(W_subdiag.T)), cmap='hot', interpolation='none')
plt.tick_params(axis='x', bottom=True, top=False, labelbottom=True, labeltop=False)
plt.savefig(f'../paper/figs/heatmap_weight_subdiag.pdf', dpi=600, bbox_inches='tight')
plt.close()
# The scratch analysis below references A, B and W without defining them;
# presumably they are the subdiagonal operators and weights loaded above.
A, B, W = A_subdiag, B_subdiag, W_subdiag
subdiag_A = np.diag(A, -1)
corner_A = A[0, -1]
t = np.hstack([subdiag_A, np.array(corner_A)])
plt.imshow(t.reshape(28, -1) - 1, cmap='gray')
plt.clf()
plt.plot(subdiag_A)
T = A - np.diag(subdiag_A, -1)
plt.matshow(B)
subdiag_B = np.diag(B, -1)
corner_B = B[0, -1]
t = np.hstack([subdiag_B, -np.array(corner_B)])
plt.imshow(t.reshape(28, -1), cmap='gray')
plt.clf()
plt.plot(subdiag_B)
u, s, v = np.linalg.svd(W.T)
plt.clf()
plt.figure()
plt.plot(s)
plt.clf()
plt.plot(v[0])
num = 10
plt.figure(figsize=(5 * num, 5))
for i in range(num):
ax = plt.subplot(1, num, i+1)
t = v[i].reshape((28, -1))
ax.imshow(t, cmap='gray')
ax.axis('off')
plt.close()
# plt.matshow(W, cmap='gray')
plt.matshow(W, cmap='gray')
plt.matshow(np.log(1 + np.abs(W_toeplitz)), cmap='hot')
plt.matshow(np.log(1 + np.abs(W_subdiag)), cmap='hot')
f = np.fft.fft2(W)
plt.matshow(abs(f), cmap='gray')
plt.matshow(np.log(1 + abs(f)), cmap='gray')
from scipy.linalg import circulant
from scipy import fftpack
plt.matshow(fftpack.dct(W), cmap='gray')
plt.matshow(np.log(1 + abs(fftpack.dct(W))), cmap='gray')
c = circulant([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
plt.matshow(c)
c_f = np.fft.fft2(c)
c_c = fftpack.dct(c)
plt.matshow(abs(c_c))
|
structured-nets-master
|
scripts/analysis/analysis.py
|
import matplotlib.pyplot as plt
import numpy as np
# from matplotlib import rc
# # activate latex text rendering
# rc('text', usetex=True)
def update_minmax(mini, maxi, a):
return min(mini, min(a)), max(maxi, max(a))
def normalize(params, n):
return [float(p)/n**2 for p in params]
# return [n**2/float(p) for p in params]
def plot_all(ax, n, sd, td, t=None, v=None, h=None, lr=None, u=None, fc=None):
"""
pass in as dict of three arrays: r, acc, std
sd: subdiagonal
td: tridiagonal
t: toeplitz
v: vandermonde
h: hankel
lr: low rank
pass in as dict of three arrays: h (hidden units), acc, std
u: unconstrained
pass in as tuple of two numbers: acc, std
fc: n hidden units, fully connected
"""
learned_params = [2*n] + list(map(lambda r: 2*n*(r+1), sd['r'])) \
+ list(map(lambda r: 2*n*(r+3), td['r']))
learned_params = normalize(learned_params,n)
learned_acc = [t['acc'][0]] + sd['acc'] + td['acc']
learned_std = [t['std'][0]] + sd['std'] + td['std']
minp, maxp = min(learned_params), max(learned_params)
mina, maxa = min(learned_acc), max(learned_acc)
# ax.plot(learned_params, learned_acc, linewidth=3, marker='d', label=r"\textbf{Learned operators}")
ax.plot(learned_params, learned_acc, linewidth=3, marker='d', label=r"Learned operators (ours)")
if t is not None:
t_params = list(map(lambda r: 2*n*r, t['r']))
t_params_ = normalize(t_params,n)
minp, maxp = update_minmax(minp, maxp, t_params_)
mina, maxa = update_minmax(mina, maxa, t['acc'])
ax.plot(t_params_, t['acc'], linewidth=3, linestyle='-', marker='.', label='Toeplitz-like')
if v is not None:
v_params = list(map(lambda r: 2*n*r, v['r'])) # should be +n but looks weird for visualization
v_params = normalize(v_params,n)
minp, maxp = update_minmax(minp, maxp, v_params)
mina, maxa = update_minmax(mina, maxa, v['acc'])
ax.plot(v_params, v['acc'], linewidth=3, linestyle='-', marker='.', label='Vandermonde-like')
if h is not None:
h_params = list(map(lambda r: 2*n*r, h['r']))
h_params = normalize(h_params,n)
minp, maxp = update_minmax(minp, maxp, h_params)
mina, maxa = update_minmax(mina, maxa, h['acc'])
ax.plot(h_params, h['acc'], linewidth=3, linestyle='-', marker='.', label='Hankel-like')
if lr is not None:
lr_params = list(map(lambda r: 2*n*r, lr['r']))
lr_params = normalize(lr_params,n)
minp, maxp = update_minmax(minp, maxp, lr_params)
mina, maxa = update_minmax(mina, maxa, lr['acc'])
ax.plot(lr_params, lr['acc'], linewidth=3, linestyle='-', marker='.', label='Low Rank')
if u is not None:
u_params = list(map(lambda h: n*h, u['h']))
u_params = normalize(u_params,n)
minp, maxp = update_minmax(minp, maxp, u_params)
mina, maxa = update_minmax(mina, maxa, u['acc'])
ax.plot(u_params, u['acc'], linewidth=3, linestyle='-', label='Unconstrained')
if fc is not None:
mina, maxa = update_minmax(mina, maxa, [fc[0]])
ax.plot([minp, maxp], [fc[0], fc[0]], label='Fully Connected', color='black', linewidth=3, linestyle='-.')
# ax.set_xticks(t_params_, ['1/'+str(n**2/p) for p in t_params])
# ax.set_xticks(t_params_)
ax.set_aspect('auto', adjustable='box')
ax.set_xlim([minp, maxp])
ax.set_ylim([mina-(maxa-mina)*0.1, maxa+(maxa-mina)*0.1])
ticks = np.linspace(minp, maxp, num=7)
ax.set_xticks(ticks)
# ax.set_xticklabels([f'1/{n**2/p:.1f}' for p in t_params])
ax.set_xticklabels([f'{1./p:.1f}' for p in ticks])
# ax.set_xlabel('Total number of parameters')
# ax.set_ylabel('Test accuracy')
# if legend == True:
# ax.legend(loc='lower right')
plt.close()
fig = plt.figure(figsize=(12,3))
# SHL MNIST noise
sd = {
'r': [1],
'acc': [0.780],
'std': [0.012456593]
}
td = {
'r': [1],
'acc': [0.79433328],
'std': [0.0087971017]
}
t = {
'r': [1, 2, 3, 4],
'acc': [0.62916666, 0.69716668, 0.70766664, 0.70833331],
'std': [0.0097837811, 0.019707605, 0.0047667827, 0.005778301]
}
v = {
'r': [1,2,3,4],
'acc': [0.45233333, 0.56750005, 0.60416669, 0.61383337],
'std': [0.036148615, 0.018170487, 0.0040276861, 0.0034237648]
}
h = {
'r': [1,2,3,4],
'acc': [0.65174997, 0.71716666, 0.71749997, 0.71516663],
'std': [0.0052500069, 0.02878754, 0.018828174, 0.027265172]
}
lr = {
'r': [1,2,3,4],
'acc': [0.24066667, 0.37966666, 0.44949999, 0.50933331],
'std': [0.016624946, 0.002953345, 0.0028577377, 0.0042491788]
}
fc = (.6875, 0)
plt.subplot(1,3,1)
plt.title("MNIST-noise")
plot_all(fig.axes[0], 784, sd, td, t=t, v=v, h=h, lr=lr, u=None, fc=fc)
fig.axes[0].set_ylabel('Test accuracy')
# plt.show()
# plt.savefig('params_shl_mnist_noise.pdf', bbox_inches='tight')
# plt.clf()
# SHL CIFAR-10 mono
sd = {
'r': [1],
'acc': [0.43113336],
'std': [0.0055289017]
}
td = {
'r': [1],
'acc': [0.4564],
'std': [0.0008751]
}
t = {
'r': [1,2,3,4],
'acc': [0.37740001, 0.40759999, 0.41240001, 0.41920003],
'std': [0.0025495042, 0.00050000846, 0.0046396833, 0.0048194025]
}
v = {
'r': [1,2,3,4],
'acc': [0.2764667, 0.31040001, 0.32886663, 0.33543333],
'std': [0.014613534, 0.0050259386, 0.0074763279, 4.7148274e-05]
}
h = {
'r': [1,2,3,4],
'acc': [0.3804667, .39383331, 0.40773332, 0.40799999],
'std': [0.012873066, 0.0071097841, 0.0033767156, 0.0062053697]
}
lr = {
'r': [1,2,3,4],
'acc': [0.18790001, 0.24963333, 0.28833336, 0.31900001],
'std': [0.0012832283, 0.0010208919, 0.00089938374, 0.0048380499]
}
fc = (0.4708, 0)
# plt.figure()
plt.subplot(1,3,2)
plt.title("CIFAR-10")
plot_all(fig.axes[1], 1024, sd, td, t=t, v=v, h=h, lr=lr, u=None, fc=fc)
fig.axes[1].set_xlabel('Compression Ratio for Hidden Layer')
# fig.axes[1].legend(loc='lower right')
# plt.show()
# plt.savefig('params_shl_cifar_mono.pdf', bbox_inches='tight')
# plt.clf()
# SHL NORB
sd = {
'r': [1],
'acc': [0.59593624],
'std': [0.0068538445]
}
td = {
'r': [1],
'acc': [0.59523886],
'std': [0.0018132643]
}
t = {
'r': [1,2,3,4],
'acc': [0.48894033, 0.54002631, 0.55142885, 0.56502056],
'std': [0.0041263779, 0.0099200783, 0.0024635822, 0.0061167572]
}
v = {
'r': [1,2,3,4],
'acc': [0.3664552, 0.42934385, 0.4352195, 0.44603908],
'std': [0.0032585002, 0.0087335464, 0.0061135041, 0.0024887063]
}
h = {
'r': [1,2,3,4],
'acc': [0.47990969, 0.53255033, 0.5393576, 0.53800869],
'std': [0.0096045565, 0.004828494, 0.0089905132, 0.0049836915]
}
lr = {
'r': [1,2,3,4],
'acc': [0.3285208, 0.37442842, 0.39659354, 0.43265319],
'std': [0.0026101458, 0.0013176826, 0.0001270434, 0.001628752]
}
fc = (0.6041038, 0)
# plt.figure()
plt.subplot(1,3,3)
plt.title("NORB")
plot_all(fig.axes[2], 784, sd, td, t=t, v=v, h=h, lr=lr, u=None, fc=fc)
# fig.axes[2].legend(loc='lower right')
# plt.show()
# plt.savefig('params_shl_norb.pdf', bbox_inches='tight')
# plt.clf()
# plt.show()
plt.tight_layout()
# plt.show()
plt.savefig('acc_vs_params_shl.pdf', bbox_inches='tight')
plt.clf()
# CNN last layer
fig = plt.figure(figsize=(12,3))
# CNN MNIST-noise
sd = {
'r': [1],
'acc': [0.9265],
'std': [0.0047081495]
}
td = {
'r': [1],
'acc': [0.93533343],
'std': [0.0051044598]
}
t = {
'r': [1,2,3,4],
'acc': [0.90533328, 0.90933341, 0.90350002, 0.91433334],
'std': [0.0058642207, 0.0024944325, 0.0056124986, 0.0067618182]
}
v = {
'r': [1,2,3,4],
'acc': [0.74466664, 0.861, 0.85799998, 0.84683329],
'std': [0.043673653, 0.0024832655, 0.007560859, 0.011813368]
}
h = {
'r': [1,2,3,4],
'acc': [0.90700006, 0.90633339, 0.90249997, 0.91150004],
'std': [0.001080128, 0.0031710495, 0.0051153307, 0.0040207645]
}
lr = {
'r': [1,2,3,4],
'acc': [0.39333335, 0.66433334, 0.80366665, 0.84183329],
'std': [0.02600107, 0.023686616, 0.014401001, 0.010217078]
}
fc = (0.903, 0)
plt.subplot(1,3,1)
plt.title("MNIST-noise")
plot_all(fig.axes[0], 784, sd, td, t=t, v=v, h=h, lr=lr, u=None, fc=fc)
fig.axes[0].set_ylabel('Test accuracy')
# CNN CIFAR-10 mono
sd = {
'r': [1],
'acc': [0.64996666],
'std': [0.0088710589]
}
td = {
'r': [1],
'acc': [0.66089994],
'std': [0.0076345755]
}
t = {
'r': [1,2,3,4],
'acc': [0.64956665, 0.65023333, 0.65143329, 0.64866662],
'std': [0.00069441635, 0.010134532, 0.0073803198, 0.0027475108]
}
v = {
'r': [1,2,3,4],
'acc': [0.49699998, 0.56516665, 0.58923334, 0.59],
'std': [0.012887465, 0.010227513, 0.0064116176, 0.01]
}
h = {
'r': [1,2,3,4],
'acc': [0.64123327, 0.64403337, 0.6487667, 0.64303333],
'std': [0.0028755669, 0.0066979155, 0.0085495887, 0.0075429156]
}
lr = {
'r': [1,2,3,4],
'acc': [0.28889999, 0.45389998, 0.53403336, 0.57600003],
'std': [0.0041817101, 0.0045460523, 0.004343845, 0.0048006908]
}
fc = (0.6528, 0)
plt.subplot(1,3,2)
plt.title("CIFAR-10")
plot_all(fig.axes[1], 1024, sd, td, t=t, v=v, h=h, lr=lr, u=None, fc=fc)
fig.axes[1].set_xlabel('Compression Ratio for CNN Last Layer')
# fig.axes[1].legend(loc='lower right')
# CNN NORB
sd = {
'r': [1],
'acc': [0.69955987],
'std': [0.0057070036]
}
td = {
'r': [1],
'acc': [0.70161748],
'std': [0.0034562966]
}
t = {
'r': [1,2,3,4],
'acc': [0.67745197, 0.697308, 0.68828875, 0.69480449],
'std': [0.0021604896, 0.0055511161, 0.0025034547, 0.0074541033]
}
v = {
'r': [1,2,3,4],
'acc': [0.58504796, 0.67032462, 0.67778349, 0.66851282],
'std': [0.03247809, 0.010677353, 0.0046389499, 0.0086385822]
}
h = {
'r': [1,2,3,4],
'acc': [0.67735481, 0.6791895, 0.6919753, 0.69066644],
'std': [0.0094787478, 0.0045856023, 0.0037513557, 0.0023269763]
}
lr = {
'r': [1,2,3,4],
'acc': [0.47555444, 0.62934959, 0.66097963, 0.66602081],
'std': [0.0069264239, 0.0041847723, 0.0047160424, 0.0028731704]
}
fc = (0.73229307, 0)
plt.subplot(1,3,3)
plt.title("NORB")
plot_all(fig.axes[2], 784, sd, td, t=t, v=v, h=h, lr=lr, u=None, fc=fc)
fig.axes[2].legend(loc='lower right')
plt.tight_layout()
plt.savefig('acc_vs_params_cnn.pdf', bbox_inches='tight')
# plt.show()
plt.clf()
|
structured-nets-master
|
scripts/visualizations/acc_vs_params.py
|
import matplotlib.pyplot as plt
import pickle as pkl
import numpy as np
n_iters = 20000
step = 100
n = 50
mom = 0.99
prefix = '../../results/mom'
xs = np.arange(0, n_iters, step)
trid_corner = pkl.load(open(prefix + str(mom) + '_' + 'toeplitz_tridiagonal_corner_losses_' + str(n) + '.p', 'rb'))
circ = pkl.load(open(prefix + str(mom) + '_' + 'toeplitz_circulant_sparsity_losses_' + str(n) + '.p', 'rb'))
unconstr = pkl.load(open(prefix + str(mom) + '_' + 'toeplitz_unconstrained_losses_' + str(n) + '.p', 'rb'))
toep = pkl.load(open(prefix + str(mom) + '_' + 'toeplitz_toeplitz_like_losses_' + str(n) + '.p', 'rb'))
hank = pkl.load(open(prefix + str(mom) + '_' + 'toeplitz_hankel_like_losses_' + str(n) + '.p', 'rb'))
van = pkl.load(open(prefix + str(mom) + '_' + 'toeplitz_vandermonde_like_losses_' + str(n) + '.p', 'rb'))
plt.semilogy(xs, unconstr, label='Unconstrained', marker='o')
plt.semilogy(xs, trid_corner, label='Learned tridiagonal + corner operators, r=2', linestyle='--', linewidth=10)
plt.semilogy(xs, toep, label='Toeplitz-like, r=2', linewidth=3)
plt.semilogy(xs, hank, label='Hankel-like, r=2', linewidth=3)
plt.semilogy(xs, van, label='Vandermonde-like, r=2', linewidth=3)
plt.semilogy(xs, circ, label='Learned circulant sparsity pattern operators, r=2', linestyle='-', linewidth=5)
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Toeplitz, mom=' + str(mom) + ', n=' + str(n))
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.1), fontsize=10)
plt.savefig("toeplitz_operators_n" + str(n) + 'mom_' + str(mom) + ".png", bbox_inches="tight")
plt.clf()
|
structured-nets-master
|
scripts/visualizations/make_plot.py
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle as pkl
# MNIST variants
# idx = 1
# # data_loc = '/dfs/scratch1/thomasat/datasets/mnist_noise/mnist_noise_variations_all_' + str(idx) + '.amat'
# data_loc = '/dfs/scratch1/thomasat/datasets/mnist_bg_rot/mnist_all_background_images_rotation_normalized_train_valid.amat'
# data = np.genfromtxt(data_loc)
# save_loc = 'mnist_bg_rot_digits.png'
# np.random.seed(1)
# samples = np.random.choice(data.shape[0], 4)
# X = data[samples, :-1].reshape((-1, 28, 28))
# NORB
data_loc = '/dfs/scratch1/thomasat/datasets/norb_full/processed_py2_train_28.pkl'
data = pkl.load(open(data_loc, 'rb'))
data = data['X']
save_loc = 'norb_digits.png'
# self.train_Y = data['Y']
np.random.seed(1)
samples = np.random.choice(data.shape[0], 4)
X = data[samples, :].reshape((-1, 28, 28))
fig = plt.figure(figsize=(8,8))
plt.subplot(2,2,1)
plt.axis('off')
plt.imshow(X[0,:], cmap='gray', interpolation='nearest')
plt.subplot(2,2,2)
plt.axis('off')
plt.imshow(X[1,:], cmap='gray', interpolation='nearest')
plt.subplot(2,2,3)
plt.axis('off')
plt.imshow(X[2,:], cmap='gray', interpolation='nearest')
plt.subplot(2,2,4)
plt.axis('off')
plt.imshow(X[3,:], cmap='gray', interpolation='nearest')
fig.subplots_adjust(wspace=0,hspace=0)
plt.savefig(save_loc, bbox_inches='tight')
plt.close()
|
structured-nets-master
|
scripts/visualizations/show_example.py
|
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from timeit import default_timer as timer
import timeit
import pickle as pkl
import matplotlib.patches as mpatches
import sys
sys.path.insert(0,'../../pytorch/')
import structure.toeplitz_cpu as toep
import structure.scratch.krylovfast as subd
plt.rcParams['font.family'] = 'serif'
def test_unstructured(n,trials,reps):
u_setup_str = '''
import numpy as np
np.random.seed(0)
A = np.random.normal(size=({n}, {n}))
v = np.random.normal(size=({n}))
'''.format(n=n)
return min(timeit.repeat("A @ v", u_setup_str, number = trials, repeat = reps))
def test_toeplitz(n,r,trials,reps):
t_setup_str = '''
import numpy as np
import structure.toeplitz_cpu as toep
np.random.seed(0)
G = np.random.normal(size=({r}, {n}))
H = np.random.normal(size=({r}, {n}))
v = np.random.normal(size=(1,{n}))
'''.format(n=n,r=r)
return min(timeit.repeat("toep.toeplitz_mult(G, H, v)", t_setup_str, number = trials, repeat = reps))
def test_lr(n,r,trials,reps):
lr_setup_str = '''
import numpy as np
np.random.seed(0)
G = np.random.normal(size=({n}, {r}))
H = np.random.normal(size=({r}, {n}))
v = np.random.normal(size={n})
'''.format(n=n,r=r)
return min(timeit.repeat("Hv = H @ v;G @ Hv", lr_setup_str, number = trials, repeat = reps))
def test_sd(n,r,trials,reps):
sd_setup_str = '''
import numpy as np
import structure.scratch.krylovfast as subd
np.random.seed(0)
G = np.random.normal(size=({r}, {n}))
H = np.random.normal(size=({r}, {n}))
v = np.random.normal(size=(1,{n}))
K = subd.KrylovMultiply({n}, 1, {r})
KT = subd.KrylovTransposeMultiply({n}, 1, {r})
subd_A = np.random.normal(size=({n}-1))
subd_B = np.random.normal(size=({n}-1))
'''.format(n=n,r=r)
return min(timeit.repeat("K(subd_A, G, KT(subd_B, H, v))", sd_setup_str, number = trials, repeat = reps))
exps = np.arange(9,16)
sizes = 1 << exps
rs = [1,2,4,8,16]
trials = 1000
reps = 10
out_loc = 'speed_data.p'
data = {}
data['rs'] = rs
data['sizes'] = sizes
data['trials'] = trials
data['reps'] = reps
times_t = np.zeros((len(rs), sizes.size))
times_sd = np.zeros((len(rs), sizes.size))
times_lr = np.zeros((len(rs), sizes.size))
speedups_t = np.zeros((len(rs), sizes.size))
speedups_sd = np.zeros((len(rs), sizes.size))
speedups_lr = np.zeros((len(rs), sizes.size))
unstructured_times = np.zeros(sizes.size)
for idx_n, n in enumerate(sizes):
unstructured_times[idx_n] = test_unstructured(n,trials,reps)
data['unstructured_times'] = unstructured_times
for idx_r, r in enumerate(rs):
for idx_n, n in enumerate(sizes):
t = test_toeplitz(n,r,trials,reps)
sd = test_sd(n,r,trials,reps)
lr = test_lr(n,r,trials,reps)
times_t[idx_r,idx_n] = t
times_sd[idx_r,idx_n] = sd
times_lr[idx_r,idx_n] = lr
data['t'] = times_t
data['sd'] = times_sd
data['lr'] = times_lr
pkl.dump(data,open(out_loc,'wb'),protocol=2)
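# A minimal plotting sketch (illustrative; the original script only dumps the
# timing data): load speed_data.p and plot each method's speedup over the
# unstructured matrix-vector product.
# data = pkl.load(open(out_loc, 'rb'))
# for idx_r, r in enumerate(data['rs']):
#     plt.loglog(data['sizes'], data['unstructured_times'] / data['t'][idx_r], label='Toeplitz-like, r=%d' % r)
# plt.axhline(1.0, color='black', linestyle='--')
# plt.xlabel('n')
# plt.ylabel('Speedup over unstructured')
# plt.legend()
# plt.savefig('speed_plot.pdf', bbox_inches='tight')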
|
structured-nets-master
|
scripts/visualizations/plot_speed.py
|
import matplotlib.pyplot as plt
# For MNIST noise
fixed_xs = [10202, 11770, 13338, 14906]
lowrank = [0.2403, 0.377, 0.4577, 0.546]
lowrank_std = [0.0184407, 0.004, 0.00334066, 0.006]
toep = [0.62525, 0.681, 0.6758, 0.712]
toep_std = [0.00125, 0.017, 0.0227389, 0.012]
hank = [0.66175, 0.696667, 0.70475, 0.704]
hank_std = [0.0284044, 0.016705, 0.0174696, 0.0194551]
van = [0.434, 0.5667, 0.584125, 0.626]
van_std = [0.00147196, 0.00804115, 0.00867016, 0.005]
# 0.6761 0.002245
unconstr_val = 0.6761
unconstr_h784 = [unconstr_val for x in range(len(fixed_xs))]
unconstr_h784std = [0,0,0,0]
unconstr = [0.5941, 0.6135, 0.6064, 0.616]
unconstr_std = [0.0118929, 0.00749667, 0.0073512, 0.0049699]
learned_xs = [10202, 10454, 11454, 11770, 14904]
learned = [toep[0], 0.6807, 0.7447, 0.765, 0.784]
learned_std = [0.0, 0.0142253, 0.0216809, 0.018, 0.018]
plt.errorbar(learned_xs, learned, yerr=learned_std, label='Learned operators (ours)')
plt.errorbar(fixed_xs, hank, yerr=hank_std, label='Hankel-like')
plt.errorbar(fixed_xs, toep, yerr=toep_std, label='Toeplitz-like')
plt.errorbar(fixed_xs, van, yerr=van_std, label='Vandermonde-like')
plt.errorbar(fixed_xs, lowrank, yerr=lowrank_std, label='Low rank')
plt.errorbar(fixed_xs, unconstr_h784, yerr=unconstr_h784std, label='Unconstrained, ' + r'$h=784$', color='black', linewidth=5, linestyle='--')
plt.errorbar(fixed_xs, unconstr, yerr=unconstr_std, label='Unconstrained')
plt.xlim([10202, 14906])
plt.ylim([0.2, 0.8])
plt.xlabel('Total number of parameters')
plt.ylabel('Test accuracy')
plt.legend(loc='lower right')
plt.savefig("mnist_noise_params.png", bbox_inches="tight")
plt.clf()
# For CIFAR-10 grayscale
lowrank = [0.18576, 0.24828 , 0.2868 , 0.31054]
lowrank_std = [ 0.000682932, 0.00265887, 0.00272177, 0.00160325]
toep = [0.32765, 0.33692, 0.342133, 0.3472]
toep_std = [0.0161717, 0.00563894, 0.00694087, 0.0106909]
hank = [0.3202, 0.329333, 0.3317, 0.34005]
hank_std = [0.00645032, 0.00373839, 0.00659999, 0.00155]
van = [0.24382, 0.26516, 0.29505, 0.31405]
van_std = [0.00246284, 0.00698873, 0.00635, 0.00225]
learned = [toep[0], 0.36406, 0.3966]
learned_std = [0.0, 0.0104458, 0.000300005]
fixed_xs = [13322, 15370, 17418, 19466]
learned_xs = [13322, 15370, 19466]
unconstr_val = 0.4326
unconstr_h784 = [unconstr_val for x in range(len(fixed_xs))]
unconstr = [0.33606, 0.34328,0.35444, 0.36318]
unconstr_std = [0.00389287, 0.0071042, 0.00625223, 0.00380337]
plt.errorbar(learned_xs, learned, yerr=learned_std, label='Learned operators (ours)')
plt.errorbar(fixed_xs, hank, yerr=hank_std, label='Hankel-like')
plt.errorbar(fixed_xs, toep, yerr=toep_std, label='Toeplitz-like')
plt.errorbar(fixed_xs, van, yerr=van_std, label='Vandermonde-like')
plt.errorbar(fixed_xs, lowrank, yerr=lowrank_std, label='Low rank')
plt.errorbar(fixed_xs, unconstr_h784, yerr=unconstr_h784std, label='Unconstrained, ' + r'$h=1024$', color='black', linewidth=5, linestyle='--')
plt.errorbar(fixed_xs, unconstr, yerr=unconstr_std, label='Unconstrained')
plt.xlim([13322, 19466])
plt.xlabel('Total number of parameters')
plt.ylabel('Test accuracy')
plt.legend(loc='lower right')
#plt.legend(loc=9, bbox_to_anchor=(0.5, -0.1), fontsize=10)
plt.savefig("cifar10_params.png", bbox_inches="tight")
plt.clf()
|
structured-nets-master
|
scripts/visualizations/make_params_plot_iclr.py
|
import numpy as np
import matplotlib.pyplot as plt
# name = 'mnist_sd_r4'
names = ['bgrot_sd_r1', 'bgrot_sd_r4', 'bgrot_sd_r16']
# names = ['patch2_sd_r8', 'patch_sd_r8_best']
ranks = [1, 1, 4, 4, 16, 16]
for name in names:
# for name in [names[2]]:
n = 1024
# r = 4
G = np.loadtxt(name+'_G')
H = np.loadtxt(name+'_H')
sd_A = np.loadtxt(name+'_subd_A')
sd_B = np.loadtxt(name+'_subd_B')
plt.figure()
plt.imshow(sd_B.reshape(32,32), cmap='gray')
plt.savefig(name+'.pdf', bbox_inches='tight')
plt.clf()
# plt.show()
# def krylov_construct(A, v, m):
# n = v.shape[0]
# assert A.shape == (n,n)
# d = np.diagonal(A, 0)
# subd = np.diagonal(A, -1)
# K = np.zeros(shape=(m,n))
# K[0,:] = v
# for i in range(1,m):
# K[i,1:] = subd*K[i-1,:-1]
# return K
# A = np.diag(sd_A[:-1], -1)
# B = np.diag(sd_B[:-1], -1)
# M = sum([krylov_construct(A, G[i], n) @ krylov_construct(B, H[i], n).T for i in list(range(r))])
|
structured-nets-master
|
scripts/visualizations/fat.py
|
# Modified from https://github.com/ndrplz/small_norb/blob/master/smallnorb/dataset.py
import struct
import numpy as np
import matplotlib.pyplot as plt # needed by explore_random_examples below
import scipy.misc
from tqdm import tqdm
from os import makedirs
from os.path import join
from os.path import exists
from itertools import groupby
#names = ['train1', 'train2', 'train3', 'train4', 'train5']#, 'train6', 'train7', 'train8', 'train9','train10']
#names = ['test1', 'test2']
class NORBExample:
def __init__(self):
self.image_lt = None
self.image_rt = None
self.category = None
self.instance = None
self.elevation = None
self.azimuth = None
self.lighting = None
def __lt__(self, other):
return self.category < other.category or \
(self.category == other.category and self.instance < other.instance)
def show(self, subplots):
fig, axes = subplots
fig.suptitle(
'Category: {:02d} - Instance: {:02d} - Elevation: {:02d} - Azimuth: {:02d} - Lighting: {:02d}'.format(
self.category, self.instance, self.elevation, self.azimuth, self.lighting))
axes[0].imshow(self.image_lt, cmap='gray')
axes[1].imshow(self.image_rt, cmap='gray')
@property
def pose(self):
return np.array([self.elevation, self.azimuth, self.lighting], dtype=np.float32)
class NORBDataset:
# Number of examples in both train and test set
n_examples = 29160
# Categories present in small NORB dataset
categories = ['animal', 'human', 'airplane', 'truck', 'car']
def __init__(self, dataset_root, names):
"""
Initialize small NORB dataset wrapper
Parameters
----------
dataset_root: str
Path to directory where small NORB archives have been extracted.
"""
self.names = names
self.dataset_root = dataset_root
self.initialized = False
# Store path for each file in small NORB dataset (for compatibility the original filename is kept)
self.dataset_files = {
'train1': {
'cat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-01-cat.mat'),
'info': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-01-info.mat'),
'dat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-01-dat.mat')
},
'train2': {
'cat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-02-cat.mat'),
'info': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-02-info.mat'),
'dat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-02-dat.mat')
},
'train3': {
'cat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-03-cat.mat'),
'info': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-03-info.mat'),
'dat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-03-dat.mat')
},
'train4': {
'cat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-04-cat.mat'),
'info': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-04-info.mat'),
'dat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-04-dat.mat')
},
'train5': {
'cat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-05-cat.mat'),
'info': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-05-info.mat'),
'dat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-05-dat.mat')
},
'train6': {
'cat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-06-cat.mat'),
'info': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-06-info.mat'),
'dat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-06-dat.mat')
},
'train7': {
'cat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-07-cat.mat'),
'info': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-07-info.mat'),
'dat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-07-dat.mat')
},
'train8': {
'cat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-08-cat.mat'),
'info': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-08-info.mat'),
'dat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-08-dat.mat')
},
'train9': {
'cat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-09-cat.mat'),
'info': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-09-info.mat'),
'dat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-09-dat.mat')
},
'train10': {
'cat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-10-cat.mat'),
'info': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-10-info.mat'),
'dat': join(self.dataset_root, 'norb-5x46789x9x18x6x2x108x108-training-10-dat.mat')
},
'test1': {
'cat': join(self.dataset_root, 'norb-5x01235x9x18x6x2x108x108-testing-01-cat.mat'),
'info': join(self.dataset_root, 'norb-5x01235x9x18x6x2x108x108-testing-01-info.mat'),
'dat': join(self.dataset_root, 'norb-5x01235x9x18x6x2x108x108-testing-01-dat.mat')
},
'test2': {
'cat': join(self.dataset_root, 'norb-5x01235x9x18x6x2x108x108-testing-02-cat.mat'),
'info': join(self.dataset_root, 'norb-5x01235x9x18x6x2x108x108-testing-02-info.mat'),
'dat': join(self.dataset_root, 'norb-5x01235x9x18x6x2x108x108-testing-02-dat.mat')
}
}
# Initialize both train and test data structures
self.data = {}
for name in self.names:
self.data[name] = [NORBExample() for _ in range(NORBDataset.n_examples)]
# Fill data structures parsing dataset binary files
for data_split in self.names:
print('filling for data split: ', data_split)
self._fill_data_structures(data_split)
self.initialized = True
def explore_random_examples(self, dataset_split):
"""
Visualize random examples for dataset exploration purposes
Parameters
----------
dataset_split: str
Dataset split, can be either 'train' or 'test'
Returns
-------
None
"""
if self.initialized:
subplots = plt.subplots(nrows=1, ncols=2)
for i in np.random.permutation(NORBDataset.n_examples):
self.data[dataset_split][i].show(subplots)
plt.waitforbuttonpress()
plt.cla()
def export_to_jpg(self, export_dir):
"""
Export all dataset images to `export_dir` directory
Parameters
----------
export_dir: str
Path to export directory (which is created if nonexistent)
Returns
-------
None
"""
if self.initialized:
print('Exporting images to {}...'.format(export_dir), end='', flush=True)
for split_name in self.names:
split_dir = join(export_dir, split_name)
if not exists(split_dir):
makedirs(split_dir)
for i, norb_example in enumerate(self.data[split_name]):
category = NORBDataset.categories[norb_example.category]
instance = norb_example.instance
image_lt_path = join(split_dir, '{:06d}_{}_{:02d}_lt.jpg'.format(i, category, instance))
image_rt_path = join(split_dir, '{:06d}_{}_{:02d}_rt.jpg'.format(i, category, instance))
scipy.misc.imsave(image_lt_path, norb_example.image_lt)
scipy.misc.imsave(image_rt_path, norb_example.image_rt)
print('Done.')
def group_dataset_by_category_and_instance(self, dataset_split):
"""
Group small NORB dataset for (category, instance) key
Parameters
----------
dataset_split: str
Dataset split, can be either 'train' or 'test'
Returns
-------
groups: list
List of groups, one per (category, instance) pair. All examples of each
group are from the same category and instance
"""
if dataset_split not in self.names:
raise ValueError('Dataset split "{}" not allowed.'.format(dataset_split))
groups = []
for key, group in groupby(iterable=sorted(self.data[dataset_split]),
key=lambda x: (x.category, x.instance)):
groups.append(list(group))
return groups
def _fill_data_structures(self, dataset_split):
"""
Fill NORBDataset data structures for a certain `dataset_split`.
This means all images, category and additional information are loaded from binary
files of the current split.
Parameters
----------
dataset_split: str
Dataset split, can be either 'train' or 'test'
Returns
-------
None
"""
dat_data = self._parse_NORB_dat_file(self.dataset_files[dataset_split]['dat'])
cat_data = self._parse_NORB_cat_file(self.dataset_files[dataset_split]['cat'])
info_data = self._parse_NORB_info_file(self.dataset_files[dataset_split]['info'])
for i, small_norb_example in enumerate(self.data[dataset_split]):
small_norb_example.image_lt = dat_data[2 * i]
small_norb_example.image_rt = dat_data[2 * i + 1]
small_norb_example.category = cat_data[i]
small_norb_example.instance = info_data[i][0]
small_norb_example.elevation = info_data[i][1]
small_norb_example.azimuth = info_data[i][2]
small_norb_example.lighting = info_data[i][3]
@staticmethod
def matrix_type_from_magic(magic_number):
"""
Get matrix data type from magic number
See here: https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/readme for details.
Parameters
----------
magic_number: tuple
First 4 bytes read from small NORB files
Returns
-------
element type of the matrix
"""
convention = {'1E3D4C51': 'single precision matrix',
'1E3D4C52': 'packed matrix',
'1E3D4C53': 'double precision matrix',
'1E3D4C54': 'integer matrix',
'1E3D4C55': 'byte matrix',
'1E3D4C56': 'short matrix'}
magic_str = bytearray(reversed(magic_number)).hex().upper()
return convention[magic_str]
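# Worked example (illustrative): for a byte matrix the four magic bytes on
# disk are 0x55 0x4C 0x3D 0x1E; struct.unpack('<BBBB', ...) returns
# (0x55, 0x4C, 0x3D, 0x1E), and reversing then hex-encoding gives '1E3D4C55',
# i.e. 'byte matrix' -- the element type used by the *-dat.mat image files.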
@staticmethod
def _parse_small_NORB_header(file_pointer):
"""
Parse header of small NORB binary file
Parameters
----------
file_pointer: BufferedReader
File pointer just opened in a small NORB binary file
Returns
-------
file_header_data: dict
Dictionary containing header information
"""
# Read magic number
magic = struct.unpack('<BBBB', file_pointer.read(4)) # '<' is little endian)
# Read dimensions
dimensions = []
num_dims, = struct.unpack('<i', file_pointer.read(4)) # '<' is little endian)
for _ in range(num_dims):
dimensions.extend(struct.unpack('<i', file_pointer.read(4)))
file_header_data = {'magic_number': magic,
'matrix_type': NORBDataset.matrix_type_from_magic(magic),
'dimensions': dimensions}
return file_header_data
@staticmethod
def _parse_NORB_cat_file(file_path):
"""
Parse small NORB category file
Parameters
----------
file_path: str
Path of the small NORB `*-cat.mat` file
Returns
-------
examples: ndarray
Ndarray of shape (num_examples,) containing the category of each example
"""
with open(file_path, mode='rb') as f:
header = NORBDataset._parse_small_NORB_header(f)
num_examples, = header['dimensions']
struct.unpack('<BBBB', f.read(4)) # ignore this integer
struct.unpack('<BBBB', f.read(4)) # ignore this integer
examples = np.zeros(shape=num_examples, dtype=np.int32)
for i in tqdm(list(range(num_examples)), desc='Loading categories...'):
category, = struct.unpack('<i', f.read(4))
examples[i] = category
return examples
@staticmethod
def _parse_NORB_dat_file(file_path):
"""
Parse small NORB data file
Parameters
----------
file_path: str
Path of the small NORB `*-dat.mat` file
Returns
-------
examples: ndarray
Ndarray of shape (num_examples * channels, height, width) containing image couples. Each image couple
is stored in position [i, :, :] and [i+1, :, :]
"""
print('norb file path: ', file_path)
with open(file_path, mode='rb') as f:
header = NORBDataset._parse_small_NORB_header(f)
print('header: ', header)
num_examples, channels, height, width = header['dimensions']
examples = np.zeros(shape=(num_examples * channels, height, width), dtype=np.uint8)
for i in tqdm(list(range(num_examples * channels)), desc='Loading images...'):
# Read raw image data and restore shape as appropriate
image = struct.unpack('<' + height * width * 'B', f.read(height * width))
image = np.uint8(np.reshape(image, newshape=(height, width)))
examples[i] = image
return examples
@staticmethod
def _parse_NORB_info_file(file_path):
"""
Parse small NORB information file
Parameters
----------
file_path: str
Path of the small NORB `*-info.mat` file
Returns
-------
examples: ndarray
Ndarray of shape (num_examples, 4) containing the additional info of each example.
- column 1: the instance in the category (0 to 9)
- column 2: the elevation (0 to 8, which mean cameras are 30, 35,40,45,50,55,60,65,70
degrees from the horizontal respectively)
- column 3: the azimuth (0,2,4,...,34, multiply by 10 to get the azimuth in degrees)
- column 4: the lighting condition (0 to 5)
"""
with open(file_path, mode='rb') as f:
header = NORBDataset._parse_small_NORB_header(f)
struct.unpack('<BBBB', f.read(4)) # ignore this integer
num_examples, num_info = header['dimensions']
examples = np.zeros(shape=(num_examples, num_info), dtype=np.int32)
for r in tqdm(list(range(num_examples)), desc='Loading info...'):
for c in range(num_info):
info, = struct.unpack('<i', f.read(4))
examples[r, c] = info
return examples
|
structured-nets-master
|
scripts/data/norb.py
|
import numpy as np
import pickle as pkl
from sklearn.preprocessing import OneHotEncoder
from data_utils import normalize_data, apply_normalization
# Download from http://www.iro.umontreal.ca/~lisa/twiki/bin/view.cgi/Public/DeepVsShallowComparisonICML2007
def process_data(data):
X = data[:, :-1]
Y = np.expand_dims(data[:, -1], 1)
# Y must be one-hot
enc = OneHotEncoder()
Y = enc.fit_transform(Y).todense()
return X,Y
train_loc = '/dfs/scratch1/thomasat/datasets/convex/convex_train.amat'
test_loc = '/dfs/scratch1/thomasat/datasets/convex/50k/convex_test.amat'
train_out = '/dfs/scratch1/thomasat/datasets/convex/train_normalized'
test_out = '/dfs/scratch1/thomasat/datasets/convex/test_normalized'
train_data = np.genfromtxt(train_loc)
train_X, train_Y = process_data(train_data)
test_data = np.genfromtxt(test_loc)
test_X, test_Y = process_data(test_data)
# Normalize
train_X, mean, sd = normalize_data(train_X)
test_X = apply_normalization(test_X, mean, sd)
# Save
print('test_X, test_Y shape: ', test_X.shape, test_Y.shape)
print('train_X, train_Y shape: ', train_X.shape, train_Y.shape)
train = {'X': train_X, 'Y': train_Y}
test = {'X': test_X, 'Y': test_Y}
pkl.dump(train, open(train_out, 'wb'), protocol=2)
pkl.dump(test, open(test_out, 'wb'), protocol=2)
print('Saved train to: ', train_out)
print('Saved test to: ', test_out)
|
structured-nets-master
|
scripts/data/preprocess_convex.py
|
import numpy as np
import pickle as pkl
import os
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from data_utils import normalize_data, apply_normalization
# Download from https://www.cs.toronto.edu/~kriz/cifar.html
# Assumes 3 input channels
# Converts to grayscale
def convert_grayscale(data, img_size=32):
n = data.shape[0]
channel_size = int(data.shape[1]/3)
print('channel_size:', channel_size)
im_r = data[:, 0:channel_size].reshape((n, img_size, img_size))
im_g = data[:, channel_size:2*channel_size].reshape((n, img_size, img_size))
im_b = data[:, 2*channel_size:].reshape((n, img_size, img_size))
img = np.stack((im_r, im_g, im_b), axis=-1)
avg_img = np.mean(img, axis=-1)
data = avg_img.reshape((n, img_size*img_size))
return data
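# Sanity sketch (illustrative): averaging the three channels of an all-ones
# image leaves it unchanged.
# x = np.ones((1, 3 * 32 * 32))
# assert np.allclose(convert_grayscale(x), np.ones((1, 32 * 32)))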
# Loads data and optionally converts to grayscale
def load_data(loc):
data_dict = pkl.load(open(loc, 'rb'),encoding='latin1')
X = data_dict['data']
if grayscale:
print('Converting to grayscale')
X = convert_grayscale(X)
Y = np.array(data_dict['labels'])
Y = np.expand_dims(Y,1)
Y = enc.fit_transform(Y).todense()
print('X.shape, Y.shape: ', X.shape, Y.shape)
return X,Y
grayscale = False
normalize = True
train_batches = 5
data_dir = '/dfs/scratch1/thomasat/datasets/cifar10'
test_loc = os.path.join(data_dir,'test_batch')
train_out = '/dfs/scratch1/thomasat/datasets/cifar10_combined/train'
test_out = '/dfs/scratch1/thomasat/datasets/cifar10_combined/test'
if grayscale:
train_out += '_grayscale'
test_out += '_grayscale'
# Prepare training data
train_X = []
train_Y = []
enc = OneHotEncoder()
for i in range(train_batches):
this_batch_loc = os.path.join(data_dir, 'data_batch_' + str(i+1))
X,Y = load_data(this_batch_loc)
train_X.append(X)
train_Y.append(Y)
# Concatenate
train_X = np.vstack(train_X)
train_Y = np.vstack(train_Y)
# Normalize
train_X, mean, sd = normalize_data(train_X)
# Shuffle
idx = np.arange(0, train_X.shape[0])
np.random.shuffle(idx)
train_X = train_X[idx,:]
train_Y = train_Y[idx,:]
print('train_X.shape, train_Y.shape: ', train_X.shape, train_Y.shape)
# Prepare test data
test_X,test_Y = load_data(test_loc)
print('test_X.shape, test_Y.shape: ', test_X.shape, test_Y.shape)
# Normalize
test_X = apply_normalization(test_X, mean, sd)
# Save
train = {'X': train_X, 'Y': train_Y}
test = {'X': test_X, 'Y': test_Y}
pkl.dump(train, open(train_out, 'wb'), protocol=2)
pkl.dump(test, open(test_out, 'wb'), protocol=2)
print('Saved train to: ', train_out)
print('Saved test to: ', test_out)
|
structured-nets-master
|
scripts/data/preprocess_cifar10.py
|
import h5py
import scipy.io as sio
import numpy as np
import pickle as pkl
def process(feat_loc,lab_loc,train,top_N_classes=None,N=None):
lab = sio.loadmat(lab_loc)['lab']
if train:
with h5py.File(feat_loc, 'r') as f:
feat = np.array(f['fea'])
else:
feat = sio.loadmat(feat_loc)['fea']
if top_N_classes is None:
assert N is not None
counts = np.bincount(lab.flatten())
print('counts: ', counts)
idx_array = np.argsort(counts)
print('idx array: ', idx_array)
top_N_classes = idx_array[-N:][::-1]
print('top N classes: ', top_N_classes)
print('top N counts: ', counts[top_N_classes])
print('top N total: ', np.sum(counts[top_N_classes]))
idx = np.array([i for i in range(lab.size) if lab[i] in top_N_classes])
print('idx: ', idx.shape)
return feat[idx,:],lab[idx], top_N_classes
N = 25
train_feat_loc = '../timit/timit_train_feat.mat'
train_lab_loc = '../timit/timit_train_lab.mat'
test_feat_loc = '../timit/timit_heldout_feat.mat'
test_lab_loc = '../timit/timit_heldout_lab.mat'
train_feat_out_loc = '../timit/timit_train_feat_top' + str(N) + '.p'
train_lab_out_loc = '../timit/timit_train_lab_top' + str(N) + '.p'
test_feat_out_loc = '../timit/timit_test_feat_top' + str(N) + '.p'
test_lab_out_loc = '../timit/timit_test_lab_top' + str(N) + '.p'
train_feat,train_lab, top_N_classes = process(train_feat_loc,train_lab_loc,True,top_N_classes=None,N=N)
test_feat,test_lab, _ = process(test_feat_loc,test_lab_loc,False,top_N_classes=top_N_classes,N=N)
print('train_feat,train_lab: ', train_feat.shape, train_lab.shape)
print('test_feat,test_lab: ', test_feat.shape, test_lab.shape)
print('train_lab: ', np.unique(train_lab))
print('test_lab: ', np.unique(test_lab))
# Dump
pkl.dump(train_feat, open(train_feat_out_loc, 'wb'),protocol=2)
pkl.dump(train_lab, open(train_lab_out_loc, 'wb'),protocol=2)
pkl.dump(test_feat, open(test_feat_out_loc, 'wb'),protocol=2)
pkl.dump(test_lab, open(test_lab_out_loc, 'wb'),protocol=2)
|
structured-nets-master
|
scripts/data/timit.py
|
# Download from https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/
import sys
import pickle as pkl
sys.path.insert(0, '../../')
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from smallnorb import SmallNORBDataset
from scipy.misc import imresize
MAX_VAL = 255.0
DS_SIZE = (24, 24)
N_CATEGORIES = 5
OUT_LOC = '/dfs/scratch1/thomasat/datasets/smallnorb/processed_py2.pkl'
"""
Downsamples and normalizes.
"""
def process_image(image):
# Downsample
ds = imresize(image, DS_SIZE, 'nearest')
# Normalize
ds = ds/MAX_VAL
# Flatten
return ds.flatten()
"""
Downsamples, stores only left stereo pair, converts to one-hot label.
"""
def process_data(data):
X = []
Y = []
for ex in data:
this_image = ex.image_lt
this_category = ex.category
X.append(process_image(this_image))
Y.append(this_category)
X = np.array(X)
Y = np.array(Y)
Y = np.expand_dims(Y, 1)
enc = OneHotEncoder(N_CATEGORIES)
Y = enc.fit_transform(Y).todense()
return X,Y
dataset = SmallNORBDataset(dataset_root='/dfs/scratch1/thomasat/datasets/smallnorb')
train_X, train_Y = process_data(dataset.data['train'])
test_X, test_Y = process_data(dataset.data['test'])
print('train_X, train_Y, test_X, test_Y: ', train_X.shape, train_Y.shape, test_X.shape, test_Y.shape)
# Save
data_dict = {'train_X': train_X, 'train_Y': train_Y, 'test_X': test_X, 'test_Y': test_Y}
pkl.dump(data_dict, open(OUT_LOC, 'wb'), protocol=2)
|
structured-nets-master
|
scripts/data/preprocess_smallnorb.py
|
# Download from http://www.iro.umontreal.ca/~lisa/twiki/bin/view.cgi/Public/DeepVsShallowComparisonICML2007
import numpy as np
import pickle as pkl
from sklearn.preprocessing import OneHotEncoder
from data_utils import normalize_data, apply_normalization
def process_data(data):
X = data[:, :-1]
Y = np.expand_dims(data[:, -1], 1)
# Y must be one-hot
enc = OneHotEncoder()
Y = enc.fit_transform(Y).todense()
return X,Y
train_loc = '/dfs/scratch1/thomasat/datasets/rect/rectangles_train.amat'
test_loc = '/dfs/scratch1/thomasat/datasets/rect/rectangles_test.amat'
train_out = '/dfs/scratch1/thomasat/datasets/rect/train_normalized'
test_out = '/dfs/scratch1/thomasat/datasets/rect/test_normalized'
train_data = np.genfromtxt(train_loc)
train_X, train_Y = process_data(train_data)
test_data = np.genfromtxt(test_loc)
test_X, test_Y = process_data(test_data)
# Normalize
train_X, mean, sd = normalize_data(train_X)
test_X = apply_normalization(test_X, mean, sd)
# Save
print('test_X, test_Y shape: ', test_X.shape, test_Y.shape)
print('train_X, train_Y shape: ', train_X.shape, train_Y.shape)
train = {'X': train_X, 'Y': train_Y}
test = {'X': test_X, 'Y': test_Y}
pkl.dump(train, open(train_out, 'wb'), protocol=2)
pkl.dump(test, open(test_out, 'wb'), protocol=2)
print('Saved train to: ', train_out)
print('Saved test to: ', test_out)
|
structured-nets-master
|
scripts/data/preprocess_rect.py
|
import numpy as np
def normalize_data(data):
mean = np.mean(data,axis=0)
std = np.std(data,axis=0)
return apply_normalization(data,mean,std), mean, std
def apply_normalization(data, mean, std):
normalized = (data-mean)/std
print('Apply normalization: mean, std: ', np.mean(normalized,axis=0), np.std(normalized,axis=0))
return normalized
def standardize(data, max_val=255.0):
scaled = data/max_val
scaled = (scaled - 0.5)/0.5
print('min, max: ', np.min(scaled), np.max(scaled))
return scaled
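# Minimal usage sketch (illustrative): fit normalization statistics on the
# training split and reuse them on the test split, as the preprocessing
# scripts in this directory do.
# X_train = np.random.rand(100, 5)
# X_test = np.random.rand(20, 5)
# X_train, mean, std = normalize_data(X_train)
# X_test = apply_normalization(X_test, mean, std)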
|
structured-nets-master
|
scripts/data/data_utils.py
|
# From https://github.com/ndrplz/small_norb/blob/master/smallnorb/dataset.py
import struct
import numpy as np
import matplotlib.pyplot as plt
import scipy.misc
from tqdm import tqdm
from os import makedirs
from os.path import join
from os.path import exists
from itertools import groupby
class SmallNORBExample:
def __init__(self):
self.image_lt = None
self.image_rt = None
self.category = None
self.instance = None
self.elevation = None
self.azimuth = None
self.lighting = None
def __lt__(self, other):
return self.category < other.category or \
(self.category == other.category and self.instance < other.instance)
def show(self, subplots):
fig, axes = subplots
fig.suptitle(
'Category: {:02d} - Instance: {:02d} - Elevation: {:02d} - Azimuth: {:02d} - Lighting: {:02d}'.format(
self.category, self.instance, self.elevation, self.azimuth, self.lighting))
axes[0].imshow(self.image_lt, cmap='gray')
axes[1].imshow(self.image_rt, cmap='gray')
@property
def pose(self):
return np.array([self.elevation, self.azimuth, self.lighting], dtype=np.float32)
class SmallNORBDataset:
# Number of examples in both train and test set
n_examples = 24300
# Categories present in small NORB dataset
categories = ['animal', 'human', 'airplane', 'truck', 'car']
def __init__(self, dataset_root):
"""
Initialize small NORB dataset wrapper
Parameters
----------
dataset_root: str
Path to directory where small NORB archives have been extracted.
"""
self.dataset_root = dataset_root
self.initialized = False
# Store path for each file in small NORB dataset (for compatibility the original filename is kept)
self.dataset_files = {
'train': {
'cat': join(self.dataset_root, 'smallnorb-5x46789x9x18x6x2x96x96-training-cat.mat'),
'info': join(self.dataset_root, 'smallnorb-5x46789x9x18x6x2x96x96-training-info.mat'),
'dat': join(self.dataset_root, 'smallnorb-5x46789x9x18x6x2x96x96-training-dat.mat')
},
'test': {
'cat': join(self.dataset_root, 'smallnorb-5x01235x9x18x6x2x96x96-testing-cat.mat'),
'info': join(self.dataset_root, 'smallnorb-5x01235x9x18x6x2x96x96-testing-info.mat'),
'dat': join(self.dataset_root, 'smallnorb-5x01235x9x18x6x2x96x96-testing-dat.mat')
}
}
# Initialize both train and test data structures
self.data = {
'train': [SmallNORBExample() for _ in range(SmallNORBDataset.n_examples)],
'test': [SmallNORBExample() for _ in range(SmallNORBDataset.n_examples)]
}
# Fill data structures parsing dataset binary files
for data_split in ['train', 'test']:
self._fill_data_structures(data_split)
self.initialized = True
def explore_random_examples(self, dataset_split):
"""
Visualize random examples for dataset exploration purposes
Parameters
----------
dataset_split: str
Dataset split, can be either 'train' or 'test'
Returns
-------
None
"""
if self.initialized:
subplots = plt.subplots(nrows=1, ncols=2)
for i in np.random.permutation(SmallNORBDataset.n_examples):
self.data[dataset_split][i].show(subplots)
plt.waitforbuttonpress()
plt.cla()
def export_to_jpg(self, export_dir):
"""
Export all dataset images to `export_dir` directory
Parameters
----------
export_dir: str
Path to export directory (which is created if nonexistent)
Returns
-------
None
"""
if self.initialized:
print('Exporting images to {}...'.format(export_dir), end='', flush=True)
for split_name in ['train', 'test']:
split_dir = join(export_dir, split_name)
if not exists(split_dir):
makedirs(split_dir)
for i, norb_example in enumerate(self.data[split_name]):
category = SmallNORBDataset.categories[norb_example.category]
instance = norb_example.instance
image_lt_path = join(split_dir, '{:06d}_{}_{:02d}_lt.jpg'.format(i, category, instance))
image_rt_path = join(split_dir, '{:06d}_{}_{:02d}_rt.jpg'.format(i, category, instance))
scipy.misc.imsave(image_lt_path, norb_example.image_lt)
scipy.misc.imsave(image_rt_path, norb_example.image_rt)
print('Done.')
def group_dataset_by_category_and_instance(self, dataset_split):
"""
Group small NORB dataset for (category, instance) key
Parameters
----------
dataset_split: str
Dataset split, can be either 'train' or 'test'
Returns
-------
groups: list
List of 25 groups of 972 elements each. All examples of each group are
from the same category and instance
"""
if dataset_split not in ['train', 'test']:
raise ValueError('Dataset split "{}" not allowed.'.format(dataset_split))
groups = []
for key, group in groupby(iterable=sorted(self.data[dataset_split]),
key=lambda x: (x.category, x.instance)):
groups.append(list(group))
return groups
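    # Usage sketch (illustrative only, not part of the original file): each group
    # collects all 972 views of one physical object, e.g.
    #   groups = dataset.group_dataset_by_category_and_instance('train')
    #   assert len(groups) == 25   # 5 categories x 5 instances per split
    #   example = groups[0][0]     # a SmallNORBExample from the first group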
def _fill_data_structures(self, dataset_split):
"""
Fill SmallNORBDataset data structures for a certain `dataset_split`.
This means all images, category and additional information are loaded from binary
files of the current split.
Parameters
----------
dataset_split: str
Dataset split, can be either 'train' or 'test'
Returns
-------
None
"""
dat_data = self._parse_NORB_dat_file(self.dataset_files[dataset_split]['dat'])
cat_data = self._parse_NORB_cat_file(self.dataset_files[dataset_split]['cat'])
info_data = self._parse_NORB_info_file(self.dataset_files[dataset_split]['info'])
for i, small_norb_example in enumerate(self.data[dataset_split]):
small_norb_example.image_lt = dat_data[2 * i]
small_norb_example.image_rt = dat_data[2 * i + 1]
small_norb_example.category = cat_data[i]
small_norb_example.instance = info_data[i][0]
small_norb_example.elevation = info_data[i][1]
small_norb_example.azimuth = info_data[i][2]
small_norb_example.lighting = info_data[i][3]
@staticmethod
def matrix_type_from_magic(magic_number):
"""
Get matrix data type from magic number
See here: https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/readme for details.
Parameters
----------
magic_number: tuple
First 4 bytes read from small NORB files
Returns
-------
element type of the matrix
"""
convention = {'1E3D4C51': 'single precision matrix',
'1E3D4C52': 'packed matrix',
'1E3D4C53': 'double precision matrix',
'1E3D4C54': 'integer matrix',
'1E3D4C55': 'byte matrix',
'1E3D4C56': 'short matrix'}
magic_str = bytearray(reversed(magic_number)).hex().upper()
return convention[magic_str]
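    # Worked example (a sketch): a '*-dat.mat' file begins with the little-endian
    # magic 0x1E3D4C55, i.e. the bytes 0x55 0x4C 0x3D 0x1E on disk, so
    #   struct.unpack('<BBBB', f.read(4))  ->  (0x55, 0x4C, 0x3D, 0x1E)
    # and reversing + hex-encoding yields '1E3D4C55', which maps to 'byte matrix'.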
@staticmethod
def _parse_small_NORB_header(file_pointer):
"""
Parse header of small NORB binary file
Parameters
----------
file_pointer: BufferedReader
File pointer just opened in a small NORB binary file
Returns
-------
file_header_data: dict
Dictionary containing header information
"""
# Read magic number
        magic = struct.unpack('<BBBB', file_pointer.read(4))  # '<' means little-endian
# Read dimensions
dimensions = []
        num_dims, = struct.unpack('<i', file_pointer.read(4))  # '<' means little-endian
for _ in range(num_dims):
dimensions.extend(struct.unpack('<i', file_pointer.read(4)))
file_header_data = {'magic_number': magic,
'matrix_type': SmallNORBDataset.matrix_type_from_magic(magic),
'dimensions': dimensions}
return file_header_data
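    # Note on the format (per the NORB readme): headers always store at least
    # three dimension fields even when num_dims < 3, which is why the parsers
    # below skip unused padding values ("ignore this integer" reads).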
@staticmethod
def _parse_NORB_cat_file(file_path):
"""
Parse small NORB category file
Parameters
----------
file_path: str
Path of the small NORB `*-cat.mat` file
Returns
-------
examples: ndarray
Ndarray of shape (24300,) containing the category of each example
"""
with open(file_path, mode='rb') as f:
header = SmallNORBDataset._parse_small_NORB_header(f)
num_examples, = header['dimensions']
struct.unpack('<BBBB', f.read(4)) # ignore this integer
struct.unpack('<BBBB', f.read(4)) # ignore this integer
examples = np.zeros(shape=num_examples, dtype=np.int32)
for i in tqdm(list(range(num_examples)), desc='Loading categories...'):
category, = struct.unpack('<i', f.read(4))
examples[i] = category
return examples
@staticmethod
def _parse_NORB_dat_file(file_path):
"""
Parse small NORB data file
Parameters
----------
file_path: str
Path of the small NORB `*-dat.mat` file
Returns
-------
examples: ndarray
            Ndarray of shape (48600, 96, 96) containing stereo image pairs. The two
            images of example `i` are stored at positions [2*i, :, :] and [2*i + 1, :, :]
"""
with open(file_path, mode='rb') as f:
header = SmallNORBDataset._parse_small_NORB_header(f)
num_examples, channels, height, width = header['dimensions']
examples = np.zeros(shape=(num_examples * channels, height, width), dtype=np.uint8)
for i in tqdm(list(range(num_examples * channels)), desc='Loading images...'):
# Read raw image data and restore shape as appropriate
image = struct.unpack('<' + height * width * 'B', f.read(height * width))
image = np.uint8(np.reshape(image, newshape=(height, width)))
examples[i] = image
return examples
@staticmethod
def _parse_NORB_info_file(file_path):
"""
Parse small NORB information file
Parameters
----------
file_path: str
Path of the small NORB `*-info.mat` file
Returns
-------
examples: ndarray
            Ndarray of shape (24300, 4) containing the additional info of each example
            (columns indexed from 0, matching how the parsed array is used):
            - column 0: the instance in the category (0 to 9)
            - column 1: the elevation (0 to 8, meaning the cameras are 30, 35, 40, 45,
              50, 55, 60, 65, 70 degrees from the horizontal, respectively)
            - column 2: the azimuth (0, 2, 4, ..., 34; multiply by 10 to get degrees)
            - column 3: the lighting condition (0 to 5)
"""
with open(file_path, mode='rb') as f:
header = SmallNORBDataset._parse_small_NORB_header(f)
struct.unpack('<BBBB', f.read(4)) # ignore this integer
num_examples, num_info = header['dimensions']
examples = np.zeros(shape=(num_examples, num_info), dtype=np.int32)
for r in tqdm(list(range(num_examples)), desc='Loading info...'):
for c in range(num_info):
info, = struct.unpack('<i', f.read(4))
examples[r, c] = info
return examples
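

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module). Assumes the small
    # NORB archives have already been extracted under './smallnorb'; both paths
    # below are placeholders to adjust for your setup.
    dataset = SmallNORBDataset(dataset_root='./smallnorb')
    dataset.explore_random_examples(dataset_split='train')
    dataset.export_to_jpg(export_dir='./smallnorb_export')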
|
structured-nets-master
|
scripts/data/smallnorb.py
|
import numpy as np
import pickle as pkl
from sklearn.preprocessing import OneHotEncoder
from data_utils import normalize_data, apply_normalization
# Download from http://www.iro.umontreal.ca/~lisa/twiki/bin/view.cgi/Public/DeepVsShallowComparisonICML2007
n_variations = 6
for idx in np.arange(1, n_variations+1):
data_loc = '/dfs/scratch1/thomasat/datasets/mnist_noise/mnist_noise_variations_all_' + str(idx) + '.amat'
train_out = '/dfs/scratch1/thomasat/datasets/mnist_noise/train_' + str(idx)
test_out = '/dfs/scratch1/thomasat/datasets/mnist_noise/test_' + str(idx)
test_size = 2000 # As specified in http://www.iro.umontreal.ca/~lisa/twiki/bin/view.cgi/Public/DeepVsShallowComparisonICML2007#Downloadable_datasets
data = np.genfromtxt(data_loc)
X = data[:, :-1]
Y = np.expand_dims(data[:, -1], 1)
# Y must be one-hot
enc = OneHotEncoder()
Y = enc.fit_transform(Y).todense()
    # Split into train and test (no separate validation split is produced here)
    # Shuffle under a new name to avoid shadowing the loop variable `idx`
    perm = np.arange(0, X.shape[0])
    np.random.shuffle(perm)
    train_idx = perm[:-test_size]
    test_idx = perm[-test_size:]
assert train_idx.size == (X.shape[0] - test_size)
assert test_idx.size == test_size
test_X = X[test_idx, :]
test_Y = Y[test_idx, :]
train_X = X[train_idx, :]
train_Y = Y[train_idx, :]
# Normalize
train_X, mean, sd = normalize_data(train_X)
test_X = apply_normalization(test_X, mean, sd)
# Save
print('test_X, test_Y shape: ', test_X.shape, test_Y.shape)
print('train_X, train_Y shape: ', train_X.shape, train_Y.shape)
train = {'X': train_X, 'Y': train_Y}
test = {'X': test_X, 'Y': test_Y}
pkl.dump(train, open(train_out, 'wb'), protocol=2)
pkl.dump(test, open(test_out, 'wb'), protocol=2)
print('Saved train to: ', train_out)
print('Saved test to: ', test_out)
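
# Optional sanity check (a sketch, not part of the original script): reload the
# last split written above and confirm the arrays round-trip through pickle.
with open(train_out, 'rb') as f:
    reloaded = pkl.load(f)
assert reloaded['X'].shape == train_X.shape
assert reloaded['Y'].shape == train_Y.shape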
|
structured-nets-master
|
scripts/data/preprocess_mnist_noise.py
|