| python_code (stringlengths 0–4.04M) | repo_name (stringlengths 7–58) | file_path (stringlengths 5–147) |
|---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import dataclasses
import hydra
import torch.cuda
from hydra.core.config_store import ConfigStore
from rlhive.rl_envs import RoboHiveEnv
from torchrl.envs import (
CatTensors,
DoubleToFloat,
EnvCreator,
ObservationNorm,
ParallelEnv,
R3MTransform,
SelectTransform,
TransformedEnv,
)
from torchrl.envs.transforms import Compose, FlattenObservation, gSDENoise, RewardScaling
from torchrl.envs.utils import set_exploration_mode
from torchrl.modules import OrnsteinUhlenbeckProcessWrapper
from torchrl.record import VideoRecorder
from torchrl.trainers.helpers.collectors import (
make_collector_offpolicy,
OffPolicyCollectorConfig,
)
from torchrl.trainers.helpers.envs import (
EnvConfig,
initialize_observation_norm_transforms,
retrieve_observation_norms_state_dict,
)
from torchrl.trainers.helpers.logger import LoggerConfig
from torchrl.trainers.helpers.losses import LossConfig, make_redq_loss
from torchrl.trainers.helpers.models import make_redq_model, REDQModelConfig
from torchrl.trainers.helpers.replay_buffer import make_replay_buffer, ReplayArgsConfig
from torchrl.trainers.helpers.trainers import make_trainer, TrainerConfig
from torchrl.trainers.loggers.utils import generate_exp_name, get_logger
def make_env(
task,
reward_scaling,
device,
obs_norm_state_dict=None,
action_dim_gsde=None,
state_dim_gsde=None,
):
base_env = RoboHiveEnv(task, device=device)
env = make_transformed_env(env=base_env, reward_scaling=reward_scaling)
if obs_norm_state_dict is not None:
obs_norm = ObservationNorm(
**obs_norm_state_dict, in_keys=["observation_vector"]
)
env.append_transform(obs_norm)
if action_dim_gsde is not None:
env.append_transform(
gSDENoise(action_dim=action_dim_gsde, state_dim=state_dim_gsde)
)
return env
def make_transformed_env(
env,
reward_scaling=5.0,
stats=None,
):
"""
Apply transforms to the env (such as reward scaling and state normalization)
"""
env = TransformedEnv(env, SelectTransform("solved", "pixels", "observation"))
env.append_transform(
Compose(
R3MTransform("resnet50", in_keys=["pixels"], download=True),
FlattenObservation(-2, -1, in_keys=["r3m_vec"]),
)
    )  # Wrapping R3MTransform and FlattenObservation in a Compose is necessary; tracking bug: https://github.com/pytorch/rl/issues/802
env.append_transform(RewardScaling(loc=0.0, scale=reward_scaling))
selected_keys = ["r3m_vec", "observation"]
out_key = "observation_vector"
env.append_transform(CatTensors(in_keys=selected_keys, out_key=out_key))
# we normalize the states
if stats is None:
_stats = {"loc": 0.0, "scale": 1.0}
else:
_stats = stats
env.append_transform(
ObservationNorm(**_stats, in_keys=[out_key], standard_normal=True)
)
env.append_transform(DoubleToFloat(in_keys=[out_key], in_keys_inv=[]))
return env
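# --- Hedged usage sketch (added for illustration; not called by this script) ---
# It shows how make_transformed_env could be exercised on its own. The task id
# below is an assumption; any registered RoboHive env id would do.
def _example_make_transformed_env_usage(task_id="FrankaReachRandom-v0"):
    base_env = RoboHiveEnv(task_id, device="cpu")
    env = make_transformed_env(env=base_env, reward_scaling=5.0)
    td = env.reset()
    # the R3M + CatTensors + ObservationNorm pipeline yields a flat "observation_vector"
    return td["observation_vector"].shape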
config_fields = [
(config_field.name, config_field.type, config_field)
for config_cls in (
TrainerConfig,
OffPolicyCollectorConfig,
EnvConfig,
LossConfig,
REDQModelConfig,
LoggerConfig,
ReplayArgsConfig,
)
for config_field in dataclasses.fields(config_cls)
]
Config = dataclasses.make_dataclass(cls_name="Config", fields=config_fields)
cs = ConfigStore.instance()
cs.store(name="config", node=Config)
DEFAULT_REWARD_SCALING = {
"Hopper-v1": 5,
"Walker2d-v1": 5,
"HalfCheetah-v1": 5,
"cheetah": 5,
"Ant-v2": 5,
"Humanoid-v2": 20,
"humanoid": 100,
}
@hydra.main(version_base=None, config_path=".", config_name="config")
def main(cfg: "DictConfig"): # noqa: F821
device = (
torch.device("cpu")
if torch.cuda.device_count() == 0
else torch.device("cuda:0")
)
exp_name = generate_exp_name("REDQ", cfg.exp_name)
logger = get_logger(
logger_type=cfg.logger, logger_name="redq_logging", experiment_name=exp_name
)
key, init_env_steps = None, None
if not cfg.vecnorm and cfg.norm_stats:
if not hasattr(cfg, "init_env_steps"):
raise AttributeError("init_env_steps missing from arguments.")
key = ("next", "observation_vector")
init_env_steps = cfg.init_env_steps
proof_env = make_env(
task=cfg.env_name,
reward_scaling=cfg.reward_scaling,
device=device,
)
initialize_observation_norm_transforms(
proof_environment=proof_env, num_iter=init_env_steps, key=key
)
_, obs_norm_state_dict = retrieve_observation_norms_state_dict(proof_env)[0]
print(proof_env)
model = make_redq_model(
proof_env,
cfg=cfg,
device=device,
in_keys=["observation_vector"],
)
loss_module, target_net_updater = make_redq_loss(model, cfg)
actor_model_explore = model[0]
if cfg.ou_exploration:
if cfg.gSDE:
raise RuntimeError("gSDE and ou_exploration are incompatible")
actor_model_explore = OrnsteinUhlenbeckProcessWrapper(
actor_model_explore,
annealing_num_steps=cfg.annealing_frames,
sigma=cfg.ou_sigma,
theta=cfg.ou_theta,
).to(device)
if device == torch.device("cpu"):
# mostly for debugging
actor_model_explore.share_memory()
if cfg.gSDE:
with torch.no_grad(), set_exploration_mode("random"):
# get dimensions to build the parallel env
proof_td = actor_model_explore(proof_env.reset().to(device))
action_dim_gsde, state_dim_gsde = proof_td.get("_eps_gSDE").shape[-2:]
del proof_td
else:
action_dim_gsde, state_dim_gsde = None, None
proof_env.close()
    create_env_fn = make_env(  # an EnvBase instance is passed here instead of a create_env_fn callable
task=cfg.env_name,
reward_scaling=cfg.reward_scaling,
device=device,
obs_norm_state_dict=obs_norm_state_dict,
action_dim_gsde=action_dim_gsde,
state_dim_gsde=state_dim_gsde,
)
collector = make_collector_offpolicy(
make_env=create_env_fn,
actor_model_explore=actor_model_explore,
cfg=cfg,
# make_env_kwargs=[
# {"device": device} if device >= 0 else {}
# for device in args.env_rendering_devices
# ],
)
replay_buffer = make_replay_buffer(device, cfg)
# recorder = transformed_env_constructor(
# cfg,
# video_tag=video_tag,
# norm_obs_only=True,
# obs_norm_state_dict=obs_norm_state_dict,
# logger=logger,
# use_env_creator=False,
# )()
recorder = make_env(
task=cfg.env_name,
reward_scaling=cfg.reward_scaling,
device=device,
obs_norm_state_dict=obs_norm_state_dict,
action_dim_gsde=action_dim_gsde,
state_dim_gsde=state_dim_gsde,
)
# remove video recorder from recorder to have matching state_dict keys
if cfg.record_video:
recorder_rm = TransformedEnv(recorder.base_env)
for transform in recorder.transform:
if not isinstance(transform, VideoRecorder):
recorder_rm.append_transform(transform.clone())
else:
recorder_rm = recorder
if isinstance(create_env_fn, ParallelEnv):
recorder_rm.load_state_dict(create_env_fn.state_dict()["worker0"])
create_env_fn.close()
elif isinstance(create_env_fn, EnvCreator):
recorder_rm.load_state_dict(create_env_fn().state_dict())
else:
recorder_rm.load_state_dict(create_env_fn.state_dict())
# reset reward scaling
for t in recorder.transform:
if isinstance(t, RewardScaling):
t.scale.fill_(1.0)
t.loc.fill_(0.0)
trainer = make_trainer(
collector,
loss_module,
recorder,
target_net_updater,
actor_model_explore,
replay_buffer,
logger,
cfg,
)
final_seed = collector.set_seed(cfg.seed)
print(f"init seed: {cfg.seed}, final seed: {final_seed}")
trainer.train()
return (logger.log_dir, trainer._log_dict)
if __name__ == "__main__":
main()
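# Example launch (illustrative only; the override names below are cfg fields read by
# this script, while the task id and logger value are assumptions):
#   python redq.py env_name=FrankaReachRandom-v0 exp_name=redq_demo logger=wandb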
| agenthive-dev | scripts/redq/redq.py |
"""
This is a job script for running policy gradient algorithms on gym tasks.
Separate job scripts are provided to run few other algorithms
- For DAPG see here: https://github.com/aravindr93/hand_dapg/tree/master/dapg/examples
- For model-based NPG see here: https://github.com/aravindr93/mjrl/tree/master/mjrl/algos/model_accel
"""
from mjrl.utils.gym_env import GymEnv
from mjrl.policies.gaussian_mlp import MLP
from mjrl.baselines.mlp_baseline import MLPBaseline
from mjrl.algos.npg_cg import NPG
from mjrl.algos.batch_reinforce import BatchREINFORCE
from mjrl.algos.ppo_clip import PPO
from mjrl.utils.train_agent import train_agent
from mjrl.utils.logger import DataLog
from omegaconf import open_dict
import os
import json
import gym
# import mjrl.envs
import time as timer
import robohive
from robohive.envs.env_variants import register_env_variant
def train_loop(job_data) -> None:
if 'env_hyper_params' in job_data.keys():
job_data.env = register_env_variant(job_data.env, job_data.env_hyper_params)
e = GymEnv(job_data.env)
policy_size = tuple(eval(job_data.policy_size))
vf_hidden_size = tuple(eval(job_data.vf_hidden_size))
policy = MLP(e.spec, hidden_sizes=policy_size, seed=job_data.seed,
init_log_std=job_data.init_log_std, min_log_std=job_data.min_log_std)
baseline = MLPBaseline(e.spec, reg_coef=1e-3, batch_size=job_data.vf_batch_size, hidden_sizes=vf_hidden_size,
epochs=job_data.vf_epochs, learn_rate=job_data.vf_learn_rate)
# Construct the algorithm
if job_data.algorithm == 'NPG':
# Other hyperparameters (like number of CG steps) can be specified in config for pass through
# or default hyperparameters will be used
agent = NPG(e, policy, baseline, normalized_step_size=job_data.rl_step_size,
seed=job_data.seed, save_logs=True, **job_data.alg_hyper_params)
elif job_data.algorithm == 'VPG':
agent = BatchREINFORCE(e, policy, baseline, learn_rate=job_data.rl_step_size,
seed=job_data.seed, save_logs=True, **job_data.alg_hyper_params)
elif job_data.algorithm == 'NVPG':
agent = BatchREINFORCE(e, policy, baseline, desired_kl=job_data.rl_step_size,
seed=job_data.seed, save_logs=True, **job_data.alg_hyper_params)
elif job_data.algorithm == 'PPO':
# There are many hyperparameters for PPO. They can be specified in config for pass through
# or defaults in the PPO algorithm will be used
agent = PPO(e, policy, baseline, save_logs=True, **job_data.alg_hyper_params)
else:
        raise NotImplementedError("Algorithm not found")
    # Update logger if WandB is enabled in the config
    if 'wandb_params' in job_data.keys() and job_data['wandb_params']['use_wandb'] == True:
        if 'wandb_logdir' not in job_data['wandb_params']:
            with open_dict(job_data):
                job_data.wandb_params.wandb_logdir = os.getcwd()
        agent.logger = DataLog(**job_data['wandb_params'], wandb_config=job_data)
print("========================================")
print("Starting policy learning")
print("========================================")
ts = timer.time()
train_agent(job_name='.',
agent=agent,
seed=job_data.seed,
niter=job_data.rl_num_iter,
gamma=job_data.rl_gamma,
gae_lambda=job_data.rl_gae,
num_cpu=job_data.num_cpu,
sample_mode=job_data.sample_mode,
num_traj=job_data.rl_num_traj,
num_samples=job_data.rl_num_samples,
save_freq=job_data.save_freq,
evaluation_rollouts=job_data.eval_rollouts)
print("========================================")
print("Job Finished. Time taken = %f" % (timer.time()-ts))
print("========================================")
| agenthive-dev | baselines/mjrl/mjrl_job_script.py |
"""
Launcher script for mjrl training using hydra
"""
import os
import time as timer
import hydra
from omegaconf import DictConfig, OmegaConf
from mjrl_job_script import train_loop
# ===============================================================================
# Process Inputs and configure job
# ===============================================================================
@hydra.main(config_name="hydra_npg_config", config_path="config")
def configure_jobs(job_data):
print("========================================")
print("Job Configuration")
print("========================================")
OmegaConf.resolve(job_data) # resolve configs
assert 'algorithm' in job_data.keys()
assert any([job_data.algorithm == a for a in ['NPG', 'NVPG', 'VPG', 'PPO']])
assert 'sample_mode' in job_data.keys()
assert any([job_data.sample_mode == m for m in ['samples', 'trajectories']])
job_data.alg_hyper_params = dict() if 'alg_hyper_params' not in job_data.keys() else job_data.alg_hyper_params
with open('job_config.yaml', 'w') as fp:
OmegaConf.save(config=job_data, f=fp.name)
if job_data.sample_mode == 'trajectories':
assert 'rl_num_traj' in job_data.keys()
job_data.rl_num_samples = 0 # will be ignored
elif job_data.sample_mode == 'samples':
assert 'rl_num_samples' in job_data.keys()
job_data.rl_num_traj = 0 # will be ignored
else:
print("Unknown sampling mode. Choose either trajectories or samples")
exit()
print(OmegaConf.to_yaml(job_data, resolve=True))
train_loop(job_data)
if __name__ == "__main__":
configure_jobs()
| agenthive-dev | baselines/mjrl/hydra_mjrl_launcher.py |
import robohive
import click
DESC="""
Script to generate the training command for mjrl baselines on a given RoboHive suite
"""
@click.command(help=DESC)
@click.option('-s', '--suite', type=str, help='environment suite to train', default="arms")
@click.option('-l', '--launcher', type=click.Choice(['', None, "local", "slurm"]), default='')
@click.option('-cn', '--config_name', type=str, default=None)
@click.option('-cp', '--config_path', type=str, default='config')
def get_train_cmd(suite, launcher, config_name, config_path):
# Resolve Suite
if suite=="multitask_":
envs = ",".join(robohive.robohive_multitask_suite)
if config_name==None:
config_name="hydra_kitchen_config.yaml"
elif suite=="arms":
envs = ",".join(robohive.robohive_arm_suite)
if config_name==None:
config_name="hydra_arms_config.yaml"
elif suite=="hands":
envs = ",".join(robohive.robohive_hand_suite)
if config_name==None:
config_name="hydra_hand_config.yaml"
elif suite=="quads":
envs = ",".join(robohive.robohive_quad_suite)
if config_name==None:
config_name="hydra_quads_config.yaml"
elif suite=="myobase":
envs = ",".join(robohive.robohive_myobase_suite)
if config_name==None:
config_name="hydra_myo_config.yaml"
elif suite=="myochallenge":
envs = ",".join(robohive.robohive_myochal_suite)
if config_name==None:
config_name="hydra_myo_config.yaml"
elif suite=="myodm":
envs = ",".join(robohive.robohive_myodm_suite)
if config_name==None:
config_name="hydra_myo_config.yaml"
else:
raise ValueError(f"Unsupported suite:{suite}")
# Resolve launcher
if launcher=='' or launcher==None:
launcher_spec = ''
else:
launcher_spec = f"--multirun hydra/output={launcher} hydra/launcher={launcher}"
# Get final training command
print(f"To train NPG via mjrl on {suite} suite, run the following command: ")
print(f"python hydra_mjrl_launcher.py --config-path {config_path} --config-name {config_name} {launcher_spec} env={envs} seed=1,2,3")
if __name__ == '__main__':
get_train_cmd()
| agenthive-dev | baselines/mjrl/get_trian_cmd.py |
#!/usr/bin/env python
"""
MIT License
Copyright (c) 2017 Guillaume Papin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
A wrapper script around clang-format, suitable for linting multiple files
and to use for continuous integration.
This is an alternative API for the clang-format command line.
It runs over multiple files and directories in parallel.
A diff output is produced and a sensible exit code is returned.
"""
import argparse
import difflib
import fnmatch
import multiprocessing
import os
import signal
import subprocess
import sys
import traceback
from functools import partial
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, "wb")
DEFAULT_EXTENSIONS = "c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,cu"
class ExitStatus:
SUCCESS = 0
DIFF = 1
TROUBLE = 2
def list_files(files, recursive=False, extensions=None, exclude=None):
if extensions is None:
extensions = []
if exclude is None:
exclude = []
out = []
for file in files:
if recursive and os.path.isdir(file):
for dirpath, dnames, fnames in os.walk(file):
fpaths = [os.path.join(dirpath, fname) for fname in fnames]
for pattern in exclude:
# os.walk() supports trimming down the dnames list
# by modifying it in-place,
# to avoid unnecessary directory listings.
dnames[:] = [
x
for x in dnames
if not fnmatch.fnmatch(os.path.join(dirpath, x), pattern)
]
fpaths = [x for x in fpaths if not fnmatch.fnmatch(x, pattern)]
for f in fpaths:
ext = os.path.splitext(f)[1][1:]
if ext in extensions:
out.append(f)
else:
out.append(file)
return out
def make_diff(file, original, reformatted):
return list(
difflib.unified_diff(
original,
reformatted,
fromfile=f"{file}\t(original)",
tofile=f"{file}\t(reformatted)",
n=3,
)
)
class DiffError(Exception):
def __init__(self, message, errs=None):
super().__init__(message)
self.errs = errs or []
class UnexpectedError(Exception):
def __init__(self, message, exc=None):
super().__init__(message)
self.formatted_traceback = traceback.format_exc()
self.exc = exc
def run_clang_format_diff_wrapper(args, file):
try:
ret = run_clang_format_diff(args, file)
return ret
except DiffError:
raise
except Exception as e:
raise UnexpectedError(f"{file}: {e.__class__.__name__}: {e}", e)
def run_clang_format_diff(args, file):
try:
with open(file, encoding="utf-8") as f:
original = f.readlines()
except OSError as exc:
raise DiffError(str(exc))
invocation = [args.clang_format_executable, file]
# Use of utf-8 to decode the process output.
#
# Hopefully, this is the correct thing to do.
#
# It's done due to the following assumptions (which may be incorrect):
# - clang-format will returns the bytes read from the files as-is,
# without conversion, and it is already assumed that the files use utf-8.
# - if the diagnostics were internationalized, they would use utf-8:
# > Adding Translations to Clang
# >
# > Not possible yet!
# > Diagnostic strings should be written in UTF-8,
# > the client can translate to the relevant code page if needed.
# > Each translation completely replaces the format string
# > for the diagnostic.
# > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation
try:
proc = subprocess.Popen(
invocation,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
encoding="utf-8",
)
except OSError as exc:
raise DiffError(
f"Command '{subprocess.list2cmdline(invocation)}' failed to start: {exc}"
)
proc_stdout = proc.stdout
proc_stderr = proc.stderr
# hopefully the stderr pipe won't get full and block the process
outs = list(proc_stdout.readlines())
errs = list(proc_stderr.readlines())
proc.wait()
if proc.returncode:
raise DiffError(
"Command '{}' returned non-zero exit status {}".format(
subprocess.list2cmdline(invocation), proc.returncode
),
errs,
)
return make_diff(file, original, outs), errs
def bold_red(s):
return "\x1b[1m\x1b[31m" + s + "\x1b[0m"
def colorize(diff_lines):
def bold(s):
return "\x1b[1m" + s + "\x1b[0m"
def cyan(s):
return "\x1b[36m" + s + "\x1b[0m"
def green(s):
return "\x1b[32m" + s + "\x1b[0m"
def red(s):
return "\x1b[31m" + s + "\x1b[0m"
for line in diff_lines:
if line[:4] in ["--- ", "+++ "]:
yield bold(line)
elif line.startswith("@@ "):
yield cyan(line)
elif line.startswith("+"):
yield green(line)
elif line.startswith("-"):
yield red(line)
else:
yield line
def print_diff(diff_lines, use_color):
if use_color:
diff_lines = colorize(diff_lines)
sys.stdout.writelines(diff_lines)
def print_trouble(prog, message, use_colors):
error_text = "error:"
if use_colors:
error_text = bold_red(error_text)
print(f"{prog}: {error_text} {message}", file=sys.stderr)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--clang-format-executable",
metavar="EXECUTABLE",
help="path to the clang-format executable",
default="clang-format",
)
parser.add_argument(
"--extensions",
help=f"comma separated list of file extensions (default: {DEFAULT_EXTENSIONS})",
default=DEFAULT_EXTENSIONS,
)
parser.add_argument(
"-r",
"--recursive",
action="store_true",
help="run recursively over directories",
)
parser.add_argument("files", metavar="file", nargs="+")
parser.add_argument("-q", "--quiet", action="store_true")
parser.add_argument(
"-j",
metavar="N",
type=int,
default=0,
help="run N clang-format jobs in parallel (default number of cpus + 1)",
)
parser.add_argument(
"--color",
default="auto",
choices=["auto", "always", "never"],
help="show colored diff (default: auto)",
)
parser.add_argument(
"-e",
"--exclude",
metavar="PATTERN",
action="append",
default=[],
help="exclude paths matching the given glob-like pattern(s) from recursive search",
)
args = parser.parse_args()
# use default signal handling, like diff return SIGINT value on ^C
# https://bugs.python.org/issue14229#msg156446
signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
signal.SIGPIPE
except AttributeError:
# compatibility, SIGPIPE does not exist on Windows
pass
else:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
colored_stdout = False
colored_stderr = False
if args.color == "always":
colored_stdout = True
colored_stderr = True
elif args.color == "auto":
colored_stdout = sys.stdout.isatty()
colored_stderr = sys.stderr.isatty()
version_invocation = [args.clang_format_executable, "--version"]
try:
subprocess.check_call(version_invocation, stdout=DEVNULL)
except subprocess.CalledProcessError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
return ExitStatus.TROUBLE
except OSError as e:
print_trouble(
parser.prog,
f"Command '{subprocess.list2cmdline(version_invocation)}' failed to start: {e}",
use_colors=colored_stderr,
)
return ExitStatus.TROUBLE
retcode = ExitStatus.SUCCESS
files = list_files(
args.files,
recursive=args.recursive,
exclude=args.exclude,
extensions=args.extensions.split(","),
)
if not files:
return
njobs = args.j
if njobs == 0:
njobs = multiprocessing.cpu_count() + 1
njobs = min(len(files), njobs)
if njobs == 1:
# execute directly instead of in a pool,
# less overhead, simpler stacktraces
it = (run_clang_format_diff_wrapper(args, file) for file in files)
pool = None
else:
pool = multiprocessing.Pool(njobs)
it = pool.imap_unordered(partial(run_clang_format_diff_wrapper, args), files)
while True:
try:
outs, errs = next(it)
except StopIteration:
break
except DiffError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
retcode = ExitStatus.TROUBLE
sys.stderr.writelines(e.errs)
except UnexpectedError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
sys.stderr.write(e.formatted_traceback)
retcode = ExitStatus.TROUBLE
# stop at the first unexpected error,
# something could be very wrong,
# don't process all files unnecessarily
if pool:
pool.terminate()
break
else:
sys.stderr.writelines(errs)
if outs == []:
continue
if not args.quiet:
print_diff(outs, use_color=colored_stdout)
if retcode == ExitStatus.SUCCESS:
retcode = ExitStatus.DIFF
return retcode
if __name__ == "__main__":
sys.exit(main())
| agenthive-dev | .circleci/unittest/linux/scripts/run-clang-format.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup, find_packages
setup(
name="psvi",
version="0.1.0",
description="Setting up a python package for Bayesian inference using variational coresets",
author="Dionysis Manousakas",
author_email="dm754@cantab.ac.uk",
license="LICENSE",
packages=find_packages(include=["psvi", "psvi.*"]),
install_requires=[
"iopath==0.1.10",
"matplotlib>=3.5.2",
"numpy>=1.22.4",
"pandas>=1.4.3",
"Pillow==9.2.0",
"requests==2.25.1",
"scikit_learn>=1.1.1",
"setuptools>=59.6.0",
"torch>=1.12.0",
"torchvision==0.13.0",
"tqdm==4.64.0",
"TyXe @ git+https://github.com/TyXe-BDL/TyXe",
"arff==0.9",
"pystan==3.5.0",
],
keywords=[
"bilevel optimization",
"hypergradient",
"sampling",
"importance sampling",
"variational inference",
"Monte Carlo",
"Bayesian",
"neural networks",
"pruning",
"sparsity",
"coresets",
"distillation",
"meta-learning",
"inducing points",
"pseudodata",
"neural networks",
],
)
| Blackbox-Coresets-VI-main | setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| Blackbox-Coresets-VI-main | psvi/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
r"""
Experiment execution script: Users can specify the dataset, the statistical model and the inference methods,
and this script will generate a dictionary with the predictive performance.
"""
# Import libraries
import argparse
import os
import pickle
from collections import defaultdict
from platform import architecture
from typing import Any, Dict, List
from psvi.inference.baselines import (
run_giga,
run_mfvi,
run_mfvi_subset,
run_opsvi,
run_random,
run_sparsevi,
run_mfvi_regressor,
run_mfvi_subset_regressor
)
from psvi.inference.psvi_classes import (
PSVI,
PSVIAFixedU,
PSVIAV,
PSVIFixedU,
PSVILearnV,
PSVI_Ablated,
PSVI_No_IW,
PSVI_No_Rescaling,
PSVIFreeV,
PSVI_regressor,
PSVILearnV_regressor,
PSVIAV_regressor,
)
from psvi.inference.sparsebbvi import run_sparsevi_with_bb_elbo
from psvi.models.logreg import *
from experiments_utils import read_dataset, read_regression_dataset
torch.autograd.set_detect_anomaly(True)
parser = argparse.ArgumentParser()
# Arguments for the experiment workflow
parser.add_argument(
"--fnm", default="results", type=str, help="Filename where results are stored"
)
parser.add_argument(
"--datasets",
default=["phishing"],
nargs="+",
choices=["webspam", "phishing", "adult", "MNIST", "halfmoon", "four_blobs", "sinus", "concrete", "energy", "power", "kin8nm", "protein", "naval", "yacht", "boston", "wine", "year", "synth_lr_10", "synth_lr_50", "synth_lr_200"],
type=str,
help="List of dataset names",
)
parser.add_argument(
"--methods",
default=["psvi_learn_v", "mfvi", "mfvi_subset"],
nargs="+",
type=str,
help="List of inference method names",
)
parser.add_argument("--mc_samples", default=10, type=int, help="Monte Carlo samples")
parser.add_argument("--num_epochs", default=301, type=int, help="Training epochs")
parser.add_argument(
"--num_trials",
default=3,
type=int,
help="Trials executed for each inference method",
)
parser.add_argument(
"--data_minibatch", default=128, type=int, help="Data minibatch size"
)
parser.add_argument(
"--inner_it",
default=100,
type=int,
help="Gradient steps in the inner problem of nested optimization",
)
parser.add_argument(
"--outer_it",
default=100,
type=int,
help="Gradient steps in the outer problem of nested optimization",
)
parser.add_argument(
"--trainer",
default="nested",
choices=["nested", "hyper", "joint"],
type=str,
help="Method for computation of hypergradient",
)
parser.add_argument(
"--diagonal",
action=argparse.BooleanOptionalAction,
help="Diagonal approximation of Gaussian covariance matrices used",
)
parser.add_argument(
"--architecture",
default="logistic_regression",
choices=["logistic_regression", "logistic_regression_fullcov", "fn", "fn2", "lenet", "regressor_net"],
type=str,
help="Model architecture",
)
parser.add_argument(
"--n_hidden",
default=40,
type=int,
help="Number of hidden units in feedforward neural architectures",
)
parser.add_argument(
"--n_layers",
default=1,
type=int,
help="Number of layers in feedforward neural architectures",
)
parser.add_argument(
"--log_every",
default=150,
type=int,
help="Frequency of logging evaluation results throughout training (in number of outer gradient iterations)",
)
parser.add_argument(
"--register_elbos",
action=argparse.BooleanOptionalAction,
help="Saving variational objectives values throughout inference for plotting",
)
parser.add_argument(
"--init_sd",
default=1e-6,
type=float,
help="Initialization of standard deviation for variational parameters",
)
parser.add_argument(
"--lr0net",
default=1e-3,
type=float,
help="Initial learning rate for model parameters optimizer",
)
parser.add_argument(
"--lr0u",
default=1e-4,
type=float,
help="Initial learning rate for optimizer of pseudocoreset point input coordinates u",
)
parser.add_argument(
"--lr0v",
default=1e-3,
type=float,
help="Initial learning rate for optimizer of coreset support coefficients",
)
parser.add_argument(
"--lr0z",
default=1e-3,
type=float,
help="Initial learning rate for optimizer of coreset points labels",
)
parser.add_argument(
"--lr0alpha",
default=1e-3,
type=float,
help="Initial learning rate for coreset likelihood rescaling coefficient",
)
parser.add_argument(
"--init_at",
default="subsample",
choices=["subsample", "random"],
type=str,
help="Method for coreset points initialization",
)
parser.add_argument(
"--compute_weights_entropy",
action=argparse.BooleanOptionalAction,
help="Comput entropy of weights for plotting",
)
parser.add_argument(
"--coreset_sizes",
default=[100],
nargs="+",
type=int,
help="List of sizes for coresets computed throughout the experiment, or subsamples used for baselines mfvi_subset and random",
)
parser.add_argument(
"--reset",
action=argparse.BooleanOptionalAction,
help="Reset model parameters over intervals during training",
)
parser.add_argument(
"--prune",
action=argparse.BooleanOptionalAction,
help="Prune to coreset of smaller size",
)
parser.add_argument(
"--prune_interval",
default=400,
type=int,
help="Gradient steps in the outer problem of nested optimization between prunning steps",
)
parser.add_argument(
"--prune_sizes",
default=[20],
nargs="+",
type=int,
help="List of sizes for coresets in a pruning experiment (decreasing)",
)
parser.add_argument(
"--increment",
action=argparse.BooleanOptionalAction,
help="Learn tasks incrementally",
)
parser.add_argument(
"--increment_interval",
default=1000,
type=int,
help="Gradient steps in the outer problem of nested optimization between incremental learning stages",
)
parser.add_argument(
"--increment_sizes",
default=[20],
nargs="+",
type=int,
help="List of sizes for coresets in the incremental learning setting (non-decreasing)",
)
parser.add_argument(
"--retrain_on_coreset",
action=argparse.BooleanOptionalAction,
help="Retrain the variational model restricted only on the extracted coreset datapoints for the same number of epochs",
)
parser.add_argument(
"--save_input_data",
action=argparse.BooleanOptionalAction,
help="Save input dataset",
)
parser.add_argument(
"--test_ratio", default=0.2, type=float, help="Ratio of test dataset size"
)
parser.add_argument(
"--log_pseudodata",
action=argparse.BooleanOptionalAction,
help="Store pseudodata for visualisation",
)
parser.add_argument(
"--data_folder",
default="../data",
type=str,
help="Folder where dataset gets stored",
)
parser.add_argument(
"--results_folder",
default="../results",
type=str,
help="Folder where evaluation files get stored",
)
parser.add_argument(
"--learn_z",
action=argparse.BooleanOptionalAction,
help="Learn soft labels for distilled data",
)
parser.add_argument(
"--gamma", default=1., type=float, help="Decay factor of learning rate"
)
parser.set_defaults(
diagonal=True,
reset=False,
compute_weights_entropy=False,
register_elbos=False,
save_input_data=False,
prune=False,
increment=False,
log_pseudodata=False,
retrain_on_coreset=False,
learn_z=False,
)
parsed_args = parser.parse_args()
method_args = vars(parsed_args)
datasets, methods = method_args["datasets"], method_args["methods"]
method_args["logistic_regression"] = method_args['architecture'] == 'logistic_regression'
for fold in [method_args["data_folder"], method_args["results_folder"]]:
    os.makedirs(fold, exist_ok=True)  # make folders for data and results storage
def rec_dd():
return defaultdict(rec_dd)
results = rec_dd() # recursive dictionary for storage of inference results
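# Hedged illustration of the recursive defaultdict: nested keys can be assigned
# directly, mirroring the results[dataset][method][coreset_size][trial] layout used
# below (the keys and value here are hypothetical).
def _rec_dd_example():
    d = rec_dd()
    d["phishing"]["mfvi"][-1][0] = {"accuracy": 0.9}
    return d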
# Specify inference methods
inf_dict = {
"psvi": (lambda *args, **kwargs: PSVI(*args, **kwargs).run_psvi(*args, **kwargs)),
"psvi_ablated": (
lambda *args, **kwargs: PSVI_Ablated(*args, **kwargs).run_psvi(*args, **kwargs)
),
"psvi_learn_v": (
lambda *args, **kwargs: PSVILearnV(*args, **kwargs).run_psvi(*args, **kwargs)
),
"psvi_alpha_v": (
lambda *args, **kwargs: PSVIAV(*args, **kwargs).run_psvi(*args, **kwargs)
),
"psvi_no_iw": (
lambda *args, **kwargs: PSVI_No_IW(*args, **kwargs).run_psvi(*args, **kwargs)
),
"psvi_free_v": (
lambda *args, **kwargs: PSVIFreeV(*args, **kwargs).run_psvi(*args, **kwargs)
),
"psvi_no_rescaling": (
lambda *args, **kwargs: PSVI_No_Rescaling(*args, **kwargs).run_psvi(
*args, **kwargs
)
),
"psvi_fixed_u": (
lambda *args, **kwargs: PSVIFixedU(*args, **kwargs).run_psvi(*args, **kwargs)
),
"psvi_alpha_fixed_u": (
lambda *args, **kwargs: PSVIAFixedU(*args, **kwargs).run_psvi(
*args, **kwargs
)
),
"psvi_regressor": (
lambda *args, **kwargs: PSVI_regressor(*args, **kwargs).run_psvi(*args, **kwargs)
),
"psvi_alpha_v_regressor": (
lambda *args, **kwargs: PSVIAV_regressor(*args, **kwargs).run_psvi(*args, **kwargs)
),
"psvi_learn_v_regressor": (
lambda *args, **kwargs: PSVILearnV_regressor(*args, **kwargs).run_psvi(*args, **kwargs)
),
"sparsebbvi": run_sparsevi_with_bb_elbo,
"opsvi": run_opsvi,
"random": run_random,
"sparsevi": run_sparsevi,
"giga": run_giga,
"mfvi": run_mfvi,
"mfvi_subset": run_mfvi_subset,
"mfvi_regressor": run_mfvi_regressor,
"mfvi_subset_regressor": run_mfvi_subset_regressor,
}
def experiment_driver(
datasets: List[str],
methods: Dict[str, bool],
method_args: Dict[str, Any],
) -> None:
r"""
Run experiment
"""
for dnm in datasets:
# Read the dataset
print(f"\nReading/Generating the dataset {dnm.upper()}")
x, y, xt, yt, N, D, train_dataset, test_dataset, num_classes = read_dataset(
dnm, method_args
)
print(
f"\nBayesian {'logistic regression' if method_args['logistic_regression'] else 'neural network'} experiment.\nInference via {' '.join(map(lambda x:x.upper(), methods))} on {dnm} data over {method_args['num_trials']} {'independent trials.' if method_args['num_trials']>1 else 'trial.'}\n\n\n"
)
for nm_alg in methods:
print(f"\n\nRunning {nm_alg}\n")
logistic_regression = method_args.get(
"logistic_regression", method_args.get("architecture") == "logreg"
)
inf_alg = inf_dict[nm_alg]
compute_weights_entropy = (
not nm_alg.startswith(("opsvi", "mfvi_subset"))
) and method_args["compute_weights_entropy"]
tps = (
method_args["coreset_sizes"]
if nm_alg.startswith(("psvi", "opsvi", "mfvi_subset"))
else [-1]
) # alias for baselines with no explicit constraint on dataset size
for t in range(method_args["num_trials"]):
print(f"Trial #{t}")
for (
ps
) in tps: # range of pseudocoreset sizes tested over the experiment
print(
f"Coreset/Subset with {ps if not method_args['increment'] else method_args['increment_sizes'][0]} datapoints"
) if ps != -1 else print("Unconstrained data access")
results[dnm][nm_alg][ps][t] = inf_alg(
mc_samples=method_args["mc_samples"],
num_epochs=method_args["num_epochs"],
data_minibatch=method_args["data_minibatch"],
D=D,
N=N,
tr=t,
diagonal=method_args["diagonal"],
x=x,
y=y,
xt=xt,
yt=yt,
inner_it=method_args["inner_it"],
outer_it=method_args["outer_it"],
scatterplot_coreset=method_args.get(
"scatterplot_coreset"
), # not parsed for some methods atm
logistic_regression=logistic_regression,
trainer=method_args["trainer"],
log_every=method_args["log_every"],
register_elbos=method_args["register_elbos"],
lr0u=method_args["lr0u"],
lr0net=method_args["lr0net"],
lr0v=method_args["lr0v"],
lr0z=method_args["lr0z"],
lr0alpha=method_args["lr0alpha"],
init_args=method_args["init_at"],
init_sd=method_args[
"init_sd"
], # initialization of variance in variational model
num_pseudo=ps,
seed=t, # map random seed to the trial number for reproducibility of inference result at the beginning of each of the baseline
compute_weights_entropy=compute_weights_entropy,
reset=method_args.get("reset"),
reset_interval=method_args.get("reset_interval"),
architecture=method_args.get("architecture"),
log_pseudodata=method_args.get("log_pseudodata"),
n_hidden=method_args.get(
"n_hidden", 40
), # hidden units in nn architecture
n_layers=method_args.get("n_layers", 1),
train_dataset=train_dataset,
test_dataset=test_dataset,
dnm=dnm,
nc=num_classes,
prune=method_args.get("prune"),
prune_interval=method_args.get("prune_interval"),
prune_sizes=method_args.get("prune_sizes"),
increment=method_args.get("increment"),
increment_interval=method_args.get("increment_interval"),
increment_sizes=method_args.get("increment_sizes"),
retrain_on_coreset=method_args.get("retrain_on_coreset"),
learn_z=method_args["learn_z"],
)
print("Trial completed!\n")
return write_to_files(results, method_args["fnm"])
def regressor_experiment_driver(
datasets: List[str],
methods: Dict[str, bool],
method_args: Dict[str, Any],
) -> None:
r"""
Run BNN regression experiment
"""
for dnm in datasets:
# Read the dataset
print(f"\nReading/Generating the dataset {dnm.upper()}")
method_args["seed"], method_args["num_test"] = 42, .15
x, y, xv, yv, xt, yt, N, D, train_dataset, val_dataset, test_dataset, y_mean, y_std, taus = read_regression_dataset(
dnm, method_args
)
print(
f"\nRegression experiment using BNNs.\nInference via {' '.join(map(lambda x:x.upper(), methods))} on {dnm} data over {method_args['num_trials']} {'independent trials.' if method_args['num_trials']>1 else 'trial.'}\n\n\n"
)
for nm_alg in methods:
print(f"\n\nRunning {nm_alg}\n")
logistic_regression = False
inf_alg = inf_dict[nm_alg + "_regressor"]
compute_weights_entropy = (
not nm_alg.startswith("mfvi_subset")
) and method_args["compute_weights_entropy"]
tps = (
method_args["coreset_sizes"]
if nm_alg.startswith(("psvi", "mfvi_subset"))
else [-1]
) # alias for baselines with no explicit constraint on dataset size
for t in range(method_args["num_trials"]):
print(f"Trial #{t}")
for (
ps
) in tps: # range of pseudocoreset sizes tested over the experiment
print(
f"Coreset/Subset with {ps} datapoints"
) if ps != -1 else print("Unconstrained data access")
results[dnm][nm_alg][ps][t] = inf_alg(
mc_samples=method_args["mc_samples"],
num_epochs=method_args["num_epochs"],
data_minibatch=method_args["data_minibatch"],
D=D,
N=N,
tr=t,
diagonal=method_args["diagonal"],
x=x,
y=y,
xv=xv,
yv=yv,
xt=xt,
yt=yt,
inner_it=method_args["inner_it"],
outer_it=method_args["outer_it"],
scatterplot_coreset=method_args.get(
"scatterplot_coreset"
), # not parsed for some methods atm
logistic_regression=logistic_regression,
trainer=method_args["trainer"],
log_every=method_args["log_every"],
register_elbos=method_args["register_elbos"],
lr0u=method_args["lr0u"],
lr0net=method_args["lr0net"],
lr0v=method_args["lr0v"],
init_args=method_args["init_at"],
init_sd=method_args[
"init_sd"
], # initialization of variance in variational model
num_pseudo=ps,
seed=t, # map random seed to the trial number for reproducibility of inference result at the beginning of each of the baseline
compute_weights_entropy=compute_weights_entropy,
reset=method_args.get("reset"),
reset_interval=method_args.get("reset_interval"),
architecture=method_args.get("architecture"),
log_pseudodata=method_args.get("log_pseudodata"),
n_hidden=method_args.get(
"n_hidden", 40
), # hidden units in nn architecture
n_layers=method_args.get("n_layers", 1),
train_dataset=train_dataset,
val_dataset=val_dataset,
test_dataset=test_dataset,
dnm=dnm,
y_mean=y_mean,
y_std=y_std,
taus=taus,
)
print("Trial completed!\n")
return write_to_files(results, method_args["fnm"])
def write_to_files(results: Dict[str, Any], fnm: str) -> None:
r"""
Write results to pk files
"""
res_fnm = f"{method_args['results_folder']}/{fnm}.pk"
print(f"Storing results in {res_fnm}")
with open(res_fnm, "wb") as outfile:
pickle.dump(results, outfile)
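# Hedged counterpart to write_to_files: a sketch of reading a stored results file back
# for analysis (the default filename mirrors the --fnm default above).
def _load_results(fnm: str = "results"):
    res_fnm = f"{method_args['results_folder']}/{fnm}.pk"
    with open(res_fnm, "rb") as infile:
        return pickle.load(infile)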
## Entry point
if __name__ == "__main__":
    # run experiment
    if method_args.get("architecture") != "regressor_net":
        experiment_driver(datasets, methods, method_args)
    else:
        regressor_experiment_driver(datasets, methods, method_args)
| Blackbox-Coresets-VI-main | psvi/experiments/flow_psvi.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| Blackbox-Coresets-VI-main | psvi/experiments/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
ADAPTATION OF flow_psvi FOR MULTI-GPU PLATFORMS
"""
r"""
Experiment execution script: Users can specify the dataset, the statistical model and the inference methods,
and this script will generate a dictionary with the predictive performance.
"""
# Import libraries
import argparse
import os
import pickle
from collections import defaultdict
from platform import architecture
from typing import Any, Dict, List
import concurrent
import tqdm
import random
from psvi.inference.baselines import (
run_giga,
run_mfvi,
run_mfvi_subset,
run_opsvi,
run_random,
run_sparsevi,
run_mfvi_regressor,
run_mfvi_subset_regressor
)
from psvi.inference.psvi_classes import (
PSVI,
PSVIAFixedU,
PSVIAV,
PSVIFixedU,
PSVILearnV,
PSVI_Ablated,
PSVI_No_IW,
PSVI_No_Rescaling,
PSVIFreeV,
PSVI_regressor,
PSVILearnV_regressor,
PSVIAV_regressor,
)
from psvi.inference.sparsebbvi import run_sparsevi_with_bb_elbo
from psvi.models.logreg import *
from experiments_utils import read_dataset, read_regression_dataset
import multiprocessing
from multiprocessing import set_start_method
torch.autograd.set_detect_anomaly(True)
NUM_GPUS = 8
parser = argparse.ArgumentParser()
# Arguments for the experiment workflow
parser.add_argument(
"--fnm", default="results", type=str, help="Filename where results are stored"
)
parser.add_argument(
"--datasets",
default=["phishing"],
nargs="+",
choices=["webspam", "phishing", "adult", "MNIST", "halfmoon", "four_blobs", "sinus", "concrete", "energy", "power", "kin8nm", "protein", "naval", "yacht", "boston", "wine", "year", "synth_lr_10", "synth_lr_50", "synth_lr_200"],
type=str,
help="List of dataset names",
)
parser.add_argument(
"--methods",
default=["psvi_learn_v", "mfvi", "mfvi_subset"],
nargs="+",
type=str,
help="List of inference method names",
)
parser.add_argument("--mc_samples", default=10, type=int, help="Monte Carlo samples")
parser.add_argument("--num_epochs", default=301, type=int, help="Training epochs")
parser.add_argument(
"--num_trials",
default=3,
type=int,
help="Trials executed for each inference method",
)
parser.add_argument(
"--data_minibatch", default=128, type=int, help="Data minibatch size"
)
parser.add_argument(
"--inner_it",
default=100,
type=int,
help="Gradient steps in the inner problem of nested optimization",
)
parser.add_argument(
"--outer_it",
default=100,
type=int,
help="Gradient steps in the outer problem of nested optimization",
)
parser.add_argument(
"--trainer",
default="nested",
choices=["nested", "hyper", "joint"],
type=str,
help="Method for computation of hypergradient",
)
parser.add_argument(
"--diagonal",
action=argparse.BooleanOptionalAction,
help="Diagonal approximation of Gaussian covariance matrices used",
)
parser.add_argument(
"--architecture",
default="logistic_regression",
choices=["logistic_regression", "logistic_regression_fullcov", "fn", "fn2", "lenet", "regressor_net"],
type=str,
help="Model architecture",
)
parser.add_argument(
"--n_hidden",
default=40,
type=int,
help="Number of hidden units in feedforward neural architectures",
)
parser.add_argument(
"--n_layers",
default=1,
type=int,
help="Number of layers in feedforward neural architectures",
)
parser.add_argument(
"--log_every",
default=150,
type=int,
help="Frequency of logging evaluation results throughout training (in number of outer gradient iterations)",
)
parser.add_argument(
"--register_elbos",
action=argparse.BooleanOptionalAction,
help="Saving variational objectives values throughout inference for plotting",
)
parser.add_argument(
"--init_sd",
default=1e-6,
type=float,
help="Initialization of standard deviation for variational parameters",
)
parser.add_argument(
"--lr0net",
default=1e-3,
type=float,
help="Initial learning rate for model parameters optimizer",
)
parser.add_argument(
"--lr0u",
default=1e-4,
type=float,
help="Initial learning rate for optimizer of pseudocoreset point input coordinates u",
)
parser.add_argument(
"--lr0v",
default=1e-3,
type=float,
help="Initial learning rate for optimizer of coreset support coefficients",
)
parser.add_argument(
"--lr0z",
default=1e-3,
type=float,
help="Initial learning rate for optimizer of coreset points labels",
)
parser.add_argument(
"--lr0alpha",
default=1e-3,
type=float,
help="Initial learning rate for coreset likelihood rescaling coefficient",
)
parser.add_argument(
"--init_at",
default="subsample",
choices=["subsample", "random"],
type=str,
help="Method for coreset points initialization",
)
parser.add_argument(
"--compute_weights_entropy",
action=argparse.BooleanOptionalAction,
help="Comput entropy of weights for plotting",
)
parser.add_argument(
"--coreset_sizes",
default=[100],
nargs="+",
type=int,
help="List of sizes for coresets computed throughout the experiment, or subsamples used for baselines mfvi_subset and random",
)
parser.add_argument(
"--reset",
action=argparse.BooleanOptionalAction,
help="Reset model parameters over intervals during training",
)
parser.add_argument(
"--prune",
action=argparse.BooleanOptionalAction,
help="Prune to coreset of smaller size",
)
parser.add_argument(
"--prune_interval",
default=400,
type=int,
help="Gradient steps in the outer problem of nested optimization between prunning steps",
)
parser.add_argument(
"--prune_sizes",
default=[20],
nargs="+",
type=int,
help="List of sizes for coresets in a pruning experiment (decreasing)",
)
parser.add_argument(
"--increment",
action=argparse.BooleanOptionalAction,
help="Learn tasks incrementally",
)
parser.add_argument(
"--increment_interval",
default=1000,
type=int,
help="Gradient steps in the outer problem of nested optimization between incremental learning stages",
)
parser.add_argument(
"--increment_sizes",
default=[20],
nargs="+",
type=int,
help="List of sizes for coresets in the incremental learning setting (non-decreasing)",
)
parser.add_argument(
"--retrain_on_coreset",
action=argparse.BooleanOptionalAction,
help="Retrain the variational model restricted only on the extracted coreset datapoints for the same number of epochs",
)
parser.add_argument(
"--save_input_data",
action=argparse.BooleanOptionalAction,
help="Save input dataset",
)
parser.add_argument(
"--test_ratio", default=0.2, type=float, help="Ratio of test dataset size"
)
parser.add_argument(
"--log_pseudodata",
action=argparse.BooleanOptionalAction,
help="Store pseudodata for visualisation",
)
parser.add_argument(
"--data_folder",
default="../data",
type=str,
help="Folder where dataset gets stored",
)
parser.add_argument(
"--results_folder",
default="../results",
type=str,
help="Folder where evaluation files get stored",
)
parser.add_argument(
"--learn_z",
action=argparse.BooleanOptionalAction,
help="Learn soft labels for distilled data",
)
parser.add_argument(
"--gamma", default=1., type=float, help="Decay factor of learning rate"
)
parser.set_defaults(
diagonal=True,
reset=False,
compute_weights_entropy=False,
register_elbos=False,
save_input_data=False,
prune=False,
increment=False,
log_pseudodata=False,
retrain_on_coreset=False,
learn_z=False,
)
parsed_args = parser.parse_args()
method_args = vars(parsed_args)
datasets, methods = method_args["datasets"], method_args["methods"]
method_args["logistic_regression"] = method_args['architecture'] == 'logistic_regression'
for fold in [method_args["data_folder"], method_args["results_folder"]]:
    os.makedirs(fold, exist_ok=True)  # make folders for data and results storage
def pass_dict(d, f):
return f(**d)
def rec_dd():
return defaultdict(rec_dd)
results = rec_dd() # recursive dictionary for storage of inference results
# Specify inference methods
def inf_alg(**kwargs):
if kwargs["nm_alg"]=="psvi":
return PSVI(**kwargs).run_psvi( **kwargs)
elif kwargs["nm_alg"]=="psvi_ablated":
return PSVI_Ablated( **kwargs).run_psvi(**kwargs)
elif kwargs["nm_alg"]=="psvi_learn_v":
return PSVILearnV( **kwargs).run_psvi(**kwargs)
elif kwargs["nm_alg"]=="psvi_alpha_v":
return PSVIAV(**kwargs).run_psvi(**kwargs)
elif kwargs["nm_alg"]=="psvi_no_iw":
return PSVI_No_IW(**kwargs).run_psvi(**kwargs)
elif kwargs["nm_alg"]=="psvi_free_v":
return PSVIFreeV(**kwargs).run_psvi(**kwargs)
elif kwargs["nm_alg"]=="psvi_no_rescaling":
return PSVI_No_Rescaling(**kwargs).run_psvi(**kwargs)
elif kwargs["nm_alg"]=="psvi_fixed_u":
return PSVIFixedU(**kwargs).run_psvi(**kwargs)
elif kwargs["nm_alg"]=="psvi_alpha_fixed_u":
return PSVIAFixedU(**kwargs).run_psvi(**kwargs )
elif kwargs["nm_alg"]=="psvi_regressor":
return PSVI_regressor(**kwargs).run_psvi(**kwargs)
elif kwargs["nm_alg"]=="psvi_alpha_v_regressor":
return PSVIAV_regressor(**kwargs).run_psvi(**kwargs)
elif kwargs["nm_alg"]=="psvi_learn_v_regressor":
return PSVILearnV_regressor(**kwargs).run_psvi(**kwargs)
elif kwargs["nm_alg"]=="sparsebbvi":
return run_sparsevi_with_bb_elbo
elif kwargs["nm_alg"]=="opsvi":
return run_opsvi
elif kwargs["nm_alg"]=="random":
return run_random
elif kwargs["nm_alg"]=="sparsevi":
return run_sparsevi
elif kwargs["nm_alg"]=="giga":
return run_giga
elif kwargs["nm_alg"]=="mfvi":
return run_mfvi
elif kwargs["nm_alg"]=="mfvi_subset":
return run_mfvi_subset
elif kwargs["nm_alg"]=="mfvi_regressor":
return run_mfvi_regressor
elif kwargs["nm_alg"]=="mfvi_subset_regressor":
return run_mfvi_subset_regressor
def experiment_driver(
datasets: List[str],
methods: Dict[str, bool],
method_args: Dict[str, Any],
) -> None:
r"""
Run experiment
"""
job_args = list()
for dnm in datasets:
# Read the dataset
print(f"\nReading/Generating the dataset {dnm.upper()}")
x, y, xt, yt, N, D, train_dataset, test_dataset, num_classes = read_dataset(
dnm, method_args
)
print(
f"\nBayesian {'logistic regression' if method_args['logistic_regression'] else 'neural network'} experiment.\nInference via {' '.join(map(lambda x:x.upper(), methods))} on {dnm} data over {method_args['num_trials']} {'independent trials.' if method_args['num_trials']>1 else 'trial.'}\n\n\n"
)
for nm_alg in methods:
print(f"\n\nRunning {nm_alg}\n")
logistic_regression = method_args.get(
"logistic_regression", method_args.get("architecture") == "logreg"
)
compute_weights_entropy = (
not nm_alg.startswith(("opsvi", "mfvi_subset"))
) and method_args["compute_weights_entropy"]
tps = (
method_args["coreset_sizes"]
if nm_alg.startswith(("psvi", "opsvi", "mfvi_subset"))
else [-1]
) # alias for baselines with no explicit constraint on dataset size
for t in range(method_args["num_trials"]):
print(f"Trial #{t}")
for (
ps
) in tps: # range of pseudocoreset sizes tested over the experiment
print(
f"Coreset/Subset with {ps if not method_args['increment'] else method_args['increment_sizes'][0]} datapoints"
) if ps != -1 else print("Unconstrained data access")
idx = len(job_args)
job_args.append({"mc_samples":method_args["mc_samples"],
"num_epochs":method_args["num_epochs"],
"data_minibatch":method_args["data_minibatch"],
"D":D,
"N":N,
"tr":t,
"diagonal":method_args["diagonal"],
"x":x,
"y":y,
"xt":xt,
"yt":yt,
"inner_it":method_args["inner_it"],
"outer_it":method_args["outer_it"],
"scatterplot_coreset":method_args.get(
"scatterplot_coreset"
), # not parsed for some methods atm
"logistic_regression":logistic_regression,
"trainer":method_args["trainer"],
"log_every":method_args["log_every"],
"register_elbos":method_args["register_elbos"],
"lr0u":method_args["lr0u"],
"lr0net":method_args["lr0net"],
"lr0v":method_args["lr0v"],
"lr0z":method_args["lr0z"],
"lr0alpha":method_args["lr0alpha"],
"init_args":method_args["init_at"],
"init_sd":method_args[
"init_sd"
], # initialization of variance in variational model
"num_pseudo":ps,
"seed":t, # map random seed to the trial number for reproducibility of inference result at the beginning of each of the baseline
"compute_weights_entropy":compute_weights_entropy,
"reset":method_args.get("reset"),
"reset_interval":method_args.get("reset_interval"),
"architecture":method_args.get("architecture"),
"log_pseudodata":method_args.get("log_pseudodata"),
"n_hidden":method_args.get(
"n_hidden", 40
), # hidden units in nn architecture
"n_layers":method_args.get("n_layers", 1),
"train_dataset":train_dataset,
"test_dataset":test_dataset,
"dnm":dnm,
"nc":num_classes,
"prune":method_args.get("prune"),
"prune_interval":method_args.get("prune_interval"),
"prune_sizes":method_args.get("prune_sizes"),
"increment":method_args.get("increment"),
"increment_interval":method_args.get("increment_interval"),
"increment_sizes":method_args.get("increment_sizes"),
"retrain_on_coreset":method_args.get("retrain_on_coreset"),
"learn_z":method_args["learn_z"],
"nm_alg":nm_alg,
"device_id":idx % NUM_GPUS,
})
pool = multiprocessing.Pool(NUM_GPUS) # first arg is the number of workers
results_pool = [pool.apply_async(inf_alg, kwds=job_arg) for job_arg in job_args]
    for _job_arg, result in zip(job_args, results_pool):
        results[_job_arg["dnm"]][_job_arg["nm_alg"]][_job_arg["num_pseudo"]][_job_arg["tr"]] = result.get()
return write_to_files(results, method_args["fnm"])
def write_to_files(results: Dict[str, Any], fnm: str) -> None:
r"""
Write results to pk files
"""
res_fnm = f"{method_args['results_folder']}/{fnm}.pk"
print(f"Storing results in {res_fnm}")
with open(res_fnm, "wb") as outfile:
pickle.dump(results, outfile)
## Entry point
if __name__ == "__main__":
    set_start_method('spawn')
    # run experiment
    if method_args.get("architecture") != "regressor_net":
        experiment_driver(datasets, methods, method_args)
    else:
        regressor_experiment_driver(datasets, methods, method_args)
| Blackbox-Coresets-VI-main | psvi/experiments/flow-psvi-parallel.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import os
import requests
import urllib.request
import zipfile
from collections import namedtuple
from io import BytesIO
import arff
import json
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
import torchvision
from PIL import Image
from psvi.models.neural_net import (
make_fc2net,
make_fcnet,
make_lenet,
make_regressor_net,
VILinear,
VILinearMultivariateNormal,
)
from sklearn.datasets import make_moons
from sklearn.decomposition import PCA
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from torch.utils.data import Dataset
r"""
Statistics used for normalization of some benchmark vision datasets
"""
dataset_normalization = dict(
MNIST=((0.1307,), (0.3081,)),
Cifar10=((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
)
r"""
Classes of some benchmark vision datasets
"""
dataset_labels = dict(
MNIST=list(range(10)),
Cifar10=(
"plane",
"car",
"bird",
"cat",
"deer",
"dog",
"monkey",
"horse",
"ship",
"truck",
),
)
DatasetStats = namedtuple(
"DatasetStats", " ".join(["num_channels", "real_size", "num_classes"])
)
r"""
Dimensions of vision benchmark datasets
"""
dataset_stats = dict(
MNIST=DatasetStats(1, 28, 10),
Cifar10=DatasetStats(3, 32, 10),
)
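# Hedged illustration: looking up per-dataset dimensions from dataset_stats.
def _dataset_stats_example():
    s = dataset_stats["MNIST"]
    return s.num_channels, s.real_size, s.num_classes  # (1, 28, 10)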
class SynthDataset(Dataset):
r"""
Custom torch dataset class supporting transforms
"""
def __init__(self, x, y=None, transforms=None):
self.data = x
self.targets = y
self.transforms = transforms
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index], self.targets[index]
def subset_where(self, cs=[0, 1]):
r"""
Returns subset of data corresponding to given list of classes
"""
idcs = torch.isin(self.targets, torch.tensor(cs))
return SynthDataset(self.data[idcs], self.targets[idcs])
def concatenate(self, u, z):
return SynthDataset(torch.cat((self.data, u)), y=torch.cat((self.targets, z)))
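# Hedged illustration of SynthDataset and its class-subset helper, on synthetic data.
def _synth_dataset_example():
    x = torch.randn(6, 2)
    y = torch.tensor([0, 1, 2, 0, 1, 2])
    ds = SynthDataset(x, y)
    return len(ds.subset_where(cs=[0, 1]))  # 4 of the 6 points have label 0 or 1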
def split_data(N, p_split=(0.6, 0.2, 0.2), n_split=None, shuffle=True, seed=None):
r"""
Helper function for splitting data into train / validation / test
"""
if seed is not None:
np.random.seed(seed)
if n_split is None:
p_split = np.array(p_split)
assert np.sum(p_split == -1) <= 1
p_split[p_split == -1] = 1 - (np.sum(p_split) + 1)
assert np.sum(p_split) == 1.0
p_train, p_val, p_test = p_split
train_idx = int(np.ceil(p_train * N))
val_idx = int(np.ceil(train_idx + p_val * N))
else:
n_split = np.array(n_split)
assert np.sum(n_split == -1) <= 1
n_split[n_split == -1] = N - (np.sum(n_split) + 1)
assert np.sum(n_split) == N
n_train, n_val, n_test = n_split
train_idx = int(n_train)
val_idx = int(train_idx + n_val)
idx = np.arange(N)
if shuffle:
np.random.shuffle(idx)
return {
"train": idx[:train_idx],
"val": idx[train_idx:val_idx],
"test": idx[val_idx:],
}
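# Illustrative sketch (hypothetical helper, not referenced elsewhere): splitting
# 100 datapoints with the default 60/20/20 proportions should yield disjoint
# index arrays of sizes 60, 20 and 20 covering range(100).
def _example_split_data():
    idx = split_data(100, p_split=(0.6, 0.2, 0.2), seed=0)
    assert len(idx["train"]) == 60 and len(idx["val"]) == 20 and len(idx["test"]) == 20
    return idx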
# custom dataset
class BaseDataset(Dataset):
def __init__(self, x, y=None, randomize=False):
self.data = (
x.mean() + 1.0 * torch.randn_like(x) if randomize else x
) # if randomize return a randomized replica of the data centered around the mean of the empirical distribution
self.targets = y
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index], self.targets[index]
def read_regression_dataset(dnm, method_args):
(X, Y), indices = get_regression_benchmark(
dnm,
seed=method_args["seed"],
p_split=(-1, 0.1, method_args["num_test"]),
)
taus = hyperparams_for_regression()[dnm]
# split into training and test sets
x, y, xv, yv, xt, yt = (
X[indices["train"]],
Y[indices["train"]],
X[indices["val"]],
Y[indices["val"]],
X[indices["test"]],
Y[indices["test"]],
)
N, D = x.shape
# compute training set statistics for normalization
x_mean, y_mean, x_std, y_std = (
np.mean(x, 0),
np.mean(y),
np.std(x, 0),
np.std(y),
)
# Parse in torch dataloaders
train_dataset, val_dataset, test_dataset, y_mean, y_std = (
BaseDataset(
torch.from_numpy(
((x - np.full(x.shape, x_mean)) / np.full(x.shape, x_std)).astype(
np.float32
)
),
torch.from_numpy(((y - y_mean) / y_std).astype(np.float32)),
),
BaseDataset(
torch.from_numpy(
(
(xv - np.full(xv.shape, x_mean)) / np.full(xv.shape, x_std)
).astype(np.float32)
),
torch.from_numpy(yv.astype(np.float32)),
),
BaseDataset(
torch.from_numpy(
(
(xt - np.full(xt.shape, x_mean)) / np.full(xt.shape, x_std)
).astype(np.float32)
),
torch.from_numpy(yt.astype(np.float32)),
),
torch.tensor(y_mean),
torch.tensor(y_std),
)
return x, y, xv, yv, xt, yt, N, D, train_dataset, val_dataset, test_dataset, y_mean, y_std, taus
def get_regression_benchmark(name, seed=111, data_dir="psvi/data/", **kwargs):
r"""
Return data from UCI sets
- param name: (str) Name of dataset to be used
- param seed: (int) Random seed for splitting data into train and test
- param kwargs: (dict) Additional arguments for splits
- return: Inputs, outputs, and data-splits
"""
np.random.seed(seed)
if not os.path.exists(data_dir):
os.mkdir(data_dir)
urllinks = {"concrete": "http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls",
"energy": "https://archive.ics.uci.edu/ml/machine-learning-databases/00242/ENB2012_data.xlsx",
"power": "https://archive.ics.uci.edu/ml/machine-learning-databases/00294/CCPP.zip",
"kin8nm": "https://www.openml.org/data/download/3626/dataset_2175_kin8nm.arff",
"protein": "https://archive.ics.uci.edu/ml/machine-learning-databases/00265/CASP.csv",
"naval": "http://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI%20CBM%20Dataset.zip",
"yacht": "http://archive.ics.uci.edu/ml/machine-learning-databases/00243/yacht_hydrodynamics.data",
"boston": "https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data",
"wine": "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv",
"year": "https://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt.zip"}
    if name in urllinks:  # synthetic datasets such as "sinus" have no file to download
        filename = urllinks[name].split('/')[-1]
        if not os.path.exists(data_dir + filename):
            urllib.request.urlretrieve(urllinks[name], data_dir + filename)
if name in ["concrete", "energy"]:
data = np.array(pd.read_excel(data_dir + filename))
elif name == "power":
zipfile.ZipFile(data_dir + filename).extractall(data_dir)
data = pd.read_excel(data_dir + 'CCPP/Folds5x2_pp.xlsx', header=0).values
elif name == "kin8nm":
dataset = arff.load(open(data_dir + filename))
data = np.array(dataset['data'])
elif name == "protein":
data = np.array(pd.read_csv(data_dir + filename))
elif name == "naval":
zipfile.ZipFile(data_dir + filename).extractall(data_dir)
data = np.loadtxt(data_dir + "UCI CBM Dataset/data.txt")
elif name in ["yacht", "boston"]:
data = np.loadtxt(data_dir + filename)
elif name == "wine":
data = np.array(pd.read_csv(data_dir + filename, delimiter=";"))
elif name == "year":
zipfile.ZipFile(data_dir + "/YearPredictionMSD.txt.zip").extractall(data_dir)
data = np.loadtxt(data_dir + "/YearPredictionMSD.txt" , delimiter=",")
elif name == "sinus":
X = np.random.rand(10**3) * 2 * np.pi
Y = np.sin(X)
data = np.stack((X, Y), axis=-1)
else:
raise ValueError("Unsupported dataset: {}".format(data_dir, name))
if name in ["energy", "naval"]: # dataset has 2 response values
X = data[:, :-2]
Y = data[:, -2:-1] # pick first response value
else:
X = data[:, :-1]
Y = data[:, -1:]
return (X, Y), split_data(len(X), **kwargs)
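# Illustrative sketch (hypothetical usage): the synthetic "sinus" benchmark needs
# no download, so it is a cheap way to exercise the loader; assumes the default
# data_dir ("psvi/data/") can be created, i.e. the call is made from the repo root.
def _example_get_regression_benchmark():
    (X, Y), idx = get_regression_benchmark("sinus", seed=0)
    # X has shape (1000, 1), Y has shape (1000, 1); idx holds train/val/test indices
    return X.shape, Y.shape, {k: len(v) for k, v in idx.items()}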
def hyperparams_for_regression():
r"""
Grid search space for precision in the regression BNN model
"""
return {
"concrete": [0.025, 0.05, 0.075],
"energy": [0.25, 0.5, 0.75],
"power": [0.05, 0.1, 0.15],
"kin8nm": [150, 200, 250],
"protein": [0.025, 0.05, 0.075],
"naval": [30000, 40000, 50000],
"yacht": [0.25, 0.5, 0.75],
"boston": [0.1, 0.15, 0.2],
"wine": [2.5, 3.0, 3.5],
"year":[0.1, 1., 10.]
}
def make_four_class_dataset(N_K=250):
r"""
Return two-dimensional four_blobs dataset with datapoints equally distributed among 4 classes
:param N_K (int): number of datapoints per class
"""
X1 = torch.cat(
[
0.8 + 0.4 * torch.randn(N_K, 1),
1.5 + 0.4 * torch.randn(N_K, 1),
],
dim=-1,
)
Y1 = 0 * torch.ones(X1.size(0)).long()
X2 = torch.cat(
[
0.5 + 0.6 * torch.randn(N_K, 1),
-0.2 - 0.1 * torch.randn(N_K, 1),
],
dim=-1,
)
Y2 = 1 * torch.ones(X2.size(0)).long()
X3 = torch.cat(
[
2.5 - 0.1 * torch.randn(N_K, 1),
1.0 + 0.6 * torch.randn(N_K, 1),
],
dim=-1,
)
Y3 = 2 * torch.ones(X3.size(0)).long()
X4 = torch.distributions.MultivariateNormal(
torch.Tensor([-0.5, 1.5]),
covariance_matrix=torch.Tensor([[0.2, 0.1], [0.1, 0.1]]),
).sample(torch.Size([N_K]))
Y4 = 3 * torch.ones(X4.size(0)).long()
X = torch.cat([X1, X2, X3, X4], dim=0)
X[:, 1] -= 1
X[:, 0] -= 0.5
Y = torch.cat([Y1, Y2, Y3, Y4])
rows_permutations = torch.randperm(X.size()[0])
return (
X[rows_permutations, :],
Y[rows_permutations],
) # shuffle rows for our train/test split
def set_up_model(
D=None,
n_hidden=None,
nc=None,
mc_samples=None,
architecture=None,
**kwargs,
):
r"""
Return torch nn model with the desired architecture
:param D (int): dimensionality of input data
:param n_hidden (int): number of units in each hidden layer
:param nc (int): dimensionality of last layer
:param mc_samples (int): number of samples produced at each forward pass through the nn
:param architecture (str): nn architecture
- "fn": fully connected feedforward network with diagonal Gaussian on variational layers
- "residual_fn": fn with residual connections
- "fn2": fn with full covariance matrix on variational layers
- "lenet": LeNet architecture
- "logistic_regression": single layer nn (no hidden layers) implementing the logistic regression model
- "logistic_regression_fullcov": single layer nn (no hidden layers) implementing the logistic regression model with full covariance variational approximations
"""
if architecture in {"fn", "residual_fn"}:
return make_fcnet(
D,
n_hidden,
nc,
linear_class=VILinear,
nonl_class=nn.ReLU,
mc_samples=mc_samples,
residual=(architecture == "residual_fn"),
**kwargs,
)
elif architecture in {"fn2"}:
return make_fc2net(
D,
n_hidden,
            nc,  # does not support an argument for the number of channels
linear_class=VILinearMultivariateNormal,
nonl_class=nn.ReLU,
mc_samples=mc_samples,
**kwargs,
)
elif architecture == "lenet":
return make_lenet(
linear_class=VILinear, nonl_class=nn.ReLU, mc_samples=mc_samples
)
elif architecture in {
"regressor_net",
}: # feed forward VI BNN for regression with diagonal covariance (optional arg for residual connections)
return make_regressor_net(
D,
n_hidden,
nc,
linear_class=VILinear,
nonl_class=nn.ReLU,
mc_samples=mc_samples,
residual=(architecture == "residual_fn"),
**kwargs,
)
elif architecture == "logistic_regression":
return nn.Sequential(VILinear(D, nc, mc_samples=mc_samples))
elif architecture == "logistic_regression_fullcov":
return nn.Sequential(VILinearMultivariateNormal(D, nc, mc_samples=mc_samples))
else:
raise ValueError(
"Architecture should be one of \n'lenet', 'logistic_regression', 'logistic_regression_fullcov', 'fn', 'fn2', 'residual_fn'"
)
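# Illustrative sketch (hypothetical usage): a Bayesian logistic-regression model on
# 10-dimensional inputs with 2 output classes and 4 MC samples; D, nc and the batch
# size below are made-up values for the example.
def _example_set_up_model():
    net = set_up_model(D=10, n_hidden=32, nc=2, mc_samples=4,
                       architecture="logistic_regression")
    out = net(torch.randn(8, 10))  # stochastic forward pass
    return out.shape               # (4, 8, 2): mc_samples x batch x classes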
@contextlib.contextmanager
def suppress_stdout():
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
yield
def get_torchvision_info(name):
r"""
Returns statistical information for specified torchvision benchmark dataset
"""
assert name in dataset_stats, "Unsupported dataset: {}".format(name)
num_channels, input_size, num_classes = dataset_stats[name]
normalization = dataset_normalization[name]
labels = dataset_labels[name]
return num_channels, input_size, num_classes, normalization, labels
def load_dataset(path, urls):
r"""
    Downloads the files at the given URLs and writes them under the given path
"""
if not os.path.exists(path):
os.mkdir(path)
for url in urls:
data = requests.get(url).content
filename = os.path.join(path, os.path.basename(url))
with open(filename, "wb") as file:
file.write(data)
return
def read_adult(data_folder):
r"""
Returns the adult dataset for logistic regression
"""
urls = [
"http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.names",
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
]
load_dataset(data_folder, urls)
columns = [
"age",
"workClass",
"fnlwgt",
"education",
"education-num",
"marital-status",
"occupation",
"relationship",
"race",
"sex",
"capital-gain",
"capital-loss",
"hours-per-week",
"native-country",
"income",
]
train_data = pd.read_csv(
data_folder + "/adult.data",
names=columns,
sep=" *, *",
na_values="?",
engine="python",
).dropna()
test_data = pd.read_csv(
data_folder + "/adult.test",
names=columns,
sep=" *, *",
skiprows=1,
na_values="?",
engine="python",
).dropna()
X, Xt = train_data[columns[::-1]], test_data[columns[::-1]]
Y = np.array([0 if s == "<=50K" else 1 for s in train_data["income"]])
Yt = np.array([0 if s == "<=50K." else 1 for s in test_data["income"]])
# numerical columns : standardize
numcols = ["age", "education-num", "capital-gain", "capital-loss", "hours-per-week"]
ss = StandardScaler()
ss.fit(X[numcols])
Xnum, Xtnum = ss.transform(X[numcols]), ss.transform(Xt[numcols])
# categorical columns: apply 1-hot-encoding
catcols = [
"workClass",
"marital-status",
"occupation",
"relationship",
"race",
"sex",
"native-country",
]
enc = OneHotEncoder()
enc.fit(X[catcols])
Xcat, Xtcat = (
enc.transform(X[catcols]).toarray(),
enc.transform(Xt[catcols]).toarray(),
)
X, Xt = np.concatenate((Xnum, Xcat), axis=1), np.concatenate((Xtnum, Xtcat), axis=1)
pca = PCA(n_components=10)
pca.fit(X)
X = pca.transform(X)
Xt = pca.transform(Xt)
X = np.c_[X, np.ones(X.shape[0])]
Xt = np.c_[Xt, np.ones(Xt.shape[0])]
return X, Y, Xt, Yt
def read_phishing(data_folder, dnm="phishing"):
r"""
Returns the phishing dataset for logistic regression
"""
filename, urllink = (
f"{data_folder}/{dnm}.npz",
f"https://github.com/trevorcampbell/bayesian-coresets/blob/master/examples/data/{dnm}.npz?raw=true",
)
if not os.path.isfile(filename):
response = requests.get(urllink)
response.raise_for_status()
data = np.load(BytesIO(response.content))
else:
data = np.load(filename)
return data["X"], data["y"]
def read_webspam(data_folder, dnm="webspam"):
r"""
Returns the webspam dataset for logistic regression
"""
import sklearn.datasets as skl_ds
from sklearn import preprocessing
import scipy.sparse as sp
import numpy as np
fnm_train, urllink_train = (
f"{data_folder}/{dnm}_train.svm",
"https://bitbucket.org/jhhuggins/lrcoresets/raw/cdcda24b5854ef380795ec11ab5321d0ec53c3fe/data/webspam_train.svm",
)
fnm_test, urllink_test = (
f"{data_folder}/{dnm}_test.svm",
"https://bitbucket.org/jhhuggins/lrcoresets/raw/cdcda24b5854ef380795ec11ab5321d0ec53c3fe/data/webspam_test.svm",
)
import urllib.request
if not os.path.isfile(fnm_train):
urllib.request.urlretrieve(urllink_train, fnm_train)
if not os.path.isfile(fnm_test):
urllib.request.urlretrieve(urllink_test, fnm_test)
def _load_svmlight_data(path):
X, y = skl_ds.load_svmlight_file(path)
return X, y
def load_data(path, file_type, max_data=0, max_dim=0,
preprocess=True, include_offset=True):
"""Load data from a variety of file types.
Parameters
----------
path : string
Data file path.
file_type : string
            Only 'svmlight' (alias 'svm') files are supported in this
            implementation; other file types raise a ValueError.
max_data : int
If positive, maximum number of data points to use. If zero or negative,
all data is used. Default is 0.
max_dim : int
If positive, maximum number of features to use. If zero or negative,
all features are used. Default is 0.
preprocess : boolean or Transformer, optional
Flag indicating whether the data should be preprocessed. For sparse
data, the features are scaled to [-1, 1]. For dense data, the features
are scaled to have mean zero and variance one. Default is True.
include_offset : boolean, optional
            Flag indicating that an offset feature should be added. Default is
            True.
Returns
-------
X : array-like matrix, shape=(n_samples, n_features)
y : int ndarray, shape=(n_samples,)
Each entry indicates whether each example is negative (-1 value) or
positive (+1 value)
pp_obj : None or Transformer
Transformer object used on data, or None if ``preprocess=False``
"""
if not isinstance(path, str):
raise ValueError("'path' must be a string")
if file_type in ["svmlight", "svm"]:
X, y = _load_svmlight_data(path)
else:
raise ValueError("unsupported file type, %s" % file_type)
y_vals = set(y)
if len(y_vals) != 2:
            raise ValueError('Only expected y to take on two values, but instead '
                             'takes on the values ' + ', '.join(map(str, y_vals)))
        if 1.0 not in y_vals:
            raise ValueError('y does not take on 1.0 as one of its values, but '
                             'instead takes on the values ' + ', '.join(map(str, y_vals)))
if -1.0 not in y_vals:
y_vals.remove(1.0)
print('converting y values of %s to -1.0' % y_vals.pop())
y[y != 1.0] = -1.0
if preprocess is False:
pp_obj = None
else:
if preprocess is True:
if sp.issparse(X):
pp_obj = preprocessing.MaxAbsScaler(copy=False)
else:
pp_obj = preprocessing.StandardScaler(copy=False)
pp_obj.fit(X)
else:
pp_obj = preprocess
X = pp_obj.transform(X)
if include_offset:
X = preprocessing.add_dummy_feature(X)
            X = np.flip(X, -1)  # reverse feature order so that the offset column ends up last
if sp.issparse(X) and (X.nnz > np.prod(X.shape) / 10 or X.shape[1] <= 20):
print("X is either low-dimensional or not very sparse, so converting "
"to a numpy array")
X = X.toarray()
if isinstance(max_data, int) and max_data > 0 and max_data < X.shape[0]:
X = X[:max_data,:]
y = y[:max_data]
if isinstance(max_dim, int) and max_dim > 0 and max_dim < X.shape[1]:
X = X[:,:max_dim]
return X, y, pp_obj
X, y, _ = load_data(fnm_train, 'svm')
# load testing data if it exists
Xt, yt, _ = load_data(fnm_test, 'svm')
y[y==-1], yt[yt==-1] = 0, 0
    np.savez(os.path.join(data_folder, dnm), X=X, y=y, Xt=Xt, yt=yt)
return X, y, Xt, yt
def make_synthetic(num_datapoints=1000, D=2):
r"""
Generate D-dimensional synthetic dataset for logistic regression
"""
mu = np.array([0]*D)
cov = np.eye(D)
th = np.array([5]*D)
X = np.random.multivariate_normal(mu, cov, num_datapoints)
ps = 1.0/(1.0+np.exp(-(X*th).sum(axis=1)))
y = (np.random.rand(num_datapoints) <= ps).astype(int)
y[y==0] = -1
return torch.from_numpy(X.astype(np.float32)), torch.from_numpy(y.astype(np.float32))
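# Illustrative sketch (hypothetical usage): a small 2-dimensional synthetic
# logistic-regression dataset; labels come out in {-1, +1} and are remapped to
# {0, 1} further down in read_dataset.
def _example_make_synthetic():
    X, y = make_synthetic(num_datapoints=100, D=2)
    assert X.shape == (100, 2) and set(y.unique().tolist()) <= {-1.0, 1.0}
    return X, y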
def read_dataset(dnm, method_args):
r"""
Returns one of the supported benchmark or synthetic dataset for the experiments in logistic regression, classification or regression via Bayesian nns
"""
# TBC: check if inference methods are compatible with the dataset and raise exceptions accordingly
if dnm != "MNIST": # UCI or synthetic datasets
if dnm == "halfmoon":
# Generate HalfMoon data
(X, Y), num_classes = (
make_moons(n_samples=1000, noise=0.1, random_state=42),
2,
)
X, Y = torch.from_numpy(X.astype(np.float32)), torch.from_numpy(
Y.astype(np.float32)
)
elif dnm == "four_blobs":
# Generate synthetic multiclass data
(X, Y), num_classes = make_four_class_dataset(N_K=250), 4
elif dnm == "phishing":
(X, Y), num_classes = read_phishing(method_args["data_folder"]), 2
X, Y = torch.from_numpy(X.astype(np.float32)), torch.from_numpy(
Y.astype(np.float32)
)
elif dnm == "adult":
(x, y, xt, yt), num_classes = read_adult(method_args["data_folder"]), 2
x, y, xt, yt = (
torch.from_numpy(x.astype(np.float32)),
torch.from_numpy(y.astype(np.float32)),
torch.from_numpy(xt.astype(np.float32)),
torch.from_numpy(yt.astype(np.float32)),
)
elif dnm == "webspam":
(x, y, xt, yt), num_classes = read_webspam(method_args["data_folder"]), 2
x, y, xt, yt = (
torch.from_numpy(x.astype(np.float32)),
torch.from_numpy(y.astype(np.float32)),
torch.from_numpy(xt.astype(np.float32)),
torch.from_numpy(yt.astype(np.float32)),
)
elif dnm.startswith("synth_lr"):
(X, Y), num_classes = make_synthetic(D=int(dnm.split('_')[-1]), num_datapoints=1000), 2
if dnm.startswith(("halfmoon", "four_blobs", "phishing", "synth_lr")): # splite in train / test data
Y[Y == -1] = 0
test_size = int(method_args["test_ratio"] * X.shape[0])
x, y, xt, yt = (
X[:-test_size],
Y[:-test_size],
X[-test_size:],
Y[-test_size:],
)
N, D = x.shape
(train_dataset, test_dataset) = (
(SynthDataset(x, y), SynthDataset(xt, yt))
if dnm.startswith(("halfmoon", "four_blobs", "phishing", "synth_lr", "webspam", "adult"))
else (None, None)
)
else:
_, input_size, num_classes, normalization, _ = get_torchvision_info(dnm)
real_size = dataset_stats[dnm].real_size
N, D = 60000, input_size
if input_size != real_size:
transform_list = [
torchvision.transforms.Resize([input_size, input_size], Image.BICUBIC)
]
else:
transform_list = []
transform_list += [
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(*normalization),
]
with suppress_stdout():
train_dataset, test_dataset = torchvision.datasets.MNIST(
root=method_args["data_folder"],
download=True,
train=True,
transform=torchvision.transforms.Compose(transform_list),
), torchvision.datasets.MNIST(
root=method_args["data_folder"],
download=True,
train=False,
transform=torchvision.transforms.Compose(transform_list),
)
x, y, xt, yt = None, None, None, None
return x, y, xt, yt, N, D, train_dataset, test_dataset, num_classes
from json.decoder import JSONDecodeError
def update_hyperparams_dict(dnm, best_tau, fnm='psvi/data/opt_regr_hyperparams.json'):
pass
'''
with open(fnm, "a+") as f:
try:
opt_taus = json.loads(f)
except JSONDecodeError:
opt_taus = {"init":0}
json.dumps(opt_taus, f)
opt_taus = json.load(f)
opt_taus[dnm] = opt_taus.get(dnm, best_tau)
'''
|
Blackbox-Coresets-VI-main
|
psvi/experiments/experiments_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
Blackbox-Coresets-VI-main
|
psvi/models/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import operator as op
from functools import reduce
import numpy as np
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
def gaussian_fn(loc=None, scale=None):
return dist.normal.Normal(loc, scale)
def categorical_fn(logits=None, probs=None):
return dist.Categorical(logits=logits, probs=probs)
def set_mc_samples(net, mc_samples):
for module in net.modules():
if isinstance(module, VIMixin):
module.mc_samples = mc_samples
def inverse_softplus(x):
if torch.is_tensor(x):
return x.expm1().log()
return np.log(np.expm1(x))
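# Illustrative sketch (hypothetical check): inverse_softplus inverts F.softplus,
# i.e. softplus(inverse_softplus(x)) ~= x for positive x, up to float32 rounding.
def _example_inverse_softplus():
    x = torch.tensor([0.01, 0.5, 2.0])
    assert torch.allclose(F.softplus(inverse_softplus(x)), x, atol=1e-6)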
def prod(a):
return reduce(op.mul, a)
def deep_getattr(obj, name):
return reduce(getattr, name.split("."), obj)
def deep_delattr(obj, name):
lpart, _, rpart = name.rpartition(".")
if lpart:
obj = deep_getattr(obj, lpart)
delattr(obj, rpart)
def deep_setattr(obj, name, value):
lpart, _, rpart = name.rpartition(".")
if lpart:
obj = deep_getattr(obj, lpart)
setattr(obj, rpart, value)
class VIMixin(nn.Module):
def __init__(self, *args, init_sd=0.01, prior_sd=1.0, mc_samples=1, **kwargs):
super().__init__(*args, **kwargs)
self._weight_sd = nn.Parameter(
inverse_softplus(torch.full_like(self.weight, init_sd))
)
if self.bias is not None:
            self._bias_sd = nn.Parameter(
                inverse_softplus(torch.full_like(self.bias, init_sd))
            )
else:
self.register_parameter("_bias_sd", None)
(
self.prior_sd,
self.mc_samples,
self._cached_weight,
self._cached_bias,
self._init_sd,
) = (
prior_sd,
mc_samples,
None,
None,
init_sd,
)
self.reset_parameters_variational()
def reset_parameters_variational(self) -> None:
super().reset_parameters() # pyre-ignore
self._weight_sd.data.copy_(
inverse_softplus(torch.full_like(self.weight, self._init_sd))
)
if self.bias is not None:
self._bias_sd.data.copy_(
inverse_softplus(torch.full_like(self.bias, self._init_sd))
)
self._cached_weight, self._cached_bias = (
None,
None,
)
def kl(self):
w_kl = dist.kl_divergence(self.weight_dist, self.prior_weight_dist)
b_kl = (
dist.kl_divergence(self.bias_dist, self.prior_bias_dist)
if self.bias is not None
else 0.0
)
return w_kl + b_kl
def sampled_nkl(self):
w = self._cached_weight
w_kl = self.prior_weight_dist.log_prob(w) - self.weight_dist.log_prob(w)
b = self._cached_bias.squeeze(1) if self.mc_samples > 1 else self._cached_bias
b_kl = self.prior_bias_dist.log_prob(b) - self.bias_dist.log_prob(b)
return w_kl + b_kl
@property
def weight_dist(self):
return dist.Independent(
dist.Normal(self.weight, self.weight_sd), self.weight.ndim
)
@property
def prior_weight_dist(self):
return dist.Independent(
dist.Normal(torch.zeros_like(self.weight), self.prior_sd), self.weight.ndim
)
@property
def weight_sd(self):
return F.softplus(self._weight_sd)
@property
def bias_dist(self):
if self.bias is not None:
return dist.Independent(
dist.Normal(self.bias, self.bias_sd), self.bias.ndim
)
return None
@property
def prior_bias_dist(self):
if self.bias is not None:
return dist.Independent(
dist.Normal(torch.zeros_like(self.bias), self.prior_sd), self.bias.ndim
)
return None
@property
def bias_sd(self):
if self.bias is not None:
return F.softplus(self._bias_sd)
return None
def rsample(self):
weight = self.weight_dist.rsample(self.weight_batch_shape)
bias = (
self.bias_dist.rsample(self.bias_batch_shape)
if self.bias is not None
else None
)
return weight, bias
@property
def weight_batch_shape(self):
return torch.Size((self.mc_samples,) if self.mc_samples > 1 else ())
@property
def bias_batch_shape(self):
return torch.Size((self.mc_samples, 1) if self.mc_samples > 1 else ())
def extra_repr(self):
return f"{super().extra_repr()}, mc_samples={self.mc_samples}"
class VILinear(VIMixin, nn.Linear):
def forward(self, x):
self._cached_weight, self._cached_bias = self.rsample()
return x.matmul(self._cached_weight.transpose(-2, -1)) + self._cached_bias
"""
class ResNet(torch.nn.Module):
def __init__(self, module, skip_connection):
super().__init__()
self.module = module
self.skip_connection = skip_connection
def forward(self, x):
return self.module(x) + self.skip_connection(x)
"""
class VIConv2d(VIMixin, nn.Conv2d):
def __init__(self, *args, **kwargs):
if "groups" in kwargs:
raise ValueError(
"Cannot use groups argument for variational conv layer as this is used for parallelizing across samples."
)
super().__init__(*args, **kwargs)
def forward(self, x):
# x: S x N x C x H x W
# or
# x: N x C x H x W
# reshape to: N x SC x H x W
# so that when we convolve with
# w: SK x C x h x w
# we get an output with shape
# N x SK x H' x W'
# that we reshape to
# S x N x K x H' x W'
if self.mc_samples > 1:
if x.ndim == 4:
x = x.repeat(1, self.mc_samples, 1, 1)
else:
x = x.transpose(0, 1).flatten(1, 2)
self._cached_weight, self._cached_bias = self.rsample()
w = (
self._cached_weight.flatten(0, 1)
if self.mc_samples > 1
else self._cached_weight
)
b = self._cached_bias.flatten()
a = F.conv2d(
x,
w,
b,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.mc_samples,
)
if self.mc_samples > 1:
return a.view(
-1, self.mc_samples, self.out_channels, *a.shape[-2:]
).transpose(0, 1)
return a
class BatchMaxPool2d(nn.MaxPool2d):
def forward(self, x):
        if x.ndim == 4:
return super().forward(x)
d0, d1 = x.shape[:2]
x = super().forward(x.flatten(0, 1))
return x.view(d0, d1, *x.shape[1:])
def make_fcnet(
in_dim,
h_dim,
out_dim,
n_layers=2,
linear_class=None,
nonl_class=None,
mc_samples=4,
residual=False,
**kwargs,
):
if linear_class is None:
linear_class = VILinear
if nonl_class is None:
nonl_class = nn.ReLU
net = nn.Sequential()
for i in range(n_layers):
net.add_module(
f"lin{i}", linear_class(in_dim if i == 0 else h_dim, h_dim, **kwargs)
)
net.add_module(f"nonl{i}", nonl_class())
"""
if residual:
skip_connection = nn.Linear(in_dim, h_dim)
net = ResNet(net, skip_connection)
"""
net.add_module("classifier", linear_class(h_dim, out_dim, **kwargs))
for module in net.modules():
module.mc_samples = mc_samples
return net
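# Illustrative sketch (hypothetical usage): a small variational MLP built with
# make_fcnet; the per-layer KL terms exposed by VIMixin.kl() can be summed into
# the KL regularizer of an ELBO. All sizes below are made up for the example.
def _example_make_fcnet():
    net = make_fcnet(5, 16, 3, n_layers=2, linear_class=VILinear,
                     nonl_class=nn.ReLU, mc_samples=4)
    out = net(torch.randn(7, 5))   # shape (4, 7, 3): mc_samples x batch x out_dim
    kl = sum(m.kl() for m in net.modules() if isinstance(m, VILinear))
    return out.shape, kl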
def make_regressor_net(
in_dim,
h_dim,
out_dim=1,
n_layers=2,
linear_class=None,
nonl_class=None,
mc_samples=4,
residual=False,
**kwargs,
):
if linear_class is None:
linear_class = VILinear
if nonl_class is None:
nonl_class = nn.ReLU
net = nn.Sequential()
for i in range(n_layers):
net.add_module(
f"lin{i}",
linear_class(in_dim if i == 0 else h_dim, h_dim, **kwargs),
)
net.add_module(f"nonl{i}", nonl_class())
"""
if residual:
skip_connection = nn.Linear(in_dim, h_dim)
net = ResNet(net, skip_connection)
"""
net.add_module("regressor", linear_class(h_dim, out_dim, **kwargs))
for module in net.modules():
module.mc_samples = mc_samples
return net
def make_lenet(
conv_class=None, linear_class=None, pool_class=None, nonl_class=None, **kwargs
):
if conv_class is None:
conv_class = VIConv2d
if linear_class is None:
linear_class = VILinear
if pool_class is None:
pool_class = BatchMaxPool2d
if nonl_class is None:
nonl_class = nn.ReLU
return nn.Sequential(
conv_class(1, 6, 5, padding=2, **kwargs),
nonl_class(),
pool_class(2, 2),
conv_class(6, 16, 5, padding=0, **kwargs),
nonl_class(),
pool_class(2, 2),
nn.Flatten(-3, -1),
linear_class(400, 120, **kwargs),
nonl_class(),
linear_class(120, 84, **kwargs),
nonl_class(),
linear_class(84, 10),
)
def make_alexnet(
conv_class=None,
linear_class=None,
pool_class=None,
nonl_class=None,
local_response_norm_class=None,
**kwargs,
):
if conv_class is None:
conv_class = VIConv2d
if linear_class is None:
linear_class = VILinear
if pool_class is None:
pool_class = BatchMaxPool2d
if nonl_class is None:
nonl_class = nn.ReLU
if local_response_norm_class is None:
local_response_norm_class = nn.LocalResponseNorm
return nn.Sequential(
conv_class(3, 64, 5, stride=1, padding=2),
nonl_class(),
pool_class(kernel_size=3, stride=2, padding=1),
local_response_norm_class(4, alpha=0.001 / 9.0, beta=0.75, k=1),
conv_class(64, 64, kernel_size=5, padding=2, stride=1),
nonl_class(),
local_response_norm_class(4, alpha=0.001 / 9.0, beta=0.75, k=1),
pool_class(kernel_size=3, stride=2, padding=1),
nn.Flatten(-3, -1),
linear_class(
4096, 384, **kwargs
), # add kwargs so that mc_samples arg gets correctly passed
nonl_class(),
linear_class(384, 192, **kwargs),
nonl_class(),
linear_class(192, 10),
)
class network(torch.nn.Module):
    def __init__(self, **kwargs):
        super().__init__()
        self.upscale = nn.Upsample(scale_factor=2, mode="bilinear")
self.conv1 = nn.Conv2d(963, 128, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(64, 2, kernel_size=3, padding=1)
class MultivariateNormalVIMixin(nn.Module):
def __init__(self, *args, init_sd=0.01, prior_sd=1., mc_samples=1, **kwargs):
super().__init__(*args, **kwargs)
self.mc_samples = mc_samples
self.prior_sd = prior_sd
self.param_names = []
self.param_shapes = []
for n, p in list(self.named_parameters()):
self.param_names.append(n)
self.param_shapes.append(p.shape)
deep_delattr(self, n)
self.param_numels = list(map(prod, self.param_shapes))
n = sum(self.param_numels)
self.mean = nn.Parameter(p.new_zeros(n))
self._sd = nn.Parameter(inverse_softplus(p.new_full((n,), init_sd)))
n_corr = torch.tril_indices(n - 1, n - 1, offset=-1)[0].numel()
self._corr = nn.Parameter(p.new_zeros(n_corr))
self.num_params = n
def reset_parameters_variational(self) -> None:
raise NotImplementedError
def kl(self):
return dist.kl_divergence(self.param_dist, self.prior_dist)
def sampled_nkl(self):
x = torch.cat(
[deep_getattr(self, n).flatten(1) for n in self.param_names], dim=1
)
return self.prior_dist.log_prob(x) - self.param_dist.log_prob(x)
'''
def sampled_nkl(self):
w = self._cached_weight
w_kl = self.prior_weight_dist.log_prob(w) - self.weight_dist.log_prob(w)
b = self._cached_bias.squeeze(1) if self.mc_samples > 1 else self._cached_bias
b_kl = self.prior_bias_dist.log_prob(b) - self.bias_dist.log_prob(b)
return w_kl + b_kl
'''
@property
def scale_tril(self):
k = self.mean.new_zeros(self.num_params, self.num_params)
k[torch.arange(self.num_params), torch.arange(self.num_params)] = F.softplus(
self._sd
)
d = self.mean.size(-1) - 1
i = torch.tril_indices(d, d, offset=-1)
k[i[0], i[1]] = self._corr
return k
@property
def param_dist(self):
return dist.MultivariateNormal(self.mean, scale_tril=self.scale_tril)
def rsample(self):
x = self.param_dist.rsample((self.mc_samples,))
return [
xx.reshape(self.mc_samples, *shape)
for xx, shape in zip(x.split(self.param_numels, dim=-1), self.param_shapes)
]
def cached_rsample(self):
for name, sample in zip(self.param_names, self.rsample()):
deep_setattr(self, name, sample)
@property
def prior_dist(self):
m = torch.zeros_like(self.mean)
sd = torch.full_like(self.mean, self.prior_sd).diag_embed()
return dist.MultivariateNormal(m, scale_tril=sd)
class VILinearMultivariateNormal(MultivariateNormalVIMixin, nn.Linear):
def forward(self, x, **kwargs):
super().cached_rsample()
x = x.matmul(self.weight.transpose(-1, -2))
if self.bias is not None:
x = x + self.bias.unsqueeze(-2)
return x
def make_fc2net(
in_dim,
h_dim,
out_dim,
n_layers=2,
linear_class=None,
nonl_class=None,
mc_samples=4,
residual=False,
**kwargs,
):
if linear_class is None:
linear_class = VILinearMultivariateNormal
if nonl_class is None:
nonl_class = nn.ReLU
net = nn.Sequential()
for i in range(n_layers):
net.add_module(
f"lin{i}", linear_class(in_dim if i == 0 else h_dim, h_dim, mc_samples=mc_samples, **kwargs)
)
net.add_module(f"nonl{i}", nonl_class())
"""
if residual:
skip_connection = nn.Linear(in_dim, h_dim)
net = ResNet(net, skip_connection)
"""
net.add_module("classifier", linear_class(h_dim, out_dim, mc_samples=mc_samples, **kwargs))
for module in net.modules():
module.mc_samples = mc_samples
return net
|
Blackbox-Coresets-VI-main
|
psvi/models/neural_net.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import stan
import torch
from torch.distributions.normal import Normal
def logreg_forward(thetas, x):
return x.matmul(thetas.T).sigmoid().mean(axis=1).squeeze()
def model(thetas, mu0, sigma0, x, y, single=False):
prior_val = Normal(mu0, sigma0).log_prob(thetas).sum()
if single:
return -torch.nn.BCEWithLogitsLoss(reduction="none")(x @ thetas, y), prior_val
return (
-torch.nn.BCEWithLogitsLoss(reduction="none")(
x.matmul(thetas.T).squeeze(), y.repeat(thetas.shape[0], 1).T
),
prior_val,
)
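# Illustrative sketch (hypothetical shapes): with S parameter samples and N
# augmented datapoints of dimension D (last column acting as the intercept
# feature), the log-likelihood matrix returned above is N x S and the prior
# log-density is a scalar.
def _example_model_shapes():
    S, N, D = 3, 5, 4
    thetas = torch.randn(S, D)
    x, y = torch.randn(N, D), torch.randint(0, 2, (N,)).float()
    mu0, sigma0 = torch.zeros(D), torch.ones(D)
    ll, prior_val = model(thetas, mu0, sigma0, x, y)
    return ll.shape, prior_val.shape  # torch.Size([5, 3]), torch.Size([])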
def prior(D):
mu0_w, sigma0_w, mu0_b, sigma0_b = (
torch.zeros(D),
torch.ones(D),
torch.zeros(1),
torch.ones(1),
)
return mu0_w, sigma0_w, mu0_b, sigma0_b
def inverse_softplus(x):
if torch.is_tensor(x):
return x.expm1().log()
return np.log(np.expm1(x))
# Stan model used for coreset posterior sampling in the original Sparse VI implementation
stan_representation = """
data {
int<lower=0> d; // 1 + dimensionality of x
int<lower=0> n; // number of observations
matrix[n,d] x; // inputs
int<lower=0,upper=1> y[n]; // outputs in {0, 1}
vector[n] w; // weights
}
parameters {
real theta0; // intercept
vector[d] theta; // logreg params
}
model {
theta0 ~ normal(0, 1);
theta ~ normal(0, 1);
for(i in 1:n){
target += w[i]*bernoulli_logit_lpmf(y[i]| theta0 + x[i]*theta);
}
}
"""
def mcmc_sample(sml, core_idcs, x, y, w, N_per=2000, seed=42, n_samples=5):
np.random.seed(seed=seed)
torch.manual_seed(seed)
sampler_data = {
"x": x[core_idcs, :].detach().cpu().numpy(),
"y": y[core_idcs].detach().cpu().numpy().astype(int),
"d": x.shape[1],
"n": len(core_idcs),
"w": w[core_idcs].detach().cpu().numpy(),
}
sml = stan.build(stan_representation, data=sampler_data, seed=seed)
sampling_output = sml.sample(
num_samples=N_per,
chains=1,
control={"adapt_delta": 0.9, "max_treedepth": 15},
verbose=False,
)[:, -n_samples:]
param_samples = torch.cat(
(
torch.tensor([d["theta"] for d in sampling_output]),
torch.tensor([d["theta0"] for d in sampling_output]).unsqueeze(axis=1),
),
axis=1,
)
return param_samples
def laplace_precision(z_core, theta, w, diagonal=False):
with torch.no_grad():
m = z_core @ theta
idcs = w > 0
p = m[idcs].sigmoid()
d = p * (1 - p) * w[idcs]
a = z_core[idcs].T * d.sqrt()
if diagonal:
return a.pow(2).sum(1) + 1
else:
nll_hessian = a.matmul(a.T)
negative_log_prior_hessian = torch.eye(z_core.shape[1])
return negative_log_prior_hessian + nll_hessian
|
Blackbox-Coresets-VI-main
|
psvi/models/logreg.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
"""from https://github.com/lrjconan/RBP/blob/9c6e68d1a7e61b1f4c06414fae04aeb43c8527cb/utils/model_helper.py"""
import torch
def cg(Ax, b, max_iter=100, epsilon=1.0e-5):
"""Conjugate Gradient
Args:
Ax: function, takes list of tensors as input
b: list of tensors
Returns:
x_star: list of tensors
"""
x_last = [torch.zeros_like(bb) for bb in b]
r_last = [torch.zeros_like(bb).copy_(bb) for bb in b]
p_last = [torch.zeros_like(rr).copy_(rr) for rr in r_last]
for _ in range(max_iter):
Ap = Ax(p_last)
Ap_vec = cat_list_to_tensor(Ap)
p_last_vec = cat_list_to_tensor(p_last)
r_last_vec = cat_list_to_tensor(r_last)
rTr = torch.sum(r_last_vec * r_last_vec)
pAp = torch.sum(p_last_vec * Ap_vec)
alpha = rTr / pAp
x = [xx + alpha * pp for xx, pp in zip(x_last, p_last)]
r = [rr - alpha * pp for rr, pp in zip(r_last, Ap)]
r_vec = cat_list_to_tensor(r)
if float(torch.norm(r_vec)) < epsilon:
break
beta = torch.sum(r_vec * r_vec) / rTr
p = [rr + beta * pp for rr, pp in zip(r, p_last)]
x_last = x
p_last = p
r_last = r
return x_last
def cat_list_to_tensor(list_tx):
return torch.cat([xx.view([-1]) for xx in list_tx])
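# Illustrative sketch (hypothetical usage): solving A x = b for a small symmetric
# positive-definite A by passing the matrix-vector product as a closure over
# lists of tensors, which is the calling convention cg() expects.
def _example_cg():
    A = torch.tensor([[3.0, 1.0], [1.0, 2.0]])
    b = [torch.tensor([1.0, 0.0])]
    x_star = cg(lambda xs: [A @ xs[0]], b, max_iter=10, epsilon=1e-8)
    return torch.allclose(A @ x_star[0], b[0], atol=1e-4)  # residual ~ 0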
|
Blackbox-Coresets-VI-main
|
psvi/hypergrad/CG_torch.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
from itertools import repeat
import torch
class DifferentiableOptimizer:
def __init__(self, loss_f, dim_mult, data_or_iter=None):
"""
Args:
loss_f: callable with signature (params, hparams, [data optional]) -> loss tensor
data_or_iter: (x, y) or iterator over the data needed for loss_f
"""
self.data_iterator = None
if data_or_iter:
self.data_iterator = (
data_or_iter
if hasattr(data_or_iter, "__next__")
else repeat(data_or_iter)
)
self.loss_f = loss_f
self.dim_mult = dim_mult
self.curr_loss = None
def get_opt_params(self, params):
opt_params = list(params)
for _ in range(self.dim_mult - 1):
opt_params.extend([torch.zeros_like(p) for p in params])
return opt_params
def step(self, params, hparams, create_graph):
raise NotImplementedError
def __call__(self, params, hparams, create_graph=True):
with torch.enable_grad():
return self.step(params, hparams, create_graph)
def get_loss(self, params, hparams):
if self.data_iterator:
data = next(self.data_iterator)
self.curr_loss = self.loss_f(params, hparams, data)
else:
self.curr_loss = self.loss_f(params, hparams)
return self.curr_loss
class GradientDescent(DifferentiableOptimizer):
def __init__(self, loss_f, step_size, data_or_iter=None):
super(GradientDescent, self).__init__(
loss_f, dim_mult=1, data_or_iter=data_or_iter
)
self.step_size_f = step_size if callable(step_size) else lambda x: step_size
def step(self, params, hparams, create_graph):
loss = self.get_loss(params, hparams)
sz = self.step_size_f(hparams)
return gd_step(params, loss, sz, create_graph=create_graph)
class HeavyBall(DifferentiableOptimizer):
def __init__(self, loss_f, step_size, momentum, data_or_iter=None):
super(HeavyBall, self).__init__(loss_f, dim_mult=2, data_or_iter=data_or_iter)
self.loss_f = loss_f
self.step_size_f = step_size if callable(step_size) else lambda x: step_size
self.momentum_f = momentum if callable(momentum) else lambda x: momentum
def step(self, params, hparams, create_graph):
n = len(params) // 2
p, p_aux = params[:n], params[n:]
loss = self.get_loss(p, hparams)
sz, mu = self.step_size_f(hparams), self.momentum_f(hparams)
p_new, p_new_aux = heavy_ball_step(
p, p_aux, loss, sz, mu, create_graph=create_graph
)
return [*p_new, *p_new_aux]
class Momentum(DifferentiableOptimizer):
"""
GD with momentum step as implemented in torch.optim.SGD
.. math::
v_{t+1} = \mu * v_{t} + g_{t+1} \\
p_{t+1} = p_{t} - lr * v_{t+1}
"""
def __init__(self, loss_f, step_size, momentum=0.9, data_or_iter=None):
super(Momentum, self).__init__(loss_f, dim_mult=2, data_or_iter=data_or_iter)
self.loss_f = loss_f
self.step_size_f = step_size if callable(step_size) else lambda x: step_size
self.momentum_f = momentum if callable(momentum) else lambda x: momentum
def step(self, params, hparams, create_graph):
n = len(params) // 2
p, p_aux = params[:n], params[n:]
loss = self.get_loss(p, hparams)
sz, mu = self.step_size_f(hparams), self.momentum_f(hparams)
p_new, p_new_aux = torch_momentum_step(
p, p_aux, loss, sz, mu, create_graph=create_graph
)
return [*p_new, *p_new_aux]
class DifferentiableAdam(DifferentiableOptimizer):
"""
DifferentiableAdam optimizer as implemented in torch.optim.Adam
.. math::
m_{t+1} = beta_1 * m_{t} + (1 - beta1) * g_{t}
u_{t+1} = beta_2 * u_{t} + (1 - beta2) * g_{t}^2
mh_{t+1} = mh_{t+1} / (1 - beta1**t)
uh_{t+1} = uh_{t+1} / (1 - beta2**t)
p_{t+1} = p_{t} - lr * mh_{t+1} / (sqrt(uh_{t+1} + eps))
"""
def __init__(
self,
loss_f,
step_size,
data_or_iter=None,
betas=(0.9, 0.999),
eps=1e-8,
step_cnt=1,
):
super(DifferentiableAdam, self).__init__(
loss_f, dim_mult=3, data_or_iter=data_or_iter
)
self.step_size_f = step_size if callable(step_size) else lambda x: step_size
(self.beta1, self.beta2) = betas
self.eps = eps
self.step_cnt = step_cnt
def step(self, params, hparams, create_graph):
n = len(params) // 3
p, m, u = params[:n], params[n : 2 * n], params[2 * n :]
loss = self.get_loss(p, hparams)
sz = self.step_size_f(hparams)
p_new, m_new, u_new = adam_step(
p,
m,
u,
loss,
sz,
self.step_cnt,
self.beta1,
self.beta2,
self.eps,
create_graph=create_graph,
)
self.step_cnt += 1
return [*p_new, *m_new, *u_new]
def gd_step(params, loss, step_size, create_graph=True):
grads = torch.autograd.grad(loss, params, create_graph=create_graph)
return [w - step_size * g for w, g in zip(params, grads)]
def heavy_ball_step(params, aux_params, loss, step_size, momentum, create_graph=True):
grads = torch.autograd.grad(loss, params, create_graph=create_graph)
return [
w - step_size * g + momentum * (w - v)
for g, w, v in zip(grads, params, aux_params)
], params
def torch_momentum_step(
params, aux_params, loss, step_size, momentum=0.9, create_graph=True
):
"""
GD with momentum step as implemented in torch.optim.SGD
.. math::
v_{t+1} = \mu * v_{t} + g_{t+1} \\
p_{t+1} = p_{t} - lr * v_{t+1}
"""
grads = torch.autograd.grad(loss, params, create_graph=create_graph)
new_aux_params = [momentum * v + g for v, g in zip(aux_params, grads)]
return [w - step_size * nv for w, nv in zip(params, new_aux_params)], new_aux_params
def adam_step(
params,
ms,
us,
loss,
step_size,
step_cnt,
beta1,
beta2,
eps,
momentum=0.9,
create_graph=True, # False when used with approximate implicit gradient; should be True otherwise
):
grads = torch.autograd.grad(loss, params, create_graph=create_graph)
new_m = [beta1 * m + (1.0 - beta1) * g for m, g in zip(ms, grads)]
new_u = [beta2 * u + (1.0 - beta2) * g**2 + 1e-12 for u, g in zip(us, grads)]
return (
[
w
- step_size
* (
m
/ (1.0 - beta1**step_cnt)
/ (torch.sqrt(u / (1 - beta2**step_cnt)) + eps)
)
for w, m, u in zip(params, new_m, new_u)
],
new_m,
new_u,
)
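# Illustrative sketch (hypothetical toy problem): GradientDescent minimizing the
# inner objective ||p - h||^2 whose minimizer depends on the hyperparameter h;
# because each step is taken with create_graph=True, the final iterate remains
# differentiable w.r.t. h, which is what the hypergradient routines rely on.
def _example_gradient_descent():
    h = [torch.tensor([2.0, -1.0], requires_grad=True)]
    inner = GradientDescent(lambda p, hp: ((p[0] - hp[0]) ** 2).sum(), step_size=0.1)
    p = [torch.zeros(2, requires_grad=True)]
    for _ in range(50):
        p = inner(p, h)  # one differentiable gradient step
    return p[0]          # approaches h[0] = [2., -1.]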
|
Blackbox-Coresets-VI-main
|
psvi/hypergrad/diff_optimizers.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
from .hypergradients import *
from .diff_optimizers import *
|
Blackbox-Coresets-VI-main
|
psvi/hypergrad/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
from typing import Callable, List
import torch
from torch import Tensor
from torch.autograd import grad as torch_grad
from . import CG_torch
# noinspection PyUnusedLocal
def reverse_unroll(
params: List[Tensor],
hparams: List[Tensor],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
set_grad=True,
) -> List[Tensor]:
"""
Computes the hypergradient by backpropagating through a previously employed inner solver procedure.
Args:
params: the output of a torch differentiable inner solver (it must depend on hparams in the torch graph)
hparams: the outer variables (or hyperparameters), each element needs requires_grad=True
outer_loss: computes the outer objective taking parameters and hyperparameters as inputs
set_grad: if True set t.grad to the hypergradient for every t in hparams
Returns:
the list of hypergradients for each element in hparams
"""
o_loss = outer_loss(params, hparams)
grads = torch.autograd.grad(o_loss, hparams, retain_graph=True)
if set_grad:
update_tensor_grads(hparams, grads)
return grads
# noinspection PyUnusedLocal
def reverse(
params_history: List[List[Tensor]],
hparams: List[Tensor],
update_map_history: List[Callable[[List[Tensor], List[Tensor]], List[Tensor]]],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
set_grad=True,
) -> List[Tensor]:
"""
Computes the hypergradient by recomputing and backpropagating through each inner update
using the inner iterates and the update maps previously employed by the inner solver.
    Similarly to checkpointing, this saves memory relative to reverse_unroll at the cost of extra computation time.
Truncated reverse can be performed by passing only part of the trajectory information, i.e. only the
last k inner iterates and updates.
Args:
params_history: the inner iterates (from first to last)
hparams: the outer variables (or hyperparameters), each element needs requires_grad=True
update_map_history: updates used to solve the inner problem (from first to last)
outer_loss: computes the outer objective taking parameters and hyperparameters as inputs
set_grad: if True set t.grad to the hypergradient for every t in hparams
Returns:
the list of hypergradients for each element in hparams
"""
params_history = [
[w.detach().requires_grad_(True) for w in params] for params in params_history
]
o_loss = outer_loss(params_history[-1], hparams)
grad_outer_w, grad_outer_hparams = get_outer_gradients(
o_loss, params_history[-1], hparams
)
alphas = grad_outer_w
grads = [torch.zeros_like(w) for w in hparams]
K = len(params_history) - 1
for k in range(-2, -(K + 2), -1):
w_mapped = update_map_history[k + 1](params_history[k], hparams)
bs = grad_unused_zero(w_mapped, hparams, grad_outputs=alphas, retain_graph=True)
grads = [g + b for g, b in zip(grads, bs)]
alphas = torch_grad(w_mapped, params_history[k], grad_outputs=alphas)
grads = [g + v for g, v in zip(grads, grad_outer_hparams)]
if set_grad:
update_tensor_grads(hparams, grads)
return grads
def fixed_point(
params: List[Tensor],
hparams: List[Tensor],
K: int,
fp_map: Callable[[List[Tensor], List[Tensor]], List[Tensor]],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
tol=1e-10,
set_grad=True,
stochastic=False,
) -> List[Tensor]:
"""
Computes the hypergradient by applying K steps of the fixed point method (it can end earlier when tol is reached).
Args:
params: the output of the inner solver procedure.
hparams: the outer variables (or hyperparameters), each element needs requires_grad=True
K: the maximum number of fixed point iterations
fp_map: the fixed point map which defines the inner problem
outer_loss: computes the outer objective taking parameters and hyperparameters as inputs
tol: end the method earlier when the normed difference between two iterates is less than tol
set_grad: if True set t.grad to the hypergradient for every t in hparams
stochastic: set this to True when fp_map is not a deterministic function of its inputs
Returns:
the list of hypergradients for each element in hparams
"""
params = [w.detach().requires_grad_(True) for w in params]
o_loss = outer_loss(params, hparams)
grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params, hparams)
if not stochastic:
w_mapped = fp_map(params, hparams)
vs = [torch.zeros_like(w) for w in params]
vs_vec = cat_list_to_tensor(vs)
for _ in range(K):
vs_prev_vec = vs_vec
if stochastic:
w_mapped = fp_map(params, hparams)
vs = torch_grad(w_mapped, params, grad_outputs=vs, retain_graph=False)
else:
vs = torch_grad(w_mapped, params, grad_outputs=vs, retain_graph=True)
vs = [v + gow for v, gow in zip(vs, grad_outer_w)]
vs_vec = cat_list_to_tensor(vs)
if float(torch.norm(vs_vec - vs_prev_vec)) < tol:
break
if stochastic:
w_mapped = fp_map(params, hparams)
grads = torch_grad(w_mapped, hparams, grad_outputs=vs, allow_unused=True)
grads = [g + v if g is not None else v for g, v in zip(grads, grad_outer_hparams)]
if set_grad:
update_tensor_grads(hparams, grads)
return grads
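# Illustrative sketch (hypothetical toy bilevel problem): the inner map below is a
# single gradient step on 0.5*||w - h||^2, whose fixed point is w* = h, and the
# outer loss is ||w - target||^2, so the exact hypergradient is 2*(h - target).
def _example_fixed_point():
    target = torch.tensor([1.0, -2.0])
    h = [torch.tensor([0.0, 0.0], requires_grad=True)]
    fp_map = lambda params, hparams: [params[0] - 0.5 * (params[0] - hparams[0])]
    outer_loss = lambda params, hparams: ((params[0] - target) ** 2).sum()
    w = [h[0].detach().clone()]  # pretend the inner solver returned w* = h
    grads = fixed_point(w, h, K=50, fp_map=fp_map, outer_loss=outer_loss, set_grad=False)
    return grads[0]  # approximately 2 * (h - target) = [-2., 4.]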
def CG(
params: List[Tensor],
hparams: List[Tensor],
K: int,
fp_map: Callable[[List[Tensor], List[Tensor]], List[Tensor]],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
tol=1e-10,
set_grad=True,
stochastic=False,
) -> List[Tensor]:
"""
Computes the hypergradient by applying K steps of the conjugate gradient method (CG).
It can end earlier when tol is reached.
Args:
params: the output of the inner solver procedure.
hparams: the outer variables (or hyperparameters), each element needs requires_grad=True
K: the maximum number of conjugate gradient iterations
fp_map: the fixed point map which defines the inner problem
outer_loss: computes the outer objective taking parameters and hyperparameters as inputs
tol: end the method earlier when the norm of the residual is less than tol
set_grad: if True set t.grad to the hypergradient for every t in hparams
stochastic: set this to True when fp_map is not a deterministic function of its inputs
Returns:
the list of hypergradients for each element in hparams
"""
params = [w.detach().requires_grad_(True) for w in params]
o_loss = outer_loss(params, hparams)
grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params, hparams)
if not stochastic:
w_mapped = fp_map(params, hparams)
def dfp_map_dw(xs):
if stochastic:
w_mapped_in = fp_map(params, hparams)
Jfp_mapTv = torch_grad(
w_mapped_in, params, grad_outputs=xs, retain_graph=False
)
else:
Jfp_mapTv = torch_grad(w_mapped, params, grad_outputs=xs, retain_graph=True)
return [v - j for v, j in zip(xs, Jfp_mapTv)]
vs = CG_torch.cg(
dfp_map_dw, grad_outer_w, max_iter=K, epsilon=tol
) # K steps of conjugate gradient
if stochastic:
w_mapped = fp_map(params, hparams)
grads = torch_grad(w_mapped, hparams, grad_outputs=vs)
grads = [g + v for g, v in zip(grads, grad_outer_hparams)]
if set_grad:
update_tensor_grads(hparams, grads)
return grads
def CG_normaleq(
params: List[Tensor],
hparams: List[Tensor],
K: int,
fp_map: Callable[[List[Tensor], List[Tensor]], List[Tensor]],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
tol=1e-10,
set_grad=True,
) -> List[Tensor]:
"""Similar to CG but the conjugate gradient is applied on the normal equation (has a higher time complexity)"""
params = [w.detach().requires_grad_(True) for w in params]
o_loss = outer_loss(params, hparams)
grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params, hparams)
w_mapped = fp_map(params, hparams)
def dfp_map_dw(xs):
Jfp_mapTv = torch_grad(w_mapped, params, grad_outputs=xs, retain_graph=True)
v_minus_Jfp_mapTv = [v - j for v, j in zip(xs, Jfp_mapTv)]
# normal equation part
Jfp_mapv_minus_Jfp_mapJfp_mapTv = jvp(
lambda _params: fp_map(_params, hparams), params, v_minus_Jfp_mapTv
)
return [
v - vv for v, vv in zip(v_minus_Jfp_mapTv, Jfp_mapv_minus_Jfp_mapJfp_mapTv)
]
v_minus_Jfp_mapv = [
g - jfp_mapv
for g, jfp_mapv in zip(
grad_outer_w,
jvp(lambda _params: fp_map(_params, hparams), params, grad_outer_w),
)
]
vs = CG_torch.cg(
dfp_map_dw, v_minus_Jfp_mapv, max_iter=K, epsilon=tol
) # K steps of conjugate gradient
grads = torch_grad(w_mapped, hparams, grad_outputs=vs, allow_unused=True)
grads = [g + v if g is not None else v for g, v in zip(grads, grad_outer_hparams)]
if set_grad:
update_tensor_grads(hparams, grads)
return grads
def neumann(
params: List[Tensor],
hparams: List[Tensor],
K: int,
fp_map: Callable[[List[Tensor], List[Tensor]], List[Tensor]],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
tol=1e-10,
set_grad=True,
) -> List[Tensor]:
"""Saves one iteration from the fixed point method"""
# from https://arxiv.org/pdf/1803.06396.pdf, should return the same gradient of fixed point K+1
params = [w.detach().requires_grad_(True) for w in params]
o_loss = outer_loss(params, hparams)
grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params, hparams)
w_mapped = fp_map(params, hparams)
vs, gs = grad_outer_w, grad_outer_w
gs_vec = cat_list_to_tensor(gs)
for k in range(K):
gs_prev_vec = gs_vec
vs = torch_grad(w_mapped, params, grad_outputs=vs, retain_graph=True)
gs = [g + v for g, v in zip(gs, vs)]
gs_vec = cat_list_to_tensor(gs)
if float(torch.norm(gs_vec - gs_prev_vec)) < tol:
break
grads = torch_grad(w_mapped, hparams, grad_outputs=gs)
grads = [g + v for g, v in zip(grads, grad_outer_hparams)]
if set_grad:
update_tensor_grads(hparams, grads)
return grads
def exact(
opt_params_f: Callable[[List[Tensor]], List[Tensor]],
hparams: List[Tensor],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
set_grad=True,
) -> List[Tensor]:
"""
    Computes the exact hypergradient using backpropagation, exploiting the closed-form, torch-differentiable function
that computes the optimal parameters given the hyperparameters (opt_params_f).
"""
grads = torch_grad(outer_loss(opt_params_f(hparams), hparams), hparams)
if set_grad:
update_tensor_grads(hparams, grads)
return grads
# UTILS
def grd(a, b):
return torch.autograd.grad(a, b, create_graph=True, retain_graph=True)
def list_dot(l1, l2): # extended dot product for lists
return torch.stack([(a * b).sum() for a, b in zip(l1, l2)]).sum()
def jvp(fp_map, params, vs):
dummy = [torch.ones_like(phw).requires_grad_(True) for phw in fp_map(params)]
g1 = grd(list_dot(fp_map(params), dummy), params)
return grd(list_dot(vs, g1), dummy)
def get_outer_gradients(outer_loss, params, hparams, retain_graph=True):
grad_outer_w = grad_unused_zero(outer_loss, params, retain_graph=retain_graph)
grad_outer_hparams = grad_unused_zero(
outer_loss, hparams, retain_graph=retain_graph
)
return grad_outer_w, grad_outer_hparams
def cat_list_to_tensor(list_tx):
return torch.cat([xx.view([-1]) for xx in list_tx])
def update_tensor_grads(hparams, grads):
for k, g in zip(hparams, grads):
if k.grad is None:
k.grad = torch.zeros_like(k)
if g is not None:
k.grad += g
def grad_unused_zero(
output, inputs, grad_outputs=None, retain_graph=False, create_graph=False
):
grads = torch.autograd.grad(
output,
inputs,
grad_outputs=grad_outputs,
allow_unused=True,
retain_graph=retain_graph,
create_graph=create_graph,
)
def grad_or_zeros(grad, var):
return torch.zeros_like(var) if grad is None else grad
return tuple(grad_or_zeros(g, v) for g, v in zip(grads, inputs))
|
Blackbox-Coresets-VI-main
|
psvi/hypergrad/hypergradients.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
import time
import numpy as np
import torch
import torch.distributions as dist
from torch.distributions.normal import Normal
from typing import Any, Dict
from psvi.models.logreg import model, laplace_precision, mcmc_sample, logreg_forward
from psvi.models.neural_net import categorical_fn, gaussian_fn, VILinear
from tqdm import tqdm
from psvi.experiments.experiments_utils import set_up_model, update_hyperparams_dict
from psvi.inference.utils import *
from torch.utils.data import DataLoader
from psvi.inference.psvi_classes import SubsetPreservingTransforms
from functools import partial
r"""
Implementations of baseline inference methods.
"""
def run_laplace(
theta,
mu0,
sigma0,
x_core,
y_core,
w_core,
optim_net,
inner_it=1000,
diagonal=True,
mc_samples=4,
seed=0,
**kwargs,
):
r"""
Returns samples from Laplace approximation
"""
random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
for _ in range(
inner_it
): # inner loop for Laplace approximation of current coreset iterate
optim_net.zero_grad()
ll_core, prior = model(theta, mu0, sigma0, x_core, y_core, single=True)
loss = -w_core.dot(ll_core) - prior # negative log-joint
loss.backward()
optim_net.step()
optim_net.zero_grad()
with torch.no_grad():
# samples from coreset iterate
prec = laplace_precision(x_core, theta, w_core, diagonal=diagonal)
laplace_approx = (
dist.MultivariateNormal(theta, precision_matrix=prec)
if not diagonal
else Normal(theta, prec**-0.5)
)
return laplace_approx.rsample((mc_samples,)).squeeze()
def run_random(
x=None,
y=None,
xt=None,
yt=None,
mc_samples=4,
num_epochs=100,
log_every=10,
N=None,
D=None,
seed=0,
mcmc=False,
lr0net=1e-3, # initial learning rate for optimizer
**kwargs,
) -> Dict[str, Any]:
r"""
Returns diagnostics from a Laplace or an MCMC fit on a random subset of the training data
"""
random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
w = torch.zeros(N).clone().detach() # coreset weights
nlls_random, accs_random, idcs_random, times_random, core_idcs = [], [], [], [0], []
x_test_aug = torch.cat((xt, torch.ones(xt.shape[0], 1)), dim=1)
x_aug = torch.cat((x, torch.ones(x.shape[0], 1)), dim=1)
t_start = time.time()
num_epochs = min(num_epochs, 2000) if mcmc else num_epochs
for it in tqdm(range(num_epochs)):
# Evaluate predictive performance of current coreset posterior
if it % log_every == 0:
if mcmc:
param_samples = mcmc_sample(sml, core_idcs, x, y, w, seed=seed)
else:
# model params prior
mu0, sigma0 = (
torch.zeros(D + 1),
torch.ones(D + 1),
)
theta0 = Normal(mu0, sigma0).rsample()
theta = torch.nn.Parameter(theta0, requires_grad=True)
optim_net = torch.optim.Adam([theta], lr0net)
param_samples = run_laplace(
theta,
mu0,
sigma0,
x_aug[core_idcs, :],
y[core_idcs],
w[core_idcs],
optim_net,
inner_it=1000,
diagonal=True,
mc_samples=mc_samples,
seed=seed,
)
times_random.append(times_random[-1] + time.time() - t_start)
test_probs = logreg_forward(param_samples, x_test_aug)
test_acc = test_probs.gt(0.5).float().eq(yt).float().mean()
test_nll = -dist.Bernoulli(probs=test_probs).log_prob(yt).mean()
nlls_random.append(test_nll.item()), accs_random.append(
test_acc.item()
), idcs_random.append(len(core_idcs))
print(f"predictive accuracy: {(100*test_acc.item()):.2f}%")
new_coreset_point = random.choice(
tuple(set(range(N)).difference(set(core_idcs)))
)
core_idcs.append(new_coreset_point) # attach a new random point
w[core_idcs] = N / len(core_idcs)
# store results
return {
"accs": accs_random,
"nlls": nlls_random,
"csizes": idcs_random,
"times": times_random[1:],
}
def run_giga(
x=None,
y=None,
xt=None,
yt=None,
mc_samples=100,
data_minibatch=512,
num_epochs=100,
log_every=10,
N=None,
D=None,
seed=0,
mcmc=False,
subset_size=200,
lr0net=1e-3,
**kwargs,
) -> Dict[str, Any]:
r"""
Returns diagnostics of a fit using the GIGA coreset (Campbell & Broderick, 2018)
"""
random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
mc_samples = max(
mc_samples,
50, # overwrite arg for num of mc_samples for more fine grained vectors
)
w = (
torch.zeros(N)
.clone()
.detach()
.requires_grad_(
requires_grad=False,
)
) # coreset weights
w_pred = (
torch.zeros(N)
.clone()
.detach()
.requires_grad_(
requires_grad=False,
)
) # rescaled weights for predictions
# model params prior
mu0, sigma0 = (
torch.zeros(D + 1),
torch.ones(D + 1),
)
nlls_giga, accs_giga, idcs_giga, times_giga = [], [], [], [0]
x_aug, x_test_aug = torch.cat((x, torch.ones(x.shape[0], 1)), dim=1), torch.cat(
(xt, torch.ones(xt.shape[0], 1)), dim=1
)
core_idcs = []
t_start = time.time()
# Approximate the true posterior via MCMC sampling on a random subset
# [this computation occurs once]
sub_idcs, sum_scaling = (
np.random.randint(x.shape[0], size=subset_size),
x.shape[0] / data_minibatch,
) # sample minibatch when accessing full data and rescale corresponding log-likelihood
if mcmc:
with torch.no_grad():
param_samples = mcmc_sample(
sml,
core_idcs,
x[sub_idcs, :],
y[sub_idcs],
sum_scaling * torch.ones_like(y[sub_idcs]),
n_samples=mc_samples,
)
else:
theta0 = Normal(mu0, sigma0).rsample()
theta = torch.nn.Parameter(theta0, requires_grad=True)
param_samples = run_laplace(
theta,
mu0,
sigma0,
x_aug[sub_idcs, :],
y[sub_idcs],
sum_scaling * torch.ones_like(y[sub_idcs]),
torch.optim.Adam([theta], lr0net),
inner_it=1000,
diagonal=True,
mc_samples=mc_samples,
seed=seed,
)
    lw = torch.zeros(mc_samples)  # normalised direction of the weighted coreset log-likelihood (in MC-sample space), initialised at zero
# Grow the coreset for a number of iterations
for it in tqdm(range(num_epochs)):
x_core, y_core = x_aug[core_idcs, :], y[core_idcs]
sub_idcs, _ = (
np.random.randint(x.shape[0], size=data_minibatch),
x.shape[0] / data_minibatch,
) # sample minibatch when accessing full data and rescale corresponding log-likelihood
ll_all, _ = model(
param_samples,
mu0,
sigma0,
torch.cat((x_aug[sub_idcs, :], x_core)),
torch.cat((y[sub_idcs], y_core)),
)
ll_data, ll_core = ll_all[: len(sub_idcs), :], ll_all[len(sub_idcs) :, :]
ll_data, ll_core = (
ll_data - ll_data.mean(axis=1).repeat(ll_data.shape[1], 1).T,
ll_core - ll_core.mean(axis=1).repeat(ll_core.shape[1], 1).T,
)
sum_lls = ll_data.sum(axis=0)
norm_lls = torch.nn.functional.normalize(ll_data, dim=1) # ell_n
norm_sumlls = torch.nn.functional.normalize(sum_lls, dim=0) # ell
denom_sumlls = sum_lls.norm(p=2, dim=0) # ||L||
if it % log_every == 0: # log predictive performance
# Rescaling weights for unnormalized likelihoods in predictions
if len(core_idcs) > 0:
w_pred[core_idcs] = (
w[core_idcs]
* denom_sumlls
/ ll_core.norm(p=2, dim=1)
* lw.dot(norm_sumlls)
)
if mcmc:
predictive_samples = mcmc_sample(sml, core_idcs, x, y, w_pred)
else:
theta0 = Normal(mu0, sigma0).rsample()
theta = torch.nn.Parameter(theta0, requires_grad=True)
optim_net = torch.optim.Adam([theta], lr0net)
predictive_samples = run_laplace(
theta,
mu0,
sigma0,
x_aug[core_idcs, :],
y[core_idcs],
w[core_idcs].detach(),
optim_net,
inner_it=100,
diagonal=True,
mc_samples=mc_samples,
seed=seed,
)
times_giga.append(times_giga[-1] + time.time() - t_start)
test_probs = logreg_forward(predictive_samples, x_test_aug)
test_acc = test_probs.gt(0.5).float().eq(yt).float().mean()
test_nll = -dist.Bernoulli(probs=test_probs).log_prob(yt).mean()
print(f"predictive accuracy: {(100*test_acc.item()):.2f}%")
nlls_giga.append(test_nll.item())
accs_giga.append(test_acc.item())
idcs_giga.append(len(w[w > 0]))
# Compute geodesic direction of each datapoint, make greedy next point selection and compute the step size
d = torch.nn.functional.normalize(
norm_sumlls - norm_sumlls.dot(lw) * lw, dim=0
)
lwr = lw.repeat(len(sub_idcs), 1)
dns = torch.nn.functional.normalize(
norm_lls
- torch.einsum(
"n, ns -> ns", torch.einsum("ns, ns -> n", lwr, norm_lls), lwr
),
dim=1,
)
# new datapoint selection
pt_idx = sub_idcs[torch.argmax(torch.einsum("s, ns -> n", d, dns))]
if pt_idx not in core_idcs:
core_idcs.append(pt_idx) # list of coreset point indices
idx_new = -1
x_core, y_core = (
x_aug[core_idcs, :],
y[core_idcs],
) # updated coreset support
ll_all, _ = model(
param_samples,
mu0,
sigma0,
torch.cat((x_aug[sub_idcs, :], x_core)),
torch.cat((y[sub_idcs], y_core)),
)
ll_core = ll_all[len(sub_idcs) :, :]
ll_core = ll_core - ll_core.mean(axis=1).repeat(ll_core.shape[1], 1).T
norm_ll_core = torch.nn.functional.normalize(
ll_core, dim=1
) # ell_n_core
else:
idx_new = core_idcs.index(pt_idx)
zeta0, zeta1, zeta2 = (
norm_sumlls.dot(norm_ll_core[idx_new, :]),
norm_sumlls.dot(lw),
norm_ll_core[idx_new, :].dot(lw),
)
gamma = (zeta0 - zeta1 * zeta2) / (
zeta0 - zeta1 * zeta2 + zeta1 - zeta0 * zeta2
)
lw = torch.nn.functional.normalize(
(1 - gamma) * lw + gamma * norm_ll_core[idx_new, :], dim=0
)
# Optimal weight calibration
w = (
(1 - gamma) * w
+ gamma
* torch.nn.functional.one_hot(torch.tensor(pt_idx), num_classes=N)
) / torch.norm((1 - gamma) * lw + gamma * norm_ll_core[idx_new, :])
with torch.no_grad():
torch.clamp_(w, min=0)
# store results
return {
"accs": accs_giga,
"nlls": nlls_giga,
"csizes": idcs_giga,
"times": times_giga[1:],
}
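# Illustrative sketch of the GIGA geodesic step used above, on hypothetical unit vectors:
# `ell` is the normalised sum of log-likelihoods, `ell_n` the normalised log-likelihood of
# the selected point, and `lw` the current normalised coreset direction.
def _example_giga_geodesic_step():
    import torch

    torch.manual_seed(0)
    ell = torch.nn.functional.normalize(torch.randn(16), dim=0)
    ell_n = torch.nn.functional.normalize(torch.randn(16), dim=0)
    lw = torch.nn.functional.normalize(torch.randn(16), dim=0)
    zeta0, zeta1, zeta2 = ell.dot(ell_n), ell.dot(lw), ell_n.dot(lw)
    # step size along the geodesic from lw towards ell_n (cf. Campbell & Broderick, 2018)
    gamma = (zeta0 - zeta1 * zeta2) / (zeta0 - zeta1 * zeta2 + zeta1 - zeta0 * zeta2)
    lw_new = torch.nn.functional.normalize((1 - gamma) * lw + gamma * ell_n, dim=0)
    return gamma, lw_new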
def run_sparsevi(
x=None,
y=None,
xt=None,
yt=None,
mc_samples=4,
data_minibatch=128,
num_epochs=100,
log_every=10,
N=None,
D=None,
diagonal=True,
inner_it=10,
outer_it=10,
lr0net=1e-3,
lr0v=1e-1,
seed=0,
mcmc=False,
**kwargs,
) -> Dict[str, Any]: # max coreset size
r"""
Returns diagnostics of a fit using Sparse VI (Campbell & Beronov, 2019)
"""
    def resc(N, w, core_idcs):
        # rescaling factor for the coreset weights; currently the identity
        # (an alternative is N / sum(w[core_idcs]) whenever that sum is positive)
        return 1.0
outer_it = min(outer_it, 500) # cap to maximum value for num_epochs and outer_it
num_epochs = min(num_epochs, 2000) if mcmc else num_epochs
random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
    w = torch.zeros(N, requires_grad=True)  # coreset weights
# model params prior
mu0, sigma0 = (
torch.zeros(D + 1),
torch.ones(D + 1),
)
nlls_svi, accs_svi, idcs_svi, times_svi = [], [], [], [0]
x_aug, x_test_aug = torch.cat((x, torch.ones(x.shape[0], 1)), dim=1), torch.cat(
(xt, torch.ones(xt.shape[0], 1)), dim=1
)
# Grow the coreset for a number of iterations
core_idcs = []
t_start = time.time()
for it in tqdm(range(num_epochs)):
# Evaluate predictive performance of current coreset posterior
if it % log_every == 0:
if mcmc:
param_samples = mcmc_sample(sml, core_idcs, x, y, w)
else:
theta0 = Normal(mu0, sigma0).rsample()
theta = torch.nn.Parameter(theta0, requires_grad=True)
optim_net = torch.optim.Adam([theta], lr0net)
param_samples = run_laplace(
theta,
mu0,
sigma0,
x_aug[core_idcs, :],
y[core_idcs],
resc(N, w.detach(), core_idcs)*w[core_idcs].detach(),
torch.optim.Adam([theta], lr0net),
inner_it=1000,
diagonal=True,
mc_samples=mc_samples,
)
times_svi.append(times_svi[-1] + time.time() - t_start)
test_probs = logreg_forward(param_samples, x_test_aug)
test_acc = test_probs.gt(0.5).float().eq(yt).float().mean()
test_nll = -dist.Bernoulli(probs=test_probs).log_prob(yt).mean()
nlls_svi.append(test_nll.item())
accs_svi.append(test_acc.item())
idcs_svi.append(len(core_idcs))
print(f"predictive accuracy: {(100*test_acc.item()):.2f}%")
# 1. Compute current coreset posterior using Laplace approximation on coreset points
sub_idcs, sum_scaling = (
np.random.randint(x.shape[0], size=data_minibatch),
x.shape[0] / data_minibatch,
) # sample minibatch when accessing full data and rescale corresponding log-likelihood
x_core, y_core = x_aug[core_idcs, :], y[core_idcs]
theta0 = Normal(mu0, sigma0).rsample()
theta = torch.nn.Parameter(theta0, requires_grad=True)
optim_net = torch.optim.Adam([theta], lr0net)
for _ in range(
inner_it
): # inner loop for Laplace approximation of current coreset iterate
optim_net.zero_grad()
ll_core, prior = model(theta, mu0, sigma0, x_core, y_core, single=True)
loss = -resc(N, w, core_idcs)*w[core_idcs].dot(ll_core) - prior # negative log-joint
loss.backward()
optim_net.step()
with torch.no_grad():
# samples from coreset iterate
prec = laplace_precision(x_core, theta, resc(N, w, core_idcs)*w[core_idcs], diagonal=diagonal)
laplace_approx = (
dist.MultivariateNormal(theta, precision_matrix=prec)
if not diagonal
else Normal(theta, prec**-0.5)
)
param_samples = laplace_approx.rsample((mc_samples,)).squeeze()
# 2. Compute loglikelihoods for each sample
ll_all, _ = model(
param_samples,
mu0,
sigma0,
torch.cat((x_aug[sub_idcs, :], x_core)),
torch.cat((y[sub_idcs], y_core)),
)
ll_data, ll_core = ll_all[: len(sub_idcs), :], ll_all[len(sub_idcs) :, :]
cll_data, cll_core = (
ll_data - ll_data.mean(axis=1).repeat(ll_data.shape[1], 1).T,
ll_core - ll_core.mean(axis=1).repeat(ll_core.shape[1], 1).T,
)
# 3. Select point to attach to the coreset next
resid = sum_scaling * cll_data.sum(axis=0) - resc(N, w, core_idcs)*w[core_idcs].matmul(cll_core)
corrs = (
cll_data.matmul(resid)
/ torch.sqrt((cll_data**2).sum(axis=1))
/ cll_data.shape[1]
)
corecorrs = (
torch.abs(cll_core.matmul(resid))
/ torch.sqrt((cll_core**2).sum(axis=1))
/ cll_core.shape[1]
)
        if corecorrs.shape[0] == 0 or corrs.max() > corecorrs.max():
            pt_idx = sub_idcs[torch.argmax(corrs)]
            if pt_idx not in core_idcs:
                print(f"\nAdding new point. Support increased to {len(core_idcs) + 1} \n")
                core_idcs.append(pt_idx)
            else:
                print("\nImproving fit with current support \n")
        else:
            print("\nImproving fit with current support \n")
        print(f"sum of rescaled coreset weights: {(resc(N, w, core_idcs) * w[w > 0]).sum()}")
# 4. Sample for updated weights and take projected gradient descent steps on the weights
# sample from updated model
x_core, y_core = x_aug[core_idcs, :], y[core_idcs]
        optim_w = torch.optim.Adam([w], lr0v)  # optionally, the weight learning rate could be decayed as lr0v / (1.0 + it)
theta0 = Normal(mu0, sigma0).rsample()
theta = torch.nn.Parameter(theta0, requires_grad=True)
for _ in range(outer_it):
optim_net = torch.optim.Adam([theta], lr0net)
for _ in range(
inner_it
): # inner loop for Laplace approximation of current coreset iterate
# negative log-joint
optim_net.zero_grad()
ll, prior = model(theta, mu0, sigma0, x_core, y_core, single=True)
loss = -resc(N, w, core_idcs)*w[core_idcs].dot(ll) - prior
loss.backward()
optim_net.step()
with torch.no_grad():
# samples from coreset iterate
prec = laplace_precision(x_core, theta, resc(N, w, core_idcs)*w[core_idcs], diagonal=diagonal)
laplace_approx = (
dist.MultivariateNormal(theta, precision_matrix=prec)
if not diagonal
else Normal(theta, prec**-0.5)
)
param_samples = laplace_approx.rsample((mc_samples,)).squeeze()
sub_idcs, sum_scaling = (
np.random.randint(x_aug.shape[0], size=data_minibatch),
x.shape[0] / data_minibatch,
) # sample minibatch when accessing full data and rescale corresponding log-likelihood
# compute w_grad
ll_all, _ = model(
param_samples,
mu0,
sigma0,
torch.cat((x_aug[sub_idcs, :], x_core)),
torch.cat((y[sub_idcs], y_core)),
)
ll_data, ll_core = (
ll_all[: len(sub_idcs), :],
ll_all[len(sub_idcs) :, :],
)
cll_data, cll_core = (
ll_data - ll_data.mean(axis=1).repeat(ll_data.shape[1], 1).T,
ll_core - ll_core.mean(axis=1).repeat(ll_core.shape[1], 1).T,
)
resid = sum_scaling * cll_data.sum(axis=0) - resc(N, w, core_idcs) * w[core_idcs].matmul(
cll_core
)
w.grad.data[core_idcs] = (-cll_core.matmul(resid) / cll_core.shape[1]) / resc(N, w, core_idcs)
optim_w.step()
with torch.no_grad():
torch.clamp_(w, 0)
# store results
return {
"nlls": nlls_svi,
"accs": accs_svi,
"csizes": idcs_svi,
"times": times_svi[1:],
}
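# Illustrative sketch of the Sparse VI greedy selection score computed above, on hypothetical
# centred log-likelihood matrices (rows index datapoints, columns index MC samples).
def _example_sparsevi_selection_score():
    import torch

    torch.manual_seed(0)
    S = 8                                  # number of MC samples
    cll_data = torch.randn(32, S)          # centred log-likelihoods of candidate points
    cll_core = torch.randn(4, S)           # centred log-likelihoods of current coreset points
    w_core = torch.rand(4)                 # current coreset weights
    resid = cll_data.sum(axis=0) - w_core.matmul(cll_core)  # residual error direction
    # correlation of each candidate with the residual; the argmax is the next point to attach
    corrs = cll_data.matmul(resid) / torch.sqrt((cll_data ** 2).sum(axis=1)) / S
    return int(torch.argmax(corrs))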
def run_opsvi(
x=None,
y=None,
xt=None,
yt=None,
mc_samples=10,
data_minibatch=128,
num_epochs=100,
log_every=10,
N=None,
D=None,
num_pseudo=10,
inner_it=10,
diagonal=True,
lr0net=1e-3,
lr0u=1e-3,
lr0v=1e-3,
register_elbos=False,
init_args="subsample",
seed=0,
mcmc=False,
log_pseudodata=False,
**kwargs,
) -> Dict[str, Any]:
r"""
Returns diagnostics of a fit using the original PSVI construction (Manousakas et al, 2020)
"""
random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
us, zs, ws, core_idcs_opsvi, elbos_opsvi = [], [], [], [], []
nlls_opsvi, accs_opsvi, idcs_opsvi, times_opsvi = [], [], [], [0]
with torch.no_grad():
w = N / num_pseudo * (torch.ones(num_pseudo).clone().detach())
w.requires_grad_(
requires_grad=True,
) # coreset weights
# model params prior
mu0, sigma0 = (
torch.zeros(D + 1),
torch.ones(D + 1),
)
theta0 = Normal(mu0, sigma0).rsample()
theta = torch.nn.Parameter(theta0, requires_grad=True)
x_aug, x_test_aug = torch.cat((x, torch.ones(x.shape[0], 1)), dim=1), torch.cat(
(xt, torch.ones(xt.shape[0], 1)), dim=1
)
# initialization of pseudodata
with torch.no_grad():
u, z = (
pseudo_rand_init(x, y, num_pseudo=num_pseudo, seed=seed)
if init_args == "random"
else pseudo_subsample_init(x, y, num_pseudo=num_pseudo, seed=seed)
)
u, z = (
torch.cat((u, torch.ones(u.shape[0], 1)), dim=1)
.clone()
.detach()
.requires_grad_(True)
).float(), z.float()
optim_net = torch.optim.Adam([theta], lr0net)
optim_u = torch.optim.Adam([u], lr0u)
optim_w = torch.optim.Adam([w], lr0v * N)
t_start = time.time()
for it in tqdm(range(num_epochs)):
# Evaluate predictive performance of current coreset posterior
if it % log_every == 0:
param_samples = (
mcmc_sample(sml, list(range(num_pseudo)), u[:, :-1], z, w)
if mcmc
else run_laplace(
theta,
mu0,
sigma0,
u,
z,
w.detach(),
torch.optim.Adam([theta], lr0net),
inner_it=inner_it,
diagonal=True,
mc_samples=mc_samples,
seed=seed,
)
)
times_opsvi.append(times_opsvi[-1] + time.time() - t_start)
test_probs = logreg_forward(param_samples, x_test_aug)
test_acc = test_probs.gt(0.5).float().eq(yt).float().mean()
test_nll = -dist.Bernoulli(probs=test_probs).log_prob(yt).mean()
core_idcs_opsvi.append(num_pseudo)
nlls_opsvi.append(test_nll.item())
accs_opsvi.append(test_acc.item())
idcs_opsvi.append(num_pseudo)
print(f"predictive accuracy: {(100*test_acc.item()):.2f}%")
us.append(u.detach().numpy())
zs.append(z.detach().numpy())
ws.append(w.detach().numpy())
# 1. Compute current coreset posterior using Laplace approximation on coreset points
x_core, y_core = u, z
# Sample for updated weights and take projected gradient descent steps on the weights
optim_net = torch.optim.Adam([theta], lr0net)
for in_it in range(
inner_it
): # inner loop for Laplace approximation of current coreset iterate
# negative log-joint
optim_net.zero_grad()
ll, prior = model(theta, mu0, sigma0, x_core, y_core, single=True)
loss = -w.dot(ll) - prior
loss.backward()
if register_elbos and in_it % log_every == 0:
with torch.no_grad():
elbos_opsvi.append((1, -loss.item()))
optim_net.step()
optim_w.zero_grad()
optim_u.zero_grad()
with torch.no_grad():
# samples from coreset iterate
prec = laplace_precision(x_core, theta, w, diagonal=diagonal)
laplace_approx = (
dist.MultivariateNormal(theta, precision_matrix=prec)
if not diagonal
else Normal(theta, prec**-0.5)
)
param_samples = laplace_approx.rsample((mc_samples,)).squeeze()
sub_idcs, sum_scaling = (
np.random.randint(x_aug.shape[0], size=data_minibatch),
x.shape[0] / data_minibatch,
) # sample minibatch when accessing full data and rescale corresponding log-likelihood
# compute w_grad and u_grad
ll_all, _ = model(
param_samples,
mu0,
sigma0,
torch.cat((x_aug[sub_idcs, :], x_core)),
torch.cat((y[sub_idcs], y_core)),
)
ll_data, ll_core = (
ll_all[: len(sub_idcs), :],
ll_all[len(sub_idcs) :, :],
)
cll_data, cll_core = (
ll_data - ll_data.mean(axis=1).repeat(ll_data.shape[1], 1).T,
ll_core - ll_core.mean(axis=1).repeat(ll_core.shape[1], 1).T,
)
resid = sum_scaling * cll_data.sum(axis=0) - w.matmul(cll_core)
w.grad.data = -cll_core.matmul(resid) / cll_core.shape[1]
u_function = (
torch.matmul(torch.einsum("m,ms->s", -w.detach(), cll_core), resid.detach())
/ cll_core.shape[1]
)
u.grad.data = torch.autograd.grad(u_function, u)[0]
u.grad.data[:, -1] = 0 # zero gradient on the last column
optim_w.step()
optim_u.step()
with torch.no_grad():
torch.clamp_(w, 0)
# store results
results = {
"accs": accs_opsvi,
"nlls": nlls_opsvi,
"csizes": core_idcs_opsvi,
"times": times_opsvi[1:],
"elbos": elbos_opsvi,
}
if log_pseudodata:
results["us"], results["zs"], results["vs"] = us, zs, ws
return results
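# Illustrative sketch of taking gradients with respect to the pseudo-inputs via
# `torch.autograd.grad`, as done for `u` above; the quadratic objective is a hypothetical
# stand-in for the coreset objective.
def _example_grad_wrt_pseudoinputs():
    import torch

    torch.manual_seed(0)
    u_ = torch.randn(5, 3, requires_grad=True)  # pseudo-inputs, last column playing the role of a bias
    objective = (u_ ** 2).sum()                 # scalar objective depending on the pseudo-inputs
    (grad_u,) = torch.autograd.grad(objective, u_)
    grad_u = grad_u.clone()
    grad_u[:, -1] = 0                           # freeze the bias column, as in run_opsvi above
    return grad_u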
def run_mfvi(
xt=None,
yt=None,
mc_samples=4,
data_minibatch=128,
num_epochs=100,
log_every=10,
N=None,
D=None,
lr0net=1e-3, # initial learning rate for optimizer
mul_fact=2, # multiplicative factor for total number of gradient iterations in classical vi methods
seed=0,
distr_fn=categorical_fn,
architecture=None,
n_hidden=None,
nc=2,
log_pseudodata=False,
train_dataset=None,
test_dataset=None,
init_sd=None,
**kwargs,
) -> Dict[str, Any]:
r"""
Returns diagnostics using a mean-field VI fit on the full training dataset. Implementation supporting pytorch dataloaders
(To be used only in the BNN experiment flows)
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
nlls_mfvi, accs_mfvi, times_mfvi, elbos_mfvi, grid_preds = [], [], [0], [], []
t_start = time.time()
net = set_up_model(
architecture=architecture, D=D, n_hidden=n_hidden, nc=nc, mc_samples=mc_samples, init_sd=init_sd,
).to(device)
train_loader = DataLoader(
train_dataset,
batch_size=data_minibatch,
pin_memory=True,
shuffle=True,
)
n_train = len(train_loader.dataset)
test_loader = DataLoader(
test_dataset,
batch_size=data_minibatch,
pin_memory=True,
shuffle=True,
)
optim_vi = torch.optim.Adam(net.parameters(), lr0net)
total_iterations = mul_fact * num_epochs
checkpts = list(range(mul_fact * num_epochs))[::log_every]
lpit = [checkpts[idx] for idx in [0, len(checkpts) // 2, -1]]
for i in tqdm(range(total_iterations)):
xbatch, ybatch = next(iter(train_loader))
xbatch, ybatch = xbatch.to(device, non_blocking=True), ybatch.to(
device, non_blocking=True
)
optim_vi.zero_grad()
data_nll = -(
n_train
/ xbatch.shape[0]
* distr_fn(logits=net(xbatch).squeeze(-1)).log_prob(ybatch).sum()
)
kl = sum(m.kl() for m in net.modules() if isinstance(m, VILinear))
mfvi_loss = data_nll + kl
mfvi_loss.backward()
optim_vi.step()
with torch.no_grad():
elbos_mfvi.append(-mfvi_loss.item())
        if i % log_every == 0 or i == total_iterations - 1:
total, test_nll, corrects = 0, 0, 0
for xt, yt in test_loader:
xt, yt = xt.to(device, non_blocking=True), yt.to(
device, non_blocking=True
)
with torch.no_grad():
test_logits = net(xt).squeeze(-1).mean(0)
corrects += test_logits.argmax(-1).float().eq(yt).float().sum()
total += yt.size(0)
test_nll += -distr_fn(logits=test_logits).log_prob(yt).sum()
if log_pseudodata and i in lpit:
grid_preds.append(pred_on_grid(net, device=device).detach().cpu().numpy().T)
times_mfvi.append(times_mfvi[-1] + time.time() - t_start)
nlls_mfvi.append((test_nll / float(total)).item())
accs_mfvi.append((corrects / float(total)).item())
print(f"predictive accuracy: {(100*accs_mfvi[-1]):.2f}%")
# store results
results = {
"accs": accs_mfvi,
"nlls": nlls_mfvi,
"times": times_mfvi[1:],
"elbos": elbos_mfvi,
"csizes": None,
}
if log_pseudodata:
results["grid_preds"] = grid_preds
return results
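# Illustrative sketch of the minibatch rescaling used in the mean-field VI loss above: the
# batch negative log-likelihood is multiplied by n_train / batch_size so the stochastic ELBO
# is an unbiased estimate of the full-data ELBO (Bernoulli likelihood and KL value are toy stand-ins).
def _example_minibatch_elbo_scaling():
    import torch
    import torch.distributions as torch_dist

    torch.manual_seed(0)
    n_train, batch = 1000, 128
    logits = torch.randn(batch)
    ybatch = torch.bernoulli(torch.sigmoid(torch.randn(batch)))
    kl = torch.tensor(3.7)  # stand-in for the summed KL of the variational layers
    data_nll = -(n_train / batch) * torch_dist.Bernoulli(logits=logits).log_prob(ybatch).sum()
    return data_nll + kl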
def run_mfvi_subset(
x=None,
y=None,
xt=None,
yt=None,
mc_samples=4,
data_minibatch=128,
num_epochs=100,
log_every=10,
D=None,
lr0net=1e-3, # initial learning rate for optimizer
mul_fact=2, # multiplicative factor for total number of gradient iterations in classical vi methods
seed=0,
distr_fn=categorical_fn,
log_pseudodata=False,
train_dataset=None,
test_dataset=None,
num_pseudo=100, # constrain on random subset with size equal to the max coreset size in the experiment
init_args="subsample",
architecture=None,
n_hidden=None,
nc=2,
dnm=None,
init_sd=None,
**kwargs,
) -> Dict[str, Any]:
r"""
Returns diagnostics using a mean-field VI fit on a random subset of the training dataset with specified size. Implementation supporting pytorch dataloaders
(To be used only in the BNN experiment flows)
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
nlls_mfvi, accs_mfvi, times_mfvi, elbos_mfvi, grid_preds = [], [], [0], [], []
t_start = time.time()
net = set_up_model(
architecture=architecture, D=D, n_hidden=n_hidden, nc=nc, mc_samples=mc_samples, init_sd=init_sd,
).to(device)
if dnm=="MNIST":
train_loader = DataLoader(
train_dataset,
batch_size=data_minibatch,
# pin_memory=True,
shuffle=True,
)
n_train = len(train_loader.dataset)
points_per_class = [num_pseudo // nc] * nc # split equally among classes
points_per_class[-1] = num_pseudo - sum(points_per_class[:-1])
ybatch = (
torch.tensor(
[
item
for sublist in [[i] * ppc for i, ppc in enumerate(points_per_class)]
for item in sublist
]
)
.float()
.to(device, non_blocking=True)
)
def get_x_from_label(ipc, _l):
indices = (
torch.as_tensor(train_dataset.targets).clone().detach() == _l
).nonzero()
return torch.utils.data.DataLoader(
SubsetPreservingTransforms(train_dataset, indices=indices, dnm=dnm),
batch_size=ipc,
shuffle=True,
)
distilled_lst = []
for c in range(nc):
u0 = next(iter(get_x_from_label(points_per_class[c], c)))
distilled_lst.append(u0.to(device=device, non_blocking=True))
xbatch = torch.cat(distilled_lst).to(device, non_blocking=True)
else:
xbatch, ybatch = (
pseudo_rand_init(x, y, num_pseudo=num_pseudo, seed=seed, nc=nc)
if init_args == "random"
else pseudo_subsample_init(x, y, num_pseudo=num_pseudo, seed=seed, nc=nc)
)
n_train = len(train_dataset)
test_loader = DataLoader(
test_dataset,
batch_size=data_minibatch,
pin_memory=True,
shuffle=True,
)
optim_vi = torch.optim.Adam(net.parameters(), lr0net)
sum_scaling = n_train / num_pseudo
total_iterations = mul_fact * num_epochs
checkpts = list(range(mul_fact * num_epochs))[::log_every]
lpit = [checkpts[idx] for idx in [0, len(checkpts) // 2, -1]]
for i in tqdm(range(total_iterations)):
xbatch, ybatch = xbatch.to(device), ybatch.to(device)
optim_vi.zero_grad()
data_nll = (
-sum_scaling
* distr_fn(logits=net(xbatch).squeeze(-1)).log_prob(ybatch).sum()
)
kl = sum(m.kl() for m in net.modules() if isinstance(m, VILinear))
mfvi_loss = data_nll + kl
mfvi_loss.backward()
optim_vi.step()
with torch.no_grad():
elbos_mfvi.append(-mfvi_loss.item())
if i % log_every == 0:
total, test_nll, corrects = 0, 0, 0
for xt, yt in test_loader:
xt, yt = xt.to(device, non_blocking=True), yt.to(
device, non_blocking=True
)
with torch.no_grad():
test_logits = net(xt).squeeze(-1).mean(0)
corrects += test_logits.argmax(-1).float().eq(yt).float().sum()
total += yt.size(0)
test_nll += -distr_fn(logits=test_logits).log_prob(yt).sum()
            if log_pseudodata and i in lpit:
                grid_preds.append(
                    pred_on_grid(net, device=device).detach().cpu().numpy().T
                )
times_mfvi.append(times_mfvi[-1] + time.time() - t_start)
nlls_mfvi.append((test_nll / float(total)).item())
accs_mfvi.append((corrects / float(total)).item())
print(f"predictive accuracy: {(100*accs_mfvi[-1]):.2f}%")
# store results
results = {
"accs": accs_mfvi,
"nlls": nlls_mfvi,
"times": times_mfvi[1:],
"elbos": elbos_mfvi,
"csizes": [num_pseudo] * (mul_fact * num_epochs),
}
if log_pseudodata:
results["grid_preds"] = grid_preds
results["us"], results["zs"], results["vs"] = xbatch.detach(), ybatch.detach(), [sum_scaling]*num_pseudo
return results
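# Illustrative sketch of the per-class split of the pseudodata budget used above, with the
# remainder assigned to the last class.
def _example_points_per_class(num_pseudo=100, nc=3):
    points_per_class = [num_pseudo // nc] * nc                       # e.g. [33, 33, 33]
    points_per_class[-1] = num_pseudo - sum(points_per_class[:-1])   # e.g. [33, 33, 34]
    return points_per_class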
# MFVI for BNN regression
def run_mfvi_regressor(
mc_samples=4,
data_minibatch=128,
num_epochs=100,
log_every=10,
D=None,
lr0net=1e-3, # initial learning rate for optimizer
seed=0,
architecture=None,
n_hidden=None,
train_dataset=None,
val_dataset=None,
test_dataset=None,
nc=1,
y_mean=None,
y_std=None,
taus=None,
init_sd=1e-6,
    model_selection=True,
dnm=None,
**kwargs,
) -> Dict[str, Any]:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
# normalized x train, normalized targets
train_loader = DataLoader(
train_dataset,
batch_size=data_minibatch,
# pin_memory=True,
shuffle=False,
)
# normalized x test, unnormalized targets
test_loader, val_loader, n_train = (
DataLoader(
test_dataset,
batch_size=data_minibatch,
# pin_memory=True,
shuffle=False,
),
DataLoader(
val_dataset,
batch_size=data_minibatch,
# pin_memory=True,
shuffle=False,
),
len(train_loader.dataset),
)
bpe = max(1, int(n_train / data_minibatch)) # batches per epoch
def revert_norm(y_pred):
return y_pred * y_std + y_mean
best_tau, best_ll = taus[0], -float("inf")
if model_selection:
# model selection
print("\nOptimizing precision hyperparameter")
for tau in taus:
print(f"\n\nTrying tau = {tau}")
net = set_up_model(
architecture=architecture,
D=D,
n_hidden=n_hidden,
nc=nc,
mc_samples=mc_samples,
init_sd=init_sd,
).to(device)
optim_vi = torch.optim.Adam(net.parameters(), lr0net)
tau_fit = fit(
net=net,
optim_vi=optim_vi,
train_loader=train_loader,
pred_loader=val_loader,
revert_norm=revert_norm,
log_every=-1,
tau=tau,
epochs=num_epochs * bpe,
device=device,
)
if tau_fit["lls"][-1] > best_ll:
best_tau, best_ll = tau, tau_fit["lls"][-1]
print(f"current best tau, best ll : {best_tau}, {best_ll}")
else:
best_tau = taus[0]
print(f"\n\nselected tau : {best_tau}\n\n")
update_hyperparams_dict(dnm, best_tau)
net = set_up_model(
architecture=architecture,
D=D,
n_hidden=n_hidden,
nc=nc,
mc_samples=mc_samples,
init_sd=init_sd,
).to(device)
optim_vi = torch.optim.Adam(net.parameters(), lr0net)
results = fit(
net=net,
optim_vi=optim_vi,
train_loader=train_loader,
pred_loader=test_loader,
revert_norm=revert_norm,
log_every=log_every,
tau=best_tau,
epochs=num_epochs * bpe,
device=device,
)
return results
# MFVI Subset for BNN regression
def run_mfvi_subset_regressor(
mc_samples=4,
data_minibatch=128,
num_epochs=100,
log_every=10,
D=None,
lr0net=1e-3, # initial learning rate for optimizer
seed=0,
architecture=None,
n_hidden=None,
train_dataset=None,
val_dataset=None,
test_dataset=None,
nc=1,
y_mean=None,
y_std=None,
init_sd=1e-6,
num_pseudo=100, # constrain on random subset with size equal to the max coreset size in the experiment
taus=None,
    model_selection=False,
**kwargs,
) -> Dict[str, Any]:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
random.seed(seed), np.random.seed(seed), torch.manual_seed(seed)
# normalized x train, normalized targets
sample_idcs = random.sample(range(len(train_dataset)), num_pseudo)
subset_train_dataset = torch.utils.data.Subset(train_dataset, sample_idcs)
subset_train_loader = DataLoader(
subset_train_dataset,
batch_size=num_pseudo,
# pin_memory=True,
shuffle=False,
)
# normalized x test, unnormalized targets
test_loader, val_loader, n_train = (
DataLoader(
test_dataset,
batch_size=data_minibatch,
# pin_memory=True,
shuffle=False,
),
DataLoader(
val_dataset,
batch_size=data_minibatch,
# pin_memory=True,
shuffle=False,
),
len(train_dataset),
)
bpe = max(1, int(n_train / data_minibatch)) # batches per epoch
def revert_norm(y_pred):
return y_pred * y_std + y_mean
best_tau, best_ll = taus[0], -float("inf")
if model_selection:
# model selection
print("\nOptimizing precision hyperparameter")
for tau in taus:
print(f"\n\nTrying tau = {tau}")
net = set_up_model(
architecture=architecture,
D=D,
n_hidden=n_hidden,
nc=nc,
mc_samples=mc_samples,
init_sd=init_sd,
).to(device)
optim_vi = torch.optim.Adam(net.parameters(), lr0net)
tau_fit = fit(
net=net,
optim_vi=optim_vi,
train_loader=subset_train_loader,
pred_loader=val_loader,
revert_norm=revert_norm,
log_every=-1,
tau=tau,
epochs=num_epochs * bpe,
device=device,
)
if tau_fit["lls"][-1] > best_ll:
best_tau, best_ll = tau, tau_fit["lls"][-1]
print(f"current best tau, best ll : {best_tau}, {best_ll}")
else:
best_tau = taus[0]
print(f"\n\nselected tau : {best_tau}\n\n")
net = set_up_model(
architecture=architecture,
D=D,
n_hidden=n_hidden,
nc=nc,
mc_samples=mc_samples,
init_sd=init_sd,
).to(device)
optim_vi = torch.optim.Adam(net.parameters(), lr0net)
results = fit(
net=net,
optim_vi=optim_vi,
train_loader=subset_train_loader,
pred_loader=test_loader,
revert_norm=revert_norm,
log_every=log_every,
tau=best_tau,
epochs=num_epochs * bpe,
device=device,
)
results["csizes"] = [num_pseudo]
return results
# fit mean-field BNN using the standard ELBO and log predictive performance
def fit(
net=None,
optim_vi=None,
train_loader=None,
pred_loader=None,
revert_norm=None,
log_every=-1,
tau=1e-2,
epochs=40,
device=None,
):
distr_fn = partial(gaussian_fn, scale=1.0 / np.sqrt(tau))
logging_checkpoint = (
lambda it: (it % log_every) == 0 if log_every > 0 else it == (epochs - 1)
) # if log_every==-1 then log pred perf only at the end of training
lls, rmses, times, elbos = [], [], [0], []
t_start = time.time()
n_train = len(train_loader.dataset)
for e in tqdm(range(epochs)):
xbatch, ybatch = next(iter(train_loader))
xbatch, ybatch = xbatch.to(device, non_blocking=True), ybatch.to(
device, non_blocking=True
)
optim_vi.zero_grad()
data_nll = -(
n_train
/ xbatch.shape[0]
* distr_fn(net(xbatch).squeeze(-1)).log_prob(ybatch.squeeze()).sum()
)
kl = sum(m.kl() for m in net.modules() if isinstance(m, VILinear))
loss = data_nll + kl
loss.backward()
optim_vi.step()
with torch.no_grad():
elbos.append(-loss.item())
if logging_checkpoint(e):
total, test_ll, rmses_unnorm = 0, 0, 0
for (xt, yt) in pred_loader:
xt, yt = (
xt.to(device, non_blocking=True),
yt.to(device, non_blocking=True).squeeze(),
)
with torch.no_grad():
y_pred = net(xt).squeeze(-1)
y_pred = revert_norm(y_pred).mean(0).squeeze()
rmses_unnorm += (y_pred - yt).square().sum()
total += yt.size(0)
test_ll += distr_fn(y_pred).log_prob(yt.squeeze()).sum()
times.append(times[-1] + time.time() - t_start)
lls.append((test_ll / float(total)).item())
rmses.append((rmses_unnorm / float(total)).sqrt().item())
print(
f" \n\n\n Predictive rmse {rmses[-1]:.2f} | pred ll {lls[-1]:.2f}"
)
results = {
"rmses": rmses,
"lls": lls,
"times": times[1:],
"elbos": elbos,
"scale": 1.0 / np.sqrt(tau),
}
return results
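# Illustrative sketch of the precision-to-scale conversion used by `fit` above: a Gaussian
# likelihood with precision tau has standard deviation 1 / sqrt(tau) (toy values only).
def _example_precision_to_scale():
    import numpy as np
    import torch
    from torch.distributions.normal import Normal

    tau = 1e-2
    scale = float(1.0 / np.sqrt(tau))  # = 10.0
    lik = Normal(loc=torch.zeros(4), scale=scale)
    return lik.log_prob(torch.randn(4)).sum()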
|
Blackbox-Coresets-VI-main
|
psvi/inference/baselines.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
Blackbox-Coresets-VI-main
|
psvi/inference/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
r"""
Black-box PSVI parent and children classes accessing the dataset via pytorch dataloaders.
"""
import time
import random
import numpy as np
from sklearn.utils import shuffle
import torch
import torch.nn as nn
from PIL import Image
from psvi.experiments.experiments_utils import SynthDataset
from psvi.hypergrad.diff_optimizers import DifferentiableAdam, GradientDescent
from psvi.hypergrad.hypergradients import CG_normaleq, fixed_point
from psvi.models.neural_net import (
set_mc_samples,
categorical_fn,
gaussian_fn,
make_fcnet,
make_fc2net,
make_lenet,
make_alexnet,
make_regressor_net,
VILinear,
VILinearMultivariateNormal,
)
from psvi.robust_higher import innerloop_ctx
from psvi.robust_higher.patch import monkeypatch
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from functools import partial
from psvi.inference.utils import make_dataloader, compute_empirical_mean
class SubsetPreservingTransforms(Dataset):
r"""
Subset of a dataset at specified indices with a specified list of transforms.
Arguments:
dataset (Dataset): The whole Dataset
indices (sequence): Indices in the whole set selected for subset
"""
def __init__(self, dataset, indices=None, dim=2, dnm="Cifar10"):
self.dataset = dataset
self.indices = indices
self.dnm = dnm
self.dim = dim
def __getitem__(self, idx):
if self.dnm not in {"MNIST", "Cifar10"}:
return self.dataset.data[self.indices[idx]].reshape((self.dim,))
im = (
Image.fromarray(self.dataset.data[self.indices[idx]]) # Cifar10
if not self.dnm == "MNIST"
else Image.fromarray(
np.reshape(self.dataset.data[self.indices[idx]].numpy(), (28, 28)),
mode="L", # MNIST
) # TBC: Supporting only Cifar10 and MNIST
)
return self.dataset.transform(im)
def __len__(self):
return len(self.indices)
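# Minimal usage sketch for SubsetPreservingTransforms, assuming a torchvision-style MNIST
# dataset with `.data`, `.targets` and a tensor-producing `.transform` (e.g. ToTensor);
# the dataset object itself is hypothetical and passed in by the caller.
def _example_subset_preserving_transforms(mnist_dataset):
    import torch
    from torch.utils.data import DataLoader

    indices = (torch.as_tensor(mnist_dataset.targets) == 3).nonzero()  # all datapoints with label 3
    subset = SubsetPreservingTransforms(mnist_dataset, indices=indices, dnm="MNIST")
    return DataLoader(subset, batch_size=16, shuffle=True)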
class PSVI(object):
r"""
PSVI
- with fixed rescaled coefficients on pseudodata supporting pytorch dataloaders
"""
def __init__(
self,
u=None, # pseudo x-coordinates
z=None, # pseudo y-coordinates
train_dataset=None, # true training data
test_dataset=None, # test data
N=None, # size of training data
D=None, # dimensionality of training data
model=None, # statistical model
optim=None, # joint variational model/pseudodata optimizer
optim_u=None, # optimizer for pseudodata
optim_net=None, # optimizer for variational model parameters
optim_v=None, # optimizer for log-likelihood rescaling vector
optim_z=None, # optimizer for soft labels on distilled data
register_elbos=True, # register values of objectives over inference
num_pseudo=None, # number of pseudodata
seed=0, # random seed for instantiation of the method (for reproducibility)
compute_weights_entropy=True, # compute the entropy of weights distribution used in importance sampling
mc_samples=None, # number of MC samples for computation of variational objectives and predictions on unseen data
reset=False, # reset variational parameters to initialization
reset_interval=10, # number of outer gradient steps between reinitializations
learn_v=False, # boolean indicating if the v vector is learnable
f=lambda *x: x[0], # transformation applied on the v vector
distr_fn=categorical_fn, # distribution of last nn layer
dnm="MNIST", # dataset name
nc=10, # number of classes (argument supported only for the psvi dataloader subclasses)
init_dataset=None, # populated when picking initializations from a disturbed version of the original datapoints
parameterised=False,
learn_z=False, # optimize in the label space
        prune=False,  # apply pruning over coreset training
        prune_interval=None,  # corresponding number of outer updates for pruning
prune_sizes=None, # list with budgets for pruned coreset
increment=False, # incremental learning setting
increment_interval=None, # corresponding number of outer updates between incrementing with new learning task
increment_sizes=None, # list of increasing coreset sizes after incrementally introducing new learning tasks
lr0alpha=1e-3,
retrain_on_coreset=False, # retrain variational parameters only on coreset datapoints after extracting a coreset using joint optimizer on the PSVI ELBO
device_id=None,
**kwargs,
):
np.random.seed(seed), torch.manual_seed(seed)
        self.device = torch.device(
            f"cuda:{device_id}" if device_id else ("cuda" if torch.cuda.is_available() else "cpu")
        )
self.u, self.z = u, z
self.train_dataset, self.test_dataset = (
train_dataset,
test_dataset,
)
self.N, self.D, self.dnm = N, D, dnm
self.nc = nc # number of classes
self.distr_fn = distr_fn
(
self.model,
self.optim,
self.optim_u,
self.optim_net,
self.optim_v,
self.optim_z,
) = (
model,
optim,
optim_u,
optim_net,
optim_v,
optim_z,
)
self.register_elbos, self.compute_weights_entropy = (
register_elbos,
compute_weights_entropy,
)
self.elbos = []
self.num_pseudo, self.mc_samples = num_pseudo if not increment else increment_sizes[0], mc_samples
self.reset, self.reset_interval, self.learn_v, self.learn_z = (
reset,
reset_interval,
learn_v,
learn_z,
)
with torch.no_grad():
self.v = (
1.0 / self.num_pseudo * torch.ones(self.num_pseudo, device=self.device)
)
self.v.requires_grad_(
self.learn_v
) # initialize weights of coreset pseudodata to uniform and set to differentiable or not according to attribute learn_v
self.f, self.parameterised = f, parameterised
self.init_dataset = init_dataset
self.results = {}
self.prune, self.prune_interval, self.prune_sizes = (
prune,
prune_interval,
prune_sizes,
)
self.increment, self.increment_interval, self.increment_sizes = (
increment,
increment_interval,
increment_sizes,
)
if self.increment:
self.historical_coresets = []
self.lr0alpha = lr0alpha
self.retrain_on_coreset = retrain_on_coreset
def pseudo_subsample_init(self):
r"""
Initialization of pseudodata on random data subset with equal number of datapoints from each class
"""
chosen_dataset = (
self.train_dataset if self.init_dataset is None else self.init_dataset
)
# set up pseudodata by initializing to random subset from the existing dataset
points_per_class = [
self.num_pseudo // self.nc
] * self.nc # split equally among classes
points_per_class[-1] = self.num_pseudo - sum(
points_per_class[:-1]
) # assigning the remainder to the last class
with torch.no_grad():
self.z = (
torch.tensor(
[
item
for sublist in [
[i] * ppc for i, ppc in enumerate(points_per_class)
]
for item in sublist
]
)
.float()
.to(self.device, non_blocking=True)
)
if self.learn_z:
self.z = torch.nn.functional.one_hot(
self.z.to(torch.int64),
num_classes=self.nc,
).float() # initialize target logits close to one-hot-encoding [0,..., class, ..., 0]-vectors
self.z.requires_grad_(True)
def get_x_from_label(ipc, _l):
indices = (
torch.as_tensor(chosen_dataset.targets).clone().detach() == _l
).nonzero()
return torch.utils.data.DataLoader(
SubsetPreservingTransforms(
chosen_dataset,
indices=indices,
dnm=self.dnm,
dim=self.D,
),
batch_size=ipc,
shuffle=True,
)
distilled_lst = []
for c in range(self.nc):
u0 = next(iter(get_x_from_label(points_per_class[c], c)))
distilled_lst.append(u0.to(device=self.device, non_blocking=True))
self.u = torch.cat(distilled_lst).requires_grad_(True)
def pseudo_rand_init(self, variance=1.):
r"""
Initialize on noisy means of the observed datapoints and random labels equally split among classes
"""
# print(f"is leaf : {self.u.is_leaf}")
self.u = (
(compute_empirical_mean(self.train_loader) + variance * torch.randn(self.num_pseudo, self.D))
.clone()
).to(self.device).requires_grad_(True)
self.z = torch.Tensor([])
for c in range(self.nc):
self.z = torch.cat(
(
self.z.to(self.device),
c
* torch.ones(
self.num_pseudo // self.nc
if c < self.nc - 1
else self.num_pseudo - (self.nc - 1) * (self.num_pseudo // self.nc)
).to(self.device),
)
)
def psvi_elbo(self, xbatch, ybatch, model=None, params=None, hyperopt=False):
r"""
PSVI objective computation [negative PSVI-ELBO]
"""
assert self.mc_samples > 1
Nu, Nx = self.u.shape[0], xbatch.shape[0]
all_data, all_labels = torch.cat((self.u, xbatch)), torch.cat(
(
self.z,
ybatch
if not self.learn_z
else self.nc
* torch.nn.functional.one_hot(
ybatch.to(torch.int64),
num_classes=self.nc,
).float(),
)
)
logits = model(all_data) if not hyperopt else model(all_data, params=params)
log_probs = (nn.LogSoftmax(dim=-1)(logits)).permute(1, 2, 0)
all_nlls = (
-self.distr_fn(logits=logits.squeeze(-1)).log_prob(all_labels)
if not self.learn_z
else torch.nn.KLDivLoss(reduction="none")(
log_probs,
all_labels.softmax(0).unsqueeze(-1).expand(log_probs.shape),
)
.sum(1)
.T
)
pseudo_nll = (
all_nlls[:, :Nu].matmul(self.N * self.f(self.v, 0)) if Nu > 0 else 0.0
)
data_nll = self.N / Nx * all_nlls[:, Nu:].sum(-1)
sampled_nkl = sum(
m.sampled_nkl()
for m in model.modules()
if (isinstance(m, VILinear) or isinstance(m, VILinearMultivariateNormal))
)
log_weights = -pseudo_nll + sampled_nkl
weights = log_weights.softmax(0)
return weights.mul(data_nll - pseudo_nll).sum() - log_weights.mean()
def inner_elbo(self, model=None, params=None, hyperopt=False):
r"""
Inner VI objective computation [negative ELBO]
"""
logits = model(self.u) if not hyperopt else model(self.u, params=params)
        if len(logits.shape) == 2:
logits.unsqueeze_(1)
log_probs = (nn.LogSoftmax(dim=-1)(logits)).permute(1, 2, 0)
pseudodata_nll = (
-self.distr_fn(logits=logits.squeeze(-1)).log_prob(self.z)
if not self.learn_z
else torch.nn.KLDivLoss(reduction="none")(
log_probs,
self.z.softmax(0).unsqueeze(-1).expand(log_probs.shape),
)
.sum(1)
.T
).matmul(self.N * self.f(self.v, 0))
kl = sum(
m.kl()
for m in model.modules()
if (isinstance(m, VILinear) or isinstance(m, VILinearMultivariateNormal))
)
return pseudodata_nll.sum() + kl if self.u.shape[0] > 0 else kl
r"""
Optimization methods
"""
def joint_step(self, xbatch, ybatch):
self.optim.zero_grad()
loss = self.psvi_elbo(xbatch, ybatch, model=self.model)
with torch.no_grad():
if self.register_elbos:
self.elbos.append((2, -loss.item()))
loss.backward()
self.optim.step()
return loss
def alternating_step(self, xbatch, ybatch):
for i in range(2):
self.optim = self.optim_net if i == 0 else self.optim_u
self.optim.zero_grad()
loss = self.psvi_elbo(xbatch, ybatch, model=self.model)
with torch.no_grad():
if self.register_elbos:
self.elbos.append(
(1, -loss.item())
) if i == 1 else self.elbos.append((0, -loss.item()))
loss.backward()
self.optim.step()
return loss
def nested_step(self, xbatch, ybatch, truncated=False, K=5):
self.optim_u.zero_grad()
self.optim_net.zero_grad()
if self.learn_v:
self.optim_v.zero_grad()
if self.learn_z:
self.optim_z.zero_grad()
if not truncated:
with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
for in_it in range(self.inner_it):
mfvi_loss = self.inner_elbo(model=fmodel)
with torch.no_grad():
if self.register_elbos and in_it % self.log_every == 0:
self.elbos.append((1, -mfvi_loss.item()))
diffopt.step(mfvi_loss)
psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
with torch.no_grad():
if self.register_elbos:
self.elbos.append((0, -psvi_loss.item()))
psvi_loss.backward()
else:
inner_opt = torch.optim.Adam(list(self.model.parameters()), 1e-4)
for in_it in range(self.inner_it - K):
mfvi_loss = self.inner_elbo(model=self.model)
with torch.no_grad():
if self.register_elbos and in_it % self.log_every == 0:
self.elbos.append((1, -mfvi_loss.item()))
mfvi_loss.backward()
inner_opt.step()
            print("finished the non-differentiable steps of the truncated inner loop")
inner_opt.zero_grad()
with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
for in_it in range(K):
mfvi_loss = self.inner_elbo(model=fmodel)
with torch.no_grad():
if self.register_elbos and in_it % self.log_every == 0:
self.elbos.append((1, -mfvi_loss.item()))
diffopt.step(mfvi_loss)
psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
with torch.no_grad():
if self.register_elbos:
self.elbos.append((0, -psvi_loss.item()))
psvi_loss.backward()
self.optim_u.step()
if self.learn_v:
self.optim_v.step()
if not self.parameterised:
with torch.no_grad():
torch.clamp_(
self.v, min=0.0
) # clamp weights of coreset data point to be non-negative
if self.scheduler_optim_net:
self.scheduler_optim_net.step()
if self.learn_z:
self.optim_z.step()
nn.utils.vector_to_parameters(
nn.utils.parameters_to_vector(list(fmodel.parameters())),
self.model.parameters(),
)
return psvi_loss
def hyper_step(
self,
xbatch,
ybatch,
T=50, # iterations for inner problem solver
inner_opt_class=DifferentiableAdam, # optimizer type for inner problem solver
K=30, # iterations for linear system solver (in approximate implicit differentiation methods)
linsys_lr=1e-4, # lr for the SGD optimizer used to solve the linear system on the Jacobian-vector products
hypergrad_approx="CG_normaleq",
**kwargs,
):
T = self.inner_it
inner_opt_kwargs = {"step_size": self.optim_net.param_groups[0]["lr"]}
fmodel = monkeypatch(self.model, copy_initial_weights=True)
self.optim_u.zero_grad()
if self.learn_v:
self.optim_v.zero_grad()
if self.learn_z:
raise NotImplementedError
def inner_loop(hparams, params, optim, n_steps, create_graph=False):
params_history = [optim.get_opt_params(params)]
for _ in range(n_steps):
params_history.append(
optim(params_history[-1], hparams, create_graph=create_graph)
)
return params_history
def get_inner_opt(train_loss):
return inner_opt_class(train_loss, **inner_opt_kwargs)
def inner_loss_function(p, hp, hyperopt=True):
if self.learn_v:
self.u, self.v = hp[0], hp[1]
else:
self.u = hp[0]
return self.inner_elbo(model=fmodel, params=p, hyperopt=hyperopt)
def outer_loss_function(p, hp):
if self.learn_v:
self.u, self.v = hp[0], hp[1]
else:
self.u = hp[0]
return self.psvi_elbo(xbatch, ybatch, model=fmodel, params=p, hyperopt=True)
inner_opt = get_inner_opt(inner_loss_function)
params = [p.detach().clone().requires_grad_(True) for p in fmodel.parameters()]
params_history = inner_loop(
[self.u] + [self.v] if self.learn_v else [self.u],
params,
inner_opt,
T,
)
last_param = params_history[-1][: len(params)]
linear_opt = GradientDescent(loss_f=inner_loss_function, step_size=linsys_lr)
if hypergrad_approx == "fixed_point": # fixed-point AID
fixed_point(
last_param,
[self.u] + [self.v] if self.learn_v else [self.u],
K=K,
fp_map=linear_opt,
outer_loss=outer_loss_function,
stochastic=True,
)
elif hypergrad_approx == "CG_normaleq": # CG on normal equations AID
CG_normaleq(
last_param,
[self.u] + [self.v] if self.learn_v else [self.u],
K=K,
fp_map=linear_opt,
outer_loss=outer_loss_function,
set_grad=True,
)
self.optim_u.step()
if self.learn_v:
self.optim_v.step()
if not self.parameterised:
with torch.no_grad():
torch.clamp_(self.v, min=0.0)
ll = outer_loss_function(last_param, [self.u] + [self.v])
nn.utils.vector_to_parameters(
nn.utils.parameters_to_vector(last_param),
self.model.parameters(),
)
return ll.item()
def set_up_model(self):
r"""
Specify the statistical model
"""
print("SETTING UP THE MODEL \n\n")
if self.logistic_regression:
self.model = nn.Sequential(
VILinear(
self.D, self.nc, init_sd=self.init_sd, mc_samples=self.mc_samples
),
).to(self.device)
elif self.architecture=="logistic_regression_fullcov":
self.model = nn.Sequential(
VILinearMultivariateNormal(
self.D, self.nc, init_sd=self.init_sd, mc_samples=self.mc_samples
),
).to(self.device)
elif self.architecture in {"fn", "residual_fn"}:
self.model = make_fcnet(
self.D,
self.n_hidden,
self.nc,
n_layers=self.n_layers,
linear_class=VILinear,
nonl_class=nn.ReLU,
mc_samples=self.mc_samples,
residual=(self.architecture == "residual_fn"),
init_sd=self.init_sd,
).to(self.device)
elif self.architecture == "fn2":
print(f"architecture : {self.architecture}")
self.model = make_fc2net(
self.D,
self.n_hidden,
self.nc, # does not support argument on the number of channels
linear_class=VILinearMultivariateNormal,
nonl_class=nn.ReLU,
mc_samples=self.mc_samples,
init_sd=self.init_sd,
).to(self.device)
elif self.architecture == "lenet":
self.model = make_lenet(
linear_class=VILinear,
nonl_class=nn.ReLU,
mc_samples=self.mc_samples,
init_sd=self.init_sd,
).to(self.device)
elif self.architecture == "alexnet":
self.model = make_alexnet(
linear_class=VILinear,
nonl_class=nn.ReLU,
mc_samples=self.mc_samples,
init_sd=self.init_sd,
).to(self.device)
elif self.architecture == "regressor_net":
self.model = make_regressor_net(
self.D,
self.n_hidden,
self.nc,
linear_class=VILinear,
nonl_class=nn.ReLU,
mc_samples=self.mc_samples,
residual=(self.architecture == "residual_fn"),
init_sd=self.init_sd,
).to(self.device)
def run_psvi(
self,
init_args="subsample",
trainer="nested",
n_layers=1,
logistic_regression=True,
n_hidden=None,
architecture=None,
log_every=10,
inner_it=10,
data_minibatch=None,
lr0net=1e-3,
lr0u=1e-3,
lr0joint=1e-3,
lr0v=1e-2,
lr0z=1e-2,
init_sd=1e-3,
num_epochs=1000,
log_pseudodata=False,
prune_idx=0,
increment_idx=0,
gamma=1.0,
**kwargs,
):
r"""
Run inference
"""
# experiment-specific hyperparameters
self.init_args = init_args
self.trainer = trainer
self.logistic_regression = logistic_regression
self.architecture, self.n_hidden, self.n_layers, self.init_sd = (
architecture,
n_hidden,
n_layers,
init_sd,
)
self.log_every, self.log_pseudodata = log_every, log_pseudodata
self.data_minibatch = data_minibatch
self.inner_it, self.num_epochs = inner_it, num_epochs
self.scheduler_optim_net = None
self.gamma = gamma
epoch_quarter = (self.N // self.data_minibatch) // 4
scheduler_kwargs = {
"step_size": epoch_quarter if epoch_quarter > 0 else 10000,
"gamma": self.gamma,
}
# load the training and test data on dataloaders
self.train_loader = DataLoader(
self.train_dataset,
batch_size=self.data_minibatch,
pin_memory=True,
shuffle=True,
)
self.test_loader = DataLoader(
self.test_dataset,
batch_size=self.data_minibatch,
pin_memory=True,
shuffle=False,
)
# setup for training and test sets in incremental learning: we start with 2 classes, and keep adding 1 new class at a time
if self.increment:
self.incremental_train_datasets, self.incremental_test_datasets = [None]*(self.nc - 1), [None]*(self.nc - 1)
for c in range(1, self.nc):
self.incremental_train_datasets[c-1] = self.train_dataset.subset_where(cs=list(range(c+1)) if c==1 else [c])
self.incremental_test_datasets[c-1] = self.test_dataset.subset_where(cs=list(range(c+1)))
(self.train_loader, self.test_loader) = (make_dataloader(self.incremental_train_datasets[0], self.data_minibatch),
make_dataloader(self.incremental_test_datasets[0], self.data_minibatch, shuffle=False))
self.train_data_so_far = len(self.train_loader.dataset)
self.nc = 2 # in the incremental learning case start with a 2-class classification problem
self.set_up_model()
# initialization of results data structures
(
nlls_psvi,
accs_psvi,
core_idcs_psvi,
iws_entropy,
nesses,
vs_entropy,
us,
zs,
vs,
grid_preds,
times,
) = ([], [], [], [], [], [], [], [], [], [], [0])
# initialization of pseudodata
pseudodata_init = {
"random": self.pseudo_rand_init, # different transformations applied on `train_dataset`
"subsample": self.pseudo_subsample_init,
}
pseudodata_init[self.init_args]()
# optimization method
self.optim_net, self.optim_u = (
torch.optim.Adam(list(self.model.parameters()), lr0net),
torch.optim.Adam([self.u], lr0u),
)
self.scheduler_optim_net = torch.optim.lr_scheduler.StepLR(
self.optim_net, **scheduler_kwargs
)
if self.learn_v:
self.optim_v = torch.optim.Adam([self.v], lr0v)
if self.learn_z:
self.optim_z = torch.optim.Adam([self.z], lr0z)
optimizers = {
"alternating": self.alternating_step,
"nested": self.nested_step,
"hyper": self.hyper_step,
}
if self.trainer == "joint":
variational_params = (
list(self.model.parameters()) + [self.u] + [self.v]
if self.learn_v
else list(self.model.parameters()) + [self.u]
)
self.optim = torch.optim.Adam(variational_params, lr0joint)
psvi_step = self.joint_step
else:
psvi_step = optimizers[self.trainer]
t_start = time.time()
# training loop
total_checkpts = list(range(self.num_epochs))[::log_every]
downsample = 1 # downsample checkpoints for logging predictive uncertainty over a grid
lpit = total_checkpts[::downsample]
for it in tqdm(range(self.num_epochs)):
xbatch, ybatch = next(iter(self.train_loader))
xbatch, ybatch = xbatch.to(self.device, non_blocking=True), ybatch.to(
self.device, non_blocking=True
)
# evaluation
if it % self.log_every == 0:
test_acc, test_nll, iw_ent, ness, v_ent = self.evaluate()
if (
self.log_pseudodata
and it in lpit
and self.dnm not in {"MNIST", "Cifar10", "adult", "phishing", "webspam"}
):
print(f"\nlogging predictive grid at {it}")
grid_preds.append(self.pred_on_grid().detach().cpu().numpy().T)
with torch.no_grad():
nlls_psvi.append(test_nll.item())
accs_psvi.append(test_acc.item())
print(f"\npredictive accuracy: {(100*test_acc.item()):.2f}%")
core_idcs_psvi.append(self.num_pseudo)
times.append(times[-1] + time.time() - t_start)
vs.append((self.f(self.v, 0)).clone().cpu().detach().numpy())
if iw_ent is not None:
iws_entropy.append(iw_ent.item())
if ness is not None:
nesses.append(ness.item())
if v_ent is not None:
vs_entropy.append(v_ent.item())
if self.log_pseudodata:
us.append(self.u.clone().cpu().detach().numpy())
zs.append(self.z.clone().cpu().detach().numpy())
# variational nn reinitialization
if self.reset and it % self.reset_interval == 0:
self.weight_reset()
# take a single optimization step
psvi_step(xbatch, ybatch)
# prune coreset to smaller sizes
if self.prune and it > 0 and it % self.prune_interval == 0:
if prune_idx < len(self.prune_sizes):
self.prune_coreset(
to_size=self.prune_sizes[prune_idx], lr0v=lr0v, lr0net=lr0net
)
prune_idx += 1
self.weight_reset()
# reset model upon pruning
# add new learning task and increment coreset to enable fitting it
if self.increment and it > 0 and it % self.increment_interval == 0:
if increment_idx < len(self.increment_sizes)-1:
# self.historical_coresets.append({'v': self.v, 'u':self.u, 'z':self.z})
increment_idx += 1
#samples_from_coresets = [torch.multinomial(self.f(self.historical_coresets[_i]['v'], 0), self.train_data_so_far//increment_idx, replacement=True) for _i in range(increment_idx)] # sample summarising data from tasks so far using coreset points weighting
samples_from_coreset = torch.multinomial(self.f(self.v, 0), self.train_data_so_far, replacement=True) # sample summarising data from tasks so far using coreset points weighting
self.nc += 1 # added new class in training dataset
self.set_up_model() # reset model
self.increment_coreset(
to_size=self.increment_sizes[increment_idx], lr0v=lr0v, lr0u=lr0u, lr0net=lr0net, new_class=increment_idx+1, increment_idx=increment_idx
)
#self.train_loader = make_dataloader(self.incremental_train_datasets[increment_idx].concatenate(torch.cat([self.historical_coresets[_i]['u'][samples_from_coresets[_i]].clone().detach() for _i in range(increment_idx)], axis=0),
# torch.cat([self.historical_coressets[_i]['z'][samples_from_coresets[_i]].clone().detach() for _i in range(increment_idx)], axis=0)),
# self.data_minibatch) # augment with new training data
self.train_loader = make_dataloader(self.incremental_train_datasets[increment_idx].concatenate(self.u[samples_from_coreset].clone().detach(),
self.z[samples_from_coreset].clone().detach()),
self.data_minibatch) # augment with new training data
self.test_loader = make_dataloader(self.incremental_test_datasets[increment_idx], self.data_minibatch, shuffle=False) # augment with new test data
self.train_data_so_far = len(self.train_loader.dataset)
# retrain restricting only on coreset datapoints
if self.retrain_on_coreset:
print("\n\nRetrain on the extracted coreset for the same number of epochs")
self.weight_reset()
self.optim_retrain = torch.optim.Adam(list(self.model.parameters()), lr0joint)
for it in tqdm(range(self.num_epochs)):
# evaluation
if it % self.log_every == 0:
test_acc, test_nll, iw_ent, ness, v_ent = self.evaluate(correction=False)
if (
self.log_pseudodata
and it in lpit
and self.dnm not in {"MNIST", "Cifar10", "adult", "phishing", "webspam"}
):
print(f"\nlogging predictive grid at {it}")
grid_preds.append(self.pred_on_grid(correction=False).detach().cpu().numpy().T)
with torch.no_grad():
nlls_psvi.append(test_nll.item())
accs_psvi.append(test_acc.item())
print(f"\npredictive accuracy: {(100*test_acc.item()):.2f}%")
core_idcs_psvi.append(self.num_pseudo)
times.append(times[-1] + time.time() - t_start)
vs.append((self.f(self.v, 0)).clone().cpu().detach().numpy())
if iw_ent is not None:
iws_entropy.append(iw_ent.item())
if ness is not None:
nesses.append(ness.item())
if v_ent is not None:
vs_entropy.append(v_ent.item())
if self.log_pseudodata:
us.append(self.u.clone().cpu().detach().numpy())
zs.append(self.z.clone().cpu().detach().numpy())
self.optim_retrain.zero_grad()
loss = self.inner_elbo(model=self.model)
loss.backward()
self.optim_retrain.step()
# store results
self.results["accs"] = accs_psvi
self.results["nlls"] = nlls_psvi
self.results["csizes"] = core_idcs_psvi
self.results["times"] = times[1:]
self.results["elbos"] = self.elbos
self.results["went"] = iws_entropy
self.results["ness"] = nesses
self.results["vent"] = vs_entropy
self.results["vs"] = vs
if self.log_pseudodata:
self.results["us"], self.results["zs"], self.results["grid_preds"] = (
us,
zs,
grid_preds,
)
return self.results
## Compute predictive metrics
def evaluate(
self,
correction=True,
**kwargs,
):
assert self.mc_samples > 1
total, test_nll, corrects = 0, 0, 0
for xt, yt in self.test_loader:
xt, yt = xt.to(self.device, non_blocking=True), yt.to(
self.device, non_blocking=True
)
with torch.no_grad():
all_data = torch.cat((self.u, xt))
all_logits = self.model(all_data)
pseudo_logits = all_logits[:, : self.num_pseudo]
log_probs = (nn.LogSoftmax(dim=-1)(pseudo_logits)).permute(1, 2, 0)
pseudo_nll = (
(
(
self.distr_fn(logits=pseudo_logits).log_prob(self.z)
if not self.learn_z
else torch.nn.KLDivLoss(reduction="none")(
log_probs,
self.z.softmax(0).unsqueeze(-1).expand(log_probs.shape),
).sum((1, 2))
).matmul(self.N * self.f(self.v, 0))
)
if self.num_pseudo > 0
else 0.0
)
test_data_logits = all_logits[:, self.num_pseudo :]
sampled_nkl = sum(
m.sampled_nkl()
for m in self.model.modules()
if (
isinstance(m, VILinear)
or isinstance(m, VILinearMultivariateNormal)
)
)
log_weights = -pseudo_nll + sampled_nkl
weights = log_weights.softmax(0)
test_probs = (
(
test_data_logits.softmax(-1)
.mul(weights.unsqueeze(-1).unsqueeze(-1))
.sum(0)
)
if correction
else test_data_logits.softmax(-1).mean(0)
)
corrects += test_probs.argmax(-1).float().eq(yt).float().sum()
total += yt.size(0)
test_nll += -self.distr_fn(probs=test_probs).log_prob(yt).sum()
iw_entropy = (
-weights[weights > 0].log().mul(weights[weights > 0]).sum()
if self.compute_weights_entropy
else None
) # entropy of the importance weighting distribution
ness = (
weights.sum().square() / weights.square().sum() / weights.shape[0]
) # normalized effective sample size
vs = self.f(self.v, 0)
v_entropy = (
vs.sum().square()
/ vs.square().sum()
            / self.num_pseudo  # normalized effective sample size of the coreset weights
if self.compute_weights_entropy
else None
)
return (
corrects / float(total),
test_nll / float(total),
iw_entropy,
ness,
v_entropy,
)
def weight_reset(self):
r"""
Reset variational parameters to initialization
"""
for layer in self.model.modules():
if (
isinstance(layer, VILinear)
or isinstance(layer, VILinearMultivariateNormal)
) and hasattr(layer, "reset_parameters_variational"):
layer.reset_parameters_variational()
elif (
isinstance(layer, nn.Conv2d)
or (
isinstance(layer, VILinear)
or isinstance(layer, VILinearMultivariateNormal)
)
and hasattr(layer, "reset_parameters")
):
layer.reset_parameters()
def pred_on_grid(
self,
n_test_per_dim=250,
correction=True,
**kwargs,
):
r"""
Predictions over a 2-d grid for visualization of predictive posterior on 2-d synthetic datasets
"""
_x0_test = torch.linspace(-3, 4, n_test_per_dim)
_x1_test = torch.linspace(-2, 3, n_test_per_dim)
x_test = torch.stack(torch.meshgrid(_x0_test, _x1_test), dim=-1).to(self.device)
with torch.no_grad():
all_data = torch.cat((self.u, x_test.view(-1, 2)))
all_logits = self.model(all_data).squeeze(-1)
pseudo_nll = (
(
self.distr_fn(logits=all_logits[:, : self.num_pseudo])
.log_prob(self.z)
.matmul(self.N * self.f(self.v, 0))
)
if self.num_pseudo > 0
else 0.0
)
grid_data_logits = all_logits[:, self.num_pseudo :]
sampled_nkl = sum(
m.sampled_nkl()
for m in self.model.modules()
if (
isinstance(m, VILinear) or isinstance(m, VILinearMultivariateNormal)
)
)
log_weights = -pseudo_nll + sampled_nkl
weights = log_weights.softmax(0)
grid_probs = (
(
grid_data_logits.softmax(-1)
.mul(weights.unsqueeze(-1).unsqueeze(-1))
.sum(0)
)
if correction
else grid_data_logits.softmax(-1).mean(0)
)
return grid_probs
def prune_coreset(
self,
to_size,
lr0v=1e-3,
lr0net=1e-4,
): # designed to work only for the fixed u methods
r"""
Prune coreset to a given smaller size
"""
self.num_pseudo = to_size
        keep_v = torch.multinomial(self.f(self.v, 0), to_size, replacement=False)  # keep indices sampled in proportion to the current weights (alternative: torch.topk(self.v, to_size))
self.v = torch.zeros_like(self.v[keep_v]).clone().detach().requires_grad_(True)
self.optim_v = torch.optim.Adam([self.v], lr0v)
self.u = torch.index_select(self.u, 0, keep_v)
self.z = torch.index_select(self.z, 0, keep_v)
self.optim_net = torch.optim.Adam(list(self.model.parameters()), lr0net)
def increment_coreset(
self,
to_size,
lr0v=1e-3,
lr0u=1e-3,
lr0net=1e-4,
variance=1., # variance for random initialization of coreset for new class
new_class=2,
increment_idx=1,
):
r"""
Increment coreset to a given larger size
"""
self.num_pseudo, num_extra_points = to_size, to_size - len(self.v)
extra_weights = torch.ones(num_extra_points, device=self.device)
self.v = torch.cat(( self.v, 1. / (len(self.v) + num_extra_points) * self.v.sum() * extra_weights )).detach().requires_grad_(True)
self.optim_v = torch.optim.Adam([self.v], lr0v)
(new_us, new_zs) = (
((compute_empirical_mean(self.train_loader) + variance * torch.randn(num_extra_points, self.D)).clone(), new_class * torch.ones(num_extra_points))
if self.init_args == "random"
else self.incremental_train_datasets[increment_idx][torch.randperm(len(self.incremental_train_datasets[increment_idx]))[:num_extra_points]])
self.u, self.z = torch.cat((self.u, new_us)).detach().requires_grad_(True), torch.cat((self.z, new_zs))
self.optim_u = torch.optim.Adam([self.u], lr0u)
self.optim_net = torch.optim.Adam(list(self.model.parameters()), lr0net)
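# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): the self-normalised
# importance weighting and the normalised effective sample size used in
# PSVI.evaluate above. Given per-sample log-weights log w_s = -pseudo_nll_s + nkl_s,
# the predictive mixture uses softmax(log w), and nESS = (sum_s w_s)^2 / (S * sum_s w_s^2).
# Self-contained toy example with random stand-in log-weights.
if __name__ == "__main__":
    import torch

    torch.manual_seed(0)
    S = 16  # number of variational MC samples
    log_weights = torch.randn(S)  # stand-in for -pseudo_nll + sampled_nkl
    weights = log_weights.softmax(0)
    ness = weights.sum().square() / weights.square().sum() / S
    print("normalised ESS:", ness.item())  # 1.0 means all samples contribute equally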
class PSVILearnV(PSVI):
r"""
PSVI
- with learnable v on a simplex (with constant sum constraint)
"""
def __init__(self, learn_v=True, parameterised=True, **kwargs):
super().__init__(**kwargs)
self.learn_v, self.parameterised = learn_v, parameterised
with torch.no_grad():
self.v = torch.zeros(self.num_pseudo, device=self.device)
self.v.requires_grad_(
True
) # initialize learnable weights of coreset pseudodata to uniform
self.f = (
torch.softmax
) # transform v via softmax to keep the sum over the pseudodata fixed
class PSVI_No_Rescaling(PSVI):
r"""
PSVI
- with no fixed or learnable coefficients on coreset datapoints whatsoever
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.v *= (
1.0 / self.N
) # we remove log-likelihood rescaling dependency on the true dataset size N
class PSVIFreeV(PSVI):
r"""
PSVI
- with learnable v (subject only to non-negativity constraints)
"""
def __init__(self, learn_v=True, **kwargs):
super().__init__(**kwargs)
self.learn_v = True
self.v.requires_grad_(True)
class PSVI_Ablated(PSVILearnV):
r"""
PSVI
- with ablated importance sampling from coreset variational posterior
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def psvi_elbo(self, xbatch, ybatch, model=None, params=None, hyperopt=False):
r"""
Ablated PSVI objective computation
"""
Nx = xbatch.shape[0]
logits = model(xbatch) if not hyperopt else model(xbatch, params=params)
nlls = -self.distr_fn(logits=logits.squeeze(-1)).log_prob(ybatch)
data_nll = self.N / Nx * nlls.sum(-1) # multi-sample training
sampled_nkl = sum(
m.sampled_nkl() for m in model.modules() if isinstance(m, VILinear)
)
return data_nll.mean() - sampled_nkl.mean()
class PSVI_No_IW(PSVI_Ablated):
r"""
PSVI
- with single-sample training / multi-sample testing
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.mc_samples = 1
def evaluate(
self,
correction=True,
mc_samples_eval=5,
mc_samples_train=1,
**kwargs,
):
r"""
Compute predictive metrics
"""
with torch.no_grad():
self.mc_samples = mc_samples_eval
set_mc_samples(
self.model, self.mc_samples
) # set to multi-sample for testing
test_acc, test_nll, iw_entropy, ness, v_entropy = super().evaluate(
correction=True,
**kwargs,
)
self.mc_samples = 1
set_mc_samples(
self.model, mc_samples_train
) # set to single-sample for training
return test_acc, test_nll, iw_entropy, ness, v_entropy
def pred_on_grid(
self,
correction=True,
n_test_per_dim=250,
mc_samples_eval=5,
mc_samples_train=1,
**kwargs,
):
r"""
Predictions over a 2-d grid for visualization of predictive posterior on 2-d synthetic datasets
"""
# TODO: fix for correction via importance weighting
with torch.no_grad():
self.mc_samples = mc_samples_eval
set_mc_samples(
self.model, self.mc_samples
) # set to multi-sample for testing
test_probs = super().pred_on_grid(
correction=correction,
n_test_per_dim=n_test_per_dim,
**kwargs,
)
self.mc_samples = mc_samples_train
set_mc_samples(
self.model, mc_samples_train
) # set to single-sample for training
return test_probs
class PSVIAV(PSVILearnV):
r"""
PSVI subclass with
- learnable coreset point weights on a simplex,
- learnable rescaling of total coreset evidence
"""
def __init__(self, learn_v=True, **kwargs):
super().__init__(**kwargs)
self.alpha = torch.tensor([0.0], device=self.device)
self.alpha.requires_grad_(True)
self.f = lambda *x: (
torch.exp(self.alpha) * torch.softmax(x[0], x[1])
) # transform v via softmax to keep the sum over the pseudodata fixed and multiply by a learnable non-negative coefficient
self.optim_alpha = torch.optim.Adam([self.alpha], self.lr0alpha)
self.results["alpha"] = []
def evaluate(self, **kwargs):
self.results["alpha"].append(
self.alpha.clone()
.cpu()
.detach()
.numpy() # store the extra variational parameter
)
return super().evaluate(**kwargs)
def increment_coreset(self, lr0alpha=1e-3, **kwargs):
super().increment_coreset(**kwargs)
self.optim_alpha = torch.optim.Adam([self.alpha], lr0alpha)
def hyper_step(
self,
xbatch,
ybatch,
T=10, # iterations for inner problem solver
inner_opt_class=DifferentiableAdam, # optimizer type for inner problem solver
K=10, # iterations for linear system solver (in approximate implicit differentiation methods)
linsys_lr=1e-1, # lr for the SGD optimizer used to solve the linear system on the Jacobian-vector products
hypergrad_approx="CG_normaleq",
**kwargs,
):
T = self.inner_it
inner_opt_kwargs = {"step_size": self.optim_net.param_groups[0]["lr"]}
fmodel = monkeypatch(self.model, copy_initial_weights=True)
self.optim_u.zero_grad()
self.optim_v.zero_grad()
self.optim_alpha.zero_grad()
if self.optim_z:
raise NotImplementedError
def inner_loop(hparams, params, optim, n_steps, create_graph=False):
params_history = [optim.get_opt_params(params)]
for _ in range(n_steps):
params_history.append(
optim(params_history[-1], hparams, create_graph=create_graph)
)
return params_history
def get_inner_opt(train_loss):
return inner_opt_class(train_loss, **inner_opt_kwargs)
def inner_loss_function(p, hp, hyperopt=True):
self.u, self.v, self.alpha = hp[0], hp[1], hp[2]
return self.inner_elbo(model=fmodel, params=p, hyperopt=hyperopt)
def outer_loss_function(p, hp):
self.u, self.v, self.alpha = hp[0], hp[1], hp[2]
return self.psvi_elbo(xbatch, ybatch, model=fmodel, params=p, hyperopt=True)
inner_opt = get_inner_opt(inner_loss_function)
params = [p.detach().clone().requires_grad_(True) for p in fmodel.parameters()]
params_history = inner_loop(
[self.u] + [self.v] + [self.alpha],
params,
inner_opt,
T,
)
last_param = params_history[-1][: len(params)]
linear_opt = GradientDescent(
loss_f=inner_loss_function, step_size=linsys_lr
) # GradientDescent(loss_f=inner_loss_function, step_size=linsys_lr)
if hypergrad_approx == "fixed_point": # fixed-point AID
fixed_point(
last_param,
[self.u] + [self.v] + [self.alpha],
K=K,
fp_map=linear_opt,
outer_loss=outer_loss_function,
stochastic=True,
)
elif hypergrad_approx == "CG_normaleq": # CG on normal equations AID
CG_normaleq(
last_param,
[self.u] + [self.v] + [self.alpha],
K=K,
fp_map=linear_opt,
outer_loss=outer_loss_function,
set_grad=True,
)
self.optim_u.step()
if self.learn_v:
self.optim_v.step()
self.optim_alpha.step()
ll = outer_loss_function(last_param, [self.u] + [self.v] + [self.alpha])
nn.utils.vector_to_parameters(
nn.utils.parameters_to_vector(last_param),
self.model.parameters(),
)
return ll.item()
def nested_step(self, xbatch, ybatch):
self.optim_u.zero_grad()
self.optim_net.zero_grad()
self.optim_alpha.zero_grad()
if self.learn_v:
self.optim_v.zero_grad()
if self.learn_z:
self.optim_z.zero_grad()
with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
for in_it in range(self.inner_it):
mfvi_loss = self.inner_elbo(model=fmodel)
with torch.no_grad():
if self.register_elbos and in_it % self.log_every == 0:
self.elbos.append((1, -mfvi_loss.item()))
diffopt.step(mfvi_loss)
psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
with torch.no_grad():
if self.register_elbos:
self.elbos.append((0, -psvi_loss.item()))
psvi_loss.backward()
self.optim_u.step()
if self.learn_v:
self.optim_v.step()
self.optim_alpha.step()
if self.learn_z:
self.optim_z.step()
if self.scheduler_optim_net:
self.scheduler_optim_net.step()
nn.utils.vector_to_parameters(
nn.utils.parameters_to_vector(list(fmodel.parameters())),
self.model.parameters(),
)
return psvi_loss
class PSVIFixedU(PSVILearnV):
r"""
PSVI subclass
- with fixed coreset point locations
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def nested_step(self, xbatch, ybatch):
self.u.requires_grad_(False)
self.optim_net.zero_grad()
if self.learn_v:
self.optim_v.zero_grad()
with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
for in_it in range(self.inner_it):
mfvi_loss = self.inner_elbo(model=fmodel)
with torch.no_grad():
if self.register_elbos and in_it % self.log_every == 0:
self.elbos.append((1, -mfvi_loss.item()))
diffopt.step(mfvi_loss)
psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
with torch.no_grad():
if self.register_elbos:
self.elbos.append((0, -psvi_loss.item()))
psvi_loss.backward()
if self.learn_v:
self.optim_v.step()
if self.scheduler_optim_net:
self.scheduler_optim_net.step()
nn.utils.vector_to_parameters(
nn.utils.parameters_to_vector(list(fmodel.parameters())),
self.model.parameters(),
)
return psvi_loss
def hyper_step(
self,
xbatch,
ybatch,
T=20, # iterations for inner problem solver
inner_opt_class=DifferentiableAdam, # optimizer type for inner problem solver
K=20, # iterations for linear system solver (in approximate implicit differentiation methods)
linsys_lr=1e-3, # lr for the SGD optimizer used to solve the linear system on the Jacobian-vector products
hypergrad_approx="CG_normaleq",
**kwargs,
):
self.u.requires_grad_(False)
T = self.inner_it
inner_opt_kwargs = {"step_size": self.optim_net.param_groups[0]["lr"]}
fmodel = monkeypatch(self.model, copy_initial_weights=True)
if self.learn_v:
self.optim_v.zero_grad()
def inner_loop(hparams, params, optim, n_steps, create_graph=False):
params_history = [optim.get_opt_params(params)]
for _ in range(n_steps):
params_history.append(
optim(params_history[-1], hparams, create_graph=create_graph)
)
return params_history
def get_inner_opt(train_loss):
return inner_opt_class(train_loss, **inner_opt_kwargs)
def inner_loss_function(p, hp, hyperopt=True):
if self.learn_v:
self.v = hp[0]
return self.inner_elbo(model=fmodel, params=p, hyperopt=hyperopt)
def outer_loss_function(p, hp):
if self.learn_v:
self.v = hp[0]
return self.psvi_elbo(xbatch, ybatch, model=fmodel, params=p, hyperopt=True)
inner_opt = get_inner_opt(inner_loss_function)
params = [p.detach().clone().requires_grad_(True) for p in fmodel.parameters()]
params_history = inner_loop(
[self.v] if self.learn_v else None,
params,
inner_opt,
T,
)
last_param = params_history[-1][: len(params)]
fp_map = DifferentiableAdam(
loss_f=inner_loss_function, step_size=linsys_lr
) # GradientDescent(loss_f=inner_loss_function, step_size=linsys_lr)
if hypergrad_approx == "fixed_point": # fixed-point AID
fixed_point(
last_param,
[self.v] if self.learn_v else None,
K=K,
fp_map=fp_map,
outer_loss=outer_loss_function,
stochastic=True,
)
elif hypergrad_approx == "CG_normaleq": # CG on normal equations AID
CG_normaleq(
last_param,
[self.v] if self.learn_v else None,
K=K,
fp_map=fp_map,
outer_loss=outer_loss_function,
set_grad=True,
)
if self.learn_v:
self.optim_v.step()
ll = outer_loss_function(last_param, [self.v])
if self.scheduler_optim_net:
self.scheduler_optim_net.step()
nn.utils.vector_to_parameters(
nn.utils.parameters_to_vector(last_param),
self.model.parameters(),
)
return ll.item()
class PSVIAFixedU(PSVILearnV):
r"""
PSVI subclass with
- fixed coreset point locations
- learnable coreset weights on a simplex
- learnable rescaling of total coreset evidence
"""
def __init__(self, learn_v=True, **kwargs):
super().__init__(**kwargs)
self.alpha = torch.tensor([0.0], device=self.device)
self.alpha.requires_grad_(True)
self.f = lambda *x: (
torch.exp(self.alpha) * torch.softmax(x[0], x[1])
) # transform v via softmax to keep the sum over the pseudodata fixed and multiply by a learnable non-negative coefficient
self.optim_alpha = torch.optim.Adam([self.alpha], self.lr0alpha)
self.results["alpha"] = []
def evaluate(self, **kwargs):
self.results["alpha"].append(
self.alpha.clone()
.cpu()
.detach()
.numpy() # store the extra variational parameter
)
return super().evaluate(**kwargs)
def hyper_step(
self,
xbatch,
ybatch,
T=5, # iterations for inner problem solver
inner_opt_class=DifferentiableAdam, # optimizer type for inner problem solver
K=5, # iterations for linear system solver (in approximate implicit differentiation methods)
linsys_lr=1e-1, # lr for the SGD optimizer used to solve the linear system on the Jacobian-vector products
hypergrad_approx="CG_normaleq",
**kwargs,
):
T = self.inner_it
inner_opt_kwargs = {"step_size": self.optim_net.param_groups[0]["lr"]}
fmodel = monkeypatch(self.model, copy_initial_weights=True)
self.optim_v.zero_grad()
self.optim_alpha.zero_grad()
self.u.requires_grad_(False)
def inner_loop(hparams, params, optim, n_steps, create_graph=False):
params_history = [optim.get_opt_params(params)]
for _ in range(n_steps):
params_history.append(
optim(params_history[-1], hparams, create_graph=create_graph)
)
return params_history
def get_inner_opt(train_loss):
return inner_opt_class(train_loss, **inner_opt_kwargs)
def inner_loss_function(p, hp, hyperopt=True):
self.v, self.alpha = hp[0], hp[1]
return self.inner_elbo(model=fmodel, params=p, hyperopt=hyperopt)
def outer_loss_function(p, hp):
self.v, self.alpha = hp[0], hp[1]
return self.psvi_elbo(xbatch, ybatch, model=fmodel, params=p, hyperopt=True)
inner_opt = get_inner_opt(inner_loss_function)
params = [p.detach().clone().requires_grad_(True) for p in fmodel.parameters()]
params_history = inner_loop(
[self.v] + [self.alpha],
params,
inner_opt,
T,
)
last_param = params_history[-1][: len(params)]
linear_opt = GradientDescent(loss_f=inner_loss_function, step_size=linsys_lr)
if hypergrad_approx == "fixed_point": # fixed-point AID
fixed_point(
last_param,
[self.v] + [self.alpha],
K=K,
fp_map=linear_opt,
outer_loss=outer_loss_function,
stochastic=True,
)
elif hypergrad_approx == "CG_normaleq": # CG on normal equations AID
CG_normaleq(
last_param,
[self.v] + [self.alpha],
K=K,
fp_map=linear_opt,
outer_loss=outer_loss_function,
set_grad=True,
)
if self.learn_v:
self.optim_v.step()
self.optim_alpha.step()
ll = outer_loss_function(last_param, [self.v] + [self.alpha])
nn.utils.vector_to_parameters(
nn.utils.parameters_to_vector(last_param),
self.model.parameters(),
)
return ll.item()
def nested_step(self, xbatch, ybatch):
self.optim_net.zero_grad()
self.optim_alpha.zero_grad()
if self.learn_v:
self.optim_v.zero_grad()
self.u.requires_grad_(False)
with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
for in_it in range(self.inner_it):
mfvi_loss = self.inner_elbo(model=fmodel)
with torch.no_grad():
if self.register_elbos and in_it % self.log_every == 0:
self.elbos.append((1, -mfvi_loss.item()))
diffopt.step(mfvi_loss)
psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
with torch.no_grad():
if self.register_elbos:
self.elbos.append((0, -psvi_loss.item()))
psvi_loss.backward()
if self.learn_v:
self.optim_v.step()
self.optim_alpha.step()
if self.scheduler_optim_net:
self.scheduler_optim_net.step()
nn.utils.vector_to_parameters(
nn.utils.parameters_to_vector(list(fmodel.parameters())),
self.model.parameters(),
)
return psvi_loss
## PSVI subclass supporting regression
class PSVI_regressor(PSVI):
def __init__(
self,
u=None, # pseudo x-coordinates
z=None, # pseudo y-coordinates
train_dataset=None, # true training data
val_dataset=None,
test_dataset=None, # test data
y_mean=None,
y_std=None,
N=None, # size of training data
D=None, # dimensionality of training data
optim=None, # joint variational model/pseudodata optimizer
optim_u=None, # optimizer for pseudodata
optim_net=None, # optimizer for variational model parameters
optim_v=None, # optimizer for log-likelihood rescaling vector
optim_z=None, # optimizer for outputs on distilled data
register_elbos=False, # register values of objectives over inference
num_pseudo=None, # number of pseudodata
seed=0, # random seed for instantiation of the method (for reproducibility)
compute_weights_entropy=True, # compute the entropy of weights distribution used in importance sampling
mc_samples=None, # number of MC samples for computation of variational objectives and predictions on unseen data
learn_v=False, # boolean indicating if the v vector is learnable
f=lambda *x: x[0], # transformation applied on the v vector
dnm=None, # dataset name
nc=1, # dimension of output space
init_dataset=None, # populated when picking initializations from a disturbed version of the original datapoints
parameterised=False,
learn_z=True, # optimize in the label space
lr0alpha=1e-3,
tau=0.1,
logistic_regression=False,
**kwargs,
):
np.random.seed(seed), torch.manual_seed(seed)
        print(f"device id {device_id}")
        self.device = torch.device(
            f"cuda:{device_id}" if device_id else ("cuda" if torch.cuda.is_available() else "cpu")
        )
self.u, self.z = u, z
self.train_dataset, self.val_dataset, self.test_dataset = (
train_dataset,
val_dataset,
test_dataset,
)
self.logistic_regression = logistic_regression
self.N, self.D, self.dnm = N, D, dnm
self.nc = nc # dimensionality of output
self.distr_fn = partial(gaussian_fn, scale=1.0 / np.sqrt(tau))
(self.optim, self.optim_u, self.optim_net, self.optim_v, self.optim_z,) = (
optim,
optim_u,
optim_net,
optim_v,
optim_z,
)
self.register_elbos, self.compute_weights_entropy = (
register_elbos,
compute_weights_entropy,
)
if self.register_elbos:
self.elbos = []
self.num_pseudo, self.mc_samples = num_pseudo, mc_samples
self.learn_v, self.learn_z = (
learn_v,
learn_z,
)
with torch.no_grad():
self.v = (
1.0 / self.num_pseudo * torch.ones(self.num_pseudo, device=self.device)
)
self.v.requires_grad_(
self.learn_v
) # initialize weights of coreset pseudodata to uniform and set to differentiable or not according to attribute learn_v
self.f, self.parameterised = f, parameterised
self.init_dataset = init_dataset
self.results = {}
self.lr0alpha = lr0alpha
self.y_mean, self.y_std = y_mean, y_std
### Initialization methods for the pseudodata
def pseudo_subsample_init(self):
sample_idcs = random.sample(range(len(self.train_dataset)), self.num_pseudo)
subset_train_dataset = torch.utils.data.Subset(self.train_dataset, sample_idcs)
self.cs_support = DataLoader(
subset_train_dataset,
batch_size=self.num_pseudo,
# pin_memory=True,
shuffle=False,
)
with torch.no_grad():
self.u, self.z = next(iter(self.cs_support))
self.u, self.z = self.u.to(self.device), self.z.to(self.device)
self.u.requires_grad_(True), self.z.requires_grad_(True)
## PSVI objective computation [negative PSVI-ELBO]
def psvi_elbo(self, xbatch, ybatch, model=None, params=None, hyperopt=False):
assert self.mc_samples > 1
Nu, Nx = self.u.shape[0], xbatch.shape[0]
all_xs, all_ys = torch.cat((self.u, xbatch)), torch.cat((self.z, ybatch))
all_nlls = -self.distr_fn(model(all_xs).squeeze(-1)).log_prob(all_ys.squeeze())
pseudo_nll = (
all_nlls[:, :Nu].matmul(self.N * self.f(self.v, 0)) if Nu > 0 else 0.0
)
data_nll = self.N / Nx * all_nlls[:, Nu:].sum(-1)
sampled_nkl = sum(
m.sampled_nkl() for m in model.modules() if isinstance(m, VILinear)
)
log_weights = -pseudo_nll + sampled_nkl
weights = log_weights.softmax(0)
return weights.mul(data_nll - pseudo_nll).sum() - log_weights.mean()
## Inner VI objective computation [negative ELBO]
def inner_elbo(self, model=None, params=None, hyperopt=False):
pseudodata_nll = (
-self.distr_fn(model(self.u).squeeze(-1)).log_prob(self.z.squeeze())
).matmul(self.N * self.f(self.v, 0))
kl = sum(m.kl() for m in model.modules() if isinstance(m, VILinear))
return pseudodata_nll.sum() + kl if self.u.shape[0] > 0 else kl
## Optimization methods
def nested_step(self, xbatch, ybatch):
self.optim_u.zero_grad()
self.optim_net.zero_grad()
if self.learn_v:
self.optim_v.zero_grad()
if self.learn_z:
self.optim_z.zero_grad()
with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
for in_it in range(self.inner_it):
mfvi_loss = self.inner_elbo(model=fmodel)
with torch.no_grad():
if self.register_elbos and in_it % self.log_every == 0:
self.elbos.append((1, -mfvi_loss.item()))
diffopt.step(mfvi_loss)
psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
with torch.no_grad():
if self.register_elbos:
self.elbos.append((0, -psvi_loss.item()))
psvi_loss.backward()
self.optim_u.step()
if self.learn_v:
self.optim_v.step()
if not self.parameterised:
with torch.no_grad():
torch.clamp_(
self.v, min=0.0
) # clamp weights of coreset data point to be non-negative
if self.learn_z:
self.optim_z.step()
nn.utils.vector_to_parameters(
nn.utils.parameters_to_vector(list(fmodel.parameters())),
self.model.parameters(),
)
return psvi_loss
## Execution of inference
def run_psvi(
self,
init_args="subsample",
trainer="nested",
n_layers=1,
n_hidden=None,
architecture=None,
log_every=10,
inner_it=10,
data_minibatch=None,
lr0net=1e-3,
lr0u=1e-3,
lr0v=1e-2,
lr0z=1e-2,
init_sd=1e-3,
num_epochs=1000,
log_pseudodata=False,
**kwargs,
):
# experiment-specific hyperparameters
self.init_args = init_args
self.trainer = trainer
self.architecture, self.n_hidden, self.n_layers, self.init_sd = (
architecture,
n_hidden,
n_layers,
init_sd,
)
self.log_every, self.log_pseudodata = log_every, log_pseudodata
self.data_minibatch = data_minibatch
self.inner_it, self.num_epochs = inner_it, num_epochs
self.set_up_model()
# initialization of results data structures
(
lls_psvi,
rmses_psvi,
core_idcs_psvi,
iws_entropy,
nesses,
vs_entropy,
us,
zs,
vs,
times,
) = ([], [], [], [], [], [], [], [], [], [0])
# load the training and test data on dataloaders
self.train_loader = DataLoader(
self.train_dataset,
batch_size=self.data_minibatch,
pin_memory=True,
shuffle=True,
)
self.val_loader = DataLoader(
self.val_dataset,
batch_size=self.data_minibatch,
pin_memory=True,
shuffle=False,
)
self.test_loader = DataLoader(
self.test_dataset,
batch_size=self.data_minibatch,
pin_memory=True,
shuffle=False,
)
# initialization of pseudodata
pseudodata_init = {
"subsample": self.pseudo_subsample_init,
}
pseudodata_init[self.init_args]()
# optimization method
self.optim_net, self.optim_u = (
torch.optim.Adam(list(self.model.parameters()), lr0net),
torch.optim.Adam([self.u], lr0u),
)
if self.learn_v:
self.optim_v = torch.optim.Adam([self.v], lr0v)
if self.learn_z:
self.optim_z = torch.optim.Adam([self.z], lr0z)
optimizers = {
"nested": self.nested_step,
}
psvi_step = optimizers[self.trainer]
t_start = time.time()
# training loop
for it in tqdm(range(self.num_epochs)):
xbatch, ybatch = next(iter(self.train_loader))
xbatch, ybatch = xbatch.to(self.device, non_blocking=True), ybatch.to(
self.device, non_blocking=True
)
# evaluation
if it % self.log_every == 0:
test_rmse, test_ll = self.evaluate(**kwargs)
with torch.no_grad():
lls_psvi.append(test_ll.item())
rmses_psvi.append(test_rmse.item())
core_idcs_psvi.append(self.num_pseudo)
times.append(times[-1] + time.time() - t_start)
vs.append((self.f(self.v, 0)).clone().cpu().detach().numpy())
if self.log_pseudodata:
us.append(self.u.clone().cpu().detach().numpy())
zs.append(self.z.clone().cpu().detach().numpy())
# take a single optimization step
outer_loss = psvi_step(xbatch, ybatch)
if it % self.log_every == 0:
print(
f" \n\n\n Predictive rmse {test_rmse.item():.2f} | pred ll {test_ll.item():.2f}| outer loss {outer_loss:.0f}"
)
# store results
self.results["rmses"] = rmses_psvi
self.results["lls"] = lls_psvi
self.results["csizes"] = core_idcs_psvi
self.results["times"] = times[1:]
self.results["went"] = iws_entropy
self.results["ness"] = nesses
self.results["vent"] = vs_entropy
self.results["vs"] = vs
print("rmses : ", ["%.4f" % el for el in self.results["rmses"]])
print("lls : ", ["%.4f" % el for el in self.results["lls"]])
return self.results
## Compute predictive metrics
def evaluate(
self,
correction=True,
**kwargs,
):
def revert_norm(y_pred):
return y_pred * self.y_std + self.y_mean
assert self.mc_samples > 1
total, test_ll, rmses_unnorm = 0, 0, 0
for xt, yt in self.test_loader:
xt, yt = (
xt.to(self.device, non_blocking=True),
yt.to(self.device, non_blocking=True).squeeze(),
)
with torch.no_grad():
all_data = torch.cat((self.u, xt)).squeeze(-1)
model_out = self.model(all_data).squeeze(-1)
pseudo_out = model_out[:, : self.num_pseudo]
pseudo_ll = (
self.distr_fn(pseudo_out)
.log_prob(self.z.squeeze())
.mul(self.N * self.f(self.v, 0))
if self.num_pseudo > 0
else 0.0
).sum()
test_data_out = model_out[:, self.num_pseudo :]
sampled_nkl = sum(
m.sampled_nkl()
for m in self.model.modules()
if isinstance(m, VILinear)
)
log_weights = -pseudo_ll + sampled_nkl
weights = log_weights.softmax(0)
y_pred = torch.matmul(revert_norm(test_data_out).T, weights)
rmses_unnorm += (y_pred - yt).square().sum()
total += yt.size(0)
test_ll += self.distr_fn(y_pred).log_prob(yt.squeeze()).sum()
return (
(rmses_unnorm / float(total)).sqrt(),
test_ll / float(total),
)
## PSVI with learnable v on a simplex (with constant sum constraint)
class PSVILearnV_regressor(PSVI_regressor):
def __init__(self, learn_v=True, parameterised=True, **kwargs):
super().__init__(**kwargs)
self.learn_v, self.parameterised = learn_v, parameterised
with torch.no_grad():
self.v = torch.zeros(self.num_pseudo, device=self.device)
self.v.requires_grad_(
True
) # initialize learnable weights of coreset pseudodata to uniform
self.f = (
torch.softmax
) # transform v via softmax to keep the sum over the pseudodata fixed
## PSVI with learnable v on a simplex and learnable rescaling on total coreset likelihood
class PSVIAV_regressor(PSVILearnV_regressor):
def __init__(self, learn_v=True, **kwargs):
super().__init__(**kwargs)
self.alpha = torch.tensor([0.0], device=self.device)
self.alpha.requires_grad_(True)
self.f = lambda *x: (
torch.exp(self.alpha) * torch.softmax(x[0], x[1])
) # transform v via softmax to keep the sum over the pseudodata fixed and multiply by a learnable non-negative coefficient
self.optim_alpha = torch.optim.Adam([self.alpha], self.lr0alpha)
self.results["alpha"] = []
def evaluate(self, **kwargs):
self.results["alpha"].append(
self.alpha.clone()
.cpu()
.detach()
.numpy() # store the extra variational parameter
)
return super().evaluate(**kwargs)
def nested_step(self, xbatch, ybatch):
self.optim_u.zero_grad()
self.optim_net.zero_grad()
self.optim_alpha.zero_grad()
if self.learn_v:
self.optim_v.zero_grad()
if self.learn_z:
self.optim_z.zero_grad()
with innerloop_ctx(self.model, self.optim_net) as (fmodel, diffopt):
for in_it in range(self.inner_it):
mfvi_loss = self.inner_elbo(model=fmodel)
with torch.no_grad():
if self.register_elbos and in_it % self.log_every == 0:
self.elbos.append((1, -mfvi_loss.item()))
diffopt.step(mfvi_loss)
psvi_loss = self.psvi_elbo(xbatch, ybatch, model=fmodel)
with torch.no_grad():
if self.register_elbos:
self.elbos.append((0, -psvi_loss.item()))
psvi_loss.backward()
self.optim_u.step()
if self.learn_v:
self.optim_v.step()
self.optim_alpha.step()
if self.learn_z:
self.optim_z.step()
if self.scheduler_optim_net:
self.scheduler_optim_net.step()
nn.utils.vector_to_parameters(
nn.utils.parameters_to_vector(list(fmodel.parameters())),
self.model.parameters(),
)
return psvi_loss
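# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the coreset-weight
# reparameterisation used by PSVIAV and PSVIAV_regressor above. The weights live
# on a rescaled simplex, f(v) = exp(alpha) * softmax(v), so each weight stays
# non-negative and the total coreset evidence equals exp(alpha), a single
# learnable scalar. Self-contained toy example.
if __name__ == "__main__":
    import torch

    v = torch.zeros(5, requires_grad=True)  # unconstrained weight logits
    alpha = torch.tensor([0.0], requires_grad=True)  # log of the total coreset mass
    f = lambda *x: torch.exp(alpha) * torch.softmax(x[0], x[1])
    w = f(v, 0)
    # uniform weights summing to exp(alpha) = 1 at initialization
    print(w, w.sum().item(), torch.exp(alpha).item())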
|
Blackbox-Coresets-VI-main
|
psvi/inference/psvi_classes.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.distributions as dist
from psvi.models.neural_net import VILinear
from torch.utils.data import DataLoader
def pseudo_subsample_init(x, y, num_pseudo=20, nc=2, seed=0):
r"""
    Initialize on random subsets from each class, with an approximately equal number of points per class
"""
torch.manual_seed(seed)
N, _ = x.shape
cnt = 0
u, z = torch.Tensor([]), torch.Tensor([])
for c in range(nc):
idx_c, pts_with_c = (
torch.arange(N)[y == c],
num_pseudo // nc if c < nc - 1 else num_pseudo - cnt,
)
u, z = torch.cat(
(u, x[idx_c[torch.randperm(len(idx_c))[:pts_with_c]]])
), torch.cat((z, c * torch.ones(pts_with_c)))
cnt += num_pseudo // nc
return u.requires_grad_(True), z
def pseudo_rand_init(x, y, num_pseudo=20, nc=2, seed=0, variance=0.1):
r"""
Initialize on noisy means of the observed datapoints and random labels equally split among classes
"""
torch.manual_seed(seed)
_, D = x.shape
u = (
(x[:, :].mean() + variance * torch.randn(num_pseudo, D))
.clone()
.requires_grad_(True)
)
z = torch.Tensor([])
for c in range(nc):
z = torch.cat(
(
z,
c
* torch.ones(
num_pseudo // nc
if c < nc - 1
else num_pseudo - (nc - 1) * (num_pseudo // nc)
),
)
)
return u, z
r"""
Model-specific computations for the PSVI variational objective used to estimate the coreset posterior in the black-box SparseVI construction
"""
def elbo(net, u, z, w):
r"""
ELBO computed on (u,z): variational objective for posterior approximation using only the coreset datapoints
"""
pseudo_nll = -dist.Bernoulli(logits=net(u).squeeze(-1)).log_prob(z).matmul(w)
sampled_nkl = sum(m.sampled_nkl() for m in net.modules() if isinstance(m, VILinear))
return (pseudo_nll.sum() - sampled_nkl).sum()
def sparsevi_psvi_elbo(net, x, u, y, z, w, N): # variational objective for
r"""
PSVI-ELBO: variational objective for true data conditioned on coreset data (called in outer optimization of the sparse-bbvi construction)
"""
Nu, Nx = u.shape[0], x.shape[0]
all_data, all_labels = torch.cat((u, x)), torch.cat((z, y))
all_nlls = -dist.Bernoulli(logits=net(all_data).squeeze(-1)).log_prob(all_labels)
pseudo_nll, data_nll = N / Nu * all_nlls[:, :Nu].matmul(w), all_nlls[:, Nu:].sum(-1)
sampled_nkl = sum(m.sampled_nkl() for m in net.modules() if isinstance(m, VILinear))
log_weights = -pseudo_nll + sampled_nkl
weights = log_weights.softmax(-1).squeeze()
return weights.mul(N / Nx * data_nll - pseudo_nll).sum() - log_weights.mean()
def forward_through_coreset(net, u, x, z, y, w):
r"""
    Likelihood computations for the next coreset datapoint selection step
"""
Nu = u.shape[0]
with torch.no_grad():
all_data, all_labels = torch.cat((u, x)), torch.cat((z, y))
all_lls = dist.Bernoulli(logits=net(all_data).squeeze(-1)).log_prob(all_labels)
core_ll, data_ll = all_lls[:, :Nu], all_lls[:, Nu:]
sampled_nkl = sum(
m.sampled_nkl() for m in net.modules() if isinstance(m, VILinear)
)
log_weights = core_ll.matmul(w) + sampled_nkl
weights = log_weights.softmax(-1).squeeze()
return core_ll.T, data_ll.T, weights
def predict_through_coreset(net, xt, x, y, w=None):
r"""
Importance-weight correction for predictions using the coreset posterior
"""
Ntest = xt.shape[0]
with torch.no_grad():
all_data = torch.cat((xt, x))
all_logits = net(all_data).squeeze(-1)
pnlls = -dist.Bernoulli(logits=all_logits[:, Ntest:]).log_prob(y)
pseudo_nll = pnlls.matmul(w) if w is not None else pnlls.sum(-1)
test_data_logits = all_logits[:, :Ntest]
sampled_nkl = sum(
m.sampled_nkl() for m in net.modules() if isinstance(m, VILinear)
)
log_weights = -pseudo_nll + sampled_nkl
weights = log_weights.softmax(-1).squeeze()
return test_data_logits, weights
def make_dataloader(data, minibatch, shuffle=True):
r"""
Create pytorch dataloader from given dataset and minibatch size
"""
return DataLoader(data, batch_size=minibatch, pin_memory=True, shuffle=shuffle)
def compute_empirical_mean(dloader):
r"""
Compute the mean of the observed data distribution
"""
trainsum, nb_samples = 0., 0. # compute statistics of the training data
for data, _ in dloader:
batch_samples = data.size(0)
data = data.view(batch_samples, data.size(1), -1)
trainsum += data.mean(2).sum(0) # use with caution: might raise overflow for large datasets
nb_samples += batch_samples
return trainsum / nb_samples
def pred_on_grid(
model,
n_test_per_dim=250,
device=None,
**kwargs,
):
r"""
    Predictions over a 2-d grid for visualization of the predictive posterior on 2-d synthetic datasets
"""
_x0_test = torch.linspace(-3, 4, n_test_per_dim)
_x1_test = torch.linspace(-2, 3, n_test_per_dim)
x_test = torch.stack(torch.meshgrid(_x0_test, _x1_test), dim=-1).to(device)
with torch.no_grad():
return model(x_test.view(-1, 2)).squeeze(-1).softmax(-1).mean(0)
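# ------------------------------------------------------------------------------
# Illustrative smoke test (a sketch, not part of the original module): wires the
# helpers above together on synthetic 2-d logistic-regression data. It assumes
# that VILinear(D, 1, mc_samples=S) returns logits of shape (S, N, 1), which is
# how these helpers are used in psvi/inference/sparsebbvi.py.
if __name__ == "__main__":
    import torch.nn as nn

    torch.manual_seed(0)
    N, D, S = 64, 2, 8
    x = torch.randn(N, D)
    y = (x[:, 0] > 0).float()
    u, z = pseudo_subsample_init(x, y, num_pseudo=6, nc=2)
    w = torch.ones(u.shape[0])  # unit weights on the coreset points
    net = nn.Sequential(VILinear(D, 1, mc_samples=S))
    print("coreset ELBO:", elbo(net, u, z, w).item())
    logits, weights = predict_through_coreset(net, x, u, z, w)
    probs = torch.clamp(weights @ logits.sigmoid(), max=1)
    print("train accuracy:", probs.gt(0.5).float().eq(y).float().mean().item())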
|
Blackbox-Coresets-VI-main
|
psvi/inference/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
r"""
Incremental variational coreset utilising the PSVI objective
"""
import time
import numpy as np
import torch
import torch.distributions as dist
import torch.nn as nn
from psvi.inference.utils import (
elbo,
forward_through_coreset,
predict_through_coreset,
sparsevi_psvi_elbo,
)
from psvi.models.neural_net import make_fcnet, VILinear
from tqdm import tqdm
def run_sparsevi_with_bb_elbo(
n_layers=1,
logistic_regression=True,
n_hidden=40,
log_every=10,
lr0=1e-3,
register_elbos=False,
seed=0,
**kwargs,
):
r"""
    Incremental variational coreset construction, with a greedy selection step and coreset point weight optimization using our generalized ELBO
"""
saved_args = locals()
print("saved_args is", saved_args)
np.random.seed(seed), torch.manual_seed(seed)
elbos = []
results = {}
num_epochs, inner_it, outer_it = (
kwargs["num_epochs"],
kwargs["inner_it"],
kwargs["outer_it"],
)
x, y, xt, yt, mc_samples, data_minibatch = (
kwargs["x"],
kwargs["y"],
kwargs["xt"],
kwargs["yt"],
kwargs["mc_samples"],
kwargs["data_minibatch"],
)
N, D = x.shape
net = (
nn.Sequential(
VILinear(D, 1, mc_samples=mc_samples),
)
if logistic_regression
else make_fcnet(
D,
n_hidden,
1,
n_layers=n_layers,
linear_class=VILinear,
nonl_class=nn.ReLU,
mc_samples=mc_samples,
)
)
w = (
torch.zeros(N).clone().detach().requires_grad_(True)
) # coreset weights initialised to 0
nlls_sbbvi, accs_sbbvi, core_idcs_sbbvi = [], [], []
optim_net0 = torch.optim.Adam(
list(net.parameters()), lr0
) # optimizer for ELBO on coreset datapoints
optim_w = torch.optim.Adam([w], lr0) # optimizer for PSVI-ELBO
core_idcs = []
times = [0]
t_start = time.time()
# Grow the coreset for num_epochs iterations
for it in tqdm(range(num_epochs)):
# Evaluate coreset posterior
if it % log_every == 0:
with torch.no_grad():
test_data_logits, weights = predict_through_coreset(net, xt, x, y, w)
test_probs = torch.clamp(weights @ (test_data_logits.sigmoid()), max=1)
test_acc = test_probs.gt(0.5).float().eq(yt).float().mean()
test_nll = -dist.Bernoulli(probs=test_probs).log_prob(yt).mean()
nlls_sbbvi.append(test_nll.item())
accs_sbbvi.append(test_acc.item())
print(f"predictive accuracy: {(100*test_acc.item()):.2f}%")
core_idcs_sbbvi.append(len(core_idcs))
times.append(times[-1] + time.time() - t_start)
if kwargs["scatterplot_coreset"]:
if it == num_epochs - 1:
test_data_logits, weights = predict_through_coreset(
net, kwargs["xgrid"], x, y, w
)
test_probs = torch.clamp(weights @ (test_data_logits.sigmoid()), max=1)
r = (
test_probs.reshape(
(
int(np.sqrt(kwargs["xgrid"].shape[0])),
int(np.sqrt(kwargs["xgrid"].shape[0])),
)
),
xt,
kwargs["plot_data"],
kwargs["plot_preds"],
x[w > 0],
y[w > 0],
)
kwargs["plot_classification_with_coreset"](*r, 1, "sparse bbvi")
x_core, y_core = x[core_idcs, :], y[core_idcs]
sub_idcs, sum_scaling = (
np.random.randint(x.shape[0], size=data_minibatch),
x.shape[0] / data_minibatch,
) # sample minibatch when accessing full data and rescale corresponding log-likelihood
# 1. Approximate current coreset posterior via minimizing the ELBO on the coreset support
optim_net0.zero_grad()
for in_it in range(inner_it):
loss = elbo(net, x_core, y_core, w[core_idcs])
if register_elbos and in_it % log_every == 0:
with torch.no_grad():
elbos.append((1, -loss.item()))
loss.backward()
optim_net0.step()
with torch.no_grad():
# 2. Compute loglikelihoods for each sample using samples from the approximation to the coreset posterior
ll_core, ll_data, weights = forward_through_coreset(
net, x_core, x[sub_idcs, :], y_core, y[sub_idcs], w[core_idcs]
)
cll_data, cll_core = ll_data - torch.einsum(
"s, ns ->ns", weights, ll_data
), ll_core - torch.einsum("s, ms ->ms", weights, ll_core)
# 3. Select point to attach to the coreset next via max correlation with residual error
resid = sum_scaling * cll_data.sum(axis=0) - torch.einsum(
"m, ms ->s", w[core_idcs], cll_core
)
corrs = (
cll_data.matmul(resid)
/ torch.sqrt((cll_data**2).sum(axis=1))
/ cll_data.shape[1]
)
corecorrs = (
torch.abs(cll_core.matmul(resid))
/ torch.sqrt((cll_core**2).sum(axis=1))
/ cll_core.shape[1]
if len(core_idcs) > 0
else None
)
if corecorrs is None or corrs.max() > corecorrs.max():
                pt_idx = sub_idcs[torch.argmax(corrs)]  # candidate with maximal correlation to the residual
                if pt_idx not in core_idcs:
                    core_idcs.append(pt_idx)
# 4. Sample for updated weights and take projected gradient descent steps on the weights
x_core, y_core = x[core_idcs, :], y[core_idcs]
sub_idcs, sum_scaling = (
np.random.randint(x.shape[0], size=data_minibatch),
x.shape[0] / data_minibatch,
) # sample minibatch when accessing full data and rescale corresponding log-likelihood
for out_it in range(outer_it):
optim_w.zero_grad()
loss_joint = sparsevi_psvi_elbo(
net, x[sub_idcs, :], x_core, y[sub_idcs], y_core, w[core_idcs], N
)
if register_elbos and out_it % log_every == 0:
with torch.no_grad():
elbos.append((0, -loss_joint.item()))
loss_joint.backward()
optim_w.step()
with torch.no_grad():
torch.clamp_(w, 0)
# store results
results["accs"] = accs_sbbvi
results["nlls"] = nlls_sbbvi
results["csizes"] = core_idcs_sbbvi
results["times"] = times[1:]
results["elbos"] = elbos
return results
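# ------------------------------------------------------------------------------
# Hypothetical invocation sketch (not part of the original module): running the
# incremental construction above on synthetic binary-classification data. The
# keyword names follow the kwargs read at the top of run_sparsevi_with_bb_elbo;
# plotting is disabled so the plot_* kwargs are not required.
if __name__ == "__main__":
    torch.manual_seed(0)
    N, D = 200, 2
    x = torch.randn(N, D)
    y = (x[:, 0] + 0.3 * torch.randn(N) > 0).float()
    xt = torch.randn(50, D)
    yt = (xt[:, 0] > 0).float()
    res = run_sparsevi_with_bb_elbo(
        x=x, y=y, xt=xt, yt=yt,
        mc_samples=8, data_minibatch=64,
        num_epochs=100, inner_it=10, outer_it=10,
        scatterplot_coreset=False,
        logistic_regression=True,
    )
    print("final accuracy:", res["accs"][-1], "| coreset size:", res["csizes"][-1])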
|
Blackbox-Coresets-VI-main
|
psvi/inference/sparsebbvi.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for making ``torch.nn.Module`` subclass instances stateless."""
import abc as _abc
import typing as _typing
import warnings as _warnings
import weakref as _weakref
from collections import OrderedDict as _OrderedDict
from contextlib import contextmanager as _contextmanager
import torch as _torch
from . import utils as _utils
# ==============================================================================
# Helper functions and attributes for MonkeyPatch modules.
# ==============================================================================
_internal_attrs = {
"_backend",
"_parameters",
"_buffers",
"_backward_hooks",
"_forward_hooks",
"_forward_pre_hooks",
"_state_dict_hooks",
"_load_state_dict_pre_hooks",
"_modules",
}
_BufferType = _typing.Dict[str, _typing.Optional[_torch.Tensor]]
@_contextmanager
def _modify_internally(fmodule):
fmodule._being_modified_internally = True
yield
fmodule._being_modified_internally = False
def _patched_parameters(
self, recurse: bool = True, time: _typing.Optional[int] = None
) -> _typing.Iterable[_torch.Tensor]:
r"""Returns an iterator over monkey patched module fast parameters.
Args:
recurse (bool): if True, then yields fast parameters of this module
and all submodules. Otherwise, this *still* yields parameters of
this module and all submodules, and raises a warning. This keyword
exists only to satisfy API compatibility with
``torch.nn.Module.parameters``.
time (int or None): if None, the most recent fast parameters are
provided. The int provided stands for the number of steps since the
module was created. *Note* that the step counter is incremented
            every time parameters are updated, so this may not align with the
            number of training or evaluation steps.
Yields:
Parameter: module fast weights.
"""
if getattr(self, "_fast_params", None) is None:
raise Exception(
"Tried to get fast weights of a monkey patched module which does "
"not encapsulate fast weights."
)
if not recurse:
_warnings.warn(
"Calling parameters with recurse=False on a monkey patched module "
"still returns all the fast weights of of nested patched modules."
)
time = -1 if time is None else time
if not self.track_higher_grads and time not in (-1, 0):
raise ValueError(
"The patched model is not tracking higher gradients. Only the "
"latest parameters are available."
)
return iter(self._fast_params[time])
class _MonkeyPatchBase(_abc.ABC, _torch.nn.Module):
@_abc.abstractmethod
def __init__(self) -> None:
self._param_mapping: _typing.List[int] = []
self._being_modified_internally: bool = True
self._track_higher_grads: bool = True
def forward(self):
raise NotImplementedError(
"The monkey-patching logic has failed to override self.forward "
"on the new module, or you tried calling forward on a patched "
"version of a module which doesn't have forward (e.g. ModuleList)."
)
def _expand_params(
self, params: _typing.List[_torch.Tensor]
) -> _typing.List[_torch.Tensor]:
expanded = []
for index in self._param_mapping:
expanded.append(params[index])
return expanded
@property
def init_fast_params(self):
if not self.track_higher_grads:
raise Exception(
"Cannot get initial parameters when not tracking higher " "gradients."
)
return self._fast_params[0]
@property
def fast_params(self):
return None if self._fast_params is None else self._fast_params[-1]
@fast_params.setter
def fast_params(self, value):
value = list(value)
if self._fast_params is None:
self._fast_params = []
if self.track_higher_grads:
self._fast_params.append(value)
else:
self._fast_params[0] = value
@property
def track_higher_grads(self):
return self._track_higher_grads
@track_higher_grads.setter
def track_higher_grads(self, value):
if not isinstance(value, bool):
raise ValueError("Expected boolean argument. Got: {}.".format(type(value)))
self._track_higher_grads = value
def buffer_sync(
module: _torch.nn.Module,
fmodule: _MonkeyPatchBase,
device: _typing.Optional[_torch.device] = None,
) -> None:
r"""One off sync (copy) of buffers in ``fmodule`` with those from ``module``."""
for key, value in module._buffers.items():
if not _torch.is_tensor(value):
fmodule._buffers[key] = value
elif device is None:
fmodule._buffers[key] = value.clone().detach()
else:
fmodule._buffers[key] = value.clone().detach().to(device)
for name, child in module._modules.items():
if name in fmodule._modules:
buffer_sync(child, fmodule._modules[name], device)
else:
raise KeyError(
"Did not find expected submodule "
"{} of monkey-patched module {}.".format(name, fmodule)
)
# ==============================================================================
# Helper class to use instead of actual torch.nn.Parameters when patching.
# ==============================================================================
class _ParameterPlaceholder:
def __init__(self, name: str) -> None:
self._param_name = name
def __repr__(self) -> str:
return 'Parameter placeholder ("{}")'.format(self._param_name)
_ParameterPlaceholder.__name__ = "ParameterPlaceholder"
_ParameterPlaceholder.__qualname__ = "ParameterPlaceholder"
# ==============================================================================
# Helper function for recursively patching submodules.
# ==============================================================================
def _make_functional(
module: _torch.nn.Module,
params_box: _typing.Sequence[_typing.Optional[_typing.List[_torch.Tensor]]],
params_offset: int,
root_patched: _typing.Optional[_MonkeyPatchBase] = None,
) -> _typing.Tuple[int, _MonkeyPatchBase, _typing.Type[_MonkeyPatchBase]]:
if isinstance(module, _MonkeyPatchBase):
raise ValueError(
"Monkey-patching monkey-patched modules is untested uncharted "
"territory, so we're going to assume it's done in error. If you "
"are doing this intentionally and need this to be supported, "
"contact the developers of this library."
)
param_names = list(
name
for name in module._parameters.keys()
if module._parameters[name] is not None
)
_ModuleType: _typing.Type[_torch.nn.Module] = module.__class__
# type checking of next line disabled as mypy is iffy with dynamic types
class MonkeyPatched(_ModuleType, _MonkeyPatchBase): # type: ignore
_wrapped_name = type(module).__name__
def __init__(self, original_params, root) -> None:
_torch.nn.Module.__init__(self)
_MonkeyPatchBase.__init__(self)
self._root_ref = _weakref.ref(root) if root else None
self._fast_params = None
self._param_names = param_names
self._original_params = original_params
# for pretty printing
self._parameters = _OrderedDict(
(name, _ParameterPlaceholder(name)) for name in self._param_names
)
self._modules: _typing.Dict[str, _MonkeyPatchBase] = _OrderedDict()
@property
def direct_submodule_call(self):
return params_box[0] is None
@property
def is_root(self):
return self._root_ref is None
@property
def root(self):
if self.is_root:
return self
else:
return self._root_ref()
def __setattr__(self, name, value):
def remove_from(*dicts):
for d in dicts:
if name in d:
del d[name]
params = self.__dict__.get("_parameters")
if params is not None and name in params:
if not isinstance(value, _torch.Tensor):
raise TypeError(
"Require Tensor as fast weights. "
"Got {}".format(_torch.typename(value))
)
if not self._being_modified_internally:
# Additional behaviour for when fast weights are being
# directly modified goes here:
old_value = self._parameters[name]
fast_params = self.root.fast_params[:]
if not fast_params:
raise Exception(
"Cannot assign parameters to patched module which "
"does not have implicit fast parameters."
)
replacement_index = _utils._find_param_in_list(
old_value, fast_params
)
fast_params[replacement_index] = value
self.update_params(fast_params)
# Change parameters in place, usually during boxed_forward pass
self._parameters[name] = value
else:
modules = self.__dict__.get("_modules")
if isinstance(value, _torch.nn.Module):
if modules is None:
raise AttributeError(
"cannot assign module before Module.__init__() " "call"
)
remove_from(self.__dict__, self._parameters, self._buffers)
modules[name] = value
elif modules is not None and name in modules:
if value is not None:
raise TypeError(
(
"cannot assign '{}' "
"as child module '{}'"
"(torch.nn.Module or None expected)"
).format(_torch.typename(value), name)
)
modules[name] = value
else:
buffers = self.__dict__.get("_buffers")
if buffers is not None and name in buffers:
if value is not None and not isinstance(value, _torch.Tensor):
raise TypeError(
"cannot assign '{}' as buffer '{}' "
"(torch.Tensor or None expected)".format(
_torch.typename(value), name
)
)
buffers[name] = value
else:
object.__setattr__(self, name, value)
MonkeyPatched.__name__ = "InnerFunctional" + type(module).__name__
MonkeyPatched.__qualname__ = MonkeyPatched.__name__
fmodule = MonkeyPatched(module.parameters(), root=root_patched)
# If a root module hasn't been defined yet, this fmodule is the root
if not root_patched:
root_patched = fmodule
# use 1 as dummy list item since we are only counting
num_params = len([1 for p in module._parameters.values() if p is not None])
# Copy over all attributes
for name, attr in module.__dict__.items():
if name in _internal_attrs:
continue
setattr(fmodule, name, attr)
# Deal with "None"-style params
with _modify_internally(fmodule):
for name, attr in module.__dict__["_parameters"].items():
if isinstance(attr, _torch.nn.Parameter):
continue
else:
setattr(fmodule, name, attr)
child_params_offset = params_offset + num_params
for name, child in module._modules.items():
child_params_offset, fchild, _ = _make_functional(
child, params_box, child_params_offset, root_patched
)
fmodule._modules[name] = fchild
setattr(fmodule, name, fchild)
true_forward = type(module).forward
def patched_forward(self, *args, params=None, **kwargs):
if self.direct_submodule_call:
            # If submodule was called directly, run initialisation that happens
# at top level call. If *full set of params* is provided here, it
# will use those. If not, it will fall back on fast weights.
# In the future, we should be able to support passing only the
# submodule (+ children) weights here, but that's not simple.
self.root._refill_params_box(params)
with _modify_internally(self):
for name, param in zip(
self._param_names,
params_box[0][params_offset : params_offset + num_params],
):
setattr(self, name, param)
# This snippet deals with torch.nn.{RNN,GRU,LSTM}
if hasattr(self, "_flat_weights_names"):
self._flat_weights = [
self._parameters[wn] for wn in self._flat_weights_names
]
# Call true_forward after some checks
with _warnings.catch_warnings():
            # If running RNNs on GPU, suppress the warnings due to flattening
# not happening here. Maybe we should raise a warning of our own?
is_RNN = isinstance(module, _torch.nn.RNNBase)
if is_RNN and _torch.cuda.is_available():
_warnings.simplefilter("ignore", category=UserWarning)
return true_forward(self, *args, **kwargs)
setattr(MonkeyPatched, "forward", patched_forward)
def flatten_parameters(self):
return # no-op
# This (hopefully) avoids trouble on GPU with torch.nn.{RNN,GRU,LSTM}
if hasattr(module, "flatten_parameters"):
setattr(MonkeyPatched, "flatten_parameters", flatten_parameters)
return child_params_offset, fmodule, type(fmodule)
def _update_patched_params(
fmodule: _MonkeyPatchBase,
params_box: _typing.Sequence[_typing.List[_torch.Tensor]],
params_offset: int,
) -> int:
num_params = len([1 for p in fmodule._parameters.values() if p is not None])
child_params_offset = params_offset + num_params
for name, child in fmodule._modules.items():
child_params_offset = _update_patched_params(
child, params_box, child_params_offset
)
with _modify_internally(fmodule):
for name, param in zip(
fmodule._param_names,
params_box[0][params_offset : params_offset + num_params],
):
setattr(fmodule, name, param)
return child_params_offset
# ==============================================================================
# The main function which does the monkey patching.
# ==============================================================================
_EncapsulatorType = _typing.Optional[
_typing.Callable[[_MonkeyPatchBase, _torch.nn.Module], None]
]
def make_functional(
module: _torch.nn.Module, encapsulator: _EncapsulatorType = None
) -> _MonkeyPatchBase:
r"""Returns a stateless version of an ``nn.Module`` instance."""
params_box = [None]
_, fmodule, MonkeyPatched = _make_functional(module, params_box, 0)
top_name = "Functional" + MonkeyPatched._wrapped_name
MonkeyPatched.__name__ = MonkeyPatched.__qualname__ = top_name
MonkeyPatched.boxed_forward = MonkeyPatched.forward
param_mapping = _utils._get_param_mapping(module, [], [])
setattr(fmodule, "_param_mapping", param_mapping)
def _refill_params_box(self, params):
if params is not None:
self.fast_params = params # update view on latest fast params
elif self.fast_params is None:
raise ValueError(
"params keyword must be provided if patched module not "
"tracking its own fast parameters"
)
# Copy fast parameters into params_box for use in boxed_forward
params_box[0] = self._expand_params(self.fast_params)
def _patched_forward(self, *args, params=None, **kwargs):
self._refill_params_box(params)
output = self.boxed_forward(*args, **kwargs)
# Clean up
params_box[0] = None
return output
def _update_params(self, params):
self.fast_params = params
params = self._expand_params(params)
_update_patched_params(self, [params], 0)
setattr(MonkeyPatched, "forward", _patched_forward)
setattr(MonkeyPatched, "parameters", _patched_parameters)
setattr(MonkeyPatched, "update_params", _update_params)
setattr(MonkeyPatched, "_refill_params_box", _refill_params_box)
if encapsulator is not None:
encapsulator(fmodule, module)
return fmodule
# ==============================================================================
# Convenience functions and decorators for hiding away a lot of the complexity
# of creating patched modules, taking their parameters, and linking patched
# modules to a differentiable optimizer.
# ==============================================================================
def monkeypatch(
module: _torch.nn.Module,
device: _typing.Optional[_torch.device] = None,
copy_initial_weights: bool = True,
track_higher_grads: bool = True,
) -> _MonkeyPatchBase:
r"""Create a monkey-patched stateless version of a module.
This function produces a monkey-patched version of a module, and returns a
copy of its parameters for use as fast weights. Where the original module
or any of its submodules have state (e.g. batch norm), this will be copied
too, but further updates (e.g. during inner loop training) will cause these
to diverge without changing the state of the original module.
Args:
module: a ``torch.nn.Module`` subclass instance.
device (optional): a device to cast the fast weights and state to.
copy_initial_weights: if True, the weights of the patched module are
copied to form the initial weights of the patched module, and thus
are not part of the gradient tape when unrolling the patched module.
If this is set to False, the actual module weights will be the
initial weights of the patched module. This is useful when doing
MAML, for example.
        track_higher_grads: if True, during unrolled optimization the graph will be
retained, and the fast weights will bear grad funcs, so as to permit
backpropagation through the optimization process. Setting this to
False allows ``monkeypatch`` to be used in "test mode", without
potentially tracking higher order gradients. This can be useful when
running the training loop at test time, e.g. in k-shot learning
experiments, without incurring a significant memory overhead.
Returns:
``fmodule``: a "stateless" version of the original module, for which calls
to forward take the additional kwarg-only parameter ``params``, which
should be a list of torch tensors requiring gradients, ideally
provided by this function (see below) or by an update step from one
of the optimizers in ``higher.optim``.
"""
def encapsulator(fmodule: _MonkeyPatchBase, module: _torch.nn.Module) -> None:
if copy_initial_weights:
params = _utils.get_func_params(module, device=device)
else:
params = [
p.clone() if device is None else p.clone().to(device)
for p in module.parameters()
]
buffer_sync(module, fmodule, device)
fmodule.update_params(params)
fmodule = make_functional(module, encapsulator=encapsulator)
fmodule.track_higher_grads = track_higher_grads
return fmodule
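# ==============================================================================
# Illustrative usage sketch (not part of the original module): patch a small
# module and call it with explicitly supplied parameters, mirroring how
# psvi/inference/psvi_classes.py calls `fmodel(x, params=...)` in its
# hyper_step methods. Treat this as an assumption-labelled example only.
# ==============================================================================
if __name__ == "__main__":
    lin = _torch.nn.Linear(3, 1)
    flin = monkeypatch(lin, copy_initial_weights=True)
    x = _torch.randn(4, 3)
    y0 = flin(x)  # forward pass using the encapsulated fast weights
    params = [p.detach().clone().requires_grad_(True) for p in flin.parameters()]
    y1 = flin(x, params=params)  # forward pass using the supplied parameters
    grads = _torch.autograd.grad(y1.sum(), params)  # gradients w.r.t. supplied params
    print(y0.shape, y1.shape, [g.shape for g in grads])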
|
Blackbox-Coresets-VI-main
|
psvi/robust_higher/patch.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing as _typing
from contextlib import contextmanager as _contextmanager
import torch as _torch
from . import optim
from .patch import monkeypatch
@_contextmanager
def innerloop_ctx(
model: _torch.nn.Module,
opt: _torch.optim.Optimizer,
device: _typing.Optional[_torch.device] = None,
copy_initial_weights: bool = True,
override: optim._OverrideType = None,
track_higher_grads: bool = True,
):
r"""A context manager for writing differentiable inner loops.
Args:
model: a ``torch.nn.Module`` subclass instance.
opt: an existing optimizer, assumed to be an instance of
``torch.optim.Optimizer``, of a supported type which is either
defined in ``torch.optim``, or a custom implementation which has
been added to higher at runtime by using ``higher.register_optim``.
We assume this optimizer tracks the parameters (or some subset
thereof) of a single ``torch.nn.Module`` instance, with support for
parameter groups.
device (optional): a device to cast the fast weights and state to. If
not specified, the device used for corresponding weights of
``model`` will be used.
copy_initial_weights: if True, the weights of the original module are
copied to form the initial weights of the patched module, and thus
are not part of the gradient tape when unrolling the patched module.
If this is set to False, the actual module weights will be the
initial weights of the patched module. This is useful when doing
MAML, for example.
override (optional): a dictionary mapping optimizer settings (i.e. those
which would be passed to the optimizer constructor or provided
within parameter groups) to either singleton lists of override
values, or to a list of override values of length equal to the
number of parameter groups. If a single override is provided for a
keyword, it is used for all parameter groups. If a list is provided,
the ``i``\ th element of the list overrides the corresponding
setting in the ``i``\ th parameter group. This permits the passing
of tensors requiring gradient to differentiable optimizers for use
as optimizer settings.
track_higher_grads: if True, during unrolled optimization the graph will be
retained, and the fast weights will bear grad funcs, so as to permit
backpropagation through the optimization process. Setting this to
False allows ``innerloop_ctx`` to be used in "test mode", without
potentially tracking higher order gradients. This can be useful when
running the training loop at test time, e.g. in k-shot learning
experiments, without incurring a significant memory overhead.
Yields:
A ``(fmodule, diffopt)`` tuple, where ``fmodule`` is a "stateless"
version of the original module, for which calls to forward take the
additional kwarg-only parameter ``params``, which should be a list of
torch tensors requiring gradients, ideally provided by this function
(see below) or by an update step from one of the optimizers in
``higher.optim``, and ``diffopt`` is an initialized
``DifferentiableOptimizer`` instance of the right subtype.
"""
fmodel = monkeypatch(
model,
device,
copy_initial_weights=copy_initial_weights,
track_higher_grads=track_higher_grads,
)
diffopt = optim.get_diff_optim(
opt,
model.parameters(),
fmodel=fmodel,
device=device,
override=override,
track_higher_grads=track_higher_grads,
)
yield fmodel, diffopt
__all__: list = ["innerloop_ctx"]
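# ---------------------------------------------------------------------------
# An illustrative MAML-style sketch of how ``innerloop_ctx`` is typically
# driven: unroll a few differentiable inner steps on the patched copy, then
# backpropagate the outer loss to the original parameters. The model, data,
# step count and learning rates are assumptions for the example;
# ``copy_initial_weights=False`` is chosen so outer gradients reach
# ``model.parameters()``.
def _example_innerloop_usage():
    import torch

    model = torch.nn.Linear(4, 1)
    inner_opt = torch.optim.SGD(model.parameters(), lr=0.1)
    meta_opt = torch.optim.Adam(model.parameters(), lr=1e-3)
    x, y = torch.randn(8, 4), torch.randn(8, 1)

    meta_opt.zero_grad()
    with innerloop_ctx(model, inner_opt, copy_initial_weights=False) as (fmodel, diffopt):
        for _ in range(3):  # unrolled inner loop
            inner_loss = torch.nn.functional.mse_loss(fmodel(x), y)
            diffopt.step(inner_loss)  # differentiable update of fast weights
        outer_loss = torch.nn.functional.mse_loss(fmodel(x), y)
        outer_loss.backward()  # gradients flow back to model.parameters()
    meta_opt.step()
    return float(outer_loss)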
|
Blackbox-Coresets-VI-main
|
psvi/robust_higher/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for components of ``higher``\ ."""
import typing as _typing
import torch as _torch
_T = _typing.TypeVar("_T")
_U = _typing.TypeVar("_U")
def _copy_tensor(
t: _torch.Tensor, safe_copy: bool, device: _typing.Optional[_torch.device] = None
) -> _torch.Tensor:
if safe_copy:
t = t.clone().detach().requires_grad_(t.requires_grad)
else:
t = t.detach().requires_grad_(t.requires_grad)
t = t if device is None else t.to(device)
return t
def _recursive_copy_and_cast(
target: _typing.Union[list, tuple, dict, set, _torch.Tensor],
device: _typing.Optional[_torch.device],
) -> _torch.Tensor:
def map_fn(x):
if _torch.is_tensor(x):
return _copy_tensor(x, True, device=device)
else:
return x
return _recursive_map(target, map_fn)
def _recursive_map(
target: _typing.Union[list, tuple, dict, set, _T],
map_fn: _typing.Callable[[_T], _U],
) -> _typing.Union[list, tuple, dict, set, _U]:
if isinstance(target, list):
return type(target)([_recursive_map(x, map_fn) for x in target])
elif isinstance(target, tuple):
return type(target)([_recursive_map(x, map_fn) for x in target])
elif isinstance(target, dict):
return type(target)({k: _recursive_map(v, map_fn) for k, v in target.items()})
elif isinstance(target, set):
return type(target)({_recursive_map(x, map_fn) for x in target})
else:
return map_fn(target)
def _is_container(target: _typing.Any) -> bool:
flag = (
isinstance(target, list)
or isinstance(target, tuple)
or isinstance(target, dict)
or isinstance(target, set)
)
return flag
def _find_param_in_list(
param: _torch.Tensor, l: _typing.Iterable[_torch.Tensor]
) -> _typing.Optional[int]:
for i, p in enumerate(l):
if p is param:
return i
else:
return None
def _get_param_mapping(
module: _torch.nn.Module,
seen: _typing.List[_torch.Tensor],
mapping: _typing.List[int],
) -> _typing.List[int]:
for param in module._parameters.values():
if param is None:
continue
found = _find_param_in_list(param, seen)
if found is None:
mapping.append(len(seen))
seen.append(param)
else:
mapping.append(found)
for name, child in module._modules.items():
_ = _get_param_mapping(child, seen, mapping)
return mapping
def flatten(x: _typing.Any) -> _typing.List[_typing.Any]:
r"""Returns a flattened list of objects from a nested structure."""
l: _typing.List[_typing.Any] = []
if isinstance(x, dict):
for y in x.values():
l.extend(flatten(y))
elif isinstance(x, list) or isinstance(x, set) or isinstance(x, tuple):
for y in x:
l.extend(flatten(y))
else:
l.append(x)
return l
def get_func_params(
module: _torch.nn.Module,
device: _typing.Optional[_torch.device] = None,
safe_copy: bool = True,
) -> _typing.List[_torch.Tensor]:
r"""Returns a detached copy of module parameters which requires gradient."""
params = [_copy_tensor(p, safe_copy, device) for p in module.parameters()]
return params
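# ---------------------------------------------------------------------------
# A small illustrative sketch of the nested-structure helpers above on a toy
# input; the dictionary is an assumption made for the example.
def _example_nested_helpers():
    nested = {"lr": 0.1, "betas": (0.9, 0.999), "tags": ["a", "b"]}
    # ``flatten`` walks dicts/lists/tuples/sets and collects the leaves.
    assert flatten(nested) == [0.1, 0.9, 0.999, "a", "b"]
    # ``_recursive_map`` rebuilds the same structure with a function applied
    # to every leaf (here: double the float hyperparameters, keep strings).
    doubled = _recursive_map(nested, lambda x: 2 * x if isinstance(x, float) else x)
    assert isinstance(doubled["betas"], tuple)  # structure is preserved
    return doubled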
|
Blackbox-Coresets-VI-main
|
psvi/robust_higher/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All Rights Reserved.
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Differentiable optimizer wrappers around ``torch.optim`` instances."""
import abc as _abc
import collections as _collections
import copy as _copy
import math as _math
import typing as _typing
import warnings as _warnings
import torch as _torch
from . import patch as _patch, utils as _utils
_GroupedGradsType = _typing.List[_typing.List[_torch.Tensor]]
_StateType = _typing.List[_typing.DefaultDict[int, _typing.Any]]
_GradClosureType = _typing.Callable[[_torch.Tensor], _torch.Tensor]
_OverrideType = _typing.Dict[str, _typing.List[_typing.Any]]
_GradCallbackType = _typing.Callable[
[_typing.List[_torch.Tensor]], _typing.List[_torch.Tensor]
]
def _get_mask_closure(mask: _torch.Tensor) -> _GradClosureType:
def closure(grad: _torch.Tensor) -> _torch.Tensor:
grad = _torch.where(mask, _torch.zeros_like(grad), grad)
if grad.requires_grad:
grad.register_hook(_get_mask_closure(mask))
return grad
return closure
def _maybe_mask(tensor: _torch.Tensor, mask: _torch.Tensor) -> None:
if tensor.requires_grad:
tensor.register_hook(_get_mask_closure(mask))
class DifferentiableOptimizer(_abc.ABC):
def __init__(
self,
other: _torch.optim.Optimizer,
reference_params: _typing.Iterable[_torch.Tensor],
fmodel: _typing.Optional[_patch._MonkeyPatchBase] = None,
device: _typing.Optional[_torch.device] = None,
override: _typing.Optional[_OverrideType] = None,
grad_callback: _typing.Optional[_GradCallbackType] = None,
track_higher_grads: bool = True,
**kwargs,
) -> None:
r"""Initialize the optimizer with the state of an existing optimizer.
Args:
other: an existing optimizer instance.
reference_params: an iterable over the parameters of the original
model.
fmodel (optional): a patched stateless module with a view on
weights.
device (optional): the device to cast state tensors to.
override (optional): a dictionary mapping optimizer settings (i.e.
those which would be passed to the optimizer constructor or
provided within parameter groups) to either singleton lists of
override values, or to a list of override values of length equal
to the number of parameter groups. If a single override is
provided for a keyword, it is used for all parameter groups. If
a list is provided, the ``i``\ th element of the list overrides the
corresponding setting in the ``i``\ th parameter group. This permits
the passing of tensors requiring gradient to differentiable
optimizers for use as optimizer settings.
grad_callback: (optional) a single argument function which will be
applied to a list of gradients of parameters, which respects the
order specified by ``reference_params``. This can be used to
apply a function, such as gradient clipping, to all (or a
subset) of these gradients every time the step function is
called. If this keyword argument is provided when calling the
step method, its value will override the default specified here.
track_higher_grads: if True, during unrolled optimization the graph will
be retained, and the fast weights will bear grad funcs, so as to
permit backpropagation through the optimization process. Setting
this to False allows the differentiable optimizer to be used in
"test mode", without potentially tracking higher order
gradients. This can be useful when running the training loop at
test time, e.g. in k-shot learning experiments, without
incurring a significant memory overhead.
"""
reference_params = list(reference_params)
# Copy param groups and set up structures for copy state
self.param_groups = _copy.deepcopy(other.param_groups)
self._group_to_param_list: _typing.List[_typing.List[int]] = []
self.state: _StateType = [
_collections.defaultdict(dict) for _ in range(len(self.param_groups))
]
# Deal with override
if override is not None:
self._apply_override(override)
self._grad_callback = grad_callback
# Copy and cast state
zipped = zip(self.param_groups, other.param_groups)
for group_idx, (group, orig_group) in enumerate(zipped):
local_list = []
for p_idx, p in enumerate(orig_group["params"]):
if p in other.state:
self.state[group_idx][p_idx] = {
k: _utils._recursive_copy_and_cast(v, device)
for k, v in other.state[p].items()
}
index = _utils._find_param_in_list(p, reference_params)
if index is None:
raise ValueError(
"Could not find parameter {} in reference parameters.".format(
str(p)
)
)
local_list.append(index)
group["params"] = [None] * len(group["params"])
self._group_to_param_list.append(local_list)
self._fmodel = fmodel
self._track_higher_grads = track_higher_grads
def _apply_override(self, override: _OverrideType) -> None:
for k, v in override.items():
# Sanity check
if (len(v) != 1) and (len(v) != len(self.param_groups)):
raise ValueError(
"Mismatch between the number of override tensors for "
"optimizer parameter {} and the number of "
"parameter groups.".format(k)
)
for group_idx, group in enumerate(self.param_groups):
group[k] = v[0] if len(v) == 1 else v[group_idx]
def step(
self,
loss: _torch.Tensor,
params: _typing.Iterable[_torch.Tensor] = None,
override: _typing.Optional[_OverrideType] = None,
grad_callback: _typing.Optional[_GradCallbackType] = None,
**kwargs,
) -> _typing.Iterable[_torch.Tensor]:
r"""Perform a model update.
This would be used by replacing the normal sequence::
opt.zero_grad()
loss.backward()
opt.step()
with::
diffopt.step(loss)
Args:
loss: the loss tensor.
params (optional): the parameters with regard to which we measure
the loss. These must be provided if the differentiable optimizer
did not receive a patched model with a view over its own fast
weights at initialisation. If there is such a model, and params
are provided, they will overwrite the params of the encapsulated
model.
override (optional): a dictionary mapping optimizer settings (i.e.
those which would be passed to the optimizer constructor or
provided within parameter groups) to either singleton lists of
override values, or to a list of override values of length equal
to the number of parameter groups. If a single override is
provided for a keyword, it is used for all parameter groups. If
a list is provided, the ``i``\ th element of the list overrides
the corresponding setting in the ``i``\ th parameter group. This
permits the passing of tensors requiring gradient to
differentiable optimizers for use as optimizer settings. Setting
override here has highest precedence, i.e. it will override any
tensors provided as override during the creation of the
differentiable optimizer, where there is a name clash.
grad_callback: (optional) a single argument function which will be
applied to a list of gradients of parameters, which respects the
order specified by ``reference_params``. This can be used to
apply a function, such as gradient clipping, to all (or a
subset) of these gradients every time the step function is
called. This callback overrides the default provided when
constructing the differentiable optimizer.
Returns:
The updated parameters, which will individually have ``grad_fn``\ s
of their own. If the optimizer has an encapsulated patched model,
its view over its own fast weights will be updated with these
params.
"""
# Deal with override
if override is not None:
self._apply_override(override)
if self._fmodel is None or self._fmodel.fast_params is None:
if params is None:
raise ValueError(
"params kwarg must be passed to step if the differentiable "
"optimizer doesn't have a view on a patched model with "
"params."
)
else:
params = self._fmodel.fast_params if params is None else params
params = list(params)
# This allows us to gracefully deal with cases where params are frozen.
grad_targets = [
p if p.requires_grad else _torch.tensor([], requires_grad=True)
for p in params
]
all_grads = _torch.autograd.grad(
loss,
grad_targets,
create_graph=self._track_higher_grads,
allow_unused=True,  # unused (e.g. frozen) params get None gradients
)
if grad_callback is not None:
all_grads = grad_callback(all_grads)
elif self._grad_callback is not None:
all_grads = self._grad_callback(all_grads)
grouped_grads = []
for group, mapping in zip(self.param_groups, self._group_to_param_list):
grads = []
for i, index in enumerate(mapping):
group["params"][i] = params[index]
grads.append(all_grads[index])
grouped_grads.append(grads)
self._update(grouped_grads)
new_params = params[:]
for group, mapping in zip(self.param_groups, self._group_to_param_list):
for p, index in zip(group["params"], mapping):
if self._track_higher_grads:
new_params[index] = p
else:
new_params[index] = p.detach().requires_grad_()
if self._fmodel is not None:
self._fmodel.update_params(new_params)
return new_params
@_abc.abstractmethod
def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
pass
class DifferentiableSGD(DifferentiableOptimizer):
r"""A differentiable version of the SGD optimizer.
This optimizer creates a gradient tape as it updates parameters."""
def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
zipped = zip(self.param_groups, grouped_grads)
for group_idx, (group, grads) in enumerate(zipped):
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
if g is None:
continue
if weight_decay != 0:
g = _add(g, weight_decay, p)
if momentum != 0:
param_state = self.state[group_idx][p_idx]
if "momentum_buffer" not in param_state:
buf = param_state["momentum_buffer"] = g
else:
buf = param_state["momentum_buffer"]
buf = _add(buf.mul(momentum), 1 - dampening, g)
param_state["momentum_buffer"] = buf
if nesterov:
g = _add(g, momentum, buf)
else:
g = buf
group["params"][p_idx] = _add(p, -group["lr"], g)
class DifferentiableAdam(DifferentiableOptimizer):
r"""A differentiable version of the Adam optimizer.
This optimizer creates a gradient tape as it updates parameters."""
def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
zipped = zip(self.param_groups, grouped_grads)
for group_idx, (group, grads) in enumerate(zipped):
amsgrad = group["amsgrad"]
beta1, beta2 = group["betas"]
weight_decay = group["weight_decay"]
for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
if g is None:
continue
state = self.state[group_idx][p_idx]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = _torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = _torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. mov. avg. of sq. grad. vals
state["max_exp_avg_sq"] = _torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
state["step"] += 1
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
if weight_decay != 0:
g = g + (weight_decay * p)
# Decay the first and second moment running average coefficient
state["exp_avg"] = exp_avg = (exp_avg * beta1) + (1 - beta1) * g
state["exp_avg_sq"] = exp_avg_sq = (exp_avg_sq * beta2) + (
1 - beta2
) * g * g
# Deal with stability issues
mask = exp_avg_sq == 0.0
_maybe_mask(exp_avg_sq, mask)
exp_avg_sq = exp_avg_sq + 1e-8
if amsgrad:
# Maintains the max of all 2nd moment running avg. till now
state["max_exp_avg_sq"] = max_exp_avg_sq = _torch.max(
max_exp_avg_sq, exp_avg_sq
)
# Use the max. for normalizing running avg. of gradient
denom = _add(
max_exp_avg_sq.sqrt() / _math.sqrt(bias_correction2),
group["eps"],
)
else:
denom = _add(
exp_avg_sq.sqrt() / _math.sqrt(bias_correction2), group["eps"]
)
step_size = group["lr"] / bias_correction1
group["params"][p_idx] = _addcdiv(p, -step_size, exp_avg, denom)
class DifferentiableAdamW(DifferentiableOptimizer):
r"""A differentiable version of the AdamW optimizer.
This optimizer creates a gradient tape as it updates parameters."""
def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
zipped = zip(self.param_groups, grouped_grads)
for group_idx, (group, grads) in enumerate(zipped):
amsgrad = group["amsgrad"]
beta1, beta2 = group["betas"]
for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
if g is None:
continue
# Perform stepweight decay
p = p * (1 - group["lr"] * group["weight_decay"])
if g.is_sparse:
raise RuntimeError("AdamW does not support sparse gradients")
state = self.state[group_idx][p_idx]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = _torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = _torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. mov. avg. of sq. grad. vals
state["max_exp_avg_sq"] = _torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
state["step"] += 1
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
# Decay the first and second moment running average coefficient
state["exp_avg"] = exp_avg = (exp_avg * beta1) + (1 - beta1) * g
state["exp_avg_sq"] = exp_avg_sq = (exp_avg_sq * beta2) + (
1 - beta2
) * g * g
# Deal with stability issues
mask = exp_avg_sq == 0.0
_maybe_mask(exp_avg_sq, mask)
if amsgrad:
# Maintains the max of all 2nd moment running avg. till now
state["max_exp_avg_sq"] = max_exp_avg_sq = _torch.max(
max_exp_avg_sq, exp_avg_sq
)
# Use the max. for normalizing running avg. of gradient
denom = _add(
max_exp_avg_sq.sqrt() / _math.sqrt(bias_correction2),
group["eps"],
)
else:
denom = _add(
exp_avg_sq.sqrt() / _math.sqrt(bias_correction2), group["eps"]
)
step_size = group["lr"] / bias_correction1
group["params"][p_idx] = _addcdiv(p, -step_size, exp_avg, denom)
class DifferentiableAdadelta(DifferentiableOptimizer):
r"""A differentiable version of the Adadelta optimizer.
This optimizer creates a gradient tape as it updates parameters."""
def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
zipped = zip(self.param_groups, grouped_grads)
for group_idx, (group, grads) in enumerate(zipped):
rho, eps = group["rho"], group["eps"]
for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
if g is None:
continue
if g.data.is_sparse:
raise RuntimeError("Adadelta does not support sparse gradients")
state = self.state[group_idx][p_idx]
# State initialization
if len(state) == 0:
state["step"] = 0
state["square_avg"] = _torch.zeros_like(p.data)
state["acc_delta"] = _torch.zeros_like(p.data)
square_avg, acc_delta = state["square_avg"], state["acc_delta"]
state["step"] += 1
if group["weight_decay"] != 0:
g = _add(g, group["weight_decay"], p)
square_avg = _addcmul(square_avg.mul(rho), 1 - rho, g, g)
state["square_avg"] = square_avg
std = _add(square_avg, eps).sqrt()
delta = _add(acc_delta, eps).sqrt().div(std).mul(g)
state["acc_delta"] = _addcmul(acc_delta.mul(rho), 1 - rho, delta, delta)
group["params"][p_idx] = _add(p, -group["lr"], delta)
class DifferentiableAdagrad(DifferentiableOptimizer):
r"""A differentiable version of the Adagrad optimizer.
This optimizer creates a gradient tape as it updates parameters."""
def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
zipped = zip(self.param_groups, grouped_grads)
for group_idx, (group, grads) in enumerate(zipped):
for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
if g is None:
continue
state = self.state[group_idx][p_idx]
state["step"] += 1
if group["weight_decay"] != 0:
if g.data.is_sparse:
raise RuntimeError(
"weight_decay option is not compatible with sparse "
"gradients"
)
g = _add(g, group["weight_decay"], p)
clr = group["lr"] / (1 + (state["step"] - 1) * group["lr_decay"])
if g.is_sparse:
# TODO: implement support for sparse gradients.
raise NotImplementedError(
"sparse gradient support for DifferentiableAdagrad not "
"implemented yet."
)
else:
state["sum"] = sum_ = _addcmul(state["sum"], 1, g, g)
mask = sum_ == 0.0
_maybe_mask(sum_, mask)
std = _add(
state["sum"].sqrt(), group["eps"] if "eps" in group else 1e-10
)
group["params"][p_idx] = _addcdiv(p, -clr, g, std)
class DifferentiableAdamax(DifferentiableOptimizer):
r"""A differentiable version of the Adamax optimizer.
This optimizer creates a gradient tape as it updates parameters."""
def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
zipped = zip(self.param_groups, grouped_grads)
for group_idx, (group, grads) in enumerate(zipped):
for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
if g is None:
continue
if g.is_sparse:
raise RuntimeError("Adamax does not support sparse gradients")
state = self.state[group_idx][p_idx]
# State initialization
if len(state) == 0:
state["step"] = 0
state["exp_avg"] = _torch.zeros_like(p.data)
state["exp_inf"] = _torch.zeros_like(p.data)
exp_avg, exp_inf = state["exp_avg"], state["exp_inf"]
beta1, beta2 = group["betas"]
eps = group["eps"]
state["step"] += 1
if group["weight_decay"] != 0:
g = _add(g, group["weight_decay"], p)
# Update biased first moment estimate
state["exp_avg"] = exp_avg = _add(exp_avg.mul(beta1), 1 - beta1, g)
# Update the exponentially weighted infinity norm.
state["exp_inf"] = exp_inf = exp_inf.mul(beta2).unsqueeze(0)
norm_buf = _torch.cat([exp_inf, _add(g.abs(), eps).unsqueeze(0)], 0)
exp_inf, _ = _torch.max(norm_buf, 0, keepdim=False)
state["exp_inf"] = exp_inf
bias_correction = 1 - beta1 ** state["step"]
clr = group["lr"] / bias_correction
group["params"][p_idx] = _addcdiv(p, -clr, exp_avg, exp_inf)
class DifferentiableASGD(DifferentiableOptimizer):
r"""A differentiable version of the ASGD optimizer.
This optimizer creates a gradient tape as it updates parameters."""
def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
zipped = zip(self.param_groups, grouped_grads)
for group_idx, (group, grads) in enumerate(zipped):
for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
if g is None:
continue
if g.is_sparse:
raise RuntimeError("ASGD does not support sparse gradients")
state = self.state[group_idx][p_idx]
# State initialization
if len(state) == 0:
state["step"] = 0
state["eta"] = group["lr"]
state["mu"] = 1
state["ax"] = _torch.zeros_like(p.data)
state["step"] += 1
if group["weight_decay"] != 0:
g = _add(g, group["weight_decay"], p)
# decay term
p = p.mul(1 - group["lambd"] * state["eta"])
# update parameter
group["params"][p_idx] = _add(p, -state["eta"], g)
# averaging
if state["mu"] != 1:
state["ax"] = _add(state["ax"], p.sub(state["ax"]).mul(state["mu"]))
else:
state["ax"] = p
# update eta and mu
state["eta"] = group["lr"] / _math.pow(
(1 + group["lambd"] * group["lr"] * state["step"]), group["alpha"]
)
state["mu"] = 1 / max(1, state["step"] - group["t0"])
class DifferentiableRMSprop(DifferentiableOptimizer):
r"""A differentiable version of the RMSprop optimizer.
This optimizer creates a gradient tape as it updates parameters."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
_warnings.warn(
"Differentiable RMSprop suffers from gradient correctness issues. "
"Consider using another optimizer until we fix these..."
)
def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
zipped = zip(self.param_groups, grouped_grads)
for group_idx, (group, grads) in enumerate(zipped):
for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
if g is None:
continue
if g.is_sparse:
raise RuntimeError("RMSprop does not support sparse gradients")
state = self.state[group_idx][p_idx]
# State initialization
if len(state) == 0:
state["step"] = 0
state["square_avg"] = _torch.zeros_like(p.data)
if group["momentum"] > 0:
state["momentum_buffer"] = _torch.zeros_like(p.data)
if group["centered"]:
state["grad_avg"] = _torch.zeros_like(p.data)
square_avg = state["square_avg"]
alpha = group["alpha"]
state["step"] += 1
if group["weight_decay"] != 0:
g = _add(g, group["weight_decay"], p)
square_avg = _addcmul(square_avg.mul(alpha), 1 - alpha, g, g)
state["square_avg"] = square_avg
# NB: This prevents nans but is not sufficient to recover
# correct gradients.
mask = square_avg == 0.0
_maybe_mask(square_avg, mask)
if group["centered"]:
grad_avg = state["grad_avg"]
grad_avg = _add(grad_avg.mul(alpha), 1 - alpha, g)
state["grad_avg"] = grad_avg
eps = group["eps"]
avg = _add(_addcmul(square_avg, -1, grad_avg, grad_avg).sqrt(), eps)
else:
avg = _add(square_avg.sqrt(), group["eps"])
if group["momentum"] > 0:
buf = state["momentum_buffer"]
buf = _addcdiv(buf.mul(group["momentum"]), g, avg)
state["momentum_buffer"] = buf
p = _add(p, -group["lr"], buf)
else:
p = _addcdiv(p, -group["lr"], g, avg)
group["params"][p_idx] = p
class DifferentiableRprop(DifferentiableOptimizer):
r"""A differentiable version of the Rprop optimizer.
This optimizer creates a gradient tape as it updates parameters."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
_warnings.warn(
"Differentiable Rprop (correctly) yields zero second order "
"gradients, as only the sign of the gradient is used in updates. "
"Future versions will offer higher order gradients based on a "
"continuous relaxation of the forward pass."
)
def _update(self, grouped_grads: _GroupedGradsType, **kwargs) -> None:
zipped = zip(self.param_groups, grouped_grads)
for group_idx, (group, grads) in enumerate(zipped):
for p_idx, (p, g) in enumerate(zip(group["params"], grads)):
if g is None:
continue
if g.is_sparse:
raise RuntimeError("Rprop does not support sparse gradients")
state = self.state[group_idx][p_idx]
# State initialization
if len(state) == 0:
state["step"] = 0
state["prev"] = _torch.zeros_like(p.data)
state["step_size"] = g.new().resize_as_(g).fill_(group["lr"])
etaminus, etaplus = group["etas"]
step_size_min, step_size_max = group["step_sizes"]
step_size = state["step_size"]
state["step"] += 1
sign = g.mul(state["prev"]).sign()
sign[sign.gt(0)] = etaplus
sign[sign.lt(0)] = etaminus
sign[sign.eq(0)] = 1
# update stepsizes with step size updates
step_size = step_size.mul(sign).clamp(step_size_min, step_size_max)
state["step_size"] = step_size
# for dir<0, dfdx=0
# for dir>=0 dfdx=dfdx
g = _torch.where(sign.eq(etaminus), _torch.zeros_like(g), g)
# update parameters
group["params"][p_idx] = _addcmul(p, -1, g.sign(), step_size)
state["prev"] = g.clone()
_OptMappingType = _typing.Dict[
_torch.optim.Optimizer, _typing.Type[DifferentiableOptimizer]
]
_opt_mapping: _OptMappingType = {
_torch.optim.Adadelta: DifferentiableAdadelta,
_torch.optim.Adagrad: DifferentiableAdagrad,
_torch.optim.Adam: DifferentiableAdam,
_torch.optim.AdamW: DifferentiableAdamW,
_torch.optim.Adamax: DifferentiableAdamax,
_torch.optim.ASGD: DifferentiableASGD,
_torch.optim.RMSprop: DifferentiableRMSprop,
_torch.optim.Rprop: DifferentiableRprop,
_torch.optim.SGD: DifferentiableSGD,
}
def get_diff_optim(
opt: _torch.optim.Optimizer,
reference_params: _typing.Iterable[_torch.Tensor],
fmodel: _typing.Optional[_patch._MonkeyPatchBase] = None,
device: _typing.Optional[_torch.device] = None,
override: _typing.Optional[_OverrideType] = None,
track_higher_grads: bool = True,
**kwargs,
) -> DifferentiableOptimizer:
r"""Construct/initialize a differentiable version of an existing optimizer.
Args:
opt: an existing optimizer, assumed to be an instance of
``torch.optim.Optimizer``, of a supported type which is either defined
in ``torch.optim``, or a custom implementation which has been added to
higher at runtime by using ``higher.register_optim``. We assume this
optimizer tracks the parameters (or some subset thereof) of a single
``torch.nn.Module`` instance, with support for parameter groups.
reference_params: the parameters of the module tracked by ``opt``, as
returned by ``module.parameters()``.
fmodel (optional): a patched version of the ``module`` tracked by ``opt``.
It is assumed this patched instance has a view on its latest fast
weights through ``fmodel.parameters()``. If provided, it is not
necessary to pass the fast weights explicitly to the differentiable
optimizer's ``step`` function via the keyword arg ``params``. If not
provided, the fast weights to update must be provided to ``step``.
device (optional): the device to cast the optimizer state to when
creating the differentiable optimizer. If not provided, the same
device as used for the parameters tracked by ``opt`` will be used.
override (optional): a dictionary mapping optimizer settings (i.e.
those which would be passed to the optimizer constructor or
provided within parameter groups) to either singleton lists of
override values, or to a list of override values of length equal to
the number of parameter groups. If a single override is provided for
a keyword, it is used for all parameter groups. If a list is
provided, the ``i``\ th element of the list overrides the corresponding
setting in the ``i``\ th parameter group. This permits the passing of
tensors requiring gradient to differentiable optimizers for use as
optimizer settings.
track_higher_grads: if True, during unrolled optimization the graph will be
retained, and the fast weights will bear grad funcs, so as to permit
backpropagation through the optimization process. Setting this to
False allows the returned differentiable optimizer to be used in
"test mode", without potentially tracking higher order gradients.
This can be useful when running the training loop at test time,
e.g. in k-shot learning experiments, without incurring a significant
memory overhead.
Returns:
An initialized ``DifferentiableOptimizer`` instance of the right subtype.
"""
if type(opt) in _opt_mapping:
return _opt_mapping[type(opt)](
opt,
reference_params,
fmodel=fmodel,
device=device,
override=override,
track_higher_grads=track_higher_grads,
**kwargs,
)
else:
raise ValueError(
"Optimizer type {} not supported by higher yet.".format(type(opt))
)
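# ---------------------------------------------------------------------------
# An illustrative sketch: build a differentiable copy of an existing SGD
# optimizer from a patched model and drive it with ``diffopt.step(loss)``;
# the model, data and learning rate are assumptions for the example.
def _example_get_diff_optim():
    import torch

    model = torch.nn.Linear(4, 1)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    fmodel = _patch.monkeypatch(model, copy_initial_weights=True)
    diffopt = get_diff_optim(opt, model.parameters(), fmodel=fmodel)
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(fmodel(x), y)
    new_params = diffopt.step(loss)  # differentiable fast-weight update
    return list(new_params)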
def create_diff_optim(
opt_type: _typing.Type[_torch.optim.Optimizer],
opt_kwargs: _typing.Optional[_typing.Dict[str, _typing.Any]] = None,
params: _typing.Optional[_typing.List[_torch.Tensor]] = None,
fmodel: _typing.Optional[_patch._MonkeyPatchBase] = None,
device: _typing.Optional[_torch.device] = None,
override: _typing.Optional[_OverrideType] = None,
track_higher_grads: bool = True,
**kwargs,
) -> DifferentiableOptimizer:
r"""Construct a differentiable version of an new optimizer.
Args:
opt_type: the type (constructor) for a torch.optim.Optimizer subtype
from amongst the types supported by the library, or registered with
it at runtime.
opt_kwargs: a dictionary of keywords to be passed to the optimizer
constructor.
params (optional): a list of (fast) weights which the differentiable
optimizer will update. These must be provided if fmodel is not
provided. If both are provided, ``params`` is used in lieu of the weights
from ``fmodel``. These are only used for shape inference when initializing
the optimizer.
This argument can also take the same format as parameter groups,
i.e. an iterable over dictionaries which contain the 'params' key
with fast weights as value, and group-specific hyperparameters.
fmodel (optional): a patched version of the ``module`` tracked by ``opt``.
It is assumed this patched instance has a view on its latest fast
weights through ``fmodel.parameters()``. If provided, it is not
necessary to pass the fast weights explicitly to the differentiable
optimizer's ``step`` function via the keyword arg ``params``. If not
provided, the fast weights to update must be provided to ``step``.
device (optional): the device to cast the optimizer state to when
creating the differentiable optimizer. If not provided, the same
device as used for the parameters tracked by ``opt`` will be used.
override (optional): a dictionary mapping optimizer settings (i.e.
those which would be passed to the optimizer constructor or
provided within parameter groups) to either singleton lists of
override values, or to a list of override values of length equal to
the number of parameter groups. If a single override is provided for
a keyword, it is used for all parameter groups. If a list is
provided, the ``i``\ th element of the list overrides the corresponding
setting in the ``i``\ th parameter group. This permits the passing of
tensors requiring gradient to differentiable optimizers for use as
optimizer settings.
track_higher_grads: if True, during unrolled optimization the graph will be
retained, and the fast weights will bear grad funcs, so as to permit
backpropagation through the optimization process. Setting this to
False allows the returned differentiable optimizer to be used in
"test mode", without potentially tracking higher order gradients.
This can be useful when running the training loop at test time,
e.g. in k-shot learning experiments, without incurring a significant
memory overhead.
Returns:
An initialized ``DifferentiableOptimizer`` instance of the right subtype.
"""
if opt_type in _opt_mapping:
if params is not None:
params = list(params)
if isinstance(params[0], dict):
dummy = [
{
k: _torch.zeros_like(v, requires_grad=True)
if k == "params"
else v
for k, v in group.items()
}
for group in params
]
else:
dummy = [_torch.zeros_like(p, requires_grad=True) for p in params]
elif fmodel is not None:
dummy = [
_torch.zeros_like(p, requires_grad=True) for p in fmodel.parameters()
]
else:
raise ValueError("Must specify one of fmodel or params in kwargs.")
opt_kwargs = {} if opt_kwargs is None else opt_kwargs
opt = opt_type(dummy, **opt_kwargs)
return _opt_mapping[opt_type](
opt,
dummy,
fmodel=fmodel,
device=device,
override=override,
track_higher_grads=track_higher_grads,
**kwargs,
)
else:
raise ValueError(
"Optimizer type {} not supported by higher yet.".format(opt_type)
)
def register_optim(
optim_type: _torch.optim.Optimizer,
diff_optim_type: _typing.Type[DifferentiableOptimizer],
) -> None:
r"""Registers a new optimizer type for use with higher functions.
Args:
optim_type: the type of a new optimizer, assumed to be an instance of
``torch.optim.Optimizer``.
diff_optim_type: the type of a new differentiable optimizer, assumed to
be an instance of ``higher.optim.DifferentiableOptimizer`` with
functionally equivalent logic to ``optim_type``.
"""
_opt_mapping[optim_type] = diff_optim_type
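# ---------------------------------------------------------------------------
# An illustrative sketch of registering a custom optimizer: pair it with a
# DifferentiableOptimizer subclass implementing ``_update``. Both
# ``ClippedSGD`` and ``DifferentiableClippedSGD`` are hypothetical classes
# that exist only for this example.
def _example_register_custom_optim():
    import torch

    class ClippedSGD(torch.optim.SGD):
        """Hypothetical optimizer: plain SGD, used only for this sketch."""

    class DifferentiableClippedSGD(DifferentiableSGD):
        def _update(self, grouped_grads, **kwargs):
            # Clip gradients before delegating to the differentiable SGD rule.
            clipped = [
                [None if g is None else g.clamp(-1.0, 1.0) for g in grads]
                for grads in grouped_grads
            ]
            super()._update(clipped)

    register_optim(ClippedSGD, DifferentiableClippedSGD)
    return _opt_mapping[ClippedSGD]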
def get_trainable_opt_params(
opt: _torch.optim.Optimizer, device: _typing.Optional[_torch.device] = None
) -> _OverrideType:
r"""Get an override dictionary from an optimizer instance.
Args:
opt: the optimizer to obtain an override dictionary from.
device (optional): the device to cast the learnable tensors to.
Returns:
A dictionary of the format expected for the override kwarg of
differentiable optimizers. It is initialized with trainable tensors whose
values are the float and int hyperparameters found in the optimizer's
parameter groups (or structures containing these).
Heuristically, hyperparameters containing mixtures of differentiable
and non-differentiable types will be ignored (and must be manually
specified when constructing an override dict).
"""
override: _OverrideType = _collections.defaultdict(list)
def map_fn(x: _typing.Union[_torch.Tensor, int, float]) -> _torch.Tensor:
if isinstance(x, _torch.Tensor):
return x.clone().detach().requires_grad_()
else:
return _torch.tensor(float(x), device=device, requires_grad=True)
for group in opt.param_groups:
for k, v in group.items():
if k == "params":
# Ignore actual model parameters tracked by optim
continue
# Ignore hyperparameters that aren't structures containing ints
# or floats
if all(
isinstance(x, int) or isinstance(x, float) for x in _utils.flatten(v)
):
override[k].append(_utils._recursive_map(v, map_fn))
return override
def apply_trainable_opt_params(
opt: _torch.optim.Optimizer, override: _OverrideType
) -> None:
r"""Apply learned hyperparameters back to original optimizer.
Args:
opt: the original optimizer. The hyperparameters in its parameter groups
will be modified in place.
override: dictionary of the format used for the override kwarg of
differentiable optimizers.
"""
for k, v in override.items():
# Sanity check
if (len(v) != 1) and (len(v) != len(opt.param_groups)):
raise ValueError(
"Mismatch between the number of override tensors for "
"optimizer parameter {} and the number of "
"parameter groups.".format(k)
)
for group_idx, group in enumerate(opt.param_groups):
replacement = v[0] if len(v) == 1 else v[group_idx]
group[k] = _recursive_apply(replacement, group[k])
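# ---------------------------------------------------------------------------
# An illustrative round-trip sketch for learnable optimizer hyperparameters:
# extract them as trainable tensors, pass the dict as ``override`` to a
# differentiable optimizer during meta-training (elided here), then write the
# learned values back. The SGD settings are assumptions for the example.
def _example_trainable_hyperparams():
    import torch

    model = torch.nn.Linear(4, 1)
    opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    override = get_trainable_opt_params(opt)  # e.g. {"lr": [tensor(0.1)], ...}
    # ... meta-train the tensors in ``override`` via differentiable steps ...
    apply_trainable_opt_params(opt, override)  # write values back in place
    return opt.param_groups[0]["lr"]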
## Local utility functions
# TODO(egrefen): use funcs below instead of x._add, in diffopt
def _add(
tensor: _torch.Tensor,
a1: _typing.Union[float, int, _torch.Tensor],
a2: _typing.Optional[_torch.Tensor] = None,
) -> _torch.Tensor:
if a2 is None:
value: _typing.Union[_torch.Tensor, float] = 1.0
other = a1
else:
value = a1
other = a2
return tensor + (value * other)
def _addcdiv(
tensor: _torch.Tensor,
a1: _typing.Union[float, int, _torch.Tensor],
a2: _torch.Tensor,
a3: _typing.Optional[_torch.Tensor] = None,
) -> _torch.Tensor:
if a3 is None:
value: _typing.Union[_torch.Tensor, float] = 1.0
tensor1 = a1
tensor2 = a2
else:
value = a1
tensor1 = a2
tensor2 = a3
return tensor + value * (tensor1 / tensor2)
def _addcmul(
tensor: _torch.Tensor,
a1: _typing.Union[float, int, _torch.Tensor],
a2: _torch.Tensor,
a3: _typing.Optional[_torch.Tensor] = None,
) -> _torch.Tensor:
if a3 is None:
value: _typing.Union[_torch.Tensor, float] = 1.0
tensor1 = a1
tensor2 = a2
else:
value = a1
tensor1 = a2
tensor2 = a3
return tensor + (value * tensor1 * tensor2)
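# ---------------------------------------------------------------------------
# An illustrative check of the helpers above: they mimic the ``value``-first
# add/addcdiv/addcmul calling convention, but out of place so results stay on
# the autograd tape. The tensors are assumptions for the example.
def _example_arith_helpers():
    import torch

    p = torch.ones(3, requires_grad=True)
    g = torch.full((3,), 2.0)
    assert torch.allclose(_add(p, -0.5, g), p - 0.5 * g)
    assert torch.allclose(_addcmul(p, 0.1, g, g), p + 0.1 * g * g)
    assert torch.allclose(_addcdiv(p, 0.1, g, g), p + 0.1 * (g / g))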
# TODO(egrefen): this probably could be refactored into utils
def _recursive_apply(
replacement: _typing.Union[list, tuple, dict, set, _torch.Tensor],
target: _typing.Union[_torch.Tensor, int, float],
) -> _typing.Union[_torch.Tensor, int, float]:
if not isinstance(replacement, type(target)):
if isinstance(replacement, _torch.Tensor) and not _utils._is_container(target):
return type(target)(replacement.item())
raise ValueError(
"Expected an non-container type for target, but got {} with value "
"{}".format(type(target), target)
)
elif isinstance(replacement, _torch.Tensor) and isinstance(target, _torch.Tensor):
replacement = replacement.to(target.device)
target.data = replacement.data
return target
if isinstance(target, list):
return type(target)(
[_recursive_apply(r, t) for r, t in zip(replacement, target)]
)
elif isinstance(target, tuple):
return type(target)(
[_recursive_apply(r, t) for r, t in zip(replacement, target)]
)
elif isinstance(replacement, dict) and isinstance(target, dict):
return type(target)(
{
k: _recursive_apply(r, t)
for (_, r), (k, t) in zip(replacement.items(), target.items())
}
)
elif isinstance(target, set):
return type(target)(
{_recursive_apply(r, t) for r, t in zip(replacement, target)}
)
else:
raise ValueError(
"Couldn't apply replacement of type {} to target of type "
"{}".format(type(replacement), type(target))
)
|
Blackbox-Coresets-VI-main
|
psvi/robust_higher/optim.py
|
#!/usr/bin/env python
# a facade package to expose some useful things
from __future__ import absolute_import, print_function, unicode_literals
from ddlib.util import tsj_extractor, tsv_extractor, over, returns
from collections import OrderedDict
|
deepdive-master
|
ddlib/deepdive.py
|
#! /usr/bin/env python
#
# This file contains the generic features library that is included with ddlib.
#
# The three functions that a user should want to use are load_dictionary,
# get_generic_features_mention, and get_generic_features_relation.
# All the rest should be considered more or less private, except perhaps the
# get_sentence method, which is actually just a wrapper around unpack_words.
#
# Matteo, December 2014
#
from .dd import dep_path_between_words, materialize_span, Span, unpack_words
MAX_KW_LENGTH = 3
dictionaries = dict()
def load_dictionary(filename, dict_id="", func=lambda x: x):
"""Load a dictionary to be used for generic features.
Returns the id used to identify the dictionary.
Args:
filename: full path to the dictionary. The dictionary is actually a set
of words, one word per line.
dict_id: (optional) specify the id to be used to identify the
dictionary. By default it is a sequential number.
func: (optional) A function to be applied to each row of the file
"""
if dict_id == "":
dict_id = str(len(dictionaries))
with open(filename, 'rt') as dict_file:
dictionary = set()
for line in dict_file:
dictionary.add(func(line.strip()))
dictionary = frozenset(dictionary)
dictionaries[str(dict_id)] = dictionary
return str(dict_id)
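# ---------------------------------------------------------------------------
# An illustrative sketch: load a tiny dictionary written on the fly; the
# words and the lower-casing ``func`` are assumptions for the example.
def _example_load_dictionary():
    import os
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("Gene\nProtein\n")
        path = f.name
    dict_id = load_dictionary(path, dict_id="bio", func=lambda w: w.lower())
    os.unlink(path)
    assert "gene" in dictionaries[dict_id]
    return dict_id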
def get_generic_features_mention(sentence, span, length_bin_size=5):
"""Yield 'generic' features for a mention in a sentence.
Args:
sentence: a list of Word objects
span: a Span namedtuple
length_bin_size: the size of the bins for the length feature
"""
# Mention sequence features (words, lemmas, ners, and poses)
for seq_feat in _get_seq_features(sentence, span):
yield seq_feat
# Window (left and right, up to size 3, with combinations) around the
# mention
for window_feat in _get_window_features(sentence, span):
yield window_feat
# Is (substring of) mention in a dictionary?
for dict_indicator_feat in _get_dictionary_indicator_features(
sentence, span):
yield dict_indicator_feat
# Dependency path(s) from mention to keyword(s). Various transformations of
# the dependency path are done.
for (i, j) in _get_substring_indices(len(sentence), MAX_KW_LENGTH):
if i >= span.begin_word_id and i < span.begin_word_id + span.length:
continue
if j > span.begin_word_id and j < span.begin_word_id + span.length:
continue
is_in_dictionary = False
for dict_id in dictionaries:
if " ".join(map(lambda x: str(x.lemma), sentence[i:j])) in \
dictionaries[dict_id]:
is_in_dictionary = True
yield "KW_IND_[" + dict_id + "]"
break
if is_in_dictionary:
kw_span = Span(begin_word_id=i, length=j-i)
for dep_path_feature in _get_min_dep_path_features(
sentence, span, kw_span, "KW"):
yield dep_path_feature
# The mention starts with a capital
if sentence[span.begin_word_id].word[0].isupper():
yield "STARTS_WITH_CAPITAL"
# Length of the mention
length = len(" ".join(materialize_span(sentence, span, lambda x: x.word)))
bin_id = length // length_bin_size
length_feat = "LENGTH_" + str(bin_id)
yield length_feat
def get_generic_features_relation(sentence, span1, span2, length_bin_size=5):
"""Yield 'generic' features for a relation in a sentence.
Args:
sentence: a list of Word objects
span1: the first Span of the relation
span2: the second Span of the relation
length_bin_size: the size of the bins for the length feature
"""
# Check whether the order of the spans is inverted. We use this information
# to add a prefix to *all* the features.
order = sorted([
span1.begin_word_id, span1.begin_word_id + span1.length,
span2.begin_word_id, span2.begin_word_id + span2.length])
begin = order[0]
betw_begin = order[1]
betw_end = order[2]
end = order[3]
if begin == span2.begin_word_id:
inverted = "INV_"
yield "IS_INVERTED"
else:
inverted = ""
betw_span = Span(begin_word_id=betw_begin, length=betw_end - betw_begin)
covering_span = Span(begin_word_id=begin, length=end - begin)
# Words, Lemmas, Ners, and Poses sequence between the mentions
for seq_feat in _get_seq_features(sentence, betw_span):
yield inverted + seq_feat
# Window feature (left and right, up to size 3, combined)
for window_feat in _get_window_features(
sentence, covering_span, isolated=False):
yield inverted + window_feat
# Ngrams of up to size 3 between the mentions
for ngram_feat in _get_ngram_features(sentence, betw_span):
yield inverted + ngram_feat
# Indicator features of whether the mentions are in dictionaries
found1 = False
for feat1 in _get_dictionary_indicator_features(
sentence, span1, prefix=inverted + "IN_DICT"):
found1 = True
found2 = False
for feat2 in _get_dictionary_indicator_features(
sentence, span2, prefix=""):
found2 = True
yield feat1 + feat2
if not found2:
yield feat1 + "_[_NONE]"
if not found1:
for feat2 in _get_dictionary_indicator_features(
sentence, span2, prefix=""):
found2 = True
yield inverted + "IN_DICT_[_NONE]" + feat2
# Dependency path (and transformations) between the mention
for betw_dep_path_feature in _get_min_dep_path_features(
sentence, span1, span2, inverted + "BETW"):
yield betw_dep_path_feature
# Dependency paths (and transformations) between the mentions and keywords
for (i, j) in _get_substring_indices(len(sentence), MAX_KW_LENGTH):
if (i >= begin and i < betw_begin) or (i >= betw_end and i < end):
continue
if (j > begin and j <= betw_begin) or (j > betw_end and j <= end):
continue
is_in_dictionary = False
for dict_id in dictionaries:
if " ".join(map(lambda x: str(x.lemma), sentence[i:j])) in \
dictionaries[dict_id]:
is_in_dictionary = True
yield inverted + "KW_IND_[" + dict_id + "]"
break
if is_in_dictionary:
kw_span = Span(begin_word_id=i, length=j-i)
path1 = _get_min_dep_path(sentence, span1, kw_span)
lemmas1 = []
labels1 = []
for edge in path1:
lemmas1.append(str(edge.word2.lemma))
labels1.append(edge.label)
both1 = []
for j in range(len(labels1)):
both1.append(labels1[j])
both1.append(lemmas1[j])
both1 = both1[:-1]
path2 = _get_min_dep_path(sentence, span2, kw_span)
lemmas2 = []
labels2 = []
for edge in path2:
lemmas2.append(str(edge.word2.lemma))
labels2.append(edge.label)
both2 = []
for j in range(len(labels2)):
both2.append(labels2[j])
both2.append(lemmas2[j])
both2 = both2[:-1]
yield inverted + "KW_[" + " ".join(both1) + "]_[" + \
" ".join(both2) + "]"
yield inverted + "KW_L_[" + " ".join(labels1) + "]_[" + \
" ".join(labels2) + "]"
for j in range(1, len(both1), 2):
for dict_id in dictionaries:
if both1[j] in dictionaries[dict_id]:
both1[j] = "DICT_" + str(dict_id)
break # Picking up the first dictionary we find
for j in range(1, len(both2), 2):
for dict_id in dictionaries:
if both2[j] in dictionaries[dict_id]:
both2[j] = "DICT_" + str(dict_id)
break # Picking up the first dictionary we find
yield inverted + "KW_D_[" + " ".join(both1) + "]_[" + \
" ".join(both2) + "]"
# The mentions start with a capital letter
first_capital = sentence[span1.begin_word_id].word[0].isupper()
second_capital = sentence[span2.begin_word_id].word[0].isupper()
capital_feat = inverted + "STARTS_WITH_CAPITAL_[" + str(first_capital) + \
"_" + str(second_capital) + "]"
yield capital_feat
# The lengths of the mentions
first_length = len(" ".join(materialize_span(
sentence, span1, lambda x: str(x.word))))
second_length = len(" ".join(materialize_span(
sentence, span2, lambda x: str(x.word))))
first_bin_id = first_length // length_bin_size
second_bin_id = second_length // length_bin_size
length_feat = inverted + "LENGTHS_[" + str(first_bin_id) + "_" + \
str(second_bin_id) + "]"
yield length_feat
def _get_substring_indices(_len, max_substring_len):
"""Yield the start-end indices for all substrings of a sequence with length
_len, up to length max_substring_len"""
for start in range(_len):
for end in reversed(range(start + 1, min(
_len, start + 1 + max_substring_len))):
yield (start, end)
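# ---------------------------------------------------------------------------
# An illustrative sketch spelling out the enumeration order of the generator
# above for a 3-token sequence and substrings of length at most 2 (end
# indices are exclusive).
def _example_substring_indices():
    assert list(_get_substring_indices(3, 2)) == [(0, 2), (0, 1), (1, 2)]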
def _get_ngram_features(sentence, span, window=3):
"""Yields ngram features. These are all substrings of size up to window in
the part of the sentence covered by the span.
In a typical usage, the span covers the words between two mentions, so
this function returns all ngrams of size up to window between the two
mentions
Args:
sentence: a list of Word objects
span: the Span identifying the area for generating the substrings
window: maximum size of a substring
"""
for i in range(span.begin_word_id, span.begin_word_id + span.length):
for j in range(1, window + 1):
if i+j <= span.begin_word_id + span.length:
yield "NGRAM_" + str(j) + "_[" + " ".join(
map(lambda x: str(x.lemma), sentence[i:i+j])) + "]"
def _get_min_dep_path(sentence, span1, span2):
"""Return the shortest dependency path between two Span objects
Args:
sentence: a list of Word objects
span1: the first Span
span2: the second Span
Returns: a list of DepEdge objects
"""
min_path = None
min_path_length = 200  # upper bound; real dependency paths are much shorter
for i in range(span1.begin_word_id, span1.begin_word_id + span1.length):
for j in range(
span2.begin_word_id, span2.begin_word_id + span2.length):
p = dep_path_between_words(sentence, i, j)
if len(p) < min_path_length:
    min_path = p
    min_path_length = len(p)
return min_path
def _get_min_dep_path_features(sentence, span1, span2, prefix="BETW_"):
"""Yield the minimum dependency path features between two Span objects.
Various variants of the dependency path are yielded:
- using both labels and lemmas,
- using only labels
- using labels and lemmas, but with lemmas replaced by dict_id if the
lemma is in a dictionary
Args:
sentence: a list of Word objects
span1: the first Span
span2: the second Span
prefix: string prepended to all features
"""
min_path = _get_min_dep_path(sentence, span1, span2)
if min_path:
min_path_lemmas = []
min_path_labels = []
for edge in min_path:
min_path_lemmas.append(str(edge.word2.lemma))
min_path_labels.append(str(edge.label))
both = []
for j in range(len(min_path_labels)):
both.append(min_path_labels[j])
both.append(min_path_lemmas[j])
both = both[:-1]
yield prefix + "_[" + " ".join(both) + "]"
yield prefix + "_L_[" + " ".join(min_path_labels) + "]"
for j in range(1, len(both), 2):
for dict_id in dictionaries:
if both[j] in dictionaries[dict_id]:
both[j] = "DICT_" + str(dict_id)
break # Picking up the first dictionary we find
yield prefix + "_D_[" + " ".join(both) + "]"
def _get_seq_features(sentence, span):
"""Yield the sequence features in a Span
These include:
- words sequence in the span
- lemmas sequence in the span
- NER tags sequence in the span
- POS tags sequence in the span
Args:
sentence: a list of Word objects
span: the Span
"""
word_seq_feat = "WORD_SEQ_[" + " ".join(materialize_span(
sentence, span, lambda x: x.word)) + "]"
yield word_seq_feat
lemma_seq_feat = "LEMMA_SEQ_[" + " ".join(materialize_span(
sentence, span, lambda x: str(x.lemma))) + "]"
yield lemma_seq_feat
ner_seq_feat = "NER_SEQ_[" + " ".join(materialize_span(
sentence, span, lambda x: str(x.ner))) + "]"
yield ner_seq_feat
pos_seq_feat = "POS_SEQ_[" + " ".join(materialize_span(
sentence, span, lambda x: str(x.pos))) + "]"
yield pos_seq_feat
def _get_window_features(
sentence, span, window=3, combinations=True, isolated=True):
"""Yield the window features around a Span
These are basically the n-grams around the span, up to a window of size
'window'
Args:
sentence: a list of Word objects
span: the span
window: the maximum size of the window
combinations: Whether to yield features that combine the windows on
the left and on the right
isolated: Whether to yield features that do not combine the windows on
the left and on the right
"""
span_end_idx = span.begin_word_id + span.length - 1
left_lemmas = []
left_ners = []
right_lemmas = []
right_ners = []
try:
for i in range(1, window + 1):
lemma = str(sentence[span.begin_word_id - i].lemma)
try:
float(lemma)
lemma = "_NUMBER"
except ValueError:
pass
left_lemmas.append(lemma)
left_ners.append(str(sentence[span.begin_word_id - i].ner))
except IndexError:
pass
left_lemmas.reverse()
left_ners.reverse()
try:
for i in range(1, window + 1):
lemma = str(sentence[span_end_idx + i].lemma)
try:
float(lemma)
lemma = "_NUMBER"
except ValueError:
pass
right_lemmas.append(lemma)
right_ners.append(str(sentence[span_end_idx + i].ner))
except IndexError:
pass
if isolated:
for i in range(len(left_lemmas)):
yield "W_LEFT_" + str(i+1) + "_[" + " ".join(left_lemmas[-i-1:]) + \
"]"
yield "W_LEFT_NER_" + str(i+1) + "_[" + " ".join(left_ners[-i-1:]) +\
"]"
for i in range(len(right_lemmas)):
yield "W_RIGHT_" + str(i+1) + "_[" + " ".join(right_lemmas[:i+1]) +\
"]"
yield "W_RIGHT_NER_" + str(i+1) + "_[" + \
" ".join(right_ners[:i+1]) + "]"
if combinations:
for i in range(len(left_lemmas)):
curr_left_lemmas = " ".join(left_lemmas[-i-1:])
try:
curr_left_ners = " ".join(left_ners[-i-1:])
except TypeError:
new_ners = []
for ner in left_ners[-i-1:]:
to_add = ner
if not to_add:
to_add = "None"
new_ners.append(to_add)
curr_left_ners = " ".join(new_ners)
for j in range(len(right_lemmas)):
curr_right_lemmas = " ".join(right_lemmas[:j+1])
try:
curr_right_ners = " ".join(right_ners[:j+1])
except TypeError:
new_ners = []
for ner in right_ners[:j+1]:
to_add = ner
if not to_add:
to_add = "None"
new_ners.append(to_add)
curr_right_ners = " ".join(new_ners)
yield "W_LEMMA_L_" + str(i+1) + "_R_" + str(j+1) + "_[" + \
curr_left_lemmas + "]_[" + curr_right_lemmas + "]"
yield "W_NER_L_" + str(i+1) + "_R_" + str(j+1) + "_[" + \
curr_left_ners + "]_[" + curr_right_ners + "]"
def _get_dictionary_indicator_features(
sentence, span, window=3, prefix="IN_DICT"):
"""Yield the indicator features for whether a substring of the span is in
the dictionaries
Args:
sentence: a list of Word objects
span: the span
window: the maximum size of a substring
prefix: a string to prepend to all yielded features
"""
in_dictionaries = set()
for i in range(window + 1):
for j in range(span.length - i):
    start = span.begin_word_id + j
    phrase = " ".join(map(lambda x: str(x.lemma), sentence[start:start + i + 1]))
for dict_id in dictionaries:
if phrase in dictionaries[dict_id]:
in_dictionaries.add(dict_id)
for dict_id in in_dictionaries:
yield prefix + "_[" + str(dict_id) + "]"
# yield prefix + "_JOIN_[" + " ".join(
# map(lambda x: str(x), sorted(in_dictionaries))) + "]"
def dep_graph_parser_parenthesis(edge_str):
"""Given a string representing a dependency edge in the 'parenthesis'
format, return a tuple of (parent_index, edge_label, child_index).
Args:
edge_str: a string representation of an edge in the dependency tree, in
the format edge_label(parent_word-parent_index, child_word-child_index)
Returns:
tuple of (parent_index, edge_label, child_index)
"""
tokens = edge_str.split("(")
label = tokens[0]
tokens = tokens[1].split(", ")
parent = int(tokens[0].split("-")[-1]) - 1
child = int(",".join(tokens[1:]).split("-")[-1][:-1]) - 1
return (parent, label, child)
def dep_graph_parser_triplet(edge_str):
"""Given a string representing a dependency edge in the 'triplet' format,
return a tuple of (parent_index, edge_label, child_index).
Args:
edge_str: a string representation of an edge in the dependency tree
in the format "parent_index\tlabel\child_index"
Returns:
tuple of (parent_index, edge_label, child_index)
"""
parent, label, child = edge_str.split()
# input edge used 1-based indexing
return (int(parent) - 1, label, int(child) - 1)
def dep_transform_parenthesis_to_triplet(edge_str):
"""Transform an edge representation from the parenthesis format to the
triplet format"""
parent, label, child = dep_graph_parser_parenthesis(edge_str)
return "\t".join((str(parent + 1), label, str(child + 1)))
def dep_transform_triplet_to_parenthesis(edge_str, parent_word, child_word):
"""Transform an edge representation from the triplet format to the
parenthesis format"""
parent, label, child = dep_graph_parser_triplet(edge_str)
return label + "(" + parent_word + "-" + str(parent + 1) + ", " + \
child_word + "-" + str(child + 1) + ")"
def dep_transform_test():
"""Test the transformation functions for the various dependency paths
formats"""
test = "a(b-1, c-2)"
transf = dep_transform_parenthesis_to_triplet(test)
assert transf == "1\ta\t2"
transf_back = dep_transform_triplet_to_parenthesis(transf, "b", "c")
assert transf_back == test
print("success")
def get_span(span_begin, span_length):
"""Return a Span object
Args:
span_begin: the index the Span begins at
span_length: the length of the span
"""
return Span(begin_word_id=span_begin, length=span_length)
def get_sentence(
begin_char_offsets, end_char_offsets, words, lemmas, poses,
dependencies, ners, dep_format_parser=dep_graph_parser_parenthesis):
"""Return a list of Word objects representing a sentence.
This is effectively a wrapper around unpack_words, but with a less
cumbersome interface.
Args:
begin_char_offsets: a list representing the beginning character offset
for each word in the sentence
end_char_offsets: a list representing the end character offset for each
word in the sentence
words: a list of the words in the sentence
lemmas: a list of the lemmas of the words in the sentence
poses: a list of the POS tags of the words in the sentence
dependencies: a list of the dependency path edges for the sentence
ners: a list of the NER tags of the words in the sentence
        dep_format_parser: a function that takes as only argument an element of
dependencies (i.e., a dependency path edge) and returns a 3-tuple
(parent_index, label, child_index) representing the edge. Look at
the code for dep_graph_parser_parenthesis and
dep_graph_parser_triplet for examples.
"""
obj = dict()
obj['lemma'] = lemmas
obj['words'] = words
obj['ner'] = ners
obj['pos'] = poses
obj['dep_graph'] = dependencies
obj['ch_of_beg'] = begin_char_offsets
obj['ch_of_end'] = end_char_offsets
# list of Word objects
word_obj_list = unpack_words(
obj, character_offset_begin='ch_of_beg',
character_offset_end='ch_of_end', lemma='lemma', pos='pos',
ner='ner', words='words', dep_graph='dep_graph',
dep_graph_parser=dep_format_parser)
return word_obj_list
|
deepdive-master
|
ddlib/ddlib/gen_feats.py
|
from collections import namedtuple,OrderedDict
import re
import sys
from inspect import isgeneratorfunction,getargspec
import csv
from io import StringIO
from datetime import datetime
import json
def print_error(err_string):
"""Function to write to stderr"""
sys.stderr.write("ERROR[UDF]: " + str(err_string) + "\n")
# PostgreSQL COPY TO text Format parser
# See: http://www.postgresql.org/docs/9.1/static/sql-copy.html#AEN64302
escapeCodeToSpecial = {
'\\': '\\',
'b': '\b',
'f': '\f',
'r': '\r',
't': '\t',
'n': '\n',
'v': '\v',
}
specialToEscapeCode = {v: k for k, v in escapeCodeToSpecial.items()}
def decode_pg_text_escapes(m):
c = m.group(1)
if c in escapeCodeToSpecial:
return escapeCodeToSpecial[c]
elif c.startswith("x"):
return chr(int(c, base=16))
elif c.startswith("0"):
return chr(int(c, base=8))
else:
return c
def unescape_postgres_text_format(s):
    # unescape PostgreSQL text format; try the multi-character octal/hex escapes
    # before the single-character catch-all so they are not truncated
    return re.sub(r"\\(0[0-7]{1,2}|x[0-9A-Fa-f]{1,2}|.)", decode_pg_text_escapes, s)
BOOL_PARSER = {
't' : True,
'f' : False,
'NULL' : None,
'\\N' : None
}
def timestamp(timestamp_str):
"""Given a timestamp string, return a timestamp string in ISO 8601 format to emulate
Postgres 9.5's to_json timestamp formatting.
This supports the `timestamp without time zone` PostgreSQL type.
Time zone offsets are not supported. http://bugs.python.org/issue6641
Examples:
>>> timestamp('2016-06-17 20:10:38')
'2016-06-17T20:10:38'
>>> timestamp('2016-06-17 20:10:37.9293')
'2016-06-17T20:10:37.929300'
"""
    try:
        parsed = datetime.strptime(timestamp_str, '%Y-%m-%d %H:%M:%S.%f')
    except ValueError:
        try:
            parsed = datetime.strptime(timestamp_str, '%Y-%m-%d %H:%M:%S')
        except ValueError:
            return timestamp_str
    return parsed.isoformat()
TYPE_PARSERS = {
'text' : lambda x : str(x),
'int' : lambda x : int(x.strip()),
'float' : lambda x : float(x.strip()),
'boolean' : lambda x : BOOL_PARSER[x.lower().strip()],
'timestamp': timestamp,
}
# how to normalize type names
CANONICAL_TYPE_BY_NAME = {
'integer' : 'int',
'bigint' : 'int',
'double' : 'float',
'double precision' : 'float',
'numeric' : 'float',
'unknown' : 'text',
}
CANONICAL_TYPE_BY_REGEX = {
re.compile(r'timestamp(\(\d\))? without time zone'): 'timestamp',
}
def normalize_type_name(ty):
ty = ty.lower()
if ty.endswith('[]'):
return normalize_type_name(ty[:-2]) + '[]'
if ty in CANONICAL_TYPE_BY_NAME:
return CANONICAL_TYPE_BY_NAME[ty]
else:
for patt,ty_canonical in CANONICAL_TYPE_BY_REGEX.items():
if patt.match(ty):
return ty_canonical
return ty
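# Quick sanity sketch (added for illustration):
#   normalize_type_name("BIGINT")                         -> 'int'
#   normalize_type_name("double precision[]")             -> 'float[]'
#   normalize_type_name("timestamp(3) without time zone") -> 'timestamp'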
def check_supported_type(nm, ty, array_nesting=0):
if ty.endswith('[]'):
if array_nesting == 0:
check_supported_type(nm, ty[:-2], array_nesting=array_nesting+1)
else: # XXX the parser cannot parse nested arrays correctly
raise TypeError("column '%s' is of unsupported nested array type: %s" % (nm, ty + '[]'))
elif not ty in TYPE_PARSERS:
raise TypeError("column '%s' is of unsupported type: %s" % (nm, ty))
return nm, ty
def parse_pgtsv_element(s, t, array_nesting_depth=0):
"""
    Parse an element in psql-compatible TSV format, i.e. "{...}"-style arrays,
    based on the provided type and the type-parser dictionary
"""
if s is None:
return s
if array_nesting_depth == 0:
if s == '\\N':
# NULLs outside arrays are represented as \N
# unless specified otherwise in the SQL statement (COPY ... NULL ...)
return None
elif not t.endswith('[]'):
# Need to handle PG TSV escape sequences for primitive types here,
# escapes for array elements are handled during array parsing
s = unescape_postgres_text_format(s)
if t.endswith('[]'): # Handle lists recursively
if s[0] == '{' and s[-1] == '}':
s_orig = s
s = s[1:-1] # to strip curly braces
def unescapeTSVBackslashes(matches):
c = matches.group(1)
return escapeCodeToSpecial[c] if c in escapeCodeToSpecial else c
s = re.sub(r'\\(.)', unescapeTSVBackslashes, s)
s = re.sub(r'\\(.)', lambda m : '""' if m.group(1) == '"' else m.group(1), s) # XXX quotes and backslashes in arrays are escaped another time
values = []
v = None
is_quoted = False
def null_or_itself(v): return None if not is_quoted and v == 'NULL' else v
while len(s) > 0:
if s[0] == ',': # found the end of a value
values.append(null_or_itself(v))
v = None
is_quoted = False
s = s[1:]
elif s[0] == '"': # found a quote
# e.g.: 1,this"is an error",2,3
if v is None: # this is a new value
v = ""
else: # this an escaped quote, append to the current value
v += '"'
# find the other end of the quote and consume
m = re.match(r'^"([^"]*)"', s)
if m:
v += m.group(1)
is_quoted = True # TODO error if quoting mixed
s = s[len(m.group(0)):]
else:
raise Exception("Unterminated quote in '%s'" % s_orig)
else:
m = re.match(r'^([^,]*)', s)
if m: # find the next comma to consume up to it
v = m.group(1)
else: # or consume the rest of the string as the value
v = s
s = s[len(v):]
values.append(null_or_itself(v))
split = values
else:
raise Exception("Surrounding curly braces ({...}) expected for array type %(type)s but found: '%(value)s'" % dict(
type=t,
value=s,
))
return [parse_pgtsv_element(ss, t[:-2], array_nesting_depth=array_nesting_depth+1) for ss in split]
else: # parse correct value using the parser corresponding to the type
try:
parser = TYPE_PARSERS[t]
except KeyError:
raise Exception("Unsupported type: %s" % t)
return parser(s)
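# Illustrative sketch (added; the values are made up): primitives and one-level arrays.
#   parse_pgtsv_element(r'\N', 'int')          -> None
#   parse_pgtsv_element('42', 'int')           -> 42
#   parse_pgtsv_element('{1,2,NULL}', 'int[]') -> [1, 2, None]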
class Row:
def __str__(self):
return '<Row(' + ', '.join("%s=%s" % x for x in self.__dict__.items()) + ')>'
def __repr__(self):
return str(self)
def _asdict(self):
return self.__dict__
class PGTSVParser:
"""
Initialized with a list of duples (field_name, field_type)
Is a factory for simple Row class
Parsed from Postgres-style TSV input lines
"""
def __init__(self, fields):
self.fields = [check_supported_type(nm,normalize_type_name(ty)) for nm,ty in fields]
def parse_line(self, line):
row = Row()
attribs = line.rstrip().split('\t')
if len(attribs) != len(self.fields):
raise ValueError("Expected %(num_rows_declared)d attributes, but found %(num_rows_found)d in input row:\n%(row)s" % dict(
                num_rows_declared=len(self.fields), num_rows_found=len(attribs), row=line,
))
for i,attrib in enumerate(attribs):
field_name, field_type = self.fields[i]
setattr(row, field_name, parse_pgtsv_element(attrib, field_type))
return row
def parse_stdin(self):
for line in sys.stdin:
yield self.parse_line(line)
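# Illustrative usage sketch (added; the field list is an assumption):
#   parser = PGTSVParser([("doc_id", "text"), ("scores", "float[]")])
#   row = parser.parse_line("doc42\t{0.5,1.0}")
#   # row.doc_id == "doc42", row.scores == [0.5, 1.0]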
TYPE_CHECKERS = {
'text' : lambda x : type(x) == str,
'int' : lambda x : type(x) == int,
'float' : lambda x : type(x) == float,
'boolean' : lambda x : type(x) == bool,
# TODO timestamp
}
def print_pgtsv_element(x, n, t, array_nesting_depth=0):
"""Checks element x against type string t, then prints in PG-TSV format if a match"""
# Handle NULLs first
if x is None:
if array_nesting_depth == 0:
return r'\N'
elif t == 'text':
return 'NULL'
else:
return ''
# Handle lists recursively
if '[]' in t:
if not hasattr(x, '__iter__'):
raise ValueError("Mismatch between array type and non-iterable in output row:\n%s" % x)
else:
return '{%s}' % ','.join(print_pgtsv_element(e, n, t[:-2], array_nesting_depth=array_nesting_depth+1) for e in x)
    # Else check type & print, handling the special case of strings in arrays
try:
checker = TYPE_CHECKERS[t]
except KeyError:
raise Exception("Unsupported type: %s" % t)
if not checker(x):
raise Exception("Output column '%(name)s' of type %(declared_type)s has incorrect value of %(value_type)s: '%(value)s'" % dict(
name=n, declared_type=t, value_type=type(x), value=x,
))
if t == 'text':
x = str(x)
def escapeWithTSVBackslashes(x):
return re.sub(r'[\b\f\n\r\t\\]', lambda m : "\\" + specialToEscapeCode[m.group(0)], x)
if array_nesting_depth == 0:
# primitive types just need TSV escaping
return escapeWithTSVBackslashes(x)
else:
if re.search(r'^[a-zA-Z0-9_.\b\x1c\x1d\x1e\x1f\x7f\[\]()]+$', x) \
and x not in ["", "NULL", "null"]:
# we don't need to quote the value in some special cases
return escapeWithTSVBackslashes(x)
else: # otherwise, surround value with quotes
x = re.sub(r'[\\"]', lambda m : '\\' + m.group(0), x) # XXX quotes and backslashes in arrays are escaped another time
return '"%s"' % escapeWithTSVBackslashes(x) # then, the TSV escaping
elif t == 'boolean':
return 't' if x else 'f'
# TODO timestamp
else:
return str(x)
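# Illustrative sketch (added; the column name "c" is made up):
#   print_pgtsv_element(None, "c", "int")           -> r'\N'
#   print_pgtsv_element(True, "c", "boolean")       -> 't'
#   print_pgtsv_element([1, None, 3], "c", "int[]") -> '{1,,3}'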
class PGTSVPrinter:
"""
Initialized with a list of type strings
Prints out Postgres-format TSV output lines
"""
def __init__(self, fields):
self.fields = fields
def write(self, out):
if len(out) != len(self.fields):
raise ValueError("Expected %(num_rows_declared)d attributes, but found %(num_rows_found)d in output row:\n%(row)s" % dict(
num_rows_declared=len(self.fields), num_rows_found=len(out), row=out,
))
else:
print('\t'.join(print_pgtsv_element(x, n, t) for x,(n,t) in zip(out, self.fields)))
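# Illustrative usage sketch (added; the fields are assumptions), the mirror image
# of PGTSVParser above:
#   printer = PGTSVPrinter([("doc_id", "text"), ("score", "float")])
#   printer.write(["d1", 0.5])   # prints "d1\t0.5"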
# how to get types specified as default values of a function
def format_from_args_defaults_of(aFunctionOrFormat):
if hasattr(aFunctionOrFormat, '__call__'):
# TODO in Python3, support types in function annotations (PEP 3107: https://www.python.org/dev/peps/pep-3107/)
spec = getargspec(aFunctionOrFormat)
return list(zip(spec.args, spec.defaults))
else:
return aFunctionOrFormat
## function decorators to be used directly in UDF implementations
# decorators for input and output formats
def format_decorator(attrName):
def decorator(*name_type_pairs, **name_type_dict):
"""
When a function is decorated with this (e.g., @returns(...) or @over(...)
preceding the def line), the pairs of column name and type given as
arguments are kept as the function's attribute to supply other decorators,
such as @tsv_extractor, with information for deciding how to parse the
input lines or format the output lines.
"""
# check single argument case with a function or dict
if len(name_type_pairs) == 1:
if hasattr(name_type_pairs[0], '__call__'):
name_type_pairs = format_from_args_defaults_of(name_type_pairs[0])
elif type(name_type_pairs[0]) in [dict, OrderedDict]:
name_type_pairs = name_type_pairs[0]
# XXX @over(collection.OrderedDict(foo="type", bar="type", ...)) doesn't work
# as Python forgets the order when calling with keyword argument binding.
# merge dictionaries
name_type_pairs = list(name_type_pairs) + list(name_type_dict.items())
def decorate(f):
setattr(f, attrName, name_type_pairs)
return f
return decorate
return decorator
over = format_decorator("input_format")
returns = format_decorator("output_format")
def get_generator_format(generator):
# Expects the input and output formats to have been decorated with @over and @returns
try:
# @over has precedence over default values of function arguments
input_format = generator.input_format
except AttributeError:
input_format = format_from_args_defaults_of(generator)
try:
output_format = generator.output_format
# also support function argument defaults for output_format for symmetry
output_format = format_from_args_defaults_of(output_format)
except AttributeError:
raise ValueError("The function must be decorated with @returns")
# TODO or maybe just skip type checking if @returns isn't present?
# Check generator function
if not isgeneratorfunction(generator):
raise ValueError("The function must be a *generator*, i.e., use yield not return")
return input_format, output_format
# decorators that initiate the main extractor loop
def tsv_extractor(generator):
"""
When a generator function is decorated with this (i.e., @tsv_extractor
preceding the def line), standard input is parsed as Postgres-style TSV
(PGTSV) input rows, the function is applied to generate output rows, and then
checks that each line of this generator is in the output format before
printing back as PGTSV rows.
"""
input_format, output_format = get_generator_format(generator)
# Create the input parser
parser = PGTSVParser(input_format)
# Create the output parser
printer = PGTSVPrinter(output_format)
for row in parser.parse_stdin():
for out_row in generator(**row._asdict()):
printer.write(out_row)
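# Minimal usage sketch (added; the UDF and its columns are assumptions). The input
# format is taken from the UDF's default arguments and the output format from
# @returns, mirroring examples/template/udf/extractor_template.py:
#
#   @tsv_extractor
#   @returns(lambda doc_id="text", num_tokens="int": [])
#   def count_tokens(doc_id="text", tokens="text[]"):
#       yield [doc_id, len(tokens)]
#
# A stdin line "doc1\t{a,b,c}" would then produce "doc1\t3" on stdout.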
def tsj_extractor(generator):
"""
When a generator function is decorated with this (i.e., @tsj_extractor
preceding the def line), each standard input line is parsed as
tab-separated JSON (TSJ) values, then the function is applied to the parsed
array to generate output rows, and each output row expected to be an array
is formatted as TSJ.
"""
try: # For Python 2, set default encoding to UTF-8
reload(sys).setdefaultencoding("utf8") # to avoid UnicodeEncodeError of JSON values during conversion by str()
except:
pass # Python 3 raises an exception as reload() is not available
input_format, output_format = get_generator_format(generator)
input_names = [name for name,t in input_format]
num_input_values = len(input_format)
num_input_splits = num_input_values - 1
num_output_values = len(output_format)
def parse_json(column_index, json_value):
try:
return json.loads(json_value)
except ValueError as exc:
raise ValueError("JSON parse error in column %d (%s):\n %s\n" % (column_index, exc, json_value))
for line in sys.stdin:
try:
columns = line.rstrip("\n").split("\t", num_input_splits)
assert len(columns) == num_input_values
            values_in = [parse_json(i, v) for i, v in enumerate(columns)]
input_dict = dict(zip(input_names, values_in))
except ValueError as exc:
raise ValueError("could not parse TSJ line:\n %s\ndue to %s" % (line, exc))
for values_out in generator(**input_dict):
if len(values_out) == num_output_values:
for i,v in enumerate(values_out):
if i > 0: sys.stdout.write("\t")
sys.stdout.write(json.dumps(v))
sys.stdout.write("\n")
else:
raise ValueError("Expected %d values but got %d\n input: %s\n output: %s" % (
num_output_values, len(values_out),
json.dumps(values_in), json.dumps(values_out)))
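# Minimal usage sketch (added; names are assumptions): a @tsj_extractor UDF is
# declared the same way as a @tsv_extractor UDF, but each input column is a JSON
# value, so the line '"doc1"\t["a","b"]' is parsed to ("doc1", ["a", "b"]) and
# each yielded row is written back out as tab-separated JSON.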
|
deepdive-master
|
ddlib/ddlib/util.py
|
from .dd import *
from .gen_feats import *
from .util import *
|
deepdive-master
|
ddlib/ddlib/__init__.py
|
import sys
import collections
Word = collections.namedtuple('Word', ['begin_char_offset', 'end_char_offset', 'word', 'lemma', 'pos', 'ner', 'dep_par', 'dep_label'])
Span = collections.namedtuple('Span', ['begin_word_id', 'length'])
Sequence = collections.namedtuple('Sequence', ['is_inversed', 'elements'])
DepEdge = collections.namedtuple('DepEdge', ['word1', 'word2', 'label', 'is_bottom_up'])
def unpack_words(input_dict, character_offset_begin=None, character_offset_end=None, lemma=None,
pos=None, ner = None, words = None, dep_graph = None, dep_graph_parser = lambda x: x.split('\t')):
"""Return a list of Word objects representing a sentence
"""
array_character_offset_begin = input_dict[character_offset_begin] if character_offset_begin != None else ()
array_character_offset_end = input_dict[character_offset_end] if character_offset_end != None else ()
array_lemma = input_dict[lemma] if lemma != None else ()
array_pos = input_dict[pos] if pos != None else ()
array_ner = input_dict[ner] if ner != None else ()
array_words = input_dict[words] if words != None else ()
dep_graph = input_dict[dep_graph] if dep_graph != None else ()
dep_tree = {}
for path in dep_graph:
(parent, label, child) = dep_graph_parser(path)
parent, child = int(parent), int(child)
dep_tree[child] = {"parent":parent, "label":label}
if parent not in dep_tree: dep_tree[parent] = {"parent":-1, "label":"ROOT"}
# workaround for making `map(None, a, b, ...)` work consistently with Python 2 and 3
zip_with_None = lambda *ls: (tuple(l[i] if i < len(l) else None for l in ls) for i in range(max(map(len, ls))))
ziped_tags = list(zip_with_None(array_character_offset_begin, array_character_offset_end, array_lemma,
array_pos, array_ner, array_words))
wordobjs = []
for i in range(0,len(ziped_tags)):
if i not in dep_tree : dep_tree[i] = {"parent":-1, "label":"ROOT"}
wordobjs.append(Word(begin_char_offset=ziped_tags[i][0], end_char_offset=ziped_tags[i][1], lemma=ziped_tags[i][2], pos=ziped_tags[i][3],
ner=ziped_tags[i][4], word=ziped_tags[i][5],dep_par=dep_tree[i]["parent"], dep_label=dep_tree[i]["label"]))
    return wordobjs
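# Illustrative sketch (added; the toy sentence is made up). With the default
# tab-separated parser, dependency edges are 0-based "parent\tlabel\tchild":
#   sent = unpack_words(
#       {"words": ["Fido", "barks"], "lemma": ["Fido", "bark"], "deps": ["1\troot\t0"]},
#       words="words", lemma="lemma", dep_graph="deps")
#   # sent[0].word == "Fido", sent[0].dep_par == 1, sent[0].dep_label == "root"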
def log(obj):
"""Print the string form of an object to STDERR.
Args:
obj: The object that the user wants to log to STDERR.
"""
sys.stderr.write(obj.__str__() + "\n")
def materialize_span(words, span, func=lambda x:x):
"""Given a sequence of objects and a span, return the subsequence that corresponds to the span.
Args:
words: A sequence of objects.
span: A Span namedtuple
func: Optional function that will be applied to each element in the result subsequence.
"""
return map(func, words[span.begin_word_id:(span.begin_word_id+span.length)])
def _fe_seq_between_words(words, begin_idx, end_idx, func=lambda x:x):
if begin_idx < end_idx:
return Sequence(elements=map(func, words[begin_idx+1:end_idx]), is_inversed=False)
else:
return Sequence(elements=map(func, words[end_idx+1:begin_idx]), is_inversed=True)
def tokens_between_spans(words, span1, span2, func=lambda x:x):
"""Given a sequence of objects and two spans, return the subsequence that is between these spans.
Args:
words: A sequence of objects.
span1: A Span namedtuple
span2: A Span namedtuple
func: Optional function that will be applied to each element in the result subsequence.
Returns:
A Sequence namedtuple between these two spans. The "is_inversed" label is set
to be True if span1 is *AFTER* span 2.
"""
if span1.begin_word_id < span2.begin_word_id:
return _fe_seq_between_words(words, span1.begin_word_id+span1.length-1, span2.begin_word_id, func)
else:
return _fe_seq_between_words(words, span1.begin_word_id, span2.begin_word_id+span2.length-1, func)
def _path_to_root(words, word_idx):
rs = []
c_word_idx = word_idx
covered_indexes = set()
while True:
if c_word_idx in covered_indexes:
break
rs.append(words[c_word_idx])
covered_indexes.add(c_word_idx)
if words[c_word_idx].dep_par == -1 or words[c_word_idx].dep_par == c_word_idx:
break
c_word_idx = words[c_word_idx].dep_par
return rs
def dep_path_between_words(words, begin_idx, end_idx):
"""Given a sequence of Word objects and two indices, return the sequence of Edges
corresponding to the dependency path between these two words.
Args:
words: A sequence of Word objects.
span1: A word index
span2: A word index
Returns:
An Array of Edge objects, each of which corresponds to one edge on the dependency path.
"""
path_to_root1 = _path_to_root(words, begin_idx)
path_to_root2 = _path_to_root(words, end_idx)
common = set(path_to_root1) & set(path_to_root2)
#if len(common) == 0:
# raise Exception('Dep Path Must be Wrong: No Common Element Between Word %d & %d.' % (begin_idx, end_idx))
path = []
for word in path_to_root1:
if word in common: break
path.append(DepEdge(word1=word, word2=words[word.dep_par], label=word.dep_label, is_bottom_up=True))
path_right = []
for word in path_to_root2:
if word in common: break
path_right.append(DepEdge(word1=words[word.dep_par], word2=word, label=word.dep_label, is_bottom_up=False))
for e in reversed(path_right):
path.append(e)
return path
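# Illustrative sketch (added): for the two-word toy sentence sketched under
# unpack_words above, the path from word 0 to word 1 is a single bottom-up edge:
#   dep_path_between_words(sent, 0, 1)
#   # -> [DepEdge(word1=sent[0], word2=sent[1], label='root', is_bottom_up=True)]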
|
deepdive-master
|
ddlib/ddlib/dd.py
|
#!/usr/bin/env python
# an identity UDF for Nasty TSJ input/output
# for testing @tsj_extractor parser and formatter
from __future__ import print_function
from deepdive import *
def identity_for_nasty_tsj(
v1 = "text",
v2 = "float",
v3 = "boolean",
v4 = "boolean",
v5 = "text",
v6 = "text",
v7 = "text",
v8 = "text",
v9 = "text",
v10 = "text",
v11 = "text",
v12 = "int[]",
v13 = "float[]",
v14 = "text[]",
v15 = "text[]",
v16 = "text[]",
v17 = "text[]",
v18 = "text[]",
v19 = "text[]",
v20 = "text[]",
):
import sys
args = [
v1,
v2,
v3,
v4,
v5,
v6,
v7,
v8,
v9,
v10,
v11,
v12,
v13,
v14,
v15,
v16,
v17,
v18,
v19,
v20,
]
for i, x in enumerate(args):
print >>sys.stderr, "v%d =\t" % (i + 1),
print >>sys.stderr, x
yield args
nasty_tsj_types = identity_for_nasty_tsj
@tsj_extractor
@over (nasty_tsj_types)
@returns(nasty_tsj_types)
def extractor(**args):
for row in identity_for_nasty_tsj(**args):
yield row
|
deepdive-master
|
ddlib/test/tsj_extractor_identity_udf.py
|
#! /usr/bin/env python
# File: udf/ext_has_spouse_features.py
import sys, json
import ddlib
def my_dep_format_parser(s):
parent, label, child = s.split('\t')
return (int(parent)-1, label, int(child)-1)
for row in sys.stdin:
obj = json.loads(row)
words = list(ddlib.unpack_words(obj, character_offset_begin='character_offset_begin',
character_offset_end='character_offset_end', lemma='lemma',
pos='pos', words = 'words', dep_graph = 'dep_graph', dep_graph_parser=my_dep_format_parser))
edges = ddlib.dep_path_between_words(words, 0, len(words)-1)
for e in edges:
print("%s %s" % (e.word1.lemma, e.word2.lemma))
|
deepdive-master
|
ddlib/test/test_dep.py
|
#!/usr/bin/env python
# an identity UDF for Nasty TSV input/output
# for testing @tsv_extractor parser and formatter
from __future__ import print_function
from deepdive import *
def identity_for_nasty_tsv(
v1 = "text",
v2 = "float",
v3 = "boolean",
v4 = "boolean",
v5 = "text",
v6 = "text",
v7 = "text",
v8 = "text",
v9 = "text",
v10 = "text",
v11 = "text",
v12 = "int[]",
v13 = "float[]",
v14 = "text[]",
v15 = "text[]",
v16 = "text[]",
v17 = "text[]",
v18 = "text[]",
v19 = "text[]",
v20 = "text[]",
):
import sys
args = [
v1,
v2,
v3,
v4,
v5,
v6,
v7,
v8,
v9,
v10,
v11,
v12,
v13,
v14,
v15,
v16,
v17,
v18,
v19,
v20,
]
for i, x in enumerate(args):
print("v%d =\t" % (i + 1), file=sys.stderr)
print(x, file=sys.stderr)
yield args
nasty_tsv_types = identity_for_nasty_tsv
@tsv_extractor
@over (nasty_tsv_types)
@returns(nasty_tsv_types)
def extractor(**args):
for row in identity_for_nasty_tsv(**args):
yield row
|
deepdive-master
|
ddlib/test/tsv_extractor_identity_udf.py
|
#! /usr/bin/env python
import unittest
import ddlib as dd
class TestDDLib(unittest.TestCase):
def setUp(self):
self.words = ["Tanja", "married", "Jake", "five", "years", "ago"]
self.lemma = ["Tanja", "marry", "Jake", "five", "years", "ago"]
def test_materialize_span(self):
span1 = dd.Span(0, 3)
materialized_span = dd.materialize_span(self.words, span1)
self.assertEqual(list(materialized_span), ["Tanja", "married", "Jake"])
def test_tokens_between_spans(self):
span1 = dd.Span(0, 2)
span2 = dd.Span(3, 5)
words_between = dd.tokens_between_spans(self.words, span1, span2)
self.assertEqual([words_between[0], list(words_between[1])], [False, ["Jake"]])
words_between = dd.tokens_between_spans(self.words, span2, span1)
self.assertEqual([words_between[0], list(words_between[1])], [True, ["Jake"]])
words_between = dd.tokens_between_spans(self.words, span1, span1)
self.assertEqual([words_between[0], list(words_between[1])], [False, []])
if __name__ == '__main__':
unittest.main()
|
deepdive-master
|
ddlib/test/test.py
|
#! /usr/bin/env python
# File: udf/ext_has_spouse_features.py
import sys, json
import ddlib
# For each input tuple
# TODO: Sample Data and the input schema.
# sample json
for row in sys.stdin:
# Unpack input into tuples.
#
obj = json.loads(row)
words, lemmas = obj["words"], obj["lemma"]
span1 = ddlib.Span(begin_word_id=obj['p1.start_position'], length=obj['p1.length'])
span2 = ddlib.Span(begin_word_id=obj['p2.start_position'], length=obj['p2.length'])
features = set()
# Feature 1: Find out if a lemma of marry occurs.
# A better feature would ensure this is on the dependency path between the two.
#
lemma_between = ddlib.tokens_between_spans(lemmas, span1, span2)
married_words = ('marry', 'widow')
for lemma in lemma_between.elements:
if lemma in married_words:
features.add("important_word=%s" % lemma)
# Feature 2: The number of words between the two phrases.
# Intuition: if they are close by, the link may be stronger.
#
words_between = ddlib.tokens_between_spans(words, span1, span2)
l = len(list(words_between.elements))
features.add("num_words_between=%s" % l if l<5 else "many_words_between")
# Feature 3: Check if the last name matches heuristically.
#
last_word_left = list(ddlib.materialize_span(words, span1))[-1]
last_word_right = list(ddlib.materialize_span(words, span2))[-1]
if (last_word_left == last_word_right):
features.add("potential_last_name_match")
# Use this line if you want to print out all features extracted
#
#ddlib.log(features)
for feature in sorted(features):
print(json.dumps({
"relation_id": obj["relation_id"],
"feature": feature
}, sort_keys=True))
|
deepdive-master
|
ddlib/test/with_ddlib.py
|
#! /usr/bin/env python
# File: udf/ext_has_spouse_features.py
import sys, json
# For each input tuple
# TODO: Sample Data and the input schema.
# sample json
for row in sys.stdin:
obj = json.loads(row)
# Library/DSL??? This is a span, it should be an object.
p1_start = obj["p1.start_position"]
p1_length = obj["p1.length"]
p1_end = p1_start + p1_length
p2_start = obj["p2.start_position"]
p2_length = obj["p2.length"]
p2_end = p2_start + p2_length
p1_text = obj["words"][p1_start:p1_length]
p2_text = obj["words"][p2_start:p2_length]
left_idx = min(p1_end, p2_end)
right_idx = max(p1_start, p2_start)
# Features for this pair come in here
features = set()
# Feature 1: Find out if a lemma of marry occurs.
# A better feature would ensure this is on the dependency path between the two.
lemma_between = obj["lemma"][left_idx:right_idx]
married_words = ['marry', 'widow']
for mw in married_words:
if mw in lemma_between:
features.add("important_word=%s" % mw)
# Feature 2: The number of words between the two phrases.
# Intuition: if they are close by, the link may be stronger.
words_between = obj["words"][left_idx:right_idx]
l = len(words_between)
if l < 5: features.add("num_words_between=%s" % l)
else: features.add("many_words_between")
# Feature 3: Check if the last name matches heuristically.
last_word_left = obj["words"][p1_end - 1]
last_word_right = obj["words"][p2_end - 1]
if (last_word_left == last_word_right):
features.add("potential_last_name_match")
# TODO: Add more features, look at dependency paths, etc
for feature in sorted(features):
print(json.dumps({
"relation_id": obj["relation_id"],
"feature": feature
}, sort_keys=True))
|
deepdive-master
|
ddlib/test/without_ddlib.py
|
#! /usr/bin/env python
# Usage: calibration.py [target/calibration_data_file.csv] [output_file.png]
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
CALIBRATION_FILE = sys.argv[1]
OUT_IMG_FILE = sys.argv[2]
labels = []
counts = []
prec = []
counts_train = []
for l in open(CALIBRATION_FILE):
(a,b,c,d,e) = l.rstrip().split('\t')
labels.append((float(a) + float(b))/2)
counts.append(int(c))
if float(d) + float(e) == 0:
prec.append(0.0)
else:
prec.append(float(d)/(float(d) + float(e)))
counts_train.append(float(d)+float(e))
fig, ax = plt.subplots(figsize=(12,3))
MARGIN = 1
fig.subplots_adjust(right=0.99, left=0.05, top=0.9, bottom=0.25)
gs = gridspec.GridSpec(1, 3, width_ratios=[1,1,1])
plt.subplot(gs[0])
width = 0.1
labels_nz = []
prec_nz = []
for i in range(0, len(labels)):
if counts_train[i] != 0:
labels_nz.append(labels[i])
prec_nz.append(prec[i])
plt.plot(labels_nz, prec_nz, 'ro-')
plt.plot([0,1],[0,1],'b--')
plt.title("(a) Accuracy (Testing Set)")
plt.ylabel("Accuracy")
plt.xlabel("Probability")
plt.ylim(0,1)
plt.xlim(0,1.1)
plt.text(0, -0.35 , "* (a) and (b) are produced using 50% held-out on evidence variables; (c) also includes all non-evidence variables of the same relation.", fontsize=10, style='italic')
plt.subplot(gs[1])
width = 0.1
plt.bar(labels, counts_train, width, color='b')
plt.title("(b) # Predictions (Testing Set)")
plt.ylabel("# Predictions")
plt.xlabel("Probability")
plt.xlim(0,1.1)
plt.subplot(gs[2])
width = 0.1
plt.bar(labels, counts, width, color='b')
plt.title("(c) # Predictions (Whole Set)")
plt.ylabel("# Predictions")
plt.xlabel("Probability")
plt.xlim(0,1.1)
plt.savefig(OUT_IMG_FILE)
|
deepdive-master
|
util/calibration.py
|
#!/usr/bin/env python
from deepdive import *
@tsv_extractor
@returns(lambda
column1 = "text",
column2 = "int",
column3 = "float",
:[])
def my_udf(
column1 = "text",
column2 = "int",
column3 = "float",
):
yield [
column1,
column2,
column3,
]
|
deepdive-master
|
examples/template/udf/extractor_template.py
|
import os,sys # needed by most
import random # random
import yaml # yaml parsing
import pprint # pretty print
if __name__ == "__main__":
inpath = 'f52-c3-m1011/'
outpath = ''
# ...
if len(sys.argv) == 3:
inpath = sys.argv[1]
num = int(sys.argv[2])
else:
print 'Usage:',sys.argv[0],'<path> <num>'
sys.exit(1)
|
deepdive-master
|
examples/ocr/input/raw/real2bool-feature.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os,sys # needed by most
import random # random
import yaml # yaml parsing
import pprint # pretty print
dirbase = 'boolean-f52-c3-m620/'
ids = [f[:-len('.features.txt')] for f in os.listdir(dirbase) if f.endswith('.features.txt')]
print 'Process files:', ids
wid = 1
fout = open('feature_table.csv', 'w')
for fid in ids:
lines = open(dirbase + fid+'.features.txt').readlines()
for l in lines:
vals = [b for b in l.strip().split('\t')]
# print vals
for sub in range(0, len(vals)):
print >>fout, str(wid) + ',' + str(sub)+','+ str(vals[sub])
wid += 1
totid = wid
wid = 1
fl1 = open('label1_table.csv', 'w')
fl2 = open('label2_table.csv', 'w')
for fid in ids:
labels = [int(s) for s in open(dirbase + fid+'.labels.txt').readlines()]
for l in labels:
l1 = False
l2 = False
if l == 1: l1 = True
if l == 2: l2 = True
print >>fl1, str(wid) + ',' + str(l1)
print >>fl2, str(wid) + ',' + str(l2)
wid += 1
fl1.close()
fl2.close()
|
deepdive-master
|
examples/ocr/input/raw/gen_feature_table.py
|
#!/usr/bin/env python
from deepdive import *
import ddlib
@tsj_extractor
@returns(lambda
p1_id = "text",
p2_id = "text",
feature = "text",
:[])
def extract(
p1_id = "text",
p2_id = "text",
p1_begin_index = "int",
p1_end_index = "int",
p2_begin_index = "int",
p2_end_index = "int",
doc_id = "text",
sent_index = "int",
tokens = "text[]",
lemmas = "text[]",
pos_tags = "text[]",
ner_tags = "text[]",
dep_types = "text[]",
dep_parents = "int[]",
):
"""
Uses DDLIB to generate features for the spouse relation.
"""
# Create a DDLIB sentence object, which is just a list of DDLIB Word objects
sent = []
for i,t in enumerate(tokens):
sent.append(ddlib.Word(
begin_char_offset=None,
end_char_offset=None,
word=t,
lemma=lemmas[i],
pos=pos_tags[i],
ner=ner_tags[i],
dep_par=dep_parents[i] - 1, # Note that as stored from CoreNLP 0 is ROOT, but for DDLIB -1 is ROOT
dep_label=dep_types[i]))
# Create DDLIB Spans for the two person mentions
p1_span = ddlib.Span(begin_word_id=p1_begin_index, length=(p1_end_index-p1_begin_index+1))
p2_span = ddlib.Span(begin_word_id=p2_begin_index, length=(p2_end_index-p2_begin_index+1))
# Generate the generic features using DDLIB
for feature in ddlib.get_generic_features_relation(sent, p1_span, p2_span):
yield [p1_id, p2_id, feature]
|
deepdive-master
|
examples/spouse/udf/extract_spouse_features.py
|
#!/usr/bin/env python
from deepdive import *
# for python 3 compatibility
try:
xrange
except NameError:
xrange = range
@tsj_extractor
@returns(lambda
mention_id = "text",
mention_text = "text",
doc_id = "text",
sentence_index = "int",
begin_index = "int",
end_index = "int",
:[])
def extract(
doc_id = "text",
sentence_index = "int",
tokens = "text[]",
ner_tags = "text[]",
):
"""
Finds phrases that are continuous words tagged with PERSON.
"""
num_tokens = len(ner_tags)
# find all first indexes of series of tokens tagged as PERSON
first_indexes = (i for i in xrange(num_tokens) if ner_tags[i] == "PERSON" and (i == 0 or ner_tags[i-1] != "PERSON"))
for begin_index in first_indexes:
# find the end of the PERSON phrase (consecutive tokens tagged as PERSON)
end_index = begin_index + 1
while end_index < num_tokens and ner_tags[end_index] == "PERSON":
end_index += 1
end_index -= 1
# generate a mention identifier
mention_id = "%s_%d_%d_%d" % (doc_id, sentence_index, begin_index, end_index)
mention_text = " ".join(map(lambda i: tokens[i], xrange(begin_index, end_index + 1)))
# Output a tuple for each PERSON phrase
yield [
mention_id,
mention_text,
doc_id,
sentence_index,
begin_index,
end_index,
]
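# Illustrative example (added; the document is made up): for tokens
# ["Barack", "Obama", "visited"] with ner_tags ["PERSON", "PERSON", "O"] in
# sentence 3 of doc "d1", this yields ["d1_3_0_1", "Barack Obama", "d1", 3, 0, 1].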
|
deepdive-master
|
examples/spouse/udf/map_person_mention.py
|
#!/usr/bin/env python
from deepdive import *
import random
from collections import namedtuple
SpouseLabel = namedtuple('SpouseLabel', 'p1_id, p2_id, label, type')
@tsj_extractor
@returns(lambda
p1_id = "text",
p2_id = "text",
label = "int",
rule_id = "text",
:[])
# heuristic rules for finding positive/negative examples of spouse relationship mentions
def supervise(
p1_id="text", p1_begin="int", p1_end="int",
p2_id="text", p2_begin="int", p2_end="int",
doc_id="text", sentence_index="int",
tokens="text[]", lemmas="text[]", pos_tags="text[]", ner_tags="text[]",
dep_types="text[]", dep_token_indexes="int[]",
):
# Constants
MARRIED = frozenset(["wife", "husband"])
FAMILY = frozenset(["mother", "father", "sister", "brother", "brother-in-law"])
MAX_DIST = 10
# Common data objects
p1_end_idx = min(p1_end, p2_end)
p2_start_idx = max(p1_begin, p2_begin)
p2_end_idx = max(p1_end,p2_end)
intermediate_lemmas = lemmas[p1_end_idx+1:p2_start_idx]
intermediate_ner_tags = ner_tags[p1_end_idx+1:p2_start_idx]
tail_lemmas = lemmas[p2_end_idx+1:]
spouse = SpouseLabel(p1_id=p1_id, p2_id=p2_id, label=None, type=None)
# Rule: Candidates that are too far apart
if len(intermediate_lemmas) > MAX_DIST:
yield spouse._replace(label=-1, type='neg:far_apart')
# Rule: Candidates that have a third person in between
if 'PERSON' in intermediate_ner_tags:
yield spouse._replace(label=-1, type='neg:third_person_between')
# Rule: Sentences that contain wife/husband in between
# (<P1>)([ A-Za-z]+)(wife|husband)([ A-Za-z]+)(<P2>)
if len(MARRIED.intersection(intermediate_lemmas)) > 0:
yield spouse._replace(label=1, type='pos:wife_husband_between')
# Rule: Sentences that contain and ... married
# (<P1>)(and)?(<P2>)([ A-Za-z]+)(married)
if ("and" in intermediate_lemmas) and ("married" in tail_lemmas):
yield spouse._replace(label=1, type='pos:married_after')
# Rule: Sentences that contain familial relations:
    # (<P1>)([ A-Za-z]+)(brother|sister|father|mother)([ A-Za-z]+)(<P2>)
if len(FAMILY.intersection(intermediate_lemmas)) > 0:
yield spouse._replace(label=-1, type='neg:familial_between')
|
deepdive-master
|
examples/spouse/udf/supervise_spouse.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
# The script to randomly split a dataset for hyperparameter tuning
import os
from absl import app
from absl import flags
import pdb
from datasets import load_dataset, concatenate_datasets
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input directory that contains train.tsv and test.tsv .")
flags.DEFINE_string("dataset", "", "Input dataset name. Output will be stored at dataset_hp")
def main(unused_argv):
# Concatenate train and test file
data_files = {}
data_files["train"] = FLAGS.input + '/train.tsv'
data_files["test"] = FLAGS.input + '/test.tsv'
# pdb.set_trace()
raw_datasets = load_dataset("csv", data_files=data_files, sep='\t', column_names=["input", "output"])
concat_data = concatenate_datasets([raw_datasets["train"], raw_datasets["test"]])
# Split the dataset by 90:10 train test ratio
splitted = concat_data.train_test_split(test_size=0.1, shuffle=True, seed=42)
if not os.path.exists('data/' + FLAGS.dataset + '_hp'):
os.makedirs('data/' + FLAGS.dataset + '_hp')
# Output the corresponding splits to target directory
splitted["train"].to_csv('data/' + FLAGS.dataset + '_hp' + '/train.csv', sep="\t", index=False)
splitted["test"].to_csv('data/' + FLAGS.dataset + '_hp' + '/test.csv', sep="\t", index=False)
if __name__ == "__main__":
app.run(main)
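# Example invocation (added; the paths are assumptions):
#   python split_dataset_for_hp.py --input data/geoquery/tmcd_split --dataset geoquery
# which writes data/geoquery_hp/train.csv and data/geoquery_hp/test.csv.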
|
CompGenRep_MLRC2022-main
|
split_dataset_for_hp.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
# This file include utility functions to compute stats given a dataset
import re
import os
import csv
import pdb
from prettytable import PrettyTable
from torchaudio.functional import edit_distance
from transformers import AutoTokenizer
from utils.helper_utils.helper_methods import load_dataset_with_name, list_datasets_and_their_splits
def build_table_for_all_datasets(data_type, sub_datatype=None, model_name='facebook/bart-base'):
"""
Build a csv table for all data & splits
Input:
Data_type in {num_instances, raw_avg_length, tok_seq_length, lexical_overlap}
"""
base_dir = os.getenv('BASE_DIR')
# Construct table with dataset stats
tab = PrettyTable(header=True)
optim_splits = ['train', 'validation', 'test', 'gen', 'Overall']
tab.field_names = ['Dataset', 'Split'] + optim_splits
dataset_names, splits_mapping = list_datasets_and_their_splits(base_dir + '/baseline_replication/TMCD/data')
for dataset_name in dataset_names:
for split in splits_mapping[dataset_name]:
curr_row = []
curr_row.append(dataset_name)
curr_row.append(split)
# Compute the data
if data_type == 'num_instances':
res, _ = number_of_instances(dataset_name, split)
elif data_type == 'raw_avg_length':
input_avg_len, output_avg_len, _ = compute_avg_length(dataset_name, split)
if sub_datatype == 'input':
res = input_avg_len
else:
res = output_avg_len
elif data_type == 'tok_seq_length':
input_avg_len, output_avg_len, _ = compute_avg_tokenized_length_hf(dataset_name, split, target_model_name = model_name)
if sub_datatype == 'input':
res = input_avg_len
else:
res = output_avg_len
elif data_type == 'lexical_overlap':
res, _ = compute_lexical_overlap(dataset_name, split)
else:
raise ValueError('The data_type can only be {num_instances, raw_avg_length, tok_seq_length, lexical_overlap}.')
# Build the table
for optim_split in optim_splits:
if optim_split in res:
curr_row.append(res[optim_split])
elif optim_split == 'Overall' and 'avg' in res:
# For seq length, the overall equiv to avg
curr_row.append(res['avg'])
else:
curr_row.append('-')
tab.add_row(curr_row)
if not os.path.exists(base_dir + '/results/analysis_res/'):
os.makedirs(base_dir + '/results/analysis_res/')
# Construct CSV filename
file_name = data_type
if sub_datatype:
file_name += '_' + sub_datatype
if data_type == 'tok_seq_length':
if '/' in model_name:
file_name += '_' + model_name.split('/')[-1]
else:
file_name += '_' + model_name
with open(base_dir + '/results/analysis_res/' + file_name + '.csv', 'w', newline='') as f:
f.write(tab.get_csv_string())
print(tab)
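# Example invocation (added; assumes BASE_DIR is set and the TMCD data layout above):
#   build_table_for_all_datasets('tok_seq_length', sub_datatype='input',
#                                model_name='facebook/bart-base')
# writes results/analysis_res/tok_seq_length_input_bart-base.csv and prints the table.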
def number_of_instances():
"""
    Print a table with the number of instances for every dataset and split, and
    write it to results/analysis_res/num_instances.csv (no return value).
"""
base_dir = os.getenv('BASE_DIR')
# Construct table with dataset stats
tab = PrettyTable()
optim_splits = ['train', 'validation', 'test', 'gen', 'Overall']
tab.add_row(['Dataset', 'Split'] + optim_splits)
dataset_names, splits_mapping = list_datasets_and_their_splits(base_dir + '/baseline_replication/TMCD/data')
for dataset_name in dataset_names:
for split in splits_mapping[dataset_name]:
curr_row = []
# Load the dataset
dataset = load_dataset_with_name(dataset_name, split)
curr_row.append(dataset_name)
curr_row.append(split)
for optim_split in optim_splits:
if optim_split in dataset:
curr_row.append(len(dataset[optim_split]))
else:
curr_row.append(0.0)
            # Add up the instance count for overall
curr_row[-1] = sum(curr_row[2:])
tab.add_row(curr_row)
if not os.path.exists(base_dir + '/results/analysis_res/'):
os.makedirs(base_dir + '/results/analysis_res/')
with open(base_dir + '/results/analysis_res/num_instances.csv', 'w', newline='') as f:
f.write(tab.get_csv_string())
print(tab)
def number_of_instances(dataset_name, split):
"""
Output number of instances for each dataset
Outputs:
num_instances (dict): number of instance in each optimization split, keys are train/test/dev
"""
# Construct table with dataset stats
tab = PrettyTable()
num_instances = dict()
split_names = []
overall_num = 0
num_column = []
# Load the dataset
dataset = load_dataset_with_name(dataset_name, split)
for optim_split in dataset:
split_names.append(optim_split)
num_instances[optim_split] = len(dataset[optim_split])
num_column.append(len(dataset[optim_split]))
overall_num += len(dataset[optim_split])
    # Add the instance count for overall
num_column.append(overall_num)
num_instances['Overall'] = overall_num
tab.add_column('Split', split_names + ['Overall'])
tab.add_column('Number of Instances', num_column)
return num_instances, tab
def compute_avg_length(dataset_name, split):
"""
Computes the average number of words of input and output
Outputs:
input_avg_len (dict): avg number of words in input, keys are train/test/dev
output_avg_len (dict): avg number of words in output, keys are train/test/dev
tab (PrettyTable): the table with a display of dataset stat
"""
# TODO: Maybe plot the distribution of length, too?
# Load the dataset
dataset = load_dataset_with_name(dataset_name, split)
# Construct table with dataset stats
tab = PrettyTable()
input_avg_len = dict()
output_avg_len = dict()
# Loop through the split
split_names = []
dataset_lens = []
input_lens_column = []
output_lens_column = []
overall_input_len = 0
overall_output_len = 0
for ft_split in dataset:
split_names.append(ft_split)
dataset_lens.append(len(dataset[ft_split]))
tot_input_len = 0
tot_output_len = 0
for instance in dataset[ft_split]:
tot_input_len += len(re.findall(r'\w+', instance['input']))
tot_output_len += len(re.findall(r'\w+', instance['output']))
input_avg_len[ft_split] = tot_input_len / len(dataset[ft_split])
output_avg_len[ft_split] = tot_output_len / len(dataset[ft_split])
input_lens_column.append(input_avg_len[ft_split])
output_lens_column.append(output_avg_len[ft_split])
overall_input_len += tot_input_len
overall_output_len += tot_output_len
# Add the averaged length to table data for display
input_lens_column.append(overall_input_len / sum(dataset_lens))
output_lens_column.append(overall_output_len / sum(dataset_lens))
input_avg_len['avg'] = input_lens_column[-1]
output_avg_len['avg'] = output_lens_column[-1]
tab.add_column('Split', split_names + ['Overall'])
tab.add_column('Number of Instances', dataset_lens + [0])
tab.add_column('Avg input length', input_lens_column)
tab.add_column('Avg output length', output_lens_column)
return input_avg_len, output_avg_len, tab
def compute_lexical_overlap(dataset_name, split):
"""
Computes the average lexical overlap (Levenshtein distance / input_len) between input and output
Outputs:
avg_overlap (dict): avg lexical overlap between input and output, keys are train/test/dev
"""
# Load the dataset
dataset = load_dataset_with_name(dataset_name, split)
# Construct table with dataset stats
tab = PrettyTable()
avg_overlap = dict()
# Loop through the split
split_names = []
dataset_lens = []
overlap_column = []
overall_overlap = 0.0
for ft_split in dataset:
split_names.append(ft_split)
dataset_lens.append(len(dataset[ft_split]))
tot_overlap = 0.0
for instance in dataset[ft_split]:
tot_overlap += edit_distance(instance['input'], instance['output']) / len(instance['input'])
avg_overlap[ft_split] = tot_overlap / len(dataset[ft_split])
overlap_column.append(avg_overlap[ft_split])
overall_overlap += tot_overlap
# Add the averaged length to table data for display
overlap_column.append(overall_overlap / sum(dataset_lens))
avg_overlap['avg'] = overlap_column[-1]
tab.add_column('Split', split_names + ['Overall'])
    tab.add_column('Number of Instances', dataset_lens + [0])
tab.add_column('Avg Lev(input, output) / input_len', overlap_column)
return avg_overlap, tab
def compute_avg_tokenized_length_hf(dataset_name, split, target_model_name, max_seq_length=512, max_output_length=512):
"""
Computes the average number of tokens of input and output after tokenization
Inputs:
dataset_name={COGS, geoquery, spider, SCAN}
target_model_name=model name from Huggingface that has a tokenizer or a path
Outputs:
input_avg_len (dict): avg number of tokens in input, keys are train/test/dev
output_avg_len (dict): avg number of tokens in output, keys are train/test/dev
"""
# Construct table with dataset stats
tab = PrettyTable()
input_avg_len = dict()
output_avg_len = dict()
# Load the dataset
dataset = load_dataset_with_name(dataset_name, split)
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(target_model_name, use_fast=True)
# Loop through the split
split_names = []
dataset_lens = []
input_lens_column = []
output_lens_column = []
overall_input_len = 0
overall_output_len = 0
for optim_split in dataset:
# Tokenize
inputs = dataset[optim_split]['input']
if 't5' in target_model_name:
inputs = ['semanticparse: ' + x for x in inputs]
else:
inputs = [x for x in inputs]
model_inputs = tokenizer(inputs, max_length=max_seq_length, truncation=True)
with tokenizer.as_target_tokenizer():
labels = tokenizer(dataset[optim_split]['output'], max_length=max_output_length, truncation=True)
# Compute the length
split_names.append(optim_split)
dataset_lens.append(len(dataset[optim_split]))
tot_input_len = 0
tot_output_len = 0
for input_tok, output_tok in zip(model_inputs['input_ids'], labels['input_ids']):
tot_input_len += len(input_tok)
            tot_output_len += len(output_tok)
input_avg_len[optim_split] = tot_input_len / len(dataset[optim_split])
output_avg_len[optim_split] = tot_output_len / len(dataset[optim_split])
input_lens_column.append(input_avg_len[optim_split])
output_lens_column.append(output_avg_len[optim_split])
overall_input_len += tot_input_len
overall_output_len += tot_output_len
# Add the averaged length to table data for display
input_lens_column.append(overall_input_len / sum(dataset_lens))
output_lens_column.append(overall_output_len / sum(dataset_lens))
input_avg_len['avg'] = input_lens_column[-1]
output_avg_len['avg'] = output_lens_column[-1]
tab.add_column('Split', split_names + ['Overall'])
tab.add_column('Number of Instances', dataset_lens + [0])
tab.add_column('Avg input length', input_lens_column)
tab.add_column('Avg output length', output_lens_column)
return input_avg_len, output_avg_len, tab
|
CompGenRep_MLRC2022-main
|
utils/dataset_stat.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
import os
BASE_DIR = os.environ.get('BASE_DIR')
MODEL_DIR = os.path.join(BASE_DIR, 'trained_models/')
TMCD_MODEL_DIR = os.path.join(BASE_DIR, 'baseline_replication/TMCD/trained_models/')
DATA_DIR = os.path.join(BASE_DIR, 'data/')
TMCD_DATA_DIR = os.path.join(BASE_DIR, 'baseline_replication/TMCD/data/')
TMCD_DATASETS = {'SCAN', 'geoquery', 'spider'}
|
CompGenRep_MLRC2022-main
|
utils/constants.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
import os
import json
from constants import TMCD_DATASETS, TMCD_MODEL_DIR, MODEL_DIR
def load_training_curve_info(model_name, dataset, split, checkpoint=None):
"""
Returns steps [list], ems [list], best_em float
"""
ems = []
steps = []
best_em = 0.0
# Find the path to the model
if dataset in TMCD_DATASETS:
# Load the model in TMCD data dir
path = os.path.join(TMCD_MODEL_DIR, dataset, model_name + '_' + split + '_1e-4')
else:
path = os.path.join(MODEL_DIR, dataset, model_name + '_' + split + '_1e-4')
if checkpoint is not None:
path = os.path.join(path, 'checkpoint-' + checkpoint)
# Load the model's trainer_state
trainer_state = json.load(open(path + '/trainer_state.json'))
for metrics in trainer_state['log_history']:
if 'eval_exact_match' in metrics:
ems.append(metrics['eval_exact_match'])
steps.append(metrics['step'])
if metrics['eval_exact_match'] > best_em:
best_em = metrics['eval_exact_match']
return steps, ems, best_em
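# Illustrative usage (added; the model/dataset/split names are assumptions):
#   steps, ems, best_em = load_training_curve_info('t5-base', 'geoquery', 'tmcd')
# reads .../trained_models/geoquery/t5-base_tmcd_1e-4/trainer_state.json and
# returns the eval_exact_match curve together with its best value.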
|
CompGenRep_MLRC2022-main
|
utils/analysis_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
import os
import re
from datasets import load_dataset
base_dir = os.environ['BASE_DIR']
def load_dataset_with_name(dataset_name, split):
"""
Take a dataset name and split name, load the dataset.
Returns a huggingface dataset dict.
"""
# TODO: Uncomment this line after refactor
# path = base_dir + '/data/' + dataset_name + '/' + split + '_split/'
path = base_dir + '/baseline_replication/TMCD/data/' + dataset_name + '/' + split + '_split/'
data_files = {}
if os.path.exists(path + 'train.tsv'):
data_files["train"] = path + 'train.tsv'
if os.path.exists(path + 'dev.tsv'):
data_files["validation"] = path + 'dev.tsv'
if os.path.exists(path + 'test.tsv'):
data_files["test"] = path + 'test.tsv'
if os.path.exists(path + 'gen.tsv'):
data_files["gen"] = path + 'gen.tsv'
raw_datasets = load_dataset("csv", data_files=data_files, sep='\t', column_names=["input", "output"])
return raw_datasets
def list_datasets_and_their_splits(data_path):
"""
data_path (str): The directory that include all the dataset files
returns:
dataset_names (list of str)
splits_mapping (dict, key in dataset_names): values are the available splits
"""
avail_datasets = os.listdir(data_path)
dataset_names = []
splits_mapping = dict()
for dir in avail_datasets:
if 'orig' not in dir and '_hp' not in dir:
dataset_names.append(dir)
avail_splits = os.listdir(data_path +'/' + dir)
# Add splits to the dict mapping
for split in avail_splits:
if '_split' in split:
if dir not in splits_mapping:
splits_mapping[dir] = []
splits_mapping[dir].append(re.sub('_split', '', split))
return dataset_names, splits_mapping
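# Illustrative usage (added; directory names are assumptions): given
# data/geoquery/tmcd_split/ and data/geoquery/standard_split/, this returns
#   (['geoquery'], {'geoquery': ['tmcd', 'standard']})
# (listing order depends on os.listdir).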
|
CompGenRep_MLRC2022-main
|
utils/helper_utils/helper_methods.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
import logging
import os
import re
import sys
import json
import torch
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import pdb
import datasets
from datasets import load_dataset, load_metric
from ast import literal_eval
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
AdamW,
Adafactor,
get_scheduler,
)
from transformers.trainer_utils import EvalLoopOutput, EvalPrediction, get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from trainer_seq2seq_sp import SemanticParsingSeq2SeqTrainer
torch.cuda.empty_cache()
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0")
require_version("datasets>=1.8.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
is_tuning: bool = field(
default=False,
metadata={
"help": "Whether we are tunning hyperparameters. "
"If True, will automatically split the training set into validation set "
},
)
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."},
)
max_seq_length: int = field(
default=512,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_output_length: int = field(
default=512,
metadata={
"help": "The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
},
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch (which can "
"be faster on GPU but will be slower on TPU)."
},
)
n_best_size: int = field(
default=20,
metadata={"help": "The total number of n-best predictions to generate when looking for an answer."},
)
num_beams: Optional[int] = field(
default=20,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
def __post_init__(self):
if (
self.dataset_name is None
and self.train_file is None
and self.validation_file is None
and self.test_file is None
):
raise ValueError("Need either a dataset name or a training/validation file/test_file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "tsv"], "`train_file` should be a csv or tsv file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "tsv"], "`validation_file` should be a csv or tsv file."
if self.test_file is not None:
extension = self.test_file.split(".")[-1]
assert extension in ["csv", "tsv"], "`test_file` should be a csv or tsv file."
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
if data_args.dataset_name == 'scan':
raw_datasets = raw_datasets.rename_column('commands', 'input')
raw_datasets = raw_datasets.rename_column('actions', 'output')
            # Temporarily use the test split as the validation split
raw_datasets["validation"] = raw_datasets["test"]
logger.warning(f"Changed column names of SCAN dataset into {raw_datasets}")
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
if extension == "tsv":
            # When the extension is tsv, the file follows the NQG format and has no header row with column names
raw_datasets = load_dataset("csv", data_files=data_files, sep='\t', column_names=["input", "output"])
else:
raw_datasets = load_dataset(extension, data_files=data_files, sep='\t')
if data_args.is_tuning:
raw_datasets = raw_datasets['train'].train_test_split(test_size=0.1)
raw_datasets['validation'] = raw_datasets['test']
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
)
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
model.resize_token_embeddings(len(tokenizer))
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
# Temporarily set max_answer_length for training.
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
logger.warning(
"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
max_answer_length = min(data_args.max_output_length, tokenizer.model_max_length)
def preprocess_function(examples):
inputs = examples['input']
        if 't5' in model_args.model_name_or_path or (data_args.train_file is not None and 'COGS' not in data_args.train_file):
inputs = ['semanticparse: ' + x for x in inputs]
else:
inputs = [x for x in inputs]
targets = examples['output']
model_inputs = tokenizer(inputs, max_length=max_seq_length, padding=padding, truncation=True, return_offsets_mapping=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_answer_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
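        # Illustrative sketch (assumed toy batch, not from the repo's data): for
        #   {"input": ["jump twice"], "output": ["JUMP JUMP"]}
        # a T5 checkpoint sees the source "semanticparse: jump twice"; the returned dict
        # carries "input_ids", "attention_mask", "offset_mapping" and "labels", and when
        # padding to max_length with ignore_pad_token_for_loss set, the label pad ids are
        # swapped for -100 so the loss skips them.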
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
# Create train feature from dataset
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
desc="Running tokenizer on train dataset",
)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_examples.map(
preprocess_function,
batched=True,
desc="Running tokenizer on validation dataset",
)
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
metric = load_metric("exact_match")
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids, ignore_case=True, ignore_punctuation=True, regexes_to_ignore=' ')
# Post-processing:
def post_processing_function(
examples: datasets.Dataset, features: datasets.Dataset, outputs: EvalLoopOutput, stage="eval"
):
# Decode the predicted tokens.
preds = outputs.predictions
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds)
decoded_preds = [pred.replace(" ⁇ ", "<").replace("<pad> ", "").replace("<pad>", "").replace("</s>", "").replace("<unk>", "<").replace("<s>", "") for pred in decoded_preds]
predictions = []
raw_references = []
# Fix white space
def white_space_fix(text):
return " ".join(text.split())
# Let's loop over all the examples!
for i in range(len(features)):
predictions.append(white_space_fix(decoded_preds[i]))
raw_references.append(white_space_fix(features[i]['output'].replace(" ,", ",")))
# Save predictions
prefix = 'eval'
prediction_file = os.path.join(
training_args.output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
)
logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(predictions, indent=4) + "\n")
# Save ground truth
ground_truth_file = os.path.join(
training_args.output_dir, "golds.json" if prefix is None else f"{prefix}_golds.json"
)
logger.info(f"Saving predictions to {ground_truth_file}.")
with open(ground_truth_file, "w") as writer:
writer.write(json.dumps(raw_references, indent=4) + "\n")
return EvalPrediction(predictions=predictions, label_ids=raw_references)
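        # Illustrative note (the example string is hypothetical): a decoded prediction such as
        #   "<pad> x _ 1 ; cake ( x _ 1 )</s>"
        # has its special tokens stripped and whitespace normalized above before being paired
        # with the whitespace-fixed gold "output" column; both lists are also written to
        # eval_predictions.json and eval_golds.json in the output directory.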
# Initialize optimizer and scheduler
if training_args.do_train:
if 't5' in model_args.model_name_or_path:
# optimizer = AdamW(model.parameters(), lr=training_args.learning_rate, weight_decay=0.01)
optimizer = Adafactor(model.parameters(), lr=training_args.learning_rate, relative_step=False)
else:
optimizer = AdamW(model.parameters(), lr=training_args.learning_rate, weight_decay=0.01)
        lr_scheduler = get_scheduler('linear', optimizer, num_warmup_steps=0, num_training_steps=training_args.num_train_epochs * (len(train_dataset) // training_args.per_device_train_batch_size))
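        # Rough step-count sketch (illustrative numbers only): with 10,000 training examples,
        # per_device_train_batch_size=16 and 20 epochs, the linear schedule above decays over
        # 20 * (10000 // 16) = 12,500 optimizer steps; gradient accumulation and multi-GPU
        # sharding are not reflected in this estimate.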
# Initialize our Trainer
if training_args.do_train:
trainer = SemanticParsingSeq2SeqTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
eval_examples=eval_examples if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
optimizers=(optimizer, lr_scheduler),
# generation_num_beams=data_args.num_beams,
post_process_function=post_processing_function,
# num_beams=data_args.num_beams,
)
else:
trainer = SemanticParsingSeq2SeqTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
eval_examples=eval_examples if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
post_process_function=post_processing_function,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
metrics["train_samples"] = len(train_dataset)
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
results = {}
    max_length = data_args.max_seq_length
num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
if training_args.do_eval and not training_args.do_predict:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix="eval")
max_eval_samples = len(eval_dataset)
metrics["eval_samples"] = len(eval_dataset)
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.do_predict:
res = trainer.predict(eval_dataset)
# Save the prediction files for spider evaluation
        prediction_list = list(res.predictions)
# Output to result dir
base_dir = os.environ["BASE_DIR"]
# Strip the dataset name and split
test_list = data_args.validation_file.split('/')
dataset_name = test_list[test_list.index('data') + 1]
split = test_list[test_list.index('data') + 2].split('_')[0]
if 't5' in model_args.model_name_or_path:
model_name = 't5'
else:
model_name = 'bart'
logger.info("Writing model predictions to txt file...")
with open(base_dir + '/results/predictions/' + dataset_name + '/' + model_name + '_' + split + '.txt', 'w') as f:
for line in prediction_list:
f.write(f"{line}\n")
if __name__ == "__main__":
main()
|
CompGenRep_MLRC2022-main
|
hf_training/fine_tune_t5.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A subclass of `Trainer` specific to Semantic Parsing tasks
"""
from typing import Dict, List, Optional, Union, Tuple, Any
import torch
from torch import nn
from torch.utils.data import Dataset
import transformers
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import pdb
class SemanticParsingSeq2SeqTrainer(Seq2SeqTrainer):
def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
super().__init__(*args, **kwargs)
self.eval_examples = eval_examples
self.post_process_function = post_process_function
# def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
eval_examples=None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
max_length: Optional[int] = None,
num_beams: Optional[int] = None,
) -> Dict[str, float]:
self._max_length = max_length if max_length is not None else self.args.generation_max_length
self._num_beams = num_beams if num_beams is not None else self.args.generation_num_beams
eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_examples = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
)
# pdb.set_trace()
finally:
self.compute_metrics = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
metrics = self.compute_metrics(eval_preds)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
self.log(metrics)
else:
metrics = {}
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
return metrics
def predict(self, predict_dataset, predict_examples=None, ignore_keys=None, metric_key_prefix: str = "test"):
self._max_length = self.args.generation_max_length
self._num_beams = self.args.generation_num_beams
predict_dataloader = self.get_test_dataloader(predict_dataset)
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
predict_examples = self.eval_examples if predict_examples is None else predict_examples
try:
output = eval_loop(
predict_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
)
finally:
self.compute_metrics = compute_metrics
if self.post_process_function is None:
return output
predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=None)
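        # Illustrative usage (assumed, mirroring fine_tune_t5.py): `res = trainer.predict(eval_dataset)`
        # yields post-processed string predictions in `res.predictions`; metrics stay None here
        # because compute_metrics is only restored after the prediction loop has finished.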
|
CompGenRep_MLRC2022-main
|
hf_training/trainer_seq2seq_sp.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
import sys
import os
import time
import argparse
import json
import random
import shutil
import copy
import pickle
import torch
from torch import cuda
import numpy as np
import logging
from tokenizer import Tokenizer
from utils import *
from torch.nn.init import xavier_uniform_
from torch.nn.utils.rnn import pad_sequence
parser = argparse.ArgumentParser()
parser.add_argument('--resume_from_checkpoint', default=False, type=bool)
parser.add_argument('--nqg_dataset', default=False, type=bool)
parser.add_argument('--train_file', default='data/SCAN/tasks_train_length.txt')
parser.add_argument('--dev_file', default='data/SCAN/tasks_test_simple.txt')
parser.add_argument('--save_path', default='model.pt', help='where to save the model')
parser.add_argument('--min_freq', default=1, type=int)
parser.add_argument('--sent_max_length_x', default=100, type=int)
parser.add_argument('--sent_max_length_y', default=100, type=int)
# Encoder
parser.add_argument('--enc_dim', default=256, type=int)
parser.add_argument('--enc_layers', default=0, type=int)
parser.add_argument('--enc_dropout', default=0.0, type=float)
# Decoder
parser.add_argument('--pt_states', default=0, type=int)
parser.add_argument('--nt_states', default=0, type=int)
parser.add_argument('--src_pt_states', default=1, type=int)
parser.add_argument('--src_nt_states', default=10, type=int)
parser.add_argument('--dec_dim', default=256, type=int)
parser.add_argument('--dec_dropout', default=0.0, type=float)
parser.add_argument('--dec_layers', default=3, type=int)
parser.add_argument('--dec_nt_span_min', default=2, type=int)
parser.add_argument('--dec_nt_span_max', default=1000, type=int)
parser.add_argument('--dec_pt_span_min', default=1, type=int)
parser.add_argument('--dec_pt_span_max', default=1, type=int)
parser.add_argument('--rule_constraint_type', default=1, type=int)
# Parser
parser.add_argument('--parser_pt_states', default=20, type=int)
parser.add_argument('--parser_nt_states', default=20, type=int)
parser.add_argument('--parser_dim', default=256, type=int)
# Optimization
parser.add_argument('--num_epochs', default=15, type=int, help='number of training epochs')
parser.add_argument('--lr', default=5e-4, type=float, help='starting learning rate')
parser.add_argument('--weight_decay', default=1e-5, type=float, help='l2 weight decay')
parser.add_argument('--max_grad_norm', default=3, type=float, help='gradient clipping parameter')
parser.add_argument('--beta1', default=0.75, type=float, help='beta1 for adam')
parser.add_argument('--beta2', default=0.999, type=float, help='beta2 for adam')
parser.add_argument('--gpu', default=0, type=int, help='which gpu to use')
parser.add_argument('--seed', default=17, type=int, help='random seed')
parser.add_argument('--print_every', type=int, default=1000, help='print stats after N examples')
parser.add_argument('--print_trees', type=int, default=1, help='print trees')
parser.add_argument('--eval_every', type=int, default=1000, help='eval on dev set after N examples')
parser.add_argument('--update_every', type=int, default=4, help='grad update after N examples')
import pdb
def get_data(data_file):
data = []
for d in open(data_file, "r"):
src, tgt = d.split("IN: ")[1].split(" OUT: ")
src = src.strip().split()
tgt = tgt.strip().split()
if len(src) == 1 or len(tgt) == 1:
src = src + src
tgt = tgt + tgt
data.append({"src": src, "tgt": tgt})
return data
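# Illustrative note: each raw SCAN line has the form "IN: <commands> OUT: <actions>";
# get_data splits on those markers and whitespace-tokenizes both sides, duplicating
# single-token sources/targets (presumably to avoid degenerate one-token examples).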
def get_other_data(data_file, sent_max_length_x, sent_max_length_y):
"""
Added, loading tsv files instead
"""
data = []
num_data_removed = 0
for d in open(data_file, "r"):
src, tgt = d.split("\t")
src = src.strip().split()
tgt = tgt.strip().split()
        # As in get_data, duplicate single-token pairs; otherwise it will lead to OOM
if len(src) == 1 or len(tgt) == 1:
src = src + src
tgt = tgt + tgt
        # Truncate instances that are too long
if len(tgt) > sent_max_length_y:
tgt = tgt[:sent_max_length_y]
if len(src) > sent_max_length_x:
src = src[:sent_max_length_x]
data.append({"src": src, "tgt": tgt})
return data
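# Illustrative note: the NQG-style tsv files read here hold one "<source>\t<target>"
# pair per line; sides longer than sent_max_length_x / sent_max_length_y are truncated
# to keep memory usage in check.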
def main(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
cuda.set_device(args.gpu)
device = torch.device("cuda:"+str(args.gpu))
if args.nqg_dataset:
train_data = get_other_data(args.train_file, args.sent_max_length_x, args.sent_max_length_y)
# if "COGS" in args.train_file:
# val_data = get_other_data(args.dev_file)
else:
train_data = get_data(args.train_file)
val_data = get_data(args.dev_file)
x_tokenizer = Tokenizer()
x_tokenizer.train([d["src"] for d in train_data])
y_tokenizer = Tokenizer()
y_tokenizer.train([d["tgt"] for d in train_data])
from models import BinaryTreeLSTM as Encoder
from models import NeuralQCFG as Decoder
from models import NeuralPCFG as Parser
encoder = Encoder(vocab = len(x_tokenizer.vocab2idx),
dim = args.enc_dim,
dropout = args.enc_dropout,
layers = args.enc_layers)
decoder = Decoder(vocab = len(y_tokenizer.vocab2idx),
dim = args.dec_dim,
num_layers = args.dec_layers,
pt_states = args.pt_states,
nt_states = args.nt_states,
src_dim = args.enc_dim,
src_pt_states = args.src_pt_states,
src_nt_states = args.src_nt_states,
dropout = args.dec_dropout,
rule_constraint_type = args.rule_constraint_type,
nt_span_range = [args.dec_nt_span_min, args.dec_nt_span_max],
pt_span_range = [args.dec_pt_span_min, args.dec_pt_span_max])
parser = Parser(vocab = len(x_tokenizer.vocab2idx),
dim = args.parser_dim,
nt_states = args.parser_nt_states,
pt_states = args.parser_pt_states)
if args.resume_from_checkpoint:
model_checkpoint = torch.load(args.save_path)
encoder = model_checkpoint["encoder"]
decoder = model_checkpoint["decoder"]
parser = model_checkpoint["parser"]
x_tokenizer = model_checkpoint["x_tokenizer"]
y_tokenizer = model_checkpoint["y_tokenizer"]
model_args = model_checkpoint["args"]
encoder.to(device)
decoder.to(device)
parser.to(device)
model = torch.nn.ModuleList([encoder, decoder, parser])
for m in [encoder, decoder, parser]:
for name, param in m.named_parameters():
if param.dim() > 1:
xavier_uniform_(param)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
betas = (args.beta1, args.beta2),
weight_decay = args.weight_decay)
best_val_ppl = 1e5
epoch = 0
b = 0
model.to(device)
torch.autograd.set_detect_anomaly(True)
model.train()
while epoch < args.num_epochs:
start_time = time.time()
epoch += 1
print('Starting epoch: %d' % (epoch))
train_nll = 0.
src_nll = 0.
tgt_nll = 0.
num_sents = 0.
num_src_words = 0.
num_words = 0.
random.shuffle(train_data)
        # Number of instances skipped because their output is too long,
        # which would lead to OOM
num_no_res = 0
for d in train_data:
b += 1
x = [d["src"]]
y = [d["tgt"]]
x_tensor, _, _ = x_tokenizer.convert_batch(x)
y_tensor, _, _ = y_tokenizer.convert_batch(y)
x_tensor, y_tensor = x_tensor.to(device), y_tensor.to(device)
x_lengths = torch.Tensor([len(d["src"])]).long().to(device)
y_lengths = torch.Tensor([len(d["tgt"])]).long().to(device)
# # Added because of OOM
# if y_lengths[0] > 50:
# num_no_res += 1
# # num_examples += 1
# continue
# print(y_lengths, x_tensor, x_lengths, d["src"])
parse_sample, parse_argmax, parse_log_prob, parse_actions, parse_nll = parser(
x_tensor, x_lengths)
node_features, node_spans = encoder(x_tensor, x_lengths, spans = parse_sample)
nll = decoder(y_tensor, y_lengths, node_features, node_spans,
x_str = y, argmax=False)
dec_loss = nll.mean()
(dec_loss / args.update_every).backward()
train_nll += nll.sum().item()
with torch.no_grad():
node_features_argmax, node_spans_argmax = encoder(x_tensor, x_lengths,
spans = parse_argmax)
nll_argmax = decoder(y_tensor, y_lengths, node_features_argmax, node_spans_argmax,
x_str = y, argmax=False)
neg_reward = (nll - nll_argmax).detach().item()
obj = (neg_reward*parse_log_prob).mean() + parse_nll.mean()
(obj / args.update_every).backward()
src_nll += parse_nll.sum().item()
if b % args.update_every == 0:
torch.nn.utils.clip_grad_norm_(parser.parameters(), args.max_grad_norm)
torch.nn.utils.clip_grad_norm_(encoder.parameters(), args.max_grad_norm)
torch.nn.utils.clip_grad_norm_(decoder.parameters(), args.max_grad_norm)
optimizer.step()
optimizer.zero_grad()
num_sents += 1
num_words += y_lengths.sum().item()
num_src_words += x_lengths.sum().item()
if b % args.print_every == 0:
enc_param_norm = sum([p.norm()**2 for p in encoder.parameters()]).item()**0.5
dec_param_norm = sum([p.norm()**2 for p in decoder.parameters()]).item()**0.5
parser_param_norm = sum([p.norm()**2 for p in parser.parameters()]).item()**0.5
log_str = 'Epoch: %d, Batch: %d/%d, |EncParam|: %.4f, |DecParam|: %.4f, ' + \
'|SrcParserParam|: %.4f, LR: %.4f, SrcPPL: %.4f, ' + \
'PPL: %.4f, ValPPL: %.4f, ' + \
'Throughput: %.2f examples/sec'
print("-"*80)
print(log_str %
(epoch, b, len(train_data),
enc_param_norm, dec_param_norm, parser_param_norm,
args.lr, np.exp(src_nll / num_src_words),
np.exp(train_nll / num_words), best_val_ppl,
num_sents / (time.time() - start_time)))
print("-"*80)
if args.print_trees == 1:
print("")
with torch.no_grad():
y_tree, all_spans, all_spans_node = decoder(
y_tensor, y_lengths, node_features, node_spans,
x_str = y, argmax=True)
x_str = [x_tokenizer.idx2vocab[idx] for idx in x_tensor[0].tolist()]
y_str = [y_tokenizer.idx2vocab[idx] for idx in y_tensor[0].tolist()]
x_length = x_lengths[0].item()
y_length = y_lengths[0].item()
print("Source: %s\nTarget: %s" % (" ".join(x_str), " ".join(y_str)))
print("")
print("Source Tree: %s" % get_tree(parse_actions[0], x_str))
action = get_actions(y_tree[0])
print("QCFG Tree: %s" % get_tree(action, y_str))
print("")
for span, span_node in zip(all_spans[0], all_spans_node[0]):
if span_node[0] == -1:
if span[0] == span[1]:
x_span = "T" + str(span_node[2])
else:
x_span = "NT" + str(span_node[2])
else:
x_span = " ".join(x_str[span_node[0]:span_node[1]+1])
y_span = " ".join(y_str[span[0]:span[1]+1])
if span[0] == span[1]:
denom = len(decoder.pt_spans[0])
else:
denom = len(decoder.nt_spans[0])
print((y_span, x_span, "N" + str(span[2] // denom)))
if b % args.eval_every == 0 and epoch > 1 and not args.nqg_dataset:
print('--------------------------------')
print('Checking validation perf...')
if not args.nqg_dataset:
# if not args.nqg_dataset or "COGS" in args.train_file:
val_ppl = eval(val_data, encoder, decoder, parser, device,
x_tokenizer, y_tokenizer)
print('--------------------------------')
if val_ppl < best_val_ppl:
best_val_ppl = val_ppl
checkpoint = {
'args': args.__dict__,
'encoder': encoder.cpu(),
'decoder': decoder.cpu(),
'parser': parser.cpu(),
'x_tokenizer': x_tokenizer,
'y_tokenizer': y_tokenizer,
}
print('Saving checkpoint to %s' % args.save_path)
torch.save(checkpoint, args.save_path)
model.to(device)
if best_val_ppl < 1.01:
assert False
if args.nqg_dataset:
# if args.nqg_dataset or "COGS" not in args.train_file:
# No dev set, directly save the final checkpoint
checkpoint = {
'args': args.__dict__,
'encoder': encoder.cpu(),
'decoder': decoder.cpu(),
'parser': parser.cpu(),
'x_tokenizer': x_tokenizer,
'y_tokenizer': y_tokenizer,
}
print('Saving checkpoint to %s' % args.save_path)
torch.save(checkpoint, args.save_path)
model.to(device)
print("Number of too long examples: ", num_no_res)
def eval(data, encoder, decoder, parser, device, x_tokenizer, y_tokenizer):
encoder.eval()
decoder.eval()
parser.eval()
num_sents = 0
num_words = 0
total_nll = 0.
b = 0
for d in data:
if any([s not in x_tokenizer.vocab2idx for s in d["src"]]) or \
any([s not in y_tokenizer.vocab2idx for s in d["tgt"]]):
continue
b += 1
x = [d["src"]]
y = [d["tgt"]]
x_tensor, _, _ = x_tokenizer.convert_batch(x)
y_tensor, _, _ = y_tokenizer.convert_batch(y)
x_tensor, y_tensor = x_tensor.to(device), y_tensor.to(device)
x_lengths = torch.Tensor([len(d["src"])]).long().to(device)
y_lengths = torch.Tensor([len(d["tgt"])]).long().to(device)
parse_nll, parse_argmax, _ = parser.forward_nll_argmax(x_tensor, x_lengths)
with torch.no_grad():
node_features, node_spans = encoder(x_tensor, x_lengths, spans = parse_argmax)
new_spans = []
for span, x_str in zip(node_spans, x):
new_span = []
for s in span:
new_span.append([s[0], s[1], x_str[s[0]:s[1]+1]])
new_spans.append(new_span)
node_spans = new_spans
nll = decoder(y_tensor, y_lengths, node_features, node_spans,
x_str = y, argmax=False)
total_nll += nll.sum().item()
num_words += y_lengths.sum().item()
ppl = np.exp(total_nll / num_words)
print('PPL: %.4f' % ppl)
encoder.train()
decoder.train()
parser.train()
return ppl
if __name__ == '__main__':
start_time = time.time()
args = parser.parse_args()
main(args)
print("--- %s seconds ---" % (time.time() - start_time))
    # Add set_trace to force slurm to flush the log output
pdb.set_trace()
|
CompGenRep_MLRC2022-main
|
baseline_replication/neural-qcfg/train_scan.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
### Convert COGS tsv data to the NQG format (keep only the input and output columns)
import re
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string("tsv", "", "Input tsv file.")
flags.DEFINE_string("output", "", "Output tsv file.")
def main(unused_argv):
with open(FLAGS.tsv, 'r') as tsv_file:
with open(FLAGS.output, 'w') as csv_file:
for line in tsv_file:
# remove the type column
csv_file.write(line.split("\t")[0] + "\t" + line.split("\t")[1] + "\n")
print("Writting done")
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/COGS/convert_to_nqg_format.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
### Convert tsv data to csv format for transformers training
import re
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string("tsv", "", "Input tsv file.")
flags.DEFINE_string("csv", "", "Output csv file.")
def main(unused_argv):
with open(FLAGS.tsv, 'r') as tsv_file:
with open(FLAGS.csv, 'w') as csv_file:
csv_file.write('input\toutput\ttype\n')
for line in tsv_file:
csv_file.write(line)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/COGS/convert_to_csv.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
### Convert tsv data to csv format for transformers training
import re
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string("tsv", "", "Input tsv file.")
flags.DEFINE_string("csv", "", "Output csv file.")
def main(unused_argv):
with open(FLAGS.tsv, 'r') as tsv_file:
with open(FLAGS.csv, 'w') as csv_file:
csv_file.write('input\toutput\n')
for line in tsv_file:
csv_file.write(line)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/convert_to_csv.py
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/__init__.py
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strip targets from a tsv file and write as newline-separated txt.
This file can be useful as input to generate predictions (e.g. for evaluation).
"""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
import pdb
from tasks import tsv_utils
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string("output", "", "Output txt file.")
flags.DEFINE_string("prefix", "", "Optional prefix to prepend to source.")
def main(unused_argv):
examples = tsv_utils.read_tsv(FLAGS.input)
with gfile.GFile(FLAGS.output, "w") as txt_file:
for example in examples:
txt_file.write("%s%s\n" % (FLAGS.prefix, example[0]))
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/strip_targets.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Split tsv dataset file based on predefined sets of example ids."""
import json
import os
import sys
from absl import app
from absl import flags
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string("split", "", "Json split file.")
flags.DEFINE_string("output_dir", "", "Output directory for dataset files.")
def load_splits():
"""Reads a JSON file containing split IDs.
Returns:
    A dictionary where keys are split names (e.g. `train` or `test`) and values
    are lists of integer example IDs.
"""
with gfile.GFile(FLAGS.split, "r") as reader:
text = reader.read()
splits = json.loads(text)
return splits
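# Illustrative sketch (hypothetical contents): a split file might look like
#   {"train": [0, 2, 5], "test": [1, 3, 4]}
# i.e. each split name maps to the integer indices of the examples it contains.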
def main(unused_argv):
splits = load_splits()
examples = tsv_utils.read_tsv(FLAGS.input)
example_id_to_example = {
example_id: example for example_id, example in enumerate(examples)
}
for split, split_ids in splits.items():
examples = []
for split_id in split_ids:
examples.append(example_id_to_example[split_id])
filename = os.path.join(FLAGS.output_dir, "%s.tsv" % split)
tsv_utils.write_tsv(examples, filename)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/split_dataset.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Utilties for reading and writing files.
Expected format for TSV file is that each line has one example, with each
element separated by \t. The number of element should be the same as
expected_num_columns.
Expected format for examples in memory is a list where each element is:
(element_1, element_2, ...), or [element_1, element_2, ...]
The number of element should be the same as expected_num_columns.
"""
from tensorflow.io import gfile
def read_tsv(filename, expected_num_columns=2):
"""Read file to list of examples."""
examples = []
with gfile.GFile(filename, "r") as tsv_file:
for line in tsv_file:
line = line.rstrip()
cols = line.split("\t")
if len(cols) != expected_num_columns:
raise ValueError("Line '%s' has %s columns (%s)" %
(line, len(cols), cols))
examples.append(cols)
print("Loaded %s examples from %s." % (len(examples), filename))
return examples
def write_tsv(examples, filename, expected_num_columns=2):
"""Write examples to tsv file."""
with gfile.GFile(filename, "w") as tsv_file:
for example in examples:
if len(example) != expected_num_columns:
raise ValueError("Example '%s' has %s columns." %
(example, len(example)))
example = "\t".join(example)
line = "%s\n" % example
tsv_file.write(line)
print("Wrote %s examples to %s." % (len(examples), filename))
def merge_shared_tsvs(filename):
"""Merge multiple tsv files into one."""
output_files = gfile.glob("%s-*-of-*" % filename)
all_examples = []
for output_file in output_files:
examples = read_tsv(output_file)
all_examples.extend(examples)
write_tsv(all_examples, filename)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/tsv_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert SCAN txt format to standard TSV format."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input txt file.")
flags.DEFINE_string("output", "", "Output tsv file.")
def load_examples(filename):
"""Load SCAN examples from original data file."""
examples = []
with gfile.GFile(filename, "r") as input_file:
for line in input_file:
splits = line.split("OUT:")
# Trim "IN:" prefix.
input_string = splits[0][3:].strip()
output_string = splits[1].strip()
examples.append((input_string, output_string))
return examples
def main(unused_argv):
examples = load_examples(FLAGS.input)
tsv_utils.write_tsv(examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/scan/convert_to_tsv.py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocesses a specific split of the CFQ dataset."""
from absl import app
from absl import flags
import preprocess as preprocessor
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', None,
'Name of the TFDS dataset. Use cfq or scan.')
flags.DEFINE_string('split', None, 'Path to the JSON file containing '
                    'split information.')
flags.DEFINE_string('save_path', None, 'Path to the directory where the '
                    'files will be saved.')
flags.mark_flag_as_required('save_path')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
dataset = preprocessor.get_dataset_from_tfds(FLAGS.dataset, FLAGS.split)
preprocessor.write_dataset(dataset, FLAGS.save_path)
token_vocab = preprocessor.get_token_vocab(FLAGS.save_path)
preprocessor.write_token_vocab(token_vocab, FLAGS.save_path)
if __name__ == '__main__':
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/scan/preprocess_main.py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for preprocessing the CFQ dataset."""
import collections
import os
import string
from typing import Any, Dict, List, Tuple
from absl import logging
from tensorflow.compat.v1.io import gfile
import tensorflow_datasets as tfds
Dataset = Dict[str, List[Tuple[str, str]]]
def tokenize_punctuation(text):
text = map(lambda c: ' %s ' % c if c in string.punctuation else c, text)
return ' '.join(''.join(text).split())
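# Illustrative example (hypothetical question): tokenize_punctuation("Did M0's film star M1?")
# returns "Did M0 ' s film star M1 ?", i.e. every punctuation character is padded with
# spaces and the text is re-joined on single spaces.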
def preprocess_sparql(query):
"""Do various preprocessing on the SPARQL query."""
# Tokenize braces.
query = query.replace('count(*)', 'count ( * )')
tokens = []
for token in query.split():
# Replace 'ns:' prefixes.
if token.startswith('ns:'):
token = token[3:]
# Replace mid prefixes.
if token.startswith('m.'):
token = 'm_' + token[2:]
tokens.append(token)
return ' '.join(tokens).replace('\\n', ' ')
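# Illustrative example (hypothetical query fragment):
#   preprocess_sparql("SELECT count(*) WHERE { ns:m.0abc ns:film.film.directed_by ?x }")
#   -> "SELECT count ( * ) WHERE { m_0abc film.film.directed_by ?x }"
# 'count(*)' is split into tokens, 'ns:' prefixes are dropped and 'm.' mids become 'm_'.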
def get_encode_decode_pair(sample):
  # Apply some simple preprocessing on the tokenization, which improves the
# performance of the models significantly.
encode_text = tokenize_punctuation(sample['questionPatternModEntities'])
decode_text = preprocess_sparql(sample['sparqlPatternModEntities'])
return (encode_text, decode_text)
def get_dataset_from_tfds(dataset, split):
"""Load dataset from TFDS and do some basic preprocessing."""
logging.info('Loading dataset via TFDS.')
allsplits = tfds.load(dataset + '/' + split, as_supervised=True)
if 'validation' in allsplits:
# CFQ and divergence splits of StarCFQ have all three sets.
split_names = {'train': 'train', 'dev': 'validation', 'test': 'test'}
else:
# Scan and non-divergence splits of StarCFQ have 'train' and 'test' sets
# only. We simply output the test set as both dev and test. We only really
# use the dev set but t2t-datagen expects all three.
split_names = {'train': 'train', 'dev': 'test', 'test': 'test'}
dataset = collections.defaultdict(list)
for cfq_split_name, tfds_split_name in split_names.items():
for raw_x, raw_y in tfds.as_numpy(allsplits[tfds_split_name]):
encode_decode_pair = (tokenize_punctuation(raw_x.decode()),
preprocess_sparql(raw_y.decode()))
dataset[cfq_split_name].append(encode_decode_pair)
size_str = ', '.join(f'{s}={len(dataset[s])}' for s in split_names)
logging.info('Finished loading splits. Size: %s', size_str)
return dataset
def write_dataset(dataset, save_path):
"""Saves the given dataset into the given location."""
if not dataset:
logging.info('No dataset to write.')
return
logging.info('Writing dataset to %s', save_path)
for split_name, list_of_input_output_pairs in dataset.items():
folder_name = os.path.join(save_path, split_name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
encode_name = os.path.join(folder_name, '%s_encode.txt' % split_name)
decode_name = os.path.join(folder_name, '%s_decode.txt' % split_name)
with gfile.GFile(encode_name,
'w') as encode_f, gfile.GFile(decode_name,
'w') as decode_f:
for pair in list_of_input_output_pairs:
encode_f.write(pair[0] + '\n')
decode_f.write(pair[1] + '\n')
logging.info('Dataset written to %s', save_path)
def write_token_vocab(words,
save_path,
problem = 'cfq'):
""""Writes token vocabulary from @words to @save_path."""
# Sort tokens by frequency and then lexically to break ties.
words_with_counts = words.most_common()
words_with_counts.sort(key=lambda x: (x[1], x[0]), reverse=True)
vocab_path = os.path.join(save_path, 'vocab.%s.tokens' % problem)
with gfile.GFile(vocab_path, 'w') as f:
# Tensor2tensor needs these additional tokens.
f.write('<pad>\n<EOS>\n<OOV>\n')
for word, _ in words_with_counts:
f.write(word + '\n')
logging.info('Token vocabulary written to %s (%s distinct tokens).',
vocab_path, len(words))
def get_lines(path, filename):
with gfile.GFile(os.path.join(path, 'train', filename)) as f:
lines = [l.strip() for l in f.readlines() if l.strip()]
return lines
def get_token_vocab(path):
words = collections.Counter()
lines = get_lines(path, 'train_encode.txt')
lines.extend(get_lines(path, 'train_decode.txt'))
for line in lines:
words.update(line.split(' '))
return words
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/scan/preprocess.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Join source and target text files generated for MCD splits to TSV."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("source", "", "Source txt file.")
flags.DEFINE_string("target", "", "Target txt file.")
flags.DEFINE_string("output", "", "Joined tsv file.")
def read_examples(source_file, target_file):
"""Return list of (source, target) tuples."""
sources = []
targets = []
with gfile.GFile(source_file, "r") as txt_file:
for line in txt_file:
sources.append(line.rstrip("\n"))
with gfile.GFile(target_file, "r") as txt_file:
for line in txt_file:
targets.append(line.rstrip("\n"))
examples = list(zip(sources, targets))
return examples
def main(unused_argv):
examples = read_examples(FLAGS.source, FLAGS.target)
tsv_utils.write_tsv(examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/scan/join_txt_to_tsv.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run SQL parser on dataset to verify all targets have exactly 1 parse."""
from absl import app
from absl import flags
from language.compgen.nqg.tasks import tsv_utils
from language.compgen.nqg.tasks.spider import sql_parser
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_integer("offset", 0, "Example index to start at. Ignored if 0.")
flags.DEFINE_integer("limit", 0, "Example index to stop at. Ignored if 0.")
def main(unused_argv):
examples = tsv_utils.read_tsv(FLAGS.input)
for idx, (_, target) in enumerate(examples):
if FLAGS.offset and idx < FLAGS.offset:
continue
if FLAGS.limit and idx >= FLAGS.limit:
break
print("Processing example %s." % idx)
try:
_ = sql_parser.parse_sql(target)
except ValueError as e:
print(e)
# Retry parsing with verbose debugging.
_ = sql_parser.parse_sql(target, verbose=True)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/sql_parser_main.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pre-tokenize dataset for NQG which uses space-separated tokenization.
Input should be a TSV file, e.g. generated by applying `split_dataset.py` to
the output of `spider/write_dataset.py`.
"""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tasks.spider import nqg_tokenization
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string("output", "", "Output tsv file.")
def main(unused_argv):
examples = tsv_utils.read_tsv(FLAGS.input)
new_examples = []
for source, target in examples:
new_examples.append((nqg_tokenization.process_source(source),
nqg_tokenization.process_target(target)))
tsv_utils.write_tsv(new_examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/nqg_preprocess.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
################################
# val: number(float)/string(str)/sql(dict)
# col_unit: (agg_id, col_id, isDistinct(bool))
# val_unit: (unit_op, col_unit1, col_unit2)
# table_unit: (table_type, col_unit/sql)
# cond_unit: (not_op, op_id, val_unit, val1, val2)
# condition: [cond_unit1, 'and'/'or', cond_unit2, ...]
# sql {
# 'select': (isDistinct(bool), [(agg_id, val_unit), (agg_id, val_unit), ...])
# 'from': {'table_units': [table_unit1, table_unit2, ...], 'conds': condition}
# 'where': condition
# 'groupBy': [col_unit1, col_unit2, ...]
# 'orderBy': ('asc'/'desc', [val_unit1, val_unit2, ...])
# 'having': condition
# 'limit': None/limit value
# 'intersect': None/sql
# 'except': None/sql
# 'union': None/sql
# }
################################
from __future__ import print_function
import os, sys
import json
import sqlite3
import traceback
import argparse
from process_sql import tokenize, get_schema, get_tables_with_alias, Schema, get_sql
# Flag to disable value evaluation
DISABLE_VALUE = True
# Flag to disable distinct in select evaluation
DISABLE_DISTINCT = True
CLAUSE_KEYWORDS = ('select', 'from', 'where', 'group', 'order', 'limit', 'intersect', 'union', 'except')
JOIN_KEYWORDS = ('join', 'on', 'as')
WHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')
UNIT_OPS = ('none', '-', '+', "*", '/')
AGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')
TABLE_TYPE = {
'sql': "sql",
'table_unit': "table_unit",
}
COND_OPS = ('and', 'or')
SQL_OPS = ('intersect', 'union', 'except')
ORDER_OPS = ('desc', 'asc')
HARDNESS = {
"component1": ('where', 'group', 'order', 'limit', 'join', 'or', 'like'),
"component2": ('except', 'union', 'intersect')
}
def condition_has_or(conds):
return 'or' in conds[1::2]
def condition_has_like(conds):
return WHERE_OPS.index('like') in [cond_unit[1] for cond_unit in conds[::2]]
def condition_has_sql(conds):
for cond_unit in conds[::2]:
val1, val2 = cond_unit[3], cond_unit[4]
if val1 is not None and type(val1) is dict:
return True
if val2 is not None and type(val2) is dict:
return True
return False
def val_has_op(val_unit):
return val_unit[0] != UNIT_OPS.index('none')
def has_agg(unit):
return unit[0] != AGG_OPS.index('none')
def accuracy(count, total):
if count == total:
return 1
return 0
def recall(count, total):
if count == total:
return 1
return 0
def F1(acc, rec):
if (acc + rec) == 0:
return 0
return (2. * acc * rec) / (acc + rec)
def get_scores(count, pred_total, label_total):
if pred_total != label_total:
return 0,0,0
elif count == pred_total:
return 1,1,1
return 0,0,0
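# Illustrative note: component scoring is all-or-nothing -- get_scores returns (1, 1, 1)
# only when the predicted and gold clauses have the same size and every predicted unit is
# matched, and (0, 0, 0) otherwise.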
def eval_sel(pred, label):
pred_sel = pred['select'][1]
label_sel = label['select'][1]
label_wo_agg = [unit[1] for unit in label_sel]
pred_total = len(pred_sel)
label_total = len(label_sel)
cnt = 0
cnt_wo_agg = 0
for unit in pred_sel:
if unit in label_sel:
cnt += 1
label_sel.remove(unit)
if unit[1] in label_wo_agg:
cnt_wo_agg += 1
label_wo_agg.remove(unit[1])
return label_total, pred_total, cnt, cnt_wo_agg
def eval_where(pred, label):
pred_conds = [unit for unit in pred['where'][::2]]
label_conds = [unit for unit in label['where'][::2]]
label_wo_agg = [unit[2] for unit in label_conds]
pred_total = len(pred_conds)
label_total = len(label_conds)
cnt = 0
cnt_wo_agg = 0
for unit in pred_conds:
if unit in label_conds:
cnt += 1
label_conds.remove(unit)
if unit[2] in label_wo_agg:
cnt_wo_agg += 1
label_wo_agg.remove(unit[2])
return label_total, pred_total, cnt, cnt_wo_agg
def eval_group(pred, label):
pred_cols = [unit[1] for unit in pred['groupBy']]
label_cols = [unit[1] for unit in label['groupBy']]
pred_total = len(pred_cols)
label_total = len(label_cols)
cnt = 0
pred_cols = [pred.split(".")[1] if "." in pred else pred for pred in pred_cols]
label_cols = [label.split(".")[1] if "." in label else label for label in label_cols]
for col in pred_cols:
if col in label_cols:
cnt += 1
label_cols.remove(col)
return label_total, pred_total, cnt
def eval_having(pred, label):
pred_total = label_total = cnt = 0
if len(pred['groupBy']) > 0:
pred_total = 1
if len(label['groupBy']) > 0:
label_total = 1
pred_cols = [unit[1] for unit in pred['groupBy']]
label_cols = [unit[1] for unit in label['groupBy']]
if pred_total == label_total == 1 \
and pred_cols == label_cols \
and pred['having'] == label['having']:
cnt = 1
return label_total, pred_total, cnt
def eval_order(pred, label):
pred_total = label_total = cnt = 0
if len(pred['orderBy']) > 0:
pred_total = 1
if len(label['orderBy']) > 0:
label_total = 1
if len(label['orderBy']) > 0 and pred['orderBy'] == label['orderBy'] and \
((pred['limit'] is None and label['limit'] is None) or (pred['limit'] is not None and label['limit'] is not None)):
cnt = 1
return label_total, pred_total, cnt
def eval_and_or(pred, label):
pred_ao = pred['where'][1::2]
label_ao = label['where'][1::2]
pred_ao = set(pred_ao)
label_ao = set(label_ao)
if pred_ao == label_ao:
return 1,1,1
return len(pred_ao),len(label_ao),0
def get_nestedSQL(sql):
nested = []
for cond_unit in sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]:
if type(cond_unit[3]) is dict:
nested.append(cond_unit[3])
if type(cond_unit[4]) is dict:
nested.append(cond_unit[4])
if sql['intersect'] is not None:
nested.append(sql['intersect'])
if sql['except'] is not None:
nested.append(sql['except'])
if sql['union'] is not None:
nested.append(sql['union'])
return nested
def eval_nested(pred, label):
label_total = 0
pred_total = 0
cnt = 0
if pred is not None:
pred_total += 1
if label is not None:
label_total += 1
if pred is not None and label is not None:
cnt += Evaluator().eval_exact_match(pred, label)
return label_total, pred_total, cnt
def eval_IUEN(pred, label):
lt1, pt1, cnt1 = eval_nested(pred['intersect'], label['intersect'])
lt2, pt2, cnt2 = eval_nested(pred['except'], label['except'])
lt3, pt3, cnt3 = eval_nested(pred['union'], label['union'])
label_total = lt1 + lt2 + lt3
pred_total = pt1 + pt2 + pt3
cnt = cnt1 + cnt2 + cnt3
return label_total, pred_total, cnt
def get_keywords(sql):
res = set()
if len(sql['where']) > 0:
res.add('where')
if len(sql['groupBy']) > 0:
res.add('group')
if len(sql['having']) > 0:
res.add('having')
if len(sql['orderBy']) > 0:
res.add(sql['orderBy'][0])
res.add('order')
if sql['limit'] is not None:
res.add('limit')
if sql['except'] is not None:
res.add('except')
if sql['union'] is not None:
res.add('union')
if sql['intersect'] is not None:
res.add('intersect')
# or keyword
ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
if len([token for token in ao if token == 'or']) > 0:
res.add('or')
cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
# not keyword
if len([cond_unit for cond_unit in cond_units if cond_unit[0]]) > 0:
res.add('not')
# in keyword
if len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('in')]) > 0:
res.add('in')
# like keyword
if len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like')]) > 0:
res.add('like')
return res
def eval_keywords(pred, label):
pred_keywords = get_keywords(pred)
label_keywords = get_keywords(label)
pred_total = len(pred_keywords)
label_total = len(label_keywords)
cnt = 0
for k in pred_keywords:
if k in label_keywords:
cnt += 1
return label_total, pred_total, cnt
def count_agg(units):
return len([unit for unit in units if has_agg(unit)])
def count_component1(sql):
count = 0
if len(sql['where']) > 0:
count += 1
if len(sql['groupBy']) > 0:
count += 1
if len(sql['orderBy']) > 0:
count += 1
if sql['limit'] is not None:
count += 1
if len(sql['from']['table_units']) > 0: # JOIN
count += len(sql['from']['table_units']) - 1
ao = sql['from']['conds'][1::2] + sql['where'][1::2] + sql['having'][1::2]
count += len([token for token in ao if token == 'or'])
cond_units = sql['from']['conds'][::2] + sql['where'][::2] + sql['having'][::2]
count += len([cond_unit for cond_unit in cond_units if cond_unit[1] == WHERE_OPS.index('like')])
return count
def count_component2(sql):
nested = get_nestedSQL(sql)
return len(nested)
def count_others(sql):
count = 0
# number of aggregation
agg_count = count_agg(sql['select'][1])
agg_count += count_agg(sql['where'][::2])
agg_count += count_agg(sql['groupBy'])
if len(sql['orderBy']) > 0:
agg_count += count_agg([unit[1] for unit in sql['orderBy'][1] if unit[1]] +
[unit[2] for unit in sql['orderBy'][1] if unit[2]])
agg_count += count_agg(sql['having'])
if agg_count > 1:
count += 1
# number of select columns
if len(sql['select'][1]) > 1:
count += 1
# number of where conditions
if len(sql['where']) > 1:
count += 1
# number of group by clauses
if len(sql['groupBy']) > 1:
count += 1
return count
class Evaluator:
"""A simple evaluator"""
def __init__(self):
self.partial_scores = None
def eval_hardness(self, sql):
count_comp1_ = count_component1(sql)
count_comp2_ = count_component2(sql)
count_others_ = count_others(sql)
if count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ == 0:
return "easy"
elif (count_others_ <= 2 and count_comp1_ <= 1 and count_comp2_ == 0) or \
(count_comp1_ <= 2 and count_others_ < 2 and count_comp2_ == 0):
return "medium"
elif (count_others_ > 2 and count_comp1_ <= 2 and count_comp2_ == 0) or \
(2 < count_comp1_ <= 3 and count_others_ <= 2 and count_comp2_ == 0) or \
(count_comp1_ <= 1 and count_others_ == 0 and count_comp2_ <= 1):
return "hard"
else:
return "extra"
def eval_exact_match(self, pred, label):
partial_scores = self.eval_partial_match(pred, label)
self.partial_scores = partial_scores
for _, score in partial_scores.items():
if score['f1'] != 1:
return 0
if len(label['from']['table_units']) > 0:
label_tables = sorted(label['from']['table_units'])
pred_tables = sorted(pred['from']['table_units'])
return label_tables == pred_tables
return 1
def eval_partial_match(self, pred, label):
res = {}
label_total, pred_total, cnt, cnt_wo_agg = eval_sel(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['select'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
res['select(no AGG)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt, cnt_wo_agg = eval_where(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['where'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
acc, rec, f1 = get_scores(cnt_wo_agg, pred_total, label_total)
res['where(no OP)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt = eval_group(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['group(no Having)'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt = eval_having(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['group'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt = eval_order(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['order'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt = eval_and_or(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['and/or'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt = eval_IUEN(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['IUEN'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
label_total, pred_total, cnt = eval_keywords(pred, label)
acc, rec, f1 = get_scores(cnt, pred_total, label_total)
res['keywords'] = {'acc': acc, 'rec': rec, 'f1': f1,'label_total':label_total,'pred_total':pred_total}
return res
def isValidSQL(sql, db):
conn = sqlite3.connect(db)
cursor = conn.cursor()
try:
cursor.execute(sql)
except:
return False
return True
def print_scores(scores, etype):
levels = ['easy', 'medium', 'hard', 'extra', 'all']
partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
'group', 'order', 'and/or', 'IUEN', 'keywords']
print("{:20} {:20} {:20} {:20} {:20} {:20}".format("", *levels))
counts = [scores[level]['count'] for level in levels]
print("{:20} {:<20d} {:<20d} {:<20d} {:<20d} {:<20d}".format("count", *counts))
if etype in ["all", "exec"]:
print('===================== EXECUTION ACCURACY =====================')
this_scores = [scores[level]['exec'] for level in levels]
print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("execution", *this_scores))
if etype in ["all", "match"]:
print('\n====================== EXACT MATCHING ACCURACY =====================')
exact_scores = [scores[level]['exact'] for level in levels]
print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format("exact match", *exact_scores))
print('\n---------------------PARTIAL MATCHING ACCURACY----------------------')
for type_ in partial_types:
this_scores = [scores[level]['partial'][type_]['acc'] for level in levels]
print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
print('---------------------- PARTIAL MATCHING RECALL ----------------------')
for type_ in partial_types:
this_scores = [scores[level]['partial'][type_]['rec'] for level in levels]
print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
print('---------------------- PARTIAL MATCHING F1 --------------------------')
for type_ in partial_types:
this_scores = [scores[level]['partial'][type_]['f1'] for level in levels]
print("{:20} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f} {:<20.3f}".format(type_, *this_scores))
def evaluate(gold, predict, db_dir, etype, kmaps):
with open(gold) as f:
glist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
with open(predict) as f:
plist = [l.strip().split('\t') for l in f.readlines() if len(l.strip()) > 0]
# plist = [("select max(Share),min(Share) from performance where Type != 'terminal'", "orchestra")]
# glist = [("SELECT max(SHARE) , min(SHARE) FROM performance WHERE TYPE != 'Live final'", "orchestra")]
evaluator = Evaluator()
levels = ['easy', 'medium', 'hard', 'extra', 'all']
partial_types = ['select', 'select(no AGG)', 'where', 'where(no OP)', 'group(no Having)',
'group', 'order', 'and/or', 'IUEN', 'keywords']
entries = []
scores = {}
for level in levels:
scores[level] = {'count': 0, 'partial': {}, 'exact': 0.}
scores[level]['exec'] = 0
for type_ in partial_types:
scores[level]['partial'][type_] = {'acc': 0., 'rec': 0., 'f1': 0.,'acc_count':0,'rec_count':0}
eval_err_num = 0
for p, g in zip(plist, glist):
p_str = p[0]
g_str, db = g
db_name = db
db = os.path.join(db_dir, db, db + ".sqlite")
schema = Schema(get_schema(db))
g_sql = get_sql(schema, g_str)
hardness = evaluator.eval_hardness(g_sql)
scores[hardness]['count'] += 1
scores['all']['count'] += 1
try:
p_sql = get_sql(schema, p_str)
except:
            # If p_sql cannot be parsed, fall back to an empty SQL structure so the prediction can still be scored against the gold SQL
p_sql = {
"except": None,
"from": {
"conds": [],
"table_units": []
},
"groupBy": [],
"having": [],
"intersect": None,
"limit": None,
"orderBy": [],
"select": [
False,
[]
],
"union": None,
"where": []
}
eval_err_num += 1
print("eval_err_num:{}".format(eval_err_num))
# rebuild sql for value evaluation
kmap = kmaps[db_name]
g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
g_sql = rebuild_sql_val(g_sql)
g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
p_sql = rebuild_sql_val(p_sql)
p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
if etype in ["all", "exec"]:
exec_score = eval_exec_match(db, p_str, g_str, p_sql, g_sql)
if exec_score:
scores[hardness]['exec'] += 1.0
scores['all']['exec'] += 1.0
if etype in ["all", "match"]:
exact_score = evaluator.eval_exact_match(p_sql, g_sql)
partial_scores = evaluator.partial_scores
if exact_score == 0:
print("{} pred: {}".format(hardness,p_str))
print("{} gold: {}".format(hardness,g_str))
print("")
scores[hardness]['exact'] += exact_score
scores['all']['exact'] += exact_score
for type_ in partial_types:
if partial_scores[type_]['pred_total'] > 0:
scores[hardness]['partial'][type_]['acc'] += partial_scores[type_]['acc']
scores[hardness]['partial'][type_]['acc_count'] += 1
if partial_scores[type_]['label_total'] > 0:
scores[hardness]['partial'][type_]['rec'] += partial_scores[type_]['rec']
scores[hardness]['partial'][type_]['rec_count'] += 1
scores[hardness]['partial'][type_]['f1'] += partial_scores[type_]['f1']
if partial_scores[type_]['pred_total'] > 0:
scores['all']['partial'][type_]['acc'] += partial_scores[type_]['acc']
scores['all']['partial'][type_]['acc_count'] += 1
if partial_scores[type_]['label_total'] > 0:
scores['all']['partial'][type_]['rec'] += partial_scores[type_]['rec']
scores['all']['partial'][type_]['rec_count'] += 1
scores['all']['partial'][type_]['f1'] += partial_scores[type_]['f1']
entries.append({
'predictSQL': p_str,
'goldSQL': g_str,
'hardness': hardness,
'exact': exact_score,
'partial': partial_scores
})
for level in levels:
if scores[level]['count'] == 0:
continue
if etype in ["all", "exec"]:
scores[level]['exec'] /= scores[level]['count']
if etype in ["all", "match"]:
scores[level]['exact'] /= scores[level]['count']
for type_ in partial_types:
if scores[level]['partial'][type_]['acc_count'] == 0:
scores[level]['partial'][type_]['acc'] = 0
else:
scores[level]['partial'][type_]['acc'] = scores[level]['partial'][type_]['acc'] / \
scores[level]['partial'][type_]['acc_count'] * 1.0
if scores[level]['partial'][type_]['rec_count'] == 0:
scores[level]['partial'][type_]['rec'] = 0
else:
scores[level]['partial'][type_]['rec'] = scores[level]['partial'][type_]['rec'] / \
scores[level]['partial'][type_]['rec_count'] * 1.0
if scores[level]['partial'][type_]['acc'] == 0 and scores[level]['partial'][type_]['rec'] == 0:
scores[level]['partial'][type_]['f1'] = 1
else:
scores[level]['partial'][type_]['f1'] = \
2.0 * scores[level]['partial'][type_]['acc'] * scores[level]['partial'][type_]['rec'] / (
scores[level]['partial'][type_]['rec'] + scores[level]['partial'][type_]['acc'])
print_scores(scores, etype)
def eval_exec_match(db, p_str, g_str, pred, gold):
"""
    Return True if the result values of the predicted and gold queries match
    at the corresponding indices. Currently does not support multiple col_unit pairs.
"""
conn = sqlite3.connect(db)
cursor = conn.cursor()
try:
cursor.execute(p_str)
p_res = cursor.fetchall()
except:
return False
cursor.execute(g_str)
q_res = cursor.fetchall()
def res_map(res, val_units):
rmap = {}
for idx, val_unit in enumerate(val_units):
key = tuple(val_unit[1]) if not val_unit[2] else (val_unit[0], tuple(val_unit[1]), tuple(val_unit[2]))
rmap[key] = [r[idx] for r in res]
return rmap
p_val_units = [unit[1] for unit in pred['select'][1]]
q_val_units = [unit[1] for unit in gold['select'][1]]
return res_map(p_res, p_val_units) == res_map(q_res, q_val_units)
# Rebuild SQL functions for value evaluation
def rebuild_cond_unit_val(cond_unit):
if cond_unit is None or not DISABLE_VALUE:
return cond_unit
not_op, op_id, val_unit, val1, val2 = cond_unit
if type(val1) is not dict:
val1 = None
else:
val1 = rebuild_sql_val(val1)
if type(val2) is not dict:
val2 = None
else:
val2 = rebuild_sql_val(val2)
return not_op, op_id, val_unit, val1, val2
def rebuild_condition_val(condition):
if condition is None or not DISABLE_VALUE:
return condition
res = []
for idx, it in enumerate(condition):
if idx % 2 == 0:
res.append(rebuild_cond_unit_val(it))
else:
res.append(it)
return res
def rebuild_sql_val(sql):
if sql is None or not DISABLE_VALUE:
return sql
sql['from']['conds'] = rebuild_condition_val(sql['from']['conds'])
sql['having'] = rebuild_condition_val(sql['having'])
sql['where'] = rebuild_condition_val(sql['where'])
sql['intersect'] = rebuild_sql_val(sql['intersect'])
sql['except'] = rebuild_sql_val(sql['except'])
sql['union'] = rebuild_sql_val(sql['union'])
return sql
# Rebuild SQL functions for foreign key evaluation
def build_valid_col_units(table_units, schema):
col_ids = [table_unit[1] for table_unit in table_units if table_unit[0] == TABLE_TYPE['table_unit']]
prefixs = [col_id[:-2] for col_id in col_ids]
valid_col_units= []
for value in schema.idMap.values():
if '.' in value and value[:value.index('.')] in prefixs:
valid_col_units.append(value)
return valid_col_units
def rebuild_col_unit_col(valid_col_units, col_unit, kmap):
if col_unit is None:
return col_unit
agg_id, col_id, distinct = col_unit
if col_id in kmap and col_id in valid_col_units:
col_id = kmap[col_id]
if DISABLE_DISTINCT:
distinct = None
return agg_id, col_id, distinct
def rebuild_val_unit_col(valid_col_units, val_unit, kmap):
if val_unit is None:
return val_unit
unit_op, col_unit1, col_unit2 = val_unit
col_unit1 = rebuild_col_unit_col(valid_col_units, col_unit1, kmap)
col_unit2 = rebuild_col_unit_col(valid_col_units, col_unit2, kmap)
return unit_op, col_unit1, col_unit2
def rebuild_table_unit_col(valid_col_units, table_unit, kmap):
if table_unit is None:
return table_unit
table_type, col_unit_or_sql = table_unit
if isinstance(col_unit_or_sql, tuple):
col_unit_or_sql = rebuild_col_unit_col(valid_col_units, col_unit_or_sql, kmap)
return table_type, col_unit_or_sql
def rebuild_cond_unit_col(valid_col_units, cond_unit, kmap):
if cond_unit is None:
return cond_unit
not_op, op_id, val_unit, val1, val2 = cond_unit
val_unit = rebuild_val_unit_col(valid_col_units, val_unit, kmap)
return not_op, op_id, val_unit, val1, val2
def rebuild_condition_col(valid_col_units, condition, kmap):
for idx in range(len(condition)):
if idx % 2 == 0:
condition[idx] = rebuild_cond_unit_col(valid_col_units, condition[idx], kmap)
return condition
def rebuild_select_col(valid_col_units, sel, kmap):
if sel is None:
return sel
distinct, _list = sel
new_list = []
for it in _list:
agg_id, val_unit = it
new_list.append((agg_id, rebuild_val_unit_col(valid_col_units, val_unit, kmap)))
if DISABLE_DISTINCT:
distinct = None
return distinct, new_list
def rebuild_from_col(valid_col_units, from_, kmap):
if from_ is None:
return from_
from_['table_units'] = [rebuild_table_unit_col(valid_col_units, table_unit, kmap) for table_unit in from_['table_units']]
from_['conds'] = rebuild_condition_col(valid_col_units, from_['conds'], kmap)
return from_
def rebuild_group_by_col(valid_col_units, group_by, kmap):
if group_by is None:
return group_by
return [rebuild_col_unit_col(valid_col_units, col_unit, kmap) for col_unit in group_by]
def rebuild_order_by_col(valid_col_units, order_by, kmap):
if order_by is None or len(order_by) == 0:
return order_by
direction, val_units = order_by
new_val_units = [rebuild_val_unit_col(valid_col_units, val_unit, kmap) for val_unit in val_units]
return direction, new_val_units
def rebuild_sql_col(valid_col_units, sql, kmap):
if sql is None:
return sql
sql['select'] = rebuild_select_col(valid_col_units, sql['select'], kmap)
sql['from'] = rebuild_from_col(valid_col_units, sql['from'], kmap)
sql['where'] = rebuild_condition_col(valid_col_units, sql['where'], kmap)
sql['groupBy'] = rebuild_group_by_col(valid_col_units, sql['groupBy'], kmap)
sql['orderBy'] = rebuild_order_by_col(valid_col_units, sql['orderBy'], kmap)
sql['having'] = rebuild_condition_col(valid_col_units, sql['having'], kmap)
sql['intersect'] = rebuild_sql_col(valid_col_units, sql['intersect'], kmap)
sql['except'] = rebuild_sql_col(valid_col_units, sql['except'], kmap)
sql['union'] = rebuild_sql_col(valid_col_units, sql['union'], kmap)
return sql
def build_foreign_key_map(entry):
cols_orig = entry["column_names_original"]
tables_orig = entry["table_names_original"]
# rebuild cols corresponding to idmap in Schema
cols = []
for col_orig in cols_orig:
if col_orig[0] >= 0:
t = tables_orig[col_orig[0]]
c = col_orig[1]
cols.append("__" + t.lower() + "." + c.lower() + "__")
else:
cols.append("__all__")
def keyset_in_list(k1, k2, k_list):
for k_set in k_list:
if k1 in k_set or k2 in k_set:
return k_set
new_k_set = set()
k_list.append(new_k_set)
return new_k_set
foreign_key_list = []
foreign_keys = entry["foreign_keys"]
for fkey in foreign_keys:
key1, key2 = fkey
key_set = keyset_in_list(key1, key2, foreign_key_list)
key_set.add(key1)
key_set.add(key2)
foreign_key_map = {}
for key_set in foreign_key_list:
sorted_list = sorted(list(key_set))
midx = sorted_list[0]
for idx in sorted_list:
foreign_key_map[cols[idx]] = cols[midx]
return foreign_key_map
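# Illustrative sketch (not part of the original file): for a hypothetical entry
#   {"table_names_original": ["singer", "concert"],
#    "column_names_original": [[-1, "*"], [0, "id"], [1, "singer_id"]],
#    "foreign_keys": [[2, 1]]}
# build_foreign_key_map returns
#   {"__singer.id__": "__singer.id__", "__concert.singer_id__": "__singer.id__"},
# i.e. every column in a foreign-key group is mapped to the group's lowest column id.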
def build_foreign_key_map_from_json(table):
with open(table) as f:
data = json.load(f)
tables = {}
for entry in data:
tables[entry['db_id']] = build_foreign_key_map(entry)
return tables
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--gold', dest='gold', type=str)
parser.add_argument('--pred', dest='pred', type=str)
parser.add_argument('--db', dest='db', type=str)
parser.add_argument('--table', dest='table', type=str)
parser.add_argument('--etype', dest='etype', type=str)
args = parser.parse_args()
gold = args.gold
pred = args.pred
db_dir = args.db
table = args.table
etype = args.etype
assert etype in ["all", "exec", "match"], "Unknown evaluation method"
kmaps = build_foreign_key_map_from_json(table)
evaluate(gold, pred, db_dir, etype, kmaps)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/evaluation.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Replace T5 SPM OOV character with `<`.
Certain punctuation characters are mapped to the OOV symbol in T5's
sentence-piece model. For Spider, this appears to only affect the `<` symbol,
so it can be deterministically recovered by running this script.
An alternative is to preprocess the dataset to avoid OOV symbols for T5.
"""
from absl import app
from absl import flags
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input txt file.")
flags.DEFINE_string("output", "", "Output txt file.")
def main(unused_argv):
with open(FLAGS.output, "w") as output_file:
with open(FLAGS.input, "r") as input_file:
for line in input_file:
pred = line.replace(" ⁇ ", "<").replace("<pad>", "").replace("</s>", "").replace("<unk>", "<")
if line != pred:
print("Original: %s" % line)
print("New: %s" % pred)
output_file.write("%s" % pred)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/restore_oov.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write Spider dataset in TSV format."""
import json
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tasks.spider import database_constants
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("examples", "", "Path to Spider json examples.")
flags.DEFINE_string("output", "", "Output tsv file.")
flags.DEFINE_bool(
"filter_by_database", True,
"Whether to only select examples for databases used for the Spider-SSP"
"setting proposed in the paper. Should be False to follow the standard"
"Spider-XSP setting.")
def normalize_whitespace(source):
tokens = source.split()
return " ".join(tokens)
def load_json(filepath):
with gfile.GFile(filepath, "r") as reader:
text = reader.read()
return json.loads(text)
def main(unused_argv):
examples_json = load_json(FLAGS.examples)
examples = []
for example_json in examples_json:
database = example_json["db_id"]
source = example_json["question"]
target = example_json["query"]
# Optionally skip if database not in set of databases with >= 50 examples.
if (FLAGS.filter_by_database and
database not in database_constants.DATABASES):
continue
# Prepend database.
source = "%s: %s" % (database, source)
target = normalize_whitespace(target)
examples.append((source.lower(), target.lower()))
tsv_utils.write_tsv(examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/write_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
################################
# Assumptions:
# 1. sql is correct
# 2. only table name has alias
# 3. only one intersect/union/except
#
# val: number(float)/string(str)/sql(dict)
# col_unit: (agg_id, col_id, isDistinct(bool))
# val_unit: (unit_op, col_unit1, col_unit2)
# table_unit: (table_type, col_unit/sql)
# cond_unit: (not_op, op_id, val_unit, val1, val2)
# condition: [cond_unit1, 'and'/'or', cond_unit2, ...]
# sql {
# 'select': (isDistinct(bool), [(agg_id, val_unit), (agg_id, val_unit), ...])
# 'from': {'table_units': [table_unit1, table_unit2, ...], 'conds': condition}
# 'where': condition
# 'groupBy': [col_unit1, col_unit2, ...]
# 'orderBy': ('asc'/'desc', [val_unit1, val_unit2, ...])
# 'having': condition
# 'limit': None/limit value
# 'intersect': None/sql
# 'except': None/sql
# 'union': None/sql
# }
################################
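# Illustrative sketch (not part of the original file): the parsed dict for a
# simple query, assuming a schema that contains a `singer` table. For
# "SELECT count(*) FROM singer", get_sql(schema, query) produces roughly:
#
# {
#     'select': (False, [(3, (0, (0, '__all__', False), None))]),  # 3 == AGG_OPS.index('count')
#     'from': {'table_units': [('table_unit', '__singer__')], 'conds': []},
#     'where': [], 'groupBy': [], 'having': [],
#     'orderBy': [], 'limit': None,
#     'intersect': None, 'except': None, 'union': None,
# }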
import json
import sqlite3
from nltk import word_tokenize
CLAUSE_KEYWORDS = ('select', 'from', 'where', 'group', 'order', 'limit', 'intersect', 'union', 'except')
JOIN_KEYWORDS = ('join', 'on', 'as')
WHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')
UNIT_OPS = ('none', '-', '+', "*", '/')
AGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')
TABLE_TYPE = {
'sql': "sql",
'table_unit': "table_unit",
}
COND_OPS = ('and', 'or')
SQL_OPS = ('intersect', 'union', 'except')
ORDER_OPS = ('desc', 'asc')
class Schema:
"""
Simple schema which maps table&column to a unique identifier
"""
def __init__(self, schema):
self._schema = schema
self._idMap = self._map(self._schema)
@property
def schema(self):
return self._schema
@property
def idMap(self):
return self._idMap
def _map(self, schema):
idMap = {'*': "__all__"}
id = 1
for key, vals in schema.items():
for val in vals:
idMap[key.lower() + "." + val.lower()] = "__" + key.lower() + "." + val.lower() + "__"
id += 1
for key in schema:
idMap[key.lower()] = "__" + key.lower() + "__"
id += 1
return idMap
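# Illustrative sketch (not part of the original file): for a hypothetical
# schema {'singer': ['name', 'age']}, Schema(schema).idMap would be:
#   {'*': '__all__',
#    'singer.name': '__singer.name__',
#    'singer.age': '__singer.age__',
#    'singer': '__singer__'}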
def get_schema(db):
"""
Get database's schema, which is a dict with table name as key
and list of column names as value
:param db: database path
:return: schema dict
"""
schema = {}
conn = sqlite3.connect(db)
cursor = conn.cursor()
# fetch table names
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = [str(table[0].lower()) for table in cursor.fetchall()]
# fetch table info
for table in tables:
cursor.execute("PRAGMA table_info({})".format(table))
schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
return schema
def get_schema_from_json(fpath):
with open(fpath) as f:
data = json.load(f)
schema = {}
for entry in data:
table = str(entry['table'].lower())
cols = [str(col['column_name'].lower()) for col in entry['col_data']]
schema[table] = cols
return schema
def tokenize(string):
string = str(string)
    string = string.replace("\'", "\"")  # normalize quotes so all string values are wrapped in double quotes
quote_idxs = [idx for idx, char in enumerate(string) if char == '"']
assert len(quote_idxs) % 2 == 0, "Unexpected quote"
# keep string value as token
vals = {}
for i in range(len(quote_idxs)-1, -1, -2):
qidx1 = quote_idxs[i-1]
qidx2 = quote_idxs[i]
val = string[qidx1: qidx2+1]
key = "__val_{}_{}__".format(qidx1, qidx2)
string = string[:qidx1] + key + string[qidx2+1:]
vals[key] = val
toks = [word.lower() for word in word_tokenize(string)]
# replace with string value token
for i in range(len(toks)):
if toks[i] in vals:
toks[i] = vals[toks[i]]
# find if there exists !=, >=, <=
eq_idxs = [idx for idx, tok in enumerate(toks) if tok == "="]
eq_idxs.reverse()
prefix = ('!', '>', '<')
for eq_idx in eq_idxs:
pre_tok = toks[eq_idx-1]
if pre_tok in prefix:
toks = toks[:eq_idx-1] + [pre_tok + "="] + toks[eq_idx+1: ]
return toks
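# Illustrative sketch (not part of the original file): with NLTK available,
#   tokenize("SELECT name FROM singer WHERE age != 20")
# is expected to yield ['select', 'name', 'from', 'singer', 'where', 'age', '!=', '20'];
# word_tokenize splits "!=" into '!' and '=', and the loop above re-joins them.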
def scan_alias(toks):
"""Scan the index of 'as' and build the map for all alias"""
as_idxs = [idx for idx, tok in enumerate(toks) if tok == 'as']
alias = {}
for idx in as_idxs:
alias[toks[idx+1]] = toks[idx-1]
return alias
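# Illustrative sketch (not part of the original file):
#   scan_alias(['select', 't1.name', 'from', 'singer', 'as', 't1'])
# returns {'t1': 'singer'}.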
def get_tables_with_alias(schema, toks):
tables = scan_alias(toks)
for key in schema:
assert key not in tables, "Alias {} has the same name in table".format(key)
tables[key] = key
return tables
def parse_col(toks, start_idx, tables_with_alias, schema, default_tables=None):
"""
:returns next idx, column id
"""
tok = toks[start_idx]
if tok == "*":
return start_idx + 1, schema.idMap[tok]
if '.' in tok: # if token is a composite
alias, col = tok.split('.')
key = tables_with_alias[alias] + "." + col
return start_idx+1, schema.idMap[key]
assert default_tables is not None and len(default_tables) > 0, "Default tables should not be None or empty"
for alias in default_tables:
table = tables_with_alias[alias]
if tok in schema.schema[table]:
key = table + "." + tok
return start_idx+1, schema.idMap[key]
assert False, "Error col: {}".format(tok)
def parse_col_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
"""
:returns next idx, (agg_op id, col_id)
"""
idx = start_idx
len_ = len(toks)
isBlock = False
isDistinct = False
if toks[idx] == '(':
isBlock = True
idx += 1
if toks[idx] in AGG_OPS:
agg_id = AGG_OPS.index(toks[idx])
idx += 1
assert idx < len_ and toks[idx] == '('
idx += 1
if toks[idx] == "distinct":
idx += 1
isDistinct = True
idx, col_id = parse_col(toks, idx, tables_with_alias, schema, default_tables)
assert idx < len_ and toks[idx] == ')'
idx += 1
return idx, (agg_id, col_id, isDistinct)
if toks[idx] == "distinct":
idx += 1
isDistinct = True
agg_id = AGG_OPS.index("none")
idx, col_id = parse_col(toks, idx, tables_with_alias, schema, default_tables)
if isBlock:
assert toks[idx] == ')'
idx += 1 # skip ')'
return idx, (agg_id, col_id, isDistinct)
def parse_val_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
isBlock = False
if toks[idx] == '(':
isBlock = True
idx += 1
col_unit1 = None
col_unit2 = None
unit_op = UNIT_OPS.index('none')
idx, col_unit1 = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
if idx < len_ and toks[idx] in UNIT_OPS:
unit_op = UNIT_OPS.index(toks[idx])
idx += 1
idx, col_unit2 = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
if isBlock:
assert toks[idx] == ')'
idx += 1 # skip ')'
return idx, (unit_op, col_unit1, col_unit2)
def parse_table_unit(toks, start_idx, tables_with_alias, schema):
"""
:returns next idx, table id, table name
"""
idx = start_idx
len_ = len(toks)
key = tables_with_alias[toks[idx]]
if idx + 1 < len_ and toks[idx+1] == "as":
idx += 3
else:
idx += 1
return idx, schema.idMap[key], key
def parse_value(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
isBlock = False
if toks[idx] == '(':
isBlock = True
idx += 1
if toks[idx] == 'select':
idx, val = parse_sql(toks, idx, tables_with_alias, schema)
elif "\"" in toks[idx]: # token is a string value
val = toks[idx]
idx += 1
else:
try:
val = float(toks[idx])
idx += 1
except:
end_idx = idx
while end_idx < len_ and toks[end_idx] != ',' and toks[end_idx] != ')'\
and toks[end_idx] != 'and' and toks[end_idx] not in CLAUSE_KEYWORDS and toks[end_idx] not in JOIN_KEYWORDS:
end_idx += 1
idx, val = parse_col_unit(toks[start_idx: end_idx], 0, tables_with_alias, schema, default_tables)
idx = end_idx
if isBlock:
assert toks[idx] == ')'
idx += 1
return idx, val
def parse_condition(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
conds = []
while idx < len_:
idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
not_op = False
if toks[idx] == 'not':
not_op = True
idx += 1
assert idx < len_ and toks[idx] in WHERE_OPS, "Error condition: idx: {}, tok: {}".format(idx, toks[idx])
op_id = WHERE_OPS.index(toks[idx])
idx += 1
val1 = val2 = None
if op_id == WHERE_OPS.index('between'): # between..and... special case: dual values
idx, val1 = parse_value(toks, idx, tables_with_alias, schema, default_tables)
assert toks[idx] == 'and'
idx += 1
idx, val2 = parse_value(toks, idx, tables_with_alias, schema, default_tables)
else: # normal case: single value
idx, val1 = parse_value(toks, idx, tables_with_alias, schema, default_tables)
val2 = None
conds.append((not_op, op_id, val_unit, val1, val2))
if idx < len_ and (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";") or toks[idx] in JOIN_KEYWORDS):
break
if idx < len_ and toks[idx] in COND_OPS:
conds.append(toks[idx])
idx += 1 # skip and/or
return idx, conds
def parse_select(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
assert toks[idx] == 'select', "'select' not found"
idx += 1
isDistinct = False
if idx < len_ and toks[idx] == 'distinct':
idx += 1
isDistinct = True
val_units = []
while idx < len_ and toks[idx] not in CLAUSE_KEYWORDS:
agg_id = AGG_OPS.index("none")
if toks[idx] in AGG_OPS:
agg_id = AGG_OPS.index(toks[idx])
idx += 1
idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
val_units.append((agg_id, val_unit))
if idx < len_ and toks[idx] == ',':
idx += 1 # skip ','
return idx, (isDistinct, val_units)
def parse_from(toks, start_idx, tables_with_alias, schema):
"""
Assume in the from clause, all table units are combined with join
"""
assert 'from' in toks[start_idx:], "'from' not found"
len_ = len(toks)
idx = toks.index('from', start_idx) + 1
default_tables = []
table_units = []
conds = []
while idx < len_:
isBlock = False
if toks[idx] == '(':
isBlock = True
idx += 1
if toks[idx] == 'select':
idx, sql = parse_sql(toks, idx, tables_with_alias, schema)
table_units.append((TABLE_TYPE['sql'], sql))
else:
if idx < len_ and toks[idx] == 'join':
idx += 1 # skip join
idx, table_unit, table_name = parse_table_unit(toks, idx, tables_with_alias, schema)
table_units.append((TABLE_TYPE['table_unit'],table_unit))
default_tables.append(table_name)
if idx < len_ and toks[idx] == "on":
idx += 1 # skip on
idx, this_conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
if len(conds) > 0:
conds.append('and')
conds.extend(this_conds)
if isBlock:
assert toks[idx] == ')'
idx += 1
if idx < len_ and (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";")):
break
return idx, table_units, conds, default_tables
def parse_where(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
if idx >= len_ or toks[idx] != 'where':
return idx, []
idx += 1
idx, conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
return idx, conds
def parse_group_by(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
col_units = []
if idx >= len_ or toks[idx] != 'group':
return idx, col_units
idx += 1
assert toks[idx] == 'by'
idx += 1
while idx < len_ and not (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";")):
idx, col_unit = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
col_units.append(col_unit)
if idx < len_ and toks[idx] == ',':
idx += 1 # skip ','
else:
break
return idx, col_units
def parse_order_by(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
val_units = []
order_type = 'asc' # default type is 'asc'
if idx >= len_ or toks[idx] != 'order':
return idx, val_units
idx += 1
assert toks[idx] == 'by'
idx += 1
while idx < len_ and not (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";")):
idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
val_units.append(val_unit)
if idx < len_ and toks[idx] in ORDER_OPS:
order_type = toks[idx]
idx += 1
if idx < len_ and toks[idx] == ',':
idx += 1 # skip ','
else:
break
return idx, (order_type, val_units)
def parse_having(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
if idx >= len_ or toks[idx] != 'having':
return idx, []
idx += 1
idx, conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
return idx, conds
def parse_limit(toks, start_idx):
idx = start_idx
len_ = len(toks)
if idx < len_ and toks[idx] == 'limit':
idx += 2
return idx, int(toks[idx-1])
return idx, None
def parse_sql(toks, start_idx, tables_with_alias, schema):
isBlock = False # indicate whether this is a block of sql/sub-sql
len_ = len(toks)
idx = start_idx
sql = {}
if toks[idx] == '(':
isBlock = True
idx += 1
# parse from clause in order to get default tables
from_end_idx, table_units, conds, default_tables = parse_from(toks, start_idx, tables_with_alias, schema)
sql['from'] = {'table_units': table_units, 'conds': conds}
# select clause
_, select_col_units = parse_select(toks, idx, tables_with_alias, schema, default_tables)
idx = from_end_idx
sql['select'] = select_col_units
# where clause
idx, where_conds = parse_where(toks, idx, tables_with_alias, schema, default_tables)
sql['where'] = where_conds
# group by clause
idx, group_col_units = parse_group_by(toks, idx, tables_with_alias, schema, default_tables)
sql['groupBy'] = group_col_units
# having clause
idx, having_conds = parse_having(toks, idx, tables_with_alias, schema, default_tables)
sql['having'] = having_conds
# order by clause
idx, order_col_units = parse_order_by(toks, idx, tables_with_alias, schema, default_tables)
sql['orderBy'] = order_col_units
# limit clause
idx, limit_val = parse_limit(toks, idx)
sql['limit'] = limit_val
idx = skip_semicolon(toks, idx)
if isBlock:
assert toks[idx] == ')'
idx += 1 # skip ')'
idx = skip_semicolon(toks, idx)
# intersect/union/except clause
for op in SQL_OPS: # initialize IUE
sql[op] = None
if idx < len_ and toks[idx] in SQL_OPS:
sql_op = toks[idx]
idx += 1
idx, IUE_sql = parse_sql(toks, idx, tables_with_alias, schema)
sql[sql_op] = IUE_sql
return idx, sql
def load_data(fpath):
with open(fpath) as f:
data = json.load(f)
return data
def get_sql(schema, query):
toks = tokenize(query)
tables_with_alias = get_tables_with_alias(schema.schema, toks)
_, sql = parse_sql(toks, 0, tables_with_alias, schema)
return sql
def skip_semicolon(toks, start_idx):
idx = start_idx
while idx < len(toks) and toks[idx] == ";":
idx += 1
return idx
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/process_sql.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to preprocess Spider to accomondate tokenization of NQG.
NQG performs simple space-separated tokenization. The tokenization in this
module to accomondate this primarily involves splitting on punctuation, e.g.
`"foo"` becomes `" foo "`.
"""
import string
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks.spider import sql_tokenizer
def _split_punc(source):
"""Split leading or trailing punctuation."""
tokens = source.split(" ")
new_tokens = []
for token in tokens:
if all(char in string.punctuation for char in token):
new_tokens.append(token)
continue
leading_punc = None
for punc in string.punctuation:
if token.startswith(punc):
leading_punc = punc
token = token.lstrip(punc)
break
trailing_punc = None
for punc in string.punctuation:
if token.endswith(punc):
trailing_punc = punc
token = token.rstrip(punc)
break
if leading_punc:
new_tokens.append(leading_punc)
if token:
new_tokens.append(token)
if trailing_punc:
new_tokens.append(trailing_punc)
return " ".join(new_tokens)
def process_source(source):
source = _split_punc(source)
# Remove extra whitespace.
source = " ".join(source.split())
return source
def process_target(target):
"""Preprocess target for space-separated tokenization."""
target_sql_tokens = sql_tokenizer.tokenize_sql(target)
target = " ".join(target_sql_tokens)
target = _split_punc(target)
# Split punc twice, to handle "%foo%" wrapped in two punc chars.
# TODO(petershaw): Update _split_punc to correctly handle this case with
# a single invocation.
target = _split_punc(target)
# Remove extra whitespace.
target = " ".join(target.split())
return target
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/nqg_tokenization.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for tokenizing SQL."""
import sqlparse
def _is_whitespace(sqlparse_token):
return sqlparse_token.ttype == sqlparse.tokens.Whitespace
def tokenize_sql(sql_exp):
sql_exp = sql_exp.lower()
sql_exp = sql_exp.rstrip(";")
parse = sqlparse.parse(sql_exp)
sql = parse[0]
flat_tokens = sql.flatten()
sql_tokens = [
token.value for token in flat_tokens if not _is_whitespace(token)
]
return sql_tokens
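# Illustrative sketch (not part of the original file): with sqlparse available,
#   tokenize_sql("SELECT name FROM singer;")
# is expected to return ['select', 'name', 'from', 'singer']
# (lowercased, with whitespace and the trailing semicolon removed).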
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/sql_tokenizer.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Serialize and append database schema to inputs."""
import collections
import json
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string("output", "", "Output tsv file.")
flags.DEFINE_string("tables", "", "Spider tables JSON file.")
def load_json(filepath):
with gfile.GFile(filepath, "r") as reader:
text = reader.read()
return json.loads(text)
def _get_schema_string(table_json):
"""Returns the schema serialized as a string."""
table_id_to_column_names = collections.defaultdict(list)
for table_id, name in table_json["column_names_original"]:
table_id_to_column_names[table_id].append(name.lower())
tables = table_json["table_names_original"]
table_strings = []
for table_id, table_name in enumerate(tables):
column_names = table_id_to_column_names[table_id]
table_string = " | %s : %s" % (table_name.lower(), " , ".join(column_names))
table_strings.append(table_string)
return "".join(table_strings)
def main(unused_argv):
tables_json = load_json(FLAGS.tables)
db_id_to_schema_string = {}
for table_json in tables_json:
db_id = table_json["db_id"].lower()
db_id_to_schema_string[db_id] = _get_schema_string(table_json)
examples = tsv_utils.read_tsv(FLAGS.input)
new_examples = []
for source, target in examples:
db_id = source.split()[0].rstrip(":")
schema_string = db_id_to_schema_string[db_id]
new_source = "%s%s" % (source, schema_string)
new_examples.append((new_source.lower(), target.lower()))
tsv_utils.write_tsv(new_examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/append_schema.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate gold targets with database ID for Spider evaluation."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tasks.spider import database_constants
from tensorflow.io import gfile
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "",
"Input tsv file (e.g. output of split_dataset.py).")
flags.DEFINE_string("output", "", "Output txt file.")
def main(unused_argv):
formatted_db_id_to_db_id = {}
for db_id in database_constants.DATABASES:
formatted_db_id_to_db_id[db_id.lower()] = db_id
formatted_db_id_to_db_id[db_id] = db_id
examples = tsv_utils.read_tsv(FLAGS.input)
with gfile.GFile(FLAGS.output, "w") as txt_file:
for example in examples:
db_id = example[0].split()[0].rstrip(":")
db_id = formatted_db_id_to_db_id[db_id]
txt_file.write("%s\t%s\n" % (example[1], db_id))
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/generate_gold.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants related to Spider databases."""
# Databases with >= 50 examples.
DATABASES = [
"student_assessment",
"bike_1",
"flight_1",
"allergy_1",
"store_1",
"customers_card_transactions",
"chinook_1",
"match_season",
"apartment_rentals",
"college_2",
"customers_and_invoices",
"small_bank_1",
"formula_1",
"csu_1",
"movie_1",
"inn_1",
"election",
"icfp_1",
"sakila_1",
"loan_1",
"college_1",
"sports_competition",
"hr_1",
"music_1",
"baseball_1",
"e_learning",
"hospital_1",
"student_1",
"cre_Doc_Tracking_DB",
"club_1",
"tracking_grants_for_research",
"network_2",
"college_3",
"department_store",
"soccer_2",
"cre_Drama_Workshop_Groups",
"music_2",
"manufactory_1",
"voter_2",
"products_gen_characteristics",
"dorm_1",
"cre_Theme_park",
"game_1",
"customers_and_addresses",
"music_4",
"cre_Docs_and_Epenses",
"wine_1",
"driving_school",
"activity_1",
"flight_4",
"tracking_orders",
]
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/spider/database_constants.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for extracting entities from geobase file.
geobase file is available at:
http://www.cs.utexas.edu/users/ml/nldata/geoquery.html
"""
import collections
from tensorflow.io import gfile
GeoEntity = collections.namedtuple(
"GeoEntity",
[
"aliases", # List of Strings.
"attribute", # String.
"identifier", # String.
])
def _add_underspecified_city_constant(city_name, identifiers_to_entities):
identifier = "cityid('%s',_)" % city_name
if identifier in identifiers_to_entities:
return
identifiers_to_entities[identifier] = GeoEntity(
identifier=identifier, attribute="cityid", aliases=[city_name])
def _add_city_constants(city_name, state_name, state_abbreviation,
identifiers_to_entities):
"""Add constants for fully and under-specified city."""
_add_underspecified_city_constant(city_name, identifiers_to_entities)
identifier = "cityid('%s','%s')" % (city_name, state_abbreviation)
if identifier in identifiers_to_entities:
return
aliases = [
"%s %s" % (city_name, state_name),
"%s %s" % (city_name, state_abbreviation),
]
identifiers_to_entities[identifier] = GeoEntity(
identifier=identifier, attribute="cityid", aliases=aliases)
def _add_state_constant(name, identifiers_to_entities):
identifier = "stateid('%s')" % name
if identifier in identifiers_to_entities:
return
identifiers_to_entities[identifier] = GeoEntity(
identifier=identifier, attribute="stateid", aliases=[name])
def _add_river_constant(name, identifiers_to_entities):
"""Add entities for rivers."""
identifier = "riverid('%s')" % name
if identifier in identifiers_to_entities:
return
identifiers_to_entities[identifier] = GeoEntity(
identifier=identifier, attribute="riverid", aliases=[name])
def _add_place_constant(name, identifiers_to_entities):
identifier = "placeid('%s')" % name
if identifier in identifiers_to_entities:
return
identifiers_to_entities[identifier] = GeoEntity(
identifier=identifier, attribute="placeid", aliases=[name])
def _add_usa(identifiers_to_entities):
"""Add constant for usa."""
# Only one `country` predicate appears in geobase:
# country('usa',307890000,9826675)
# Special-case `usa` and add some known aliases.
identifier = "countryid(usa)"
aliases = [
"america",
"continental us",
"united states",
"us",
"usa",
"country",
]
identifiers_to_entities[identifier] = GeoEntity(
identifier=identifier, attribute="countryid", aliases=aliases)
def load_entities(geobase_file):
"""Returns list of GeoEntity tuples for geobase entities."""
# Identifier string to GeoEntity tuple.
identifiers_to_entities = {}
with gfile.GFile(geobase_file, "r") as inputfile:
for line in inputfile:
# line = line.decode("latin1")
if line.startswith("state"):
splits = line.split("'")
state_name = splits[1]
state_abbreviation = splits[3]
city_capital = splits[5]
city_1 = splits[7]
city_2 = splits[9]
city_3 = splits[11]
city_4 = splits[13]
_add_state_constant(state_name, identifiers_to_entities)
for city_name in [city_capital, city_1, city_2, city_3, city_4]:
_add_city_constants(city_name, state_name, state_abbreviation,
identifiers_to_entities)
elif line.startswith("city"):
state_name = line.split("'")[1]
state_abbreviation = line.split("'")[3]
city_name = line.split("'")[5]
_add_city_constants(city_name, state_name, state_abbreviation,
identifiers_to_entities)
elif line.startswith("river"):
river_name = line.split("'")[1]
_add_river_constant(river_name, identifiers_to_entities)
elif line.startswith("mountain"):
mountain_name = line.split("'")[5]
_add_place_constant(mountain_name, identifiers_to_entities)
elif line.startswith("highlow"):
lowpoint_name = line.split("'")[5]
highpoint_name = line.split("'")[7]
_add_place_constant(lowpoint_name, identifiers_to_entities)
_add_place_constant(highpoint_name, identifiers_to_entities)
# This city is not mentioned in geobase directly, but referenced by a query
# in the train set.
_add_city_constants("springfield", "south dakota", "sd",
identifiers_to_entities)
_add_usa(identifiers_to_entities)
return identifiers_to_entities.values()
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/geobase_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Split dataset tsv file based on target templates."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import template_utils
from tasks import tsv_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string(
"output_1", "",
"Output tsv file containing up to `max_num_examples_1` examples.")
flags.DEFINE_string("output_2", "",
"Output tsv file containing the remaining examples.")
flags.DEFINE_float("max_num_examples_1", 440,
"Maximum number of examples for output_1.")
flags.DEFINE_integer("seed", 1, "Seed for splitting examples.")
def funql_template_fn(target):
"""Simply returns target since entities are already anonymized in targets."""
return target
def main(unused_argv):
examples = tsv_utils.read_tsv(FLAGS.input)
examples_1, examples_2 = template_utils.split_by_template(
examples,
template_fn=funql_template_fn,
max_num_examples_1=FLAGS.max_num_examples_1,
seed=FLAGS.seed)
tsv_utils.write_tsv(examples_1, FLAGS.output_1)
tsv_utils.write_tsv(examples_2, FLAGS.output_2)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/gen_template_split.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for defining atoms and compounds for FunQL."""
import collections
# Placeholder symbol for compounds.
_PLACEHOLDER = "__"
def _split_arguments(args_string):
"""Splits comma-joined argument list.
For example, an input of "foo, bar(xyz, abc), bar" will be split
into: ["foo", "bar(xyz, abc)", "bar"].
Args:
args_string: String input for comma-separated argument list.
Returns:
List of Strings for each argument.
"""
argument_buffer = []
arguments = []
open_parens = 0
for char in args_string:
if char == "," and open_parens == 0:
arguments.append("".join(argument_buffer))
argument_buffer = []
elif char == " " and not argument_buffer:
continue
else:
if char == "(":
open_parens += 1
elif char == ")":
open_parens -= 1
argument_buffer.append(char)
arguments.append("".join(argument_buffer))
return arguments
def _get_name_and_arguments(funql):
"""Returns function name and argument sub-expressions."""
funql = funql.strip()
paren_index = funql.find("(")
if paren_index == -1:
return funql, None
name = funql[:paren_index].strip()
arguments = funql[paren_index + 1:].strip()
if arguments[-1] != ")":
raise ValueError("Invalid arguments string ends with %s: %s" %
(arguments[-1], arguments))
arguments = _split_arguments(arguments[:-1])
return name, arguments
def _get_compound_string(outer, outer_arity, inner, inner_idx):
arguments = [_PLACEHOLDER] * outer_arity
arguments[inner_idx] = inner
return "%s( %s )" % (outer, " , ".join(arguments))
def _get_compounds_inner(funql, compounds_to_counts):
"""Recursively add compound counts to compounds_to_counts."""
name, arguments = _get_name_and_arguments(funql)
if not arguments:
return
for argument_idx, argument in enumerate(arguments):
argument_name, _ = _get_name_and_arguments(argument)
compound = _get_compound_string(name, len(arguments), argument_name,
argument_idx)
compounds_to_counts[compound] += 1
_get_compounds_inner(argument, compounds_to_counts)
def get_compounds(target):
"""Use combinations of 2 atoms as compounds."""
compounds_to_count = collections.Counter()
_get_compounds_inner(target, compounds_to_count)
return compounds_to_count
def get_atoms(target):
"""Use individual tokens as atoms."""
atoms = set()
for token in target.split():
if token not in ("(", ")", ","):
atoms.add(token)
return atoms
def get_atoms_with_num_arguments(target):
"""Consider symbols and their number of arguments."""
name, arguments = _get_name_and_arguments(target)
if arguments:
atoms = set()
atoms.add("%s_(%s)" % (name, len(arguments)))
for argument in arguments:
atoms |= get_atoms_with_num_arguments(argument)
return atoms
else:
return {name}
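# Illustrative sketch (not part of the original file): for the space-separated
# FunQL target "largest ( state ( all ) )":
#   get_atoms(...)                    -> {'largest', 'state', 'all'}
#   get_compounds(...)                -> Counter({'largest( state )': 1, 'state( all )': 1})
#   get_atoms_with_num_arguments(...) -> {'largest_(1)', 'state_(1)', 'all'}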
def get_example_compounds(example):
return get_compounds(example[1])
def get_example_atoms(example):
return get_atoms(example[1])
def get_example_atoms_with_num_arguments(example):
return get_atoms_with_num_arguments(example[1])
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/tmcd_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes % of examples in input_2 containing an atom not input_1."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import mcd_utils
from tasks import tsv_utils
from tasks.geoquery import tmcd_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("input_1", "", "Input tsv file.")
flags.DEFINE_string("input_2", "", "Input tsv file.")
def main(unused_argv):
examples_1 = tsv_utils.read_tsv(FLAGS.input_1)
examples_2 = tsv_utils.read_tsv(FLAGS.input_2)
atoms_1 = mcd_utils.get_all_atoms(
examples_1, get_atoms_fn=tmcd_utils.get_example_atoms)
num_examples = 0
num_examples_with_unseen_atom = 0
for example in examples_2:
atoms = tmcd_utils.get_example_atoms(example)
num_examples += 1
for atom in atoms:
if atom not in atoms_1:
print("New atom: %s" % atom)
num_examples_with_unseen_atom += 1
break
print("num_examples: %s" % num_examples)
print("num_examples_with_unseen_atom: %s" % num_examples_with_unseen_atom)
print("pct: %s" % (float(num_examples_with_unseen_atom) / num_examples))
if __name__ == "__main__":
app.run(main)
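# Example invocation (file paths are hypothetical; both inputs are tsv files
# of <utterance, funql> pairs as written by tasks/geoquery/write_dataset.py):
#   python -m tasks.geoquery.measure_unseen_atoms \
#     --input_1=train.tsv --input_2=test.tsv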
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/measure_unseen_atoms.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for generating geoquery data."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import tsv_utils
from tasks.geoquery import entity_utils
from tasks.geoquery import funql_normalization
from tasks.geoquery import geobase_utils
from tasks.geoquery import xml_file_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("corpus", "", "Path to geoquery xml file.")
flags.DEFINE_string("geobase", "", "Path to geobase file.")
flags.DEFINE_string("output", "", "Output dataset file.")
def get_examples():
"""Return list of example tuples."""
xml_examples = xml_file_utils.read_examples(FLAGS.corpus)
examples = []
geobase_entities = geobase_utils.load_entities(FLAGS.geobase)
for utterance, funql in xml_examples:
funql = funql_normalization.normalize_funql(funql)
funql, utterance, _ = entity_utils.replace_entities(funql, utterance,
geobase_entities)
funql = funql_normalization.add_space_separation(funql)
examples.append((utterance, funql))
return examples
def main(unused_argv):
examples = get_examples()
tsv_utils.write_tsv(examples, FLAGS.output)
if __name__ == "__main__":
app.run(main)
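# Example invocation (file paths are hypothetical; `corpus` is the GeoQuery
# XML corpus and `geobase` the geobase data file used for entity resolution):
#   python -m tasks.geoquery.write_dataset \
#     --corpus=geoquery.xml --geobase=geobase --output=geoquery.tsv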
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/write_dataset.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reading XML datafile for GeoQuery."""
from xml.etree import ElementTree
from tensorflow.io import gfile
def process_utterance(utterance):
"""Lowercase and remove punctuation."""
return utterance.lower().rstrip("?").rstrip(".").rstrip().replace(" '", "")
def process_funql(funql):
"""Remove quotes and unnecessary spaces."""
funql = funql.replace("'", "")
funql = funql.replace(", ", ",")
funql = funql.replace(", ", ",")
funql = funql.replace(" ,", ",")
return funql
def load_xml_tree(corpus):
with gfile.GFile(corpus, "r") as xml_file:
return ElementTree.fromstring(xml_file.read())
def get_utterance(example_root):
for utterance in example_root.findall("nl"):
if utterance.attrib["lang"] == "en":
return process_utterance(utterance.text.strip())
raise ValueError("Could not find utterance.")
def get_funql(example_root):
for mrl in example_root.findall("mrl"):
if mrl.attrib["lang"] == "geo-funql":
return process_funql(mrl.text.strip())
raise ValueError("Could not find funql.")
def read_examples(corpus):
examples = []
root = load_xml_tree(corpus)
for example_root in root:
examples.append((get_utterance(example_root), get_funql(example_root)))
return examples
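# Illustrative behavior of the two normalization helpers above (the inputs
# are hypothetical examples, not taken from the corpus):
#   process_utterance("What is the capital of Texas ?")
#     -> "what is the capital of texas"
#   process_funql("answer(capital_1(stateid('texas')))")
#     -> "answer(capital_1(stateid(texas)))"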
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/xml_file_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Measures and prints compound divergence between two sets of examples."""
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import mcd_utils
from tasks import tsv_utils
from tasks.geoquery import tmcd_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("input_1", "", "Input tsv file.")
flags.DEFINE_string("input_2", "", "Input tsv file.")
def main(unused_argv):
examples_1 = tsv_utils.read_tsv(FLAGS.input_1)
examples_2 = tsv_utils.read_tsv(FLAGS.input_2)
divergence = mcd_utils.measure_example_divergence(
examples_1, examples_2, get_compounds_fn=tmcd_utils.get_example_compounds)
print("Compound divergence: %s" % divergence)
if __name__ == "__main__":
app.run(main)
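# Example invocation (file paths are hypothetical):
#   python -m tasks.geoquery.measure_compound_divergence \
#     --input_1=split_train.tsv --input_2=split_test.tsv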
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/measure_compound_divergence.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for replacing entities in FunQL."""
def _maybe_list_replace(lst, sublist, replacement):
"""Replace first occurrence of sublist in lst with replacement."""
new_list = []
idx = 0
replaced = False
while idx < len(lst):
if not replaced and lst[idx:idx + len(sublist)] == sublist:
new_list.append(replacement)
replaced = True
idx += len(sublist)
else:
new_list.append(lst[idx])
idx += 1
if not replaced:
return None
return new_list
def _maybe_replace_entity(funql, utterance, mention_map, geobase_entity):
"""Replace entity identifiers so that they can be generated using copy."""
# GeoQuery has <= 2 mentions per query.
mention_marker = "m1" if "m0" in utterance else "m0"
# Split utterance to avoid replacing some substring of a token.
tokens = utterance.split(" ")
# Order aliases by longest alias, since some can be nested in others.
aliases = sorted(geobase_entity.aliases, key=lambda x: -len(x))
for alias in aliases:
alias_tokens = alias.split(" ")
new_tokens = _maybe_list_replace(tokens, alias_tokens, mention_marker)
if new_tokens:
normalized_identifier = geobase_entity.identifier.replace("'", "")
new_funql = funql.replace(normalized_identifier, mention_marker)
new_utterance = " ".join(new_tokens)
mention_map[mention_marker] = geobase_entity.identifier
return new_funql, new_utterance, mention_map
# Could not find alias.
return funql, utterance, mention_map
def replace_entities(funql, utterance, geobase_entities):
"""Replace entity references with something more copy friendly."""
# Order entities by longest identifier, since some can be nested
# in others.
geobase_entities = sorted(geobase_entities, key=lambda x: -len(x.identifier))
mention_map = {}
for geobase_entity in geobase_entities:
normalized_identifier = geobase_entity.identifier.replace("'", "")
if normalized_identifier in funql:
funql, utterance, mention_map = _maybe_replace_entity(
funql, utterance, mention_map, geobase_entity)
return funql, utterance, mention_map
def restore_entities(funql, mention_map):
"""Restore entities in funql."""
for mention_mark, identifier in mention_map.items():
funql = funql.replace(mention_mark, "%s" % identifier)
return funql
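# Minimal illustrative sketch (not part of the original module). The entity
# below is a hypothetical stand-in exposing the two attributes that
# replace_entities relies on: `identifier` and `aliases`.
if __name__ == "__main__":
  import collections
  _FakeEntity = collections.namedtuple("_FakeEntity", ["identifier", "aliases"])
  _entity = _FakeEntity(identifier="stateid('texas')", aliases=["texas"])
  _funql, _utterance, _mentions = replace_entities(
      "answer(capital_1(stateid(texas)))",
      "what is the capital of texas", [_entity])
  print(_funql)      # answer(capital_1(m0))
  print(_utterance)  # what is the capital of m0
  print(restore_entities(_funql, _mentions))  # answer(capital_1(stateid('texas')))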
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/entity_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for (reversible) normalization of FunQL.
FunQL is defined here:
https://www.cs.utexas.edu/~ml/wasp/geo-funql.html
We use the corresponding lambda term definitions to expand various functions
to a more intuitive form that better reflects the arity of the underlying
operations.
"""
RELATION_CONSTANTS = [
"area_1", "capital_1", "capital_2", "density_1", "elevation_1",
"elevation_2", "high_point_1", "high_point_2", "higher_2", "loc_1", "loc_2",
"low_point_1", "low_point_2", "lower_2", "next_to_1", "next_to_2",
"population_1", "traverse_1", "traverse_2", "longer", "len", "size"
]
# Can occur with `all` as argument.
UNARY_CONSTANTS = [
"capital", "city", "lake", "major", "mountain", "place", "river", "state"
]
ENTITY_FUNCTIONS = ["cityid", "stateid", "riverid", "placeid", "countryid"]
ARITY_1 = [
"largest", "smallest", "highest", "lowest", "longest", "shortest", "count",
"sum"
]
ARITY_2 = ["largest_one", "smallest_one"]
ARITY_3 = ["most", "fewest"]
def _split_arguments(span):
"""Splits span into list of spans based on commas."""
argument_buffer = []
arguments = []
open_parens = 0
for char in span:
if char == "," and open_parens == 0:
arguments.append("".join(argument_buffer))
argument_buffer = []
elif char == " " and not argument_buffer:
continue
else:
if char == "(":
open_parens += 1
elif char == ")":
open_parens -= 1
argument_buffer.append(char)
arguments.append("".join(argument_buffer))
return arguments
def _get_name_and_arguments(span):
"""Returns function name and argument sub-expressions."""
span = span.strip()
paren_index = span.find("(")
if paren_index == -1:
raise ValueError("Funql contains no `(`: %s" % span)
name = span[:paren_index]
arguments = span[paren_index + 1:]
if arguments[-1] != ")":
raise ValueError("Invalid arguments string ends with %s: %s" %
(arguments[-1], arguments))
arguments = _split_arguments(arguments[:-1])
return name, arguments
def _convert_function(name, argument_0, arity):
"""Converts a function that contains nested arguments."""
output_arguments = []
inner_funql = argument_0
for _ in range(arity - 1):
nested_argument, arguments = _get_name_and_arguments(inner_funql)
if len(arguments) > 1:
raise ValueError
inner_funql = arguments[0]
output_arguments.append(nested_argument)
output_arguments.append(normalize_funql(inner_funql))
output = "%s(%s)" % (name, ",".join(output_arguments))
return output
def normalize_funql(funql):
"""Recursively parse FunQL string to re-formatted string."""
# Special constant used for "sea level".
if funql == "0":
return "0"
name, arguments = _get_name_and_arguments(funql)
if name == "answer":
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
return "%s(%s)" % (name, normalize_funql(argument_0))
elif name in ENTITY_FUNCTIONS:
return funql
elif name in RELATION_CONSTANTS:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
reformatted_argument_0 = normalize_funql(argument_0)
if not reformatted_argument_0:
raise ValueError("Failed to reformat: %s" % argument_0)
return "%s(%s)" % (name, reformatted_argument_0)
elif name in UNARY_CONSTANTS:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
if argument_0 == "all":
return name
else:
recursive_term = normalize_funql(argument_0)
return "intersection(%s,%s)" % (name, recursive_term)
elif name == "intersection" or name == "exclude":
if len(arguments) != 2:
raise ValueError
argument_0 = arguments[0]
argument_1 = arguments[1]
term_a = normalize_funql(argument_0)
term_b = normalize_funql(argument_1)
return "%s(%s,%s)" % (name, term_a, term_b)
elif name in ARITY_1:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
return _convert_function(name, argument_0, 1)
elif name in ARITY_2:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
return _convert_function(name, argument_0, 2)
elif name in ARITY_3:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
return _convert_function(name, argument_0, 3)
else:
raise ValueError("No match for name: %s" % name)
def restore_funql(funql):
"""Recursively parse FunQL string back to original string."""
# Special constant used for "sea level".
if funql == "0":
return "0"
if funql in UNARY_CONSTANTS:
return "%s(all)" % funql
name, arguments = _get_name_and_arguments(funql)
if name == "answer" or name in RELATION_CONSTANTS or name in ARITY_1:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
return "%s(%s)" % (name, restore_funql(argument_0))
elif name in RELATION_CONSTANTS:
if len(arguments) != 1:
raise ValueError
argument_0 = arguments[0]
restored_argument_0 = restore_funql(argument_0)
if not restored_argument_0:
raise ValueError("Failed to restore: %s" % argument_0)
return "%s(%s)" % (name, restored_argument_0)
elif name in ENTITY_FUNCTIONS:
return funql
elif name == "intersection":
if len(arguments) != 2:
raise ValueError
argument_0 = arguments[0]
argument_1 = arguments[1]
term_a = restore_funql(argument_0)
term_b = restore_funql(argument_1)
if argument_0 in UNARY_CONSTANTS:
return "%s(%s)" % (argument_0, restore_funql(argument_1))
if argument_1 in UNARY_CONSTANTS:
raise ValueError
return "%s(%s,%s)" % (name, term_a, term_b)
elif name == "exclude":
if len(arguments) != 2:
raise ValueError
argument_0 = arguments[0]
argument_1 = arguments[1]
term_a = restore_funql(argument_0)
term_b = restore_funql(argument_1)
return "%s(%s,%s)" % (name, term_a, term_b)
elif name in ARITY_2:
if len(arguments) != 2:
raise ValueError("Unexpected number of arguments `%s` for `%s`" %
(arguments, name))
argument_0 = arguments[0]
argument_1 = arguments[1]
return "%s(%s(%s))" % (name, argument_0, restore_funql(argument_1))
elif name in ARITY_3:
if len(arguments) != 3:
raise ValueError
argument_0 = arguments[0]
argument_1 = arguments[1]
argument_2 = arguments[2]
return "%s(%s(%s(%s)))" % (name, argument_0, argument_1,
restore_funql(argument_2))
else:
raise ValueError("No match for name: %s" % name)
def add_space_separation(funql):
"""Split funql and join with space separator."""
separators = "(),"
buffer = ""
symbols = []
for char in funql:
if char in separators:
if buffer:
symbols.append(buffer)
buffer = ""
symbols.append(char)
else:
buffer += char
if buffer:
symbols.append(buffer)
return " ".join(symbols)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/funql_normalization.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Split dataset tsv file based on TMCD methodology."""
import random
from absl import app
from absl import flags
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from tasks import mcd_utils
from tasks import tsv_utils
from tasks.geoquery import tmcd_utils
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_string("output_1", "",
"Output tsv file containing `num_examples_1` examples.")
flags.DEFINE_string("output_2", "",
"Output tsv file containing the remaining examples.")
flags.DEFINE_integer("num_examples_1", 440, "Number of examples for output_1.")
flags.DEFINE_integer("seed", 1, "Seed for splitting examples.")
flags.DEFINE_integer("min_atom_count", 1, "Min occurrences of atoms.")
flags.DEFINE_bool(
"get_atoms_with_num_arguments", False,
"Whether to treat symbols that appear with different numbers "
"of arguments as different atoms.")
def main(unused_argv):
examples = tsv_utils.read_tsv(FLAGS.input)
# First, randomly split examples.
random.seed(FLAGS.seed)
random.shuffle(examples)
examples_1 = examples[:FLAGS.num_examples_1]
examples_2 = examples[FLAGS.num_examples_1:]
# Swap examples to meet atom constraint and maximize compound divergence.
get_atoms_fn = (
tmcd_utils.get_example_atoms_with_num_arguments
if FLAGS.get_atoms_with_num_arguments else tmcd_utils.get_example_atoms)
examples_1, examples_2 = mcd_utils.swap_examples(
examples_1,
examples_2,
get_compounds_fn=tmcd_utils.get_example_compounds,
get_atoms_fn=get_atoms_fn,
max_iterations=1000,
max_divergence=None,
min_atom_count=FLAGS.min_atom_count)
tsv_utils.write_tsv(examples_1, FLAGS.output_1)
tsv_utils.write_tsv(examples_2, FLAGS.output_2)
if __name__ == "__main__":
app.run(main)
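# Example invocation (file paths are hypothetical; splits a tsv of
# <utterance, funql> pairs into `num_examples_1` examples and the remainder,
# then swaps examples to maximize compound divergence):
#   python -m tasks.geoquery.gen_tmcd_split \
#     --input=geoquery.tsv --output_1=train.tsv --output_2=test.tsv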
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/tasks/geoquery/gen_tmcd_split.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CFG parser for non-binarized grammars.
The parser uses callbacks so that it can be flexibly extended to various
use cases, such as QCFG parsing.
There are two equivalent implementations, with the Trie variant being a bit
more complicated but faster for most applications, especially for longer inputs.
"""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from common.cky import cky_utils
from common.cky import trie_utils
def parse(input_ids,
rules,
nonterminals,
start_idx,
populate_fn,
postprocess_fn,
use_trie=True,
verbose=False):
"""Run bottom up parser.
Let T be an arbitrary type for chart entries, specified by the return type
  of populate_fn. Examples for T are simple types that merely indicate presence
of a parse for a given span, or more complex structures that represent
parse forests.
Args:
input_ids: List of integers corresponding to idx of terminal CFGSymbols in
rules.
rules: A list of CFGRule instances.
nonterminals: Collection of CFGSymbol objects for possible non-terminals.
start_idx: Index of non-terminal that is start symbol.
    populate_fn: A function that takes: (span_begin (Integer), span_end
(Integer), parser_rule (CFGRule), substitutions (List of T)) and returns
an object of type T, which can be any type. This object is added to the
chart. Depending on what information is desired about completed parses, T
can be anything from a simple count to a complex parse forest object.
postprocess_fn: A function that takes and returns a list of T. This function
post-processes each cell after it has been populated. This function is
useful for pruning the chart, or merging equivalent entries. Ignored if
None.
use_trie: Whether to use the Trie-based parsing algorithm.
verbose: Print debug logging if True.
Returns:
A list of T.
"""
if use_trie:
return trie_utils.parse(input_ids, rules, nonterminals, start_idx,
populate_fn, postprocess_fn, verbose)
else:
return cky_utils.parse(input_ids, rules, nonterminals, start_idx,
populate_fn, postprocess_fn, verbose)
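# Tiny illustrative grammar (not part of the original module): a single rule
# S -> "a" "b" over integer ids, with populate_fn simply counting parses.
# The symbol ids and start index are arbitrary choices for this sketch; the
# same call with use_trie=False exercises the CKY implementation instead.
if __name__ == "__main__":
  from common.cky import cfg_rule
  _rule = cfg_rule.CFGRule(
      idx=0,
      lhs=0,  # S
      rhs=(cfg_rule.CFGSymbol(0, cfg_rule.TERMINAL),
           cfg_rule.CFGSymbol(1, cfg_rule.TERMINAL)))
  _parses = parse(
      input_ids=[0, 1],
      rules=[_rule],
      nonterminals={0},
      start_idx=0,
      populate_fn=lambda begin, end, rule, children: 1,
      postprocess_fn=None)
  print(_parses)  # [1] - exactly one parse spans the full input.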
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/common/cky/cfg_parser.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements a CFG parser based on a variant of the CKY algorithm.
The parser is naively extended to consider non-binarized rules containing up
to 2 RHS non-terminals and any number of terminals. The runtime for this
naive implementation is therefore O(n^6), which can be too slow for longer
inputs.
"""
import collections
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from common.cky import cfg_rule
def parse(input_ids,
rules,
nonterminals,
start_idx,
populate_fn,
postprocess_fn,
verbose=False):
"""Run bottom up parser using variant of CKY algorithm."""
input_len = len(input_ids)
input_symbols = tuple(
[cfg_rule.CFGSymbol(idx, cfg_rule.TERMINAL) for idx in input_ids])
# Initialize the empty chart.
# Keys are a 3-tuple of Integers: (span_begin, span_end, nonterminal_idx)
# Values are a list of T.
chart = collections.defaultdict(list)
# Index rules by RHS.
rhs_to_rules = collections.defaultdict(list)
for rule in rules:
rhs_to_rules[rule.rhs].append(rule)
# Populate the chart.
for span_end in range(1, input_len + 1):
for span_begin in range(span_end - 1, -1, -1):
# Find matching rules with 0 NTs.
rhs_key_0_nt = input_symbols[span_begin:span_end]
if rhs_key_0_nt in rhs_to_rules:
for rule in rhs_to_rules[rhs_key_0_nt]:
chart[span_begin, span_end,
rule.lhs].append(populate_fn(span_begin, span_end, rule, []))
# Find matching rules with 1 NTs.
for nt_0_start in range(span_begin, span_end):
for nt_0_end in range(nt_0_start + 1, span_end + 1):
for nt_0 in nonterminals:
rhs_key_1_nt = (
input_symbols[span_begin:nt_0_start] +
(cfg_rule.CFGSymbol(nt_0, cfg_rule.NON_TERMINAL),) +
input_symbols[nt_0_end:span_end])
if rhs_key_1_nt in rhs_to_rules:
for node_0 in chart[nt_0_start, nt_0_end, nt_0]:
for rule in rhs_to_rules[rhs_key_1_nt]:
chart[span_begin, span_end, rule.lhs].append(
populate_fn(span_begin, span_end, rule, [node_0]))
# Find matching rules with 2 NTs.
for nt_0_start in range(span_begin, span_end - 1):
for nt_0_end in range(nt_0_start + 1, span_end):
for nt_1_start in range(nt_0_end, span_end):
for nt_1_end in range(nt_1_start + 1, span_end + 1):
for nt_0 in nonterminals:
for nt_1 in nonterminals:
rhs_key_2_nt = (
input_symbols[span_begin:nt_0_start] +
(cfg_rule.CFGSymbol(nt_0, cfg_rule.NON_TERMINAL),) +
input_symbols[nt_0_end:nt_1_start] +
(cfg_rule.CFGSymbol(nt_1, cfg_rule.NON_TERMINAL),) +
input_symbols[nt_1_end:span_end])
if rhs_key_2_nt in rhs_to_rules:
nt_0_index = (nt_0_start, nt_0_end, nt_0)
nt_1_index = (nt_1_start, nt_1_end, nt_1)
for node_0 in chart[nt_0_index]:
for node_1 in chart[nt_1_index]:
for rule in rhs_to_rules[rhs_key_2_nt]:
chart[span_begin, span_end, rule.lhs].append(
populate_fn(span_begin, span_end, rule,
[node_0, node_1]))
if postprocess_fn:
for nt in nonterminals:
chart[span_begin, span_end, nt] = postprocess_fn(chart[span_begin,
span_end, nt])
if verbose:
for nt in nonterminals:
cell = chart[span_begin, span_end, nt]
if cell:
print("Populated (%s,%s): %s - %s" %
(span_begin, span_end, nt, cell))
# Return completed parses.
return chart[(0, input_len, start_idx)]
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/common/cky/cky_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define structures to represent CFG symbols and rules.
For efficiency, all symbols are referenced by integers rather than strings.
This typically requires some pre-processing to define terminal
and non-terminal vocabularies and map symbols to corresponding integers.
"""
import collections
# CFGSymbol type constants.
TERMINAL = 0
NON_TERMINAL = 1
# Represents a TERMINAL or NON_TERMINAL symbol.
CFGSymbol = collections.namedtuple(
"CFGSymbol",
[
"idx", # Integer (considered as separate id spaces for different type).
"type", # Integer (TERMINAL or NON_TERMINAL).
])
# Represents a CFG rule.
CFGRule = collections.namedtuple(
"CFGRule",
[
"idx", # Integer to optionally reference additional rule information.
"lhs", # Integer non-terminal index.
"rhs", # Tuple of >= 1 CFGSymbols.
])
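# Illustrative encoding (not part of the original module) of the rule
# NT_0 -> "foo" NT_1, assuming "foo" has terminal id 5 and the non-terminals
# have ids 0 and 1:
#   CFGRule(idx=0, lhs=0,
#           rhs=(CFGSymbol(5, TERMINAL), CFGSymbol(1, NON_TERMINAL)))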
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/common/cky/cfg_rule.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements CKY parsing using a Trie data structure to index rules.
This implementation supports non-binarized grammars with rules containing
up to 2 non-terminals.
For each span, rather than enumerating every possible sub-span for up to
2 non-terminals, the algorithm iterates across the span left-to-right and
attempts to match rules stored in a Trie.
"""
import collections
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from common.cky import cfg_rule
class TrieNode(object):
"""Represents a node in a generic Trie data structure."""
def __init__(self, symbol=None):
# The individual symbol associated with this node.
self.symbol = symbol # Can only be None for root.
# Map from symbol to TrieNode.
self.symbol_to_child = {}
    # A list of arbitrarily-typed values associated with this node.
self.values = []
def maybe_add_child(self, symbol):
"""Adds a new node for a given child symbol if not already in Trie."""
if symbol in self.symbol_to_child:
return self.symbol_to_child[symbol]
else:
node = TrieNode(symbol)
self.symbol_to_child[symbol] = node
return node
def maybe_get_child(self, symbol):
return self.symbol_to_child.get(symbol)
def __str__(self):
return "%s %s" % (self.symbol, set(self.symbol_to_child.keys()))
def __repr__(self):
return str(self)
def print_trie(trie_node, indent=0):
"""Recursively prints Trie for debugging purposes."""
print("%s %s" % ("-" * indent, trie_node.symbol))
for value in trie_node.values:
print("%s value: %s" % ("-" * indent, value))
for child in trie_node.symbol_to_child.values():
print_trie(child, indent=indent + 1)
def add_rule_to_trie(trie_root, rule):
current_node = trie_root
for symbol in rule.rhs:
current_node = current_node.maybe_add_child(symbol)
current_node.values.append(rule)
class Chart(object):
"""Represents parse chart state."""
def __init__(self, populate_fn, postprocess_fn):
# The key_map stores chart entries (of type T) indexed by:
# (span_begin, span_end, nonterminal)
self.key_map = collections.defaultdict(list)
# For optimization purposes, we also index chart entries by their
# span_begin index only in start_map.
# Key is span_begin and value is List of (span_end, nonterminal).
self.start_map = collections.defaultdict(set)
# See `cfg_parser.py` for definitions of populate_fn and postprocess_fn.
self.populate_fn = populate_fn
self.postprocess_fn = postprocess_fn
def add(self, span_begin, span_end, rule, children):
"""Add an entry to the chart."""
entry = self.populate_fn(span_begin, span_end, rule, children)
nonterminal = rule.lhs
self.key_map[(span_begin, span_end, nonterminal)].append(entry)
self.start_map[span_begin].add((span_end, nonterminal))
def get_from_key(self, span_begin, span_end, nonterminal):
"""Get entries based on full key."""
return self.key_map[(span_begin, span_end, nonterminal)]
def get_from_start(self, span_begin):
"""Get entries based on start index only."""
return self.start_map[span_begin]
def postprocess(self, span_begin, span_end, nonterminal):
"""Apply postpostprocess_fn to a chart cell."""
if self.postprocess_fn:
self.key_map[(span_begin, span_end, nonterminal)] = self.postprocess_fn(
self.key_map[(span_begin, span_end, nonterminal)])
# For a given span, SearchState represents a potential match with a ParserRule.
SearchState = collections.namedtuple(
"SearchState",
[
"anchored_nonterminals", # List of (span_begin, span_end, nonterminal).
"trie_node", # TrieNode.
])
# The maximum number of RHS non-terminals in ParserRules that are supported.
MAX_NONTERMINALS = 2
def parse(input_ids,
rules,
nonterminals,
start_idx,
populate_fn,
postprocess_fn,
verbose=False):
"""Run bottom up parser using Trie-based implementation."""
input_len = len(input_ids)
input_symbols = tuple(
[cfg_rule.CFGSymbol(idx, cfg_rule.TERMINAL) for idx in input_ids])
# Initialize the empty chart.
chart = Chart(populate_fn, postprocess_fn)
# Initialize Trie of rules.
trie_root = TrieNode()
for rule in rules:
add_rule_to_trie(trie_root, rule)
# Populate the chart.
for span_end in range(1, input_len + 1):
for span_begin in range(span_end - 1, -1, -1):
# Map of span_begin to List of SearchState.
search_map = collections.defaultdict(list)
search_map[span_begin].append(SearchState([], trie_root))
# Iterate across every input token in the span range to find rule matches.
for idx in range(span_begin, span_end):
# End early if there are no remaining candidate matches.
if not search_map[idx]:
continue
terminal_symbol = input_symbols[idx]
# Iterate through partial matches.
while search_map[idx]:
search_state = search_map[idx].pop()
# Consider matching terminal.
new_trie_node = search_state.trie_node.maybe_get_child(
terminal_symbol)
if new_trie_node:
# Found a match for the terminal in the Trie.
# Add a partial match to search_map with idx incremented by 1 token.
new_search_state = SearchState(search_state.anchored_nonterminals,
new_trie_node)
search_map[idx + 1].append(new_search_state)
# Consider matching non-terminal.
nonterminal_tuples = chart.get_from_start(idx)
if len(search_state.anchored_nonterminals) < MAX_NONTERMINALS:
# Iterate through lower chart entries with a completed sub-tree
# that starts at the current index.
for nt_end, nonterminal in nonterminal_tuples:
nonterminal_symbol = cfg_rule.CFGSymbol(nonterminal,
cfg_rule.NON_TERMINAL)
new_trie_node = search_state.trie_node.maybe_get_child(
nonterminal_symbol)
if new_trie_node:
# Found a match for the non-terminal in the Trie.
# Add a partial match to search_map with idx set to the end
# of the sub-tree span.
new_anchored_nonterminals = search_state.anchored_nonterminals[:]
new_anchored_nonterminals.append((idx, nt_end, nonterminal))
search_map[nt_end].append(
SearchState(new_anchored_nonterminals, new_trie_node))
# Loop through search_map for completed matches at span_end.
for search_state in search_map[span_end]:
# Get the ParserRule(s) associated with the particular Trie path.
matched_rules = search_state.trie_node.values
if not matched_rules:
continue
for rule in matched_rules:
# Given the ParserRule and anchored nonterminal positions, generate
# new chart entries and add chart.
if len(search_state.anchored_nonterminals) == 1:
# Matched rule contains 1 non-terminal.
for child in chart.get_from_key(
*search_state.anchored_nonterminals[0]):
chart.add(span_begin, span_end, rule, [child])
elif len(search_state.anchored_nonterminals) == 2:
# Matched rule contains 2 non-terminals.
for child_0 in chart.get_from_key(
*search_state.anchored_nonterminals[0]):
for child_1 in chart.get_from_key(
*search_state.anchored_nonterminals[1]):
chart.add(span_begin, span_end, rule, [child_0, child_1])
elif len(search_state.anchored_nonterminals) > 2:
raise ValueError
else:
# Matched rule contains 0 non-terminals.
chart.add(span_begin, span_end, rule, [])
for nt in nonterminals:
chart.postprocess(span_begin, span_end, nt)
if verbose:
for nt in nonterminals:
cell = chart.get_from_key(span_begin, span_end, nt)
if cell:
print("Populated (%s,%s): %s - %s" %
(span_begin, span_end, nt, cell))
# Return completed parses.
return chart.get_from_key(0, input_len, start_idx)
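# Illustrative only (not part of the original module): index one rule in a
# Trie and print its structure; the symbol ids are arbitrary for this sketch.
if __name__ == "__main__":
  _root = TrieNode()
  add_rule_to_trie(
      _root,
      cfg_rule.CFGRule(
          idx=0,
          lhs=0,
          rhs=(cfg_rule.CFGSymbol(0, cfg_rule.TERMINAL),
               cfg_rule.CFGSymbol(1, cfg_rule.NON_TERMINAL))))
  print_trie(_root)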
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/common/cky/trie_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilties for testing."""
from official.nlp.bert import configs
# Tokens used in tests.
_TOKENS = [
"[PAD]", "[CLS]", "[SEP]", "[unused0]", "[unused1]", "foo", "bar", "what",
"river", "traverses", "the", "most", "states", "and"
]
class MockTokenizer(object):
"""Mock tokenizer to replace `tokenization.FullTokenizer` in tests."""
def __init__(self, **kwargs):
del kwargs
self.tokens_to_ids = {
token: token_id for token_id, token in enumerate(_TOKENS)
}
def tokenize(self, input_str):
return input_str.split()
def convert_tokens_to_ids(self, tokens):
return [self.tokens_to_ids[token] for token in tokens]
def get_test_config():
return {
"batch_size": 4,
"learning_rate": 0.001,
"training_steps": 10000,
"warmup_steps": 100,
"steps_per_iteration": 8,
"model_dims": 16,
"max_num_wordpieces": 8,
"max_num_applications": 8,
"max_num_numerator_nodes": 8,
"max_num_denominator_nodes": 8,
"max_num_rules": 8,
}
def get_test_bert_config():
return configs.BertConfig(
vocab_size=32,
hidden_size=8,
intermediate_size=8,
num_attention_heads=2,
num_hidden_layers=2)
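# Illustrative usage (not part of the original module):
#   tokenizer = MockTokenizer()
#   ids = tokenizer.convert_tokens_to_ids(
#       tokenizer.tokenize("what river traverses the most states"))
#   # -> [7, 8, 9, 10, 11, 12] given the _TOKENS ordering above.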
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/test_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the NQG neural parsing model.
The parsing model consists of a BERT encoder, feed forward layers to
compute vector representations for spans, and an embedding table for rules.
The model produces scores for anchored rule applications, which are based on
the span representations of the anchored span and a learned embedding for each
rule.
Note that the model is implemented in TensorFlow 2.x, based on the TF 2.x BERT
implementation here:
https://github.com/tensorflow/models/tree/master/official/nlp/bert
You can find documentation for downloading or converting BERT checkpoints to
be compatible with this implementation here:
https://github.com/tensorflow/models/tree/master/official/nlp/bert#pre-trained-models
"""
import tensorflow as tf
from official.nlp.bert import bert_models
def _feed_forward(output_dims, hidden_dims, name):
return tf.keras.Sequential([
tf.keras.layers.Dense(hidden_dims, activation="relu", name="%s_1" % name),
tf.keras.layers.Dense(output_dims, name="%s_2" % name)
],
name=name)
class ApplicationScoreLayer(tf.keras.layers.Layer):
"""Layer for computing scores for anchored rule applications.
Span begin and end indexes should both be *inclusive*, i.e.
for a span consisting of a single token the span begin and end indexes
will be the same.
It is up to the caller to establish consistent indexing of rules,
as this layer simply allocates an embedding table of size equal to the
max_num_rules in the config.
"""
def __init__(self, config):
super(ApplicationScoreLayer, self).__init__()
self.feed_forward = _feed_forward(
config["model_dims"], config["model_dims"], name="application_ffn")
self.span_feed_forward = _feed_forward(
1, config["model_dims"], name="span_ffn")
self.rule_embeddings = tf.keras.layers.Embedding(config["max_num_rules"],
config["model_dims"])
self.config = config
def score_application(self, wordpiece_encodings, application_span_begin,
application_span_end, application_rule_idx):
"""Computes scores for a single anchored rule applications.
Args:
wordpiece_encodings: <float>[max_num_wordpieces, bert_dims]
application_span_begin: <int>[1]
application_span_end: <int>[1]
application_rule_idx: <int>[1]
Returns:
application_score: <float>[1]
"""
# <float>[bert_dims]
span_begin_encoding = tf.gather(wordpiece_encodings, application_span_begin)
span_end_encoding = tf.gather(wordpiece_encodings, application_span_end)
# <float>[bert_dims * 2]
span_encoding = tf.concat([span_begin_encoding, span_end_encoding], axis=0)
# <float>[1, bert_dims * 2]
span_encoding = tf.expand_dims(span_encoding, 0)
# <float>[1, model_dims]
span_ffn_encoding = self.feed_forward(span_encoding)
# <float>[model_dims]
application_rule_embedddings = self.rule_embeddings(application_rule_idx)
# <float>[model_dims, 1]
application_rule_embedddings = tf.expand_dims(application_rule_embedddings,
1)
# <float>[1, 1]
application_score = tf.matmul(span_ffn_encoding,
application_rule_embedddings)
# <float>[]
application_score = tf.squeeze(application_score, [0, 1])
# <float>[1, 1]
span_score = self.span_feed_forward(span_encoding)
# <float>[]
span_score = tf.squeeze(span_score, [0, 1])
return application_score + span_score
def call(self, wordpiece_encodings, application_span_begin,
application_span_end, application_rule_idx):
"""Computes scores for a batch of anchored rule applications.
Args:
wordpiece_encodings: <float>[batch_size, max_num_wordpieces, bert_dims]
application_span_begin: <int>[batch_size, max_num_applications]
application_span_end: <int>[batch_size, max_num_applications]
application_rule_idx: <int>[batch_size, max_num_applications]
Returns:
application_scores: <float>[batch_size, max_num_applications]
"""
# <float>[batch_size, max_num_applications, bert_dims]
span_begin_encoding = tf.gather(
wordpiece_encodings, application_span_begin, batch_dims=1)
span_end_encoding = tf.gather(
wordpiece_encodings, application_span_end, batch_dims=1)
# <float>[batch_size, max_num_applications, bert_dims * 2]
span_encodings = tf.concat([span_begin_encoding, span_end_encoding], axis=2)
# <float>[batch_size, max_num_applications, model_dims]
span_encodings_ffn = self.feed_forward(span_encodings)
# <float>[batch_size, max_num_applications, 1, model_dims]
span_encodings_ffn = tf.expand_dims(span_encodings_ffn, 2)
# <float>[batch_size, max_num_applications, model_dims]
application_rule_embedddings = self.rule_embeddings(application_rule_idx)
# <float>[batch_size, max_num_applications, model_dims, 1]
application_rule_embedddings = tf.expand_dims(application_rule_embedddings,
3)
# <float>[batch_size, max_num_applications, 1, 1]
application_scores = tf.matmul(span_encodings_ffn,
application_rule_embedddings)
# <float>[batch_size, max_num_applications]
application_scores = tf.squeeze(application_scores, [2, 3])
# <float>[batch_size, max_num_applications, 1]
span_scores = self.span_feed_forward(span_encodings)
# <float>[batch_size, max_num_applications]
span_scores = tf.squeeze(span_scores, [2])
return application_scores + span_scores
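# Shape sketch for the batched call above (illustrative, not part of the
# original module), assuming config = {"model_dims": 16, "max_num_rules": 8}
# and bert_dims = 32:
#   layer = ApplicationScoreLayer(config)
#   encodings = tf.random.normal([2, 10, 32])  # [batch, wordpieces, bert_dims]
#   begin = tf.zeros([2, 4], dtype=tf.int32)
#   end = tf.ones([2, 4], dtype=tf.int32)
#   rule_idx = tf.zeros([2, 4], dtype=tf.int32)
#   layer(encodings, begin, end, rule_idx)  # -> scores of shape [2, 4]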
class Model(tf.keras.layers.Layer):
"""Defines NQG neural parsing model."""
def __init__(self, batch_size, config, bert_config, training, verbose=False):
super(Model, self).__init__()
self.config = config
self.bert_encoder = bert_models.get_transformer_encoder(
bert_config, sequence_length=self.config["max_num_wordpieces"])
self.application_score_layer = ApplicationScoreLayer(config)
self.training = training
self.batch_size = batch_size
def call(self, wordpiece_ids_batch, num_wordpieces, application_span_begin,
application_span_end, application_rule_idx):
"""Returns scores for a batch of anchored rule applications.
Args:
wordpiece_ids_batch: <int>[batch_size, max_num_wordpieces]
num_wordpieces: <int>[batch_size, 1]
application_span_begin: <int>[batch_size, max_num_applications]
application_span_end: <int>[batch_size, max_num_applications]
application_rule_idx: <int>[batch_size, max_num_applications]
Returns:
application_scores: <float>[batch_size, max_num_applications]
"""
wordpiece_encodings_batch = self.get_wordpiece_encodings(
wordpiece_ids_batch, num_wordpieces)
application_scores_batch = self.application_score_layer(
wordpiece_encodings_batch, application_span_begin, application_span_end,
application_rule_idx)
return application_scores_batch
def get_wordpiece_encodings(self, wordpiece_ids_batch, num_wordpieces):
"""Returns contextualized encodings for a batch of wordpieces.
Args:
wordpiece_ids_batch: <int>[batch_size, max_num_wordpieces]
num_wordpieces: <int>[batch_size, 1]
Returns:
wordpiece_encodings: <float>[batch_size, max_num_wordpieces, bert_dims]
"""
num_wordpieces = tf.squeeze(num_wordpieces, 1)
bert_input_mask = tf.sequence_mask(
num_wordpieces, self.config["max_num_wordpieces"], dtype=tf.int32)
bert_type_ids = tf.zeros(
shape=[self.batch_size, self.config["max_num_wordpieces"]],
dtype=tf.int32)
wordpiece_encodings_batch, unused_cls_output = self.bert_encoder(
[wordpiece_ids_batch, bert_input_mask, bert_type_ids],
training=self.training)
return wordpiece_encodings_batch
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/nqg_model.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function for loading config json file."""
import json
from tensorflow.io import gfile
def json_file_to_dict(json_file):
"""Constructs a dictionary from a json file."""
with gfile.GFile(json_file, "r") as reader:
text = reader.read()
return json.loads(text)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/config_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run model training.
CPU and parallel GPU training are supported. TPU training is not currently
supported.
"""
import os
from absl import app
from absl import flags
from absl import logging
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser import config_utils
from model.parser import nqg_model
from model.parser.training import input_utils
from model.parser.training import training_utils
import tensorflow as tf
from official.nlp import optimization
from official.nlp.bert import configs
FLAGS = flags.FLAGS
flags.DEFINE_string(
"input", "",
"TFRecord(s) of tf.Examples (use * for matching multiple files).")
flags.DEFINE_string("model_dir", "", "Directory to save model files.")
flags.DEFINE_string(
"bert_dir", "",
"Directory for BERT, including config and (optionally) checkpoint.")
flags.DEFINE_string("config", "", "Config json file.")
flags.DEFINE_bool("restore_checkpoint", False,
"Whether to restore checkpoint if one exists in model_dir.")
flags.DEFINE_bool(
"init_bert_checkpoint", True,
"If True, init from checkpoint in bert_dir, otherwise use random init.")
flags.DEFINE_bool("use_gpu", False, "Whether to use GPU for training.")
flags.DEFINE_bool("verbose", False, "Whether to print debug output.")
def train_model(strategy):
"""Run model training."""
config = config_utils.json_file_to_dict(FLAGS.config)
dataset_fn = input_utils.get_dataset_fn(FLAGS.input, config)
writer = tf.summary.create_file_writer(os.path.join(FLAGS.model_dir, "train"))
dataset_iterator = iter(
strategy.experimental_distribute_datasets_from_function(dataset_fn))
bert_config = configs.BertConfig.from_json_file(
os.path.join(FLAGS.bert_dir, "bert_config.json"))
logging.info("Loaded BERT config: %s", bert_config.to_dict())
batch_size = int(config["batch_size"] / strategy.num_replicas_in_sync)
logging.info("num_replicas: %s.", strategy.num_replicas_in_sync)
logging.info("per replica batch_size: %s.", batch_size)
with strategy.scope():
model = nqg_model.Model(
batch_size, config, bert_config, training=True, verbose=FLAGS.verbose)
optimizer = optimization.create_optimizer(config["learning_rate"],
config["training_steps"],
config["warmup_steps"])
train_for_n_steps_fn = training_utils.get_train_for_n_steps_fn(
strategy, optimizer, model)
if FLAGS.init_bert_checkpoint:
bert_checkpoint = tf.train.Checkpoint(model=model.bert_encoder)
bert_checkpoint_path = os.path.join(FLAGS.bert_dir, "bert_model.ckpt")
logging.info("Restoring bert checkpoint: %s", bert_checkpoint_path)
logging.info("Bert vars: %s", model.bert_encoder.trainable_variables)
logging.info("Checkpoint vars: %s",
tf.train.list_variables(bert_checkpoint_path))
status = bert_checkpoint.restore(bert_checkpoint_path).expect_partial()
status.assert_existing_objects_matched()
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
current_step = 0
if FLAGS.restore_checkpoint:
latest_checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir)
# TODO(petershaw): This is a hacky way to read current step.
current_step = int(latest_checkpoint.split("-")[-2])
logging.info("Restoring %s at step %s.", latest_checkpoint, current_step)
status = checkpoint.restore(latest_checkpoint)
status.assert_existing_objects_matched()
with writer.as_default():
while current_step < config["training_steps"]:
logging.info("current_step: %s.", current_step)
mean_loss = train_for_n_steps_fn(
dataset_iterator,
tf.convert_to_tensor(config["steps_per_iteration"], dtype=tf.int32))
tf.summary.scalar("loss", mean_loss, step=current_step)
current_step += config["steps_per_iteration"]
if current_step and current_step % config["save_checkpoint_every"] == 0:
checkpoint_prefix = os.path.join(FLAGS.model_dir,
"ckpt-%s" % current_step)
logging.info("Saving checkpoint to %s.", checkpoint_prefix)
checkpoint.save(file_prefix=checkpoint_prefix)
def main(unused_argv):
if FLAGS.use_gpu:
strategy = tf.distribute.MirroredStrategy()
logging.info("Number of devices: %d", strategy.num_replicas_in_sync)
train_model(strategy)
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
train_model(strategy)
if __name__ == "__main__":
app.run(main)
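# Example invocation (paths are hypothetical; `input` points at TFRecords of
# training examples and `config` at a json file providing the keys used
# above, e.g. batch_size, learning_rate, training_steps, warmup_steps,
# steps_per_iteration, save_checkpoint_every):
#   python -m model.parser.training.train_model \
#     --input=train.tfrecord --model_dir=/tmp/nqg_model \
#     --bert_dir=/path/to/bert --config=config.json --use_gpu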
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/training/train_model.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for iterating over serialized parse forests in TensorFlow."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser.data import data_constants
import tensorflow as tf
def get_forest_score_function(verbose=False):
"""Return forest_score_function."""
# TODO(petershaw): In order to use TPU, it is likely necessary to consider
# max_num_nodes as another input argument to initialize the arrays and
# while loop.
# However, this appears to still be insufficient for TPU compilation,
# so this requires further investigation.
@tf.function
def forest_score_function(application_scores, num_nodes, node_type_list,
node_1_idx_list, node_2_idx_list,
node_application_idx_list):
"""Iterate over nodes in forest and return score for root.
Note that the returned score is not exponentiated, i.e. it is equivalent to
the log of the sum of the exponentiated scores for individual parses in
the forest:
log(sum over parses(exp(sum over applications in parse(application score))))
This function benefits from dynamic programming to compute this sum more
efficiently.
Also note that input arguments should not be batched. This function could
potentially be made more efficient by implementing a batched version of
this computation. However, the computation in this function is limited to:
    1. Control flow (while loop) and TensorArray read/write operations
    2. Gather operations over application_scores
    3. Summation and logsumexp
So the overall amount of computation should be small relative to
large encoders and computation of application_scores. Using an
implementation that is not batched also allows for returning early
for examples where the number of nodes is less than the maximum limit.
Args:
application_scores: <float>[max_num_applications] of raw scores (not
exponentiated) for anchored rule applications.
num_nodes: Integer number of nodes. By convention, the final non-padding
node is the root node and should correspond to the `num_nodes - 1` index
of the 4 `node_x` input tensors below.
node_type_list: <int>[max_num_nodes].
node_1_idx_list: <int>[max_num_nodes].
node_2_idx_list: <int>[max_num_nodes].
node_application_idx_list: <int>[max_num_nodes].
Returns:
Score for root node (see description above).
"""
if verbose:
tf.print("application_scores:", application_scores, summarize=1000)
# Write once / read array storing scores for each node.
# Note that the scores are not exponentiated.
node_array = tf.TensorArray(
tf.float32,
size=num_nodes,
dynamic_size=False,
clear_after_read=False,
element_shape=[])
# Return early, i.e. iterate only for num_nodes not max_num_nodes.
for idx in tf.range(num_nodes):
node_type = node_type_list[idx]
node_1_idx = node_1_idx_list[idx]
node_2_idx = node_2_idx_list[idx]
node_application_idx = node_application_idx_list[idx]
if verbose:
tf.print("idx:", idx)
tf.print("node_type:", node_type)
tf.print("node_1_idx:", node_1_idx)
tf.print("node_2_idx:", node_2_idx)
tf.print("node_application_idx:", node_application_idx)
if node_type == data_constants.RULE_APPLICATION:
score = 0.0
# All rule application nodes are associated with some application
# score.
score += application_scores[node_application_idx]
# Additionally, we add the scores for any children.
if node_1_idx != -1:
score += node_array.read(node_1_idx)
if node_2_idx != -1:
score += node_array.read(node_2_idx)
node_array = node_array.write(idx, score)
if verbose:
tf.print("Write RULE_APPLICATION node: ", idx, score)
elif node_type == data_constants.AGGREGATION:
# Merge nodes for common sub-trees.
node_1_score = node_array.read(node_1_idx)
node_2_score = node_array.read(node_2_idx)
# Use logsumexp trick for stable calculation.
score = tf.math.reduce_logsumexp(tf.stack([node_1_score, node_2_score]))
node_array = node_array.write(idx, score)
if verbose:
tf.print("Write AGGREGATION node: ", idx, score)
# Return final score (note that it is not exponentiated).
return node_array.read(num_nodes - 1)
return forest_score_function
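# Worked example (illustrative): consider a forest with two alternative
# single-application parses of the same span. Nodes 0 and 1 are
# RULE_APPLICATION nodes pointing at application scores s0 and s1 (no
# children, so their node_1_idx and node_2_idx are -1), and node 2 is an
# AGGREGATION node over nodes 0 and 1. The returned root score is then
# logsumexp(s0, s1); e.g. for s0 = 1.0 and s1 = 2.0 the result is
# log(e**1.0 + e**2.0) ~= 2.31.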
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/training/forest_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to define model training loop."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser.training import forest_utils
import tensorflow as tf
def get_training_step(optimizer, model, verbose=False):
"""Get training step function."""
forest_score_function = forest_utils.get_forest_score_function(
verbose=verbose)
def training_step(inputs):
"""Executes a step of training."""
with tf.GradientTape() as tape:
loss = tf.constant(0.0, dtype=tf.float32)
application_scores_batch = model(inputs["wordpiece_ids"],
inputs["num_wordpieces"],
inputs["application_span_begin"],
inputs["application_span_end"],
inputs["application_rule_idx"])
nu_num_nodes_batch = tf.squeeze(inputs["nu_num_nodes"], 1)
de_num_nodes_batch = tf.squeeze(inputs["de_num_nodes"], 1)
with tf.name_scope("forest_score"):
# TODO(petershaw): Consider a batched implementation of
# forest_score_function to avoid iteration over examples in the batch.
for idx in tf.range(model.batch_size):
application_scores = application_scores_batch[idx]
nu_node_type = inputs["nu_node_type"][idx]
nu_node_1_idx = inputs["nu_node_1_idx"][idx]
nu_node_2_idx = inputs["nu_node_2_idx"][idx]
nu_application_idx = inputs["nu_application_idx"][idx]
nu_num_nodes = nu_num_nodes_batch[idx]
# Log score for numerator (sum over derivations of target).
nu_score = forest_score_function(application_scores, nu_num_nodes,
nu_node_type, nu_node_1_idx,
nu_node_2_idx, nu_application_idx)
de_node_type = inputs["de_node_type"][idx]
de_node_1_idx = inputs["de_node_1_idx"][idx]
de_node_2_idx = inputs["de_node_2_idx"][idx]
de_application_idx = inputs["de_application_idx"][idx]
de_num_nodes = de_num_nodes_batch[idx]
# Log score for denominator (partition function).
de_score = forest_score_function(application_scores, de_num_nodes,
de_node_type, de_node_1_idx,
de_node_2_idx, de_application_idx)
# -log(numerator/denominator) = log(denominator) - log(numerator)
example_loss = de_score - nu_score
loss += example_loss
loss /= tf.cast(model.batch_size, dtype=tf.float32)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return loss
return training_step
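# A small numeric sketch (hypothetical values) of the per-example loss used
# above: both forest scores are log-domain quantities, so the negative log
# likelihood -log(numerator / denominator) is simply their difference.
def _toy_loss_sketch():
  import math
  nu_score = math.log(0.2)  # log-sum over derivations of the target
  de_score = math.log(1.0)  # log partition function over all derivations
  example_loss = de_score - nu_score
  assert abs(example_loss - (-math.log(0.2))) < 1e-9
  return example_loss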
def get_train_for_n_steps_fn(strategy, optimizer, model):
"""Return train_for_n_steps_fn."""
training_step = get_training_step(optimizer, model)
@tf.function
def train_for_n_steps_fn(iterator, steps):
mean_loss = tf.constant(0.0, dtype=tf.float32)
for _ in tf.range(steps):
inputs = next(iterator)
loss = strategy.run(training_step, args=(inputs,))
mean_loss += strategy.reduce(tf.distribute.ReduceOp.MEAN, loss, axis=None)
mean_loss /= tf.cast(steps, dtype=tf.float32)
return mean_loss
return train_for_n_steps_fn
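# A hedged usage sketch: assuming `model`, `optimizer`, and `dataset_fn` are
# already constructed (the model and optimizer under `strategy.scope()`), the
# helpers above could be wired together roughly as follows. Checkpointing and
# logging are omitted.
def _example_training_loop(strategy, optimizer, model, dataset_fn,
                           num_iterations=10, steps_per_iteration=100):
  train_for_n_steps_fn = get_train_for_n_steps_fn(strategy, optimizer, model)
  dist_dataset = strategy.distribute_datasets_from_function(dataset_fn)
  iterator = iter(dist_dataset)
  for _ in range(num_iterations):
    mean_loss = train_for_n_steps_fn(iterator, tf.constant(steps_per_iteration))
    print("mean loss: %s" % float(mean_loss))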
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/training/training_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for input pipeline.
The input pipeline should be both GPU and TPU friendly.
"""
import tensorflow as tf
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but tf.int32 can be faster and more
# memory efficient on certain hardware.
for name in list(example.keys()):
tensor = example[name]
if tensor.dtype == tf.int64:
tensor = tf.cast(tensor, dtype=tf.int32)
example[name] = tensor
return example
def _create_int_feature(length):
return tf.io.FixedLenFeature([length], tf.int64)
def create_training_dataset(input_file, batch_size, config):
"""Returns `tf.data.Dataset` for training."""
name_to_features = {}
name_to_features["wordpiece_ids"] = _create_int_feature(
config["max_num_wordpieces"])
name_to_features["num_wordpieces"] = _create_int_feature(1)
name_to_features["application_span_begin"] = _create_int_feature(
config["max_num_applications"])
name_to_features["application_span_end"] = _create_int_feature(
config["max_num_applications"])
name_to_features["application_rule_idx"] = _create_int_feature(
config["max_num_applications"])
name_to_features["nu_node_type"] = _create_int_feature(
config["max_num_numerator_nodes"])
name_to_features["nu_node_1_idx"] = _create_int_feature(
config["max_num_numerator_nodes"])
name_to_features["nu_node_2_idx"] = _create_int_feature(
config["max_num_numerator_nodes"])
name_to_features["nu_application_idx"] = _create_int_feature(
config["max_num_numerator_nodes"])
name_to_features["nu_num_nodes"] = _create_int_feature(1)
name_to_features["de_node_type"] = _create_int_feature(
config["max_num_denominator_nodes"])
name_to_features["de_node_1_idx"] = _create_int_feature(
config["max_num_denominator_nodes"])
name_to_features["de_node_2_idx"] = _create_int_feature(
config["max_num_denominator_nodes"])
name_to_features["de_application_idx"] = _create_int_feature(
config["max_num_denominator_nodes"])
name_to_features["de_num_nodes"] = _create_int_feature(1)
if "*" in input_file:
# Potentially match multiple input files.
files = tf.io.matching_files(input_file)
files = tf.random.shuffle(files)
shards = tf.data.Dataset.from_tensor_slices(files)
dataset = shards.interleave(tf.data.TFRecordDataset)
else:
# Only using single input file.
dataset = tf.data.TFRecordDataset(input_file)
dataset = dataset.repeat()
dataset = dataset.shuffle(buffer_size=1000)
decode_fn = lambda record: _decode_record(record, name_to_features)
dataset = dataset.map(
decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Disable auto-sharding so each worker reads the full dataset (which may be a
# single file).
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
tf.data.experimental.AutoShardPolicy.OFF)
dataset = dataset.with_options(options)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(1024)
return dataset
def get_dataset_fn(input_file, config):
"""Gets a closure to create a dataset.."""
global_batch_size = config["batch_size"]
def dataset_fn(ctx=None):
"""Returns tf.data.Dataset for distributed BERT pretraining."""
batch_size = ctx.get_per_replica_batch_size(
global_batch_size) if ctx else global_batch_size
dataset = create_training_dataset(input_file, batch_size, config)
return dataset
return dataset_fn
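# Hypothetical example of the `config` dict consumed above; only the padding
# sizes and batch size referenced in this module are required, and the values
# here are illustrative rather than the ones used for the reported experiments.
_EXAMPLE_CONFIG = {
    "batch_size": 32,
    "max_num_wordpieces": 128,
    "max_num_applications": 64,
    "max_num_numerator_nodes": 256,
    "max_num_denominator_nodes": 512,
}
# Example: dataset_fn = get_dataset_fn("train.tfrecord*", _EXAMPLE_CONFIG)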
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/training/input_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating one-best targets given neural scoring model."""
import collections
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.qcfg import qcfg_parser
from model.qcfg import qcfg_rule
ScoredAnchoredRuleApplication = collections.namedtuple(
"ScoredAnchoredRuleApplication",
[
"rule", # QCFGRule.
"span_begin", # Integer.
"span_end", # Integer.
"score", # Float.
])
class ScoredChartNode(object):
"""Represents node in chart."""
def __init__(self, score_fn, span_begin, span_end, rule, children):
# Get score.
application_score = score_fn(rule, span_begin, span_end)
self.score = application_score
for node in children:
self.score += node.score
# Get target string.
target_string = qcfg_rule.apply_target(
rule, [node.target_string for node in children])
self.target_string = target_string
application = ScoredAnchoredRuleApplication(rule, span_begin, span_end,
application_score)
# List of ScoredAnchoredRuleApplication, which can be used to inspect
# parse tree for a given prediction.
self.applications = [application]
for node in children:
for application in node.applications:
self.applications.append(application)
def __str__(self):
return "%s (%s) [%s]" % (self.target_string, self.score, self.applications)
def __repr__(self):
return self.__str__()
def get_node_fn(score_fn):
"""Return node_fn."""
def node_fn(span_begin, span_end, rule, children):
return ScoredChartNode(score_fn, span_begin, span_end, rule, children)
return node_fn
def postprocess_cell_fn(nodes):
if not nodes:
return []
# Prune all nodes except the highest scoring node.
sorted_nodes = sorted(nodes, key=lambda x: -x.score)
return [sorted_nodes[0]]
def run_inference(source, rules, score_fn):
"""Determine one-best parse using score_fn.
Args:
source: Input string.
rules: Set of QCFGRules.
score_fn: Function with inputs (rule, span_begin, span_end) and returns
float score for a given anchored rule application. Note that `span_begin`
and `span_end` refer to token indexes, where span_end is exclusive, and
`rule` is a QCFGRule.
Returns:
(target string, score) for the highest scoring derivation, or (None, None)
if there is no derivation for the given source.
"""
tokens = source.split(" ")
node_fn = get_node_fn(score_fn)
nodes = qcfg_parser.parse(
tokens, rules, node_fn=node_fn, postprocess_cell_fn=postprocess_cell_fn)
if not nodes:
return None, None
if len(nodes) > 1:
raise ValueError("Multiple nodes returned for inference: %s" % nodes)
return nodes[0].target_string, nodes[0].score
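# `score_fn` only needs the (rule, span_begin, span_end) -> float interface
# described in the docstring above. A purely hypothetical heuristic scorer
# (the real scores come from the neural model) could look like this:
def _example_span_length_score_fn(rule, span_begin, span_end):
  del rule  # Unused in this toy scorer.
  return float(span_end - span_begin)
# Example: target, score = run_inference(source, rules,
#                                        _example_span_length_score_fn)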
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/inference/inference_parser.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary to generate predicted targets given input txt file of sources.
An input txt file of sources can be generated from a TSV file using
the `nqg/tasks/strip_targets.py` script.
This binary also supports settings such as NQG-T5, where predictions from T5
are used when NQG does not produce an output. Such 'fallback' predictions can
be supplied via the `--fallback_predictions` flag.
"""
import os
import pdb
from absl import app
from absl import flags
import sys
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser import config_utils
from model.parser.data import tokenization_utils
from model.parser.inference import inference_wrapper
from model.parser.inference.targets import target_grammar
from model.qcfg import qcfg_file
import tensorflow as tf
from official.nlp.bert import configs
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input txt file for sources.")
flags.DEFINE_string("output", "", "Output txt file for predicted targets.")
flags.DEFINE_bool("verbose", True, "Whether to print debug output.")
flags.DEFINE_string("model_dir", "", "Model directory.")
flags.DEFINE_string("checkpoint", "", "Checkpoint prefix, or None for latest.")
flags.DEFINE_string("config", "", "Config file.")
flags.DEFINE_string(
"bert_dir", "",
"Directory for BERT vocab, config, and (optionally) checkpoint.")
flags.DEFINE_string("rules", "", "QCFG rules txt file.")
flags.DEFINE_string("fallback_predictions", "",
"Optional fallback predictions txt file.")
flags.DEFINE_string("target_grammar", "", "Optional target CFG.")
def get_checkpoint():
if FLAGS.checkpoint:
return os.path.join(FLAGS.model_dir, FLAGS.checkpoint)
else:
return tf.train.latest_checkpoint(FLAGS.model_dir)
def get_inference_wrapper(config):
"""Construct and return InferenceWrapper."""
rules = qcfg_file.read_rules(FLAGS.rules)
tokenizer = tokenization_utils.get_tokenizer(
os.path.join(FLAGS.bert_dir, "vocab.txt"))
bert_config = configs.BertConfig.from_json_file(
os.path.join(FLAGS.bert_dir, "bert_config.json"))
target_grammar_rules = None
if FLAGS.target_grammar:
target_grammar_rules = target_grammar.load_rules_from_file(
FLAGS.target_grammar)
wrapper = inference_wrapper.InferenceWrapper(tokenizer, rules, config,
bert_config,
target_grammar_rules)
# Restore checkpoint.
checkpoint = get_checkpoint()
print("Loading from checkpoint: %s" % checkpoint)
wrapper.restore_checkpoint(checkpoint)
return wrapper
def get_predicted_target(wrapper, source, fallback_prediction):
nqg_prediction, _ = wrapper.get_output(source)
if nqg_prediction is None:
return fallback_prediction
else:
return nqg_prediction
def get_fallback_predictions(sources):
"""Return List of fallback predictions or List of `None` if not provided."""
if FLAGS.fallback_predictions:
fallback_predictions = []
with tf.io.gfile.GFile(FLAGS.fallback_predictions, "r") as predictions_file:
for line in predictions_file:
fallback_predictions.append(line.rstrip())
if len(sources) != len(fallback_predictions):
raise ValueError(
"Number of inputs != number of fallback predictions: %s vs. %s." %
(len(sources), len(fallback_predictions)))
return fallback_predictions
else:
return [None] * len(sources)
def main(unused_argv):
config = config_utils.json_file_to_dict(FLAGS.config)
wrapper = get_inference_wrapper(config)
sources = []
with tf.io.gfile.GFile(FLAGS.input, "r") as input_file:
for line in input_file:
sources.append(line.rstrip())
fallback_predictions = get_fallback_predictions(sources)
with tf.io.gfile.GFile(FLAGS.output, "w") as output_file:
for source, fallback_prediction in zip(sources, fallback_predictions):
try:
predicted_target = get_predicted_target(wrapper, source,
fallback_prediction)
except Exception:  # Parsing can fail, e.g. on inputs longer than the configured max wordpieces.
predicted_target = None
output_file.write("%s\n" % predicted_target)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/inference/generate_predictions.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for generating predictions with NQG model."""
import sys
import os
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser import nqg_model
from model.parser.data import example_converter
from model.parser.data import tokenization_utils
from model.parser.inference import inference_parser
from model.parser.inference.targets import target_grammar
import tensorflow as tf
import pdb
def _convert_to_int_tensor(values, padded_length):
if len(values) > padded_length:
raise ValueError("length %s is > %s" % (len(values), padded_length))
for _ in range(len(values), padded_length):
values.append(0)
# Add outer dimension for batch size of 1.
feature = tf.convert_to_tensor([values])
return feature
def _get_score_fn(wordpiece_encodings, rules, model, token_start_wp_idx,
token_end_wp_idx):
"""Return score_fn."""
# Assigns same rule to idx mapping as used for training.
rule_key_to_idx_map = example_converter.get_rule_to_idx_map(rules)
def score_fn(rule, span_begin, span_end):
"""Returns scalar score for anchored rule application."""
application_span_begin = token_start_wp_idx[span_begin]
# Need to convert between token index used by QCFG rules,
# and wordpiece indexes used by neural model.
# token_end_wp_idx is an *inclusive* idx.
# span_end is an *exclusive* idx.
# application_span_end is an *inclusive* idx.
application_span_end = token_end_wp_idx[span_end - 1]
application_rule_idx = rule_key_to_idx_map[rule]
application_score = model.application_score_layer.score_application(
wordpiece_encodings, application_span_begin, application_span_end,
application_rule_idx)
return application_score.numpy()
return score_fn
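# A worked example (hypothetical wordpieces) of the index conversion above:
# tokens ["what", "rivers"] expanding to wordpieces ["what", "ri", "##vers"]
# give token_start_wp_idx = [0, 1] and token_end_wp_idx = [0, 2], both
# inclusive. For the token span (span_begin=0, span_end=2), exclusive on the
# right, the anchored application spans wordpieces 0 through 2.
def _span_index_conversion_example():
  token_start_wp_idx = [0, 1]
  token_end_wp_idx = [0, 2]
  span_begin, span_end = 0, 2
  application_span_begin = token_start_wp_idx[span_begin]
  application_span_end = token_end_wp_idx[span_end - 1]
  assert (application_span_begin, application_span_end) == (0, 2)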
class InferenceWrapper(object):
"""Provides interface for inference."""
def __init__(self,
tokenizer,
rules,
config,
bert_config,
target_grammar_rules=None,
verbose=False):
self.tokenizer = tokenizer
self.config = config
self.batch_size = 1
self.model = nqg_model.Model(
self.batch_size, config, bert_config, training=False)
self.checkpoint = tf.train.Checkpoint(model=self.model)
self.rules = rules
self.target_grammar_rules = target_grammar_rules
self.verbose = verbose
def restore_checkpoint(self, latest_checkpoint):
"""Restore model parameters from checkpoint."""
status = self.checkpoint.restore(latest_checkpoint)
status.assert_existing_objects_matched()
print("Restored checkpoint: %s" % latest_checkpoint)
def get_output(self, source):
"""Returns (one-best target string, score) or (None, None)."""
# Tokenize.
tokens = source.split(" ")
(wordpiece_ids, num_wordpieces, token_start_wp_idx,
token_end_wp_idx) = tokenization_utils.get_wordpiece_inputs(
tokens, self.tokenizer, self.config["max_num_wordpieces"])
# pdb.set_trace()
wordpieces_batch = _convert_to_int_tensor(wordpiece_ids,
self.config["max_num_wordpieces"])
# Run encoder.
wordpiece_encodings_batch = self.model.get_wordpiece_encodings(
wordpieces_batch, [[num_wordpieces]])
wordpiece_encodings = wordpiece_encodings_batch[0]
# Create score_fn.
score_fn = _get_score_fn(wordpiece_encodings, self.rules, self.model,
token_start_wp_idx, token_end_wp_idx)
# Run parser.
target_string, score = inference_parser.run_inference(
source, self.rules, score_fn)
# Validate target if target CFG provided.
if (target_string and self.target_grammar_rules and
not target_grammar.can_parse(target_string, self.target_grammar_rules)):
if self.verbose:
print("Invalid target: %s" % target_string)
return None, None
return target_string, score
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/inference/inference_wrapper.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright (c) Meta Platforms, Inc. and affiliates All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary to evaluate model.
This binary can also be configured to run alongside a training job
and poll for new model checkpoints, writing eval metrics (e.g. for TensorBoard).
This binary also supports evaluations for settings such as NQG-T5, where
predictions from T5 are used when NQG does not produce an output. Such
'fallback' predictions can be supplied via the `--fallback_predictions` flag.
"""
import os
import time
import pdb
from absl import app
from absl import flags
import sys
sys.path.append(os.getenv("BASE_DIR")+"/baseline_replication/TMCD")
from model.parser import config_utils
from model.parser.data import tokenization_utils
from model.parser.inference import inference_wrapper
from model.parser.inference.targets import target_grammar
from model.qcfg import qcfg_file
from tasks import tsv_utils
import tensorflow as tf
from official.nlp.bert import configs
FLAGS = flags.FLAGS
flags.DEFINE_string("input", "", "Input tsv file.")
flags.DEFINE_integer("limit", 0,
"Index of example to begin processing (Ignored if 0).")
flags.DEFINE_integer("offset", 0,
"Index of example to end processing (Ignored if 0).")
flags.DEFINE_bool("verbose", True, "Whether to print debug output.")
flags.DEFINE_string("model_dir", "", "Model directory.")
flags.DEFINE_bool("poll", False, "Whether to poll.")
flags.DEFINE_bool("write", False, "Whether to write metrics to model_dir.")
flags.DEFINE_string("subdir", "eval_test",
"Sub-directory of model_dir for writing metrics.")
flags.DEFINE_string("checkpoint", "", "Checkpoint prefix, or None for latest.")
flags.DEFINE_string("config", "", "Config file.")
flags.DEFINE_string("bert_dir", "",
"Directory for BERT, including vocab and config.")
flags.DEFINE_string("rules", "", "QCFG rules txt file.")
flags.DEFINE_string("fallback_predictions", "",
"Optional fallback predictions txt file.")
flags.DEFINE_string("target_grammar", "", "Optional target CFG.")
def compute_metrics(wrapper, examples):
"""Compute accuracy on examples."""
# Initialize stats.
num_examples = 0
num_nqg_correct = 0
num_nqg_predictions = 0
num_fallback_correct = 0
num_hybrid_correct = 0
# pdb.set_trace()
fallback_predictions = None
if FLAGS.fallback_predictions:
fallback_predictions = []
predictions_path = FLAGS.fallback_predictions
print("Prediction file: ", predictions_path)
with tf.io.gfile.GFile(predictions_path, "r") as predictions_file:
for line in predictions_file:
fallback_predictions.append(line.rstrip())
for idx, example in enumerate(examples):
if FLAGS.offset and idx < FLAGS.offset:
continue
if FLAGS.limit and idx >= FLAGS.limit:
break
if FLAGS.verbose:
print("Processing example %s: %s" % (idx, example[0]))
num_examples += 1
source = example[0]
gold_target = example[1]
nqg_prediction, _ = wrapper.get_output(source)
# try:
# nqg_prediction, _ = wrapper.get_output(source)
# except:
# # The model cannot handle wordpieces that are too long
# # Skip the ones that are longer than max wordpieces
# nqg_prediction = None
if nqg_prediction:
num_nqg_predictions += 1
if nqg_prediction is not None and nqg_prediction.replace(" ", "") == gold_target.replace(" ", ""):
num_nqg_correct += 1
else:
if FLAGS.verbose:
print("nqg incorrect (gold vs. predicted):\n%s\n%s\n" %
(gold_target, nqg_prediction))
fallback_prediction = (
fallback_predictions[idx] if fallback_predictions else None)
if fallback_prediction is not None and fallback_prediction.replace(" ", "") == gold_target.replace(" ", ""):
num_fallback_correct += 1
else:
if FLAGS.verbose:
print("fallback incorrect (gold vs. predicted):\n%s\n%s\n" %
(gold_target, fallback_prediction))
hybrid_prediction = nqg_prediction or fallback_prediction
if hybrid_prediction is None:
print("None hybrid prediction, fallback pred: ", fallback_prediction)
if hybrid_prediction is not None and hybrid_prediction.replace(" ", "") == gold_target.replace(" ", ""):
num_hybrid_correct += 1
if FLAGS.verbose:
print("hybrid correct.")
else:
if FLAGS.verbose:
print("hybrid incorrect.")
metrics_dict = {
"nqg_accuracy": float(num_nqg_correct) / float(num_examples),
"fallback_accuracy": float(num_fallback_correct) / float(num_examples),
"hybrid_accuracy": float(num_hybrid_correct) / float(num_examples),
"nqg_coverage": float(num_nqg_predictions) / float(num_examples),
"nqg_precision": float(num_nqg_correct) / float(num_nqg_predictions) if num_nqg_predictions != 0 else 0,
}
if FLAGS.verbose:
print("num_examples: %s" % num_examples)
print("num_nqg_correct: %s" % num_nqg_correct)
print("num_nqg_predictions: %s" % num_nqg_predictions)
print("num_fallback_correct: %s" % num_fallback_correct)
print("num_hybrid_correct: %s" % num_hybrid_correct)
print("metrics_dict: %s" % metrics_dict)
return metrics_dict
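# Illustration with hypothetical counts: for 10 examples where NQG produces 8
# predictions of which 6 are correct, and the NQG-or-fallback hybrid gets 7
# correct, the metrics above evaluate to:
#   nqg_accuracy    = 6 / 10 = 0.6
#   nqg_coverage    = 8 / 10 = 0.8
#   nqg_precision   = 6 / 8  = 0.75
#   hybrid_accuracy = 7 / 10 = 0.7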
def get_summary_writer():
if not FLAGS.write:
return None
return tf.summary.create_file_writer(
os.path.join(FLAGS.model_dir, FLAGS.subdir))
def write_metric(writer, name, metric, step):
with writer.as_default():
tf.summary.scalar(name, metric, step=step)
def get_checkpoint():
"""Return checkpoint path and step, or (None, None)."""
if FLAGS.checkpoint:
checkpoint = os.path.join(FLAGS.model_dir, FLAGS.checkpoint)
else:
checkpoint = tf.train.latest_checkpoint(FLAGS.model_dir)
# TODO(petershaw): Consider less hacky way to get current step.
step = None
if checkpoint is not None:
step = int(checkpoint.split("-")[-2])
print("Using checkpoint %s at step %s" % (checkpoint, step))
return checkpoint, step
def get_inference_wrapper(config):
"""Construct and return InferenceWrapper."""
rules = qcfg_file.read_rules(FLAGS.rules)
tokenizer = tokenization_utils.get_tokenizer(
os.path.join(FLAGS.bert_dir, "vocab.txt"))
bert_config = configs.BertConfig.from_json_file(
os.path.join(FLAGS.bert_dir, "bert_config.json"))
target_grammar_rules = None
if FLAGS.target_grammar:
target_grammar_rules = target_grammar.load_rules_from_file(
FLAGS.target_grammar)
wrapper = inference_wrapper.InferenceWrapper(tokenizer, rules, config,
bert_config,
target_grammar_rules)
return wrapper
def run_inference(writer, wrapper, examples, checkpoint, step=None):
"""Run inference."""
wrapper.restore_checkpoint(checkpoint)
metrics_dict = compute_metrics(wrapper, examples)
for metric_name, metric_value in metrics_dict.items():
print("%s at %s: %s" % (metric_name, step, metric_value))
if FLAGS.write:
write_metric(writer, metric_name, metric_value, step)
def main(unused_argv):
config = config_utils.json_file_to_dict(FLAGS.config)
wrapper = get_inference_wrapper(config)
examples = tsv_utils.read_tsv(FLAGS.input)
writer = get_summary_writer()
if FLAGS.poll:
last_checkpoint = None
while True:
checkpoint, step = get_checkpoint()
if checkpoint == last_checkpoint:
print("Waiting for new checkpoint...\nLast checkpoint: %s" %
last_checkpoint)
else:
run_inference(writer, wrapper, examples, checkpoint, step=step)
last_checkpoint = checkpoint
if step and step >= config["training_steps"]:
# Stop eval job after completing eval for last training step.
break
time.sleep(10)
else:
checkpoint, _ = get_checkpoint()
run_inference(writer, wrapper, examples, checkpoint)
if __name__ == "__main__":
app.run(main)
|
CompGenRep_MLRC2022-main
|
baseline_replication/TMCD/model/parser/inference/eval_model.py
|