Code | Summary
---|---|
Please provide a description of the function:def batch_indices(batch_nb, data_length, batch_size):
  # Batch start and end index
  start = int(batch_nb * batch_size)
  end = int((batch_nb + 1) * batch_size)
  # When there are not enough inputs left, we reuse some to complete the
  # batch
  if end > data_length:
    shift = end - data_length
    start -= shift
    end -= shift
  return start, end
|
[
"\n This helper function computes a batch start and end index\n :param batch_nb: the batch number\n :param data_length: the total length of the data being parsed by batches\n :param batch_size: the number of inputs in each batch\n :return: pair of (start, end) indices\n "
] |
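A quick arithmetic sketch of the wrap-around behavior described above (the values follow directly from the code):
batch_indices(0, 10, 4)  # (0, 4)
batch_indices(1, 10, 4)  # (4, 8)
batch_indices(2, 10, 4)  # (6, 10): shifted back from (8, 12) to reuse inputs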
Please provide a description of the function:def other_classes(nb_classes, class_ind):
  if class_ind < 0 or class_ind >= nb_classes:
    error_str = "class_ind must be within the range (0, nb_classes - 1)"
    raise ValueError(error_str)
  other_classes_list = list(range(nb_classes))
  other_classes_list.remove(class_ind)
  return other_classes_list
|
[
"\n Returns a list of class indices excluding the class indexed by class_ind\n :param nb_classes: number of classes in the task\n :param class_ind: the class index to be omitted\n :return: list of class indices excluding the class indexed by class_ind\n "
] |
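For example:
other_classes(4, 1)  # [0, 2, 3]
other_classes(4, 5)  # raises ValueError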
Please provide a description of the function:def to_categorical(y, nb_classes, num_classes=None):
  if num_classes is not None:
    if nb_classes is not None:
      raise ValueError("Should not specify both nb_classes and its deprecated "
                       "alias, num_classes")
    warnings.warn("`num_classes` is deprecated. Switch to `nb_classes`."
                  " `num_classes` may be removed on or after 2019-04-23.")
    nb_classes = num_classes
    del num_classes
  y = np.array(y, dtype='int').ravel()
  n = y.shape[0]
  categorical = np.zeros((n, nb_classes))
  categorical[np.arange(n), y] = 1
  return categorical
|
[
"\n Converts a class vector (integers) to binary class matrix.\n This is adapted from the Keras function with the same name.\n :param y: class vector to be converted into a matrix\n (integers from 0 to nb_classes).\n :param nb_classes: nb_classes: total number of classes.\n :param num_classses: depricated version of nb_classes\n :return: A binary matrix representation of the input.\n "
] |
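A small worked example (assuming numpy is imported as np, as the function itself requires):
to_categorical([0, 2, 1], nb_classes=3)
# array([[1., 0., 0.],
#        [0., 0., 1.],
#        [0., 1., 0.]])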
Please provide a description of the function:def random_targets(gt, nb_classes):
  # If the ground truth labels are encoded as one-hot, convert to labels.
  if len(gt.shape) == 2:
    gt = np.argmax(gt, axis=1)
  # This vector will hold the randomly selected labels.
  result = np.zeros(gt.shape, dtype=np.int32)
  for class_ind in xrange(nb_classes):
    # Compute all indices in that class.
    in_cl = gt == class_ind
    size = np.sum(in_cl)
    # Compute the set of potential targets for this class.
    potential_targets = other_classes(nb_classes, class_ind)
    # Draw with replacement random targets among the potential targets.
    result[in_cl] = np.random.choice(potential_targets, size=size)
  # Encode vector of random labels as one-hot labels.
  result = to_categorical(result, nb_classes)
  result = result.astype(np.int32)
  return result
|
[
"\n Take in an array of correct labels and randomly select a different label\n for each label in the array. This is typically used to randomly select a\n target class in targeted adversarial examples attacks (i.e., when the\n search algorithm takes in both a source class and target class to compute\n the adversarial example).\n :param gt: the ground truth (correct) labels. They can be provided as a\n 1D vector or 2D array of one-hot encoded labels.\n :param nb_classes: The number of classes for this task. The random class\n will be chosen between 0 and nb_classes such that it\n is different from the correct class.\n :return: A numpy array holding the randomly-selected target classes\n encoded as one-hot labels.\n "
] |
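A minimal sketch of the guarantee described above, namely that every sampled target differs from its ground-truth class:
import numpy as np
gt = np.array([0, 1, 1, 2])
targets = random_targets(gt, nb_classes=3)  # one-hot, shape (4, 3)
assert not np.any(np.argmax(targets, axis=1) == gt)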
Please provide a description of the function:def pair_visual(*args, **kwargs):
warnings.warn("`pair_visual` has moved to `cleverhans.plot.pyplot_image`. "
"cleverhans.utils.pair_visual may be removed on or after "
"2019-04-24.")
from cleverhans.plot.pyplot_image import pair_visual as new_pair_visual
return new_pair_visual(*args, **kwargs)
|
[
"Deprecation wrapper"
] |
Please provide a description of the function:def grid_visual(*args, **kwargs):
warnings.warn("`grid_visual` has moved to `cleverhans.plot.pyplot_image`. "
"cleverhans.utils.grid_visual may be removed on or after "
"2019-04-24.")
from cleverhans.plot.pyplot_image import grid_visual as new_grid_visual
return new_grid_visual(*args, **kwargs)
|
[
"Deprecation wrapper"
] |
Please provide a description of the function:def get_logits_over_interval(*args, **kwargs):
warnings.warn("`get_logits_over_interval` has moved to "
"`cleverhans.plot.pyplot_image`. "
"cleverhans.utils.get_logits_over_interval may be removed on "
"or after 2019-04-24.")
# pylint:disable=line-too-long
from cleverhans.plot.pyplot_image import get_logits_over_interval as new_get_logits_over_interval
return new_get_logits_over_interval(*args, **kwargs)
|
[
"Deprecation wrapper"
] |
Please provide a description of the function:def linear_extrapolation_plot(*args, **kwargs):
warnings.warn("`linear_extrapolation_plot` has moved to "
"`cleverhans.plot.pyplot_image`. "
"cleverhans.utils.linear_extrapolation_plot may be removed on "
"or after 2019-04-24.")
# pylint:disable=line-too-long
from cleverhans.plot.pyplot_image import linear_extrapolation_plot as new_linear_extrapolation_plot
return new_linear_extrapolation_plot(*args, **kwargs)
|
[
"Deprecation wrapper"
] |
Please provide a description of the function:def create_logger(name):
  base = logging.getLogger("cleverhans")
  if len(base.handlers) == 0:
    ch = logging.StreamHandler()
    formatter = logging.Formatter('[%(levelname)s %(asctime)s %(name)s] ' +
                                  '%(message)s')
    ch.setFormatter(formatter)
    base.addHandler(ch)
  return base
|
[
"\n Create a logger object with the given name.\n\n If this is the first time that we call this method, then initialize the\n formatter.\n "
] |
Please provide a description of the function:def deterministic_dict(normal_dict):
  out = OrderedDict()
  for key in sorted(normal_dict.keys()):
    out[key] = normal_dict[key]
  return out
|
[
"\n Returns a version of `normal_dict` whose iteration order is always the same\n "
] |
Please provide a description of the function:def ordered_union(l1, l2):
  out = []
  for e in l1 + l2:
    if e not in out:
      out.append(e)
  return out
|
[
"\n Return the union of l1 and l2, with a deterministic ordering.\n (Union of python sets does not necessarily have a consisten iteration\n order)\n :param l1: list of items\n :param l2: list of items\n :returns: list containing one copy of each item that is in l1 or in l2\n "
] |
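For example:
ordered_union([3, 1], [1, 2])  # [3, 1, 2], first-seen order preserved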
Please provide a description of the function:def safe_zip(*args):
  length = len(args[0])
  if not all(len(arg) == length for arg in args):
    raise ValueError("Lengths of arguments do not match: "
                     + str([len(arg) for arg in args]))
  return list(zip(*args))
|
[
"like zip but with these properties:\n - returns a list, rather than an iterator. This is the old Python2 zip behavior.\n - a guarantee that all arguments are the same length.\n (normal zip silently drops entries to make them the same length)\n "
] |
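A quick sketch of both behaviors:
safe_zip([1, 2], ['a', 'b'])  # [(1, 'a'), (2, 'b')]
safe_zip([1, 2], ['a'])       # raises ValueError: lengths do not match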
Please provide a description of the function:def shell_call(command, **kwargs):
  # Regular expression to find instances of '${NAME}' in a string
  CMD_VARIABLE_RE = re.compile('^\\$\\{(\\w+)\\}$')
  command = list(command)
  for i in range(len(command)):
    m = CMD_VARIABLE_RE.match(command[i])
    if m:
      var_id = m.group(1)
      if var_id in kwargs:
        command[i] = kwargs[var_id]
  str_command = ' '.join(command)
  logging.debug('Executing shell command: %s' % str_command)
  return subprocess.check_output(command)
|
[
"Calls shell command with argument substitution.\n\n Args:\n command: command represented as a list. Each element of the list is one\n token of the command. For example \"cp a b\" becomes ['cp', 'a', 'b']\n If any element of the list looks like '${NAME}' then it will be replaced\n by value from **kwargs with key 'NAME'.\n **kwargs: dictionary with argument substitution\n\n Returns:\n output of the command\n\n Raises:\n subprocess.CalledProcessError if command return value is not zero\n\n This function is useful when you need to do variable substitution prior\n running the command. Below are few examples of how it works:\n\n shell_call(['cp', 'a', 'b'], a='asd') calls command 'cp a b'\n\n shell_call(['cp', '${a}', 'b'], a='asd') calls command 'cp asd b',\n '${a}; was replaced with 'asd' before calling the command\n "
] |
Please provide a description of the function:def deep_copy(numpy_dict):
  out = {}
  for key in numpy_dict:
    out[key] = numpy_dict[key].copy()
  return out
|
[
"\n Returns a copy of a dictionary whose values are numpy arrays.\n Copies their values rather than copying references to them.\n "
] |
Please provide a description of the function:def data_mnist(datadir=tempfile.gettempdir(), train_start=0,
               train_end=60000, test_start=0, test_end=10000):
  assert isinstance(train_start, int)
  assert isinstance(train_end, int)
  assert isinstance(test_start, int)
  assert isinstance(test_end, int)
  X_train = download_and_parse_mnist_file(
      'train-images-idx3-ubyte.gz', datadir=datadir) / 255.
  Y_train = download_and_parse_mnist_file(
      'train-labels-idx1-ubyte.gz', datadir=datadir)
  X_test = download_and_parse_mnist_file(
      't10k-images-idx3-ubyte.gz', datadir=datadir) / 255.
  Y_test = download_and_parse_mnist_file(
      't10k-labels-idx1-ubyte.gz', datadir=datadir)
  X_train = np.expand_dims(X_train, -1)
  X_test = np.expand_dims(X_test, -1)
  X_train = X_train[train_start:train_end]
  Y_train = Y_train[train_start:train_end]
  X_test = X_test[test_start:test_end]
  Y_test = Y_test[test_start:test_end]
  Y_train = utils.to_categorical(Y_train, nb_classes=10)
  Y_test = utils.to_categorical(Y_test, nb_classes=10)
  return X_train, Y_train, X_test, Y_test
|
[
"\n Load and preprocess MNIST dataset\n :param datadir: path to folder where data should be stored\n :param train_start: index of first training set example\n :param train_end: index of last training set example\n :param test_start: index of first test set example\n :param test_end: index of last test set example\n :return: tuple of four arrays containing training data, training labels,\n testing data and testing labels.\n "
] |
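A minimal usage sketch; the shapes follow from the code above (images gain a trailing channel axis, labels become one-hot):
X_train, Y_train, X_test, Y_test = data_mnist(train_end=1000, test_end=500)
# X_train.shape == (1000, 28, 28, 1), values in [0, 1]
# Y_train.shape == (1000, 10)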
Please provide a description of the function:def data_cifar10(train_start=0, train_end=50000, test_start=0, test_end=10000):
  # These values are specific to CIFAR10
  img_rows = 32
  img_cols = 32
  nb_classes = 10
  # the data, shuffled and split between train and test sets
  (x_train, y_train), (x_test, y_test) = cifar10.load_data()
  if tf.keras.backend.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)
  else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
  x_train = x_train.astype('float32')
  x_test = x_test.astype('float32')
  x_train /= 255
  x_test /= 255
  print('x_train shape:', x_train.shape)
  print(x_train.shape[0], 'train samples')
  print(x_test.shape[0], 'test samples')
  # convert class vectors to binary class matrices
  y_train = np_utils.to_categorical(y_train, nb_classes)
  y_test = np_utils.to_categorical(y_test, nb_classes)
  x_train = x_train[train_start:train_end, :, :, :]
  y_train = y_train[train_start:train_end, :]
  x_test = x_test[test_start:test_end, :]
  y_test = y_test[test_start:test_end, :]
  return x_train, y_train, x_test, y_test
|
[
"\n Preprocess CIFAR10 dataset\n :return:\n "
] |
Please provide a description of the function:def print_accuracies(filepath, train_start=TRAIN_START, train_end=TRAIN_END,
                     test_start=TEST_START, test_end=TEST_END,
                     batch_size=BATCH_SIZE, which_set=WHICH_SET,
                     base_eps_iter=BASE_EPS_ITER,
                     nb_iter=NB_ITER):
  # Set TF random seed to improve reproducibility
  tf.set_random_seed(20181014)
  set_log_level(logging.INFO)
  sess = tf.Session()
  with sess.as_default():
    model = load(filepath)
  assert len(model.get_params()) > 0
  factory = model.dataset_factory
  factory.kwargs['train_start'] = train_start
  factory.kwargs['train_end'] = train_end
  factory.kwargs['test_start'] = test_start
  factory.kwargs['test_end'] = test_end
  dataset = factory()
  x_data, y_data = dataset.get_set(which_set)
  impl(sess, model, dataset, factory, x_data, y_data, base_eps_iter, nb_iter)
|
[
"\n Load a saved model and print out its accuracy on different data distributions\n\n This function works by running a single attack on each example.\n This provides a reasonable estimate of the true failure rate quickly, so\n long as the model does not suffer from gradient masking.\n However, this estimate is mostly intended for development work and not\n for publication. A more accurate estimate may be obtained by running\n an attack bundler instead.\n\n :param filepath: path to model to evaluate\n :param train_start: index of first training set example to use\n :param train_end: index of last training set example to use\n :param test_start: index of first test set example to use\n :param test_end: index of last test set example to use\n :param batch_size: size of evaluation batches\n :param which_set: 'train' or 'test'\n :param base_eps_iter: step size if the data were in [0,1]\n (Step size will be rescaled proportional to the actual data range)\n :param nb_iter: Number of iterations of PGD to run per class\n "
] |
Please provide a description of the function:def impl(sess, model, dataset, factory, x_data, y_data,
         base_eps_iter=BASE_EPS_ITER, nb_iter=NB_ITER,
         batch_size=BATCH_SIZE):
  center = dataset.kwargs['center']
  max_val = dataset.kwargs['max_val']
  value_range = max_val * (1. + center)
  min_value = 0. - center * max_val
  if 'CIFAR' in str(factory.cls):
    base_eps = 8. / 255.
    if base_eps_iter is None:
      base_eps_iter = 2. / 255.
  elif 'MNIST' in str(factory.cls):
    base_eps = .3
    if base_eps_iter is None:
      base_eps_iter = .1
  else:
    raise NotImplementedError(str(factory.cls))
  pgd_params = {'eps': base_eps * value_range,
                'eps_iter': base_eps_iter * value_range,
                'nb_iter': nb_iter,
                'clip_min': min_value,
                'clip_max': max_val}
  semantic = Semantic(model, center, max_val, sess)
  pgd = ProjectedGradientDescent(model, sess=sess)
  jobs = [('clean', None, None, None),
          ('Semantic', semantic, None, None),
          ('pgd', pgd, pgd_params, None)]
  out = {}
  for job in jobs:
    name, attack, attack_params, job_batch_size = job
    if job_batch_size is None:
      job_batch_size = batch_size
    t1 = time.time()
    acc = accuracy(sess, model, x_data, y_data, batch_size=job_batch_size,
                   devices=devices, attack=attack, attack_params=attack_params)
    t2 = time.time()
    out[name] = acc
    print("Accuracy on " + name + " examples: ", acc)
    print("Evaluation took", t2 - t1, "seconds")
  return out
|
[
"\n The actual implementation of the evaluation.\n :param sess: tf.Session\n :param model: cleverhans.model.Model\n :param dataset: cleverhans.dataset.Dataset\n :param factory: the dataset factory corresponding to `dataset`\n :param x_data: numpy array of input examples\n :param y_data: numpy array of class labels\n :param base_eps_iter: step size for PGD if data were in [0, 1]\n :param nb_iter: number of PGD iterations\n :returns: dict mapping string adversarial example names to accuracies\n "
] |
Please provide a description of the function:def main(argv=None):
  try:
    _name_of_script, filepath = argv
  except ValueError:
    raise ValueError(argv)
  print_accuracies(filepath=filepath, test_start=FLAGS.test_start,
                   test_end=FLAGS.test_end, which_set=FLAGS.which_set,
                   nb_iter=FLAGS.nb_iter, base_eps_iter=FLAGS.base_eps_iter,
                   batch_size=FLAGS.batch_size)
|
[
"\n Print accuracies\n "
] |
Please provide a description of the function:def fast_gradient_method(model_fn, x, eps, ord,
                          clip_min=None, clip_max=None, y=None, targeted=False,
                          sanity_checks=False):
  if ord not in [np.inf, 1, 2]:
    raise ValueError("Norm order must be either np.inf, 1, or 2.")
  asserts = []
  # If a data range was specified, check that the input was in that range
  if clip_min is not None:
    assert_ge = torch.all(torch.ge(x, torch.tensor(clip_min, device=x.device,
                                                   dtype=x.dtype)))
    asserts.append(assert_ge)
  if clip_max is not None:
    assert_le = torch.all(torch.le(x, torch.tensor(clip_max, device=x.device,
                                                   dtype=x.dtype)))
    asserts.append(assert_le)
  # x needs to be a leaf variable, of floating point type and have
  # requires_grad being True for its grad to be computed and stored properly
  # in a backward call
  x = x.clone().detach().to(torch.float).requires_grad_(True)
  if y is None:
    # Using model predictions as ground truth to avoid label leaking
    _, y = torch.max(model_fn(x), 1)
  # Compute loss
  loss_fn = torch.nn.CrossEntropyLoss()
  loss = loss_fn(model_fn(x), y)
  # If attack is targeted, minimize loss of target label rather than
  # maximize loss of correct label
  if targeted:
    loss = -loss
  # Define gradient of loss wrt input
  loss.backward()
  optimal_perturbation = optimize_linear(x.grad, eps, ord)
  # Add perturbation to original example to obtain adversarial example
  adv_x = x + optimal_perturbation
  # If clipping is needed, reset all values outside of [clip_min, clip_max]
  if (clip_min is not None) or (clip_max is not None):
    # We don't currently support one-sided clipping
    assert clip_min is not None and clip_max is not None
    adv_x = torch.clamp(adv_x, clip_min, clip_max)
  if sanity_checks:
    assert np.all(asserts)
  return adv_x
|
[
"\n PyTorch implementation of the Fast Gradient Method.\n :param model_fn: a callable that takes an input tensor and returns the model logits.\n :param x: input tensor.\n :param eps: epsilon (input variation parameter); see https://arxiv.org/abs/1412.6572.\n :param ord: Order of the norm (mimics NumPy). Possible values: np.inf, 1 or 2.\n :param clip_min: (optional) float. Minimum float value for adversarial example components.\n :param clip_max: (optional) float. Maximum float value for adversarial example components.\n :param y: (optional) Tensor with true labels. If targeted is true, then provide the\n target label. Otherwise, only provide this parameter if you'd like to use true\n labels when crafting adversarial samples. Otherwise, model predictions are used\n as labels to avoid the \"label leaking\" effect (explained in this paper:\n https://arxiv.org/abs/1611.01236). Default is None.\n :param targeted: (optional) bool. Is the attack targeted or untargeted?\n Untargeted, the default, will try to make the label incorrect.\n Targeted will instead try to move in the direction of being more like y.\n :param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime /\n memory or for unit tests that intentionally pass strange input)\n :return: a tensor for the adversarial example\n "
] |
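A minimal usage sketch, assuming `net` is a trained PyTorch classifier and `x` is a batch of inputs scaled to [0, 1] (both hypothetical names):
import numpy as np
# Untargeted L-infinity attack with budget eps = 0.3; labels are inferred
# from the model's own predictions to avoid label leaking.
adv_x = fast_gradient_method(net, x, eps=0.3, ord=np.inf,
                             clip_min=0., clip_max=1.)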
Please provide a description of the function:def load_images(input_dir, batch_shape):
  images = np.zeros(batch_shape)
  filenames = []
  idx = 0
  batch_size = batch_shape[0]
  for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
    with tf.gfile.Open(filepath) as f:
      image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0
    # Images for inception classifier are normalized to be in [-1, 1] interval.
    images[idx, :, :, :] = image * 2.0 - 1.0
    filenames.append(os.path.basename(filepath))
    idx += 1
    if idx == batch_size:
      yield filenames, images
      filenames = []
      images = np.zeros(batch_shape)
      idx = 0
  if idx > 0:
    yield filenames, images
|
[
"Read png images from input directory in batches.\n\n Args:\n input_dir: input directory\n batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]\n\n Yields:\n filenames: list file names without path of each image\n Lenght of this list could be less than batch_size, in this case only\n first few images of the result are elements of the minibatch.\n images: array with all images from this batch\n "
] |
Please provide a description of the function:def save_images(images, filenames, output_dir):
  for i, filename in enumerate(filenames):
    # Images for inception classifier are normalized to be in [-1, 1]
    # interval, so rescale them back to [0, 1].
    with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:
      img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)
      Image.fromarray(img).save(f, format='PNG')
|
[
"Saves images to the output directory.\n\n Args:\n images: array with minibatch of images\n filenames: list of filenames without path\n If number of file names in this list less than number of images in\n the minibatch then only first len(filenames) images will be saved.\n output_dir: directory where to save images\n "
] |
Please provide a description of the function:def main(_):
  # Images for inception classifier are normalized to be in [-1, 1] interval,
  # eps is a difference between pixels so it should be in [0, 2] interval.
  # Renormalizing epsilon from [0, 255] to [0, 2].
  eps = 2.0 * FLAGS.max_epsilon / 255.0
  batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
  nb_classes = 1001
  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default():
    # Prepare graph
    x_input = tf.placeholder(tf.float32, shape=batch_shape)
    model = InceptionModel(nb_classes)
    fgsm = FastGradientMethod(model)
    x_adv = fgsm.generate(x_input, eps=eps, clip_min=-1., clip_max=1.)
    # Run computation
    saver = tf.train.Saver(slim.get_model_variables())
    session_creator = tf.train.ChiefSessionCreator(
        scaffold=tf.train.Scaffold(saver=saver),
        checkpoint_filename_with_path=FLAGS.checkpoint_path,
        master=FLAGS.master)
    with tf.train.MonitoredSession(session_creator=session_creator) as sess:
      for filenames, images in load_images(FLAGS.input_dir, batch_shape):
        adv_images = sess.run(x_adv, feed_dict={x_input: images})
        save_images(adv_images, filenames, FLAGS.output_dir)
|
[
"Run the sample attack"
] |
Please provide a description of the function:def ld_cifar10():
  train_transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
  test_transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
  train_dataset = torchvision.datasets.CIFAR10(root='/tmp/data', train=True, transform=train_transforms, download=True)
  test_dataset = torchvision.datasets.CIFAR10(root='/tmp/data', train=False, transform=test_transforms, download=True)
  train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2)
  test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=128, shuffle=False, num_workers=2)
  return EasyDict(train=train_loader, test=test_loader)
|
[
"Load training and test data."
] |
Please provide a description of the function:def plot_report_from_path(path, success_name=DEFAULT_SUCCESS_NAME,
                          fail_names=DEFAULT_FAIL_NAMES, label=None,
                          is_max_confidence=True,
                          linewidth=LINEWIDTH,
                          plot_upper_bound=True):
  report = load(path)
  plot_report(report, success_name, fail_names, label, is_max_confidence,
              linewidth, plot_upper_bound)
|
[
"\n Plots a success-fail curve from a confidence report stored on disk,\n :param path: string filepath for the stored report.\n (Should be the output of make_confidence_report*.py)\n :param success_name: The name (confidence report key) of the data that\n should be used to measure success rate\n :param fail_names: A list of names (confidence report keys) of the data\n that should be used to measure failure rate.\n *Only one of these keys will be plotted*. Each key will be tried in\n order until one is found in the report. This is to support both the\n output of `make_confidence_report` and `make_confidence_report_bundled`.\n :param label: Optional string. Name to use for this curve in the legend.\n :param is_max_confidence: bool.\n If True, when measuring the failure rate, treat the data as the output\n of a maximum confidence attack procedure.\n This means that the attack is optimal (assuming the underlying optimizer\n is good enough, *which is probably false*, so interpret the plot\n accordingly) for thresholds >= .5 but for lower thresholds the observed\n failure rate is a lower bound on the true worst failure rate and the\n observed coverage is an upper bound (assuming good enough optimization)\n on the true failure rate.\n The plot thus draws the threshold >= .5 portion of the curve with a solid\n line and the upper and lower bounds with a dashed line.\n See https://openreview.net/forum?id=H1g0piA9tQ for details.\n If False, the attack procedure is regarded as an ad hoc way of obtaining\n a loose lower bound, and thus the whole curve is drawn with dashed lines.\n :param linewidth: thickness of the line to draw\n :param plot_upper_bound: include upper bound on error rate in plot\n "
] |
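A minimal usage sketch, assuming `report.joblib` is a hypothetical file written by make_confidence_report.py and that matplotlib's pyplot is imported as in the functions above:
plot_report_from_path('report.joblib', label='baseline')
pyplot.legend()
pyplot.show()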
Please provide a description of the function:def plot_report(report, success_name, fail_names, label=None,
                is_max_confidence=True,
                linewidth=LINEWIDTH,
                plot_upper_bound=True):
  (fail_optimal, success_optimal, fail_lower_bound, fail_upper_bound,
   success_bounded) = make_curve(report, success_name, fail_names)
  assert len(fail_lower_bound) == len(fail_upper_bound)
  fail_optimal = np.array(fail_optimal)
  fail_lower_bound = np.array(fail_lower_bound)
  fail_upper_bound = np.array(fail_upper_bound)
  if is_max_confidence:
    p, = pyplot.plot(fail_optimal, success_optimal, label=label,
                     linewidth=linewidth)
    color = p.get_color()
    pyplot.plot(fail_lower_bound, success_bounded, '--', color=color)
    if plot_upper_bound:
      pyplot.plot(fail_upper_bound, success_bounded, '--', color=color)
  else:
    # If the attack was not MaxConfidence, then this whole curve is just
    # a loose lower bound
    all_fail = np.concatenate((fail_optimal, fail_lower_bound), axis=0)
    pyplot.plot(all_fail, success_optimal + success_bounded,
                '--', label=label, linewidth=linewidth)
  pyplot.xlabel("Failure rate on adversarial examples")
  pyplot.ylabel("Success rate on clean examples")
  gap = fail_upper_bound - fail_lower_bound
  if gap.size > 0:
    assert gap.min() >= 0.
    print("Max gap: ", gap.max())
|
[
"\n Plot a success fail curve from a confidence report\n :param report: A confidence report\n (the type of object saved by make_confidence_report.py)\n :param success_name: see plot_report_from_path\n :param fail_names: see plot_report_from_path\n :param label: see plot_report_from_path\n :param is_max_confidence: see plot_report_from_path\n :param linewidth: see plot_report_from_path\n "
] |
Please provide a description of the function:def make_curve(report, success_name, fail_names):
  success_results = report[success_name]
  fail_name = None  # pacify pylint
  found = False
  for fail_name in fail_names:
    if fail_name in report:
      found = True
      break
  if not found:
    raise ValueError(fail_name + " not in report. "
                     "Available keys: " + str(report.keys()))
  fail_results = report[fail_name]
  # "good" means drawn from the distribution where we measure success rate.
  # "bad" means drawn from the distribution where we measure failure rate.
  # From here on out we use those terms, to avoid confusion between examples
  # that actually failed and examples that were drawn from the distribution
  # where we measured failure rate.
  old_all_probs_version = False
  if isinstance(success_results, dict):
    # This dictionary key lookup will trigger a deprecation warning if
    # `success_results` is not the old dictionary style of report, so we
    # don't want to do a dictionary lookup unless we really are using the
    # old version.
    old_all_probs_version = 'all_probs' in success_results
  if old_all_probs_version:
    warnings.warn("The 'all_probs' key is included only to support "
                  "old files from a private development codebase. "
                  "Support for this key can be dropped at any time "
                  "without warning.")
    good_probs = success_results['all_probs']
    bad_probs = fail_results['all_probs']
    bad_corrects = fail_results['correctness_mask']
    good_corrects = success_results['correctness_mask']
  else:
    if isinstance(success_results, dict):
      # Still using dict, but using newer key names
      warnings.warn("Support for dictionary confidence reports is deprecated. "
                    "Switch to using the classes in "
                    "cleverhans.confidence_report. Support for old "
                    "dictionary-style reports may be removed on or after "
                    "2019-07-19.")
      good_probs = success_results['confidence']
      bad_probs = fail_results['confidence']
      good_corrects = success_results['correctness']
      bad_corrects = fail_results['correctness']
    else:
      # current version
      good_probs = success_results.confidence
      bad_probs = fail_results.confidence
      good_corrects = success_results.correctness
      bad_corrects = fail_results.correctness
  good_triplets = [(prob, correct, True) for prob, correct
                   in safe_zip(good_probs, good_corrects)]
  bad_triplets = [(prob, correct, False) for prob, correct
                  in safe_zip(bad_probs, bad_corrects)]
  total_good = len(good_triplets)
  total_bad = len(bad_triplets)
  if total_good != 10000:
    warnings.warn("Not using full test set? Found " + str(total_good) +
                  " examples for measuring success rate")
  if total_bad != 10000:
    warnings.warn("Not using full test set for adversarial examples?")
  all_triplets = good_triplets + bad_triplets
  all_triplets = sorted(all_triplets, key=lambda x: -x[0])
  # Start with the case for threshold t = 1.
  # Examples are covered only if prob > t (strict inequality)
  # So initially nothing is covered
  good_covered_and_correct = 0
  bad_covered_and_incorrect = 0
  # Number of examples that are bad, incorrect, and covered by
  # a t >= 0.5, or that were merely covered by a t < 0.5
  failure_opportunities = 0
  next_idx = 0
  fail_optimal = []
  success_optimal = []
  fail_upper_bound = []
  fail_lower_bound = []
  success_bounded = []
  bounded = False
  # NOTE: the loop always exits via an internal break statement.
  # Copied the termination condition to the while statement for ease
  # of reading.
  while next_idx < len(all_triplets):
    gs = float(good_covered_and_correct) / total_good
    bf = float(bad_covered_and_incorrect) / total_bad
    # Add results for current threshold to the list
    if not bounded:
      # Sometimes when there are big jumps in the failure rate it makes
      # artifacts in the plot, where there's a long linear track.
      # This implies the real success-fail curve is linear when
      # actually it just isn't sampled by the data.
      # To avoid implying that the model reaches a higher success
      # rate than it actually does, we avoid these plotting artifacts
      # by introducing extra points that make the graph move horizontally
      # to the right first, then vertically.
      if len(fail_optimal) > 0:
        prev_bf = fail_optimal[-1]
        prev_gs = success_optimal[-1]
        if gs > prev_gs and bf > prev_bf:
          fail_optimal.append(bf)
          success_optimal.append(prev_gs)
      success_optimal.append(gs)
      fail_optimal.append(bf)
    else:
      success_bounded.append(gs)
      fail_lower_bound.append(bf)
      fail_upper_bound.append(float(failure_opportunities) / total_bad)
    if next_idx == len(all_triplets):
      break
    # next_prob_to_include is not quite the same thing as the threshold.
    # The threshold is infinitesimally smaller than this value.
    next_prob_to_include = all_triplets[next_idx][0]
    # Process all ties
    while next_prob_to_include == all_triplets[next_idx][0]:
      _prob, correct, is_good = all_triplets[next_idx]
      if is_good:
        good_covered_and_correct += correct
      else:
        if next_prob_to_include <= .5:
          failure_opportunities += 1
        else:
          failure_opportunities += 1 - correct
        bad_covered_and_incorrect += 1 - correct
      next_idx += 1
      if next_idx == len(all_triplets):
        break
    if next_prob_to_include <= .5:
      bounded = True
  out = (fail_optimal, success_optimal, fail_lower_bound, fail_upper_bound,
         success_bounded)
  return out
|
[
"\n Make a success-failure curve.\n :param report: A confidence report\n (the type of object saved by make_confidence_report.py)\n :param success_name: see plot_report_from_path\n :param fail_names: see plot_report_from_path\n :returns:\n fail_optimal: list of failure rates on adversarial data for the optimal\n (t >= .5) part of the curve. Each entry corresponds to a different\n threshold. Thresholds are chosen to make the smoothest possible curve\n from the available data, e.g. one threshold between each unique\n confidence value observed in the data. To make sure that linear\n interpolation between points in the curve never overestimates the\n failure rate for a specific success rate, the curve also includes\n extra points that increment the failure rate prior to any point\n that increments the success rate, so the curve moves up and to the\n right in a series of backwards \"L\" shapes rather than moving up\n and to the right along diagonal lines. For large datasets these\n maximally pessimistic points will usually not be visible and the\n curve will appear smooth.\n success_optimal: list of success rates on clean data on the optimal\n part of the curve. Matches up with `fail_optimal`.\n fail_lower_bound: list of observed failure rates on the t < .5 portion\n of the curve where MaxConfidence is not optimal.\n fail_upper_bound: list of upper bounds (assuming good enough optimization,\n so not a true upper bound) on the failure rates on the t < .5 portion\n of the curve where MaxConfidence is not optimal. Matches up with\n `fail_lower_bound`.\n success_bounded: success rates on the non-optimal part of the curve.\n Matches up with `fail_lower_bound` and `fail_upper_bound`.\n "
] |
Please provide a description of the function:def model_train(self):
  assert self.runner is not None, (
      'Runner is not initialized. TrainerSingleGPU or TrainerMultiGPU '
      'instantiate a Runner object at initialization time.')
  hparams = self.hparams
  batch_size = hparams.batch_size
  nb_epochs = hparams.nb_epochs
  train_dir = hparams.save_dir
  filename = 'model.ckpt'
  X_train = self.X_train
  Y_train = self.Y_train
  sess = self.sess
  with sess.as_default():
    X_batch = X_train[:batch_size]
    Y_batch = Y_train[:batch_size]
    self._init_tf(X_batch, Y_batch)
    for epoch in six.moves.xrange(nb_epochs):
      logging.info("Epoch " + str(epoch))
      # Compute number of batches
      nb_batches = int(math.ceil(float(len(X_train)) / batch_size))
      assert nb_batches * batch_size >= len(X_train)
      # Indices to shuffle training set
      index_shuf = list(range(len(X_train)))
      self.rng.shuffle(index_shuf)
      prev = time.time()
      for batch in range(nb_batches):
        # Compute batch start and end indices
        start, end = batch_indices(
            batch, len(X_train), batch_size)
        # Perform one training step
        self._update_learning_params()
        # Train step
        X_batch = X_train[index_shuf[start:end]]
        Y_batch = Y_train[index_shuf[start:end]]
        self._run({'x_pre': X_batch, 'y': Y_batch})
        self._sync_params()
      # Clean up the queue
      while not self.runner.is_finished():
        self._run()
      self._sync_params(forced=True)
      assert end >= len(X_train), (
          'Not all training examples are used.')
      cur = time.time()
      logging.info("\tEpoch took " + str(cur - prev) + " seconds")
      prev = cur
      self.eval()
      # Save model
      cond = ((epoch + 1) % hparams.save_steps == 0
              or epoch == nb_epochs)
      if hparams.save and cond:
        save_path = os.path.join(train_dir, filename)
        saver = tf.train.Saver()
        saver.save(sess, save_path)
        logging.info("Model saved at: " + str(save_path))
    logging.info("Completed model training.")
|
[
"\n Train a TF graph\n :param sess: TF session to use when training the graph\n :param x: input placeholder\n :param y: output placeholder (for labels)\n :param predictions: model output predictions\n :param X_train: numpy array with training inputs\n :param Y_train: numpy array with training outputs\n :param hparams.save: boolean controlling the save operation\n :param predictions_adv: if set with the adversarial example tensor,\n will run adversarial training\n :param evaluate: function that is run after each training iteration\n (typically to display the test/validation accuracy).\n ",
"Runner is not initialized. TrainerSingleGPU or TrainerMultiGPU\n instantiate a Runner object at initialization time."
] |
Please provide a description of the function:def clone_g0_inputs_on_ngpus(self, inputs, outputs, g0_inputs):
  assert len(inputs) == len(outputs), (
      'Inputs and outputs should have the same number of elements.')
  inputs[0].update(g0_inputs)
  outputs[0].update(g0_inputs)
  # Copy g0_inputs forward
  for i in range(1, len(inputs)):
    # Create the graph for i'th step of attack
    device_name = inputs[i]['x'].device
    with tf.device(device_name):
      with tf.variable_scope('step%d' % i):
        for k, v in g0_inputs.iteritems():
          if k not in inputs[i]:
            v_copy = clone_variable(k, v)
            inputs[i][k] = v_copy
            outputs[i][k] = v_copy
  return inputs, outputs
|
[
"\n Clone variables unused by the attack on all GPUs. Specifically, the\n ground-truth label, y, has to be preserved until the training step.\n\n :param inputs: A list of dictionaries as the inputs to each step.\n :param outputs: A list of dictionaries as the outputs of each step.\n :param g0_inputs: Initial variables to be cloned.\n :return: Updated inputs and outputs.\n "
] |
Please provide a description of the function:def generate(self, x, **kwargs):
  assert self.sess is not None, \
      'Cannot use `generate` when no `sess` was provided'
  self.parse_params(**kwargs)
  if self.y_target is None:
    self.y_target, nb_classes = self.get_or_guess_labels(x, kwargs)
    self.targeted_attack = False
  else:
    _, nb_classes = self.get_or_guess_labels(x, kwargs)
    self.targeted_attack = True
  attack = LBFGS_impl(
      self.sess, x, self.model.get_logits(x),
      self.y_target, self.targeted_attack,
      self.binary_search_steps, self.max_iterations, self.initial_const,
      self.clip_min, self.clip_max, nb_classes, self.batch_size)
  def lbfgs_wrap(x_val, y_val):
    return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)
  wrap = tf.py_func(lbfgs_wrap, [x, self.y_target], self.tf_dtype)
  wrap.set_shape(x.get_shape())
  return wrap
|
[
"\n Return a tensor that constructs adversarial examples for the given\n input. Generate uses tf.py_func in order to operate over tensors.\n :param x: (required) A tensor with the inputs.\n :param kwargs: See `parse_params`\n ",
"\n Wrapper creating TensorFlow interface for use with py_func\n "
] |
Please provide a description of the function:def parse_params(self,
                 y_target=None,
                 batch_size=1,
                 binary_search_steps=5,
                 max_iterations=1000,
                 initial_const=1e-2,
                 clip_min=0,
                 clip_max=1):
  self.y_target = y_target
  self.batch_size = batch_size
  self.binary_search_steps = binary_search_steps
  self.max_iterations = max_iterations
  self.initial_const = initial_const
  self.clip_min = clip_min
  self.clip_max = clip_max
|
[
"\n :param y_target: (optional) A tensor with the one-hot target labels.\n :param batch_size: The number of inputs to include in a batch and\n process simultaneously.\n :param binary_search_steps: The number of times we perform binary\n search to find the optimal tradeoff-\n constant between norm of the purturbation\n and cross-entropy loss of classification.\n :param max_iterations: The maximum number of iterations.\n :param initial_const: The initial tradeoff-constant to use to tune the\n relative importance of size of the perturbation\n and cross-entropy loss of the classification.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n "
] |
Please provide a description of the function:def attack(self, x_val, targets):
  def lbfgs_objective(adv_x, self, targets, oimgs, CONST):
    loss = self.sess.run(
        self.loss,
        feed_dict={
            self.x: adv_x.reshape(oimgs.shape),
            self.targeted_label: targets,
            self.ori_img: oimgs,
            self.const: CONST
        })
    grad = self.sess.run(
        self.grad,
        feed_dict={
            self.x: adv_x.reshape(oimgs.shape),
            self.targeted_label: targets,
            self.ori_img: oimgs,
            self.const: CONST
        })
    return loss, grad.flatten().astype(float)
  def attack_success(out, target, targeted_attack):
    if targeted_attack:
      return out == target
    else:
      return out != target
  # begin the main part for the attack
  from scipy.optimize import fmin_l_bfgs_b
  oimgs = np.clip(x_val, self.clip_min, self.clip_max)
  CONST = np.ones(self.batch_size) * self.initial_const
  # set the lower and upper bounds accordingly
  lower_bound = np.zeros(self.batch_size)
  upper_bound = np.ones(self.batch_size) * 1e10
  # set the box constraints for the optimization function
  clip_min = self.clip_min * np.ones(oimgs.shape[:])
  clip_max = self.clip_max * np.ones(oimgs.shape[:])
  clip_bound = list(zip(clip_min.flatten(), clip_max.flatten()))
  # placeholders for the best l2 and instance attack found so far
  o_bestl2 = [1e10] * self.batch_size
  o_bestattack = np.copy(oimgs)
  for outer_step in range(self.binary_search_steps):
    _logger.debug("  Binary search step %s of %s",
                  outer_step, self.binary_search_steps)
    # The last iteration (if we run many steps) repeat the search once.
    if self.repeat and outer_step == self.binary_search_steps - 1:
      CONST = upper_bound
    # optimization function
    adv_x, _, __ = fmin_l_bfgs_b(
        lbfgs_objective,
        oimgs.flatten().astype(float),
        args=(self, targets, oimgs, CONST),
        bounds=clip_bound,
        maxiter=self.max_iterations,
        iprint=0)
    adv_x = adv_x.reshape(oimgs.shape)
    assert np.amax(adv_x) <= self.clip_max and \
        np.amin(adv_x) >= self.clip_min, \
        'fmin_l_bfgs_b returns are invalid'
    # adjust the best result (i.e., the adversarial example with the
    # smallest perturbation in terms of L_2 norm) found so far
    preds = np.atleast_1d(
        utils_tf.model_argmax(self.sess, self.x, self.logits,
                              adv_x))
    _logger.debug("predicted labels are %s", preds)
    l2s = np.zeros(self.batch_size)
    for i in range(self.batch_size):
      l2s[i] = np.sum(np.square(adv_x[i] - oimgs[i]))
    for e, (l2, pred, ii) in enumerate(zip(l2s, preds, adv_x)):
      if l2 < o_bestl2[e] and attack_success(pred, np.argmax(targets[e]),
                                             self.targeted_attack):
        o_bestl2[e] = l2
        o_bestattack[e] = ii
    # adjust the constant as needed
    for e in range(self.batch_size):
      if attack_success(preds[e], np.argmax(targets[e]),
                        self.targeted_attack):
        # success, divide const by two
        upper_bound[e] = min(upper_bound[e], CONST[e])
        if upper_bound[e] < 1e9:
          CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
      else:
        # failure, either multiply by 10 if no solution found yet
        # or do binary search with the known upper bound
        lower_bound[e] = max(lower_bound[e], CONST[e])
        if upper_bound[e] < 1e9:
          CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
        else:
          CONST[e] *= 10
  _logger.debug("  Successfully generated adversarial examples "
                "on %s of %s instances.",
                sum(upper_bound < 1e9), self.batch_size)
  o_bestl2 = np.array(o_bestl2)
  mean = np.mean(np.sqrt(o_bestl2[o_bestl2 < 1e9]))
  _logger.debug("   Mean successful distortion: {:.4g}".format(mean))
  # return the best solution found
  return o_bestattack
|
[
"\n Perform the attack on the given instance for the given targets.\n ",
" returns the function value and the gradient for fmin_l_bfgs_b ",
" returns attack result "
] |
Please provide a description of the function:def set_device(self, device_name):
  device_name = unify_device_name(device_name)
  self.device_name = device_name
  for layer in self.layers:
    layer.device_name = device_name
|
[
"\n Set the device before the next fprop to create a new graph on the\n specified device.\n "
] |
Please provide a description of the function:def create_sync_ops(self, host_device):
  host_device = unify_device_name(host_device)
  sync_ops = []
  for layer in self.layers:
    if isinstance(layer, LayernGPU):
      sync_ops += layer.create_sync_ops(host_device)
  return sync_ops
|
[
"\n Return a list of assignment operations that syncs the parameters\n of all model copies with the one on host_device.\n :param host_device: (required str) the name of the device with latest\n parameters\n "
] |
Please provide a description of the function:def get_variable(self, name, initializer):
  v = tf.get_variable(name, shape=initializer.shape,
                      initializer=(lambda shape, dtype, partition_info:
                                   initializer),
                      trainable=self.training)
  return v
|
[
"\n Create and initialize a variable using a numpy array and set trainable.\n :param name: (required str) name of the variable\n :param initializer: a numpy array or a tensor\n "
] |
Please provide a description of the function:def set_input_shape_ngpu(self, new_input_shape):
  assert self.device_name, "Device name has not been set."
  device_name = self.device_name
  if self.input_shape is None:
    # First time setting the input shape
    self.input_shape = [None] + [int(d) for d in list(new_input_shape)]
  if device_name in self.params_device:
    # There is a copy of weights on this device
    self.__dict__.update(self.params_device[device_name])
    return
  # Stop recursion
  self.params_device[device_name] = {}
  # Initialize weights on this device
  with tf.device(device_name):
    self.set_input_shape(self.input_shape)
    keys_after = self.__dict__.keys()
    if self.params_names is None:
      # Prevent overriding training
      self.params_names = [k for k in keys_after if isinstance(
          self.__dict__[k], tf.Variable)]
    params = {k: self.__dict__[k] for k in self.params_names}
    self.params_device[device_name] = params
|
[
"\n Create and initialize layer parameters on the device previously set\n in self.device_name.\n\n :param new_input_shape: a list or tuple for the shape of the input.\n "
] |
Please provide a description of the function:def create_sync_ops(self, host_device):
  sync_ops = []
  host_params = self.params_device[host_device]
  for device, params in (self.params_device).iteritems():
    if device == host_device:
      continue
    for k in self.params_names:
      if isinstance(params[k], tf.Variable):
        sync_ops += [tf.assign(params[k], host_params[k])]
  return sync_ops
|
[
"Create an assignment operation for each weight on all devices. The\n weight is assigned the value of the copy on the `host_device'.\n "
] |
Please provide a description of the function:def vatm(model,
         x,
         logits,
         eps,
         num_iterations=1,
         xi=1e-6,
         clip_min=None,
         clip_max=None,
         scope=None):
  with tf.name_scope(scope, "virtual_adversarial_perturbation"):
    d = tf.random_normal(tf.shape(x), dtype=tf_dtype)
    for _ in range(num_iterations):
      d = xi * utils_tf.l2_batch_normalize(d)
      logits_d = model.get_logits(x + d)
      kl = utils_tf.kl_with_logits(logits, logits_d)
      Hd = tf.gradients(kl, d)[0]
      d = tf.stop_gradient(Hd)
    d = eps * utils_tf.l2_batch_normalize(d)
    adv_x = x + d
    if (clip_min is not None) and (clip_max is not None):
      adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
    return adv_x
|
[
"\n Tensorflow implementation of the perturbation method used for virtual\n adversarial training: https://arxiv.org/abs/1507.00677\n :param model: the model which returns the network unnormalized logits\n :param x: the input placeholder\n :param logits: the model's unnormalized output tensor (the input to\n the softmax layer)\n :param eps: the epsilon (input variation parameter)\n :param num_iterations: the number of iterations\n :param xi: the finite difference parameter\n :param clip_min: optional parameter that can be used to set a minimum\n value for components of the example returned\n :param clip_max: optional parameter that can be used to set a maximum\n value for components of the example returned\n :param seed: the seed for random generator\n :return: a tensor for the adversarial example\n "
] |
Please provide a description of the function:def generate(self, x, **kwargs):
  # Parse and save attack-specific parameters
  assert self.parse_params(**kwargs)
  return vatm(
      self.model,
      x,
      self.model.get_logits(x),
      eps=self.eps,
      num_iterations=self.num_iterations,
      xi=self.xi,
      clip_min=self.clip_min,
      clip_max=self.clip_max)
|
[
"\n Generate symbolic graph for adversarial examples and return.\n\n :param x: The model's symbolic inputs.\n :param kwargs: See `parse_params`\n "
] |
Please provide a description of the function:def parse_params(self,
                 eps=2.0,
                 nb_iter=None,
                 xi=1e-6,
                 clip_min=None,
                 clip_max=None,
                 num_iterations=None,
                 **kwargs):
  # Save attack-specific parameters
  self.eps = eps
  if num_iterations is not None:
    warnings.warn("`num_iterations` is deprecated. Switch to `nb_iter`."
                  " The old name will be removed on or after 2019-04-26.")
    # Note: when we remove the deprecated alias, we can put the default
    # value of 1 for nb_iter back in the method signature
    assert nb_iter is None
    nb_iter = num_iterations
    del num_iterations
  if nb_iter is None:
    nb_iter = 1
  self.num_iterations = nb_iter
  self.xi = xi
  self.clip_min = clip_min
  self.clip_max = clip_max
  if len(kwargs.keys()) > 0:
    warnings.warn("kwargs is unused and will be removed on or after "
                  "2019-04-26.")
  return True
|
[
"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n Attack-specific parameters:\n\n :param eps: (optional float )the epsilon (input variation parameter)\n :param nb_iter: (optional) the number of iterations\n Defaults to 1 if not specified\n :param xi: (optional float) the finite difference parameter\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n :param num_iterations: Deprecated alias for `nb_iter`\n "
] |
Please provide a description of the function:def iterate_with_exp_backoff(base_iter,
                             max_num_tries=6,
                             max_backoff=300.0,
                             start_backoff=4.0,
                             backoff_multiplier=2.0,
                             frac_random_backoff=0.25):
  try_number = 0
  if hasattr(base_iter, '__iter__'):
    base_iter = iter(base_iter)
  while True:
    try:
      yield next(base_iter)
      try_number = 0
    except StopIteration:
      break
    except TooManyRequests as e:
      logging.warning('TooManyRequests error: %s', tb.format_exc())
      if try_number >= max_num_tries:
        logging.error('Number of tries exceeded, too many requests: %s', e)
        raise
      # compute sleep time for truncated exponential backoff
      sleep_time = start_backoff * math.pow(backoff_multiplier, try_number)
      sleep_time *= (1.0 + frac_random_backoff * random.random())
      sleep_time = min(sleep_time, max_backoff)
      logging.warning('Too many requests error, '
                      'retrying with exponential backoff %.3f', sleep_time)
      time.sleep(sleep_time)
      try_number += 1
|
[
"Iterate with exponential backoff on failures.\n\n Useful to wrap results of datastore Query.fetch to avoid 429 error.\n\n Args:\n base_iter: basic iterator of generator object\n max_num_tries: maximum number of tries for each request\n max_backoff: maximum backoff, in seconds\n start_backoff: initial value of backoff\n backoff_multiplier: backoff multiplier\n frac_random_backoff: fraction of the value of random part of the backoff\n\n Yields:\n values of yielded by base iterator\n "
] |
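A minimal usage sketch, assuming `query` is a google.cloud.datastore query and `process` is a caller-supplied callback (both hypothetical names):
for entity in iterate_with_exp_backoff(query.fetch()):
  process(entity)  # transparently retried on TooManyRequests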
Please provide a description of the function:def list_blobs(self, prefix=''):
  return [b.name for b in self.bucket.list_blobs(prefix=prefix)]
|
[
"Lists names of all blobs by their prefix."
] |
Please provide a description of the function:def begin(self):
  if self._cur_batch:
    raise ValueError('Previous batch is not committed.')
  self._cur_batch = self._client.batch()
  self._cur_batch.begin()
  self._num_mutations = 0
|
[
"Begins a batch."
] |
Please provide a description of the function:def rollback(self):
  try:
    if self._cur_batch:
      self._cur_batch.rollback()
  except ValueError:
    # ignore "Batch must be in progress to rollback" error
    pass
  self._cur_batch = None
  self._num_mutations = 0
|
[
"Rolls back pending mutations.\n\n Keep in mind that NoTransactionBatch splits all mutations into smaller\n batches and commit them as soon as mutation buffer reaches maximum length.\n That's why rollback method will only roll back pending mutations from the\n buffer, but won't be able to rollback already committed mutations.\n "
] |
Please provide a description of the function:def put(self, entity):
  self._cur_batch.put(entity)
  self._num_mutations += 1
  if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:
    self.commit()
    self.begin()
|
[
"Adds mutation of the entity to the mutation buffer.\n\n If mutation buffer reaches its capacity then this method commit all pending\n mutations from the buffer and emties it.\n\n Args:\n entity: entity which should be put into the datastore\n "
] |
Please provide a description of the function:def delete(self, key):
  self._cur_batch.delete(key)
  self._num_mutations += 1
  if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:
    self.commit()
    self.begin()
|
[
"Adds deletion of the entity with given key to the mutation buffer.\n\n If mutation buffer reaches its capacity then this method commit all pending\n mutations from the buffer and emties it.\n\n Args:\n key: key of the entity which should be deleted\n "
] |
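A sketch of the begin/put/commit protocol implied by the methods above, assuming `writer` is an instance of this batching class and `entity` and `key` come from google.cloud.datastore (hypothetical names); `commit` is the method the buffer-flushing code above calls:
writer.begin()
writer.put(entity)  # buffered; auto-committed once MAX_MUTATIONS_IN_BATCH is reached
writer.delete(key)
writer.commit()     # flush any remaining mutations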
Please provide a description of the function:def get(self, key, transaction=None):
  return self._client.get(key, transaction=transaction)
|
[
"Retrieves an entity given its key."
] |
Please provide a description of the function:def mnist_tutorial_cw(train_start=0, train_end=60000, test_start=0,
test_end=10000, viz_enabled=VIZ_ENABLED,
nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
source_samples=SOURCE_SAMPLES,
learning_rate=LEARNING_RATE,
attack_iterations=ATTACK_ITERATIONS,
model_path=MODEL_PATH,
targeted=TARGETED):
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Create TF session
sess = tf.Session()
print("Created TensorFlow session.")
set_log_level(logging.DEBUG)
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
x_train, y_train = mnist.get_set('train')
x_test, y_test = mnist.get_set('test')
# Obtain Image Parameters
img_rows, img_cols, nchannels = x_train.shape[1:4]
nb_classes = y_train.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
nchannels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
nb_filters = 64
# Define TF model graph
model = ModelBasicCNN('model1', nb_classes, nb_filters)
preds = model.get_logits(x)
loss = CrossEntropy(model, smoothing=0.1)
print("Defined TensorFlow model graph.")
###########################################################################
# Training the model using TensorFlow
###########################################################################
# Train an MNIST model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate,
'filename': os.path.split(model_path)[-1]
}
rng = np.random.RandomState([2017, 8, 30])
# check if we've trained before, and if we have, use that pre-trained model
if os.path.exists(model_path + ".meta"):
tf_model_load(sess, model_path)
else:
train(sess, loss, x_train, y_train, args=train_params, rng=rng)
saver = tf.train.Saver()
saver.save(sess, model_path)
# Evaluate the accuracy of the MNIST model on legitimate test examples
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, preds, x_test, y_test, args=eval_params)
assert x_test.shape[0] == test_end - test_start, x_test.shape
print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
report.clean_train_clean_eval = accuracy
###########################################################################
# Craft adversarial examples using Carlini and Wagner's approach
###########################################################################
nb_adv_per_sample = str(nb_classes - 1) if targeted else '1'
print('Crafting ' + str(source_samples) + ' * ' + nb_adv_per_sample +
' adversarial examples')
print("This could take some time ...")
# Instantiate a CW attack object
cw = CarliniWagnerL2(model, sess=sess)
if viz_enabled:
assert source_samples == nb_classes
idxs = [np.where(np.argmax(y_test, axis=1) == i)[0][0]
for i in range(nb_classes)]
if targeted:
if viz_enabled:
# Initialize our array for grid visualization
grid_shape = (nb_classes, nb_classes, img_rows, img_cols,
nchannels)
grid_viz_data = np.zeros(grid_shape, dtype='f')
adv_inputs = np.array(
[[instance] * nb_classes for instance in x_test[idxs]],
dtype=np.float32)
else:
adv_inputs = np.array(
[[instance] * nb_classes for
instance in x_test[:source_samples]], dtype=np.float32)
one_hot = np.zeros((nb_classes, nb_classes))
one_hot[np.arange(nb_classes), np.arange(nb_classes)] = 1
adv_inputs = adv_inputs.reshape(
(source_samples * nb_classes, img_rows, img_cols, nchannels))
adv_ys = np.array([one_hot] * source_samples,
dtype=np.float32).reshape((source_samples *
nb_classes, nb_classes))
yname = "y_target"
else:
if viz_enabled:
# Initialize our array for grid visualization
grid_shape = (nb_classes, 2, img_rows, img_cols, nchannels)
grid_viz_data = np.zeros(grid_shape, dtype='f')
adv_inputs = x_test[idxs]
else:
adv_inputs = x_test[:source_samples]
adv_ys = None
yname = "y"
if targeted:
cw_params_batch_size = source_samples * nb_classes
else:
cw_params_batch_size = source_samples
cw_params = {'binary_search_steps': 1,
yname: adv_ys,
'max_iterations': attack_iterations,
'learning_rate': CW_LEARNING_RATE,
'batch_size': cw_params_batch_size,
'initial_const': 10}
adv = cw.generate_np(adv_inputs,
**cw_params)
eval_params = {'batch_size': np.minimum(nb_classes, source_samples)}
if targeted:
adv_accuracy = model_eval(
sess, x, y, preds, adv, adv_ys, args=eval_params)
else:
if viz_enabled:
err = model_eval(sess, x, y, preds, adv, y_test[idxs], args=eval_params)
adv_accuracy = 1 - err
else:
err = model_eval(sess, x, y, preds, adv, y_test[:source_samples],
args=eval_params)
adv_accuracy = 1 - err
if viz_enabled:
for j in range(nb_classes):
if targeted:
for i in range(nb_classes):
grid_viz_data[i, j] = adv[i * nb_classes + j]
else:
grid_viz_data[j, 0] = adv_inputs[j]
grid_viz_data[j, 1] = adv[j]
print(grid_viz_data.shape)
print('--------------------------------------')
# Compute the number of adversarial examples that were successfully found
print('Avg. rate of successful adv. examples {0:.4f}'.format(adv_accuracy))
report.clean_train_adv_eval = 1. - adv_accuracy
  # Compute the average L2 distortion introduced by the algorithm
  avg_l2_perturbation = np.mean(np.sum((adv - adv_inputs)**2,
                                       axis=(1, 2, 3))**.5)
  print('Avg. L_2 norm of perturbations {0:.4f}'.format(avg_l2_perturbation))
# Close TF session
sess.close()
# Finally, block & display a grid of all the adversarial examples
if viz_enabled:
_ = grid_visual(grid_viz_data)
return report
|
[
"\n MNIST tutorial for Carlini and Wagner's attack\n :param train_start: index of first training set example\n :param train_end: index of last training set example\n :param test_start: index of first test set example\n :param test_end: index of last test set example\n :param viz_enabled: (boolean) activate plots of adversarial examples\n :param nb_epochs: number of epochs to train model\n :param batch_size: size of training batches\n :param nb_classes: number of output classes\n :param source_samples: number of test inputs to attack\n :param learning_rate: learning rate for training\n :param model_path: path to the model file\n :param targeted: should we run a targeted attack? or untargeted?\n :return: an AccuracyReport object\n "
] |
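For reference, the targeted input/label layout built above (replicating each source image once per target class and pairing every copy with a distinct one-hot target) can be reproduced in isolation. The sketch below uses toy dimensions and random stand-in data; all names are illustrative.

import numpy as np

nb_classes, source_samples = 3, 3
img_rows = img_cols = 28
nchannels = 1
x_test = np.random.rand(source_samples, img_rows, img_cols, nchannels)

# Replicate each source image once per target class ...
adv_inputs = np.array([[instance] * nb_classes for instance in x_test],
                      dtype=np.float32)
adv_inputs = adv_inputs.reshape(
    (source_samples * nb_classes, img_rows, img_cols, nchannels))

# ... and pair every copy with a distinct one-hot target.
one_hot = np.eye(nb_classes, dtype=np.float32)
adv_ys = np.tile(one_hot, (source_samples, 1))
assert adv_ys.shape == (source_samples * nb_classes, nb_classes)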
Please provide a description of the function:def attack_selection(attack_string):
# List of Implemented attacks
attacks_list = AVAILABLE_ATTACKS.keys()
# Checking for requested attack in list of available attacks.
if attack_string is None:
raise AttributeError("Attack type is not specified, "
"list of available attacks\t".join(attacks_list))
if attack_string not in attacks_list:
raise AttributeError("Attack not available "
"list of available attacks\t".join(attacks_list))
# Mapping attack from string to class.
attack_class = AVAILABLE_ATTACKS[attack_string]
return attack_class
|
[
"\n Selects the Attack Class using string input.\n :param attack_string: adversarial attack name in string format\n :return: attack class defined in cleverhans.attacks_eager\n "
] |
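A minimal usage sketch, assuming AVAILABLE_ATTACKS maps attack names to eager attack classes. The registry contents below are illustrative, not the module's actual definition, and the attack classes are assumed to be imported from cleverhans.attacks_eager:

# Hypothetical registry; the real AVAILABLE_ATTACKS is defined elsewhere
# in the tutorial module.
AVAILABLE_ATTACKS = {'FGSM': FastGradientMethod,
                     'BIM': BasicIterativeMethod}

attack_class = attack_selection('FGSM')  # -> FastGradientMethod
attack = attack_class(model)             # instantiate on an eager model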
Please provide a description of the function:def mnist_tutorial(train_start=0, train_end=60000, test_start=0,
test_end=10000, nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
learning_rate=LEARNING_RATE,
clean_train=True,
testing=False,
backprop_through_attack=False,
nb_filters=NB_FILTERS, num_threads=None,
attack_string=None):
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Set logging level to see debug information
set_log_level(logging.DEBUG)
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
X_train, Y_train = mnist.get_set('train')
X_test, Y_test = mnist.get_set('test')
# Use label smoothing
assert Y_train.shape[1] == 10
label_smooth = .1
Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)
# Train an MNIST model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate
}
# Initialize the attack object
attack_class = attack_selection(attack_string)
attack_params = {'eps': 0.3, 'clip_min': 0.,
'clip_max': 1.}
rng = np.random.RandomState([2018, 6, 18])
if clean_train:
model = ModelBasicCNNTFE(nb_filters=nb_filters)
def evaluate_clean():
eval_params = {'batch_size': batch_size}
acc = model_eval(model, X_test, Y_test, args=eval_params)
report.clean_train_clean_eval = acc
assert X_test.shape[0] == test_end - test_start, X_test.shape
print('Test accuracy on legitimate examples: %0.4f' % acc)
train(model, X_train, Y_train, evaluate=evaluate_clean,
args=train_params, rng=rng, var_list=model.get_params())
if testing:
# Calculate training error
eval_params = {'batch_size': batch_size}
acc = model_eval(model, X_train, Y_train, args=eval_params)
report.train_clean_train_clean_eval = acc
# Evaluate the accuracy of the MNIST model on adversarial examples
eval_par = {'batch_size': batch_size}
attack = attack_class(model)
acc = model_eval(
model, X_test, Y_test, args=eval_par,
attack=attack, attack_args=attack_params)
print('Test accuracy on adversarial examples: %0.4f\n' % acc)
report.clean_train_adv_eval = acc
# Calculate training error
if testing:
eval_par = {'batch_size': batch_size}
acc = model_eval(
model, X_train, Y_train, args=eval_par,
attack=attack, attack_args=attack_params)
print('Train accuracy on adversarial examples: %0.4f\n' % acc)
report.train_clean_train_adv_eval = acc
attack = None
print("Repeating the process, using adversarial training")
model_adv_train = ModelBasicCNNTFE(nb_filters=nb_filters)
attack = attack_class(model_adv_train)
def evaluate_adv():
# Accuracy of adversarially trained model on legitimate test inputs
eval_params = {'batch_size': batch_size}
accuracy = model_eval(
model_adv_train, X_test, Y_test,
args=eval_params)
print('Test accuracy on legitimate examples: %0.4f' % accuracy)
report.adv_train_clean_eval = accuracy
# Accuracy of the adversarially trained model on adversarial examples
accuracy = model_eval(
model_adv_train, X_test, Y_test,
args=eval_params, attack=attack,
attack_args=attack_params)
print('Test accuracy on adversarial examples: %0.4f' % accuracy)
report.adv_train_adv_eval = accuracy
# Perform and evaluate adversarial training
train(model_adv_train, X_train, Y_train, evaluate=evaluate_adv,
args=train_params, rng=rng,
var_list=model_adv_train.get_params(),
attack=attack, attack_args=attack_params)
# Calculate training errors
if testing:
eval_params = {'batch_size': batch_size}
accuracy = model_eval(
model_adv_train, X_train, Y_train, args=eval_params,
attack=None, attack_args=None)
report.train_adv_train_clean_eval = accuracy
accuracy = model_eval(
model_adv_train, X_train, Y_train, args=eval_params,
attack=attack, attack_args=attack_params)
report.train_adv_train_adv_eval = accuracy
return report
|
[
"\n MNIST cleverhans tutorial\n :param train_start: index of first training set example.\n :param train_end: index of last training set example.\n :param test_start: index of first test set example.\n :param test_end: index of last test set example.\n :param nb_epochs: number of epochs to train model.\n :param batch_size: size of training batches.\n :param learning_rate: learning rate for training.\n :param clean_train: perform normal training on clean examples only\n before performing adversarial training.\n :param testing: if true, complete an AccuracyReport for unit tests\n to verify that performance is adequate.\n :param backprop_through_attack: If True, backprop through adversarial\n example construction process during\n adversarial training.\n :param nb_filters: number of filters in the CNN used for training.\n :param num_threads: number of threads used for running the process.\n :param attack_string: attack name for crafting adversarial attacks and\n adversarial training, in string format.\n :return: an AccuracyReport object\n ",
"Evaluate the accuracy of the MNIST model on legitimate test\n examples\n "
] |
Please provide a description of the function:def sudo_remove_dirtree(dir_name):
try:
subprocess.check_output(['sudo', 'rm', '-rf', dir_name])
except subprocess.CalledProcessError as e:
    raise WorkerError("Can't remove directory {0}".format(dir_name), e)
|
[
"Removes directory tree as a superuser.\n\n Args:\n dir_name: name of the directory to remove.\n\n This function is necessary to cleanup directories created from inside a\n Docker, since they usually written as a root, thus have to be removed as a\n root.\n "
] |
Please provide a description of the function:def main(args):
title = '## Starting evaluation of round {0} ##'.format(args.round_name)
  # Print a symmetric banner around the round title.
  logging.info('\n'
               + '#' * len(title) + '\n'
               + '#' * len(title) + '\n'
               + '##' + ' ' * (len(title) - 4) + '##' + '\n'
               + title + '\n'
               + '##' + ' ' * (len(title) - 4) + '##' + '\n'
               + '#' * len(title) + '\n'
               + '#' * len(title) + '\n')
if args.blacklisted_submissions:
logging.warning('BLACKLISTED SUBMISSIONS: %s',
args.blacklisted_submissions)
random.seed()
logging.info('Running nvidia-docker to ensure that GPU works')
shell_call(['docker', 'run', '--runtime=nvidia',
'--rm', 'nvidia/cuda', 'nvidia-smi'])
eval_worker = EvaluationWorker(
worker_id=args.worker_id,
storage_client=eval_lib.CompetitionStorageClient(
args.project_id, args.storage_bucket),
datastore_client=eval_lib.CompetitionDatastoreClient(
args.project_id, args.round_name),
storage_bucket=args.storage_bucket,
round_name=args.round_name,
dataset_name=args.dataset_name,
blacklisted_submissions=args.blacklisted_submissions,
num_defense_shards=args.num_defense_shards)
eval_worker.run_work()
|
[
"Main function which runs worker."
] |
Please provide a description of the function:def download(self):
# Structure of the download directory:
# submission_dir=LOCAL_SUBMISSIONS_DIR/submission_id
# submission_dir/s.ext <-- archived submission
# submission_dir/extracted <-- extracted submission
# Check whether submission is already there
if self.extracted_submission_dir:
return
self.submission_dir = os.path.join(LOCAL_SUBMISSIONS_DIR,
self.submission_id)
if (os.path.isdir(self.submission_dir)
and os.path.isdir(os.path.join(self.submission_dir, 'extracted'))):
# submission already there, just re-read metadata
self.extracted_submission_dir = os.path.join(self.submission_dir,
'extracted')
with open(os.path.join(self.extracted_submission_dir, 'metadata.json'),
'r') as f:
meta_json = json.load(f)
self.container_name = str(meta_json[METADATA_CONTAINER])
self.entry_point = str(meta_json[METADATA_ENTRY_POINT])
return
# figure out submission location in the Cloud and determine extractor
submission_cloud_path = os.path.join('gs://', self.storage_bucket,
self.submission.path)
extract_command_tmpl = None
extension = None
for k, v in iteritems(EXTRACT_COMMAND):
if submission_cloud_path.endswith(k):
extension = k
extract_command_tmpl = v
break
if not extract_command_tmpl:
raise WorkerError('Unsupported submission extension')
# download archive
try:
os.makedirs(self.submission_dir)
tmp_extract_dir = os.path.join(self.submission_dir, 'tmp')
os.makedirs(tmp_extract_dir)
download_path = os.path.join(self.submission_dir, 's' + extension)
try:
logging.info('Downloading submission from %s to %s',
submission_cloud_path, download_path)
shell_call(['gsutil', 'cp', submission_cloud_path, download_path])
except subprocess.CalledProcessError as e:
        raise WorkerError("Can't copy submission locally", e)
# extract archive
try:
shell_call(extract_command_tmpl,
src=download_path, dst=tmp_extract_dir)
except subprocess.CalledProcessError as e:
# proceed even if extraction returned non zero error code,
# sometimes it's just warning
logging.warning('Submission extraction returned non-zero error code. '
'It may be just a warning, continuing execution. '
'Error: %s', e)
try:
make_directory_writable(tmp_extract_dir)
except subprocess.CalledProcessError as e:
        raise WorkerError("Can't make submission directory writable", e)
# determine root of the submission
tmp_root_dir = tmp_extract_dir
root_dir_content = [d for d in os.listdir(tmp_root_dir)
if d != '__MACOSX']
if (len(root_dir_content) == 1
and os.path.isdir(os.path.join(tmp_root_dir, root_dir_content[0]))):
tmp_root_dir = os.path.join(tmp_root_dir, root_dir_content[0])
# move files to extract subdirectory
self.extracted_submission_dir = os.path.join(self.submission_dir,
'extracted')
try:
      shell_call(['mv', tmp_root_dir, self.extracted_submission_dir])
except subprocess.CalledProcessError as e:
        raise WorkerError("Can't move submission files", e)
# read metadata file
try:
with open(os.path.join(self.extracted_submission_dir, 'metadata.json'),
'r') as f:
meta_json = json.load(f)
except IOError as e:
        raise WorkerError(
            'Can\'t read metadata.json for submission "{0}"'.format(
                self.submission_id),
            e)
try:
self.container_name = str(meta_json[METADATA_CONTAINER])
self.entry_point = str(meta_json[METADATA_ENTRY_POINT])
type_from_meta = METADATA_JSON_TYPE_TO_TYPE[meta_json[METADATA_TYPE]]
except KeyError as e:
raise WorkerError('Invalid metadata.json file', e)
if type_from_meta != self.type:
raise WorkerError('Inconsistent submission type in metadata: '
+ type_from_meta + ' vs ' + self.type)
except WorkerError as e:
self.extracted_submission_dir = None
sudo_remove_dirtree(self.submission_dir)
raise
|
[
"Method which downloads submission to local directory."
] |
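The extension lookup in download() iterates over an EXTRACT_COMMAND mapping defined elsewhere in the worker. A plausible shape for it, with {src}/{dst} placeholders that shell_call is assumed to fill in, is sketched below; these are illustrative values, not the exact definition:

# Illustrative only: maps archive extension to a shell command template.
EXTRACT_COMMAND = {
    '.zip': ['unzip', '{src}', '-d', '{dst}'],
    '.tar': ['tar', 'xvf', '{src}', '-C', '{dst}'],
    '.tar.gz': ['tar', 'xvzf', '{src}', '-C', '{dst}'],
}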
Please provide a description of the function:def temp_copy_extracted_submission(self):
tmp_copy_dir = os.path.join(self.submission_dir, 'tmp_copy')
    shell_call(['cp', '-R', self.extracted_submission_dir, tmp_copy_dir])
return tmp_copy_dir
|
[
"Creates a temporary copy of extracted submission.\n\n When executed, submission is allowed to modify it's own directory. So\n to ensure that submission does not pass any data between runs, new\n copy of the submission is made before each run. After a run temporary copy\n of submission is deleted.\n\n Returns:\n directory where temporary copy is located\n "
] |
Please provide a description of the function:def run_without_time_limit(self, cmd):
cmd = [DOCKER_BINARY, 'run', DOCKER_NVIDIA_RUNTIME] + cmd
logging.info('Docker command: %s', ' '.join(cmd))
start_time = time.time()
retval = subprocess.call(cmd)
elapsed_time_sec = int(time.time() - start_time)
logging.info('Elapsed time of attack: %d', elapsed_time_sec)
logging.info('Docker retval: %d', retval)
if retval != 0:
logging.warning('Docker returned non-zero retval: %d', retval)
raise WorkerError('Docker returned non-zero retval ' + str(retval))
return elapsed_time_sec
|
[
"Runs docker command without time limit.\n\n Args:\n cmd: list with the command line arguments which are passed to docker\n binary\n\n Returns:\n how long it took to run submission in seconds\n\n Raises:\n WorkerError: if error occurred during execution of the submission\n "
] |
Please provide a description of the function:def run_with_time_limit(self, cmd, time_limit=SUBMISSION_TIME_LIMIT):
if time_limit < 0:
return self.run_without_time_limit(cmd)
container_name = str(uuid.uuid4())
cmd = [DOCKER_BINARY, 'run', DOCKER_NVIDIA_RUNTIME,
'--detach', '--name', container_name] + cmd
logging.info('Docker command: %s', ' '.join(cmd))
logging.info('Time limit %d seconds', time_limit)
retval = subprocess.call(cmd)
start_time = time.time()
elapsed_time_sec = 0
while is_docker_still_running(container_name):
elapsed_time_sec = int(time.time() - start_time)
if elapsed_time_sec < time_limit:
time.sleep(1)
else:
kill_docker_container(container_name)
        logging.warning('Submission was killed because it ran out of time')
logging.info('Elapsed time of submission: %d', elapsed_time_sec)
logging.info('Docker retval: %d', retval)
if retval != 0:
logging.warning('Docker returned non-zero retval: %d', retval)
raise WorkerError('Docker returned non-zero retval ' + str(retval))
return elapsed_time_sec
|
[
"Runs docker command and enforces time limit.\n\n Args:\n cmd: list with the command line arguments which are passed to docker\n binary after run\n time_limit: time limit, in seconds. Negative value means no limit.\n\n Returns:\n how long it took to run submission in seconds\n\n Raises:\n WorkerError: if error occurred during execution of the submission\n "
] |
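run_with_time_limit polls is_docker_still_running and calls kill_docker_container once the limit is exceeded. Minimal sketches of such helpers are shown below; these are assumptions about their behavior, not the worker's exact implementations:

import subprocess

def is_docker_still_running(container_name):
  # 'docker ps' lists only running containers.
  out = subprocess.check_output(['docker', 'ps', '--format', '{{.Names}}'])
  return container_name in out.decode('utf-8').split()

def kill_docker_container(container_name):
  # Force-kill the container; a non-zero exit (already stopped) is ignored.
  subprocess.call(['docker', 'kill', container_name])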
Please provide a description of the function:def run(self, input_dir, output_dir, epsilon):
logging.info('Running attack %s', self.submission_id)
tmp_run_dir = self.temp_copy_extracted_submission()
cmd = ['--network=none',
'-m=24g',
'--cpus=3.75',
'-v', '{0}:/input_images:ro'.format(input_dir),
'-v', '{0}:/output_images'.format(output_dir),
'-v', '{0}:/code'.format(tmp_run_dir),
'-w', '/code',
self.container_name,
'./' + self.entry_point,
'/input_images',
'/output_images',
str(epsilon)]
elapsed_time_sec = self.run_with_time_limit(cmd)
sudo_remove_dirtree(tmp_run_dir)
return elapsed_time_sec
|
[
"Runs attack inside Docker.\n\n Args:\n input_dir: directory with input (dataset).\n output_dir: directory where output (adversarial images) should be written.\n epsilon: maximum allowed size of adversarial perturbation,\n should be in range [0, 255].\n\n Returns:\n how long it took to run submission in seconds\n "
] |
Please provide a description of the function:def run(self, input_dir, output_file_path):
logging.info('Running defense %s', self.submission_id)
tmp_run_dir = self.temp_copy_extracted_submission()
output_dir = os.path.dirname(output_file_path)
output_filename = os.path.basename(output_file_path)
cmd = ['--network=none',
'-m=24g',
'--cpus=3.75',
'-v', '{0}:/input_images:ro'.format(input_dir),
'-v', '{0}:/output_data'.format(output_dir),
'-v', '{0}:/code'.format(tmp_run_dir),
'-w', '/code',
self.container_name,
'./' + self.entry_point,
'/input_images',
'/output_data/' + output_filename]
elapsed_time_sec = self.run_with_time_limit(cmd)
sudo_remove_dirtree(tmp_run_dir)
return elapsed_time_sec
|
[
"Runs defense inside Docker.\n\n Args:\n input_dir: directory with input (adversarial images).\n output_file_path: path of the output file.\n\n Returns:\n how long it took to run submission in seconds\n "
] |
Please provide a description of the function:def read_dataset_metadata(self):
if self.dataset_meta:
return
shell_call(['gsutil', 'cp',
'gs://' + self.storage_client.bucket_name + '/'
+ 'dataset/' + self.dataset_name + '_dataset.csv',
LOCAL_DATASET_METADATA_FILE])
with open(LOCAL_DATASET_METADATA_FILE, 'r') as f:
self.dataset_meta = eval_lib.DatasetMetadata(f)
|
[
"Read `dataset_meta` field from bucket"
] |
Please provide a description of the function:def fetch_attacks_data(self):
if self.attacks_data_initialized:
return
# init data from datastore
self.submissions.init_from_datastore()
self.dataset_batches.init_from_datastore()
self.adv_batches.init_from_datastore()
# copy dataset locally
if not os.path.exists(LOCAL_DATASET_DIR):
os.makedirs(LOCAL_DATASET_DIR)
eval_lib.download_dataset(self.storage_client, self.dataset_batches,
LOCAL_DATASET_DIR,
os.path.join(LOCAL_DATASET_COPY,
self.dataset_name, 'images'))
# download dataset metadata
self.read_dataset_metadata()
# mark as initialized
self.attacks_data_initialized = True
|
[
"Initializes data necessary to execute attacks.\n\n This method could be called multiple times, only first call does\n initialization, subsequent calls are noop.\n "
] |
Please provide a description of the function:def run_attack_work(self, work_id):
adv_batch_id = (
self.attack_work.work[work_id]['output_adversarial_batch_id'])
adv_batch = self.adv_batches[adv_batch_id]
dataset_batch_id = adv_batch['dataset_batch_id']
submission_id = adv_batch['submission_id']
epsilon = self.dataset_batches[dataset_batch_id]['epsilon']
logging.info('Attack work piece: '
'dataset_batch_id="%s" submission_id="%s" '
'epsilon=%d', dataset_batch_id, submission_id, epsilon)
if submission_id in self.blacklisted_submissions:
raise WorkerError('Blacklisted submission')
# get attack
attack = AttackSubmission(submission_id, self.submissions,
self.storage_bucket)
attack.download()
# prepare input
input_dir = os.path.join(LOCAL_DATASET_DIR, dataset_batch_id)
if attack.type == TYPE_TARGETED:
# prepare file with target classes
target_class_filename = os.path.join(input_dir, 'target_class.csv')
self.dataset_meta.save_target_classes_for_batch(target_class_filename,
self.dataset_batches,
dataset_batch_id)
# prepare output directory
if os.path.exists(LOCAL_OUTPUT_DIR):
sudo_remove_dirtree(LOCAL_OUTPUT_DIR)
os.mkdir(LOCAL_OUTPUT_DIR)
if os.path.exists(LOCAL_PROCESSED_OUTPUT_DIR):
shutil.rmtree(LOCAL_PROCESSED_OUTPUT_DIR)
os.mkdir(LOCAL_PROCESSED_OUTPUT_DIR)
if os.path.exists(LOCAL_ZIPPED_OUTPUT_DIR):
shutil.rmtree(LOCAL_ZIPPED_OUTPUT_DIR)
os.mkdir(LOCAL_ZIPPED_OUTPUT_DIR)
# run attack
elapsed_time_sec = attack.run(input_dir, LOCAL_OUTPUT_DIR, epsilon)
if attack.type == TYPE_TARGETED:
# remove target class file
os.remove(target_class_filename)
# enforce epsilon and compute hashes
image_hashes = eval_lib.enforce_epsilon_and_compute_hash(
input_dir, LOCAL_OUTPUT_DIR, LOCAL_PROCESSED_OUTPUT_DIR, epsilon)
if not image_hashes:
logging.warning('No images saved by the attack.')
return elapsed_time_sec, submission_id
# write images back to datastore
# rename images and add information to adversarial batch
for clean_image_id, hash_val in iteritems(image_hashes):
# we will use concatenation of batch_id and image_id
# as adversarial image id and as a filename of adversarial images
adv_img_id = adv_batch_id + '_' + clean_image_id
# rename the image
os.rename(
os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, clean_image_id + '.png'),
os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, adv_img_id + '.png'))
# populate values which will be written to datastore
image_path = '{0}/adversarial_images/{1}/{1}.zip/{2}.png'.format(
self.round_name, adv_batch_id, adv_img_id)
      # u'' + foo is a Python 2/3 compatible way of casting foo to unicode
adv_batch['images'][adv_img_id] = {
'clean_image_id': u'' + str(clean_image_id),
'image_path': u'' + str(image_path),
'image_hash': u'' + str(hash_val),
}
# archive all images and copy to storage
zipped_images_filename = os.path.join(LOCAL_ZIPPED_OUTPUT_DIR,
adv_batch_id + '.zip')
try:
logging.debug('Compressing adversarial images to %s',
zipped_images_filename)
shell_call([
'zip', '-j', '-r', zipped_images_filename,
LOCAL_PROCESSED_OUTPUT_DIR])
except subprocess.CalledProcessError as e:
      raise WorkerError("Can't make archive from adversarial images", e)
# upload archive to storage
dst_filename = '{0}/adversarial_images/{1}/{1}.zip'.format(
self.round_name, adv_batch_id)
logging.debug(
'Copying archive with adversarial images to %s', dst_filename)
self.storage_client.new_blob(dst_filename).upload_from_filename(
zipped_images_filename)
# writing adv batch to datastore
logging.debug('Writing adversarial batch to datastore')
self.adv_batches.write_single_batch_images_to_datastore(adv_batch_id)
return elapsed_time_sec, submission_id
|
[
"Runs one attack work.\n\n Args:\n work_id: ID of the piece of work to run\n\n Returns:\n elapsed_time_sec, submission_id - elapsed time and id of the submission\n\n Raises:\n WorkerError: if error occurred during execution.\n "
] |
Please provide a description of the function:def run_attacks(self):
logging.info('******** Start evaluation of attacks ********')
prev_submission_id = None
while True:
# wait until work is available
self.attack_work.read_all_from_datastore()
if not self.attack_work.work:
logging.info('Work is not populated, waiting...')
time.sleep(SLEEP_TIME)
continue
if self.attack_work.is_all_work_competed():
logging.info('All attack work completed.')
break
# download all attacks data and dataset
self.fetch_attacks_data()
# pick piece of work
work_id = self.attack_work.try_pick_piece_of_work(
self.worker_id, submission_id=prev_submission_id)
if not work_id:
logging.info('Failed to pick work, waiting...')
time.sleep(SLEEP_TIME_SHORT)
continue
logging.info('Selected work_id: %s', work_id)
# execute work
try:
elapsed_time_sec, prev_submission_id = self.run_attack_work(work_id)
logging.info('Work %s is done', work_id)
# indicate that work is completed
is_work_update = self.attack_work.update_work_as_completed(
self.worker_id, work_id,
other_values={'elapsed_time': elapsed_time_sec})
except WorkerError as e:
logging.info('Failed to run work:\n%s', str(e))
is_work_update = self.attack_work.update_work_as_completed(
self.worker_id, work_id, error=str(e))
if not is_work_update:
        logging.warning('Can\'t update work "%s" as completed by worker %d',
                        work_id, self.worker_id)
logging.info('******** Finished evaluation of attacks ********')
|
[
"Method which evaluates all attack work.\n\n In a loop this method queries not completed attack work, picks one\n attack work and runs it.\n "
] |
Please provide a description of the function:def fetch_defense_data(self):
if self.defenses_data_initialized:
return
logging.info('Fetching defense data from datastore')
# init data from datastore
self.submissions.init_from_datastore()
self.dataset_batches.init_from_datastore()
self.adv_batches.init_from_datastore()
# read dataset metadata
self.read_dataset_metadata()
# mark as initialized
self.defenses_data_initialized = True
|
[
"Lazy initialization of data necessary to execute defenses."
] |
Please provide a description of the function:def run_defense_work(self, work_id):
class_batch_id = (
self.defense_work.work[work_id]['output_classification_batch_id'])
class_batch = self.class_batches.read_batch_from_datastore(class_batch_id)
adversarial_batch_id = class_batch['adversarial_batch_id']
submission_id = class_batch['submission_id']
cloud_result_path = class_batch['result_path']
logging.info('Defense work piece: '
'adversarial_batch_id="%s" submission_id="%s"',
adversarial_batch_id, submission_id)
if submission_id in self.blacklisted_submissions:
raise WorkerError('Blacklisted submission')
# get defense
defense = DefenseSubmission(submission_id, self.submissions,
self.storage_bucket)
defense.download()
# prepare input - copy adversarial batch locally
input_dir = os.path.join(LOCAL_INPUT_DIR, adversarial_batch_id)
if os.path.exists(input_dir):
sudo_remove_dirtree(input_dir)
os.makedirs(input_dir)
try:
shell_call([
'gsutil', '-m', 'cp',
# typical location of adv batch:
# testing-round/adversarial_images/ADVBATCH000/
os.path.join('gs://', self.storage_bucket, self.round_name,
'adversarial_images', adversarial_batch_id, '*'),
input_dir
])
adv_images_files = os.listdir(input_dir)
if (len(adv_images_files) == 1) and adv_images_files[0].endswith('.zip'):
logging.info('Adversarial batch is in zip archive %s',
adv_images_files[0])
shell_call([
'unzip', os.path.join(input_dir, adv_images_files[0]),
'-d', input_dir
])
os.remove(os.path.join(input_dir, adv_images_files[0]))
adv_images_files = os.listdir(input_dir)
logging.info('%d adversarial images copied', len(adv_images_files))
except (subprocess.CalledProcessError, IOError) as e:
      raise WorkerError("Can't copy adversarial batch locally", e)
# prepare output directory
if os.path.exists(LOCAL_OUTPUT_DIR):
sudo_remove_dirtree(LOCAL_OUTPUT_DIR)
os.mkdir(LOCAL_OUTPUT_DIR)
    output_filename = os.path.join(LOCAL_OUTPUT_DIR, 'result.csv')
# run defense
    elapsed_time_sec = defense.run(input_dir, output_filename)
# evaluate defense result
batch_result = eval_lib.analyze_one_classification_result(
storage_client=None,
        file_path=output_filename,
adv_batch=self.adv_batches.data[adversarial_batch_id],
dataset_batches=self.dataset_batches,
dataset_meta=self.dataset_meta)
# copy result of the defense into storage
try:
shell_call([
          'gsutil', 'cp', output_filename,
os.path.join('gs://', self.storage_bucket, cloud_result_path)
])
except subprocess.CalledProcessError as e:
      raise WorkerError("Can't copy result to Cloud Storage", e)
return elapsed_time_sec, submission_id, batch_result
|
[
"Runs one defense work.\n\n Args:\n work_id: ID of the piece of work to run\n\n Returns:\n elapsed_time_sec, submission_id - elapsed time and id of the submission\n\n Raises:\n WorkerError: if error occurred during execution.\n "
] |
Please provide a description of the function:def run_defenses(self):
logging.info('******** Start evaluation of defenses ********')
prev_submission_id = None
need_reload_work = True
while True:
# wait until work is available
if need_reload_work:
if self.num_defense_shards:
shard_with_work = self.defense_work.read_undone_from_datastore(
shard_id=(self.worker_id % self.num_defense_shards),
num_shards=self.num_defense_shards)
else:
shard_with_work = self.defense_work.read_undone_from_datastore()
logging.info('Loaded %d records of undone work from shard %s',
len(self.defense_work), str(shard_with_work))
if not self.defense_work.work:
logging.info('Work is not populated, waiting...')
time.sleep(SLEEP_TIME)
continue
if self.defense_work.is_all_work_competed():
logging.info('All defense work completed.')
break
# download all defense data and dataset
self.fetch_defense_data()
need_reload_work = False
# pick piece of work
work_id = self.defense_work.try_pick_piece_of_work(
self.worker_id, submission_id=prev_submission_id)
if not work_id:
need_reload_work = True
logging.info('Failed to pick work, waiting...')
time.sleep(SLEEP_TIME_SHORT)
continue
logging.info('Selected work_id: %s', work_id)
# execute work
try:
elapsed_time_sec, prev_submission_id, batch_result = (
self.run_defense_work(work_id))
logging.info('Work %s is done', work_id)
# indicate that work is completed
is_work_update = self.defense_work.update_work_as_completed(
self.worker_id, work_id,
other_values={'elapsed_time': elapsed_time_sec,
'stat_correct': batch_result[0],
'stat_error': batch_result[1],
'stat_target_class': batch_result[2],
'stat_num_images': batch_result[3]})
except WorkerError as e:
logging.info('Failed to run work:\n%s', str(e))
if str(e).startswith('Docker returned non-zero retval'):
logging.info('Running nvidia-docker to ensure that GPU works')
shell_call(['nvidia-docker', 'run', '--rm', 'nvidia/cuda',
'nvidia-smi'])
is_work_update = self.defense_work.update_work_as_completed(
self.worker_id, work_id, error=str(e))
if not is_work_update:
        logging.warning('Can\'t update work "%s" as completed by worker %d',
                        work_id, self.worker_id)
need_reload_work = True
logging.info('******** Finished evaluation of defenses ********')
|
[
"Method which evaluates all defense work.\n\n In a loop this method queries not completed defense work,\n picks one defense work and runs it.\n "
] |
Please provide a description of the function:def run_work(self):
if os.path.exists(LOCAL_EVAL_ROOT_DIR):
sudo_remove_dirtree(LOCAL_EVAL_ROOT_DIR)
self.run_attacks()
self.run_defenses()
|
[
"Run attacks and defenses"
] |
Please provide a description of the function:def arg_type(arg_names, kwargs):
assert isinstance(arg_names, tuple)
passed = tuple(name in kwargs for name in arg_names)
passed_and_not_none = []
for name in arg_names:
if name in kwargs:
passed_and_not_none.append(kwargs[name] is not None)
else:
passed_and_not_none.append(False)
passed_and_not_none = tuple(passed_and_not_none)
dtypes = []
for name in arg_names:
if name not in kwargs:
dtypes.append(None)
continue
value = kwargs[name]
if value is None:
dtypes.append(None)
continue
assert hasattr(value, 'dtype'), type(value)
dtype = value.dtype
if not isinstance(dtype, np.dtype):
dtype = dtype.as_np_dtype
assert isinstance(dtype, np.dtype)
dtypes.append(dtype)
dtypes = tuple(dtypes)
return (passed, passed_and_not_none, dtypes)
|
[
"\n Returns a hashable summary of the types of arg_names within kwargs.\n :param arg_names: tuple containing names of relevant arguments\n :param kwargs: dict mapping string argument names to values.\n These must be values for which we can create a tf placeholder.\n Currently supported: numpy darray or something that can ducktype it\n returns:\n API contract is to return a hashable object describing all\n structural consequences of argument values that can otherwise\n be fed into a graph of fixed structure.\n Currently this is implemented as a tuple of tuples that track:\n - whether each argument was passed\n - whether each argument was passed and not None\n - the dtype of each argument\n Callers shouldn't rely on the exact structure of this object,\n just its hashability and one-to-one mapping between graph structures.\n "
] |
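A quick illustration of the returned summary, using toy values:

import numpy as np

kwargs = {'y': np.zeros((4, 10), dtype=np.float32), 'y_target': None}
summary = arg_type(('y', 'y_target', 'eps'), kwargs)
# summary == ((True, True, False),                # passed?
#             (True, False, False),               # passed and not None?
#             (np.dtype('float32'), None, None))  # per-argument dtypes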
Please provide a description of the function:def construct_graph(self, fixed, feedable, x_val, hash_key):
# try our very best to create a TF placeholder for each of the
# feedable keyword arguments, and check the types are one of
# the allowed types
class_name = str(self.__class__).split(".")[-1][:-2]
_logger.info("Constructing new graph for attack " + class_name)
# remove the None arguments, they are just left blank
for k in list(feedable.keys()):
if feedable[k] is None:
del feedable[k]
# process all of the rest and create placeholders for them
new_kwargs = dict(x for x in fixed.items())
for name, value in feedable.items():
given_type = value.dtype
if isinstance(value, np.ndarray):
if value.ndim == 0:
# This is pretty clearly not a batch of data
new_kwargs[name] = tf.placeholder(given_type, shape=[], name=name)
else:
# Assume that this is a batch of data, make the first axis variable
# in size
new_shape = [None] + list(value.shape[1:])
new_kwargs[name] = tf.placeholder(given_type, new_shape, name=name)
elif isinstance(value, utils.known_number_types):
new_kwargs[name] = tf.placeholder(given_type, shape=[], name=name)
else:
raise ValueError("Could not identify type of argument " +
name + ": " + str(value))
# x is a special placeholder we always want to have
x_shape = [None] + list(x_val.shape)[1:]
x = tf.placeholder(self.tf_dtype, shape=x_shape)
# now we generate the graph that we want
x_adv = self.generate(x, **new_kwargs)
self.graphs[hash_key] = (x, new_kwargs, x_adv)
if len(self.graphs) >= 10:
warnings.warn("Calling generate_np() with multiple different "
"structural parameters is inefficient and should"
" be avoided. Calling generate() is preferred.")
|
[
"\n Construct the graph required to run the attack through generate_np.\n\n :param fixed: Structural elements that require defining a new graph.\n :param feedable: Arguments that can be fed to the same graph when\n they take different values.\n :param x_val: symbolic adversarial example\n :param hash_key: the key used to store this graph in our cache\n "
] |
Please provide a description of the function:def generate_np(self, x_val, **kwargs):
if self.sess is None:
raise ValueError("Cannot use `generate_np` when no `sess` was"
" provided")
packed = self.construct_variables(kwargs)
fixed, feedable, _, hash_key = packed
if hash_key not in self.graphs:
self.construct_graph(fixed, feedable, x_val, hash_key)
else:
# remove the None arguments, they are just left blank
for k in list(feedable.keys()):
if feedable[k] is None:
del feedable[k]
x, new_kwargs, x_adv = self.graphs[hash_key]
feed_dict = {x: x_val}
for name in feedable:
feed_dict[new_kwargs[name]] = feedable[name]
return self.sess.run(x_adv, feed_dict)
|
[
"\n Generate adversarial examples and return them as a NumPy array.\n Sub-classes *should not* implement this method unless they must\n perform special handling of arguments.\n\n :param x_val: A NumPy array with the original inputs.\n :param **kwargs: optional parameters used by child classes.\n :return: A NumPy array holding the adversarial examples.\n "
] |
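A typical call pattern; the attack class and parameter values here are illustrative. The cached graph is reused across calls whose arguments produce the same arg_type summary:

# `model` is a cleverhans Model and `sess` a live tf.Session.
fgsm = FastGradientMethod(model, sess=sess)
x_adv = fgsm.generate_np(x_val, eps=0.3, clip_min=0., clip_max=1.)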
Please provide a description of the function:def construct_variables(self, kwargs):
if isinstance(self.feedable_kwargs, dict):
warnings.warn("Using a dict for `feedable_kwargs is deprecated."
"Switch to using a tuple."
"It is not longer necessary to specify the types "
"of the arguments---we build a different graph "
"for each received type."
"Using a dict may become an error on or after "
"2019-04-18.")
feedable_names = tuple(sorted(self.feedable_kwargs.keys()))
else:
feedable_names = self.feedable_kwargs
if not isinstance(feedable_names, tuple):
raise TypeError("Attack.feedable_kwargs should be a tuple, but "
"for subclass " + str(type(self)) + " it is "
+ str(self.feedable_kwargs) + " of type "
+ str(type(self.feedable_kwargs)))
# the set of arguments that are structural properties of the attack
# if these arguments are different, we must construct a new graph
fixed = dict(
(k, v) for k, v in kwargs.items() if k in self.structural_kwargs)
# the set of arguments that are passed as placeholders to the graph
# on each call, and can change without constructing a new graph
feedable = {k: v for k, v in kwargs.items() if k in feedable_names}
for k in feedable:
if isinstance(feedable[k], (float, int)):
feedable[k] = np.array(feedable[k])
for key in kwargs:
if key not in fixed and key not in feedable:
raise ValueError(str(type(self)) + ": Undeclared argument: " + key)
feed_arg_type = arg_type(feedable_names, feedable)
if not all(isinstance(value, collections.Hashable)
for value in fixed.values()):
# we have received a fixed value that isn't hashable
# this means we can't cache this graph for later use,
# and it will have to be discarded later
hash_key = None
else:
      # create a unique key for this set of fixed parameters
hash_key = tuple(sorted(fixed.items())) + tuple([feed_arg_type])
return fixed, feedable, feed_arg_type, hash_key
|
[
"\n Construct the inputs to the attack graph to be used by generate_np.\n\n :param kwargs: Keyword arguments to generate_np.\n :return:\n Structural arguments\n Feedable arguments\n Output of `arg_type` describing feedable arguments\n A unique key\n "
] |
Please provide a description of the function:def get_or_guess_labels(self, x, kwargs):
if 'y' in kwargs and 'y_target' in kwargs:
raise ValueError("Can not set both 'y' and 'y_target'.")
elif 'y' in kwargs:
labels = kwargs['y']
elif 'y_target' in kwargs and kwargs['y_target'] is not None:
labels = kwargs['y_target']
else:
preds = self.model.get_probs(x)
preds_max = reduce_max(preds, 1, keepdims=True)
original_predictions = tf.to_float(tf.equal(preds, preds_max))
labels = tf.stop_gradient(original_predictions)
del preds
if isinstance(labels, np.ndarray):
nb_classes = labels.shape[1]
else:
nb_classes = labels.get_shape().as_list()[1]
return labels, nb_classes
|
[
"\n Get the label to use in generating an adversarial example for x.\n The kwargs are fed directly from the kwargs of the attack.\n If 'y' is in kwargs, then assume it's an untargeted attack and\n use that as the label.\n If 'y_target' is in kwargs and is not none, then assume it's a\n targeted attack and use that as the label.\n Otherwise, use the model's prediction as the label and perform an\n untargeted attack.\n "
] |
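The fallback branch converts the model's predictions into one-hot pseudo-labels. A NumPy rendering of the same idea (illustrative only):

import numpy as np

preds = np.array([[0.1, 0.7, 0.2],
                  [0.5, 0.3, 0.2]])
preds_max = preds.max(axis=1, keepdims=True)
labels = (preds == preds_max).astype(np.float32)
# labels == [[0., 1., 0.], [1., 0., 0.]]; the TF version wraps this in
# tf.stop_gradient so gradients cannot flow into the guessed labels.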
Please provide a description of the function:def dueling_model(img_in, num_actions, scope, noisy=False, reuse=False,
concat_softmax=False):
with tf.variable_scope(scope, reuse=reuse):
out = img_in
with tf.variable_scope("convnet"):
# original architecture
out = layers.convolution2d(out, num_outputs=32, kernel_size=8,
stride=4, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=4,
stride=2, activation_fn=tf.nn.relu)
out = layers.convolution2d(out, num_outputs=64, kernel_size=3,
stride=1, activation_fn=tf.nn.relu)
out = layers.flatten(out)
with tf.variable_scope("state_value"):
if noisy:
# Apply noisy network on fully connected layers
# ref: https://arxiv.org/abs/1706.10295
state_hidden = noisy_dense(out, name='noisy_fc1', size=512,
activation_fn=tf.nn.relu)
state_score = noisy_dense(state_hidden, name='noisy_fc2',
size=1)
else:
state_hidden = layers.fully_connected(
out,
num_outputs=512,
activation_fn=tf.nn.relu
)
state_score = layers.fully_connected(state_hidden,
num_outputs=1,
activation_fn=None)
with tf.variable_scope("action_value"):
if noisy:
# Apply noisy network on fully connected layers
# ref: https://arxiv.org/abs/1706.10295
actions_hidden = noisy_dense(out, name='noisy_fc1', size=512,
activation_fn=tf.nn.relu)
action_scores = noisy_dense(actions_hidden, name='noisy_fc2',
size=num_actions)
else:
actions_hidden = layers.fully_connected(
out,
num_outputs=512,
activation_fn=tf.nn.relu
)
action_scores = layers.fully_connected(
actions_hidden,
num_outputs=num_actions,
activation_fn=None
)
action_scores_mean = tf.reduce_mean(action_scores, 1)
action_scores = action_scores - tf.expand_dims(
action_scores_mean,
1
)
return state_score + action_scores
|
[
"As described in https://arxiv.org/abs/1511.06581"
] |
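The final lines implement the dueling aggregation Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)). In NumPy, with toy numbers:

import numpy as np

state_score = np.array([[2.0]])              # V(s), shape (batch, 1)
action_scores = np.array([[1.0, 3.0, 2.0]])  # A(s, a), shape (batch, n)
q = state_score + (action_scores
                   - action_scores.mean(axis=1, keepdims=True))
# q == [[1., 3., 2.]]; subtracting the mean keeps V and A identifiable.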
Please provide a description of the function:def mnist_tutorial_jsma(train_start=0, train_end=60000, test_start=0,
test_end=10000, viz_enabled=VIZ_ENABLED,
nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,
source_samples=SOURCE_SAMPLES,
learning_rate=LEARNING_RATE):
# Object used to keep track of (and return) key accuracies
report = AccuracyReport()
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Create TF session and set as Keras backend session
sess = tf.Session()
print("Created TensorFlow session.")
set_log_level(logging.DEBUG)
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
x_train, y_train = mnist.get_set('train')
x_test, y_test = mnist.get_set('test')
# Obtain Image Parameters
img_rows, img_cols, nchannels = x_train.shape[1:4]
nb_classes = y_train.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
nchannels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
nb_filters = 64
# Define TF model graph
model = ModelBasicCNN('model1', nb_classes, nb_filters)
preds = model.get_logits(x)
loss = CrossEntropy(model, smoothing=0.1)
print("Defined TensorFlow model graph.")
###########################################################################
# Training the model using TensorFlow
###########################################################################
# Train an MNIST model
train_params = {
'nb_epochs': nb_epochs,
'batch_size': batch_size,
'learning_rate': learning_rate
}
sess.run(tf.global_variables_initializer())
rng = np.random.RandomState([2017, 8, 30])
train(sess, loss, x_train, y_train, args=train_params, rng=rng)
# Evaluate the accuracy of the MNIST model on legitimate test examples
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, preds, x_test, y_test, args=eval_params)
assert x_test.shape[0] == test_end - test_start, x_test.shape
print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
report.clean_train_clean_eval = accuracy
###########################################################################
# Craft adversarial examples using the Jacobian-based saliency map approach
###########################################################################
print('Crafting ' + str(source_samples) + ' * ' + str(nb_classes - 1) +
' adversarial examples')
# Keep track of success (adversarial example classified in target)
results = np.zeros((nb_classes, source_samples), dtype='i')
# Rate of perturbed features for each test set example and target class
perturbations = np.zeros((nb_classes, source_samples), dtype='f')
# Initialize our array for grid visualization
grid_shape = (nb_classes, nb_classes, img_rows, img_cols, nchannels)
grid_viz_data = np.zeros(grid_shape, dtype='f')
# Instantiate a SaliencyMapMethod attack object
jsma = SaliencyMapMethod(model, sess=sess)
jsma_params = {'theta': 1., 'gamma': 0.1,
'clip_min': 0., 'clip_max': 1.,
'y_target': None}
figure = None
# Loop over the samples we want to perturb into adversarial examples
for sample_ind in xrange(0, source_samples):
print('--------------------------------------')
print('Attacking input %i/%i' % (sample_ind + 1, source_samples))
sample = x_test[sample_ind:(sample_ind + 1)]
# We want to find an adversarial example for each possible target class
# (i.e. all classes that differ from the label given in the dataset)
current_class = int(np.argmax(y_test[sample_ind]))
target_classes = other_classes(nb_classes, current_class)
# For the grid visualization, keep original images along the diagonal
grid_viz_data[current_class, current_class, :, :, :] = np.reshape(
sample, (img_rows, img_cols, nchannels))
# Loop over all target classes
for target in target_classes:
print('Generating adv. example for target class %i' % target)
# This call runs the Jacobian-based saliency map approach
one_hot_target = np.zeros((1, nb_classes), dtype=np.float32)
one_hot_target[0, target] = 1
jsma_params['y_target'] = one_hot_target
adv_x = jsma.generate_np(sample, **jsma_params)
# Check if success was achieved
res = int(model_argmax(sess, x, preds, adv_x) == target)
      # Compute the number of modified features
adv_x_reshape = adv_x.reshape(-1)
test_in_reshape = x_test[sample_ind].reshape(-1)
nb_changed = np.where(adv_x_reshape != test_in_reshape)[0].shape[0]
percent_perturb = float(nb_changed) / adv_x.reshape(-1).shape[0]
# Display the original and adversarial images side-by-side
if viz_enabled:
figure = pair_visual(
np.reshape(sample, (img_rows, img_cols, nchannels)),
np.reshape(adv_x, (img_rows, img_cols, nchannels)), figure)
# Add our adversarial example to our grid data
grid_viz_data[target, current_class, :, :, :] = np.reshape(
adv_x, (img_rows, img_cols, nchannels))
# Update the arrays for later analysis
results[target, sample_ind] = res
perturbations[target, sample_ind] = percent_perturb
print('--------------------------------------')
# Compute the number of adversarial examples that were successfully found
nb_targets_tried = ((nb_classes - 1) * source_samples)
succ_rate = float(np.sum(results)) / nb_targets_tried
print('Avg. rate of successful adv. examples {0:.4f}'.format(succ_rate))
report.clean_train_adv_eval = 1. - succ_rate
# Compute the average distortion introduced by the algorithm
percent_perturbed = np.mean(perturbations)
print('Avg. rate of perturbed features {0:.4f}'.format(percent_perturbed))
# Compute the average distortion introduced for successful samples only
percent_perturb_succ = np.mean(perturbations * (results == 1))
print('Avg. rate of perturbed features for successful '
'adversarial examples {0:.4f}'.format(percent_perturb_succ))
# Close TF session
sess.close()
# Finally, block & display a grid of all the adversarial examples
if viz_enabled:
import matplotlib.pyplot as plt
plt.close(figure)
_ = grid_visual(grid_viz_data)
return report
|
[
"\n MNIST tutorial for the Jacobian-based saliency map approach (JSMA)\n :param train_start: index of first training set example\n :param train_end: index of last training set example\n :param test_start: index of first test set example\n :param test_end: index of last test set example\n :param viz_enabled: (boolean) activate plots of adversarial examples\n :param nb_epochs: number of epochs to train model\n :param batch_size: size of training batches\n :param nb_classes: number of output classes\n :param source_samples: number of test inputs to attack\n :param learning_rate: learning rate for training\n :return: an AccuracyReport object\n "
] |
Please provide a description of the function:def generate(self, x, **kwargs):
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
asserts = []
# If a data range was specified, check that the input was in that range
if self.clip_min is not None:
asserts.append(utils_tf.assert_greater_equal(x,
tf.cast(self.clip_min,
x.dtype)))
if self.clip_max is not None:
asserts.append(utils_tf.assert_less_equal(x,
tf.cast(self.clip_max,
x.dtype)))
# Initialize loop variables
momentum = tf.zeros_like(x)
adv_x = x
# Fix labels to the first model predictions for loss computation
y, _nb_classes = self.get_or_guess_labels(x, kwargs)
y = y / reduce_sum(y, 1, keepdims=True)
targeted = (self.y_target is not None)
def cond(i, _, __):
return tf.less(i, self.nb_iter)
def body(i, ax, m):
logits = self.model.get_logits(ax)
loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
if targeted:
loss = -loss
# Define gradient of loss wrt input
grad, = tf.gradients(loss, ax)
# Normalize current gradient and add it to the accumulated gradient
red_ind = list(range(1, len(grad.get_shape())))
avoid_zero_div = tf.cast(1e-12, grad.dtype)
grad = grad / tf.maximum(
avoid_zero_div,
reduce_mean(tf.abs(grad), red_ind, keepdims=True))
m = self.decay_factor * m + grad
optimal_perturbation = optimize_linear(m, self.eps_iter, self.ord)
if self.ord == 1:
raise NotImplementedError("This attack hasn't been tested for ord=1."
"It's not clear that FGM makes a good inner "
"loop step for iterative optimization since "
"it updates just one coordinate at a time.")
# Update and clip adversarial example in current iteration
ax = ax + optimal_perturbation
ax = x + utils_tf.clip_eta(ax - x, self.ord, self.eps)
if self.clip_min is not None and self.clip_max is not None:
ax = utils_tf.clip_by_value(ax, self.clip_min, self.clip_max)
ax = tf.stop_gradient(ax)
return i + 1, ax, m
_, adv_x, _ = tf.while_loop(
cond, body, (tf.zeros([]), adv_x, momentum), back_prop=True,
maximum_iterations=self.nb_iter)
if self.sanity_checks:
with tf.control_dependencies(asserts):
adv_x = tf.identity(adv_x)
return adv_x
|
[
"\n Generate symbolic graph for adversarial examples and return.\n\n :param x: The model's symbolic inputs.\n :param kwargs: Keyword arguments. See `parse_params` for documentation.\n ",
"Iterate until number of iterations completed",
"Do a momentum step"
] |
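One iteration of the loop body for the ord=np.inf case, written out in NumPy (a sketch in the same notation, not the TF graph itself):

import numpy as np

def momentum_step_inf(x, ax, m, grad, eps, eps_iter, decay_factor,
                      avoid_zero_div=1e-12):
  # Normalize the raw gradient by its mean absolute value, accumulate it
  # into the momentum buffer, take a signed step, then project the total
  # perturbation back into the eps-ball around x.
  red_axes = tuple(range(1, grad.ndim))
  grad = grad / np.maximum(avoid_zero_div,
                           np.abs(grad).mean(axis=red_axes, keepdims=True))
  m = decay_factor * m + grad
  ax = ax + eps_iter * np.sign(m)      # optimize_linear for ord=np.inf
  ax = x + np.clip(ax - x, -eps, eps)  # clip_eta for ord=np.inf
  return ax, m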
Please provide a description of the function:def parse_params(self,
eps=0.3,
eps_iter=0.06,
nb_iter=10,
y=None,
ord=np.inf,
decay_factor=1.0,
clip_min=None,
clip_max=None,
y_target=None,
sanity_checks=True,
**kwargs):
# Save attack-specific parameters
self.eps = eps
self.eps_iter = eps_iter
self.nb_iter = nb_iter
self.y = y
self.y_target = y_target
self.ord = ord
self.decay_factor = decay_factor
self.clip_min = clip_min
self.clip_max = clip_max
self.sanity_checks = sanity_checks
if self.y is not None and self.y_target is not None:
raise ValueError("Must not set both y and y_target")
# Check if order of the norm is acceptable given current implementation
if self.ord not in [np.inf, 1, 2]:
raise ValueError("Norm order must be either np.inf, 1, or 2.")
if len(kwargs.keys()) > 0:
warnings.warn("kwargs is unused and will be removed on or after "
"2019-04-26.")
return True
|
[
"\n Take in a dictionary of parameters and applies attack-specific checks\n before saving them as attributes.\n\n Attack-specific parameters:\n\n :param eps: (optional float) maximum distortion of adversarial example\n compared to original input\n :param eps_iter: (optional float) step size for each attack iteration\n :param nb_iter: (optional int) Number of attack iterations.\n :param y: (optional) A tensor with the true labels.\n :param y_target: (optional) A tensor with the labels to target. Leave\n y_target=None if y is also set. Labels should be\n one-hot-encoded.\n :param ord: (optional) Order of the norm (mimics Numpy).\n Possible values: np.inf, 1 or 2.\n :param decay_factor: (optional) Decay factor for the momentum term.\n :param clip_min: (optional float) Minimum input component value\n :param clip_max: (optional float) Maximum input component value\n "
] |
Please provide a description of the function:def train(sess, loss, x_train, y_train,
init_all=False, evaluate=None, feed=None, args=None,
rng=None, var_list=None, fprop_args=None, optimizer=None,
devices=None, x_batch_preprocessor=None, use_ema=False,
ema_decay=.998, run_canary=None,
loss_threshold=1e5, dataset_train=None, dataset_size=None):
# Check whether the hardware is working correctly
canary.run_canary()
if run_canary is not None:
warnings.warn("The `run_canary` argument is deprecated. The canary "
"is now much cheaper and thus runs all the time. The "
"canary now uses its own loss function so it is not "
"necessary to turn off the canary when training with "
" a stochastic loss. Simply quit passing `run_canary`."
"Passing `run_canary` may become an error on or after "
"2019-10-16.")
args = _ArgsWrapper(args or {})
fprop_args = fprop_args or {}
# Check that necessary arguments were given (see doc above)
# Be sure to support 0 epochs for debugging purposes
if args.nb_epochs is None:
raise ValueError("`args` must specify number of epochs")
if optimizer is None:
if args.learning_rate is None:
raise ValueError("Learning rate was not given in args dict")
assert args.batch_size, "Batch size was not given in args dict"
if rng is None:
rng = np.random.RandomState()
if optimizer is None:
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
else:
if not isinstance(optimizer, tf.train.Optimizer):
raise ValueError("optimizer object must be from a child class of "
"tf.train.Optimizer")
grads = []
xs = []
preprocessed_xs = []
ys = []
if dataset_train is not None:
assert x_train is None and y_train is None and x_batch_preprocessor is None
if dataset_size is None:
raise ValueError("You must provide a dataset size")
data_iterator = dataset_train.make_one_shot_iterator().get_next()
x_train, y_train = sess.run(data_iterator)
devices = infer_devices(devices)
for device in devices:
with tf.device(device):
x = tf.placeholder(x_train.dtype, (None,) + x_train.shape[1:])
y = tf.placeholder(y_train.dtype, (None,) + y_train.shape[1:])
xs.append(x)
ys.append(y)
if x_batch_preprocessor is not None:
x = x_batch_preprocessor(x)
# We need to keep track of these so that the canary can feed
# preprocessed values. If the canary had to feed raw values,
# stochastic preprocessing could make the canary fail.
preprocessed_xs.append(x)
loss_value = loss.fprop(x, y, **fprop_args)
grads.append(optimizer.compute_gradients(
loss_value, var_list=var_list))
num_devices = len(devices)
print("num_devices: ", num_devices)
grad = avg_grads(grads)
# Trigger update operations within the default graph (such as batch_norm).
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = optimizer.apply_gradients(grad)
epoch_tf = tf.placeholder(tf.int32, [])
batch_tf = tf.placeholder(tf.int32, [])
if use_ema:
if callable(ema_decay):
ema_decay = ema_decay(epoch_tf, batch_tf)
ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
with tf.control_dependencies([train_step]):
train_step = ema.apply(var_list)
# Get pointers to the EMA's running average variables
avg_params = [ema.average(param) for param in var_list]
# Make temporary buffers used for swapping the live and running average
# parameters
tmp_params = [tf.Variable(param, trainable=False)
for param in var_list]
# Define the swapping operation
param_to_tmp = [tf.assign(tmp, param)
for tmp, param in safe_zip(tmp_params, var_list)]
with tf.control_dependencies(param_to_tmp):
avg_to_param = [tf.assign(param, avg)
for param, avg in safe_zip(var_list, avg_params)]
with tf.control_dependencies(avg_to_param):
tmp_to_avg = [tf.assign(avg, tmp)
for avg, tmp in safe_zip(avg_params, tmp_params)]
swap = tmp_to_avg
batch_size = args.batch_size
assert batch_size % num_devices == 0
device_batch_size = batch_size // num_devices
if init_all:
sess.run(tf.global_variables_initializer())
else:
initialize_uninitialized_global_variables(sess)
for epoch in xrange(args.nb_epochs):
if dataset_train is not None:
nb_batches = int(math.ceil(float(dataset_size) / batch_size))
else:
# Indices to shuffle training set
index_shuf = list(range(len(x_train)))
# Randomly repeat a few training examples each epoch to avoid
# having a too-small batch
while len(index_shuf) % batch_size != 0:
index_shuf.append(rng.randint(len(x_train)))
nb_batches = len(index_shuf) // batch_size
rng.shuffle(index_shuf)
# Shuffling here versus inside the loop doesn't seem to affect
# timing very much, but shuffling here makes the code slightly
# easier to read
x_train_shuffled = x_train[index_shuf]
y_train_shuffled = y_train[index_shuf]
prev = time.time()
for batch in range(nb_batches):
if dataset_train is not None:
x_train_shuffled, y_train_shuffled = sess.run(data_iterator)
start, end = 0, batch_size
else:
# Compute batch start and end indices
start = batch * batch_size
end = (batch + 1) * batch_size
# Perform one training step
diff = end - start
assert diff == batch_size
feed_dict = {epoch_tf: epoch, batch_tf: batch}
for dev_idx in xrange(num_devices):
cur_start = start + dev_idx * device_batch_size
cur_end = start + (dev_idx + 1) * device_batch_size
feed_dict[xs[dev_idx]] = x_train_shuffled[cur_start:cur_end]
feed_dict[ys[dev_idx]] = y_train_shuffled[cur_start:cur_end]
if cur_end != end and dataset_train is None:
msg = ("batch_size (%d) must be a multiple of num_devices "
"(%d).\nCUDA_VISIBLE_DEVICES: %s"
"\ndevices: %s")
          msg_args = (batch_size, num_devices,
                      os.environ['CUDA_VISIBLE_DEVICES'],
                      str(devices))
          raise ValueError(msg % msg_args)
if feed is not None:
feed_dict.update(feed)
_, loss_numpy = sess.run(
[train_step, loss_value], feed_dict=feed_dict)
if np.abs(loss_numpy) > loss_threshold:
raise ValueError("Extreme loss during training: ", loss_numpy)
if np.isnan(loss_numpy) or np.isinf(loss_numpy):
raise ValueError("NaN/Inf loss during training")
assert (dataset_train is not None or
end == len(index_shuf)) # Check that all examples were used
cur = time.time()
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
if use_ema:
# Before running evaluation, load the running average
# parameters into the live slot, so we can see how well
# the EMA parameters are performing
sess.run(swap)
evaluate()
if use_ema:
# Swap the parameters back, so that we continue training
# on the live parameters
sess.run(swap)
if use_ema:
# When training is done, swap the running average parameters into
# the live slot, so that we use them when we deploy the model
sess.run(swap)
return True
|
[
"\n Run (optionally multi-replica, synchronous) training to minimize `loss`\n :param sess: TF session to use when training the graph\n :param loss: tensor, the loss to minimize\n :param x_train: numpy array with training inputs or tf Dataset\n :param y_train: numpy array with training outputs or tf Dataset\n :param init_all: (boolean) If set to true, all TF variables in the session\n are (re)initialized, otherwise only previously\n uninitialized variables are initialized before training.\n :param evaluate: function that is run after each training iteration\n (typically to display the test/validation accuracy).\n :param feed: An optional dictionary that is appended to the feeding\n dictionary before the session runs. Can be used to feed\n the learning phase of a Keras model for instance.\n :param args: dict or argparse `Namespace` object.\n Should contain `nb_epochs`, `learning_rate`,\n `batch_size`\n :param rng: Instance of numpy.random.RandomState\n :param var_list: Optional list of parameters to train.\n :param fprop_args: dict, extra arguments to pass to fprop (loss and model).\n :param optimizer: Optimizer to be used for training\n :param devices: list of device names to use for training\n If None, defaults to: all GPUs, if GPUs are available\n all devices, if no GPUs are available\n :param x_batch_preprocessor: callable\n Takes a single tensor containing an x_train batch as input\n Returns a single tensor containing an x_train batch as output\n Called to preprocess the data before passing the data to the Loss\n :param use_ema: bool\n If true, uses an exponential moving average of the model parameters\n :param ema_decay: float or callable\n The decay parameter for EMA, if EMA is used\n If a callable rather than a float, this is a callable that takes\n the epoch and batch as arguments and returns the ema_decay for\n the current batch.\n :param loss_threshold: float\n Raise an exception if the loss exceeds this value.\n This is intended to rapidly detect numerical problems.\n Sometimes the loss may legitimately be higher than this value. In\n such cases, raise the value. If needed it can be np.inf.\n :param dataset_train: tf Dataset instance.\n Used as a replacement for x_train, y_train for faster performance.\n :param dataset_size: integer, the size of the dataset_train.\n :return: True if model trained\n "
] |
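The EMA bookkeeping above assumes a `swap` op that exchanges the live parameters with their running-average shadows. A minimal sketch of how such an op might be built with tf.train.ExponentialMovingAverage follows; the single-variable setup and the three-step swap are illustrative assumptions, not the library's actual implementation.

import tensorflow as tf

# Illustrative sketch (assumed names): one variable and its EMA shadow.
var = tf.Variable(0., trainable=True, name='w')
ema = tf.train.ExponentialMovingAverage(decay=0.998)
ema_op = ema.apply([var])   # run this op after every training step
shadow = ema.average(var)   # the running-average copy of `var`

# Swap live and shadow values through a temporary read of `var`.
tmp = tf.identity(var)
with tf.control_dependencies([tmp]):
  copy_shadow_to_live = var.assign(shadow)
with tf.control_dependencies([copy_shadow_to_live]):
  swap = shadow.assign(tmp)  # sess.run(swap) exchanges the two values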
Please provide a description of the function:def avg_grads(tower_grads):
if len(tower_grads) == 1:
return tower_grads[0]
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = [g for g, _ in grad_and_vars]
# Average over the 'tower' dimension.
grad = tf.add_n(grads) / len(grads)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
assert all(v is grad_and_var[1] for grad_and_var in grad_and_vars)
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
|
[
"Calculate the average gradient for each shared variable across all\n towers.\n Note that this function provides a synchronization point across all towers.\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been\n averaged across all towers.\n\n Modified from this tutorial: https://tinyurl.com/n3jr2vm\n "
] |
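For context, a hedged usage sketch of avg_grads in a two-tower setup; loss_tower0, loss_tower1, and opt are illustrative placeholders, not names from this codebase.

# Hypothetical multi-tower usage: collect per-tower gradients, average them,
# and apply the result once.
opt = tf.train.GradientDescentOptimizer(0.01)
tower_grads = []
for device, loss in [('/gpu:0', loss_tower0), ('/gpu:1', loss_tower1)]:
  with tf.device(device):
    tower_grads.append(opt.compute_gradients(loss))
train_op = opt.apply_gradients(avg_grads(tower_grads))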
Please provide a description of the function:def create_adv_by_name(model, x, attack_type, sess, dataset, y=None, **kwargs):
# TODO: black box attacks
attack_names = {'FGSM': FastGradientMethod,
'MadryEtAl': MadryEtAl,
'MadryEtAl_y': MadryEtAl,
'MadryEtAl_multigpu': MadryEtAlMultiGPU,
'MadryEtAl_y_multigpu': MadryEtAlMultiGPU
}
if attack_type not in attack_names:
    raise ValueError('Attack %s not defined.' % attack_type)
attack_params_shared = {
'mnist': {'eps': .3, 'eps_iter': 0.01, 'clip_min': 0., 'clip_max': 1.,
'nb_iter': 40},
'cifar10': {'eps': 8./255, 'eps_iter': 0.01, 'clip_min': 0.,
'clip_max': 1., 'nb_iter': 20}
}
with tf.variable_scope(attack_type):
attack_class = attack_names[attack_type]
attack = attack_class(model, sess=sess)
# Extract feedable and structural keyword arguments from kwargs
    fd_kwargs = list(attack.feedable_kwargs.keys()) + attack.structural_kwargs
params = attack_params_shared[dataset].copy()
params.update({k: v for k, v in kwargs.items() if v is not None})
params = {k: v for k, v in params.items() if k in fd_kwargs}
if '_y' in attack_type:
params['y'] = y
logging.info(params)
adv_x = attack.generate(x, **params)
return adv_x
|
[
"\n Creates the symbolic graph of an adversarial example given the name of\n an attack. Simplifies creating the symbolic graph of an attack by defining\n dataset-specific parameters.\n Dataset-specific default parameters are used unless a different value is\n given in kwargs.\n\n :param model: an object of Model class\n :param x: Symbolic input to the attack.\n :param attack_type: A string that is the name of an attack.\n :param sess: Tensorflow session.\n :param dataset: The name of the dataset as a string to use for default\n params.\n :param y: (optional) a symbolic variable for the labels.\n :param kwargs: (optional) additional parameters to be passed to the attack.\n "
] |
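A hedged usage sketch, assuming the surrounding model, tensors, and session already exist: with the dataset defaults above, only the parameters to override need to be passed.

# Hypothetical call: FGSM with MNIST defaults, overriding only eps.
adv_x = create_adv_by_name(model, x, 'FGSM', sess, 'mnist', eps=0.2)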
Please provide a description of the function:def log_value(self, tag, val, desc=''):
logging.info('%s (%s): %.4f' % (desc, tag, val))
self.summary.value.add(tag=tag, simple_value=val)
|
[
"\n Log values to standard output and Tensorflow summary.\n\n :param tag: summary tag.\n :param val: (required float or numpy array) value to be logged.\n :param desc: (optional) additional description to be printed.\n "
] |
Please provide a description of the function:def eval_advs(self, x, y, preds_adv, X_test, Y_test, att_type):
end = (len(X_test) // self.batch_size) * self.batch_size
if self.hparams.fast_tests:
end = 10*self.batch_size
acc = model_eval(self.sess, x, y, preds_adv, X_test[:end],
Y_test[:end], args=self.eval_params)
self.log_value('test_accuracy_%s' % att_type, acc,
'Test accuracy on adversarial examples')
return acc
|
[
"\n Evaluate the accuracy of the model on adversarial examples\n\n :param x: symbolic input to model.\n :param y: symbolic variable for the label.\n :param preds_adv: symbolic variable for the prediction on an\n adversarial example.\n :param X_test: NumPy array of test set inputs.\n :param Y_test: NumPy array of test set labels.\n :param att_type: name of the attack.\n "
] |
Please provide a description of the function:def eval_multi(self, inc_epoch=True):
sess = self.sess
preds = self.preds
x = self.x_pre
y = self.y
X_train = self.X_train
Y_train = self.Y_train
X_test = self.X_test
Y_test = self.Y_test
writer = self.writer
self.summary = tf.Summary()
report = {}
# Evaluate on train set
subsample_factor = 100
X_train_subsampled = X_train[::subsample_factor]
Y_train_subsampled = Y_train[::subsample_factor]
acc_train = model_eval(sess, x, y, preds, X_train_subsampled,
Y_train_subsampled, args=self.eval_params)
self.log_value('train_accuracy_subsampled', acc_train,
'Clean accuracy, subsampled train')
report['train'] = acc_train
# Evaluate on the test set
acc = model_eval(sess, x, y, preds, X_test, Y_test,
args=self.eval_params)
self.log_value('test_accuracy_natural', acc,
'Clean accuracy, natural test')
report['test'] = acc
# Evaluate against adversarial attacks
if self.epoch % self.hparams.eval_iters == 0:
for att_type in self.attack_type_test:
_, preds_adv = self.attacks[att_type]
acc = self.eval_advs(x, y, preds_adv, X_test, Y_test, att_type)
report[att_type] = acc
if self.writer:
writer.add_summary(self.summary, self.epoch)
# Add examples of adversarial examples to the summary
if self.writer and self.epoch % 20 == 0 and self.sum_op is not None:
sm_val = self.sess.run(self.sum_op,
feed_dict={x: X_test[:self.batch_size],
y: Y_test[:self.batch_size]})
if self.writer:
writer.add_summary(sm_val)
self.epoch += 1 if inc_epoch else 0
return report
|
[
"\n Run the evaluation on multiple attacks.\n "
] |
Please provide a description of the function:def run_canary():
# Note: please do not edit this function unless you have access to a machine
# with GPUs suffering from the bug and can verify that the canary still
# crashes after your edits. Due to the transient nature of the GPU bug it is
# not possible to unit test the canary in our continuous integration system.
global last_run
current = time.time()
if last_run is None or current - last_run > 3600:
last_run = current
else:
# Run the canary at most once per hour
return
# Try very hard not to let the canary affect the graph for the rest of the
# python process
canary_graph = tf.Graph()
with canary_graph.as_default():
devices = infer_devices()
num_devices = len(devices)
if num_devices < 3:
# We have never observed GPU failure when less than 3 GPUs were used
return
v = np.random.RandomState([2018, 10, 16]).randn(2, 2)
# Try very hard not to let this Variable end up in any collections used
# by the rest of the python process
w = tf.Variable(v, trainable=False, collections=[])
loss = tf.reduce_sum(tf.square(w))
grads = []
for device in devices:
with tf.device(device):
grad, = tf.gradients(loss, w)
grads.append(grad)
sess = tf.Session()
sess.run(tf.variables_initializer([w]))
grads = sess.run(grads)
first = grads[0]
for grad in grads[1:]:
if not np.allclose(first, grad):
first_string = str(first)
grad_string = str(grad)
raise RuntimeError("Something is wrong with your GPUs or GPU driver."
"%(num_devices)d different GPUS were asked to "
"calculate the same 2x2 gradient. One returned "
"%(first_string)s and another returned "
"%(grad_string)s. This can usually be fixed by "
"rebooting the machine." %
{"num_devices" : num_devices,
"first_string" : first_string,
"grad_string" : grad_string})
sess.close()
|
[
"\n Runs some code that will crash if the GPUs / GPU driver are suffering from\n a common bug. This helps to prevent contaminating results in the rest of\n the library with incorrect calculations.\n "
] |
Please provide a description of the function:def _wrap(f):
def wrapper(*args, **kwargs):
    warnings.warn(str(f) + " is deprecated. Switch to calling the equivalent function in tensorflow. "
                  "This function was originally needed as a compatibility layer for old versions of tensorflow, "
                  "but support for those versions has now been dropped.")
return f(*args, **kwargs)
return wrapper
|
[
"\n Wraps a callable `f` in a function that warns that the function is deprecated.\n ",
"\n Issues a deprecation warning and passes through the arguments.\n "
] |
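A small usage sketch of the decorator; the alias name is illustrative.

# Hypothetical alias kept for backward compatibility:
reduce_sum = _wrap(tf.reduce_sum)
total = reduce_sum(tf.constant([1., 2., 3.]))  # emits the warning, then delegates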
Please provide a description of the function:def reduce_function(op_func, input_tensor, axis=None, keepdims=None,
name=None, reduction_indices=None):
warnings.warn("`reduce_function` is deprecated and may be removed on or after 2019-09-08.")
out = op_func(input_tensor, axis=axis, keepdims=keepdims, name=name, reduction_indices=reduction_indices)
return out
|
[
"\n This function used to be needed to support tf 1.4 and early, but support for tf 1.4 and earlier is now dropped.\n :param op_func: expects the function to handle eg: tf.reduce_sum.\n :param input_tensor: The tensor to reduce. Should have numeric type.\n :param axis: The dimensions to reduce. If None (the default),\n reduces all dimensions. Must be in the range\n [-rank(input_tensor), rank(input_tensor)).\n :param keepdims: If true, retains reduced dimensions with length 1.\n :param name: A name for the operation (optional).\n :param reduction_indices: The old (deprecated) name for axis.\n :return: outputs same value as op_func.\n "
] |
Please provide a description of the function:def softmax_cross_entropy_with_logits(sentinel=None,
labels=None,
logits=None,
dim=-1):
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
name = "softmax_cross_entropy_with_logits"
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)"
% name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
try:
f = tf.nn.softmax_cross_entropy_with_logits_v2
except AttributeError:
raise RuntimeError("This version of TensorFlow is no longer supported. See cleverhans/README.md")
labels = tf.stop_gradient(labels)
loss = f(labels=labels, logits=logits, dim=dim)
return loss
|
[
"\n Wrapper around tf.nn.softmax_cross_entropy_with_logits_v2 to handle\n deprecated warning\n "
] |
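A short sketch of why the sentinel matters, with illustrative tensors: the wrapper only accepts keyword arguments, so accidental positional calls fail fast.

labels = tf.one_hot([0, 2], depth=3)
logits = tf.random_normal([2, 3])
loss = softmax_cross_entropy_with_logits(labels=labels, logits=logits)  # ok
# softmax_cross_entropy_with_logits(labels, logits) would raise instead,
# because the first positional argument lands on the sentinel.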
Please provide a description of the function:def enforce_epsilon_and_compute_hash(dataset_batch_dir, adv_dir, output_dir,
epsilon):
dataset_images = [f for f in os.listdir(dataset_batch_dir)
if f.endswith('.png')]
image_hashes = {}
resize_warning = False
for img_name in dataset_images:
if not os.path.exists(os.path.join(adv_dir, img_name)):
logging.warning('Image %s not found in the output', img_name)
continue
image = np.array(
Image.open(os.path.join(dataset_batch_dir, img_name)).convert('RGB'))
image = image.astype('int32')
image_max_clip = np.clip(image + epsilon, 0, 255).astype('uint8')
image_min_clip = np.clip(image - epsilon, 0, 255).astype('uint8')
# load and resize adversarial image if needed
adv_image = Image.open(os.path.join(adv_dir, img_name)).convert('RGB')
# Image.size is reversed compared to np.array.shape
if adv_image.size[::-1] != image.shape[:2]:
resize_warning = True
adv_image = adv_image.resize((image.shape[1], image.shape[0]),
Image.BICUBIC)
adv_image = np.array(adv_image)
clipped_adv_image = np.clip(adv_image,
image_min_clip,
image_max_clip)
Image.fromarray(clipped_adv_image).save(os.path.join(output_dir, img_name))
# compute hash
image_hashes[img_name[:-4]] = hashlib.sha1(
clipped_adv_image.view(np.uint8)).hexdigest()
if resize_warning:
logging.warning('One or more adversarial images had incorrect size')
return image_hashes
|
[
"Enforces size of perturbation on images, and compute hashes for all images.\n\n Args:\n dataset_batch_dir: directory with the images of specific dataset batch\n adv_dir: directory with generated adversarial images\n output_dir: directory where to copy result\n epsilon: size of perturbation\n\n Returns:\n dictionary with mapping form image ID to hash.\n "
] |
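The core of the enforcement above is a per-pixel clip to the epsilon ball around the original image. A minimal NumPy sketch; the function name is illustrative.

import numpy as np

def clip_to_epsilon_ball(image, adv_image, epsilon):
  # Work in int32 so image +/- epsilon cannot wrap around in uint8.
  image = image.astype('int32')
  lo = np.clip(image - epsilon, 0, 255)
  hi = np.clip(image + epsilon, 0, 255)
  return np.clip(adv_image, lo, hi).astype('uint8')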
Please provide a description of the function:def download_dataset(storage_client, image_batches, target_dir,
local_dataset_copy=None):
for batch_id, batch_value in iteritems(image_batches.data):
batch_dir = os.path.join(target_dir, batch_id)
os.mkdir(batch_dir)
for image_id, image_val in iteritems(batch_value['images']):
dst_filename = os.path.join(batch_dir, image_id + '.png')
# try to use local copy first
if local_dataset_copy:
local_filename = os.path.join(local_dataset_copy,
os.path.basename(image_val['image_path']))
if os.path.exists(local_filename):
shutil.copyfile(local_filename, dst_filename)
continue
# download image from cloud
cloud_path = ('gs://' + storage_client.bucket_name
+ '/' + image_val['image_path'])
if not os.path.exists(dst_filename):
subprocess.call(['gsutil', 'cp', cloud_path, dst_filename])
|
[
"Downloads dataset, organize it by batches and rename images.\n\n Args:\n storage_client: instance of the CompetitionStorageClient\n image_batches: subclass of ImageBatchesBase with data about images\n target_dir: target directory, should exist and be empty\n local_dataset_copy: directory with local dataset copy, if local copy is\n available then images will be takes from there instead of Cloud Storage\n\n Data in the target directory will be organized into subdirectories by batches,\n thus path to each image will be \"target_dir/BATCH_ID/IMAGE_ID.png\"\n where BATCH_ID - ID of the batch (key of image_batches.data),\n IMAGE_ID - ID of the image (key of image_batches.data[batch_id]['images'])\n "
] |
Please provide a description of the function:def save_target_classes_for_batch(self,
filename,
image_batches,
batch_id):
images = image_batches.data[batch_id]['images']
with open(filename, 'w') as f:
for image_id, image_val in iteritems(images):
target_class = self.get_target_class(image_val['dataset_image_id'])
f.write('{0}.png,{1}\n'.format(image_id, target_class))
|
[
"Saves file with target class for given dataset batch.\n\n Args:\n filename: output filename\n image_batches: instance of ImageBatchesBase with dataset batches\n batch_id: dataset batch ID\n "
] |
Please provide a description of the function:def tf_min_eig_vec(self):
# Full eigen decomposition requires the explicit psd matrix M
_, matrix_m = self.dual_object.get_full_psd_matrix()
[eig_vals, eig_vectors] = tf.self_adjoint_eig(matrix_m)
index = tf.argmin(eig_vals)
return tf.reshape(
eig_vectors[:, index], shape=[eig_vectors.shape[0].value, 1])
|
[
"Function for min eigen vector using tf's full eigen decomposition."
] |
Please provide a description of the function:def tf_smooth_eig_vec(self):
_, matrix_m = self.dual_object.get_full_psd_matrix()
# Easier to think in terms of max so negating the matrix
[eig_vals, eig_vectors] = tf.self_adjoint_eig(-matrix_m)
exp_eig_vals = tf.exp(tf.divide(eig_vals, self.smooth_placeholder))
scaling_factor = tf.reduce_sum(exp_eig_vals)
# Multiplying each eig vector by exponential of corresponding eig value
# Scaling factor normalizes the vector to be unit norm
eig_vec_smooth = tf.divide(
tf.matmul(eig_vectors, tf.diag(tf.sqrt(exp_eig_vals))),
tf.sqrt(scaling_factor))
return tf.reshape(
tf.reduce_sum(eig_vec_smooth, axis=1),
shape=[eig_vec_smooth.shape[0].value, 1])
|
[
"Function that returns smoothed version of min eigen vector."
] |
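A NumPy sketch of the same softmax-style smoothing, assuming matrix_m fits in memory. As the temperature t goes to zero, the weights concentrate on the eigenvector of the minimum eigenvalue of M; np.exp can overflow for large eigenvalue/t ratios, so this is illustrative only.

import numpy as np

def smooth_min_eig_vec(matrix_m, t):
  # Negate so that the minimum eigenvalue of M becomes the maximum.
  eig_vals, eig_vecs = np.linalg.eigh(-matrix_m)
  w = np.exp(eig_vals / t)                        # softmax-style weights
  v = (eig_vecs * np.sqrt(w)) / np.sqrt(w.sum())  # scale each eigenvector
  return v.sum(axis=1, keepdims=True)             # result has unit norm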
Please provide a description of the function:def get_min_eig_vec_proxy(self, use_tf_eig=False):
if use_tf_eig:
# If smoothness parameter is too small, essentially no smoothing
# Just output the eigen vector corresponding to min
return tf.cond(self.smooth_placeholder < 1E-8,
self.tf_min_eig_vec,
self.tf_smooth_eig_vec)
# Using autograph to automatically handle
# the control flow of minimum_eigen_vector
min_eigen_tf = autograph.to_graph(utils.minimum_eigen_vector)
def _vector_prod_fn(x):
return self.dual_object.get_psd_product(x)
estimated_eigen_vector = min_eigen_tf(
x=self.eig_init_vec_placeholder,
num_steps=self.eig_num_iter_placeholder,
learning_rate=self.params['eig_learning_rate'],
vector_prod_fn=_vector_prod_fn)
return estimated_eigen_vector
|
[
"Computes the min eigen value and corresponding vector of matrix M.\n\n Args:\n use_tf_eig: Whether to use tf's default full eigen decomposition\n Returns:\n eig_vec: Minimum absolute eigen value\n eig_val: Corresponding eigen vector\n "
] |
Please provide a description of the function:def get_scipy_eig_vec(self):
if not self.params['has_conv']:
matrix_m = self.sess.run(self.dual_object.matrix_m)
min_eig_vec_val, estimated_eigen_vector = eigs(matrix_m, k=1, which='SR',
tol=1E-4)
min_eig_vec_val = np.reshape(np.real(min_eig_vec_val), [1, 1])
return np.reshape(estimated_eigen_vector, [-1, 1]), min_eig_vec_val
else:
dim = self.dual_object.matrix_m_dimension
input_vector = tf.placeholder(tf.float32, shape=(dim, 1))
output_vector = self.dual_object.get_psd_product(input_vector)
def np_vector_prod_fn(np_vector):
np_vector = np.reshape(np_vector, [-1, 1])
        output_np_vector = self.sess.run(output_vector, feed_dict={input_vector: np_vector})
return output_np_vector
linear_operator = LinearOperator((dim, dim), matvec=np_vector_prod_fn)
      # Estimate the smallest-real eigenpair with ARPACK on the matrix-free operator
min_eig_vec_val, estimated_eigen_vector = eigs(linear_operator,
k=1, which='SR', tol=1E-4)
min_eig_vec_val = np.reshape(np.real(min_eig_vec_val), [1, 1])
return np.reshape(estimated_eigen_vector, [-1, 1]), min_eig_vec_val
|
[
"Computes scipy estimate of min eigenvalue for matrix M.\n\n Returns:\n eig_vec: Minimum absolute eigen value\n eig_val: Corresponding eigen vector\n "
] |
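The matrix-free pattern above generalizes: eigs only needs a matvec, so the full matrix never has to be materialized. A standalone sketch with an assumed diagonal operator standing in for get_psd_product.

import numpy as np
from scipy.sparse.linalg import LinearOperator, eigs

dim = 100
diag = np.arange(1, dim + 1, dtype=np.float64)

def matvec(v):
  # Stands in for get_psd_product: multiply by an implicit matrix.
  return diag * v.ravel()

op = LinearOperator((dim, dim), matvec=matvec)
vals, _ = eigs(op, k=1, which='SR', tol=1e-4)
print(np.real(vals))  # approximately [1.], the smallest eigenvalue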
Please provide a description of the function:def prepare_for_optimization(self):
if self.params['eig_type'] == 'TF':
self.eig_vec_estimate = self.get_min_eig_vec_proxy()
elif self.params['eig_type'] == 'LZS':
self.eig_vec_estimate = self.dual_object.m_min_vec
else:
self.eig_vec_estimate = tf.placeholder(tf.float32, shape=(self.dual_object.matrix_m_dimension, 1))
self.stopped_eig_vec_estimate = tf.stop_gradient(self.eig_vec_estimate)
# Eig value is v^\top M v, where v is eigen vector
self.eig_val_estimate = tf.matmul(
tf.transpose(self.stopped_eig_vec_estimate),
self.dual_object.get_psd_product(self.stopped_eig_vec_estimate))
# Penalizing negative of min eigen value because we want min eig value
# to be positive
self.total_objective = (
self.dual_object.unconstrained_objective
+ 0.5 * tf.square(
tf.maximum(-self.penalty_placeholder * self.eig_val_estimate, 0)))
global_step = tf.Variable(0, trainable=False)
# Set up learning rate as a placeholder
self.learning_rate = tf.placeholder(tf.float32, shape=[])
# Set up the optimizer
if self.params['optimizer'] == 'adam':
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
elif self.params['optimizer'] == 'adagrad':
self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate)
elif self.params['optimizer'] == 'momentum':
self.optimizer = tf.train.MomentumOptimizer(
learning_rate=self.learning_rate,
momentum=self.params['momentum_parameter'],
use_nesterov=True)
else:
self.optimizer = tf.train.GradientDescentOptimizer(
learning_rate=self.learning_rate)
# Write out the projection step
self.train_step = self.optimizer.minimize(
self.total_objective, global_step=global_step)
self.sess.run(tf.global_variables_initializer())
# Projecting the dual variables
proj_ops = []
for i in range(self.dual_object.nn_params.num_hidden_layers + 1):
# Lambda_pos is non negative for switch indices,
# Unconstrained for positive indices
# Zero for negative indices
proj_ops.append(self.dual_object.lambda_pos[i].assign(
tf.multiply(self.dual_object.positive_indices[i],
self.dual_object.lambda_pos[i])+
tf.multiply(self.dual_object.switch_indices[i],
tf.nn.relu(self.dual_object.lambda_pos[i]))))
proj_ops.append(self.dual_object.lambda_neg[i].assign(
tf.multiply(self.dual_object.negative_indices[i],
self.dual_object.lambda_neg[i])+
tf.multiply(self.dual_object.switch_indices[i],
tf.nn.relu(self.dual_object.lambda_neg[i]))))
# Lambda_quad is only non zero and positive for switch
proj_ops.append(self.dual_object.lambda_quad[i].assign(
tf.multiply(self.dual_object.switch_indices[i],
tf.nn.relu(self.dual_object.lambda_quad[i]))))
# Lambda_lu is always non negative
proj_ops.append(self.dual_object.lambda_lu[i].assign(
tf.nn.relu(self.dual_object.lambda_lu[i])))
self.proj_step = tf.group(proj_ops)
# Create folder for saving stats if the folder is not None
if (self.params.get('stats_folder') and
not tf.gfile.IsDirectory(self.params['stats_folder'])):
tf.gfile.MkDir(self.params['stats_folder'])
|
[
"Create tensorflow op for running one step of descent."
] |
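The projection encoded in proj_ops above reduces to a simple per-index rule. A NumPy sketch for one layer's lambda_pos; names and shapes are illustrative.

import numpy as np

def project_lambda_pos(lam, positive_idx, switch_idx):
  # Unconstrained on positive indices, ReLU'd on switch indices,
  # implicitly zero on negative indices.
  return positive_idx * lam + switch_idx * np.maximum(lam, 0.)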
Please provide a description of the function:def run_one_step(self, eig_init_vec_val, eig_num_iter_val, smooth_val,
penalty_val, learning_rate_val):
# Running step
step_feed_dict = {self.eig_init_vec_placeholder: eig_init_vec_val,
self.eig_num_iter_placeholder: eig_num_iter_val,
self.smooth_placeholder: smooth_val,
self.penalty_placeholder: penalty_val,
self.learning_rate: learning_rate_val}
if self.params['eig_type'] == 'SCIPY':
current_eig_vector, self.current_eig_val_estimate = self.get_scipy_eig_vec()
step_feed_dict.update({
self.eig_vec_estimate: current_eig_vector
})
elif self.params['eig_type'] == 'LZS':
step_feed_dict.update({
self.dual_object.m_min_vec_ph: self.dual_object.m_min_vec_estimate
})
self.sess.run(self.train_step, feed_dict=step_feed_dict)
[
_, self.dual_object.m_min_vec_estimate, self.current_eig_val_estimate
] = self.sess.run([
self.proj_step,
self.eig_vec_estimate,
self.eig_val_estimate
], feed_dict=step_feed_dict)
if self.current_step % self.params['print_stats_steps'] == 0:
[self.current_total_objective, self.current_unconstrained_objective,
self.dual_object.m_min_vec_estimate,
self.current_eig_val_estimate,
self.current_nu] = self.sess.run(
[self.total_objective,
self.dual_object.unconstrained_objective,
self.eig_vec_estimate,
self.eig_val_estimate,
self.dual_object.nu], feed_dict=step_feed_dict)
stats = {
'total_objective':
float(self.current_total_objective),
'unconstrained_objective':
float(self.current_unconstrained_objective),
'min_eig_val_estimate':
float(self.current_eig_val_estimate)
}
tf.logging.info('Current inner step: %d, optimization stats: %s',
self.current_step, stats)
if self.params['stats_folder'] is not None:
stats = json.dumps(stats)
filename = os.path.join(self.params['stats_folder'],
str(self.current_step) + '.json')
        with tf.gfile.Open(filename, 'w') as file_f:
file_f.write(stats)
# Project onto feasible set of dual variables
    if (self.current_step % self.params['projection_steps'] == 0 and
        self.current_unconstrained_objective < 0):
nu = self.sess.run(self.dual_object.nu)
dual_feed_dict = {
self.dual_object.h_min_vec_ph: self.dual_object.h_min_vec_estimate
}
_, min_eig_val_h_lz = self.dual_object.get_lanczos_eig(compute_m=False, feed_dict=dual_feed_dict)
projected_dual_feed_dict = {
self.dual_object.projected_dual.nu: nu,
self.dual_object.projected_dual.min_eig_val_h: min_eig_val_h_lz
}
if self.dual_object.projected_dual.compute_certificate(self.current_step, projected_dual_feed_dict):
return True
return False
|
[
"Run one step of gradient descent for optimization.\n\n Args:\n eig_init_vec_val: Start value for eigen value computations\n eig_num_iter_val: Number of iterations to run for eigen computations\n smooth_val: Value of smoothness parameter\n penalty_val: Value of penalty for the current step\n learning_rate_val: Value of learning rate\n Returns:\n found_cert: True is negative certificate is found, False otherwise\n "
] |
Please provide a description of the function:def run_optimization(self):
penalty_val = self.params['init_penalty']
# Don't use smoothing initially - very inaccurate for large dimension
self.smooth_on = False
smooth_val = 0
learning_rate_val = self.params['init_learning_rate']
self.current_outer_step = 1
while self.current_outer_step <= self.params['outer_num_steps']:
tf.logging.info('Running outer step %d with penalty %f',
self.current_outer_step, penalty_val)
# Running inner loop of optimization with current_smooth_val,
# current_penalty as smoothness parameters and penalty respectively
self.current_step = 0
# Run first step with random eig initialization and large number of steps
found_cert = self.run_one_step(
self.dual_object.m_min_vec_estimate,
self.params['large_eig_num_steps'], smooth_val, penalty_val, learning_rate_val)
if found_cert:
return True
while self.current_step < self.params['inner_num_steps']:
self.current_step = self.current_step + 1
found_cert = self.run_one_step(self.dual_object.m_min_vec_estimate,
self.params['small_eig_num_steps'],
smooth_val, penalty_val,
learning_rate_val)
if found_cert:
return True
      # Update penalty only if it looks like the current objective is being optimized
if self.current_total_objective < UPDATE_PARAM_CONSTANT:
penalty_val = penalty_val * self.params['beta']
learning_rate_val = learning_rate_val*self.params['learning_rate_decay']
else:
# To get more accurate gradient estimate
self.params['small_eig_num_steps'] = (
1.5 * self.params['small_eig_num_steps'])
# If eigen values seem small enough, turn on smoothing
# useful only when performing full eigen decomposition
if np.abs(self.current_eig_val_estimate) < 0.01:
smooth_val = self.params['smoothness_parameter']
self.current_outer_step = self.current_outer_step + 1
return False
|
[
"Run the optimization, call run_one_step with suitable placeholders.\n\n Returns:\n True if certificate is found\n False otherwise\n "
] |
Please provide a description of the function:def load_target_class(input_dir):
with tf.gfile.Open(os.path.join(input_dir, 'target_class.csv')) as f:
return {row[0]: int(row[1]) for row in csv.reader(f) if len(row) >= 2}
|
[
"Loads target classes."
] |
Please provide a description of the function:def save_images(images, filenames, output_dir):
for i, filename in enumerate(filenames):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# so rescale them back to [0, 1].
with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:
imsave(f, (images[i, :, :, :] + 1.0) * 0.5, format='png')
|
[
"Saves images to the output directory.\n\n Args:\n images: array with minibatch of images\n filenames: list of filenames without path\n If number of file names in this list less than number of images in\n the minibatch then only first len(filenames) images will be saved.\n output_dir: directory where to save images\n "
] |
Please provide a description of the function:def main(_):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# eps is a difference between pixels so it should be in [0, 2] interval.
# Renormalizing epsilon from [0, 255] to [0, 2].
eps = 2.0 * FLAGS.max_epsilon / 255.0
alpha = 2.0 * FLAGS.iter_alpha / 255.0
num_iter = FLAGS.num_iter
batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
nb_classes = 1001
tf.logging.set_verbosity(tf.logging.INFO)
  all_images_target_class = load_target_class(FLAGS.input_dir)
with tf.Graph().as_default():
# Prepare graph
x_input = tf.placeholder(tf.float32, shape=batch_shape)
x_max = tf.clip_by_value(x_input + eps, -1.0, 1.0)
x_min = tf.clip_by_value(x_input - eps, -1.0, 1.0)
with slim.arg_scope(inception.inception_v3_arg_scope()):
inception.inception_v3(
x_input, num_classes=nb_classes, is_training=False)
x_adv = x_input
target_class_input = tf.placeholder(tf.int32, shape=[FLAGS.batch_size])
one_hot_target_class = tf.one_hot(target_class_input, nb_classes)
for _ in range(num_iter):
with slim.arg_scope(inception.inception_v3_arg_scope()):
logits, end_points = inception.inception_v3(
x_adv, num_classes=nb_classes, is_training=False, reuse=True)
cross_entropy = tf.losses.softmax_cross_entropy(one_hot_target_class,
logits,
label_smoothing=0.1,
weights=1.0)
cross_entropy += tf.losses.softmax_cross_entropy(one_hot_target_class,
end_points['AuxLogits'],
label_smoothing=0.1,
weights=0.4)
x_next = x_adv - alpha * tf.sign(tf.gradients(cross_entropy, x_adv)[0])
x_next = tf.clip_by_value(x_next, x_min, x_max)
x_adv = x_next
# Run computation
saver = tf.train.Saver(slim.get_model_variables())
session_creator = tf.train.ChiefSessionCreator(
scaffold=tf.train.Scaffold(saver=saver),
checkpoint_filename_with_path=FLAGS.checkpoint_path,
master=FLAGS.master)
with tf.train.MonitoredSession(session_creator=session_creator) as sess:
for filenames, images in load_images(FLAGS.input_dir, batch_shape):
target_class_for_batch = (
          [all_images_target_class[n] for n in filenames]
+ [0] * (FLAGS.batch_size - len(filenames)))
adv_images = sess.run(x_adv,
feed_dict={
x_input: images,
target_class_input: target_class_for_batch
})
save_images(adv_images, filenames, FLAGS.output_dir)
|
[
"Run the sample attack"
] |
Please provide a description of the function:def deepfool_batch(sess,
x,
pred,
logits,
grads,
X,
nb_candidate,
overshoot,
max_iter,
clip_min,
clip_max,
nb_classes,
feed=None):
X_adv = deepfool_attack(
sess,
x,
pred,
logits,
grads,
X,
nb_candidate,
overshoot,
max_iter,
clip_min,
clip_max,
feed=feed)
return np.asarray(X_adv, dtype=np_dtype)
|
[
"\n Applies DeepFool to a batch of inputs\n :param sess: TF session\n :param x: The input placeholder\n :param pred: The model's sorted symbolic output of logits, only the top\n nb_candidate classes are contained\n :param logits: The model's unnormalized output tensor (the input to\n the softmax layer)\n :param grads: Symbolic gradients of the top nb_candidate classes, procuded\n from gradient_graph\n :param X: Numpy array with sample inputs\n :param nb_candidate: The number of classes to test against, i.e.,\n deepfool only consider nb_candidate classes when\n attacking(thus accelerate speed). The nb_candidate\n classes are chosen according to the prediction\n confidence during implementation.\n :param overshoot: A termination criterion to prevent vanishing updates\n :param max_iter: Maximum number of iteration for DeepFool\n :param clip_min: Minimum value for components of the example returned\n :param clip_max: Maximum value for components of the example returned\n :param nb_classes: Number of model output classes\n :return: Adversarial examples\n "
] |
Please provide a description of the function:def deepfool_attack(sess,
x,
predictions,
logits,
grads,
sample,
nb_candidate,
overshoot,
max_iter,
clip_min,
clip_max,
feed=None):
adv_x = copy.copy(sample)
# Initialize the loop variables
iteration = 0
current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
if current.shape == ():
current = np.array([current])
w = np.squeeze(np.zeros(sample.shape[1:])) # same shape as original image
r_tot = np.zeros(sample.shape)
original = current # use original label as the reference
_logger.debug(
"Starting DeepFool attack up to %s iterations", max_iter)
# Repeat this main loop until we have achieved misclassification
while (np.any(current == original) and iteration < max_iter):
if iteration % 5 == 0 and iteration > 0:
_logger.info("Attack result at iteration %s is %s", iteration, current)
gradients = sess.run(grads, feed_dict={x: adv_x})
predictions_val = sess.run(predictions, feed_dict={x: adv_x})
for idx in range(sample.shape[0]):
pert = np.inf
if current[idx] != original[idx]:
continue
for k in range(1, nb_candidate):
w_k = gradients[idx, k, ...] - gradients[idx, 0, ...]
f_k = predictions_val[idx, k] - predictions_val[idx, 0]
        # add 1e-5 so the perturbation stays nonzero even when f_k = 0
pert_k = (abs(f_k) + 0.00001) / np.linalg.norm(w_k.flatten())
if pert_k < pert:
pert = pert_k
w = w_k
r_i = pert * w / np.linalg.norm(w)
r_tot[idx, ...] = r_tot[idx, ...] + r_i
adv_x = np.clip(r_tot + sample, clip_min, clip_max)
current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
if current.shape == ():
current = np.array([current])
# Update loop variables
iteration = iteration + 1
# need more revision, including info like how many succeed
_logger.info("Attack result at iteration %s is %s", iteration, current)
_logger.info("%s out of %s become adversarial examples at iteration %s",
sum(current != original),
sample.shape[0],
iteration)
# need to clip this image into the given range
adv_x = np.clip((1 + overshoot) * r_tot + sample, clip_min, clip_max)
return adv_x
|
[
"\n TensorFlow implementation of DeepFool.\n Paper link: see https://arxiv.org/pdf/1511.04599.pdf\n :param sess: TF session\n :param x: The input placeholder\n :param predictions: The model's sorted symbolic output of logits, only the\n top nb_candidate classes are contained\n :param logits: The model's unnormalized output tensor (the input to\n the softmax layer)\n :param grads: Symbolic gradients of the top nb_candidate classes, procuded\n from gradient_graph\n :param sample: Numpy array with sample input\n :param nb_candidate: The number of classes to test against, i.e.,\n deepfool only consider nb_candidate classes when\n attacking(thus accelerate speed). The nb_candidate\n classes are chosen according to the prediction\n confidence during implementation.\n :param overshoot: A termination criterion to prevent vanishing updates\n :param max_iter: Maximum number of iteration for DeepFool\n :param clip_min: Minimum value for components of the example returned\n :param clip_max: Maximum value for components of the example returned\n :return: Adversarial examples\n "
] |
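Per example, the inner update above picks the cheapest linearized decision boundary. A minimal NumPy sketch of one DeepFool step; grads_k holds the per-class logit gradients for a single example, f its logits, and both names are illustrative.

import numpy as np

def deepfool_step(grads_k, f, overshoot=0.02):
  # Candidate classes 1..K-1 are compared against the current class 0.
  perts = []
  for k in range(1, len(f)):
    w_k = grads_k[k] - grads_k[0]
    f_k = f[k] - f[0]
    perts.append((abs(f_k) + 1e-5) / np.linalg.norm(w_k.ravel()))
  k_star = int(np.argmin(perts)) + 1
  w = grads_k[k_star] - grads_k[0]
  # Overshoot pushes the example slightly past the linearized boundary.
  return (1 + overshoot) * perts[k_star - 1] * w / np.linalg.norm(w)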